From c68d735331158f655db965daae8d925b8a1b975c Mon Sep 17 00:00:00 2001 From: Ian Davis Date: Tue, 2 Nov 2021 06:23:18 -0700 Subject: [PATCH] Migrating repository --- .cargo/config.toml.template | 18 + .devcontainer/Dockerfile | 19 + .devcontainer/devcontainer.json | 69 + .github/workflows/ci.yml | 57 + .gitignore | 151 + .gitmodules | 5 + Cargo.toml | 7 + LICENSE | 21 + README.md | 2 + build.cmd | 14 + build.ps1 | 7 + build.sh | 15 + eng/build.ps1 | 53 + eng/manylinux.Dockerfile | 49 + eng/psakefile.ps1 | 272 ++ eng/settings.ps1 | 14 + eng/utils.ps1 | 224 ++ external/llvm-project | 1 + pyqir-generator/Cargo.toml | 42 + pyqir-generator/MANIFEST.in | 2 + pyqir-generator/README.md | 22 + pyqir-generator/pyproject.toml | 7 + pyqir-generator/pyqir_generator/__init__.py | 4 + pyqir-generator/pyqir_generator/builder.py | 268 ++ pyqir-generator/requirements-dev.txt | 3 + pyqir-generator/src/emit.rs | 141 + pyqir-generator/src/interop.rs | 138 + pyqir-generator/src/lib.rs | 10 + pyqir-generator/src/python.rs | 220 ++ pyqir-generator/src/qir/array1d.rs | 241 ++ pyqir-generator/src/qir/basic_values.rs | 55 + pyqir-generator/src/qir/calls.rs | 32 + pyqir-generator/src/qir/instructions.rs | 226 ++ pyqir-generator/src/qir/mod.rs | 66 + pyqir-generator/src/qir/qubits.rs | 24 + pyqir-generator/tests/test_api.py | 129 + pyqir-generator/tox.ini | 26 + pyqir-jit/Cargo.toml | 48 + pyqir-jit/MANIFEST.in | 2 + pyqir-jit/README.md | 22 + pyqir-jit/pyproject.toml | 7 + pyqir-jit/pyqir_jit/__init__.py | 5 + pyqir-jit/pyqir_jit/gateset.py | 58 + pyqir-jit/pyqir_jit/jit.py | 26 + pyqir-jit/requirements-dev.txt | 3 + pyqir-jit/src/gates.rs | 214 + pyqir-jit/src/interop.rs | 138 + pyqir-jit/src/intrinsics.rs | 243 ++ pyqir-jit/src/jit.rs | 98 + pyqir-jit/src/lib.rs | 11 + pyqir-jit/src/python.rs | 131 + pyqir-jit/src/runtime.rs | 121 + pyqir-jit/tests/bell_qir_measure.ll | 487 +++ pyqir-jit/tests/test_api.py | 73 + pyqir-jit/tox.ini | 26 + pyqir-parser/Cargo.toml | 39 + 
pyqir-parser/MANIFEST.in | 2 + pyqir-parser/README.md | 22 + pyqir-parser/pyproject.toml | 7 + pyqir-parser/pyqir_parser/__init__.py | 4 + pyqir-parser/pyqir_parser/parser.py | 912 +++++ pyqir-parser/requirements-dev.txt | 3 + pyqir-parser/src/lib.rs | 7 + pyqir-parser/src/parse.rs | 307 ++ pyqir-parser/src/python.rs | 1119 ++++++ .../tests/teleportchain.baseprofile.bc | Bin 0 -> 2896 bytes .../teleportchain.baseprofile.ll.reference | 111 + pyqir-parser/tests/teleportchain.ll.reference | 3448 +++++++++++++++++ pyqir-parser/tests/test_api.py | 91 + pyqir-parser/tox.ini | 26 + qirlib/Cargo.toml | 23 + qirlib/src/constants.rs | 58 + qirlib/src/context.rs | 188 + qirlib/src/intrinsics.rs | 202 + qirlib/src/lib.rs | 11 + qirlib/src/module.bc | Bin 0 -> 8556 bytes qirlib/src/module.ll | 579 +++ qirlib/src/passes.rs | 19 + qirlib/src/runtime_library.rs | 113 + qirlib/src/types.rs | 84 + 80 files changed, 11742 insertions(+) create mode 100644 .cargo/config.toml.template create mode 100644 .devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .github/workflows/ci.yml create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 Cargo.toml create mode 100644 LICENSE create mode 100644 README.md create mode 100644 build.cmd create mode 100755 build.ps1 create mode 100755 build.sh create mode 100644 eng/build.ps1 create mode 100644 eng/manylinux.Dockerfile create mode 100644 eng/psakefile.ps1 create mode 100644 eng/settings.ps1 create mode 100644 eng/utils.ps1 create mode 160000 external/llvm-project create mode 100644 pyqir-generator/Cargo.toml create mode 100644 pyqir-generator/MANIFEST.in create mode 100644 pyqir-generator/README.md create mode 100644 pyqir-generator/pyproject.toml create mode 100644 pyqir-generator/pyqir_generator/__init__.py create mode 100644 pyqir-generator/pyqir_generator/builder.py create mode 100644 pyqir-generator/requirements-dev.txt create mode 100644 pyqir-generator/src/emit.rs create 
mode 100644 pyqir-generator/src/interop.rs create mode 100644 pyqir-generator/src/lib.rs create mode 100644 pyqir-generator/src/python.rs create mode 100644 pyqir-generator/src/qir/array1d.rs create mode 100644 pyqir-generator/src/qir/basic_values.rs create mode 100644 pyqir-generator/src/qir/calls.rs create mode 100644 pyqir-generator/src/qir/instructions.rs create mode 100644 pyqir-generator/src/qir/mod.rs create mode 100644 pyqir-generator/src/qir/qubits.rs create mode 100644 pyqir-generator/tests/test_api.py create mode 100644 pyqir-generator/tox.ini create mode 100644 pyqir-jit/Cargo.toml create mode 100644 pyqir-jit/MANIFEST.in create mode 100644 pyqir-jit/README.md create mode 100644 pyqir-jit/pyproject.toml create mode 100644 pyqir-jit/pyqir_jit/__init__.py create mode 100644 pyqir-jit/pyqir_jit/gateset.py create mode 100644 pyqir-jit/pyqir_jit/jit.py create mode 100644 pyqir-jit/requirements-dev.txt create mode 100644 pyqir-jit/src/gates.rs create mode 100644 pyqir-jit/src/interop.rs create mode 100644 pyqir-jit/src/intrinsics.rs create mode 100644 pyqir-jit/src/jit.rs create mode 100644 pyqir-jit/src/lib.rs create mode 100644 pyqir-jit/src/python.rs create mode 100644 pyqir-jit/src/runtime.rs create mode 100644 pyqir-jit/tests/bell_qir_measure.ll create mode 100644 pyqir-jit/tests/test_api.py create mode 100644 pyqir-jit/tox.ini create mode 100644 pyqir-parser/Cargo.toml create mode 100644 pyqir-parser/MANIFEST.in create mode 100644 pyqir-parser/README.md create mode 100644 pyqir-parser/pyproject.toml create mode 100644 pyqir-parser/pyqir_parser/__init__.py create mode 100644 pyqir-parser/pyqir_parser/parser.py create mode 100644 pyqir-parser/requirements-dev.txt create mode 100644 pyqir-parser/src/lib.rs create mode 100644 pyqir-parser/src/parse.rs create mode 100644 pyqir-parser/src/python.rs create mode 100644 pyqir-parser/tests/teleportchain.baseprofile.bc create mode 100644 pyqir-parser/tests/teleportchain.baseprofile.ll.reference create mode 100644 
pyqir-parser/tests/teleportchain.ll.reference create mode 100644 pyqir-parser/tests/test_api.py create mode 100644 pyqir-parser/tox.ini create mode 100644 qirlib/Cargo.toml create mode 100644 qirlib/src/constants.rs create mode 100644 qirlib/src/context.rs create mode 100644 qirlib/src/intrinsics.rs create mode 100644 qirlib/src/lib.rs create mode 100644 qirlib/src/module.bc create mode 100644 qirlib/src/module.ll create mode 100644 qirlib/src/passes.rs create mode 100644 qirlib/src/runtime_library.rs create mode 100644 qirlib/src/types.rs diff --git a/.cargo/config.toml.template b/.cargo/config.toml.template new file mode 100644 index 00000000..4d6cd125 --- /dev/null +++ b/.cargo/config.toml.template @@ -0,0 +1,18 @@ +[target.x86_64-apple-darwin] +rustflags = [ + "-C", "link-arg=-undefined", + "-C", "link-arg=dynamic_lookup", +] + +[target.aarch64-apple-darwin] +rustflags = [ + "-C", "link-arg=-undefined", + "-C", "link-arg=dynamic_lookup", +] + +[target.x86_64-pc-windows-msvc] +rustflags = [ + "-C", "target-feature=+crt-static", + "-C", "control-flow-guard", +] + diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000..da86c1e5 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,19 @@ +# See here for image contents: https://github.com/microsoft/vscode-dev-containers/blob/v0.191.0/containers/codespaces-linux/.devcontainer/base.Dockerfile + +FROM mcr.microsoft.com/vscode/devcontainers/universal:1-linux + +USER root + +# Install CMake 3.20 (required since apt-get uses 3.16 and repo requires 3.20) +RUN curl -SsL https://github.com/Kitware/CMake/releases/download/v3.20.5/cmake-3.20.5-linux-x86_64.sh -o cmakeinstall.sh \ + && echo "f582e02696ceee81818dc3378531804b2213ed41c2a8bc566253d16d894cefab cmakeinstall.sh" | sha256sum -c --strict - \ + && chmod +x cmakeinstall.sh \ + && ./cmakeinstall.sh --prefix=/usr/local --exclude-subdir \ + && rm cmakeinstall.sh + +RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ + 
&& apt-get install -y --no-install-recommends ninja-build clang-11 clang-tidy-11 build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/ + +USER codespace diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..d13a7c9d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,69 @@ +{ + "name": "pyqir", + "build": { + "dockerfile": "Dockerfile", + }, + "settings": { + "python.pythonPath": "/opt/python/latest/bin/python", + "python.linting.enabled": true, + "python.linting.pylintEnabled": true, + "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", + "python.formatting.blackPath": "/usr/local/py-utils/bin/black", + "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf", + "python.linting.banditPath": "/usr/local/py-utils/bin/bandit", + "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", + "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", + "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle", + "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", + "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint", + "lldb.executable": "/usr/bin/lldb", + "files.watcherExclude": { + "**/target/**": true + }, + "terminal.integrated.defaultProfile.linux": "pwsh", + "terminal.integrated.profiles.linux": { + "bash": { + "path": "bash" + }, + "zsh": { + "path": "zsh" + }, + "fish": { + "path": "fish" + }, + "pwsh": { + "path": "pwsh", + "icon": "terminal-powershell" + } + } + }, + "remoteUser": "codespace", + "workspaceMount": "source=${localWorkspaceFolder},target=/home/pyqir,type=bind,consistency=cached", + "workspaceFolder": "/home/pyqir", + "mounts": [ + "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" + ], + "runArgs": [ + "--cap-add=SYS_PTRACE", + "--security-opt", + "seccomp=unconfined", + "--init" + ], + "containerEnv": { + "IN_DEV_CONTAINER": "true" + }, + // Add the IDs of extensions you 
want installed when the container is created. + "extensions": [ + "GitHub.vscode-pull-request-github", + "matklad.rust-analyzer", + "ms-vscode.powershell", + "ms-python.python", + "ms-vscode.cpptools", + "MS-vsliveshare.vsliveshare", + "VisualStudioExptTeam.vscodeintellicode" + ], + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + // "oryx build" will automatically install your dependencies and attempt to build your project + "postCreateCommand": "oryx build -p virtualenv_name=.venv --log-file /tmp/oryx-build.log --manifest-dir /tmp || echo 'Could not auto-build. Skipping.'" +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..9fd74fc5 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,57 @@ +name: CI +on: + push: + branches: [ $default-branch ] + pull_request: + branches: [ $default-branch ] + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + runs-on: ${{ matrix.config.os }} + env: ${{ matrix.config.env }} + strategy: + fail-fast: false + matrix: + config: + - { + os: "ubuntu-20.04", + arch: "amd64", + env: {}, + } + - { + os: "windows-2019", + arch: "amd64", + env: {}, + } + - { + os: "macOS-10.15", + arch: "amd64", + env: {}, + } + steps: + - uses: actions/checkout@v2 + with: + submodules: 'recursive' + - name: Linux - Install build dependencies, ccache, and update PATH to use linked versions of gcc, cc, etc + run: sudo apt-get install -y ccache ninja-build + if: ${{ matrix.config.os == 'ubuntu-20.04' }} + - name: Windows - Install build dependencies, ccache, and update PATH to use linked versions of gcc, cc, etc + run: choco install --accept-license -y sccache ninja + if: ${{ matrix.config.os == 'windows-2019' }} + - name: MacOS - Install build dependencies, ccache, and update PATH to use linked versions of gcc, cc, etc + run: brew install ccache ninja + if: ${{ matrix.config.os == 'macOS-10.15' }} + - name: Windows - Install LLVM 11.1.0 + 
run: choco install llvm --version=11.1.0 --allow-downgrade + if: ${{ matrix.config.os == 'windows-2019' }} + - name: "Build all" + run: ./build.ps1 + shell: pwsh + - name: Artifacts + uses: actions/upload-artifact@v2 + with: + name: artifacts + path: target/wheels/* \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..ca0bf593 --- /dev/null +++ b/.gitignore @@ -0,0 +1,151 @@ +# Generated by Cargo +# will have compiled files and executables +target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +/.cargo/config.toml \ No newline at end of file diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..6b21389f --- /dev/null +++ b/.gitmodules @@ -0,0 +1,5 @@ +[submodule "external/llvm-project"] + path = external/llvm-project + url = https://github.com/llvm/llvm-project.git + branch = release/11.x + shallow = true diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 00000000..c606b834 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,7 @@ +[workspace] +members = [ + "pyqir-generator", + "pyqir-jit", + "pyqir-parser", + "qirlib", +] diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..f136ca20 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 QIR Alliance + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 00000000..48724939 --- /dev/null +++ b/README.md @@ -0,0 +1,2 @@ +# PyQIR: Python APIs for QIR + diff --git a/build.cmd b/build.cmd new file mode 100644 index 00000000..fb83b67d --- /dev/null +++ b/build.cmd @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +@echo off + +if '%1'=='/?' goto help +if '%1'=='-help' goto help +if '%1'=='-h' goto help + +pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command "& '%~dp0\eng\build.ps1' %*" +exit /B %errorlevel% + +:help +pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command "& '%~dp0\eng\build.ps1' -help" diff --git a/build.ps1 b/build.ps1 new file mode 100755 index 00000000..513cf50b --- /dev/null +++ b/build.ps1 @@ -0,0 +1,7 @@ +#!/usr/bin/env pwsh + +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command "& '$(Join-Path $pwd eng build.ps1)' $args" +exit $LASTEXITCODE diff --git a/build.sh b/build.sh new file mode 100755 index 00000000..76a423f8 --- /dev/null +++ b/build.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env sh + +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +# Use greadlink on macOS. 
+if [ "$(uname)" = "Darwin" ]; then + which greadlink > /dev/null || { + printf 'GNU readlink not found\n' + exit 1 + } + alias readlink="greadlink" +fi + +pwsh -NoProfile -NonInteractive -Command "& $(dirname "$(readlink -f -- "$0")")/eng/build.ps1 $@" diff --git a/eng/build.ps1 b/eng/build.ps1 new file mode 100644 index 00000000..830f9ef2 --- /dev/null +++ b/eng/build.ps1 @@ -0,0 +1,53 @@ + +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +<# + .SYNOPSIS + Build: Bootstraps psake and invokes the build. +#> +[cmdletbinding()] +param( + [Parameter(Position = 0, Mandatory = 0)] + [string]$buildFile = "$(Join-Path $PSScriptRoot psakefile.ps1)", + [Parameter(Position = 1, Mandatory = 0)] + [string[]]$taskList = @(), + [Parameter(Position = 2, Mandatory = 0)] + [switch]$docs = $false, + [Parameter(Position = 3, Mandatory = 0)] + [System.Collections.Hashtable]$parameters = @{}, + [Parameter(Position = 4, Mandatory = 0)] + [System.Collections.Hashtable]$properties = @{}, + [Parameter(Position = 5, Mandatory = $false)] + [switch]$detailedDocs = $false +) + +if ($null -eq (Import-Module -Name psake -PassThru -ErrorAction SilentlyContinue)) { + Install-Module -Name Psake -Scope CurrentUser -Repository PSGallery -Force -Verbose +} + +$scriptPath = $(Split-Path -Path $MyInvocation.MyCommand.path -Parent) + +# '[p]sake' is the same as 'psake' but $Error is not polluted +Remove-Module -Name [p]sake -Verbose:$false +Import-Module -Name psake -Verbose:$false +if ($help) { + Get-Help -Name Invoke-psake -Full + return +} + +if ($buildFile -and (-not (Test-Path -Path $buildFile))) { + $absoluteBuildFile = (Join-Path -Path $scriptPath -ChildPath $buildFile) + if (Test-path -Path $absoluteBuildFile) { + $buildFile = $absoluteBuildFile + } +} + +$nologo = $true +$framework = $null +$initialization = {} +Invoke-psake $buildFile $taskList $framework $docs $parameters $properties $initialization $nologo $detailedDocs $notr + +if (!$psake.build_success) { + exit 
1 +} \ No newline at end of file diff --git a/eng/manylinux.Dockerfile b/eng/manylinux.Dockerfile new file mode 100644 index 00000000..94bd7c0f --- /dev/null +++ b/eng/manylinux.Dockerfile @@ -0,0 +1,49 @@ +FROM quay.io/pypa/manylinux2014_x86_64 as builder + +ENV PATH /root/.cargo/bin:$PATH + +# todo, lock down version +RUN curl --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + +WORKDIR /tmp +RUN curl -SsL https://github.com/PyO3/maturin/archive/refs/tags/v0.11.1.tar.gz -o v0.11.1.tar.gz && \ + tar -xz -f ./v0.11.1.tar.gz + +RUN mv ./maturin-0.11.1 /maturin + +# Manually update the timestamps as ADD keeps the local timestamps and cargo would then believe the cache is fresh +RUN touch /maturin/src/lib.rs /maturin/src/main.rs + +RUN cargo rustc --bin maturin --manifest-path /maturin/Cargo.toml --release -- -C link-arg=-s \ + && mv /maturin/target/release/maturin /usr/bin/maturin \ + && rm -rf /maturin + +FROM quay.io/pypa/manylinux2014_x86_64 + +ENV PATH /root/.cargo/bin:$PATH +# Add all supported python versions +ENV PATH /opt/python/cp36-cp36m/bin/:/opt/python/cp37-cp37m/bin/:/opt/python/cp38-cp38/bin/:/opt/python/cp39-cp39/bin/:$PATH +# Otherwise `cargo new` errors +ENV USER root + +RUN curl --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \ + && python3 -m pip install --no-cache-dir cffi \ + && mkdir /io + +COPY --from=builder /usr/bin/maturin /usr/bin/maturin + +WORKDIR /io + +RUN yum install -y libffi-devel ninja-build + +ADD https://repo.anaconda.com/miniconda/Miniconda3-py39_4.10.3-Linux-x86_64.sh /tmp/Minoconda.sh + +RUN /bin/bash /tmp/Minoconda.sh -b + +ENV PATH="/root/miniconda3/bin:${PATH}" + +RUN conda init && \ + conda install -y -c conda-forge clang-11 libstdcxx-devel_linux-64 libgcc-devel_linux-64 && \ + cp /root/miniconda3/bin/clang-11 /root/miniconda3/bin/clang++-11 + +RUN python -m pip install -U tox diff --git a/eng/psakefile.ps1 b/eng/psakefile.ps1 new file mode 100644 index 00000000..a8df1439 --- /dev/null +++ b/eng/psakefile.ps1 @@ -0,0 
+1,272 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +properties { + $repo = @{} + $repo.root = Resolve-Path (Split-Path -parent $PSScriptRoot) + + $pyqir = @{} + + $pyqir.parser = @{} + $pyqir.parser.name = "pyqir-parser" + $pyqir.parser.dir = Join-Path $repo.root "pyqir-parser" + + $pyqir.generator = @{} + $pyqir.generator.name = "pyqir-generator" + $pyqir.generator.dir = Join-Path $repo.root "pyqir-generator" + + $pyqir.jit = @{} + $pyqir.jit.name = "pyqir-jit" + $pyqir.jit.dir = Join-Path $repo.root "pyqir-jit" +} + +Include settings.ps1 +Include utils.ps1 + +Task default -Depends parser, generator, jit + +Task init { + Restore-ConfigTomlWithLlvmInfo + Test-Prerequisites + Initialize-Environment +} + +Task generator -Depends init { + Build-PyQIR($pyqir.generator.name) +} + +Task jit -Depends init { + Build-PyQIR($pyqir.jit.name) +} + +Task parser -Depends init { + Build-PyQIR($pyqir.parser.name) +} + +function Use-ExternalLlvmInstallation { + Write-BuildLog "Using LLVM installation specified by PYQIR_LLVM_EXTERNAL_DIR" + Assert (Test-Path $env:PYQIR_LLVM_EXTERNAL_DIR) "PYQIR_LLVM_EXTERNAL_DIR folder does not exist" + Use-LlvmInstallation $env:PYQIR_LLVM_EXTERNAL_DIR +} + +function Test-AllowedToDownloadLlvm { + # If PYQIR_DOWNLOAD_LLVM isn't set, we allow for download + # If it is set, then we use its value + !((Test-Path env:\PYQIR_DOWNLOAD_LLVM) -and ($env:PYQIR_DOWNLOAD_LLVM -eq $false)) +} + +function Get-LlvmDownloadBaseUrl { + if (Test-Path env:\PYQIR_LLVM_BUILDS_URL) { + $env:PYQIR_LLVM_BUILDS_URL + } + else + { "https://msquantumpublic.blob.core.windows.net/llvm-builds" } +} + +function Get-PackageExt { + $extension = ".tar.gz" + if ($IsWindows) { + $extension = ".zip" + } + $extension +} + +function Get-LlvmArchiveUrl { + $extension = Get-PackageExt + $baseUrl = Get-LlvmDownloadBaseUrl + "$baseUrl/$($packageName)$extension" +} + +function Get-LlvmArchiveShaUrl { + $extension = Get-PackageExt + $baseUrl = 
Get-LlvmDownloadBaseUrl + "$baseUrl/$($packageName)$extension.sha256" +} + +function Get-LlvmArchiveFileName { + $packageName = Get-PackageName + $extension = Get-PackageExt + "$($packageName)$extension" +} + +function Get-LlvmArchiveShaFileName { + $filename = Get-LlvmArchiveFileName + "$filename.sha256" +} + +function Install-LlvmFromBuildArtifacts { + [CmdletBinding()] + param ( + [Parameter()] + [string] + $packagePath + ) + + $outFile = Join-Path $($env:TEMP) (Get-LlvmArchiveFileName) + if ((Test-Path $outFile)) { + Remove-Item $outFile + } + + $archiveUrl = Get-LlvmArchiveUrl + Write-BuildLog "Dowloading $archiveUrl to $outFile" + Invoke-WebRequest -Uri $archiveUrl -OutFile $outFile + + $shaFile = Join-Path $($env:TEMP) (Get-LlvmArchiveShaFileName) + if ((Test-Path $shaFile)) { + Remove-Item $shaFile + } + + $sha256Url = Get-LlvmArchiveShaUrl + Write-BuildLog "Dowloading $sha256Url to $shaFile" + Invoke-WebRequest -Uri $sha256Url -OutFile $shaFile + Write-BuildLog "Calculating hash for $outFile" + $calculatedHash = (Get-FileHash -Path $outFile -Algorithm SHA256).Hash + + Write-BuildLog "Reading hash from $shaFile" + $expectedHash = (Get-Content -Path $shaFile) + + Assert ("$calculatedHash" -eq "$expectedHash") "The calculated hash $calculatedHash did not match the expected hash $expectedHash" + + $packagesRoot = Get-AqCacheDirectory + if ($IsWindows) { + Expand-Archive -Path $outFile -DestinationPath $packagesRoot + } + else { + tar -zxvf $outFile -C $packagesRoot + } + + $packageName = Get-PackageName + $packagePath = Get-InstallationDirectory $packageName + Use-LlvmInstallation $packagePath +} + +function Install-LlvmFromSource { + [CmdletBinding()] + param ( + [Parameter()] + [string] + $packagePath + ) + $Env:PKG_NAME = Get-PackageName + $Env:CMAKE_INSTALL_PREFIX = $packagePath + $Env:INSTALL_LLVM_PACKAGE = $true + . 
(Join-Path (Get-RepoRoot) "build" "llvm.ps1") + Use-LlvmInstallation $packagePath +} + +function Test-Prerequisites { + if (!(Test-LlvmSubmoduleInitialized)) { + Write-BuildLog "llvm-project submodule isn't initialized" + Write-BuildLog "Initializing submodules: git submodule init" + exec -workingDirectory ($repo.root ) { git submodule init } + Write-BuildLog "Updating submodules: git submodule update --depth 1 --recursive" + exec -workingDirectory ($repo.root ) { git submodule update --depth 1 --recursive } + } + Assert (Test-LlvmSubmoduleInitialized) "Failed to read initialized llvm-project submodule" +} + +function Initialize-Environment { + # if an external LLVM is specified, make sure it exist and + # skip further bootstapping + if (Test-Path env:\PYQIR_LLVM_EXTERNAL_DIR) { + Use-ExternalLlvmInstallation + } + else { + $PYQIR_LLVM_PACKAGE_GIT_VERSION = Get-LlvmSha + Write-BuildLog "llvm-project sha: $PYQIR_LLVM_PACKAGE_GIT_VERSION" + $packageName = Get-PackageName + + $packagePath = Get-InstallationDirectory $packageName + if (Test-Path $packagePath) { + Write-BuildLog "LLVM target $($PYQIR_LLVM_PACKAGE_GIT_VERSION) is already installed." + # LLVM is already downloaded + Use-LlvmInstallation $packagePath + } + else { + Write-BuildLog "LLVM target $($PYQIR_LLVM_PACKAGE_GIT_VERSION) is not installed." + if (Test-AllowedToDownloadLlvm) { + Write-BuildLog "Downloading LLVM target $packageName " + Install-LlvmFromBuildArtifacts $packagePath + } + else { + Write-BuildLog "Downloading LLVM Disabled, building from source." + # We don't have an external LLVM installation specified + # We are not downloading LLVM + # So we need to build it. + Install-LlvmFromSource $packagePath + } + } + } +} + + +# Only run the nested ManyLinux container +# build on Linux while not in a dev container +function Test-RunInContainer { + if ($IsLinux -and (Test-CI)) { + # If we are in a dev container, our workspace is already + # mounted into the container. 
If we try to mount our 'local' workspace + # into a nested container it will silently fail to mount. + !(Test-InDevContainer) + } + else { + $false + } +} + +function Build-PyQIR([string]$project) { + $srcPath = $repo.root + $installationDirectory = Resolve-InstallationDirectory + + if (Test-RunInContainer) { + function Build-ContainerImage { + Write-BuildLog "Building container image manylinux-llvm-builder" + exec -workingDirectory (Join-Path $srcPath eng) { + Get-Content manylinux.Dockerfile | docker build -t manylinux2014_x86_64_maturin - + } + } + function Invoke-ContainerImage { + Write-BuildLog "Running container image:" + $ioVolume = "$($srcPath):/io" + $llvmVolume = "$($installationDirectory):/usr/lib/llvm" + $userSpec = "" + + Write-BuildLog "docker run --rm $userSpec -v $ioVolume -v $llvmVolume -e LLVM_SYS_110_PREFIX=/usr/lib/llvm -w /io/$project manylinux2014_x86_64_maturin conda run --no-capture-output cargo test --release --lib -vv -- --nocapture" "command" + exec { + docker run --rm $userSpec -v $ioVolume -v $llvmVolume -e LLVM_SYS_110_PREFIX=/usr/lib/llvm -w /io/$project manylinux2014_x86_64_maturin conda run --no-capture-output cargo test --release --lib -vv -- --nocapture + } + + Write-BuildLog "docker run --rm $userSpec -v $ioVolume -v $llvmVolume -e LLVM_SYS_110_PREFIX=/usr/lib/llvm -w /io/$project manylinux2014_x86_64_maturin conda run --no-capture-output /usr/bin/maturin build --release" "command" + exec { + docker run --rm $userSpec -v $ioVolume -v $llvmVolume -e LLVM_SYS_110_PREFIX=/usr/lib/llvm -w /io/$project manylinux2014_x86_64_maturin conda run --no-capture-output /usr/bin/maturin build --release + } + + Write-BuildLog "docker run --rm $userSpec -v $ioVolume -v $llvmVolume -e LLVM_SYS_110_PREFIX=/usr/lib/llvm -w /io/$project manylinux2014_x86_64_maturin conda run --no-capture-output python -m tox -e test" "command" + exec { + docker run --rm $userSpec -v $ioVolume -v $llvmVolume -e LLVM_SYS_110_PREFIX=/usr/lib/llvm -w /io/$project 
manylinux2014_x86_64_maturin conda run --no-capture-output python -m tox -e test + } + } + + Build-ContainerImage + Invoke-ContainerImage + } + else { + exec -workingDirectory (Join-Path $srcPath $project) { + Write-BuildLog "& $python -m pip install --user -U pip" "command" + exec { & $python -m pip install --user -U pip } + + Write-BuildLog "& $python -m pip install --user maturin tox" "command" + exec { & $python -m pip install --user maturin tox } + + Write-BuildLog "& $python -m tox -e test" "command" + exec { & $python -m tox -e test } + #exec { & maturin develop && pytest } + + Write-BuildLog "& $python -m tox -e pack" "command" + exec { & $python -m tox -e pack } + } + + #Write-BuildLog "& cargo test --package qirlib --lib -vv -- --nocapture" "command" + #exec -workingDirectory $srcPath { & cargo test --package qirlib --lib -vv -- --nocapture } + } +} diff --git a/eng/settings.ps1 b/eng/settings.ps1 new file mode 100644 index 00000000..7037d6f4 --- /dev/null +++ b/eng/settings.ps1 @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +$python = "python3" +if ($null -ne (Get-Command python -ErrorAction SilentlyContinue)) { + $pythonIsPython3 = (python --version) -match "Python 3.*" + if ($pythonIsPython3) { + $python = "python" + } +} + +properties { + $python = $python +} \ No newline at end of file diff --git a/eng/utils.ps1 b/eng/utils.ps1 new file mode 100644 index 00000000..7d017b70 --- /dev/null +++ b/eng/utils.ps1 @@ -0,0 +1,224 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +if (!(Test-Path function:\Get-RepoRoot)) { + # pin working directory to this repo in case + # we are ever in a submodule + function Get-RepoRoot { + exec -workingDirectory $PSScriptRoot { + git rev-parse --show-toplevel + } + } +} + +# Fix temp path for non-windows platforms if missing +if (!(Test-Path env:\TEMP)) { + $env:TEMP = [System.IO.Path]::GetTempPath() +} + +#### +# Utilities +#### + +# returns true if the script is running on a build agent, false otherwise +function Test-CI { + if (Test-Path env:\TF_BUILD) { + $true + } + elseif ((Test-Path env:\CI)) { + $env:CI -eq $true + } + else { + $false + } +} + +# Writes an Azure DevOps message with default debug severity +function Write-BuildLog { + param ( + [Parameter(Mandatory = $true)] + [string]$message, + [Parameter(Mandatory = $false)] + [ValidateSet("group", "warning", "error", "section", "debug", "command", "endgroup")] + [string]$severity = "debug" + ) + Write-Host "##[$severity]$message" +} + +# Returns true if a command with the specified name exists. +function Test-CommandExists($name) { + $null -ne (Get-Command $name -ErrorAction SilentlyContinue) +} + +# Returns true if the current environment is a dev container. +function Test-InDevContainer { + $IsLinux -and (Test-Path env:\IN_DEV_CONTAINER) +} + +# Updates the cargo package version with the version specified. 
+function Restore-CargoTomlWithVersionInfo ($inputFile, $outputFile, $version) { + $outFile = New-Item -ItemType File -Path $outputFile + $inPackageSection = $false + switch -regex -file $inputFile { + "^\[(.+)\]" { + # Section + $section = $matches[1] + $inPackageSection = $section -eq "package" + Add-Content -Path $outFile -Value $_ + } + "(.+?)\s*=(.*)" { + # Key/Value + $key, $value = $matches[1..2] + if ($inPackageSection -and ($key -eq "version")) { + $value = "version = ""$($version)""" + Add-Content -Path $outFile -Value $value + } + else { + Add-Content -Path $outFile -Value $_ + } + } + default { + Add-Content -Path $outFile -Value $_ + } + } +} + +# Copies the default config.toml and sets the [env] config +# section to specify the variables needed for llvm-sys/inkwell +# This allows us to not need the user to specify env vars to build. +function Restore-ConfigTomlWithLlvmInfo { + $cargoPath = Resolve-Path (Join-Path (Get-RepoRoot) '.cargo') + $configTemplatePath = Join-Path $cargoPath config.toml.template + $configPath = Join-Path $cargoPath config.toml + + # remove the old file if it exists. + if (Test-Path $configPath) { + Remove-Item $configPath + } + + # ensure the output folder is there, `mkdir -p` equivalent + New-Item -ItemType Directory -Path $cargoPath -Force | Out-Null + + # copy the template + Copy-Item $configTemplatePath $configPath + + # append the env vars to the new config + $installationDirectory = Resolve-InstallationDirectory + Add-Content -Path $configPath -Value "[env]" + Add-Content -Path $configPath -Value "LLVM_SYS_110_PREFIX = '$installationDirectory'" +} + +function Get-LlvmSubmoduleSha { + $status = Get-LlvmSubmoduleStatus + $sha = $status.Substring(1, 9) + $sha +} + +function Get-LlvmSubmoduleStatus { + Write-BuildLog "Detected submodules: $(git submodule status --cached)" + $statusResult = exec -workingDirectory (Get-RepoRoot) { git submodule status --cached } + # on all platforms, the status uses '/' in the module path. 
+ $status = $statusResult.Split([Environment]::NewLine) | Where-Object { $_.Contains("external/llvm-project") } | Select-Object -First 1 + $status +} + +function Test-LlvmSubmoduleInitialized { + $status = Get-LlvmSubmoduleStatus + if ($status.Substring(0, 1) -eq "-") { + Write-BuildLog "LLVM Submodule Uninitialized" + return $false + } + else { + Write-BuildLog "LLVM Submodule Initialized" + return $true + } +} + +# Gets the LLVM package triple for the current platform +function Get-TargetTriple { + $triple = "unknown" + if ($IsWindows) { + $triple = "x86_64-pc-windows-msvc-static" + } + elseif ($IsLinux) { + $triple = "x86_64-unknown-linux-gnu" + } + elseif ($IsMacOS) { + $triple = "x86_64-apple-darwin" + } + $triple +} + +# This method should be able to be removed when Rust 1.56 is released +# which contains the feature for env sections in the .cargo/config.toml +function Use-LlvmInstallation { + param ( + [string]$path + ) + Write-BuildLog "LLVM installation set to: $path" + $env:LLVM_SYS_110_PREFIX = $path +} + +# Gets the LLVM version git hash +# on the CI this will come as an env var +function Get-LlvmSha { + # Sometimes the CI fails to initilize PYQIR_LLVM_PACKAGE_GIT_VERSION correctly + # so we need to make sure it isn't empty. 
+ if ((Test-Path env:\PYQIR_LLVM_PACKAGE_GIT_VERSION) -and ![string]::IsNullOrWhiteSpace($Env:PYQIR_LLVM_PACKAGE_GIT_VERSION)) { + Write-BuildLog "Use environment submodule version: $($env:PYQIR_LLVM_PACKAGE_GIT_VERSION)" + $env:PYQIR_LLVM_PACKAGE_GIT_VERSION + } + else { + $sha = exec { Get-LlvmSubmoduleSha } + Write-BuildLog "Use cached submodule version: $sha" + $sha + } +} + +function Get-PackageName { + $sha = Get-LlvmSha + $TARGET_TRIPLE = Get-TargetTriple + $packageName = "aq-llvm-$($TARGET_TRIPLE)-$($sha)" + $packageName +} + +function Get-DefaultInstallDirectory { + if (Test-Path env:\PYQIR_CACHE_DIR) { + $env:PYQIR_CACHE_DIR + } + else { + Join-Path "$HOME" ".pyqir" + } +} + +function Get-AqCacheDirectory { + $aqCacheDirectory = (Get-DefaultInstallDirectory) + if (!(Test-Path $aqCacheDirectory)) { + mkdir $aqCacheDirectory | Out-Null + } + Resolve-Path $aqCacheDirectory +} + +function Get-InstallationDirectory { + [CmdletBinding()] + param ( + [Parameter()] + [string] + $packageName + ) + $aqCacheDirectory = Get-AqCacheDirectory + $packagePath = Join-Path $aqCacheDirectory $packageName + $packagePath +} + +function Resolve-InstallationDirectory { + if (Test-Path env:\PYQIR_LLVM_EXTERNAL_DIR) { + return $env:PYQIR_LLVM_EXTERNAL_DIR + } + else { + $packageName = Get-PackageName + + $packagePath = Get-InstallationDirectory $packageName + return $packagePath + } +} diff --git a/external/llvm-project b/external/llvm-project new file mode 160000 index 00000000..1fdec59b --- /dev/null +++ b/external/llvm-project @@ -0,0 +1 @@ +Subproject commit 1fdec59bffc11ae37eb51a1b9869f0696bfd5312 diff --git a/pyqir-generator/Cargo.toml b/pyqir-generator/Cargo.toml new file mode 100644 index 00000000..ae656267 --- /dev/null +++ b/pyqir-generator/Cargo.toml @@ -0,0 +1,42 @@ +[package] +authors = ["Microsoft"] +name = "pyqir-generator" +version = "0.1.0" +edition = "2018" +license = "MIT" +description = "Python based QIR generator library." 
+readme = "README.md" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +qirlib = { path = "../qirlib" } +inkwell = { git = "https://github.com/TheDan64/inkwell", branch = "master", default-features = false, features = ["llvm11-0", "target-x86"] } +env_logger = "0.9.0" +log = "0.4.14" + +[dependencies.pyo3] +version = "0.14.2" + +[features] +extension-module = ["pyo3/abi3-py36", "pyo3/extension-module"] +default = ["extension-module"] + +[lib] +name = "pyqir_generator" +crate-type = ["cdylib"] + +[package.metadata.maturin] +classifier=[ + "License :: OSI Approved :: MIT License", + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python", + "Programming Language :: Rust", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", +] diff --git a/pyqir-generator/MANIFEST.in b/pyqir-generator/MANIFEST.in new file mode 100644 index 00000000..4b8d0b8f --- /dev/null +++ b/pyqir-generator/MANIFEST.in @@ -0,0 +1,2 @@ +include pyproject.toml Cargo.toml +recursive-include src * \ No newline at end of file diff --git a/pyqir-generator/README.md b/pyqir-generator/README.md new file mode 100644 index 00000000..dae6d935 --- /dev/null +++ b/pyqir-generator/README.md @@ -0,0 +1,22 @@ +# pyqir-generator + +## Building and Testing + +To build this package, first install `maturin`: + +```shell +pip install maturin +``` + +To build and test use `maturin develop`: + +```shell +pip install -r requirements-dev.txt +maturin develop && pytest +``` + +Alternatively, install tox and run the tests inside an isolated environment: + +```shell +tox -e py +``` \ No newline at end of file diff --git a/pyqir-generator/pyproject.toml b/pyqir-generator/pyproject.toml new file mode 100644 index 
00000000..d221dc75 --- /dev/null +++ b/pyqir-generator/pyproject.toml @@ -0,0 +1,7 @@ +[project] +name = "pyqir_generator" +requires-python = ">=3.6" + +[build-system] +requires = ["maturin>=0.10,<0.12"] +build-backend = "maturin" diff --git a/pyqir-generator/pyqir_generator/__init__.py b/pyqir-generator/pyqir_generator/__init__.py new file mode 100644 index 00000000..d240b890 --- /dev/null +++ b/pyqir-generator/pyqir_generator/__init__.py @@ -0,0 +1,4 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. + +from .builder import * diff --git a/pyqir-generator/pyqir_generator/builder.py b/pyqir-generator/pyqir_generator/builder.py new file mode 100644 index 00000000..da39c0db --- /dev/null +++ b/pyqir-generator/pyqir_generator/builder.py @@ -0,0 +1,268 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. + +from typing import Any +from .pyqir_generator import * + +class QirBuilder: + """ + The QirBuilder object describes quantum circuits and emits QIR + + :param module: name of the QIR module + :type module: str + """ + + def __init__(self, module: str): + self.pyqir = PyQIR(module) + + def cx(self, control: str, target: str): + """ + Applies controlled X operation to the target qubit + + :param control: name of the control qubit + :type control: str + :param target: name of the target qubit + :type target: str + """ + self.pyqir.cx(control, target) + + def cz(self, control: str, target: str): + """ + Applies controlled Z operation to the target qubit + + :param control: name of the control qubit + :type control: str + :param target: name of the target qubit + :type target: str + """ + self.pyqir.cz(control, target) + + def h(self, target: str): + """ + Applies H operation to the target qubit + + :param target: name of the target qubit + :type target: str + """ + self.pyqir.h(target) + + def m(self, qubit: str, target: str): + """ + Applies measurement operation or the source qubit into the target register + + :param 
qubit: name of the source qubit + :type qubit: str + :param target: name of the target register + :type target: str + """ + self.pyqir.m(qubit, target) + + def reset(self, target: str): + """ + Applies Reset operation to the target qubit + + :param target: name of the target qubit + :type target: str + """ + self.pyqir.reset(target) + + def rx(self, theta: float, qubit: str): + """ + Applies Rx operation to the target qubit + + :param theta: rotation value for target qubit + :type theta: float + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.rx(theta, qubit) + + def ry(self, theta: float, qubit: str): + """ + Applies Ry operation to the target qubit + + :param theta: rotation value for target qubit + :type theta: float + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.ry(theta, qubit) + + def rz(self, theta: float, qubit: str): + """ + Applies Rz operation to the target qubit + + :param theta: rotation value for target qubit + :type theta: float + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.rz(theta, qubit) + + def s(self, qubit: str): + """ + Applies S operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.s(qubit) + + def s_adj(self, qubit: str): + """ + Applies SAdj operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.s_adj(qubit) + + def t(self, qubit: str): + """ + Applies T operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.t(qubit) + + def t_adj(self, qubit: str): + """ + Applies TAdj operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.t_adj(qubit) + + def x(self, qubit: str): + """ + Applies X operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.x(qubit) + + def y(self, qubit: 
str): + """ + Applies Y operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.y(qubit) + + def z(self, qubit: str): + """ + Applies Z operation to the target qubit + + :param qubit: name of the target qubit + :type qubit: str + """ + self.pyqir.z(qubit) + + def dump_machine(self): + """ + + """ + self.pyqir.dump_machine() + + def add_classical_register(self, name: str, size: int): + """ + Models a classical register of the given size. The individual values + are accessed by name "" with 0 based indicies. + Example: + builder = QirBuilder("Bell circuit") + builder.add_quantum_register("qr", 2) + builder.add_classical_register("qc", 2) + builder.h("qr0") + builder.cx("qr0", "qr1") + builder.m("qr0", "qc0") + builder.m("qr1", "qc1") + builder.build("bell_measure.ll") + + :param name: name of the register + :type name: str + :param size: size of the register + :type size: int + """ + self.pyqir.add_classical_register(name, size) + + def add_quantum_register(self, name: str, size: int): + """ + Models an array of qubits of the given size. The individual values + are accessed by name "" with 0 based indicies. + Example: + builder = QirBuilder("Bell circuit") + builder.add_quantum_register("qr", 2) + builder.add_classical_register("qc", 2) + builder.h("qr0") + builder.cx("qr0", "qr1") + builder.m("qr0", "qc0") + builder.m("qr1", "qc1") + builder.build("bell_measure.ll") + + :param name: name of the register + :type name: str + :param size: size of the register + :type size: int + """ + self.pyqir.add_quantum_register(name, size) + + def build(self, file_path: str): + """ + Writes the modeled circuit to the supplied file. 
+ + :param file_path: file path of generated QIR + :type file_path: str + """ + self.pyqir.write(file_path) + + def build_with(self, pyobj: Any): + """ + JIT compiles the circuit delegating quantum operations to the supplied object + + :param pyobj: python GateSet object defining the quantum operations + :type pyobj: str + """ + self.pyqir.build_with_python(pyobj) + + def get_ir_string(self): + """ + Returns the modeled circuit as an LLVM IR module (human readable) string. + """ + return self.pyqir.get_ir_string() + + def get_bitcode_base64_string(self): + """ + Returns the modeled circuit as a base64 encoded LLVM bitcode module. + """ + return self.pyqir.get_bitcode_base64_string() + + def enable_logging(self): + """ + Enables the logging infrastructure + Controlled via the RUST_LOG environment variable. + See https://docs.rs/env_logger/0.9.0/env_logger/#enabling-logging for details + + Example: + in tests.py: + def test_logging(): + builder = QirBuilder("logging test") + builder.enable_logging() + builder.add_quantum_register("qr", 1) + builder.h("qr0") + builder.build("test.ll") + + PowerShell: + $env:RUST_LOG="info" + python -m pytest + Bash: + RUST_LOG=info python -m pytest + + Example Output: + [2021-09-15T16:55:46Z INFO pyqir::python] Adding qr[0] + [2021-09-15T16:55:46Z INFO pyqir::python] h => qr0 + """ + self.pyqir.enable_logging() diff --git a/pyqir-generator/requirements-dev.txt b/pyqir-generator/requirements-dev.txt new file mode 100644 index 00000000..e5581af7 --- /dev/null +++ b/pyqir-generator/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest>=3.5.0 +pip>=21.3 +maturin>=0.10,<0.12 diff --git a/pyqir-generator/src/emit.rs b/pyqir-generator/src/emit.rs new file mode 100644 index 00000000..2bd7da80 --- /dev/null +++ b/pyqir-generator/src/emit.rs @@ -0,0 +1,141 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use std::collections::HashMap; + +use inkwell::values::BasicValueEnum; +use qirlib::context::{Context, ContextType}; + +use crate::{interop::SemanticModel, qir}; + +pub fn write_model_to_file(model: &SemanticModel, file_name: &str) -> Result<(), String> { + let ctx = inkwell::context::Context::create(); + let context = populate_context(&ctx, &model)?; + + context.emit_ir(file_name)?; + + Ok(()) +} + +pub fn get_ir_string(model: &SemanticModel) -> Result { + let ctx = inkwell::context::Context::create(); + let context = populate_context(&ctx, &model)?; + + let ir = context.get_ir_string(); + + Ok(ir) +} + +pub fn get_bitcode_base64_string(model: &SemanticModel) -> Result { + let ctx = inkwell::context::Context::create(); + let context = populate_context(&ctx, &model)?; + + let b64 = context.get_bitcode_base64_string(); + + Ok(b64) +} + + +pub fn populate_context<'a>( + ctx: &'a inkwell::context::Context, + model: &'a SemanticModel, +) -> Result, String> { + let context_type = ContextType::Template(&model.name); + match Context::new(&ctx, context_type) { + Err(err) => { + let message = err.to_string(); + return Err(message); + } + Ok(context) => { + build_entry_function(&context, model)?; + Ok(context) + } + } +} + +fn build_entry_function(context: &Context<'_>, model: &SemanticModel) -> Result<(), String> { + let entrypoint = qir::get_entry_function(context); + + let entry = context.context.append_basic_block(entrypoint, "entry"); + context.builder.position_at_end(entry); + + let qubits = write_qubits(&model, context); + + let registers = write_registers(&model, context); + + write_instructions(&model, context, &qubits, ®isters); + + free_qubits(context, &qubits); + + let output = registers.get("results").unwrap(); + context.builder.build_return(Some(&output.0)); + + if let Err(err) = context.module.verify() { + let message = err.to_string(); + return Err(message); + } + Ok(()) +} + +fn free_qubits<'ctx>(context: &Context<'ctx>, qubits: &HashMap>) { + for (_, 
value) in qubits.iter() { + qir::qubits::emit_release(context, value); + } +} + +fn write_qubits<'ctx>( + model: &SemanticModel, + context: &Context<'ctx>, +) -> HashMap> { + let qubits = model + .qubits + .iter() + .map(|reg| { + let indexed_name = format!("{}{}", ®.name[..], reg.index); + let value = qir::qubits::emit_allocate(&context, indexed_name.as_str()); + (indexed_name, value) + }) + .collect(); + + qubits +} + +fn write_registers<'ctx>( + model: &SemanticModel, + context: &Context<'ctx>, +) -> HashMap, Option)> { + let mut registers = HashMap::new(); + let number_of_registers = model.registers.len() as u64; + if number_of_registers > 0 { + let results = + qir::array1d::emit_array_allocate1d(&context, 8, number_of_registers, "results"); + registers.insert(String::from("results"), (results, None)); + let mut sub_results = vec![]; + for reg in model.registers.iter() { + let (sub_result, entries) = + qir::array1d::emit_array_1d(context, reg.name.as_str(), reg.size.clone()); + sub_results.push(sub_result); + registers.insert(reg.name.clone(), (sub_result, None)); + for (index, _) in entries { + registers.insert(format!("{}{}", reg.name, index), (sub_result, Some(index))); + } + } + qir::array1d::set_elements(&context, &results, sub_results, "results"); + registers + } else { + let results = qir::array1d::emit_empty_result_array_allocate1d(&context, "results"); + registers.insert(String::from("results"), (results, None)); + registers + } +} + +fn write_instructions<'ctx>( + model: &SemanticModel, + context: &Context<'ctx>, + qubits: &HashMap>, + registers: &HashMap, Option)>, +) { + for inst in model.instructions.iter() { + qir::instructions::emit(context, inst, qubits, registers); + } +} diff --git a/pyqir-generator/src/interop.rs b/pyqir-generator/src/interop.rs new file mode 100644 index 00000000..93c12277 --- /dev/null +++ b/pyqir-generator/src/interop.rs @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct QuantumRegister { + pub name: String, + pub index: u64, +} + +impl QuantumRegister { + pub fn new(name: String, index: u64) -> Self { + QuantumRegister { name, index } + } + + pub fn as_register(&self) -> Register { + Register::Quantum(self.clone()) + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct ClassicalRegister { + pub name: String, + pub size: u64, +} + +impl ClassicalRegister { + pub fn new(name: String, size: u64) -> Self { + ClassicalRegister { name, size } + } + + pub fn as_register(&self) -> Register { + Register::Classical(self.clone()) + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Register { + Quantum(QuantumRegister), + Classical(ClassicalRegister), +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Controlled { + pub control: String, + pub target: String, +} + +impl Controlled { + pub fn new(control: String, target: String) -> Self { + Controlled { control, target } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Measured { + pub qubit: String, + pub target: String, +} + +impl Measured { + pub fn new(qubit: String, target: String) -> Self { + Measured { qubit, target } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct Rotated { + pub theta: f64, + pub qubit: String, +} + +impl Rotated { + pub fn new(theta: f64, qubit: String) -> Self { + Rotated { theta, qubit } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Single { + pub qubit: String, +} + +impl Single { + pub fn new(qubit: String) -> Self { + Single { qubit } + } +} + +// https://github.com/microsoft/qsharp-language/blob/ageller/profile/Specifications/QIR/Base-Profile.md +#[derive(Clone, Debug, PartialEq)] +pub enum Instruction { + Cx(Controlled), + Cz(Controlled), + H(Single), + M(Measured), + Reset(Single), + Rx(Rotated), + Ry(Rotated), + Rz(Rotated), + S(Single), + SAdj(Single), + T(Single), + TAdj(Single), + X(Single), + Y(Single), + Z(Single), + DumpMachine, 
+} + +#[derive(Clone, Default)] +pub struct SemanticModel { + pub name: String, + pub registers: Vec, + pub qubits: Vec, + pub instructions: Vec, +} + +impl SemanticModel { + pub fn new(name: String) -> Self { + SemanticModel { + name: name, + registers: vec![], + qubits: vec![], + instructions: vec![], + } + } + + pub fn add_reg(&mut self, reg: Register) { + match ® { + Register::Classical(creg) => self.registers.push(creg.to_owned()), + Register::Quantum(qreg) => self.qubits.push(qreg.to_owned()), + } + } + + pub fn add_inst(&mut self, inst: Instruction) { + self.instructions.push(inst); + } +} diff --git a/pyqir-generator/src/lib.rs b/pyqir-generator/src/lib.rs new file mode 100644 index 00000000..0e2b5b10 --- /dev/null +++ b/pyqir-generator/src/lib.rs @@ -0,0 +1,10 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#![deny(clippy::all, clippy::pedantic)] + +pub mod emit; +pub mod interop; +pub mod python; +pub mod qir; + diff --git a/pyqir-generator/src/python.rs b/pyqir-generator/src/python.rs new file mode 100644 index 00000000..8790bb9f --- /dev/null +++ b/pyqir-generator/src/python.rs @@ -0,0 +1,220 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use log; +use pyo3::exceptions::{PyOSError}; +use pyo3::prelude::*; +use pyo3::PyErr; + +use crate::emit::{get_bitcode_base64_string, get_ir_string, write_model_to_file}; +use crate::interop::{ClassicalRegister, Controlled, Instruction, Measured, QuantumRegister, Rotated, SemanticModel, Single}; + +#[pymodule] +fn pyqir_generator(_py: Python<'_>, m: &PyModule) -> PyResult<()> { + m.add_class::()?; + + Ok(()) +} + +#[pyclass] +pub struct PyQIR { + pub(super) model: SemanticModel, +} + +#[pymethods] +impl PyQIR { + #[new] + fn new(name: String) -> Self { + PyQIR { + model: SemanticModel::new(name), + } + } + + fn add_measurement(&mut self, qubit: String, target: String) -> PyResult<()> { + log::info!("measure {} => {}", qubit, target); + Ok(()) + } + + fn cx(&mut self, control: String, target: String) -> PyResult<()> { + log::info!("cx {} => {}", control, target); + let controlled = Controlled::new(control, target); + let inst = Instruction::Cx(controlled); + self.model.add_inst(inst); + Ok(()) + } + + fn cz(&mut self, control: String, target: String) -> PyResult<()> { + log::info!("cz {} => {}", control, target); + let controlled = Controlled::new(control, target); + let inst = Instruction::Cz(controlled); + self.model.add_inst(inst); + Ok(()) + } + + fn h(&mut self, qubit: String) -> PyResult<()> { + log::info!("h => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::H(single); + self.model.add_inst(inst); + Ok(()) + } + + fn m(&mut self, qubit: String, target: String) -> PyResult<()> { + log::info!("m {}[{}]", qubit, target); + let inst = Measured::new(qubit, target); + let inst = Instruction::M(inst); + self.model.add_inst(inst); + Ok(()) + } + + fn reset(&mut self, qubit: String) -> PyResult<()> { + log::info!("reset => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::Reset(single); + self.model.add_inst(inst); + Ok(()) + } + + fn rx(&mut self, theta: f64, qubit: String) -> PyResult<()> { + log::info!("rx {} => 
{}", qubit, theta); + let rotated = Rotated::new(theta, qubit); + let inst = Instruction::Rx(rotated); + self.model.add_inst(inst); + Ok(()) + } + + fn ry(&mut self, theta: f64, qubit: String) -> PyResult<()> { + log::info!("ry {} => {}", qubit, theta); + let rotated = Rotated::new(theta, qubit); + let inst = Instruction::Ry(rotated); + self.model.add_inst(inst); + Ok(()) + } + + fn rz(&mut self, theta: f64, qubit: String) -> PyResult<()> { + log::info!("rz {} => {}", qubit, theta); + let rotated = Rotated::new(theta, qubit); + let inst = Instruction::Rz(rotated); + self.model.add_inst(inst); + Ok(()) + } + + fn s(&mut self, qubit: String) -> PyResult<()> { + log::info!("s => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::S(single); + self.model.add_inst(inst); + Ok(()) + } + + fn s_adj(&mut self, qubit: String) -> PyResult<()> { + log::info!("s_adj => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::SAdj(single); + self.model.add_inst(inst); + Ok(()) + } + + fn t(&mut self, qubit: String) -> PyResult<()> { + log::info!("t => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::T(single); + self.model.add_inst(inst); + Ok(()) + } + + fn t_adj(&mut self, qubit: String) -> PyResult<()> { + log::info!("t_adj => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::TAdj(single); + self.model.add_inst(inst); + Ok(()) + } + + fn x(&mut self, qubit: String) -> PyResult<()> { + log::info!("x => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::X(single); + self.model.add_inst(inst); + Ok(()) + } + + fn y(&mut self, qubit: String) -> PyResult<()> { + log::info!("y => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::Y(single); + self.model.add_inst(inst); + Ok(()) + } + + fn dump_machine(&mut self) -> PyResult<()> { + log::info!("dump_machine"); + let inst = Instruction::DumpMachine; + self.model.add_inst(inst); + Ok(()) + } + + fn 
z(&mut self, qubit: String) -> PyResult<()> { + log::info!("z => {}", qubit); + let single = Single::new(qubit); + let inst = Instruction::Z(single); + self.model.add_inst(inst); + Ok(()) + } + + fn add_quantum_register(&mut self, name: String, size: u64) -> PyResult<()> { + let ns = name.as_str(); + for index in 0..size { + let register_name = format!("{}[{}]", ns, index); + log::info!("Adding {}", register_name); + let reg = QuantumRegister { + name: String::from(ns), + index, + }; + self.model.add_reg(reg.as_register()); + } + Ok(()) + } + + fn add_classical_register(&mut self, name: String, size: u64) -> PyResult<()> { + let ns = name.clone(); + let reg = ClassicalRegister { name, size }; + log::info!("Adding {}({})", ns, size); + self.model.add_reg(reg.as_register()); + Ok(()) + } + + fn write(&self, file_name: &str) -> PyResult<()> { + if let Err(msg) = write_model_to_file(&self.model, file_name) { + let err: PyErr = PyOSError::new_err::(msg); + return Err(err); + } + Ok(()) + } + + fn get_ir_string(&self) -> PyResult { + match get_ir_string(&self.model) { + Err(msg) => { + let err: PyErr = PyOSError::new_err::(msg); + Err(err) + } + Ok(ir) => Ok(ir), + } + } + + fn get_bitcode_base64_string(&self) -> PyResult { + match get_bitcode_base64_string(&self.model) { + Err(msg) => { + let err: PyErr = PyOSError::new_err::(msg); + Err(err) + } + Ok(ir) => Ok(ir), + } + } + + fn enable_logging(&self) -> PyResult<()> { + let _ = env_logger::try_init(); + Ok(()) + } +} + diff --git a/pyqir-generator/src/qir/array1d.rs b/pyqir-generator/src/qir/array1d.rs new file mode 100644 index 00000000..4739d4e4 --- /dev/null +++ b/pyqir-generator/src/qir/array1d.rs @@ -0,0 +1,241 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use inkwell::values::{BasicValue, BasicValueEnum}; +use inkwell::AddressSpace; + +use crate::qir::basic_values; + +use qirlib::context::Context; + +use super::calls; + +pub(crate) fn emit_array_1d<'ctx>( + context: &Context<'ctx>, + name: &str, + size: u64, +) -> (BasicValueEnum<'ctx>, Vec<(u64, BasicValueEnum<'ctx>)>) { + let sub_result_name = format!("{}", &name[..]); + let sub_result = emit_array_allocate1d(&context, 8, size, sub_result_name.as_str()); + let mut items = vec![]; + for index in 0..size { + let cast = get_bitcast_result_pointer_array_element( + context, + index, + &sub_result, + sub_result_name.as_str(), + ); + items.push((index, cast)); + let zero = context + .builder + .build_call( + context.runtime_library.result_get_zero, + &[], + format!("zero_{}", index).as_str(), + ) + .try_as_basic_value() + .left() + .unwrap(); + let one = basic_values::u64_to_i32(context, 1); + context.builder.build_call( + context.runtime_library.result_update_reference_count, + &[zero.into(), one], + name, + ); + context.builder.build_store(cast.into_pointer_value(), zero); + } + + (sub_result, items) +} + +fn get_bitcast_array_pointer_element<'ctx>( + context: &Context<'ctx>, + index: u64, + sub_result: &BasicValueEnum<'ctx>, + sub_result_name: &str, +) -> BasicValueEnum<'ctx> { + let element_raw_ptr_name = format!("{}_{}_raw", sub_result_name, index); + let sub_result_element_ptr = emit_array_get_element_ptr_1d( + context, + index, + sub_result.as_basic_value_enum(), + element_raw_ptr_name.as_str(), + ); + + let element_result_ptr_name = format!("{}_result_{}", sub_result_name, index); + let target_type = context.types.array.ptr_type(AddressSpace::Generic); + let cast = context.builder.build_bitcast( + sub_result_element_ptr, + target_type, + element_result_ptr_name.as_str(), + ); + cast +} + +fn get_bitcast_qubit_pointer_element<'ctx>( + context: &Context<'ctx>, + i: u64, + sub_result: &BasicValueEnum<'ctx>, + sub_result_name: &str, +) -> BasicValueEnum<'ctx> { + 
let element_raw_ptr_name = format!("{}_{}_raw", sub_result_name, i); + let sub_result_element_ptr = emit_array_get_element_ptr_1d( + context, + i, + sub_result.as_basic_value_enum(), + element_raw_ptr_name.as_str(), + ); + + let element_result_ptr_name = format!("{}_result_{}", sub_result_name, i); + let target_type = context.types.qubit; + let cast = context.builder.build_bitcast( + sub_result_element_ptr, + target_type.ptr_type(AddressSpace::Generic), + element_result_ptr_name.as_str(), + ); + cast +} + +fn get_bitcast_array_element<'ctx>( + context: &Context<'ctx>, + index: u64, + sub_result: &BasicValueEnum<'ctx>, + sub_result_name: &str, +) -> BasicValueEnum<'ctx> { + let element_raw_ptr_name = format!("{}_{}_raw", sub_result_name, index); + let sub_result_element_ptr = emit_array_get_element_ptr_1d( + context, + index, + sub_result.as_basic_value_enum(), + element_raw_ptr_name.as_str(), + ); + + let element_result_ptr_name = format!("{}_result_{}", sub_result_name, index); + let target_type = context.types.array; + let cast = context.builder.build_bitcast( + sub_result_element_ptr, + target_type, + element_result_ptr_name.as_str(), + ); + cast +} + +pub fn get_bitcast_result_pointer_array_element<'ctx>( + context: &Context<'ctx>, + index: u64, + sub_result: &BasicValueEnum<'ctx>, + sub_result_name: &str, +) -> BasicValueEnum<'ctx> { + let element_raw_ptr_name = format!("{}_{}_raw", sub_result_name, index); + let sub_result_element_ptr = emit_array_get_element_ptr_1d( + context, + index, + sub_result.as_basic_value_enum(), + element_raw_ptr_name.as_str(), + ); + + let element_result_ptr_name = format!("{}_result_{}", sub_result_name, index); + let target_type = context.types.result.ptr_type(AddressSpace::Generic); + let cast = context.builder.build_bitcast( + sub_result_element_ptr, + target_type, + element_result_ptr_name.as_str(), + ); + cast +} + +pub(crate) fn emit_empty_result_array_allocate1d<'ctx>( + context: &Context<'ctx>, + result_name: &str, +) -> 
BasicValueEnum<'ctx> { + let results = emit_array_allocate1d(&context, 8, 0, &result_name[..]); + results +} + +pub(crate) fn emit_array_allocate1d<'ctx>( + context: &Context<'ctx>, + bits: u64, + length: u64, + result_name: &str, +) -> BasicValueEnum<'ctx> { + let args = &[ + basic_values::u64_to_i32(context, bits), + basic_values::u64_to_i64(context, length), + ]; + calls::emit_call_with_return( + context, + context.runtime_library.array_create_1d, + args, + result_name, + ) +} + +pub(crate) fn emit_array_get_element_ptr_1d<'ctx>( + context: &Context<'ctx>, + index: u64, + target: BasicValueEnum<'ctx>, + result_name: &str, +) -> BasicValueEnum<'ctx> { + let args = &[target.into(), basic_values::u64_to_i64(context, index)]; + let value = context + .builder + .build_call( + context.runtime_library.array_get_element_ptr_1d, + args, + result_name, + ) + .try_as_basic_value(); + value.left().unwrap() +} + +pub(crate) fn set_elements<'ctx>( + context: &Context<'ctx>, + results: &BasicValueEnum<'ctx>, + sub_results: Vec>, + name: &str, +) -> () { + for index in 0..sub_results.len() { + let result_indexed_name = format!("{}_result_tmp", &name[..]); + let result_indexed = get_bitcast_array_pointer_element( + context, + index as u64, + &results, + result_indexed_name.as_str(), + ); + + let _ = context + .builder + .build_store(result_indexed.into_pointer_value(), sub_results[index]); + } +} + +pub(crate) fn create_ctl_wrapper<'ctx>( + context: &Context<'ctx>, + control_qubit: &BasicValueEnum<'ctx>, +) -> BasicValueEnum<'ctx> { + let name = String::from("__controlQubits__"); + let control_qubits = emit_array_allocate1d(&context, 8, 1, &name[..]); + wrap_value_in_array( + context, + &control_qubits.into(), + control_qubit, + format!("{}{}", name, 0).as_str(), + ); + control_qubits.into() +} + +pub(crate) fn wrap_value_in_array<'ctx>( + context: &Context<'ctx>, + results: &BasicValueEnum<'ctx>, + sub_results: &BasicValueEnum<'ctx>, + name: &str, +) -> () { + let 
result_indexed_name = format!("{}_result_tmp", &name[..]); + let result_indexed = + get_bitcast_qubit_pointer_element(context, 0, &results, result_indexed_name.as_str()); + + let _ = context.builder.build_store( + result_indexed.into_pointer_value(), + sub_results.as_basic_value_enum(), + ); +} diff --git a/pyqir-generator/src/qir/basic_values.rs b/pyqir-generator/src/qir/basic_values.rs new file mode 100644 index 00000000..60d124aa --- /dev/null +++ b/pyqir-generator/src/qir/basic_values.rs @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use inkwell::values::{BasicMetadataValueEnum, BasicValue}; + +use qirlib::context::Context; + +pub(crate) fn i8_null_ptr<'ctx>(context: &Context<'ctx>) -> BasicMetadataValueEnum<'ctx> { + context + .context + .i8_type() + .ptr_type(inkwell::AddressSpace::Generic) + .const_null() + .as_basic_value_enum() + .into() +} + +pub(crate) fn f64_to_f64<'ctx>(context: &Context<'ctx>, value: &f64) -> BasicMetadataValueEnum<'ctx> { + context + .types + .double + .const_float(value.clone()) + .as_basic_value_enum() + .into() +} + +pub(crate) fn u64_to_i32<'ctx>(context: &Context<'ctx>, value: u64) -> BasicMetadataValueEnum<'ctx> { + context + .context + .i32_type() + .const_int(value, false) + .as_basic_value_enum() + .into() +} + +pub(crate) fn i64_to_i32<'ctx>(context: &Context<'ctx>, value: i64) -> BasicMetadataValueEnum<'ctx> { + // convert to capture negative values. 
+ let target: u64 = value as u64; + + context + .context + .i32_type() + .const_int(target, false) + .as_basic_value_enum() + .into() +} + +pub(crate) fn u64_to_i64<'ctx>(context: &Context<'ctx>, value: u64) -> BasicMetadataValueEnum<'ctx> { + context + .types + .int + .const_int(value, false) + .as_basic_value_enum() + .into() +} diff --git a/pyqir-generator/src/qir/calls.rs b/pyqir-generator/src/qir/calls.rs new file mode 100644 index 00000000..f9dbd119 --- /dev/null +++ b/pyqir-generator/src/qir/calls.rs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use qirlib::context::Context; +use inkwell::values::{BasicMetadataValueEnum, BasicValueEnum, FunctionValue}; + +pub(crate) fn emit_void_call<'ctx>( + context: &Context<'ctx>, + function: FunctionValue<'ctx>, + args: &[BasicMetadataValueEnum<'ctx>], +) { + let _ = context + .builder + .build_call(function, args, "") + .try_as_basic_value() + .right() + .unwrap(); +} + +pub(crate) fn emit_call_with_return<'ctx>( + context: &Context<'ctx>, + function: FunctionValue<'ctx>, + args: &[BasicMetadataValueEnum<'ctx>], + name: &str, +) -> BasicValueEnum<'ctx> { + context + .builder + .build_call(function, args, name) + .try_as_basic_value() + .left() + .unwrap() +} diff --git a/pyqir-generator/src/qir/instructions.rs b/pyqir-generator/src/qir/instructions.rs new file mode 100644 index 00000000..974bf3d0 --- /dev/null +++ b/pyqir-generator/src/qir/instructions.rs @@ -0,0 +1,226 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use crate::interop::Instruction; + +use super::{ + array1d::{self, create_ctl_wrapper}, + basic_values, calls, +}; +use qirlib::context::Context; +use inkwell::values::{BasicMetadataValueEnum, BasicValueEnum, FunctionValue}; +use std::collections::HashMap; + +/// # Panics +/// +/// Panics if the qubit name doesn't exist +fn get_qubit<'ctx>( + name: &String, + qubits: &HashMap>, +) -> BasicValueEnum<'ctx> { + qubits.get(name).unwrap().to_owned() +} + +/// # Panics +/// +/// Panics if the register name doesn't exist +fn get_register<'ctx>( + name: &String, + registers: &HashMap, Option)>, +) -> (BasicValueEnum<'ctx>, Option) { + registers.get(name).unwrap().to_owned() +} + +pub(crate) fn emit<'ctx>( + context: &Context<'ctx>, + inst: &Instruction, + qubits: &HashMap>, + registers: &HashMap, Option)>, +) -> () { + let intrinsics = &context.intrinsics; + let find_qubit = |name| get_qubit(name, qubits); + let ctl = |value| create_ctl_wrapper(context, value); + match inst { + Instruction::Cx(inst) => { + let control = ctl(&find_qubit(&inst.control)); + let qubit = find_qubit(&inst.target); + controlled( + context, + intrinsics + .x_ctl + .expect("x_ctl must be defined in the template"), + control, + qubit, + ); + } + Instruction::Cz(inst) => { + let control = ctl(&find_qubit(&inst.control)); + let qubit = find_qubit(&inst.target); + controlled( + context, + intrinsics + .z_ctl + .expect("z_ctl must be defined in the template"), + control, + qubit, + ); + } + Instruction::H(inst) => calls::emit_void_call( + context, + intrinsics.h.expect("h must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::M(inst) => { + measure(context, &inst.qubit, &inst.target, qubits, registers); + } + Instruction::Reset(inst) => calls::emit_void_call( + context, + intrinsics + .reset + .expect("reset must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::Rx(inst) => calls::emit_void_call( + context, + 
intrinsics.r_x.expect("r_x must be defined in the template"), + &[ + basic_values::f64_to_f64(context, &inst.theta), + find_qubit(&inst.qubit).into(), + ], + ), + Instruction::Ry(inst) => calls::emit_void_call( + context, + intrinsics.r_y.expect("r_y must be defined in the template"), + &[ + basic_values::f64_to_f64(context, &inst.theta), + find_qubit(&inst.qubit).into(), + ], + ), + Instruction::Rz(inst) => calls::emit_void_call( + context, + intrinsics.r_z.expect("r_z must be defined in the template"), + &[ + basic_values::f64_to_f64(context, &inst.theta), + find_qubit(&inst.qubit).into(), + ], + ), + Instruction::S(inst) => calls::emit_void_call( + context, + intrinsics.s.expect("s must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::SAdj(inst) => calls::emit_void_call( + context, + intrinsics + .s_adj + .expect("s_adj must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::T(inst) => calls::emit_void_call( + context, + intrinsics.t.expect("t must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::TAdj(inst) => calls::emit_void_call( + context, + intrinsics + .t_adj + .expect("t_adj must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::X(inst) => calls::emit_void_call( + context, + intrinsics.x.expect("x must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::Y(inst) => calls::emit_void_call( + context, + intrinsics.y.expect("y must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::Z(inst) => calls::emit_void_call( + context, + intrinsics.z.expect("z must be defined in the template"), + &[find_qubit(&inst.qubit).into()], + ), + Instruction::DumpMachine => calls::emit_void_call( + context, + intrinsics + .dumpmachine + .expect("dumpmachine must be defined before use"), + &[basic_values::i8_null_ptr(context)], + ), + } + + fn measure<'ctx>( + 
context: &Context<'ctx>, + qubit: &String, + target: &String, + qubits: &HashMap>, + registers: &HashMap, Option)>, + ) { + let find_qubit = |name| get_qubit(name, qubits); + let find_register = |name| get_register(name, registers); + + // measure the qubit and save the result to a temporary value + let result = calls::emit_call_with_return( + context, + context + .intrinsics + .m + .expect("m must be defined in the template"), + &[find_qubit(qubit).into()], + "measurement", + ); + + // find the parent register and offset for the given target + let (register, index) = find_register(target); + + // get the bitcast pointer to the target location + let bitcast_indexed_target_register = array1d::get_bitcast_result_pointer_array_element( + context, + index.unwrap(), + ®ister, + target, + ); + + // get the existing value from that location and decrement its ref count as its + // being replaced with the measurement. + let existing_value = context.builder.build_load( + bitcast_indexed_target_register.into_pointer_value(), + "existing_value", + ); + let minus_one = basic_values::i64_to_i32(context, -1); + context.builder.build_call( + context.runtime_library.result_update_reference_count, + &[existing_value.into(), minus_one.into()], + "", + ); + + // increase the ref count of the new value and store it in the target register + let one = basic_values::i64_to_i32(context, 1); + context.builder.build_call( + context.runtime_library.result_update_reference_count, + &[result.into(), one.into()], + "", + ); + let _ = context + .builder + .build_store(bitcast_indexed_target_register.into_pointer_value(), result); + } + + fn controlled<'ctx>( + context: &Context<'ctx>, + intrinsic: FunctionValue<'ctx>, + control: BasicValueEnum<'ctx>, + qubit: BasicValueEnum<'ctx>, + ) { + calls::emit_void_call(context, intrinsic, &[control.into(), qubit.into()]); + let minus_one = basic_values::i64_to_i32(context, -1); + context.builder.build_call( + 
context.runtime_library.array_update_reference_count, + &[control.into(), minus_one], + "", + ); + } +} diff --git a/pyqir-generator/src/qir/mod.rs b/pyqir-generator/src/qir/mod.rs new file mode 100644 index 00000000..b8640e1c --- /dev/null +++ b/pyqir-generator/src/qir/mod.rs @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use inkwell::values::BasicValue; +use inkwell::values::FunctionValue; +use inkwell::AddressSpace; + +use qirlib::context::Context; + +pub mod array1d; +pub mod basic_values; +pub mod calls; +pub mod instructions; +pub mod qubits; + +pub(crate) fn get_entry_function<'ctx>(context: &Context<'ctx>) -> FunctionValue<'ctx> { + let ns = "QuantumApplication"; + let method = "Run"; + let entrypoint_name = format!("{}__{}__body", ns, method); + let entrypoint = context.module.get_function(&entrypoint_name).unwrap(); + + while let Some(basic_block) = entrypoint.get_last_basic_block() { + unsafe { + basic_block.delete().unwrap(); + } + } + entrypoint +} + +pub(crate) fn remove_quantumapplication_run<'ctx>(context: &Context<'ctx>) -> FunctionValue<'ctx> { + let ns = "QuantumApplication"; + let method = "Run"; + let entrypoint_name = format!("{}__{}", ns, method); + let entrypoint = context.module.get_function(&entrypoint_name).unwrap(); + while let Some(basic_block) = entrypoint.get_last_basic_block() { + unsafe { + basic_block.delete().unwrap(); + } + } + entrypoint +} +pub(crate) fn remove_quantumapplication_run_interop<'ctx>( + context: &Context<'ctx>, +) -> FunctionValue<'ctx> { + let ns = "QuantumApplication"; + let method = "Run"; + let entrypoint_name = format!("{}__{}__Interop", ns, method); + let entrypoint = context.module.get_function(&entrypoint_name).unwrap(); + while let Some(basic_block) = entrypoint.get_last_basic_block() { + unsafe { + basic_block.delete().unwrap(); + } + } + let entry = context.context.append_basic_block(entrypoint, "entry"); + context.builder.position_at_end(entry); + + let 
v = entrypoint + .get_type() + .ptr_type(AddressSpace::Generic) + .const_null() + .as_basic_value_enum(); + context.builder.build_return(Some(&v)); + entrypoint +} + + diff --git a/pyqir-generator/src/qir/qubits.rs b/pyqir-generator/src/qir/qubits.rs new file mode 100644 index 00000000..98fb502f --- /dev/null +++ b/pyqir-generator/src/qir/qubits.rs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use inkwell::values::{BasicValue, BasicValueEnum}; + +use super::{calls, Context}; + +pub(crate) fn emit_allocate<'ctx>( + context: &Context<'ctx>, + result_name: &str, +) -> BasicValueEnum<'ctx> { + let args = []; + calls::emit_call_with_return( + context, + context.runtime_library.qubit_allocate, + &args, + result_name, + ) +} + +pub(crate) fn emit_release<'ctx>(context: &Context<'ctx>, qubit: &BasicValueEnum<'ctx>) { + let args = [qubit.as_basic_value_enum().into()]; + calls::emit_void_call(context, context.runtime_library.qubit_release, &args); +} diff --git a/pyqir-generator/tests/test_api.py b/pyqir-generator/tests/test_api.py new file mode 100644 index 00000000..2273ec4d --- /dev/null +++ b/pyqir-generator/tests/test_api.py @@ -0,0 +1,129 @@ +from pyqir_generator import * +import pytest + +def test_bell(tmpdir): + builder = QirBuilder("Bell circuit") + builder.add_quantum_register("qr", 2) + builder.add_classical_register("qc", 2) + builder.h("qr0") + builder.cx("qr0", "qr1") + builder.m("qr0", "qc0") + builder.m("qr1", "qc1") + + file = tmpdir.mkdir("sub").join("bell_measure.ll") + print(f'Writing {file}') + builder.build(str(file)) + +def test_bell_no_measure(tmpdir): + builder = QirBuilder("Bell circuit") + builder.add_quantum_register("qr", 2) + builder.h("qr0") + builder.cx("qr0", "qr1") + + builder.dump_machine() + + file = tmpdir.mkdir("sub").join("bell_no_measure.ll") + print(f'Writing {file}') + builder.build(str(file)) + +def test_bernstein_vazirani(tmpdir): + builder = QirBuilder("Bernstein-Vazirani") + 
builder.add_quantum_register("input", 5) + builder.add_quantum_register("target", 1) + builder.add_classical_register("output", 5) + + builder.x("target0") + + builder.h("input0") + builder.h("input1") + builder.h("input2") + builder.h("input3") + builder.h("input4") + + builder.h("target0") + + builder.cx("input1", "target0") + builder.cx("input3", "target0") + builder.cx("input4", "target0") + + builder.h("input0") + builder.h("input1") + builder.h("input2") + builder.h("input3") + builder.h("input4") + + builder.m("input0", "output0") + builder.m("input1", "output1") + builder.m("input2", "output2") + builder.m("input3", "output3") + builder.m("input4", "output4") + + file = tmpdir.mkdir("sub").join("bernstein_vazirani.ll") + print(f'Writing {file}') + builder.build(str(file)) + +def test_all_gates(tmpdir): + builder = QirBuilder("All Gates") + builder.add_quantum_register("q", 4) + builder.add_quantum_register("control", 1) + builder.add_classical_register("c", 4) + builder.add_classical_register("i", 3) + builder.add_classical_register("j", 2) + builder.cx("q0", "control0") + builder.cz("q1", "control0") + builder.h("q0") + builder.reset("q0") + builder.rx(15.0,"q1") + builder.ry(16.0,"q2") + builder.rz(17.0,"q3") + builder.s("q0") + builder.s_adj("q1") + builder.t("q2") + builder.t_adj("q3") + builder.x("q0") + builder.y("q1") + builder.z("q2") + + builder.m("q0", "c0") + builder.m("q1", "c1") + builder.m("q2", "c2") + builder.m("q3", "c3") + + file = tmpdir.mkdir("sub").join("all_gates.ll") + print(f'Writing {file}') + builder.build(str(file)) + +def test_bernstein_vazirani_ir_string(): + builder = QirBuilder("Bernstein-Vazirani") + builder.add_quantum_register("input", 5) + builder.add_quantum_register("target", 1) + builder.add_classical_register("output", 5) + + builder.x("target0") + + builder.h("input0") + builder.h("input1") + builder.h("input2") + builder.h("input3") + builder.h("input4") + + builder.h("target0") + + builder.cx("input1", "target0") + 
builder.cx("input3", "target0") + builder.cx("input4", "target0") + + builder.h("input0") + builder.h("input1") + builder.h("input2") + builder.h("input3") + builder.h("input4") + + builder.m("input0", "output0") + builder.m("input1", "output1") + builder.m("input2", "output2") + builder.m("input3", "output3") + builder.m("input4", "output4") + + ir = builder.get_ir_string() + assert ir.startswith("; ModuleID = 'Bernstein-Vazirani'") diff --git a/pyqir-generator/tox.ini b/pyqir-generator/tox.ini new file mode 100644 index 00000000..969767a2 --- /dev/null +++ b/pyqir-generator/tox.ini @@ -0,0 +1,26 @@ +[tox] +isolated_build = True + +[testenv] + +# https://github.com/tox-dev/tox/issues/1550 +# PYTHONIOENCODING = utf-8 needs to be set to work around windows bug +setenv = + LLVM_SYS_110_PREFIX = {env:LLVM_SYS_110_PREFIX} + PYTHONIOENCODING = utf-8 + +# needed temporarily for build to find cl.exe +passenv = * + +deps = -rrequirements-dev.txt + +[testenv:test] +description = Run the unit tests under {basepython} +commands = + python -m pip install . + pytest {posargs} + +[testenv:pack] +description = Build the wheels under all installed platforms +commands = + maturin build --release diff --git a/pyqir-jit/Cargo.toml b/pyqir-jit/Cargo.toml new file mode 100644 index 00000000..401151f5 --- /dev/null +++ b/pyqir-jit/Cargo.toml @@ -0,0 +1,48 @@ +[package] +authors = ["Microsoft"] +name = "pyqir-jit" +version = "0.1.0" +edition = "2018" +license = "MIT" +description = "Python based QIR JIT library." 
+readme = "README.md" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +microsoft-quantum-qir-runtime-sys = { git = "https://github.com/microsoft/qsharp-runtime", rev = "d955aec7", default-features = false, features = ["runtime", "foundation", "llvm-libloading"] } +qirlib = { path = "../qirlib" } +inkwell = { git = "https://github.com/TheDan64/inkwell", branch = "master", default-features = false, features = ["llvm11-0", "target-x86"] } +lazy_static = "1.4.0" +mut_static = "5.0.0" +log = "0.4.14" + +[dependencies.pyo3] +version = "0.14.2" + +[features] +extension-module = ["pyo3/abi3-py36", "pyo3/extension-module"] +default = ["extension-module"] + +[dev-dependencies] +serial_test = "0.5.1" +tempfile = "3.2.0" + +[lib] +name = "pyqir_jit" +crate-type = ["cdylib"] + +[package.metadata.maturin] +classifier=[ + "License :: OSI Approved :: MIT License", + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python", + "Programming Language :: Rust", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", +] diff --git a/pyqir-jit/MANIFEST.in b/pyqir-jit/MANIFEST.in new file mode 100644 index 00000000..4b8d0b8f --- /dev/null +++ b/pyqir-jit/MANIFEST.in @@ -0,0 +1,2 @@ +include pyproject.toml Cargo.toml +recursive-include src * \ No newline at end of file diff --git a/pyqir-jit/README.md b/pyqir-jit/README.md new file mode 100644 index 00000000..9824d2de --- /dev/null +++ b/pyqir-jit/README.md @@ -0,0 +1,22 @@ +# pyqir-jit + +## Building and Testing + +To build this package, first install `maturin`: + +```shell +pip install maturin +``` + +To build and test use `maturin develop`: + +```shell +pip install -r requirements-dev.txt +maturin develop && pytest +``` + 
+Alternatively, install tox and run the tests inside an isolated environment: + +```shell +tox -e py +``` \ No newline at end of file diff --git a/pyqir-jit/pyproject.toml b/pyqir-jit/pyproject.toml new file mode 100644 index 00000000..10e5b2f4 --- /dev/null +++ b/pyqir-jit/pyproject.toml @@ -0,0 +1,7 @@ +[project] +name = "pyqir_jit" +requires-python = ">=3.6" + +[build-system] +requires = ["maturin>=0.10,<0.12"] +build-backend = "maturin" diff --git a/pyqir-jit/pyqir_jit/__init__.py b/pyqir-jit/pyqir_jit/__init__.py new file mode 100644 index 00000000..0fb7784c --- /dev/null +++ b/pyqir-jit/pyqir_jit/__init__.py @@ -0,0 +1,5 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. + +from .jit import * +from .gateset import * diff --git a/pyqir-jit/pyqir_jit/gateset.py b/pyqir-jit/pyqir_jit/gateset.py new file mode 100644 index 00000000..9957f36b --- /dev/null +++ b/pyqir-jit/pyqir_jit/gateset.py @@ -0,0 +1,58 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +class GateSet(object): + def __init__(self): + self.number_of_qubits = 0 + + def cx(self, control: str, target: str): + pass + + def cz(self, control: str, target: str): + pass + + def h(self, target: str): + pass + + def m(self, qubit: str, target: str): + pass + + def reset(self, target: str): + pass + + def rx(self, theta: float, qubit: str): + pass + + def ry(self, theta: float, qubit: str): + pass + + def rz(self, theta: float, qubit: str): + pass + + def s(self, qubit: str): + pass + + def s_adj(self, qubit: str): + pass + + def t(self, qubit: str): + pass + + def t_adj(self, qubit: str): + pass + + def x(self, qubit: str): + pass + + def y(self, qubit: str): + pass + + def z(self, qubit: str): + pass + + def dump_machine(self): + pass + + def finish(self, metadata: dict): + self.number_of_qubits = metadata["number_of_qubits"] + pass diff --git a/pyqir-jit/pyqir_jit/jit.py b/pyqir-jit/pyqir_jit/jit.py new file mode 100644 index 00000000..acb490e9 --- /dev/null +++ b/pyqir-jit/pyqir_jit/jit.py @@ -0,0 +1,26 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +from typing import Any +from .pyqir_jit import * + +class QirJit(object): + """ + The QirJit object loads bitcode/QIR for evaluation and processing + + """ + + def __init__(self): + self.pyqirjit = PyQirJit() + + def eval(self, file_path: str, pyobj: Any): + """ + JIT compiles the circuit delegating quantum operations to the supplied object + + :param file_path: file path of existing QIR in a ll or bc file + :type file_path: str + + :param pyobj: python GateSet object defining the quantum operations + :type pyobj: str + """ + self.pyqirjit.eval(file_path, pyobj) diff --git a/pyqir-jit/requirements-dev.txt b/pyqir-jit/requirements-dev.txt new file mode 100644 index 00000000..e5581af7 --- /dev/null +++ b/pyqir-jit/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest>=3.5.0 +pip>=21.3 +maturin>=0.10,<0.12 diff --git a/pyqir-jit/src/gates.rs b/pyqir-jit/src/gates.rs new file mode 100644 index 00000000..3ae55fb1 --- /dev/null +++ b/pyqir-jit/src/gates.rs @@ -0,0 +1,214 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use lazy_static::lazy_static; +use microsoft_quantum_qir_runtime_sys::runtime::QUBIT; +use mut_static::MutStatic; + +use crate::interop::{ + ClassicalRegister, Controlled, Instruction, Measured, QuantumRegister, Rotated, SemanticModel, + Single, +}; + +lazy_static! 
{ + pub static ref CURRENT_GATES: MutStatic = MutStatic::from(BaseProfile::new()); +} + +#[derive(Default)] +pub struct BaseProfile { + model: SemanticModel, + max_id: QUBIT, + declared_cubits: bool, +} + +pub struct GateScope {} + +impl GateScope { + pub fn new() -> GateScope { + let mut gs = CURRENT_GATES.write().unwrap(); + gs.reset(); + GateScope {} + } +} + +impl Drop for GateScope { + fn drop(&mut self) { + let mut gs = CURRENT_GATES.write().unwrap(); + gs.reset(); + } +} + +impl BaseProfile { + pub fn new() -> Self { + BaseProfile { + model: SemanticModel::new(String::from("QIR")), + max_id: 0, + declared_cubits: false, + } + } + + pub fn reset(&mut self) { + self.model = SemanticModel::new(String::from("QIR")); + self.max_id = 0; + self.declared_cubits = false; + } + + fn record_max_qubit_id(&mut self, qubit: QUBIT) { + self.declared_cubits = true; + if qubit > self.max_id { + self.max_id = qubit + } + } + pub fn get_model(&self) -> SemanticModel { + self.model.clone() + } + pub fn infer_allocations(&mut self) { + if self.declared_cubits == false { + return; + } + for index in 0..self.max_id + 1 { + let qr = QuantumRegister::new(String::from("qubit"), index); + self.model.add_reg(qr.as_register()); + } + let cr = ClassicalRegister::new(String::from("output"), self.max_id + 1); + self.model.add_reg(cr.as_register()); + } + + pub fn cx(&mut self, control: QUBIT, target: QUBIT) { + self.record_max_qubit_id(control); + self.record_max_qubit_id(target); + + log::debug!("cx {}:{}", control, target); + self.model + .add_inst(Instruction::Cx(BaseProfile::controlled(control, target))); + } + + pub fn cz(&mut self, control: QUBIT, target: QUBIT) { + self.record_max_qubit_id(control); + self.record_max_qubit_id(target); + + log::debug!("cz {}:{}", control, target); + self.model + .add_inst(Instruction::Cz(BaseProfile::controlled(control, target))); + } + + pub fn h(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("h {}", qubit); + 
self.model + .add_inst(Instruction::H(BaseProfile::single(qubit))); + } + + pub fn m(&mut self, qubit: QUBIT /* , target: QUBIT */) { + self.record_max_qubit_id(qubit); + //self.record_max_qubit_id(target); + + log::debug!("m {}", qubit /* , target*/); + self.model + .add_inst(Instruction::M(BaseProfile::measured(qubit))); + } + + pub fn rx(&mut self, theta: f64, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("rx {}({})", qubit, theta); + self.model + .add_inst(Instruction::Rx(BaseProfile::rotated(theta, qubit))); + } + pub fn ry(&mut self, theta: f64, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("ry {}({})", qubit, theta); + self.model + .add_inst(Instruction::Ry(BaseProfile::rotated(theta, qubit))); + } + pub fn rz(&mut self, theta: f64, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("rz {}({})", qubit, theta); + self.model + .add_inst(Instruction::Rz(BaseProfile::rotated(theta, qubit))); + } + pub fn s(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("s {}", qubit); + self.model + .add_inst(Instruction::S(BaseProfile::single(qubit))); + } + pub fn s_adj(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("s_adj {}", qubit); + self.model + .add_inst(Instruction::SAdj(BaseProfile::single(qubit))); + } + + pub fn t(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("t {}", qubit); + self.model + .add_inst(Instruction::T(BaseProfile::single(qubit))); + } + pub fn t_adj(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("t_adj {}", qubit); + self.model + .add_inst(Instruction::TAdj(BaseProfile::single(qubit))); + } + + pub fn x(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("x {}", qubit); + self.model + .add_inst(Instruction::X(BaseProfile::single(qubit))); + } + pub fn y(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("y {}", qubit); + 
self.model + .add_inst(Instruction::Y(BaseProfile::single(qubit))); + } + pub fn z(&mut self, qubit: QUBIT) { + self.record_max_qubit_id(qubit); + + log::debug!("z {}", qubit); + self.model + .add_inst(Instruction::Z(BaseProfile::single(qubit))); + } + + pub fn dump_machine(&mut self) { + log::debug!("dumpmachine"); + } + + fn controlled(control: QUBIT, target: QUBIT) -> Controlled { + Controlled::new( + BaseProfile::get_cubit_string(control), + BaseProfile::get_cubit_string(target), + ) + } + + fn measured(qubit: QUBIT /*, target: QUBIT*/) -> Measured { + Measured::new( + BaseProfile::get_cubit_string(qubit), + String::from(""), /*BaseProfile::get_cubit_string(target),*/ + ) + } + + fn rotated(theta: f64, qubit: QUBIT) -> Rotated { + Rotated::new(theta, BaseProfile::get_cubit_string(qubit)) + } + + fn single(qubit: QUBIT) -> Single { + Single::new(BaseProfile::get_cubit_string(qubit)) + } + + fn get_cubit_string(qubit: QUBIT) -> String { + String::from(format!("{}", qubit)) + } +} diff --git a/pyqir-jit/src/interop.rs b/pyqir-jit/src/interop.rs new file mode 100644 index 00000000..93c12277 --- /dev/null +++ b/pyqir-jit/src/interop.rs @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct QuantumRegister { + pub name: String, + pub index: u64, +} + +impl QuantumRegister { + pub fn new(name: String, index: u64) -> Self { + QuantumRegister { name, index } + } + + pub fn as_register(&self) -> Register { + Register::Quantum(self.clone()) + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct ClassicalRegister { + pub name: String, + pub size: u64, +} + +impl ClassicalRegister { + pub fn new(name: String, size: u64) -> Self { + ClassicalRegister { name, size } + } + + pub fn as_register(&self) -> Register { + Register::Classical(self.clone()) + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Register { + Quantum(QuantumRegister), + Classical(ClassicalRegister), +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Controlled { + pub control: String, + pub target: String, +} + +impl Controlled { + pub fn new(control: String, target: String) -> Self { + Controlled { control, target } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Measured { + pub qubit: String, + pub target: String, +} + +impl Measured { + pub fn new(qubit: String, target: String) -> Self { + Measured { qubit, target } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct Rotated { + pub theta: f64, + pub qubit: String, +} + +impl Rotated { + pub fn new(theta: f64, qubit: String) -> Self { + Rotated { theta, qubit } + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Single { + pub qubit: String, +} + +impl Single { + pub fn new(qubit: String) -> Self { + Single { qubit } + } +} + +// https://github.com/microsoft/qsharp-language/blob/ageller/profile/Specifications/QIR/Base-Profile.md +#[derive(Clone, Debug, PartialEq)] +pub enum Instruction { + Cx(Controlled), + Cz(Controlled), + H(Single), + M(Measured), + Reset(Single), + Rx(Rotated), + Ry(Rotated), + Rz(Rotated), + S(Single), + SAdj(Single), + T(Single), + TAdj(Single), + X(Single), + Y(Single), + Z(Single), + DumpMachine, 
}

// --- pyqir-jit/src/interop.rs (tail) ---

/// In-memory description of a circuit: named registers, qubits, and the
/// ordered instruction stream recorded by the JIT callbacks.
#[derive(Clone, Default)]
pub struct SemanticModel {
    pub name: String,
    // NOTE(review): element types were mangled in the source under review;
    // restored from `add_reg` below, which files ClassicalRegister /
    // QuantumRegister values into these vectors — confirm against interop.rs.
    pub registers: Vec<ClassicalRegister>,
    pub qubits: Vec<QuantumRegister>,
    pub instructions: Vec<Instruction>,
}

impl SemanticModel {
    /// Creates an empty model carrying only its display `name`.
    pub fn new(name: String) -> Self {
        SemanticModel {
            name, // field-init shorthand instead of `name: name`
            registers: vec![],
            qubits: vec![],
            instructions: vec![],
        }
    }

    /// Files `reg` into the classical or quantum register list.
    /// `reg` is owned, so we match by value — no `to_owned()` clone needed.
    pub fn add_reg(&mut self, reg: Register) {
        match reg {
            Register::Classical(creg) => self.registers.push(creg),
            Register::Quantum(qreg) => self.qubits.push(qreg),
        }
    }

    /// Appends `inst` to the instruction stream in program order.
    pub fn add_inst(&mut self, inst: Instruction) {
        self.instructions.push(inst);
    }
}

// --- pyqir-jit/src/intrinsics.rs ---
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

#![allow(dead_code)]
#![allow(unused_variables)]

/// Opaque mirror of the runtime's `QirRTuple`; only ever handled by pointer.
#[repr(C)]
pub struct QirRTuple {
    private: [u8; 0],
}

/// Pauli axis id as received over the C ABI. The QIR 2-bit encoding is
/// I = 0, X = 1, Z = 2 (0b10), Y = 3 (0b11) — see the `@PauliX`/`@PauliY`/
/// `@PauliZ` constants emitted in the generated modules.
pub type PauliId = i8;

use microsoft_quantum_qir_runtime_sys::runtime::{QirArray, QirRuntime, QUBIT};
use mut_static::ForceSomeRwLockWriteGuard;

use super::gates::BaseProfile;

/// Grabs the process-wide gate recorder; panics only if the lock is poisoned,
/// which indicates an earlier panic inside a callback (a bug, not user error).
fn get_current_gate_processor() -> ForceSomeRwLockWriteGuard<'static, BaseProfile> {
    crate::gates::CURRENT_GATES.write().unwrap()
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__h__body(qubit: QUBIT) {
    log::debug!("/__quantum__qis__h__body/");
    let mut gs = get_current_gate_processor();
    gs.h(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__h__ctl(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__h__ctl/");
    let control = get_qubit_id(ctls);
    //let mut gs = get_current_gate_processor();
    todo!("Not yet implemented.");
    //gs.h_ctl(control, get_cubit_string(qubit));
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__measure__body(
    qubits: *mut QirArray,
    registers: *mut QirArray,
) {
    log::debug!("/__quantum__qis__measure__body/");

    // Intentionally a no-op for now:
    // get_qubit_id may return something like 94420488984834
    // which will use up all computer memory

    // let qubit = get_qubit_id(qubits);
    // let mut gs = get_current_gate_processor();
    // gs.m(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__r__body(pauli: PauliId, theta: f64, qubit: QUBIT) {
    log::debug!("/__quantum__qis__r__body/");
    let mut gs = get_current_gate_processor();
    // The out-of-sequence arms are deliberate: in the QIR 2-bit Pauli
    // encoding Y is 3 and Z is 2 (see `PauliId` above).
    match pauli {
        0 => { /* identity */ }
        1 => gs.rx(theta, qubit),
        3 => gs.ry(theta, qubit),
        2 => gs.rz(theta, qubit),
        _ => panic!("Unsupported Pauli value: {}", pauli),
    }
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__r__adj(pauli: PauliId, theta: f64, qubit: QUBIT) {
    log::debug!("/__quantum__qis__r__adj/");
    //let mut gs = get_current_gate_processor();
    todo!("Not yet implemented.");
    //gs.r_adj(pauli, theta, get_cubit_string(qubit));
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__r__ctl(ctls: *mut QirArray, qubit: *mut QirRTuple) {
    log::debug!("/__quantum__qis__r__ctl/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__r__ctladj(ctls: *mut QirArray, qubit: *mut QirRTuple) {
    log::debug!("/__quantum__qis__r__ctladj/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__s__body(qubit: QUBIT) {
    log::debug!("/__quantum__qis__s__body/");
    let mut gs = get_current_gate_processor();
    gs.s(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__s__adj(qubit: QUBIT) {
    log::debug!("/__quantum__qis__s__adj/");
    let mut gs = get_current_gate_processor();
    gs.s_adj(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__s__ctl(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__s__ctl/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__s__ctladj(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__s__ctladj/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__t__body(qubit: QUBIT) {
    log::debug!("/__quantum__qis__t__body/");
    let mut gs = get_current_gate_processor();
    gs.t(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__t__adj(qubit: QUBIT) {
    log::debug!("/__quantum__qis__t__adj/");
    let mut gs = get_current_gate_processor();
    gs.t_adj(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__t__ctl(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__t__ctl/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__t__ctladj(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__t__ctladj/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__x__body(qubit: QUBIT) {
    log::debug!("/__quantum__qis__x__body/");
    let mut gs = get_current_gate_processor();
    gs.x(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__x__ctl(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__x__ctl/");
    let control = get_qubit_id(ctls);
    let mut gs = get_current_gate_processor();
    gs.cx(control, qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__y__body(qubit: QUBIT) {
    log::debug!("/__quantum__qis__y__body/");
    let mut gs = get_current_gate_processor();
    gs.y(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__y__ctl(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__y__ctl/");
    todo!("Not yet implemented.");
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__z__body(qubit: QUBIT) {
    log::debug!("/__quantum__qis__z__body/");
    let mut gs = get_current_gate_processor();
    // BUG FIX: previously called `gs.y(qubit)` (copy-paste from y__body),
    // which recorded every Z gate as a Y in the semantic model.
    gs.z(qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__z__ctl(ctls: *mut QirArray, qubit: QUBIT) {
    log::debug!("/__quantum__qis__z__ctl/");
    let control = get_qubit_id(ctls);
    let mut gs = get_current_gate_processor();
    gs.cz(control, qubit);
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__dumpmachine__body(location: *mut u8) {
    log::debug!("/__quantum__qis__dumpmachine__body/");
    // (removed a stray duplicated `/__quantum__qis__h__body/` debug line)
    let mut gs = get_current_gate_processor();
    gs.dump_machine();
}

#[no_mangle]
pub unsafe extern "C" fn __quantum__qis__dumpregister__body(
    location: *mut u8,
    qubits: *mut QirArray,
) {
    log::debug!("/__quantum__qis__dumpregister__body/");
    todo!("Not yet implemented.");
}

/// Reads element 0 of `ctls` and interprets it as a qubit id.
///
/// # Safety
/// `ctls` must be a valid, non-empty `QirArray` whose elements are
/// pointer-sized qubit handles; the caller (the JIT'd QIR) guarantees this.
pub unsafe fn get_qubit_id(ctls: *mut QirArray) -> QUBIT {
    let ctrl_qubit_ptr = QirRuntime::quantum_rt_array_get_element_ptr_1d(ctls, 0) as *mut u64;
    let ctrl_qubit = *ctrl_qubit_ptr;
    log::debug!("ctrl_qubit {}", ctrl_qubit);
    ctrl_qubit as QUBIT
}

// The C prototypes these `extern "C"` shims mirror live in the QIR runtime
// headers shipped with `microsoft_quantum_qir_runtime_sys` (QSharpCore /
// QirRuntime). Keep the signatures above in sync with those declarations.
+ +use qirlib::context::{Context, ContextType}; +use crate::runtime::Simulator; +use crate::interop::SemanticModel; +use inkwell::targets::TargetMachine; +use inkwell::{ + passes::PassManagerBuilder, + targets::{InitializationConfig, Target}, + OptimizationLevel, +}; +use microsoft_quantum_qir_runtime_sys::runtime::BasicRuntimeDriver; +use qirlib::passes::run_basic_passes_on; + +pub fn run_module(module: String) -> Result { + let ctx = inkwell::context::Context::create(); + let context_type = ContextType::File(&module); + let context = Context::new(&ctx, context_type)?; + let model = run_ctx(context)?; + Ok(model) +} + +pub fn run_ctx<'ctx>(context: Context<'ctx>) -> Result { + Target::initialize_native(&InitializationConfig::default()).unwrap(); + + let default_triple = TargetMachine::get_default_triple(); + + let target = Target::from_triple(&default_triple).expect("Unable to create target machine"); + + assert!(target.has_asm_backend()); + assert!(target.has_target_machine()); + + run_basic_passes_on(&context); + + unsafe { + BasicRuntimeDriver::initialize_qir_context(true); + let _ = microsoft_quantum_qir_runtime_sys::foundation::QSharpFoundation::new(); + + let _ = inkwell::support::load_library_permanently(""); + let simulator = Simulator::new(&context, &context.execution_engine); + let main = context + .execution_engine + .get_function:: ()>("QuantumApplication__Run") + .unwrap(); + main.call(); + Ok(simulator.get_model()) + } +} + +#[cfg(test)] +mod tests { + + use crate::interop::{ClassicalRegister, Measured, QuantumRegister, SemanticModel}; + use crate::interop::{Controlled, Instruction, Single}; + use tempfile::tempdir; + use super::run_ctx; + + #[ignore = "CI Requires runtime recompilation"] + #[test] + fn eval_test() -> Result<(), String> { + let dir = tempdir().expect(""); + let tmp_path = dir.into_path(); + + let name = String::from("Bell circuit"); + let mut model = SemanticModel::new(name); + model.add_reg(QuantumRegister::new(String::from("qr"), 
0).as_register()); + model.add_reg(QuantumRegister::new(String::from("qr"), 1).as_register()); + model.add_reg(ClassicalRegister::new(String::from("qc"), 2).as_register()); + + model.add_inst(Instruction::H(Single::new(String::from("qr0")))); + model.add_inst(Instruction::Cx(Controlled::new( + String::from("qr0"), + String::from("qr1"), + ))); + + model.add_inst(Instruction::M(Measured::new( + String::from("qr0"), + String::from("qc0"), + ))); + model.add_inst(Instruction::M(Measured::new( + String::from("qr1"), + String::from("qc1"), + ))); + + let generated_model = run(&model)?; + + assert!(generated_model.instructions.len() == 2); + Ok(()) + } + + pub fn run(model: &SemanticModel) -> Result { + //let ctx = inkwell::context::Context::create(); + //let context = pyqir_generator::populate_context(&ctx, &model).unwrap(); + //let model = run_ctx(context)?; + Ok(model.clone()) + } +} diff --git a/pyqir-jit/src/lib.rs b/pyqir-jit/src/lib.rs new file mode 100644 index 00000000..b75826af --- /dev/null +++ b/pyqir-jit/src/lib.rs @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#![deny(clippy::all, clippy::pedantic)] + +pub mod gates; +pub mod interop; +pub mod intrinsics; +pub mod jit; +pub mod python; +pub mod runtime; diff --git a/pyqir-jit/src/python.rs b/pyqir-jit/src/python.rs new file mode 100644 index 00000000..e5adc740 --- /dev/null +++ b/pyqir-jit/src/python.rs @@ -0,0 +1,131 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use log; +use pyo3::exceptions::{PyOSError}; +use pyo3::prelude::*; +use pyo3::types::PyDict; +use pyo3::PyErr; +use crate::interop::{ + Instruction +}; + + +#[pymodule] +fn pyqir_jit(_py: Python<'_>, m: &PyModule) -> PyResult<()> { + m.add_class::()?; + + Ok(()) +} + +#[pyclass] +pub struct PyQirJit { +} + +#[pymethods] +impl PyQirJit { + #[new] + fn new() -> Self { + PyQirJit { } + } + + fn controlled( + &self, + pyobj: &PyAny, + gate: &str, + control: String, + target: String, + ) -> PyResult<()> { + let has_gate = pyobj.hasattr(gate)?; + if has_gate { + let func = pyobj.getattr(gate)?; + let args = (control, target); + func.call1(args)?; + } + Ok(()) + } + + fn measured(&self, pyobj: &PyAny, gate: &str, qubit: String, target: String) -> PyResult<()> { + let has_gate = pyobj.hasattr(gate)?; + if has_gate { + let func = pyobj.getattr(gate)?; + let args = (qubit, target); + func.call1(args)?; + } + Ok(()) + } + + fn single(&self, pyobj: &PyAny, gate: &str, qubit: String) -> PyResult<()> { + let has_gate = pyobj.hasattr(gate)?; + if has_gate { + let func = pyobj.getattr(gate)?; + let args = (qubit,); + func.call1(args)?; + } + Ok(()) + } + + fn rotated(&self, pyobj: &PyAny, gate: &str, theta: f64, qubit: String) -> PyResult<()> { + let has_gate = pyobj.hasattr(gate)?; + if has_gate { + let func = pyobj.getattr(gate)?; + let args = (theta, qubit); + func.call1(args)?; + } + Ok(()) + } + + fn finish(&self, pyobj: &PyAny, dict: &PyDict) -> PyResult<()> { + let has_gate = pyobj.hasattr("finish")?; + if has_gate { + let func = pyobj.getattr("finish")?; + let args = (dict,); + func.call1(args)?; + } + Ok(()) + } + + fn eval(&self, file: String, pyobj: &PyAny) -> PyResult<()> { + let result = crate::jit::run_module(file); + if let Err(msg) = result { + let err: PyErr = PyOSError::new_err::(msg); + return Err(err); + } + let gen_model = result.unwrap(); + Python::with_gil(|py| -> PyResult<()> { + for instruction in gen_model.instructions { + match instruction { + 
Instruction::Cx(ins) => { + self.controlled(pyobj, "cx", ins.control, ins.target)? + } + Instruction::Cz(ins) => { + self.controlled(pyobj, "cz", ins.control, ins.target)? + } + Instruction::H(ins) => self.single(pyobj, "h", ins.qubit)?, + Instruction::M(ins) => self.measured(pyobj, "m", ins.qubit, ins.target)?, + Instruction::Reset(_ins) => { + todo!("Not Implemented") + } + Instruction::Rx(ins) => self.rotated(pyobj, "rx", ins.theta, ins.qubit)?, + Instruction::Ry(ins) => self.rotated(pyobj, "ry", ins.theta, ins.qubit)?, + Instruction::Rz(ins) => self.rotated(pyobj, "rz", ins.theta, ins.qubit)?, + Instruction::S(ins) => self.single(pyobj, "s", ins.qubit)?, + Instruction::SAdj(ins) => self.single(pyobj, "s_adj", ins.qubit)?, + Instruction::T(ins) => self.single(pyobj, "t", ins.qubit)?, + Instruction::TAdj(ins) => self.single(pyobj, "t_adj", ins.qubit)?, + Instruction::X(ins) => self.single(pyobj, "x", ins.qubit)?, + Instruction::Y(ins) => self.single(pyobj, "y", ins.qubit)?, + Instruction::Z(ins) => self.single(pyobj, "z", ins.qubit)?, + Instruction::DumpMachine => { + todo!("Not Implemented") + } + } + } + let dict = PyDict::new(py); + dict.set_item("number_of_qubits", gen_model.qubits.len())?; + self.finish(pyobj, dict)?; + Ok(()) + })?; + Ok(()) + } +} \ No newline at end of file diff --git a/pyqir-jit/src/runtime.rs b/pyqir-jit/src/runtime.rs new file mode 100644 index 00000000..71349a62 --- /dev/null +++ b/pyqir-jit/src/runtime.rs @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use qirlib::context::Context; +use qirlib::intrinsics::Intrinsics; +use crate::gates::CURRENT_GATES; +use crate::interop::SemanticModel; +use inkwell::execution_engine::ExecutionEngine; + +use super::gates::GateScope; + +pub(crate) struct Simulator { + scope: GateScope, +} + +impl<'ctx> Simulator { + pub fn new(context: &Context<'ctx>, ee: &ExecutionEngine<'ctx>) -> Self { + let simulator = Simulator { + scope: crate::gates::GateScope::new(), + }; + simulator.bind(context, ee); + simulator + } + + pub fn get_model(&self) -> SemanticModel { + let mut gs = CURRENT_GATES.write().unwrap(); + gs.infer_allocations(); + gs.get_model() + } + + fn bind(&self, context: &Context<'ctx>, ee: &ExecutionEngine<'ctx>) { + let intrinsics = Intrinsics::new(&context.module); + + if let Some(ins) = intrinsics.h_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__h__body as usize); + } + + if let Some(ins) = intrinsics.h_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__h__ctl as usize); + } + if let Some(ins) = intrinsics.m_ins { + ee.add_global_mapping( + &ins, + super::intrinsics::__quantum__qis__measure__body as usize, + ); + } + if let Some(ins) = intrinsics.r_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__r__body as usize); + } + if let Some(ins) = intrinsics.r_adj_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__r__adj as usize); + } + if let Some(ins) = intrinsics.r_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__r__ctl as usize); + } + if let Some(ins) = intrinsics.r_ctl_adj_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__r__ctladj as usize); + } + + if let Some(ins) = intrinsics.s_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__s__body as usize); + } + if let Some(ins) = intrinsics.s_adj_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__s__adj as usize); + } + if let Some(ins) = 
intrinsics.s_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__s__ctl as usize); + } + if let Some(ins) = intrinsics.s_ctl_adj_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__s__ctladj as usize); + } + + if let Some(ins) = intrinsics.t_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__t__body as usize); + } + if let Some(ins) = intrinsics.t_adj_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__t__adj as usize); + } + if let Some(ins) = intrinsics.t_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__t__ctl as usize); + } + if let Some(ins) = intrinsics.t_ctl_adj_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__t__ctladj as usize); + } + + if let Some(ins) = intrinsics.x_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__x__body as usize); + } + if let Some(ins) = intrinsics.x_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__x__ctl as usize); + } + + if let Some(ins) = intrinsics.y_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__y__body as usize); + } + if let Some(ins) = intrinsics.y_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__y__ctl as usize); + } + + if let Some(ins) = intrinsics.z_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__z__body as usize); + } + if let Some(ins) = intrinsics.z_ctl_ins { + ee.add_global_mapping(&ins, super::intrinsics::__quantum__qis__z__ctl as usize); + } + + if let Some(ins) = intrinsics.dumpmachine { + ee.add_global_mapping( + &ins, + super::intrinsics::__quantum__qis__dumpmachine__body as usize, + ); + } + + if let Some(ins) = intrinsics.dumpregister { + ee.add_global_mapping( + &ins, + super::intrinsics::__quantum__qis__dumpregister__body as usize, + ); + } + } +} diff --git a/pyqir-jit/tests/bell_qir_measure.ll b/pyqir-jit/tests/bell_qir_measure.ll new file mode 100644 index 
00000000..2046aefc --- /dev/null +++ b/pyqir-jit/tests/bell_qir_measure.ll @@ -0,0 +1,487 @@ +; ModuleID = 'Bell circuit' +source_filename = "./module.ll" + +%Array = type opaque +%Qubit = type opaque +%Result = type opaque +%String = type opaque + +@PauliX = internal constant i2 1 +@PauliY = internal constant i2 -1 +@PauliZ = internal constant i2 -2 +@0 = internal constant [3 x i8] c", \00" +@1 = internal constant [2 x i8] c"[\00" +@2 = internal constant [3 x i8] c", \00" +@3 = internal constant [2 x i8] c"[\00" +@4 = internal constant [2 x i8] c"]\00" +@5 = internal constant [2 x i8] c"]\00" + +define internal %Array* @QuantumApplication__Run__body() { +entry: + %qr0 = call %Qubit* @__quantum__rt__qubit_allocate() + %qr1 = call %Qubit* @__quantum__rt__qubit_allocate() + %results = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %qc = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %qc_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qc, i64 0) + %qc_result_0 = bitcast i8* %qc_0_raw to %Result** + %zero_0 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_0, i32 1) + store %Result* %zero_0, %Result** %qc_result_0, align 8 + %qc_1_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qc, i64 1) + %qc_result_1 = bitcast i8* %qc_1_raw to %Result** + %zero_1 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_1, i32 1) + store %Result* %zero_1, %Result** %qc_result_1, align 8 + %results_result_tmp_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %results, i64 0) + %results_result_tmp_result_0 = bitcast i8* %results_result_tmp_0_raw to %Array** + store %Array* %qc, %Array** %results_result_tmp_result_0, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qr0) + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + 
%__controlQubits__0_result_tmp_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %__controlQubits__0_result_tmp_result_0 = bitcast i8* %__controlQubits__0_result_tmp_0_raw to %Qubit** + store %Qubit* %qr0, %Qubit** %__controlQubits__0_result_tmp_result_0, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qr1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %qr0) + call void @__quantum__rt__qubit_release(%Qubit* %qr1) + ret %Array* %results +} + +declare void @__quantum__qis__dumpmachine__body(i8*) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare %Result* @__quantum__rt__result_get_zero() + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__y__body(%Qubit*) + +declare void @__quantum__qis__z__body(%Qubit*) + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__h__body(%Qubit*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + %2 = load i2, i2* @PauliZ, align 1 + store i2 %2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + 
%qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %qubit, %Qubit** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %5 +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) { +entry: + %pauli = load i2, i2* @PauliX, align 1 + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) { +entry: + %pauli = load i2, i2* @PauliY, align 1 + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { +entry: + %pauli = load i2, i2* @PauliZ, align 1 + call void @__quantum__qis__r__body(i2 %pauli, double 
%theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s__adj(%Qubit*) + +declare void @__quantum__qis__t__body(%Qubit*) + +declare void @__quantum__qis__t__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +declare void @__quantum__qis__r__body(i2, double, %Qubit*) + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define { i64, i8* }* @QuantumApplication__Run__Interop() #0 { +entry: + %0 = call %Array* @QuantumApplication__Run__body() + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = mul i64 %1, 8 + %3 = call i8* @__quantum__rt__memory_allocate(i64 %2) + %4 = ptrtoint i8* %3 to i64 + %5 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = mul i64 %6, 8 + %9 = add i64 %4, %8 + %10 = inttoptr i64 %9 to { i64, i8* }** + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %6) + %12 = bitcast i8* %11 to %Array** + %13 = load %Array*, %Array** %12, align 8 + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %13) + %15 = mul i64 %14, 1 + %16 = call i8* @__quantum__rt__memory_allocate(i64 %15) + %17 = ptrtoint i8* %16 to i64 + %18 = sub i64 %14, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %19 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__memory_allocate(i64 ptrtoint ({ i64, i8* }* getelementptr ({ i64, i8* }, { i64, i8* }* null, i32 1) to i64)) + %21 = bitcast i8* %20 to { i64, i8* }* + %22 = getelementptr { i64, i8* }, { i64, i8* }* %21, i64 0, i32 0 + store i64 %1, i64* %22, align 4 + %23 = getelementptr { i64, i8* }, { i64, i8* }* %21, i64 0, i32 1 + store i8* 
%3, i8** %23, align 8 + %24 = sub i64 %1, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %body__1 + %25 = phi i64 [ 0, %body__1 ], [ %36, %exiting__2 ] + %26 = icmp sle i64 %25, %18 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = mul i64 %25, 1 + %28 = add i64 %17, %27 + %29 = inttoptr i64 %28 to i8* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %25) + %31 = bitcast i8* %30 to %Result** + %32 = load %Result*, %Result** %31, align 8 + %33 = call %Result* @__quantum__rt__result_get_zero() + %34 = call i1 @__quantum__rt__result_equal(%Result* %32, %Result* %33) + %35 = select i1 %34, i8 0, i8 -1 + store i8 %35, i8* %29, align 1 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %37 = call i8* @__quantum__rt__memory_allocate(i64 ptrtoint ({ i64, i8* }* getelementptr ({ i64, i8* }, { i64, i8* }* null, i32 1) to i64)) + %38 = bitcast i8* %37 to { i64, i8* }* + %39 = getelementptr { i64, i8* }, { i64, i8* }* %38, i64 0, i32 0 + store i64 %14, i64* %39, align 4 + %40 = getelementptr { i64, i8* }, { i64, i8* }* %38, i64 0, i32 1 + store i8* %16, i8** %40, align 8 + store { i64, i8* }* %38, { i64, i8* }** %10, align 8 + br label %exiting__1 + +header__3: ; preds = %exiting__3, %exit__1 + %41 = phi i64 [ 0, %exit__1 ], [ %48, %exiting__3 ] + %42 = icmp sle i64 %41, %24 + br i1 %42, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %41) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call i64 @__quantum__rt__array_get_size_1d(%Array* %45) + %47 = sub i64 %46, 1 + br label %header__4 + +exiting__3: ; preds = %exit__4 + %48 = add i64 %41, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret { i64, i8* }* 
%21 + +header__4: ; preds = %exiting__4, %body__3 + %49 = phi i64 [ 0, %body__3 ], [ %54, %exiting__4 ] + %50 = icmp sle i64 %49, %47 + br i1 %50, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 %49) + %52 = bitcast i8* %51 to %Result** + %53 = load %Result*, %Result** %52, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %53, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %54 = add i64 %49, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + br label %exiting__3 +} + +declare i8* @__quantum__rt__memory_allocate(i64) + +define void @QuantumApplication__Run() #1 { +entry: + %0 = call %Array* @QuantumApplication__Run__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i32 0, i32 0)) + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi %String* [ %2, %entry ], [ %36, %exiting__1 ] + %6 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %7 = icmp sle i64 %6, %4 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %6) + %9 = bitcast i8* %8 to %Array** + %10 = load %Array*, %Array** %9, align 8 + %11 = icmp ne %String* %5, %2 + br i1 %11, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %body__1 + %12 = call %String* @__quantum__rt__string_concatenate(%String* %5, %String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + br label %condContinue__1 + 
+condContinue__1: ; preds = %condTrue__1, %body__1 + %13 = phi %String* [ %12, %condTrue__1 ], [ %5, %body__1 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @2, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %17 = sub i64 %16, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %18 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @5, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %5, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__message(%String* %20) + %21 = sub i64 %3, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %condContinue__1 + %22 = phi %String* [ %15, %condContinue__1 ], [ %32, %exiting__2 ] + %23 = phi i64 [ 0, %condContinue__1 ], [ %33, %exiting__2 ] + %24 = icmp sle i64 %23, %17 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %23) + %26 = bitcast i8* %25 to %Result** + %27 = load %Result*, %Result** %26, align 8 + %28 = icmp ne %String* %22, %15 + br i1 %28, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %body__2 + %29 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %14) + call void @__quantum__rt__string_update_reference_count(%String* %22, 
i32 -1) + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %body__2 + %30 = phi %String* [ %29, %condTrue__2 ], [ %22, %body__2 ] + %31 = call %String* @__quantum__rt__result_to_string(%Result* %27) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %condContinue__2 + %33 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @4, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %34) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %13, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %exit__1 + %37 = phi i64 [ 0, %exit__1 ], [ %44, %exiting__3 ] + %38 = icmp sle i64 %37, %21 + br i1 %38, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %37) + %40 = bitcast i8* %39 to %Array** + %41 = load %Array*, %Array** %40, align 8 + %42 = call i64 @__quantum__rt__array_get_size_1d(%Array* %41) + %43 = sub i64 %42, 1 + br label %header__4 + +exiting__3: ; preds = %exit__4 + %44 = add i64 %37, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void 
@__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + ret void + +header__4: ; preds = %exiting__4, %body__3 + %45 = phi i64 [ 0, %body__3 ], [ %50, %exiting__4 ] + %46 = icmp sle i64 %45, %43 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %45) + %48 = bitcast i8* %47 to %Result** + %49 = load %Result*, %Result** %48, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %49, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %50 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + br label %exiting__3 +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare %String* @__quantum__rt__result_to_string(%Result*) + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/pyqir-jit/tests/test_api.py b/pyqir-jit/tests/test_api.py new file mode 100644 index 00000000..1677f219 --- /dev/null +++ b/pyqir-jit/tests/test_api.py @@ -0,0 +1,73 @@ +from pyqir_jit import * +import pytest + +class GateLogger(GateSet): + def __init__(self): + # call parent class constructor + super().__init__() + self.number_of_registers = 0 + self.instructions = [] + + def cx(self, control: str, target: str): + self.instructions.append(f"cx control[{control}], target[{target}]") + + def cz(self, control: str, target: str): + self.instructions.append(f"cz control[{control}], target[{target}]") + + def h(self, target: str): + self.instructions.append(f"h qubit[{target}]") + + def m(self, qubit: str, target: str): + 
self.instructions.append(f"m qubit[{qubit}] => out[{target}]") + + def reset(self, target: str): + self.instructions.append(f"reset {target}") + + def rx(self, theta: float, qubit: str): + self.instructions.append(f"rx theta[{theta}] qubit[{qubit}]") + + def ry(self, theta: float, qubit: str): + self.instructions.append(f"ry theta[{theta}] qubit[{qubit}]") + + def rz(self, theta: float, qubit: str): + self.instructions.append(f"rz theta[{theta}] qubit[{qubit}]") + + def s(self, qubit: str): + self.instructions.append(f"s qubit[{qubit}]") + + def s_adj(self, qubit: str): + self.instructions.append(f"s_adj qubit[{qubit}]") + + def t(self, qubit: str): + self.instructions.append(f"t qubit[{qubit}]") + + def t_adj(self, qubit: str): + self.instructions.append(f"t_adj qubit[{qubit}]") + + def x(self, qubit: str): + self.instructions.append(f"x qubit[{qubit}]") + + def y(self, qubit: str): + self.instructions.append(f"y qubit[{qubit}]") + + def z(self, qubit: str): + self.instructions.append(f"z qubit[{qubit}]") + + def dump_machine(self): + self.instructions.append(f"dumpmachine") + + def finish(self, metadata: dict): + print("finished") + super().finish(metadata) + self.number_of_registers = self.number_of_qubits + +def test_bell_qir(): + file = "tests/bell_qir_measure.ll" + qirjit = QirJit() + generator = GateLogger() + qirjit.eval(file, generator) + + assert len(generator.instructions) == 2 + assert str(generator.instructions[0]).startswith("h") + assert str(generator.instructions[1]).startswith("cx") + diff --git a/pyqir-jit/tox.ini b/pyqir-jit/tox.ini new file mode 100644 index 00000000..969767a2 --- /dev/null +++ b/pyqir-jit/tox.ini @@ -0,0 +1,26 @@ +[tox] +isolated_build = True + +[testenv] + +# https://github.com/tox-dev/tox/issues/1550 +# PYTHONIOENCODING = utf-8 needs to be set to work around windows bug +setenv = + LLVM_SYS_110_PREFIX = {env:LLVM_SYS_110_PREFIX} + PYTHONIOENCODING = utf-8 + +# needed temporarily for build to find cl.exe +passenv = * + +deps = 
-rrequirements-dev.txt + +[testenv:test] +description = Run the unit tests under {basepython} +commands = + python -m pip install . + pytest {posargs} + +[testenv:pack] +description = Build the wheels under all installed platforms +commands = + maturin build --release diff --git a/pyqir-parser/Cargo.toml b/pyqir-parser/Cargo.toml new file mode 100644 index 00000000..ec428399 --- /dev/null +++ b/pyqir-parser/Cargo.toml @@ -0,0 +1,39 @@ +[package] +authors = ["Microsoft"] +name = "pyqir-parser" +version = "0.1.0" +edition = "2018" +license = "MIT" +description = "Python based QIR parser library." +readme = "README.md" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +llvm-ir = { version = "0.8.0", features = ["llvm-11"] } + +[dependencies.pyo3] +version = "0.14.2" + +[features] +extension-module = ["pyo3/abi3-py36", "pyo3/extension-module"] +default = ["extension-module"] + +[lib] +name = "pyqir_parser" +crate-type = ["cdylib"] + +[package.metadata.maturin] +classifier=[ + "License :: OSI Approved :: MIT License", + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python", + "Programming Language :: Rust", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", +] diff --git a/pyqir-parser/MANIFEST.in b/pyqir-parser/MANIFEST.in new file mode 100644 index 00000000..4b8d0b8f --- /dev/null +++ b/pyqir-parser/MANIFEST.in @@ -0,0 +1,2 @@ +include pyproject.toml Cargo.toml +recursive-include src * \ No newline at end of file diff --git a/pyqir-parser/README.md b/pyqir-parser/README.md new file mode 100644 index 00000000..d9e77525 --- /dev/null +++ b/pyqir-parser/README.md @@ -0,0 +1,22 @@ +# pyqir-parser + +## Building and Testing + +To build this package, first install 
`maturin`: + +```shell +pip install maturin +``` + +To build and test use `maturin develop`: + +```shell +pip install -r requirements-dev.txt +maturin develop && pytest +``` + +Alternatively, install tox and run the tests inside an isolated environment: + +```shell +tox -e py +``` \ No newline at end of file diff --git a/pyqir-parser/pyproject.toml b/pyqir-parser/pyproject.toml new file mode 100644 index 00000000..7bf4f8e7 --- /dev/null +++ b/pyqir-parser/pyproject.toml @@ -0,0 +1,7 @@ +[project] +name = "pyqir_parser" +requires-python = ">=3.6" + +[build-system] +requires = ["maturin>=0.10,<0.12"] +build-backend = "maturin" diff --git a/pyqir-parser/pyqir_parser/__init__.py b/pyqir-parser/pyqir_parser/__init__.py new file mode 100644 index 00000000..dca76d7a --- /dev/null +++ b/pyqir-parser/pyqir_parser/__init__.py @@ -0,0 +1,4 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. + +from .parser import * diff --git a/pyqir-parser/pyqir_parser/parser.py b/pyqir-parser/pyqir_parser/parser.py new file mode 100644 index 00000000..a040d6e3 --- /dev/null +++ b/pyqir-parser/pyqir_parser/parser.py @@ -0,0 +1,912 @@ +# Copyright(c) Microsoft Corporation. +# Licensed under the MIT License. + +from .pyqir_parser import * +from typing import List, Optional, Tuple + + +class QirType: + """ + Instances of QirType represent a type description in QIR. Specific subclasses may contain + additional properties of that type. 
+ """ + def __new__(cls, ty: PyQirType): + if ty.is_qubit: + return super().__new__(QirQubitType) + elif ty.is_result: + return super().__new__(QirResultType) + elif ty.is_void: + return super().__new__(QirVoidType) + elif ty.is_integer: + return super().__new__(QirIntegerType) + elif ty.is_pointer: + return super().__new__(QirPointerType) + elif ty.is_double: + return super().__new__(QirDoubleType) + elif ty.is_array: + return super().__new__(QirArrayType) + elif ty.is_struct: + return super().__new__(QirStructType) + elif ty.is_named_struct: + return super().__new__(QirNamedStructType) + else: + return super().__new__(cls) + + def __init__(self, ty: PyQirType): + self.ty = ty + +class QirVoidType(QirType): + """ + Instances of QirVoidType represent a void type in QIR. + """ + pass + +class QirIntegerType(QirType): + """ + Instances of QirIntegerType represent a signed integer in QIR. Note that there is no unsigned + integer type, just unsigned arithmetic instructions. + """ + + @property + def width(self) -> int: + """ + Gets the bit width of this integer type. + """ + return self.ty.integer_width + +class QirPointerType(QirType): + """ + Instances of QirPointerType represent a pointer to some other type in QIR. + """ + + @property + def type(self) -> QirType: + """ + Gets the QirType this to which this pointer points. + """ + return QirType(self.ty.pointer_type) + + @property + def addrspace(self): + """ + Gets the address space to which this pointer points. + """ + return self.ty.pointer_addrspace + +class QirDoubleType(QirType): + """ + Instances of QirDoubleType represent the double-sized floating point type in a QIR program. + """ + pass + +class QirArrayType(QirType): + """ + Instances of the QirArrayType represent the native LLVM fixed-length array type in a QIR program. + """ + + @property + def element_types(self) -> List[QirType]: + """ + Gets the ordered list of QirTypes representing the underlying array types. 
+ """ + return [QirType(i) for i in self.ty.array_element_type] + + @property + def element_count(self) -> int: + """ + Gets the count of elements in the array. + """ + return self.ty.array_num_elements + +class QirStructType(QirType): + """ + Instances of QirStructType represent an anonymous struct with inline defined types in QIR. + """ + + @property + def struct_element_types(self) -> List[QirType]: + """ + Gets the ordered list of QirTypes representing the underlying struct types. + """ + return [QirType(i) for i in self.ty.struct_element_types] + +class QirNamedStructType(QirType): + """ + Instances of QirNamedStruct represent a globally defined struct, often used to represent opaque + poitners. + """ + + @property + def name(self) -> str: + """ + Gets the name of this struct. + """ + return self.ty.named_struct_name + +class QirQubitType(QirNamedStructType): + """ + Instances of QirQubitType are specific QIR opaque pointer corresponding to the Qubit special + type. + """ + pass + +class QirResultType(QirNamedStructType): + """ + Instances of QirResultType are specific QIR opaque pointer corresponding to the Result special + type. + """ + pass + +class QirOperand: + """ + Instances of QirOperand represent an instance in a QIR program, either a local operand (variable) + or constant. 
+ """ + def __new__(cls, op: PyQirOperand): + if op.is_local: + return super().__new__(QirLocalOperand) + elif op.is_constant: + if op.constant.is_qubit: + return super().__new__(QirQubitConstant) + elif op.constant.is_result: + return super().__new__(QirResultConstant) + elif op.constant.is_int: + return super().__new__(QirIntConstant) + elif op.constant.is_float: + return super().__new__(QirDoubleConstant) + elif op.constant.is_null: + return super().__new__(QirNullConstant) + else: + return super().__new__(cls) + else: + return super().__new__(cls) + + def __init__(self, op: PyQirOperand): + self.op = op + self.const = op.constant + +class QirLocalOperand(QirOperand): + """ + Instances of QirLocalOperand represent a typed local variable in a QIR program. + """ + + @property + def name(self) -> str: + """ + Gets the name identifier for this operand. This could be an identifier from the original + source language, a generated name based on an identifier, or a generated integer name. + """ + return self.op.local_name + + @property + def type(self) -> QirType: + """ + Gets the QirType instance representing the type for this operand. + """ + return QirType(self.op.local_type) + +class QirConstant(QirOperand): + """ + Instances of QirConstant represent a constant value in a QIR program. + """ + + @property + def type(self) -> QirType: + """ + Gets the QirType instance representing the type of this constant. + """ + return QirType(self.const.type) + +class QirIntConstant(QirConstant): + """ + Instances of QirIntConstant represent a constant integer value in a QIR program. + """ + + @property + def value(self) -> int: + """ + Gets the integer value for this constant. + """ + return self.const.int_value + + @property + def width(self) -> int: + """ + Gets the bit width for this integer constant. 
+ """ + return self.const.int_width + +class QirDoubleConstant(QirConstant): + """ + Instances of QirDoubleConstant represent a constant double-sized float value in a QIR program. + """ + + @property + def value(self) -> float: + """ + Gets the double-sized float value for this constant. + """ + return self.const.float_double_value + +class QirNullConstant(QirConstant): + """ + Instances of QirNullConstant represent a constant null pointer in a QIR program. Use the type + property to inspect which pointer type this null represents. + """ + + @property + def value(self): + """ + The value of QirNullConstant instances is always None. + """ + return None + +class QirQubitConstant(QirConstant): + """ + Instances of QirQubitConstant represent a statically allocated qubit id in a QIR program. + """ + + @property + def value(self) -> int: + """ + Gets the integer identifier for this qubit constant. + """ + return self.const.qubit_static_id + + @property + def id(self) -> int: + """ + Gets the integer identifier for this qubit constant. + """ + return self.value + +class QirResultConstant(QirConstant): + """ + Instances of QirResultConstant represent a statically allocated result id in a QIR program. + """ + + @property + def value(self) -> int: + """ + Gets the integer identifier for the is result constant. + """ + return self.const.result_static_id + + @property + def id(self) -> int: + """ + gets the integer identifier for this result constant. + """ + return self.value + +class QirTerminator: + """ + Instances of QirTerminator represent the special final instruction at the end of a block that + indicates how control flow should transfer. 
+ """ + + def __new__(cls, term: PyQirTerminator): + if term.is_ret: + return super().__new__(QirRetTerminator) + elif term.is_br: + return super().__new__(QirBrTerminator) + elif term.is_condbr: + return super().__new__(QirCondBrTerminator) + elif term.is_switch: + return super().__new__(QirSwitchTerminator) + elif term.is_unreachable: + return super().__new__(QirUnreachableTerminator) + else: + return super().__new__(cls) + + def __init__(self, term: PyQirTerminator) -> None: + self.term = term + +class QirRetTerminator(QirTerminator): + """ + Instances of QirRetTerminator represent the ret instruction in a QIR program. + """ + + @property + def operand(self) -> QirOperand: + """ + Gets the operand that will be returned by the ret instruction. + """ + return QirOperand(self.term.ret_operand) + +class QirBrTerminator(QirTerminator): + """ + Instances of QirBrTerminator represent a branch terminator instruction that unconditionally + jumps execution to the named destination block. + """ + + @property + def dest(self) -> str: + """ + Gets the name of the block this branch jumps to. + """ + return self.term.br_dest + +class QirCondBrTerminator(QirTerminator): + """ + Instances of QirCondBrTerminator represent a conditional branch terminator instruction that + decides which named block to jump to based on an given operand. + """ + + @property + def condition(self) -> QirOperand: + """ + Gets the QirOperand representing the condition used to determine the block to jump to. + """ + return QirOperand(self.term.condbr_condition) + + @property + def true_dest(self) -> str: + """ + Gets the name of the block that will be jumped to if the condition evaluates to true. + """ + return self.term.condbr_true_dest + + @property + def false_dest(self) -> str: + """ + Gets the name of the block that will be jumped to if the condition evaluates to false. 
+ """ + return self.term.condbr_false_dest + +class QirSwitchTerminator(QirTerminator): + """ + Instances of QirSwitchTerminator represent a switch terminator instruction that can jump + to one or more blocks based on matching values of a given operand, or jump to a fallback block + in the case that no matches are found. + """ + + @property + def operand(self) -> QirLocalOperand: + """ + Gets the operand variable of the switch statement. + """ + return QirLocalOperand(self.term.switch_operand) + + @property + def dest_pairs(self) -> List[Tuple[QirConstant, str]]: + """ + Gets a list of pairs representing the constant values to compare the operand against and the + matching block name to jump to if the comparison succeeds. + """ + return [(QirConstant(p[0]), p[1]) for p in self.term.switch_dests] + + @property + def default_dest(self) -> str: + """ + Gets the name of the default block that the switch will jump to if no values match the given + operand. + """ + return self.term.switch_default_dest + +class QirUnreachableTerminator(QirTerminator): + """ + Instances of QirUnreachableTerminator represent an unreachable terminator instruction. As the name + implies, this terminator is not expected to be reached such that some instruction in the block + before this terminator should halt program execution. + """ + pass + +class QirInstr: + """ + Instances of QirInstr represent an instruction within a block of a QIR program. See the subclasses + of this type for specifically supported instructions. 
+ """ + + def __new__(cls, instr: PyQirInstruction): + if instr.is_qis_call: + return super().__new__(QirQisCallInstr) + elif instr.is_rt_call: + return super().__new__(QirRtCallInstr) + elif instr.is_qir_call: + return super().__new__(QirQirCallInstr) + elif instr.is_call: + return super().__new__(QirCallInstr) + elif instr.is_add: + return super().__new__(QirAddInstr) + elif instr.is_sub: + return super().__new__(QirSubInstr) + elif instr.is_mul: + return super().__new__(QirMulInstr) + elif instr.is_udiv: + return super().__new__(QirUDivInstr) + elif instr.is_sdiv: + return super().__new__(QirSDivInstr) + elif instr.is_urem: + return super().__new__(QirURemInstr) + elif instr.is_srem: + return super().__new__(QirSRemInstr) + elif instr.is_and: + return super().__new__(QirAndInstr) + elif instr.is_or: + return super().__new__(QirOrInstr) + elif instr.is_xor: + return super().__new__(QirXorInstr) + elif instr.is_shl: + return super().__new__(QirShlInstr) + elif instr.is_lshr: + return super().__new__(QirLShrInstr) + elif instr.is_ashr: + return super().__new__(QirAShrInstr) + elif instr.is_fadd: + return super().__new__(QirFAddInstr) + elif instr.is_fsub: + return super().__new__(QirFSubInstr) + elif instr.is_fmul: + return super().__new__(QirFMulInstr) + elif instr.is_fdiv: + return super().__new__(QirFDivInstr) + elif instr.is_frem: + return super().__new__(QirFRemInstr) + elif instr.is_fneg: + return super().__new__(QirFNegInstr) + elif instr.is_icmp: + return super().__new__(QirICmpInstr) + elif instr.is_fcmp: + return super().__new__(QirFCmpInstr) + elif instr.is_phi: + return super().__new__(QirPhiInstr) + else: + return super().__new__(cls) + + def __init__(self, instr: PyQirInstruction): + self.instr = instr + + @property + def output_name(self) -> Optional[str]: + """ + Gets the name of the local operand that receives the output of this instruction, or + None if the instruction does not return a value. 
+ """ + return self.instr.output_name + + @property + def type(self) -> QirType: + """ + Gets the QirType instance representing the output of this instruction. If the instruction + has no output, the type will be an instance of QirVoidType. + """ + return QirType(self.instr.type) + +class QirOpInstr(QirInstr): + """ + Instances of QirOpInstr represent the class of instructions that have one or more operands that + they operate on. + """ + + @property + def target_operands(self) -> List[QirOperand]: + """ + Gets the list of operands that this instruction operates on. + """ + return [QirOperand(i) for i in self.instr.target_operands] + +class QirAddInstr(QirOpInstr): + """ + Instances of QirAddIntr represent an integer addition instruction that takes two operands. + """ + pass + +class QirSubInstr(QirOpInstr): + """ + Instances of QirSubIntr represent an integer subtraction instruction that takes two operands. + """ + pass + +class QirMulInstr(QirOpInstr): + """ + Instances of QirMulIntr represent an integer multiplication instruction that takes two operands. + """ + pass + +class QirUDivInstr(QirOpInstr): + """ + Instances of QirUDivIntr represent an unsigned integer division instruction that takes two operands. + """ + pass + +class QirSDivInstr(QirOpInstr): + """ + Instances of QirSDivIntr represent a signed integer division instruction that takes two operands. + """ + pass + +class QirURemInstr(QirOpInstr): + """ + Instances of QirURemIntr represent an unsigned integer remainder instruction that takes two operands. + """ + pass + +class QirSRemInstr(QirOpInstr): + """ + Instances of QirSRemIntr represent a signed integer remainder instruction that takes two operands. + """ + pass + +class QirAndInstr(QirOpInstr): + """ + Instances of QirAndIntr represent a boolean and instruction that takes two operands. + """ + pass + +class QirOrInstr(QirOpInstr): + """ + Instances of QirOrIntr represent a boolean or instruction that takes two operands. 
+ """ + pass + +class QirXorInstr(QirOpInstr): + """ + Instances of QirXorIntr represent a boolean xor instruction that takes two operands. + """ + pass + +class QirShlInstr(QirOpInstr): + """ + Instances of QirShlIntr represent a bitwise shift left instruction that takes two operands. + """ + pass + +class QirLShrInstr(QirOpInstr): + """ + Instances of QirLShrIntr represent a logical bitwise shift right instruction that takes two operands. + """ + pass + +class QirAShrInstr(QirOpInstr): + """ + Instances of QirLShrIntr represent an arithmetic bitwise shift right instruction that takes two operands. + """ + pass + +class QirFAddInstr(QirOpInstr): + """ + Instances of QirFAddIntr represent a floating-point addition instruction that takes two operands. + """ + pass + +class QirFSubInstr(QirOpInstr): + """ + Instances of QirFSubIntr represent a floating-point subtraction instruction that takes two operands. + """ + pass + +class QirFMulInstr(QirOpInstr): + """ + Instances of QirFMulIntr represent a floating-point multiplication instruction that takes two operands. + """ + pass + +class QirFDivInstr(QirOpInstr): + """ + Instances of QirFDivIntr represent a floating-point division instruction that takes two operands. + """ + pass + +class QirFRemInstr(QirOpInstr): + """ + Instances of QirFRemIntr represent a floating-point remainder instruction that takes two operands. + """ + pass + +class QirFNegInstr(QirOpInstr): + """ + Instances of QirFNegIntr represent a floating-point negation instruction that takes one operand. + """ + pass + +class QirICmpInstr(QirOpInstr): + """ + Instances of QirICmpIntr represent an integer comparison instruction that takes two operands, + and uses a specific predicate to output the boolean result of the comparison. + """ + + @property + def predicate(self) -> str: + """ + Gets a string representing the predicate operation to perform. Possible values are + "eq", "ne", "ugt", "uge", "ult", "ule", "sgt", "sge", "slt", and "sle". 
+ """ + return self.instr.icmp_predicate + +class QirFCmpInstr(QirOpInstr): + """ + Instances of QirFCmpInstr represent a floating-point comparison instruction that takes two operands, + and uses a specific predicate to output the boolean result of the comparison. + """ + + @property + def predicate(self) -> str: + """ + Gets a string representing the predicate operation to perform. Possible values are + "false", "oeq", "ogt", "oge", "olt", "ole", "one", "ord", "uno", "ueq", "ugt", "uge", "ult", + "ule", "une", and "true" + """ + return self.instr.fcmp_predicate + +class QirPhiInstr(QirInstr): + """ + Instances of QirPhiInstr represent a phi instruction that selects a value for an operand based + on the name of the block that transferred execution to the current block. + """ + + @property + def incoming_values(self) -> List[Tuple[QirOperand, str]]: + """ + Gets a list of all the incoming value pairs for this phi node, where each pair is the QirOperand + for the value to use and the string name of the originating block. + """ + return [(QirOperand(p[0]), p[1]) for p in self.instr.phi_incoming_values] + + def get_incoming_value_for_name(self, name: str) -> Optional[QirOperand]: + """ + Gets the QirOperand representing the value for a given originating block, or None if that + name is not found. + :param name: the block name to search for. + """ + op = self.instr.get_phi_incoming_value_for_name(name) + if isinstance(op, PyQirOperand): + return QirOperand(op) + else: + return None + +class QirCallInstr(QirInstr): + """ + Instances of QirCallInstr represent a call instruction in a QIR program. + """ + + @property + def func_name(self) -> str: + """ + Gets the name of the function called by this instruction. + """ + return self.instr.call_func_name + + @property + def func_args(self) -> List[QirOperand]: + """ + Gets the list of QirOperand instances that are passed as arguments to the function call. 
+ """ + return [QirOperand(i) for i in self.instr.call_func_params] + +class QirQisCallInstr(QirCallInstr): + """ + Instances of QirQisCallInstr represent a call instruction where the function name begins with + "__quantum__qis__" indicating that it is a function from the QIR quantum intrinsic set. + """ + pass + +class QirRtCallInstr(QirCallInstr): + """ + Instances of QirRtCallInstr represent a call instruction where the function name begins with + "__quantum__rt__" indicating that it is a function from the QIR runtime. + """ + pass + +class QirQirCallInstr(QirCallInstr): + """ + Instances of QirQirCallInstr represent a call instruction where the function name begins with + "__quantum__qir__" indicating that it is a function from the QIR base profile. + """ + pass + +class QirBlock: + """ + Instances of the QirBlock type represent a basic block within a function body. Each basic block is + comprised of a list of instructions executed in sequence and a single, special final instruction + called a terminator that indicates where execution should jump at the end of the block. + """ + + def __init__(self, block: PyQirBasicBlock): + self.block = block + + @property + def name(self) -> str: + """ + Gets the identifying name for this block. This is unique within a given function and acts + as a label for any branches that transfer execution to this block. + """ + return self.block.name + + @property + def instructions(self) -> List[QirInstr]: + """ + Gets the list of instructions that make up this block. The list is ordered; instructions are + executed from first to last unconditionally. This list does not include the special + terminator instruction (see QirBlock.terminator). + """ + return [QirInstr(i) for i in self.block.instructions] + + @property + def terminator(self) -> QirTerminator: + """ + Gets the terminator instruction for this block. Every block has exactly one terminator + and it is the last intruction in the block. 
+ """ + return QirTerminator(self.block.terminator) + + @property + def phi_nodes(self) -> List[QirPhiInstr]: + """ + Gets any phi nodes defined for this block. Phi nodes are a special instruction that defines + variables based on which block transferred execution to this block. A block may have any number + of phi nodes, but they are always the first instructions in any given block. A block with no + phi nodes will return an empty list. + """ + return [QirPhiInstr(i) for i in self.block.phi_nodes] + + def get_phi_pairs_by_source_name(self, name: str) -> List[Tuple[str, QirOperand]]: + """ + Gets the variable name, variable value pairs for any phi nodes in this block that correspond + to the given name. If the name doesn't match a block that can branch to this block or if + this block doesn't include any phi nodes, the list will be empty. + """ + return [(p[0], QirOperand(p[1])) for p in self.block.get_phi_pairs_by_source_name(name)] + +class QirParameter: + """ + Instances of the QirParameter type describe a parameter in a function definition or declaration. They + include a type and a name, where the name is used in the function body as a variable. + """ + + def __init__(self, param: PyQirParameter): + self.param = param + + @property + def name(self) -> str: + """ + Gets the name of this parameter, used as the variable identifier within the body of the + function. + """ + return self.param.name + + @property + def type(self) -> QirType: + """ + Gets the type of this parameter as represented in the QIR. + """ + return QirType(self.param.type) + +class QirFunction: + """ + Instances of the QirFunction type represent a single function in the QIR program. They + are made up of one or more blocks that represent function execution flow. + """ + + def __init__(self, func: PyQirFunction): + self.func = func + + @property + def name(self) -> str: + """ + Gets the string name for this function. 
+ """ + return self.func.name + + @property + def parameters(self) -> List[QirParameter]: + """ + Gets the list of parameters used when calling this function. + """ + return [QirParameter(i) for i in self.func.parameters] + + @property + def return_type(self) -> QirType: + """ + Gets the return type for this function. + """ + return QirType(self.func.return_type) + + @property + def blocks(self) -> List[QirBlock]: + """ + Gets all the basic blocks for this function. + """ + return [QirBlock(i) for i in self.func.blocks] + + @property + def required_qubits(self) -> Optional[int]: + """ + Gets the number of qubits needed to execute this function based on the + "RequiredQubits" attribute, or None if that attribute is not present. + """ + return self.func.required_qubits + + @property + def required_results(self) -> Optional[int]: + """ + Gets the number of result bits needed to execute this function based on the + "RequiredResults" attribute, or None if that attribute is not present. + """ + return self.func.required_results + + def get_attribute_value(self, name: str) -> Optional[str]: + """ + Gets the string value of the given attribute key name, or None if that attribute + is missing or has no defined value. + :param name: the name of the attribute to look for + """ + return self.func.get_attribute_value(name) + + def get_block_by_name(self, name: str) -> Optional[QirBlock]: + """ + Gets the block with the given name, or None if no block with that name is found. + :param name: the name of the block to look for + """ + b = self.func.get_block_by_name(name) + if b is not None: + return QirBlock(b) + return None + + def get_instruction_by_output_name(self, name: str) -> Optional[QirInstr]: + """ + Gets the instruction anywhere in the function where the variable with a given name + is set. Since LLVM requires any variable is defined by only one instruction, this is + guaranteed to be no more than one instruction. 
This will return None if no such instruction + can be found. + :param name: the name of the variable to search for + """ + instr = self.func.get_instruction_by_output_name(name) + if instr is not None: + return QirInstr(instr) + return None + +class QirModule: + """ + Instances of QirModule parse a QIR program from bitcode into an in-memory + representation for iterating over the program structure. They contain all the + functions and global definitions from the program. + """ + + def __init__(self, *args): + if isinstance(args[0], PyQirModule): + self.module = args[0] + elif isinstance(args[0], str): + self.module = module_from_bitcode(args[0]) + else: + raise TypeError("Unrecognized argument type. Input must be string path to bitcode or PyQirModule object.") + + @property + def functions(self) -> List[QirFunction]: + """ + Gets all the functions defined in this module. + """ + return [QirFunction(i) for i in self.module.functions] + + + def get_func_by_name(self, name: str) -> Optional[QirFunction]: + """ + Gets the function with the given name, or None if no matching function is found. + :param name: the name of the function to get + """ + f = self.module.get_func_by_name(name) + if isinstance(f, PyQirFunction): + return QirFunction(f) + else: + return None + + def get_funcs_by_attr(self, attr: str) -> List[QirFunction]: + """ + Gets any functions that have an attribute whose name matches the provided string. + :param attr: the attribute to use when looking for functions + """ + return [QirFunction(i) for i in self.module.get_funcs_by_attr(attr)] + + @property + def entrypoint_funcs(self) -> List[QirFunction]: + """ + Gets any functions with the "EntryPoint" attribute. + """ + return [QirFunction(i) for i in self.module.get_entrypoint_funcs()] + + @property + def interop_funcs(self) -> List[QirFunction]: + """ + Gets any functions with the "InteropFriendly" attribute. 
+ """ + return [QirFunction(i) for i in self.module.get_interop_funcs()] diff --git a/pyqir-parser/requirements-dev.txt b/pyqir-parser/requirements-dev.txt new file mode 100644 index 00000000..e5581af7 --- /dev/null +++ b/pyqir-parser/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest>=3.5.0 +pip>=21.3 +maturin>=0.10,<0.12 diff --git a/pyqir-parser/src/lib.rs b/pyqir-parser/src/lib.rs new file mode 100644 index 00000000..37530890 --- /dev/null +++ b/pyqir-parser/src/lib.rs @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +#![deny(clippy::all, clippy::pedantic)] + +pub mod parse; +pub mod python; diff --git a/pyqir-parser/src/parse.rs b/pyqir-parser/src/parse.rs new file mode 100644 index 00000000..4d909319 --- /dev/null +++ b/pyqir-parser/src/parse.rs @@ -0,0 +1,307 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use std::convert::TryFrom; +use std::num::ParseIntError; + +use llvm_ir; + +// This module introduces extensions to the existing types exposed by llvm_ir to bring in some +// convenience functions as well as QIR-specific utilities. 
+ +pub trait ModuleExt { + fn get_funcs_by_attr_name(&self, name: &str) -> Vec<&llvm_ir::Function>; + fn get_entrypoint_funcs(&self) -> Vec<&llvm_ir::Function>; + fn get_interop_funcs(&self) -> Vec<&llvm_ir::Function>; +} + +impl ModuleExt for llvm_ir::Module { + fn get_funcs_by_attr_name(&self, name: &str) -> Vec<&llvm_ir::Function> { + self.functions + .iter() + .filter(|f| { + f.function_attributes.contains( + &llvm_ir::function::FunctionAttribute::StringAttribute { + kind: name.to_string(), + value: String::new(), + }, + ) + }) + .collect() + } + + fn get_entrypoint_funcs(&self) -> Vec<&llvm_ir::Function> { + self.get_funcs_by_attr_name("EntryPoint") + } + + fn get_interop_funcs(&self) -> Vec<&llvm_ir::Function> { + self.get_funcs_by_attr_name("InteropFriendly") + } +} + +pub trait FunctionExt { + fn get_attribute_value(&self, name: &str) -> Option; + fn get_required_qubits(&self) -> Result, ParseIntError>; + fn get_required_results(&self) -> Result, ParseIntError>; + fn get_instruction_by_output_name(&self, name: &str) -> Option<&llvm_ir::Instruction>; +} + +impl FunctionExt for llvm_ir::Function { + fn get_attribute_value(&self, name: &str) -> Option { + for attr in &self.function_attributes { + match attr { + llvm_ir::function::FunctionAttribute::StringAttribute { kind, value } => { + if kind.to_string().eq(name) { + return Some(value.to_string()); + } + } + _ => continue, + } + } + None + } + + fn get_required_qubits(&self) -> Result, ParseIntError> { + match self.get_attribute_value("requiredQubits") { + Some(s) => Ok(Some(s.parse()?)), + None => Ok(None), + } + } + + fn get_required_results(&self) -> Result, ParseIntError> { + match self.get_attribute_value("requiredResults") { + Some(s) => Ok(Some(s.parse()?)), + None => Ok(None), + } + } + + fn get_instruction_by_output_name(&self, name: &str) -> Option<&llvm_ir::Instruction> { + for block in &self.basic_blocks { + for instr in &block.instrs { + match instr.try_get_result() { + Some(resname) => { + if 
resname.get_string().eq(name) { + return Some(instr); + } + } + None => continue, + } + } + } + None + } +} + +pub trait BasicBlockExt { + fn get_phi_nodes(&self) -> Vec; + fn get_phi_pairs_by_source_name(&self, name: &str) -> Vec<(llvm_ir::Name, llvm_ir::Operand)>; +} + +impl BasicBlockExt for llvm_ir::BasicBlock { + fn get_phi_nodes(&self) -> Vec { + self.instrs + .iter() + .filter_map(|i| llvm_ir::instruction::Phi::try_from(i.clone()).ok()) + .collect() + } + + fn get_phi_pairs_by_source_name(&self, name: &str) -> Vec<(llvm_ir::Name, llvm_ir::Operand)> { + self.get_phi_nodes() + .iter() + .filter_map(|phi| Some((phi.dest.clone(), phi.get_incoming_value_for_name(name)?))) + .collect() + } +} + +pub trait IntructionExt { + fn get_target_operands(&self) -> Vec; +} + +impl IntructionExt for llvm_ir::Instruction { + fn get_target_operands(&self) -> Vec { + match &self { + llvm_ir::Instruction::Add(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::Sub(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::Mul(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::UDiv(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::SDiv(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::URem(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::SRem(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::And(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::Or(instr) => vec![instr.operand0.clone(), instr.operand1.clone()], + llvm_ir::Instruction::Xor(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::Shl(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + 
llvm_ir::Instruction::LShr(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::AShr(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FAdd(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FSub(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FMul(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FDiv(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FRem(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FNeg(instr) => vec![instr.operand.clone()], + llvm_ir::Instruction::ICmp(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + llvm_ir::Instruction::FCmp(instr) => { + vec![instr.operand0.clone(), instr.operand1.clone()] + } + _ => vec![], + } + } +} + +pub trait CallExt { + fn get_func_name(&self) -> Option; + fn is_qis(&self) -> bool; + fn is_rt(&self) -> bool; + fn is_qir(&self) -> bool; +} + +impl CallExt for llvm_ir::instruction::Call { + fn get_func_name(&self) -> Option { + match self.function.clone().right()? 
{ + llvm_ir::Operand::ConstantOperand(c) => match c.as_ref() { + llvm_ir::constant::Constant::GlobalReference { name, ty: _ } => Some(name.clone()), + _ => None, + }, + _ => None, + } + } + + fn is_qis(&self) -> bool { + self.get_func_name() + .map_or(false, |n| n.get_string().starts_with("__quantum__qis__")) + } + fn is_rt(&self) -> bool { + self.get_func_name() + .map_or(false, |n| n.get_string().starts_with("__quantum__rt__")) + } + fn is_qir(&self) -> bool { + self.get_func_name() + .map_or(false, |n| n.get_string().starts_with("__quantum__qir__")) + } +} + +pub trait PhiExt { + fn get_incoming_value_for_name(&self, name: &str) -> Option; +} + +impl PhiExt for llvm_ir::instruction::Phi { + fn get_incoming_value_for_name(&self, name: &str) -> Option { + self.incoming_values.iter().find_map(|(op, block_name)| { + match block_name.get_string().eq(name) { + true => Some(op.clone()), + false => None, + } + }) + } +} + +pub trait TypeExt { + fn is_qubit(&self) -> bool; + fn is_result(&self) -> bool; +} + +impl TypeExt for llvm_ir::Type { + fn is_qubit(&self) -> bool { + match self { + llvm_ir::Type::PointerType { + pointee_type, + addr_space: _, + } => pointee_type.as_ref().is_qubit(), + llvm_ir::Type::NamedStructType { name } => name == "Qubit", + _ => false, + } + } + + fn is_result(&self) -> bool { + match self { + llvm_ir::Type::PointerType { + pointee_type, + addr_space: _, + } => pointee_type.as_ref().is_result(), + llvm_ir::Type::NamedStructType { name } => name == "Result", + _ => false, + } + } +} + +pub trait ConstantExt { + fn qubit_id(&self) -> Option; + fn result_id(&self) -> Option; +} + +macro_rules! 
constant_id { + ($name:ident, $check_func:path) => { + fn $name(&self) -> Option { + match &self { + llvm_ir::Constant::Null(t) => { + if $check_func(t.as_ref()) { + Some(0) + } else { + None + } + } + llvm_ir::Constant::IntToPtr(llvm_ir::constant::IntToPtr { operand, to_type }) => { + match ($check_func(to_type.as_ref()), operand.as_ref()) { + (true, llvm_ir::Constant::Int { bits: 64, value }) => Some(value.clone()), + _ => None, + } + } + _ => None, + } + } + }; +} + +impl ConstantExt for llvm_ir::Constant { + constant_id!(qubit_id, TypeExt::is_qubit); + constant_id!(result_id, TypeExt::is_result); +} + +pub trait NameExt { + fn get_string(&self) -> String; +} + +impl NameExt for llvm_ir::Name { + fn get_string(&self) -> String { + match &self { + llvm_ir::Name::Name(n) => n.to_string(), + llvm_ir::Name::Number(n) => n.to_string(), + } + } +} diff --git a/pyqir-parser/src/python.rs b/pyqir-parser/src/python.rs new file mode 100644 index 00000000..f3c162d1 --- /dev/null +++ b/pyqir-parser/src/python.rs @@ -0,0 +1,1119 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +// TODO(swernli): The initial version of the parser exposes a subset of the llvm_ir crate API into +// python directly, along with some extensions that provide QIR specific support (such as `get_qubit_static_id`). +// Eventually this should be split up similar to how QIR emission functionality works; these wrappers will +// remain here and provide the pyclass-compatible implementation, the QIR specific extensions will be implemented +// as traits and extended onto the llvm_ir types as part of the qirlib such that they can be conveniently used +// from within rust, and wrappers for each class and function will be added to __init__.py so that the +// parser API can have full python doc comments for usability. 
+ +use pyo3::exceptions::{PyRuntimeError}; +use llvm_ir; +use llvm_ir::types::Typed; +use pyo3::prelude::*; +use super::parse::*; +use std::convert::TryFrom; + +#[pymodule] +fn pyqir_parser(_py: Python<'_>, m: &PyModule) -> PyResult<()> { + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + m.add_class::()?; + + #[pyfn(m)] + #[pyo3(name = "module_from_bitcode")] + fn module_from_bitcode_py(_py: Python, bc_path: String) -> PyResult { + match llvm_ir::Module::from_bc_path(&bc_path) { + Ok(m) => Ok(PyQirModule { module: m }), + Err(s) => Err(PyRuntimeError::new_err(s)), + } + } + + Ok(()) +} + +#[pyclass] +pub struct PyQirModule { + pub(super) module: llvm_ir::Module, +} + +#[pyclass] +pub struct PyQirFunction { + pub(super) function: llvm_ir::Function, + pub(super) types: llvm_ir::types::Types, +} + +#[pyclass] +pub struct PyQirParameter { + pub(super) param: llvm_ir::function::Parameter, +} + +#[pyclass] +pub struct PyQirBasicBlock { + pub(super) block: llvm_ir::BasicBlock, + pub(super) types: llvm_ir::types::Types, +} + +#[pyclass] +pub struct PyQirInstruction { + pub(super) instr: llvm_ir::instruction::Instruction, + pub(super) types: llvm_ir::types::Types, +} + +#[pyclass] +pub struct PyQirTerminator { + pub(super) term: llvm_ir::terminator::Terminator, + pub(super) types: llvm_ir::types::Types, +} + +#[pyclass] +pub struct PyQirOperand { + pub(super) op: llvm_ir::Operand, + pub(super) types: llvm_ir::types::Types, +} + +#[pyclass] +pub struct PyQirConstant { + pub(super) constantref: llvm_ir::ConstantRef, + pub(super) types: llvm_ir::types::Types, +} + +#[pyclass] +pub struct PyQirType { + pub(super) typeref: llvm_ir::TypeRef, +} + +macro_rules! 
match_contents { + ($target:expr, $pattern:pat, $property:expr) => { + match $target { + $pattern => Some($property), + _ => None, + } + }; +} + +#[pymethods] +impl PyQirModule { + #[getter] + fn get_functions(&self) -> Vec { + self.module + .functions + .iter() + .map(|f| PyQirFunction { + function: f.clone(), + types: self.module.types.clone(), + }) + .collect() + } + + fn get_func_by_name(&self, name: String) -> Option { + match self.module.get_func_by_name(&name) { + Some(f) => Some(PyQirFunction { + function: f.clone(), + types: self.module.types.clone(), + }), + None => None, + } + } + + fn get_funcs_by_attr(&self, attr: String) -> Vec { + self.module + .get_funcs_by_attr_name(&attr) + .iter() + .map(|f| PyQirFunction { + function: (*f).clone(), + types: self.module.types.clone(), + }) + .collect() + } + + fn get_entrypoint_funcs(&self) -> Vec { + self.module + .get_entrypoint_funcs() + .iter() + .map(|f| PyQirFunction { + function: (*f).clone(), + types: self.module.types.clone(), + }) + .collect() + } + + fn get_interop_funcs(&self) -> Vec { + self.module + .get_interop_funcs() + .iter() + .map(|f| PyQirFunction { + function: (*f).clone(), + types: self.module.types.clone(), + }) + .collect() + } +} + +#[pymethods] +impl PyQirFunction { + #[getter] + fn get_name(&self) -> String { + self.function.name.clone() + } + + #[getter] + fn get_parameters(&self) -> Vec { + self.function + .parameters + .iter() + .map(|p| PyQirParameter { param: p.clone() }) + .collect() + } + + #[getter] + fn get_return_type(&self) -> PyQirType { + PyQirType { + typeref: self.function.return_type.clone(), + } + } + + #[getter] + fn get_blocks(&self) -> Vec { + self.function + .basic_blocks + .iter() + .map(|b| PyQirBasicBlock { + block: b.clone(), + types: self.types.clone(), + }) + .collect() + } + + #[getter] + fn get_required_qubits(&self) -> PyResult> { + Ok(self.function.get_required_qubits()?) 
+ } + + #[getter] + fn get_required_results(&self) -> PyResult> { + Ok(self.function.get_required_results()?) + } + + fn get_attribute_value(&self, attr_name: String) -> Option { + self.function.get_attribute_value(&attr_name) + } + + fn get_block_by_name(&self, name: String) -> Option { + Some(PyQirBasicBlock { + block: self + .function + .get_bb_by_name(&llvm_ir::Name::from(name.clone()))? + .clone(), + types: self.types.clone(), + }) + } + + fn get_instruction_by_output_name(&self, name: String) -> Option { + Some(PyQirInstruction { + instr: self.function.get_instruction_by_output_name(&name)?.clone(), + types: self.types.clone(), + }) + } +} + +#[pymethods] +impl PyQirParameter { + #[getter] + fn get_name(&self) -> String { + self.param.name.get_string() + } + + #[getter] + fn get_type(&self) -> PyQirType { + PyQirType { + typeref: self.param.ty.clone(), + } + } +} + +#[pymethods] +impl PyQirBasicBlock { + #[getter] + fn get_name(&self) -> String { + self.block.name.get_string() + } + + #[getter] + fn get_instructions(&self) -> Vec { + self.block + .instrs + .iter() + .map(|i| PyQirInstruction { + instr: i.clone(), + types: self.types.clone(), + }) + .collect() + } + + #[getter] + fn get_phi_nodes(&self) -> Vec { + self.block + .get_phi_nodes() + .iter() + .map(|phi| PyQirInstruction { + instr: llvm_ir::Instruction::from(phi.clone()), + types: self.types.clone(), + }) + .collect() + } + + fn get_phi_pairs_by_source_name(&self, name: String) -> Vec<(String, PyQirOperand)> { + self.block + .get_phi_pairs_by_source_name(&name) + .iter() + .map(|(n, op)| { + ( + n.get_string(), + PyQirOperand { + op: op.clone(), + types: self.types.clone(), + }, + ) + }) + .collect() + } + + #[getter] + fn get_terminator(&self) -> PyQirTerminator { + PyQirTerminator { + term: self.block.term.clone(), + types: self.types.clone(), + } + } +} + +#[pymethods] +impl PyQirInstruction { + #[getter] + fn get_target_operands(&self) -> Vec { + self.instr + .get_target_operands() + .iter() + 
.map(|op| PyQirOperand { + op: op.clone(), + types: self.types.clone(), + }) + .collect() + } + + #[getter] + fn get_type(&self) -> Option { + Some(PyQirType { + typeref: self.instr.get_type(&self.types), + }) + } + + #[getter] + fn get_is_add(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Add(_)) + } + + #[getter] + fn get_is_sub(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Sub(_)) + } + + #[getter] + fn get_is_mul(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Mul(_)) + } + + #[getter] + fn get_is_udiv(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::UDiv(_)) + } + + #[getter] + fn get_is_sdiv(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::SDiv(_)) + } + + #[getter] + fn get_is_urem(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::URem(_)) + } + + #[getter] + fn get_is_srem(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::SRem(_)) + } + + #[getter] + fn get_is_and(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::And(_)) + } + + #[getter] + fn get_is_or(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Or(_)) + } + + #[getter] + fn get_is_xor(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Xor(_)) + } + + #[getter] + fn get_is_shl(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Shl(_)) + } + + #[getter] + fn get_is_lshr(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::LShr(_)) + } + + #[getter] + fn get_is_ashr(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::AShr(_)) + } + + #[getter] + fn get_is_fadd(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FAdd(_)) + } + + #[getter] + fn get_is_fsub(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FSub(_)) + } + + #[getter] + fn get_is_fmul(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FMul(_)) + } + + #[getter] + fn get_is_fdiv(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FDiv(_)) + } 
+ + #[getter] + fn get_is_frem(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FRem(_)) + } + + #[getter] + fn get_is_fneg(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FNeg(_)) + } + + #[getter] + fn get_is_extractelement(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::ExtractElement(_)) + } + + #[getter] + fn get_is_insertelement(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::InsertElement(_)) + } + + #[getter] + fn get_is_shufflevector(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::ShuffleVector(_)) + } + + #[getter] + fn get_is_extractvalue(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::ExtractValue(_)) + } + + #[getter] + fn get_is_insertvalue(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::InsertValue(_)) + } + + #[getter] + fn get_is_alloca(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Alloca(_)) + } + + #[getter] + fn get_is_load(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Load(_)) + } + + #[getter] + fn get_is_store(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Store(_)) + } + + #[getter] + fn get_is_getelementptr(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::GetElementPtr(_)) + } + + #[getter] + fn get_is_trunc(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Trunc(_)) + } + + #[getter] + fn get_is_zext(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::ZExt(_)) + } + + #[getter] + fn get_is_sext(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::SExt(_)) + } + + #[getter] + fn get_is_fptrunc(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FPTrunc(_)) + } + + #[getter] + fn get_is_fpext(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FPExt(_)) + } + + #[getter] + fn get_is_fptoui(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FPToUI(_)) + } + + #[getter] + fn get_is_fptosi(&self) -> bool { + matches!(self.instr, 
llvm_ir::Instruction::FPToSI(_)) + } + + #[getter] + fn get_is_uitofp(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::UIToFP(_)) + } + + #[getter] + fn get_is_sitofp(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::SIToFP(_)) + } + + #[getter] + fn get_is_ptrtoint(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::PtrToInt(_)) + } + + #[getter] + fn get_is_inttoptr(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::IntToPtr(_)) + } + + #[getter] + fn get_is_bitcast(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::BitCast(_)) + } + + #[getter] + fn get_is_addrspacecast(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::AddrSpaceCast(_)) + } + + #[getter] + fn get_is_icmp(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::ICmp(_)) + } + + #[getter] + fn get_icmp_predicate(&self) -> Option { + Some( + llvm_ir::instruction::ICmp::try_from(self.instr.clone()) + .ok()? + .predicate + .to_string(), + ) + } + + #[getter] + fn get_is_fcmp(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::FCmp(_)) + } + + #[getter] + fn get_fcmp_predicate(&self) -> Option { + Some( + llvm_ir::instruction::FCmp::try_from(self.instr.clone()) + .ok()? + .predicate + .to_string(), + ) + } + + #[getter] + fn get_is_phi(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Phi(_)) + } + + #[getter] + fn get_phi_incoming_values(&self) -> Option> { + Some( + llvm_ir::instruction::Phi::try_from(self.instr.clone()) + .ok()? + .incoming_values + .iter() + .map(|(op, name)| { + ( + PyQirOperand { + op: op.clone(), + types: self.types.clone(), + }, + name.get_string(), + ) + }) + .collect(), + ) + } + + fn get_phi_incoming_value_for_name(&self, name: String) -> Option { + Some(PyQirOperand { + op: llvm_ir::instruction::Phi::try_from(self.instr.clone()) + .ok()? 
+ .get_incoming_value_for_name(&name)?, + types: self.types.clone(), + }) + } + + #[getter] + fn get_is_select(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Select(_)) + } + + #[getter] + fn get_is_call(&self) -> bool { + matches!(self.instr, llvm_ir::Instruction::Call(_)) + } + + #[getter] + fn get_call_func_name(&self) -> Option { + Some( + llvm_ir::instruction::Call::try_from(self.instr.clone()) + .ok()? + .get_func_name()? + .get_string(), + ) + } + + #[getter] + fn get_call_func_params(&self) -> Option> { + Some( + llvm_ir::instruction::Call::try_from(self.instr.clone()) + .ok()? + .arguments + .iter() + .map(|o| PyQirOperand { + op: o.0.clone(), + types: self.types.clone(), + }) + .collect(), + ) + } + + #[getter] + fn get_is_qis_call(&self) -> bool { + llvm_ir::instruction::Call::try_from(self.instr.clone()).map_or(false, |c| c.is_qis()) + } + + #[getter] + fn get_is_rt_call(&self) -> bool { + llvm_ir::instruction::Call::try_from(self.instr.clone()).map_or(false, |c| c.is_rt()) + } + + #[getter] + fn get_is_qir_call(&self) -> bool { + llvm_ir::instruction::Call::try_from(self.instr.clone()).map_or(false, |c| c.is_qir()) + } + + #[getter] + fn get_has_output(&self) -> bool { + match self.instr.try_get_result() { + Some(_) => true, + None => false, + } + } + + #[getter] + fn get_output_name(&self) -> Option { + Some(self.instr.try_get_result()?.get_string()) + } +} + +#[pymethods] +impl PyQirTerminator { + #[getter] + fn get_is_ret(&self) -> bool { + matches!(self.term, llvm_ir::Terminator::Ret(_)) + } + + #[getter] + fn get_ret_operand(&self) -> Option { + match_contents!( + &self.term, + llvm_ir::Terminator::Ret(llvm_ir::terminator::Ret { + return_operand, + debugloc: _, + }), + PyQirOperand { + op: return_operand.as_ref()?.clone(), + types: self.types.clone(), + } + ) + } + + #[getter] + fn get_is_br(&self) -> bool { + matches!(self.term, llvm_ir::Terminator::Br(_)) + } + + #[getter] + fn get_br_dest(&self) -> Option { + match_contents!( + 
&self.term, + llvm_ir::Terminator::Br(llvm_ir::terminator::Br { dest, debugloc: _ }), + dest.get_string() + ) + } + + #[getter] + fn get_is_condbr(&self) -> bool { + matches!(self.term, llvm_ir::Terminator::CondBr(_)) + } + + #[getter] + fn get_condbr_condition(&self) -> Option { + match_contents!( + &self.term, + llvm_ir::Terminator::CondBr(llvm_ir::terminator::CondBr { + condition, + true_dest: _, + false_dest: _, + debugloc: _, + }), + PyQirOperand { + op: condition.clone(), + types: self.types.clone(), + } + ) + } + + #[getter] + fn get_condbr_true_dest(&self) -> Option { + match_contents!( + &self.term, + llvm_ir::Terminator::CondBr(llvm_ir::terminator::CondBr { + condition: _, + true_dest, + false_dest: _, + debugloc: _, + }), + true_dest.get_string() + ) + } + + #[getter] + fn get_condbr_false_dest(&self) -> Option { + match_contents!( + &self.term, + llvm_ir::Terminator::CondBr(llvm_ir::terminator::CondBr { + condition: _, + true_dest: _, + false_dest, + debugloc: _, + }), + false_dest.get_string() + ) + } + + #[getter] + fn get_is_switch(&self) -> bool { + matches!(self.term, llvm_ir::Terminator::Switch(_)) + } + + #[getter] + fn get_switch_operand(&self) -> Option { + match_contents!( + &self.term, + llvm_ir::Terminator::Switch(llvm_ir::terminator::Switch { + operand, + dests: _, + default_dest: _, + debugloc: _, + }), + PyQirOperand { + op: operand.clone(), + types: self.types.clone(), + } + ) + } + + #[getter] + fn get_switch_dests(&self) -> Option> { + match_contents!( + &self.term, + llvm_ir::Terminator::Switch(llvm_ir::terminator::Switch { + operand: _, + dests, + default_dest: _, + debugloc: _, + }), + dests + .iter() + .map(|(cref, name)| ( + PyQirOperand { + op: llvm_ir::Operand::ConstantOperand(cref.clone()), + types: self.types.clone() + }, + name.get_string() + )) + .collect() + ) + } + + #[getter] + fn get_switch_default_dest(&self) -> Option { + match_contents!( + &self.term, + llvm_ir::Terminator::Switch(llvm_ir::terminator::Switch { + 
operand: _, + dests: _, + default_dest, + debugloc: _, + }), + default_dest.get_string() + ) + } + + #[getter] + fn get_is_unreachable(&self) -> bool { + matches!(self.term, llvm_ir::Terminator::Unreachable(_)) + } +} + +#[pymethods] +impl PyQirOperand { + #[getter] + fn get_is_local(&self) -> bool { + matches!(self.op, llvm_ir::Operand::LocalOperand { name: _, ty: _ }) + } + + #[getter] + fn get_local_name(&self) -> Option { + match_contents!( + &self.op, + llvm_ir::Operand::LocalOperand { name, ty: _ }, + name.get_string() + ) + } + + #[getter] + fn get_local_type(&self) -> Option { + match_contents!( + &self.op, + llvm_ir::Operand::LocalOperand { name: _, ty }, + PyQirType { + typeref: ty.clone(), + } + ) + } + + #[getter] + fn get_is_constant(&self) -> bool { + matches!(self.op, llvm_ir::Operand::ConstantOperand(_)) + } + + #[getter] + fn get_constant(&self) -> Option { + match_contents!( + &self.op, + llvm_ir::Operand::ConstantOperand(cref), + PyQirConstant { + constantref: cref.clone(), + types: self.types.clone(), + } + ) + } +} + +#[pymethods] +impl PyQirConstant { + #[getter] + fn get_is_int(&self) -> bool { + matches!( + self.constantref.as_ref(), + llvm_ir::Constant::Int { bits: _, value: _ } + ) + } + + #[getter] + fn get_int_value(&self) -> Option { + match_contents!( + self.constantref.as_ref(), + llvm_ir::Constant::Int { bits: _, value }, + value.clone() as i64 + ) + } + + #[getter] + fn get_int_width(&self) -> Option { + match_contents!( + &self.constantref.as_ref(), + llvm_ir::Constant::Int { bits, value: _ }, + bits.clone() + ) + } + + #[getter] + fn get_is_float(&self) -> bool { + matches!(self.constantref.as_ref(), llvm_ir::Constant::Float(_)) + } + + #[getter] + fn get_float_double_value(&self) -> Option { + match_contents!( + &self.constantref.as_ref(), + llvm_ir::Constant::Float(llvm_ir::constant::Float::Double(d)), + d.clone() + ) + } + + #[getter] + fn get_is_null(&self) -> bool { + matches!(self.constantref.as_ref(), 
llvm_ir::Constant::Null(_)) + } + + #[getter] + fn get_is_aggregate_zero(&self) -> bool { + matches!( + self.constantref.as_ref(), + llvm_ir::Constant::AggregateZero(_) + ) + } + + #[getter] + fn get_is_array(&self) -> bool { + matches!( + self.constantref.as_ref(), + llvm_ir::Constant::Array { + element_type: _, + elements: _, + } + ) + } + + #[getter] + fn get_is_vector(&self) -> bool { + matches!(self.constantref.as_ref(), llvm_ir::Constant::Vector(_)) + } + + #[getter] + fn get_is_undef(&self) -> bool { + matches!(self.constantref.as_ref(), llvm_ir::Constant::Undef(_)) + } + + #[getter] + fn get_is_global_reference(&self) -> bool { + matches!( + self.constantref.as_ref(), + llvm_ir::Constant::GlobalReference { name: _, ty: _ } + ) + } + + #[getter] + fn get_type(&self) -> PyQirType { + PyQirType { + typeref: self.constantref.get_type(&self.types), + } + } + + #[getter] + fn get_is_qubit(&self) -> bool { + self.get_type().get_is_qubit() + } + + #[getter] + fn get_qubit_static_id(&self) -> Option { + self.constantref.qubit_id() + } + + #[getter] + fn get_is_result(&self) -> bool { + self.get_type().get_is_result() + } + + #[getter] + fn get_result_static_id(&self) -> Option { + self.constantref.result_id() + } +} + +#[pymethods] +impl PyQirType { + #[getter] + fn get_is_void(&self) -> bool { + matches!(self.typeref.as_ref(), llvm_ir::Type::VoidType) + } + + #[getter] + fn get_is_integer(&self) -> bool { + matches!( + self.typeref.as_ref(), + llvm_ir::Type::IntegerType { bits: _ } + ) + } + + #[getter] + fn get_integer_width(&self) -> Option { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::IntegerType { bits }, + bits.clone() + ) + } + + #[getter] + fn get_is_pointer(&self) -> bool { + matches!( + self.typeref.as_ref(), + llvm_ir::Type::PointerType { + pointee_type: _, + addr_space: _, + } + ) + } + + #[getter] + fn get_pointer_type(&self) -> Option { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::PointerType { + pointee_type, + 
addr_space: _ + }, + PyQirType { + typeref: pointee_type.clone() + } + ) + } + + #[getter] + fn get_pointer_addrspace(&self) -> Option { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::PointerType { + pointee_type: _, + addr_space + }, + addr_space.clone() + ) + } + + #[getter] + fn get_is_double(&self) -> bool { + matches!( + self.typeref.as_ref(), + llvm_ir::Type::FPType(llvm_ir::types::FPType::Double) + ) + } + + #[getter] + fn get_is_array(&self) -> bool { + matches!( + self.typeref.as_ref(), + llvm_ir::Type::ArrayType { + element_type: _, + num_elements: _, + } + ) + } + + #[getter] + fn get_array_element_type(&self) -> Option { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::ArrayType { + element_type, + num_elements: _, + }, + PyQirType { + typeref: element_type.clone() + } + ) + } + + #[getter] + fn get_array_num_elements(&self) -> Option { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::ArrayType { + element_type: _, + num_elements, + }, + num_elements.clone() + ) + } + + #[getter] + fn get_is_struct(&self) -> bool { + matches!( + self.typeref.as_ref(), + llvm_ir::Type::StructType { + element_types: _, + is_packed: _, + } + ) + } + + #[getter] + fn get_struct_element_types(&self) -> Option> { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::StructType { + element_types, + is_packed: _ + }, + element_types + .iter() + .map(|t| PyQirType { typeref: t.clone() }) + .collect() + ) + } + + #[getter] + fn get_is_named_struct(&self) -> bool { + matches!( + self.typeref.as_ref(), + llvm_ir::Type::NamedStructType { name: _ } + ) + } + + #[getter] + fn get_named_struct_name(&self) -> Option { + match_contents!( + self.typeref.as_ref(), + llvm_ir::Type::NamedStructType { name }, + name.clone() + ) + } + + #[getter] + fn get_is_qubit(&self) -> bool { + self.typeref.is_qubit() + } + + #[getter] + fn get_is_result(&self) -> bool { + self.typeref.is_result() + } +} diff --git 
a/pyqir-parser/tests/teleportchain.baseprofile.bc b/pyqir-parser/tests/teleportchain.baseprofile.bc new file mode 100644 index 0000000000000000000000000000000000000000..9da436f993da8bab4ced07471402fd4c74dc784c GIT binary patch literal 2896 zcmai04^R`?8Gj*RH$vQOFnU3f+{S1Rk=}(r_JRQ>Vc{-lY9r#Eb?Ai+1W7OE&l2R% zU3&=$Y#iem72BNS+(5@#&*^lq=gry9wL=KfoJI#kd$w8&2cl=u;d-{?jLy)vN$lAi zZ8yn#dB6AjzVG*a@7rXn$l+TX#QMO|MJdd`*yJgr-}_bYF^6?pH}>mFVO80N&M&5)bqO5o#GdA zNqp`5k*%UDk|rKFRFnTg@&4g{|4+Q@^Xe`R<15e9gbQdi<0O;_wn+fUfpgvk;?W|! zD1Bzf07HkE4iCCL#JKa(4g%d9Lp>pe4lvz%v@0K_1HIii;~rqTz&yCFL=~3)xIIiRI*cHJJU?6Kal=bp=veRhS z-X7v0X|#Kv|angbol>T}=J?PQr&v;j}>h@ioL$uFer-9yv=igHRfbWOqN8nZo-y!#(0qAuZbgi&2y!dpok6z~eT$EF=^3XJ?RIka zb}~+a$&HJ!`4Bd(653V5nFVaYh?U@@vRex2b%kt%lDDY#X+H63#>uj?-11L+<$kUv zz98P)h`!Z|zTC>taP%LGH|ai1n`T$@Uyk?H6}JHy)y4?{w>0`ecqFHq9E3fZlQ?8h8A zD_9|0ppg1rpE@4Z43QPEAWufhXGu)}E~CNoSuj(k1M1_+)&5*C+tS`?WRR_tkc(nXnq%0PNIhM!uXI*?0s&c^s?qqUr zjA8SQ!uwOm0wKJu65b|+|1@HD9Gg*LaQW2~^2HQ3Ys4O@$mu^TWOIYk1x`Xa^~%*h zMb+@IhN3TBCpX2(O#yNf%zB)x1RucP1rhA2>?}H;{cY*q=?s$PPC7QY z{H~5|r8$KxS%X!jGmY3CBP^|OmMyUGjuCsvcI*Va4-hz`@$wTs&2e~O+=>BTIXt{j zl_YtIfr!V@_jT7Tl88 z9{`YWa#^2sggVv=0RWUpCf?IHKurX^3O^wUL{?TPk@E@YQoyTAK-UVNLZ3c`j!KmX zBI;=*kk2NA5;4i|@%BN(F=#l9Og5#UF$l5lr;x)%7jb?hpeuq7TUVECA%YS~HNS_G zXMDHFY!oQ?$gVpnqddb{VN}aYg>+pi%oq#wIB z_d`Li=&6GBAM(bMqKCZaB`K#riYJZE=`8VLsa}<|Joa%shfpGYi5y7V$0Q&y3&V>H!43@ zR?@tmkLP56sh${>fh$km*-|(_D2Ok9vN_#e(9jz+gTIxbQ zrHg+R1t;~}d4}plpcN{`)|vkn=rb5-GFRU-Y0#6Rg!1c^>_Y^+TtxE!t{B7D_<@i8 z>q}fefX^Ghg5Pqu{cp0*B=r`X#p$5kWGiL0n@rV~Jq~-Do2J~Br#qC}>ag!~6}w+h*BgvAa#CTD#jqJDetytDUmD+xM7EG(=tPW~X2Dt&Q6KtRhx!iu!*`nJmUq bI%zg%syUT-k?KYGa#8MY+pP9C6aD)ySK`aJ literal 0 HcmV?d00001 diff --git a/pyqir-parser/tests/teleportchain.baseprofile.ll.reference b/pyqir-parser/tests/teleportchain.baseprofile.ll.reference new file mode 100644 index 00000000..d2fec52f --- /dev/null +++ b/pyqir-parser/tests/teleportchain.baseprofile.ll.reference @@ -0,0 +1,111 @@ +; ModuleID = 'qat-link' 
+source_filename = "qat-link" + +%Qubit = type opaque +%Result = type opaque +%Array = type opaque +%String = type opaque + +define i8 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +entry: + tail call void @__quantum__qis__h__body(%Qubit* null) + tail call void @__quantum__qis__cnot__body(%Qubit* null, %Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*), %Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*), %Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Result* null) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + %0 = tail call i1 @__quantum__qir__read_result(%Result* null) + br i1 %0, label %then0__1.i.i.i, label %continue__1.i.i.i + +then0__1.i.i.i: ; preds = %entry + tail call void @__quantum__qis__z__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + br label %continue__1.i.i.i + +continue__1.i.i.i: ; preds = %then0__1.i.i.i, %entry + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*), %Result* nonnull inttoptr (i64 1 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + %1 = tail call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 1 to %Result*)) + br i1 %1, label %then0__2.i.i.i, label 
%TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + +then0__2.i.i.i: ; preds = %continue__1.i.i.i + tail call void @__quantum__qis__x__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + br label %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + +TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i: ; preds = %then0__2.i.i.i, %continue__1.i.i.i + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*), %Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*), %Result* nonnull inttoptr (i64 2 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + %2 = tail call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 2 to %Result*)) + br i1 %2, label %then0__1.i.i1.i, label %continue__1.i.i2.i + +then0__1.i.i1.i: ; preds = %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + tail call void @__quantum__qis__z__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + br label %continue__1.i.i2.i + +continue__1.i.i2.i: ; preds = %then0__1.i.i1.i, %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*), %Result* nonnull inttoptr (i64 3 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + %3 = tail call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 3 to %Result*)) + br i1 %3, label %then0__2.i.i3.i, label %TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.1.exit + +then0__2.i.i3.i: ; preds = %continue__1.i.i2.i + tail call void @__quantum__qis__x__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + br label 
%TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.1.exit + +TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.1.exit: ; preds = %continue__1.i.i2.i, %then0__2.i.i3.i + tail call void @__quantum__qis__mz__body(%Qubit* null, %Result* nonnull inttoptr (i64 4 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* null) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*), %Result* nonnull inttoptr (i64 5 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + %4 = tail call i1 @__quantum__rt__result_equal(%Result* nonnull inttoptr (i64 4 to %Result*), %Result* nonnull inttoptr (i64 5 to %Result*)) + %5 = sext i1 %4 to i8 + ret i8 %5 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__z__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__h__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr + +declare %String* 
@__quantum__rt__bool_to_string(i1) local_unnamed_addr + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare i1 @__quantum__qir__read_result(%Result*) + +attributes #0 = { "InteropFriendly" "requiredQubits"="6" } + diff --git a/pyqir-parser/tests/teleportchain.ll.reference b/pyqir-parser/tests/teleportchain.ll.reference new file mode 100644 index 00000000..c4b4b529 --- /dev/null +++ b/pyqir-parser/tests/teleportchain.ll.reference @@ -0,0 +1,3448 @@ + +%Range = type { i64, i64, i64 } +%Tuple = type opaque +%Qubit = type opaque +%Result = type opaque +%Array = type opaque +%Callable = type opaque +%String = type opaque + +@PauliI = internal constant i2 0 +@PauliX = internal constant i2 1 +@PauliY = internal constant i2 -1 +@PauliZ = internal constant i2 -2 +@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } +@0 = internal constant [18 x i8] c"Unsupported input\00" +@1 = internal constant [18 x i8] c"Unsupported input\00" +@Microsoft__Quantum__Intrinsic__CNOT = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__H = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Rx = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Ry = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Ry__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Rz = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rz__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__S = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__T = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__body__wrapper, void (%Tuple*, %Tuple*, 
%Tuple*)* @Microsoft__Quantum__Intrinsic__T__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__X = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Z = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper] + +define internal void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { +entry: + %0 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %src) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %dest) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %3 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %intermediary) + %4 = call %Result* @__quantum__rt__result_get_one() + %5 = call i1 @__quantum__rt__result_equal(%Result* %3, %Result* %4) + call void @__quantum__rt__result_update_reference_count(%Result* 
%3, i32 -1) + br i1 %5, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %dest) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + ret void +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @__quantum__qis__m__body(%Qubit* %target) + call void @__quantum__qis__reset__body(%Qubit* %target) + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal i1 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() { +entry: + %leftMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %rightMessage = call %Qubit* @__quantum__rt__qubit_allocate() + %leftPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 1) + %rightPreshared = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 1) + call void @TeleportChain__PrepareEntangledPair__body(%Qubit* %leftMessage, %Qubit* %rightMessage) + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %i = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %0 = icmp sle i64 %i, 1 + br i1 %0, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 %i) + %2 = bitcast i8* %1 
to %Qubit** + %3 = load %Qubit*, %Qubit** %2, align 8 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %i) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @TeleportChain__PrepareEntangledPair__body(%Qubit* %3, %Qubit* %6) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 0) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %rightMessage, %Qubit* %10, %Qubit* %13) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %i__1 = phi i64 [ 1, %exit__1 ], [ %25, %exiting__2 ] + %14 = icmp sle i64 %i__1, 1 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = sub i64 %i__1, 1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %15) + %17 = bitcast i8* %16 to %Qubit** + %18 = load %Qubit*, %Qubit** %17, align 8 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %leftPreshared, i64 %i__1) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 %i__1) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + call void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %18, %Qubit* %21, %Qubit* %24) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %i__1, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %26 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* 
%leftMessage) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %rightPreshared, i64 1) + %28 = bitcast i8* %27 to %Qubit** + %29 = load %Qubit*, %Qubit** %28, align 8 + %30 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %29) + %31 = call i1 @__quantum__rt__result_equal(%Result* %26, %Result* %30) + call void @__quantum__rt__array_update_alias_count(%Array* %leftPreshared, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %rightPreshared, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %26, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %leftMessage) + call void @__quantum__rt__qubit_release(%Qubit* %rightMessage) + call void @__quantum__rt__qubit_release_array(%Array* %leftPreshared) + call void @__quantum__rt__qubit_release_array(%Array* %rightPreshared) + ret i1 %31 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +define internal void @TeleportChain__PrepareEntangledPair__body(%Qubit* %left, %Qubit* %right) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %left) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %left, %Qubit* %right) + ret void +} + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +define internal void @TeleportChain__TeleportQubitUsingPresharedEntanglement__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) { +entry: + call void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %src, %Qubit* %intermediary) + call void @TeleportChain__ApplyCorrection__body(%Qubit* %src, %Qubit* %intermediary, %Qubit* %dest) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + call void @__quantum__qis__cnot__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @TeleportChain__PrepareEntangledPair__adj(%Qubit* %left, %Qubit* %right) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %left, %Qubit* %right) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %left) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + ret void +} + +define internal void @TeleportChain__PrepareEntangledPair__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %left = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %right = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %left) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %left, %Qubit** %5, align 8 + store 
%Qubit* %right, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__h__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %6, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** 
null, i32 1) to i64), i64 2)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %10, align 8 + store %Qubit* %qubit, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %7, { %Array*, %Qubit* }* %9) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__cnot__body(%Qubit* %control, %Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: 
; preds = %test1__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %9, %Qubit* %control, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %test1__1 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %10) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %12 = bitcast %Tuple* %11 to { %Array*, { %Qubit*, %Qubit* }* }* + %13 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %12, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %16 = bitcast %Tuple* %15 to { %Qubit*, %Qubit* }* + %17 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %16, i32 0, i32 1 + store %Qubit* %control, %Qubit** %17, align 8 + store %Qubit* %target, %Qubit** %18, align 8 + store %Array* %ctls, %Array** %13, align 8 + store { %Qubit*, %Qubit* }* %16, { %Qubit*, %Qubit* }** %14, align 8 + call void @Microsoft__Quantum__Intrinsic___9befc69676a248a794d7a83b374c573e___QsRef23__ApplyWithLessControlsA____body(%Callable* %10, { %Array*, { %Qubit*, %Qubit* }* }* %12) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +define internal void @TeleportChain__PrepareEntangledPair__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %left = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %right = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %left, %Qubit** %5, align 8 + store %Qubit* %right, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %left) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal double @Microsoft__Quantum__Math__PI__body() { +entry: + ret double 0x400921FB54442D18 +} + +declare %Result* @__quantum__qis__m__body(%Qubit*) + +declare void @__quantum__qis__reset__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledX____body(%Qubit* %control, %Qubit* %target) { +entry: + call void 
@__quantum__qis__cnot__body(%Qubit* %control, %Qubit* %target) + ret void +} + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledX____adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @__quantum__qis__cnot__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledZ____body(%Qubit* %control, %Qubit* %target) { +entry: + call void @__quantum__qis__cz__body(%Qubit* %control, %Qubit* %target) + ret void +} + +declare void @__quantum__qis__cz__body(%Qubit*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyControlledZ____adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @__quantum__qis__cz__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____body(double %theta) { +entry: + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____adj(double %theta) { +entry: + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctl(%Array* %controls, double %theta) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %1 = icmp sgt i64 %0, 0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = sub i64 %0, 1 + %5 = load %Range, %Range* @EmptyRange, align 4 + %6 = insertvalue %Range %5, i64 1, 0 + %7 = insertvalue %Range %6, i64 1, 1 + %8 = insertvalue %Range %7, i64 %4, 2 + %rest = call %Array* @__quantum__rt__array_slice_1d(%Array* %controls, %Range %8, i1 true) + call void 
@__quantum__rt__array_update_alias_count(%Array* %rest, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %rest, { double, %Qubit* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %rest, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %rest, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr 
inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + %8 = load i2, i2* @PauliZ, align 1 + store i2 %8, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i2, double, %Qubit* }* + %11 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 1 + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %10, i32 0, i32 2 + %14 = load i2, i2* @PauliI, align 1 + %15 = fneg double %theta + store i2 %14, i2* %11, align 1 + store double %15, double* %12, align 8 + store %Qubit* %qubit, %Qubit** %13, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctladj(%Array* %controls, double %theta) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %1 = icmp sgt i64 %0, 0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %2 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %__qsVar0__qubit__ = load %Qubit*, %Qubit** %3, align 8 + %4 = sub i64 %0, 1 + %5 = load %Range, %Range* @EmptyRange, align 4 + %6 = insertvalue %Range %5, i64 1, 0 + %7 = insertvalue %Range %6, i64 1, 1 + %8 = insertvalue %Range %7, i64 %4, 2 + %__qsVar1__rest__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controls, %Range %8, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__rest__, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %__qsVar0__qubit__, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__qsVar1__rest__, { double, %Qubit* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__rest__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__rest__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, 
i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + %8 = load i2, i2* @PauliI, align 1 + %9 = fneg double %theta + store i2 %8, i2* %5, align 1 + store double %9, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i2, double, %Qubit* }* + %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %11, i32 0, i32 2 + %15 = load i2, i2* @PauliZ, align 1 + store i2 %15, i2* %12, align 1 + store double %theta, double* %13, align 8 + store %Qubit* %qubit, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %11) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledH____body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledH____adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledX____body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledX____adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledZ____body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyUncontrolledZ____adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %from, i2 %to) { +entry: + %0 = icmp eq i2 %from, %to + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + br label %continue__1 + +test1__1: ; preds = %entry + %1 = load i2, i2* @PauliZ, align 1 + %2 = icmp eq i2 %from, %1 + br i1 %2, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %3 = load i2, i2* @PauliX, align 1 + %4 = icmp eq i2 %to, %3 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %5 = phi i1 [ %4, %condTrue__1 ], [ %2, %test1__1 ] + %6 = xor i1 %5, true + br i1 %6, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = 
%condContinue__1 + %7 = load i2, i2* @PauliX, align 1 + %8 = icmp eq i2 %from, %7 + br i1 %8, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condTrue__2 + %9 = load i2, i2* @PauliZ, align 1 + %10 = icmp eq i2 %to, %9 + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condTrue__2 + %11 = phi i1 [ %10, %condTrue__3 ], [ %8, %condTrue__2 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condContinue__1 + %12 = phi i1 [ %11, %condContinue__3 ], [ %5, %condContinue__1 ] + br i1 %12, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__2 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %condContinue__2 + %13 = load i2, i2* @PauliZ, align 1 + %14 = icmp eq i2 %from, %13 + br i1 %14, label %condTrue__4, label %condContinue__4 + +condTrue__4: ; preds = %test2__1 + %15 = load i2, i2* @PauliY, align 1 + %16 = icmp eq i2 %to, %15 + br label %condContinue__4 + +condContinue__4: ; preds = %condTrue__4, %test2__1 + %17 = phi i1 [ %16, %condTrue__4 ], [ %14, %test2__1 ] + br i1 %17, label %then2__1, label %test3__1 + +then2__1: ; preds = %condContinue__4 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + br label %continue__1 + +test3__1: ; preds = %condContinue__4 + %18 = load i2, i2* @PauliY, align 1 + %19 = icmp eq i2 %from, %18 + br i1 %19, label %condTrue__5, label %condContinue__5 + +condTrue__5: ; preds = %test3__1 + %20 = load i2, i2* @PauliZ, align 1 + %21 = icmp eq i2 %to, %20 + br label %condContinue__5 + +condContinue__5: ; preds = %condTrue__5, %test3__1 + %22 = phi i1 [ %21, %condTrue__5 ], [ %19, %test3__1 ] + br i1 %22, label %then3__1, label %test4__1 + +then3__1: ; preds = %condContinue__5 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) 
+ call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) + br label %continue__1 + +test4__1: ; preds = %condContinue__5 + %23 = load i2, i2* @PauliY, align 1 + %24 = icmp eq i2 %from, %23 + br i1 %24, label %condTrue__6, label %condContinue__6 + +condTrue__6: ; preds = %test4__1 + %25 = load i2, i2* @PauliX, align 1 + %26 = icmp eq i2 %to, %25 + br label %condContinue__6 + +condContinue__6: ; preds = %condTrue__6, %test4__1 + %27 = phi i1 [ %26, %condTrue__6 ], [ %24, %test4__1 ] + br i1 %27, label %then4__1, label %test5__1 + +then4__1: ; preds = %condContinue__6 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + br label %continue__1 + +test5__1: ; preds = %condContinue__6 + %28 = load i2, i2* @PauliX, align 1 + %29 = icmp eq i2 %from, %28 + br i1 %29, label %condTrue__7, label %condContinue__7 + +condTrue__7: ; preds = %test5__1 + %30 = load i2, i2* @PauliY, align 1 + %31 = icmp eq i2 %to, %30 + br label %condContinue__7 + +condContinue__7: ; preds = %condTrue__7, %test5__1 + %32 = phi i1 [ %31, %condTrue__7 ], [ %29, %test5__1 ] + br i1 %32, label %then5__1, label %else__1 + +then5__1: ; preds = %condContinue__7 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %condContinue__7 + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; preds = %then5__1, %then4__1, %then3__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +declare %String* 
@__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__fail(%String*) + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %from, i2 %to) { +entry: + %0 = icmp eq i2 %from, %to + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + br label %continue__1 + +test1__1: ; preds = %entry + %1 = load i2, i2* @PauliZ, align 1 + %2 = icmp eq i2 %from, %1 + br i1 %2, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %3 = load i2, i2* @PauliX, align 1 + %4 = icmp eq i2 %to, %3 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %5 = phi i1 [ %4, %condTrue__1 ], [ %2, %test1__1 ] + %6 = xor i1 %5, true + br i1 %6, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %condContinue__1 + %7 = load i2, i2* @PauliX, align 1 + %8 = icmp eq i2 %from, %7 + br i1 %8, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condTrue__2 + %9 = load i2, i2* @PauliZ, align 1 + %10 = icmp eq i2 %to, %9 + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condTrue__2 + %11 = phi i1 [ %10, %condTrue__3 ], [ %8, %condTrue__2 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condContinue__1 + %12 = phi i1 [ %11, %condContinue__3 ], [ %5, %condContinue__1 ] + br i1 %12, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__2 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %condContinue__2 + %13 = load i2, i2* @PauliZ, align 1 + %14 = icmp eq i2 %from, %13 + br i1 %14, label %condTrue__4, label %condContinue__4 + +condTrue__4: ; preds = %test2__1 + %15 = load i2, i2* @PauliY, align 1 + %16 = icmp eq i2 %to, %15 + br label %condContinue__4 + +condContinue__4: ; preds = %condTrue__4, %test2__1 + %17 = phi i1 [ %16, %condTrue__4 ], [ %14, %test2__1 ] + br i1 %17, label %then2__1, label %test3__1 + 
+then2__1: ; preds = %condContinue__4 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + br label %continue__1 + +test3__1: ; preds = %condContinue__4 + %18 = load i2, i2* @PauliY, align 1 + %19 = icmp eq i2 %from, %18 + br i1 %19, label %condTrue__5, label %condContinue__5 + +condTrue__5: ; preds = %test3__1 + %20 = load i2, i2* @PauliZ, align 1 + %21 = icmp eq i2 %to, %20 + br label %condContinue__5 + +condContinue__5: ; preds = %condTrue__5, %test3__1 + %22 = phi i1 [ %21, %condTrue__5 ], [ %19, %test3__1 ] + br i1 %22, label %then3__1, label %test4__1 + +then3__1: ; preds = %condContinue__5 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) + br label %continue__1 + +test4__1: ; preds = %condContinue__5 + %23 = load i2, i2* @PauliY, align 1 + %24 = icmp eq i2 %from, %23 + br i1 %24, label %condTrue__6, label %condContinue__6 + +condTrue__6: ; preds = %test4__1 + %25 = load i2, i2* @PauliX, align 1 + %26 = icmp eq i2 %to, %25 + br label %condContinue__6 + +condContinue__6: ; preds = %condTrue__6, %test4__1 + %27 = phi i1 [ %26, %condTrue__6 ], [ %24, %test4__1 ] + br i1 %27, label %then4__1, label %test5__1 + +then4__1: ; preds = %condContinue__6 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) + br label %continue__1 + +test5__1: ; preds = %condContinue__6 + %28 = load i2, i2* @PauliX, align 1 + %29 = icmp eq i2 %from, %28 + br i1 %29, label %condTrue__7, label %condContinue__7 + +condTrue__7: ; preds = %test5__1 + %30 = load i2, i2* @PauliY, align 1 + %31 = icmp eq i2 %to, %30 + br label %condContinue__7 + +condContinue__7: ; preds = %condTrue__7, %test5__1 + %32 = phi i1 [ %31, %condTrue__7 ], [ %29, %test5__1 ] + br i1 %32, label %then5__1, 
label %else__1 + +then5__1: ; preds = %condContinue__7 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %condContinue__7 + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; preds = %then5__1, %then4__1, %then3__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void 
@Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control1, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %target) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Qubit** + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 1) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %control1, %Qubit** %2, align 8 + store %Qubit* %control2, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %0, %Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %target) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label 
%test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__z__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %control = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__cz__body(%Qubit* %control, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %7 = icmp eq i64 %6, 2 + br i1 %7, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %10) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %13) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %qubit, %Qubit* %16) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %18 = bitcast i8* %17 to %Qubit** + %19 = load %Qubit*, %Qubit** %18, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %22, %Qubit* %qubit) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%ctls, i64 1) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %27 = bitcast i8* %26 to %Qubit** + %28 = load %Qubit*, %Qubit** %27, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %25, %Qubit* %28) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %30 = bitcast i8* %29 to %Qubit** + %31 = load %Qubit*, %Qubit** %30, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %31) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %34, %Qubit* %qubit) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %qubit, %Qubit* %37) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %39 = bitcast i8* %38 to %Qubit** + %40 = load %Qubit*, %Qubit** %39, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %40) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %42 = bitcast i8* %41 to %Qubit** + %43 = load %Qubit*, %Qubit** %42, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %45 = bitcast i8* %44 to %Qubit** + %46 = load %Qubit*, %Qubit** %45, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %43, %Qubit* %46) + br label %continue__1 + +else__1: ; preds = %test2__1 + %47 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Z, [2 
x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %47) + %48 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %49 = bitcast %Tuple* %48 to { %Array*, %Qubit* }* + %50 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i32 0, i32 0 + %51 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %49, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %50, align 8 + store %Qubit* %qubit, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %47, { %Array*, %Qubit* }* %49) + call void @__quantum__rt__capture_update_reference_count(%Callable* %47, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %ctls, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, 
align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %8, align 8 + %9 = call %Array* @__quantum__rt__array_concatenate(%Array* %ctls, %Array* %4) + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %9, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %control = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__cnot__body(%Qubit* %control, %Qubit* %qubit) + br label %continue__1 + +test2__1: 
; preds = %test1__1 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %7 = icmp eq i64 %6, 2 + br i1 %7, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 1) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %10, %Qubit* %13, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test2__1 + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %14) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %17, align 8 + store %Qubit* %qubit, %Qubit** %18, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %14, { %Array*, %Qubit* }* %16) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, 
%then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 3)) + %5 = bitcast %Tuple* %4 to { %Qubit*, %Qubit*, %Qubit* }* + %6 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 2 + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %7, align 8 + store %Qubit* %target, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic___9befc69676a248a794d7a83b374c573e___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + %3 = bitcast { %Qubit*, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %numControlPairs = sdiv i64 %numControls, 2 + %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) + %4 = sub i64 %numControlPairs, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0__numPair__, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0__numPair__ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0__numPair__ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) + 
%16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0__numPair__, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %numControls, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %numControls, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %30 = bitcast %Tuple* %29 to { %Array*, { %Qubit*, %Qubit* }* }* + %31 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { 
%Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %30, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1__newControls__, %Array** %31, align 8 + store { %Qubit*, %Qubit* }* %arg, { %Qubit*, %Qubit* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %29, %Tuple* null) + %33 = sub i64 %numControlPairs, 1 + %34 = sub i64 %33, 0 + %35 = sdiv i64 %34, 1 + %36 = mul i64 1, %35 + %37 = add i64 0, %36 + %38 = load %Range, %Range* @EmptyRange, align 4 + %39 = insertvalue %Range %38, i64 %37, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0__numPair____ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %44 + %47 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) + %51 = bitcast i8* %50 to %Qubit** + %52 = load %Qubit*, %Qubit** %51, align 8 + %53 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %54 = add i64 %53, 1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) + %59 = bitcast i8* %58 to %Qubit** + 
%60 = load %Qubit*, %Qubit** %59, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %__qsVar0____qsVar0__numPair____, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %temps) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { 
%Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void 
@__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %arg = load %Qubit*, %Qubit** %2, align 8 + %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %numControlPairs = sdiv i64 %numControls, 2 + %temps = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numControlPairs) + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) + %3 = sub i64 %numControlPairs, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %4 = icmp sle i64 %__qsVar0__numPair__, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = mul i64 2, %__qsVar0__numPair__ + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %5) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = mul i64 2, %__qsVar0__numPair__ + %10 = add i64 %9, 1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %10) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, 
%Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %8, %Qubit* %13, %Qubit* %16) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %__qsVar0__numPair__, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = srem i64 %numControls, 2 + %19 = icmp eq i64 %18, 0 + br i1 %19, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) + %22 = bitcast i8* %21 to %Qubit** + %23 = sub i64 %numControls, 1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %23) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + store %Qubit* %26, %Qubit** %22, align 8 + %27 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %27, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %29 = bitcast %Tuple* %28 to { %Array*, %Qubit* }* + %30 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %29, i32 0, i32 1 + call void 
@__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) + store %Array* %__qsVar1__newControls__, %Array** %30, align 8 + store %Qubit* %arg, %Qubit** %31, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %28, %Tuple* null) + %32 = sub i64 %numControlPairs, 1 + %33 = sub i64 %32, 0 + %34 = sdiv i64 %33, 1 + %35 = mul i64 1, %34 + %36 = add i64 0, %35 + %37 = load %Range, %Range* @EmptyRange, align 4 + %38 = insertvalue %Range %37, i64 %36, 0 + %39 = insertvalue %Range %38, i64 -1, 1 + %40 = insertvalue %Range %39, i64 0, 2 + %41 = extractvalue %Range %40, 0 + %42 = extractvalue %Range %40, 1 + %43 = extractvalue %Range %40, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %44 = icmp sgt i64 %42, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0__numPair____ = phi i64 [ %41, %preheader__1 ], [ %60, %exiting__2 ] + %45 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %43 + %46 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %43 + %47 = select i1 %44, i1 %45, i1 %46 + br i1 %47, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %48 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %48) + %50 = bitcast i8* %49 to %Qubit** + %51 = load %Qubit*, %Qubit** %50, align 8 + %52 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %53 = add i64 %52, 1 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %53) + %55 = bitcast i8* %54 to %Qubit** + %56 = load %Qubit*, %Qubit** %55, align 8 + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) + %58 = bitcast i8* %57 to %Qubit** + %59 = load %Qubit*, %Qubit** %58, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %51, %Qubit* %56, %Qubit* %59) + br label %exiting__2 + +exiting__2: ; preds = 
%body__2 + %60 = add i64 %__qsVar0____qsVar0__numPair____, %42 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %temps) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { 
%Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @__quantum__qis__m__body(%Qubit* %qubit) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliX, align 1 + %1 = icmp eq i2 %pauli, %0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = load i2, i2* @PauliY, align 1 + %3 = icmp eq i2 %pauli, %2 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %4 = load i2, i2* @PauliZ, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test2__1 + %6 = fneg double %theta + %7 = fdiv double %6, 
2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____body(double %7) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__rx__body(double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__ry__body(double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__rz__body(double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliX, align 1 + %1 = icmp eq i2 %pauli, %0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = load i2, i2* @PauliY, align 1 + %3 = icmp eq i2 %pauli, %2 + br i1 %3, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %4 = load i2, i2* @PauliZ, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test2__1 + %6 = fneg double %theta + %7 = fdiv double %6, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____adj(double %7) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + ret void +} + 
+define internal void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %0, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__Ry__body(double %0, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %0, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = load i2, i2* @PauliX, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %theta, double* %8, align 8 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* 
%__controlQubits__, { double, %Qubit* }* %7) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = load i2, i2* @PauliY, align 1 + %11 = icmp eq i2 %pauli, %10 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Qubit* }* + %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 1 + store double %theta, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %13) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %16 = load i2, i2* @PauliZ, align 1 + %17 = icmp eq i2 %pauli, %16 + br i1 %17, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Qubit* }* + %20 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 1 + store double %theta, double* %20, align 8 + store %Qubit* %qubit, %Qubit** %21, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %19) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +else__1: ; preds = %test2__1 + %22 = fneg double %theta + %23 = fdiv double %22, 2.000000e+00 + call 
void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctl(%Array* %__controlQubits__, double %23) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__rx__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = load i2, i2* @PauliZ, align 1 + %8 = load i2, i2* @PauliX, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %7, i2 %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %10) + %13 = load i2, i2* 
@PauliZ, align 1 + %14 = load i2, i2* @PauliX, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %13, i2 %14) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %17 = bitcast %Tuple* %16 to { %Array*, { double, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Qubit* }* + %22 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 1 + store double %theta, double* %22, align 8 + store %Qubit* %qubit, %Qubit** %23, align 8 + store %Array* %ctls, %Array** %18, align 8 + store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 + call void @Microsoft__Quantum__Intrinsic___ad761790e5b447849d6dea7cab2a3295___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, 
i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__ry__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = load i2, i2* @PauliZ, align 1 + %8 = load i2, i2* @PauliY, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____body(%Qubit* %qubit, i2 %7, i2 %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { double, %Qubit* }* + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %10, i32 0, i32 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call 
void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %10) + %13 = load i2, i2* @PauliZ, align 1 + %14 = load i2, i2* @PauliY, align 1 + call void @Microsoft__Quantum__Intrinsic____QsRef23__MapPauli____adj(%Qubit* %qubit, i2 %13, i2 %14) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Ry, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %17 = bitcast %Tuple* %16 to { %Array*, { double, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %17, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Qubit* }* + %22 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %21, i32 0, i32 1 + store double %theta, double* %22, align 8 + store %Qubit* %qubit, %Qubit** %23, align 8 + store %Array* %ctls, %Array** %18, align 8 + store { double, %Qubit* }* %21, { double, %Qubit* }** %19, align 8 + call void @Microsoft__Quantum__Intrinsic___ad761790e5b447849d6dea7cab2a3295___QsRef23__ApplyWithLessControlsA____body(%Callable* %15, { %Array*, { double, %Qubit* }* }* %17) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %ctls, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %4 = icmp eq i64 %3, 0 + br i1 %4, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %6 = icmp eq i64 %5, 1 + br i1 %6, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %7 = fdiv double %theta, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %7, %Qubit* %qubit) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %10, %Qubit* %qubit) + %11 = fneg double %theta + %12 = fdiv double %11, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %12, %Qubit* %qubit) + 
%13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %15, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rz, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %18 = bitcast %Tuple* %17 to { %Array*, { double, %Qubit* }* }* + %19 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %18, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { double, %Qubit* }* + %23 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %22, i32 0, i32 1 + store double %theta, double* %23, align 8 + store %Qubit* %qubit, %Qubit** %24, align 8 + store %Array* %ctls, %Array** %19, align 8 + store { double, %Qubit* }* %22, { double, %Qubit* }** %20, align 8 + call void @Microsoft__Quantum__Intrinsic___ad761790e5b447849d6dea7cab2a3295___QsRef23__ApplyWithLessControlsA____body(%Callable* %16, { %Array*, { double, %Qubit* }* }* %18) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* 
%16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = load i2, i2* @PauliX, align 1 + %5 = icmp eq i2 %pauli, %4 + br i1 %5, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %theta, double* %8, align 8 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %7) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = load i2, i2* @PauliY, align 1 + %11 = icmp eq i2 %pauli, %10 + br i1 %11, 
label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Qubit* }* + %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %13, i32 0, i32 1 + store double %theta, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %13) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %16 = load i2, i2* @PauliZ, align 1 + %17 = icmp eq i2 %pauli, %16 + br i1 %17, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Qubit* }* + %20 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %19, i32 0, i32 1 + store double %theta, double* %20, align 8 + store %Qubit* %qubit, %Qubit** %21, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %19) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +else__1: ; preds = %test2__1 + %22 = fneg double %theta + %23 = fdiv double %22, 2.000000e+00 + call void @Microsoft__Quantum__Intrinsic____QsRef23__ApplyGlobalPhase____ctladj(%Array* %__controlQubits__, double %23) + br label %continue__1 + +continue__1: ; preds = %else__1, %then2__1, %then1__1, %then0__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { double, %Qubit* }* + %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 + %7 = fneg double %theta + store double %7, double* %5, align 8 + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 
1) to i64)) + %4 = bitcast %Tuple* %3 to { double, %Qubit* }* + %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 + %7 = fneg double %theta + store double %7, double* %5, align 8 + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { double, %Qubit* }* + %5 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %4, i32 0, i32 1 + %7 = fneg double %theta + store double %7, double* %5, align 8 + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__body(double %theta, %Qubit* 
%qubit) { +entry: + %0 = load i2, i2* @PauliZ, align 1 + call void @Microsoft__Quantum__Intrinsic__R__body(i2 %0, double %theta, %Qubit* %qubit) + %1 = load i2, i2* @PauliI, align 1 + %2 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__R__body(i2 %1, double %2, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__adj(double %theta, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliI, align 1 + %1 = fneg double %theta + call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %0, double %1, %Qubit* %qubit) + %2 = load i2, i2* @PauliZ, align 1 + call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %2, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 %numerator, i64 %power, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliZ, align 1 + %1 = sub i64 0, %numerator + %2 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %0, i64 %1, i64 %2, %Qubit* %qubit) + %3 = load i2, i2* @PauliI, align 1 + %4 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %3, i64 %numerator, i64 %4, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { +entry: + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double -2.000000e+00, %0 + %2 = sitofp i64 %numerator to double + %3 = fmul double %1, %2 + %4 = sitofp i64 %power to double + %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) + %angle = fdiv double %3, %5 + call void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %angle, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 %numerator, i64 %power, %Qubit* %qubit) { +entry: + %0 = load i2, i2* @PauliI, align 1 + %1 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %0, i64 %numerator, i64 %1, %Qubit* 
%qubit) + %2 = load i2, i2* @PauliZ, align 1 + %3 = sub i64 0, %numerator + %4 = add i64 %power, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %2, i64 %3, i64 %4, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { +entry: + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double -2.000000e+00, %0 + %2 = sitofp i64 %numerator to double + %3 = fmul double %1, %2 + %4 = sitofp i64 %power to double + %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) + %__qsVar0__angle__ = fdiv double %3, %5 + call void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %__qsVar0__angle__, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__ctl(%Array* %__controlQubits__, { i64, i64, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 0 + %numerator = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 1 + %power = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, i64, i64, %Qubit* }* + %6 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 2 + %9 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, 
%Qubit* }* %5, i32 0, i32 3 + %10 = load i2, i2* @PauliZ, align 1 + %11 = sub i64 0, %numerator + %12 = add i64 %power, 1 + store i2 %10, i2* %6, align 1 + store i64 %11, i64* %7, align 4 + store i64 %12, i64* %8, align 4 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %5) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { i2, i64, i64, %Qubit* }* + %15 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 2 + %18 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %14, i32 0, i32 3 + %19 = load i2, i2* @PauliI, align 1 + %20 = add i64 %power, 1 + store i2 %19, i2* %15, align 1 + store i64 %numerator, i64* %16, align 4 + store i64 %20, i64* %17, align 4 + store %Qubit* %qubit, %Qubit** %18, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %14) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = 
getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 + %numerator = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 + %power = load i64, i64* %3, align 4 + %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 + %qubit = load %Qubit*, %Qubit** %4, align 8 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fmul double -2.000000e+00, %5 + %7 = sitofp i64 %numerator to double + %8 = fmul double %6, %7 + %9 = sitofp i64 %power to double + %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) + %angle = fdiv double %8, %10 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 + store i2 %pauli, i2* %13, align 1 + store double %angle, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1Frac__ctladj(%Array* %__controlQubits__, { i64, i64, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 0 + %numerator = load i64, 
i64* %1, align 4 + %2 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 1 + %power = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i64, i64, %Qubit* }, { i64, i64, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, i64, i64, %Qubit* }* + %6 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 2 + %9 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %5, i32 0, i32 3 + %10 = load i2, i2* @PauliI, align 1 + %11 = add i64 %power, 1 + store i2 %10, i2* %6, align 1 + store i64 %numerator, i64* %7, align 4 + store i64 %11, i64* %8, align 4 + store %Qubit* %qubit, %Qubit** %9, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %5) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, i64, i64, %Qubit* }* getelementptr ({ i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i2, i64, i64, %Qubit* }* + %14 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %13, i32 0, i32 3 + %18 = load i2, i2* @PauliZ, align 1 + %19 = sub i64 0, %numerator + %20 = add i64 
%power, 1 + store i2 %18, i2* %14, align 1 + store i64 %19, i64* %15, align 4 + store i64 %20, i64* %16, align 4 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 + %numerator = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 + %power = load i64, i64* %3, align 4 + %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 + %qubit = load %Qubit*, %Qubit** %4, align 8 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fmul double -2.000000e+00, %5 + %7 = sitofp i64 %numerator to double + %8 = fmul double %6, %7 + %9 = sitofp i64 %power to double + %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) + %__qsVar0__angle__ = fdiv double %8, %10 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, 
double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 + store i2 %pauli, i2* %13, align 1 + store double %__qsVar0__angle__, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__reset__body(%Qubit* %qubit) + ret void +} + +; Function Attrs: nounwind readnone speculatable willreturn +declare double @llvm.pow.f64(double, double) #0 + +declare void @__quantum__qis__rx__body(double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic___ad761790e5b447849d6dea7cab2a3295___QsRef23__ApplyWithLessControlsA____body(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + %3 = bitcast { double, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %numControls = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %numControlPairs = sdiv i64 %numControls, 2 + %temps = call %Array* 
@__quantum__rt__qubit_allocate_array(i64 %numControlPairs) + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 1) + %4 = sub i64 %numControlPairs, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0__numPair__ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0__numPair__, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0__numPair__ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0__numPair__ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0__numPair__) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0__numPair__, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %numControls, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %temps, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %numControls, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** 
%23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %temps, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1__newControls__ = phi %Array* [ %temps, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 1) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %30 = bitcast %Tuple* %29 to { %Array*, { double, %Qubit* }* }* + %31 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %30, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1__newControls__, %Array** %31, align 8 + store { double, %Qubit* }* %arg, { double, %Qubit* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %29, %Tuple* null) + %33 = sub i64 %numControlPairs, 1 + %34 = sub i64 %33, 0 + %35 = sdiv i64 %34, 1 + %36 = mul i64 1, %35 + %37 = add i64 0, %36 + %38 = load %Range, %Range* @EmptyRange, align 4 + %39 = insertvalue %Range %38, i64 %37, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = 
%exiting__2, %preheader__1 + %__qsVar0____qsVar0__numPair____ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0____qsVar0__numPair____, %44 + %47 = icmp sge i64 %__qsVar0____qsVar0__numPair____, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) + %51 = bitcast i8* %50 to %Qubit** + %52 = load %Qubit*, %Qubit** %51, align 8 + %53 = mul i64 2, %__qsVar0____qsVar0__numPair____ + %54 = add i64 %53, 1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %temps, i64 %__qsVar0____qsVar0__numPair____) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %__qsVar0____qsVar0__numPair____, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %temps, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__newControls__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %temps) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %3, { double, %Qubit* 
}* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +declare void @__quantum__qis__ry__body(double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Ry__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__body(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__adj(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctl(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Ry__ctladj(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +declare void @__quantum__qis__rz__body(double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = 
bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__s__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* 
}* %15, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, 
%Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__s__adj(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) + %10 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__qis__t__body(%Qubit*) + +declare void @__quantum__qis__t__adj(%Qubit*) + +define internal void 
@Microsoft__Quantum__Intrinsic__T__ctl(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__t__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__T, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = 
getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* 
+ %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %ctls, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__t__adj(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctls) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %6) + call void @Microsoft__Quantum__Intrinsic__R1Frac__adj(i64 1, i64 3, %Qubit* %qubit) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %8 = bitcast i8* %7 to %Qubit** + %9 = load 
%Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %9, %Qubit* %qubit) + call void @Microsoft__Quantum__Intrinsic__R1Frac__body(i64 1, i64 3, %Qubit* %qubit) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctls, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %11, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %12, %Qubit* %qubit) + br label %continue__1 + +else__1: ; preds = %test1__1 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__T, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_make_controlled(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %15 = bitcast %Tuple* %14 to { %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %15, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) + store %Array* %ctls, %Array** %16, align 8 + store %Qubit* %qubit, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____body(%Callable* %13, { %Array*, %Qubit* }* %15) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ctls, 
i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, 
%Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void 
@Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +define internal void @Microsoft__Quantum__Intrinsic___fff485d029404f308fe7e714aec986f1___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %arg = load %Qubit*, %Qubit** %2, align 8 + 
%__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 + %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) + %3 = sub i64 %__qsVar1__numControlPairs__, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %4 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %5) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %9 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %10 = add i64 %9, 1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %10) + %12 = bitcast i8* %11 to %Qubit** + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) + %15 = bitcast i8* %14 to %Qubit** + %16 = load %Qubit*, %Qubit** %15, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %8, %Qubit* %13, %Qubit* %16) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %__qsVar0____qsVar3__numPair____, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = srem i64 %__qsVar0__numControls__, 2 + %19 = icmp eq i64 %18, 0 + br i1 %19, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %21 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) + %22 = bitcast i8* %21 to %Qubit** + %23 = sub i64 %__qsVar0__numControls__, 1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %23) + %25 = bitcast i8* %24 to %Qubit** + %26 = load %Qubit*, %Qubit** %25, align 8 + store %Qubit* %26, %Qubit** %22, align 8 + %27 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %27, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %30 = bitcast %Tuple* %29 to { %Array*, %Qubit* }* + %31 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %30, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + store %Array* %__qsVar1____qsVar4__newControls____, %Array** %31, align 8 + store %Qubit* %arg, %Qubit** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %29, %Tuple* null) + %33 = sub i64 %__qsVar1__numControlPairs__, 1 + %34 = sub i64 %33, 0 + %35 = sdiv 
i64 %34, 1 + %36 = mul i64 1, %35 + %37 = add i64 0, %36 + %38 = load %Range, %Range* @EmptyRange, align 4 + %39 = insertvalue %Range %38, i64 %37, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %42, %preheader__1 ], [ %61, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + %47 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %49) + %51 = bitcast i8* %50 to %Qubit** + %52 = load %Qubit*, %Qubit** %51, align 8 + %53 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %54 = add i64 %53, 1 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %54) + %56 = bitcast i8* %55 to %Qubit** + %57 = load %Qubit*, %Qubit** %56, align 8 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) + %59 = bitcast i8* %58 to %Qubit** + %60 = load %Qubit*, %Qubit** %59, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %52, %Qubit* %57, %Qubit* %60) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal void @Microsoft__Quantum__Intrinsic___ad761790e5b447849d6dea7cab2a3295___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { double, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + %3 = bitcast { double, %Qubit* }* %arg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %__qsVar0__numControls__ = call i64 
@__quantum__rt__array_get_size_1d(%Array* %controls) + %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 + %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) + %4 = sub i64 %__qsVar1__numControlPairs__, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0____qsVar3__numPair____, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %__qsVar0__numControls__, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %__qsVar0__numControls__, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %31 = bitcast %Tuple* %30 to { %Array*, { double, %Qubit* }* }* + %32 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %31, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1____qsVar4__newControls____, %Array** %32, align 8 + store { double, %Qubit* }* %arg, { double, %Qubit* }** %33, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + %34 = sub i64 %__qsVar1__numControlPairs__, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = load %Range, %Range* @EmptyRange, align 4 + %40 = insertvalue %Range %39, i64 %38, 0 + %41 = insertvalue %Range %40, i64 -1, 1 + %42 = insertvalue %Range %41, i64 0, 2 + %43 = extractvalue %Range %42, 0 + %44 = extractvalue %Range %42, 1 + %45 = extractvalue %Range %42, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %46 = icmp sgt i64 %44, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %43, %preheader__1 ], [ %62, %exiting__2 ] + %47 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %48 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %49 = select i1 %46, i1 %47, i1 %48 + br i1 %49, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %50 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %50) + %52 = bitcast i8* %51 to %Qubit** + %53 = load %Qubit*, %Qubit** %52, align 8 + %54 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %55 = add i64 %54, 1 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %55) + %57 = bitcast i8* %56 to %Qubit** + %58 = load %Qubit*, %Qubit** %57, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) + %60 = bitcast i8* %59 to %Qubit** + %61 = load %Qubit*, %Qubit** %60, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %53, %Qubit* %58, %Qubit* %61) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %62 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + br label %header__2 + +exit__2: ; preds 
= %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic___9befc69676a248a794d7a83b374c573e___QsRef23__ApplyWithLessControlsA____adj(%Callable* %op, { %Array*, { %Qubit*, %Qubit* }* }* %0) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %controls = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %arg = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + %3 = bitcast { %Qubit*, %Qubit* }* %arg to %Tuple* 
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %__qsVar0__numControls__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %__qsVar1__numControlPairs__ = sdiv i64 %__qsVar0__numControls__, 2 + %__qsVar2__temps__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %__qsVar1__numControlPairs__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 1) + %4 = sub i64 %__qsVar1__numControlPairs__, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %__qsVar0____qsVar3__numPair____ = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %5 = icmp sle i64 %__qsVar0____qsVar3__numPair____, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %6) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = mul i64 2, %__qsVar0____qsVar3__numPair____ + %11 = add i64 %10, 1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %11) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar3__numPair____) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____body(%Qubit* %9, %Qubit* %14, %Qubit* %17) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %__qsVar0____qsVar3__numPair____, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = srem i64 %__qsVar0__numControls__, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__temps__, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = 
%exit__1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + %24 = sub i64 %__qsVar0__numControls__, 1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %24) + %26 = bitcast i8* %25 to %Qubit** + %27 = load %Qubit*, %Qubit** %26, align 8 + store %Qubit* %27, %Qubit** %23, align 8 + %28 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar2__temps__, %Array* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %__qsVar1____qsVar4__newControls____ = phi %Array* [ %__qsVar2__temps__, %condTrue__1 ], [ %28, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) + %31 = bitcast %Tuple* %30 to { %Array*, { %Qubit*, %Qubit* }* }* + %32 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %31, i32 0, i32 1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + store %Array* %__qsVar1____qsVar4__newControls____, 
%Array** %32, align 8 + store { %Qubit*, %Qubit* }* %arg, { %Qubit*, %Qubit* }** %33, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + %34 = sub i64 %__qsVar1__numControlPairs__, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = load %Range, %Range* @EmptyRange, align 4 + %40 = insertvalue %Range %39, i64 %38, 0 + %41 = insertvalue %Range %40, i64 -1, 1 + %42 = insertvalue %Range %41, i64 0, 2 + %43 = extractvalue %Range %42, 0 + %44 = extractvalue %Range %42, 1 + %45 = extractvalue %Range %42, 2 + br label %preheader__1 + +preheader__1: ; preds = %condContinue__1 + %46 = icmp sgt i64 %44, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0____qsVar0____qsVar3__numPair______ = phi i64 [ %43, %preheader__1 ], [ %62, %exiting__2 ] + %47 = icmp sle i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %48 = icmp sge i64 %__qsVar0____qsVar0____qsVar3__numPair______, %45 + %49 = select i1 %46, i1 %47, i1 %48 + br i1 %49, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %50 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %50) + %52 = bitcast i8* %51 to %Qubit** + %53 = load %Qubit*, %Qubit** %52, align 8 + %54 = mul i64 2, %__qsVar0____qsVar0____qsVar3__numPair______ + %55 = add i64 %54, 1 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %55) + %57 = bitcast i8* %56 to %Qubit** + %58 = load %Qubit*, %Qubit** %57, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__temps__, i64 %__qsVar0____qsVar0____qsVar3__numPair______) + %60 = bitcast i8* %59 to %Qubit** + %61 = load %Qubit*, %Qubit** %60, align 8 + call void @Microsoft__Quantum__Intrinsic____QsRef23__PhaseCCX____adj(%Qubit* %53, %Qubit* %58, %Qubit* %61) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + 
%62 = add i64 %__qsVar0____qsVar0____qsVar3__numPair______, %44 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__temps__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1____qsVar4__newControls____, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar2__temps__) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + ret void +} + +define i8 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() #1 { +entry: + %0 = call i1 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = sext i1 %0 to i8 + ret i8 %1 +} + +define void @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement() #2 { +entry: + %0 = call i1 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body() + %1 = call %String* @__quantum__rt__bool_to_string(i1 %0) + call void @__quantum__rt__message(%String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__bool_to_string(i1) + +declare 
void @__quantum__rt__string_update_reference_count(%String*, i32) + +attributes #0 = { nounwind readnone speculatable willreturn } +attributes #1 = { "InteropFriendly" } +attributes #2 = { "EntryPoint" } diff --git a/pyqir-parser/tests/test_api.py b/pyqir-parser/tests/test_api.py new file mode 100644 index 00000000..1523a5d7 --- /dev/null +++ b/pyqir-parser/tests/test_api.py @@ -0,0 +1,91 @@ +from pyqir_parser import * +import pytest + +def test_parser_pythonic(): + mod = QirModule("tests/teleportchain.baseprofile.bc") + func_name = "TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop" + func = mod.get_func_by_name(func_name) + assert(func.name == func_name) + func_list = mod.functions + assert(len(func_list) == 1) + assert(func_list[0].name == func_name) + interop_funcs = mod.get_funcs_by_attr("InteropFriendly") + assert(len(interop_funcs) == 1) + assert(len(mod.interop_funcs) == 1) + assert(mod.interop_funcs[0].name == interop_funcs[0].name) + assert(len(mod.entrypoint_funcs) == 0) + blocks = func.blocks + assert(len(blocks) == 9) + assert(blocks[0].name == "entry") + term = blocks[0].terminator + assert(isinstance(term, QirTerminator)) + assert(isinstance(term, QirCondBrTerminator)) + assert(term.true_dest == "then0__1.i.i.i") + assert(term.false_dest == "continue__1.i.i.i") + assert(term.condition.name == "0") + assert(blocks[1].terminator.dest == "continue__1.i.i.i") + assert(blocks[8].terminator.operand.type.width == 8) + block = func.get_block_by_name("then0__2.i.i3.i") + assert(isinstance(block.instructions[0], QirQisCallInstr)) + assert(isinstance(block.instructions[0].func_args[0], QirQubitConstant)) + assert(block.instructions[0].func_args[0].id == 5) + block = func.get_block_by_name("continue__1.i.i2.i") + var_name = block.terminator.condition.name + instr = func.get_instruction_by_output_name(var_name) + assert(isinstance(instr, QirQirCallInstr)) + assert(instr.output_name == var_name) + assert(instr.func_name == 
"__quantum__qir__read_result") + assert(instr.func_args[0].id == 3) + assert(isinstance(instr.type, QirIntegerType)) + assert(instr.type.width == 1) + + +def test_parser(): + mod = module_from_bitcode("tests/teleportchain.baseprofile.bc") + func_name = "TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop" + func = mod.get_func_by_name(func_name) + assert(func.name == func_name) + assert(len(func.parameters) == 0) + assert(func.return_type.is_integer) + func_list = mod.functions + assert(len(func_list) == 1) + assert(func_list[0].name == func_name) + interop_funcs = mod.get_funcs_by_attr("InteropFriendly") + assert(len(interop_funcs) == 1) + assert(interop_funcs[0].name == func_name) + assert(interop_funcs[0].get_attribute_value("requiredQubits") == "6") + assert(interop_funcs[0].required_qubits == 6) + blocks = func.blocks + assert(len(blocks) == 9) + assert(blocks[0].name == "entry") + entry_block = func.get_block_by_name("entry") + assert(entry_block.name == "entry") + assert(entry_block.terminator.is_condbr) + assert(not entry_block.terminator.is_ret) + assert(entry_block.terminator.condbr_true_dest == "then0__1.i.i.i") + assert(entry_block.terminator.condbr_false_dest == "continue__1.i.i.i") + assert(blocks[1].terminator.is_br) + assert(blocks[1].terminator.br_dest == "continue__1.i.i.i") + assert(blocks[8].terminator.is_ret) + assert(len(entry_block.instructions) == 11) + assert(entry_block.instructions[0].is_call) + assert(entry_block.instructions[0].call_func_name == "__quantum__qis__h__body") + assert(entry_block.instructions[0].is_qis_call) + param_list = entry_block.instructions[0].call_func_params + assert(len(param_list) == 1) + assert(param_list[0].is_constant) + assert(param_list[0].constant.is_qubit) + assert(param_list[0].constant.qubit_static_id == 0) + assert(entry_block.instructions[8].is_qis_call) + assert(entry_block.instructions[8].call_func_name == "__quantum__qis__mz__body") + 
assert(entry_block.instructions[8].call_func_params[0].constant.qubit_static_id == 1) + assert(entry_block.instructions[8].call_func_params[1].constant.result_static_id == 0) + branch_cond = entry_block.terminator.condbr_condition + assert(branch_cond.local_name == "0") + assert(entry_block.instructions[10].is_qir_call) + assert(entry_block.instructions[10].call_func_name == "__quantum__qir__read_result") + assert(entry_block.instructions[10].call_func_params[0].constant.result_static_id == 0) + assert(entry_block.instructions[10].has_output) + assert(entry_block.instructions[10].output_name == "0") + source_instr = func.get_instruction_by_output_name(branch_cond.local_name) + assert(source_instr.call_func_params[0].constant.result_static_id == 0) diff --git a/pyqir-parser/tox.ini b/pyqir-parser/tox.ini new file mode 100644 index 00000000..969767a2 --- /dev/null +++ b/pyqir-parser/tox.ini @@ -0,0 +1,26 @@ +[tox] +isolated_build = True + +[testenv] + +# https://github.com/tox-dev/tox/issues/1550 +# PYTHONIOENCODING = utf-8 needs to be set to work around windows bug +setenv = + LLVM_SYS_110_PREFIX = {env:LLVM_SYS_110_PREFIX} + PYTHONIOENCODING = utf-8 + +# needed temporarily for build to find cl.exe +passenv = * + +deps = -rrequirements-dev.txt + +[testenv:test] +description = Run the unit tests under {basepython} +commands = + python -m pip install . 
+ pytest {posargs} + +[testenv:pack] +description = Build the wheels under all installed platforms +commands = + maturin build --release diff --git a/qirlib/Cargo.toml b/qirlib/Cargo.toml new file mode 100644 index 00000000..0e42d845 --- /dev/null +++ b/qirlib/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "qirlib" +version = "0.1.0" +edition = "2018" +description = "Base Profile QIR library" + +[dependencies] +llvm-sys = "110" +inkwell = { git = "https://github.com/TheDan64/inkwell", branch = "master", default-features = false, features = ["llvm11-0", "target-x86"] } +log = "0.4.14" +libloading = "0.7.0" +cty="0.2.1" +rand="0.8.4" +base64 = "0.13.0" +lazy_static = "1.4.0" +mut_static = "5.0.0" + +[dev-dependencies] +serial_test = "0.5.1" +tempfile = "3.2.0" + +[lib] +name = "qirlib" diff --git a/qirlib/src/constants.rs b/qirlib/src/constants.rs new file mode 100644 index 00000000..291746ab --- /dev/null +++ b/qirlib/src/constants.rs @@ -0,0 +1,58 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use log; + +use super::types::Types; +use inkwell::module::Module; +use inkwell::values::GlobalValue; +use inkwell::values::PointerValue; + +pub struct Constants<'ctx> { + pub unit: Option>, + pub pauli_i: Option>, + pub pauli_x: GlobalValue<'ctx>, + pub pauli_y: GlobalValue<'ctx>, + pub pauli_z: GlobalValue<'ctx>, + pub empty_range: Option>, +} + +impl<'ctx> Constants<'ctx> { + pub fn new(module: &Module<'ctx>, types: &Types<'ctx>) -> Self { + Constants { + unit: types.tuple.map_or_else(|| None, |t| Some(t.const_null())), + pauli_i: Constants::get_global(module, "PauliI"), + pauli_x: Constants::get_global(module, "PauliX").expect("PauliX must be defined"), + pauli_y: Constants::get_global(module, "PauliY").expect("PauliY must be defined"), + pauli_z: Constants::get_global(module, "PauliZ").expect("PauliZ must be defined"), + empty_range: Constants::get_global(module, "EmptyRange"), + } + } + + fn get_global(module: &Module<'ctx>, name: &str) -> Option> { + let defined_global = module.get_global(name); + match defined_global { + None => { + log::debug!("{} global constant was not defined in the module", name); + None + } + Some(value) => Some(value), + } + } +} + +#[cfg(test)] +mod tests { + use crate::context::{Context, ContextType}; + + use super::*; + + #[test] + fn constants_can_be_loaded() { + let ctx = inkwell::context::Context::create(); + let name = String::from("temp"); + let context = Context::new(&ctx, ContextType::Template(&name)).unwrap(); + let types = Types::new(&context.context, &context.module); + let _ = Constants::new(&context.module, &types); + } +} diff --git a/qirlib/src/context.rs b/qirlib/src/context.rs new file mode 100644 index 00000000..23a11107 --- /dev/null +++ b/qirlib/src/context.rs @@ -0,0 +1,188 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ + +use inkwell::{OptimizationLevel, memory_buffer::MemoryBuffer, module::Module}; + +use std::path::Path; + +use crate::{constants::Constants, intrinsics::Intrinsics, runtime_library::RuntimeLibrary, types::Types}; + +pub struct Context<'ctx> { + pub context: &'ctx inkwell::context::Context, + pub module: inkwell::module::Module<'ctx>, + pub execution_engine: inkwell::execution_engine::ExecutionEngine<'ctx>, + pub builder: inkwell::builder::Builder<'ctx>, + pub types: Types<'ctx>, + pub runtime_library: RuntimeLibrary<'ctx>, + pub intrinsics: Intrinsics<'ctx>, + pub constants: Constants<'ctx>, +} + +pub enum ContextType<'ctx> { + Template(&'ctx String), + File(&'ctx String), +} + +impl<'ctx> Context<'ctx> { + pub fn new( + context: &'ctx inkwell::context::Context, + context_type: ContextType<'ctx>, + ) -> Result { + let builder = context.create_builder(); + let module = Context::load_module(context, context_type)?; + let execution_engine = module + .create_jit_execution_engine(OptimizationLevel::None) + .expect("Could not create JIT Engine"); + let types = Types::new(&context, &module); + let runtime_library = RuntimeLibrary::new(&module); + let intrinsics = Intrinsics::new(&module); + let constants = Constants::new(&module, &types); + Ok(Context { + builder, + module, + execution_engine, + types, + context, + runtime_library, + intrinsics, + constants, + }) + } + fn load_module( + context: &'ctx inkwell::context::Context, + context_type: ContextType<'ctx>, + ) -> Result, String> { + let module = match context_type { + ContextType::Template(name) => { + Context::load_module_from_bitcode_template(&context, &name[..])? 
+ } + ContextType::File(file_name) => { + let file_path = Path::new(&file_name[..]); + let ext = file_path.extension().and_then(std::ffi::OsStr::to_str); + let module = match ext { + Some("ll") => Context::load_module_from_ir_file(file_path, context)?, + Some("bc") => Context::load_module_from_bitcode_file(file_path, context)?, + _ => panic!("Unsupported module exetension {:?}", ext), + }; + module + } + }; + Ok(module) + } + fn load_module_from_bitcode_template( + context: &'ctx inkwell::context::Context, + name: &'ctx str, + ) -> Result, String> { + let module_contents = include_bytes!("module.bc"); + let buffer = MemoryBuffer::create_from_memory_range_copy(module_contents, name); + match Module::parse_bitcode_from_buffer(&buffer, context) { + Err(err) => { + let message = err.to_string(); + return Err(message); + } + Ok(module) => Ok(module), + } + } + + fn load_module_from_bitcode_file>( + path: P, + context: &'ctx inkwell::context::Context, + ) -> Result, String> { + match Module::parse_bitcode_from_path(path, context) { + Err(err) => { + let message = err.to_string(); + return Err(message); + } + Ok(module) => Ok(module), + } + } + + fn load_module_from_ir_file>( + path: P, + context: &'ctx inkwell::context::Context, + ) -> Result, String> { + let memory_buffer = Context::load_memory_buffer_from_ir_file(path)?; + + match context.create_module_from_ir(memory_buffer) { + Err(err) => { + let message = err.to_string(); + return Err(message); + } + Ok(module) => Ok(module), + } + } + + fn load_memory_buffer_from_ir_file>(path: P) -> Result { + match MemoryBuffer::create_from_file(path.as_ref()) { + Err(err) => { + let message = err.to_string(); + return Err(message); + } + Ok(memory_buffer) => Ok(memory_buffer), + } + } + + pub fn emit_bitcode(&self, file_path: &str) { + let bitcode_path = Path::new(file_path); + self.module.write_bitcode_to_path(&bitcode_path); + } + + pub fn emit_ir(&self, file_path: &str) -> Result<(), String> { + let ir_path = 
Path::new(file_path); + if let Err(llvmstr) = self.module.print_to_file(ir_path) { + return Err(llvmstr.to_string()); + } + Ok(()) + } + + pub fn get_ir_string(&self) -> String { + let ir = self.module.print_to_string(); + let result = ir.to_string(); + result + } + + pub fn get_bitcode_base64_string(&self) -> String { + let buffer = self.module.write_bitcode_to_memory(); + let bytes = buffer.as_slice(); + let result = base64::encode(bytes); + result + } +} + +#[cfg(test)] +mod tests { + use crate::context::{Context, ContextType}; + use std::fs::File; + use std::io::prelude::*; + + use tempfile::tempdir; + + #[test] + fn emitted_bitcode_files_are_identical_to_base64_encoded() { + let dir = tempdir().expect(""); + let tmp_path = dir.into_path(); + let name = String::from("test"); + let file_path = tmp_path.join(format!("{}.bc", name)); + let file_path_string = file_path.display().to_string(); + + let ctx = inkwell::context::Context::create(); + let name = String::from("temp"); + let context = Context::new(&ctx, ContextType::Template(&name)).unwrap(); + context.emit_bitcode(file_path_string.as_str()); + let mut emitted_bitcode_file = + File::open(file_path_string.as_str()).expect("Could not open emitted bitcode file"); + let mut buffer = vec![]; + + emitted_bitcode_file + .read_to_end(&mut buffer) + .expect("Could not read emitted bitcode file"); + let emitted_bitcode_bytes = buffer.as_slice(); + + let b64_bitcode = context.get_bitcode_base64_string(); + let decoded = base64::decode(b64_bitcode).expect("could not decode base64 encoded module"); + let decoded_bitcode_bytes = decoded.as_slice(); + + assert_eq!(emitted_bitcode_bytes, decoded_bitcode_bytes); + } +} diff --git a/qirlib/src/intrinsics.rs b/qirlib/src/intrinsics.rs new file mode 100644 index 00000000..0dc350dd --- /dev/null +++ b/qirlib/src/intrinsics.rs @@ -0,0 +1,202 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use log; + +use inkwell::module::Module; +use inkwell::values::FunctionValue; + +pub struct Intrinsics<'ctx> { + pub m: Option>, + pub m_ins: Option>, + pub r_x: Option>, + pub r_y: Option>, + pub r_z: Option>, + pub r_ins: Option>, + pub r_adj_ins: Option>, + pub r_ctl_ins: Option>, + pub r_ctl_adj_ins: Option>, + pub reset: Option>, + pub h: Option>, + pub h_ins: Option>, + pub h_ctl_ins: Option>, + pub x: Option>, + pub x_ins: Option>, + pub x_ctl: Option>, + pub x_ctl_ins: Option>, + pub y: Option>, + pub y_ins: Option>, + pub y_ctl: Option>, + pub y_ctl_ins: Option>, + pub z: Option>, + pub z_ins: Option>, + pub z_ctl: Option>, + pub z_ctl_ins: Option>, + pub s: Option>, + pub s_ins: Option>, + pub s_adj: Option>, + pub s_adj_ins: Option>, + pub s_ctl_ins: Option>, + pub s_ctl_adj_ins: Option>, + pub t: Option>, + pub t_ins: Option>, + pub t_adj: Option>, + pub t_adj_ins: Option>, + pub t_ctl_ins: Option>, + pub t_ctl_adj_ins: Option>, + pub dumpmachine: Option>, + pub dumpregister: Option>, +} + +impl<'ctx> Intrinsics<'ctx> { + pub fn new(module: &Module<'ctx>) -> Self { + let intrinsics = Intrinsics { + m: Intrinsics::get_mqi_body(module, "M"), + m_ins: Intrinsics::get_qis_intrinsic_function_body(module, "measure"), + r_x: Intrinsics::get_mqi_body(module, "Rx"), + r_y: Intrinsics::get_mqi_body(module, "Ry"), + r_z: Intrinsics::get_mqi_body(module, "Rz"), + r_ins: Intrinsics::get_qis_intrinsic_function_body(module, "r"), + r_adj_ins: Intrinsics::get_qis_intrinsic_function_adj(module, "r"), + r_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "r"), + r_ctl_adj_ins: Intrinsics::get_qis_intrinsic_function_ctladj(module, "r"), + reset: Intrinsics::get_mqi_body(module, "Reset"), + h: Intrinsics::get_mqi_body(module, "H"), + h_ins: Intrinsics::get_qis_intrinsic_function_body(module, "H"), + h_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "H"), + x: Intrinsics::get_mqi_body(module, "X"), + x_ins: 
Intrinsics::get_qis_intrinsic_function_body(module, "X"), + x_ctl: Intrinsics::get_mqi_ctl(module, "X"), + x_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "X"), + y: Intrinsics::get_mqi_body(module, "Y"), + y_ins: Intrinsics::get_qis_intrinsic_function_body(module, "Y"), + y_ctl: Intrinsics::get_mqi_ctl(module, "Y"), + y_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "Y"), + z: Intrinsics::get_mqi_body(module, "Z"), + z_ins: Intrinsics::get_qis_intrinsic_function_body(module, "Z"), + z_ctl: Intrinsics::get_mqi_ctl(module, "Z"), + z_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "Z"), + s: Intrinsics::get_mqi_body(module, "S"), + s_ins: Intrinsics::get_qis_intrinsic_function_body(module, "S"), + s_adj: Intrinsics::get_mqi_adj(module, "S"), + s_adj_ins: Intrinsics::get_qis_intrinsic_function_adj(module, "S"), + s_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "S"), + s_ctl_adj_ins: Intrinsics::get_qis_intrinsic_function_ctladj(module, "S"), + t: Intrinsics::get_mqi_body(module, "T"), + t_ins: Intrinsics::get_qis_intrinsic_function_body(module, "T"), + t_adj: Intrinsics::get_mqi_adj(module, "T"), + t_adj_ins: Intrinsics::get_qis_intrinsic_function_adj(module, "T"), + t_ctl_ins: Intrinsics::get_qis_intrinsic_function_ctl(module, "T"), + t_ctl_adj_ins: Intrinsics::get_qis_intrinsic_function_ctladj(module, "T"), + dumpmachine: Intrinsics::get_qis_intrinsic_function_body(module, "dumpmachine"), + dumpregister: Intrinsics::get_qis_intrinsic_function_body(module, "dumpregister"), + }; + //Intrinsics::test_template_intrinsics(&intrinsics); + intrinsics + } + + fn get_qis_intrinsic_function( + module: &Module<'ctx>, + name: &str, + ) -> Option> { + let function_name = format!("__quantum__qis__{}", name.to_lowercase()); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_qis_intrinsic_function_ctl( + module: &Module<'ctx>, + name: &str, + ) -> Option> { + let function_name = 
format!("__quantum__qis__{}__ctl", name.to_lowercase()); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_qis_intrinsic_function_ctladj( + module: &Module<'ctx>, + name: &str, + ) -> Option> { + let function_name = format!("__quantum__qis__{}__ctladj", name.to_lowercase()); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_qis_intrinsic_function_body( + module: &Module<'ctx>, + name: &str, + ) -> Option> { + let function_name = format!("__quantum__qis__{}__body", name.to_lowercase()); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_qis_intrinsic_function_adj( + module: &Module<'ctx>, + name: &str, + ) -> Option> { + let function_name = format!("__quantum__qis__{}__adj", name.to_lowercase()); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_mqi_body(module: &Module<'ctx>, name: &str) -> Option> { + let function_name = format!("Microsoft__Quantum__Intrinsic__{}__body", name); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_mqi_ctl(module: &Module<'ctx>, name: &str) -> Option> { + let function_name = format!("Microsoft__Quantum__Intrinsic__{}__ctl", name); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_mqi_adj(module: &Module<'ctx>, name: &str) -> Option> { + let function_name = format!("Microsoft__Quantum__Intrinsic__{}__adj", name); + Intrinsics::get_function(module, function_name.as_str()) + } + + fn get_function(module: &Module<'ctx>, function_name: &str) -> Option> { + let defined_function = module.get_function(&function_name); + match defined_function { + None => { + log::debug!( + "{} global function was not defined in the module", + function_name + ); + None + } + Some(value) => Some(value), + } + } + + fn test_template_intrinsics(intrinsics: &Intrinsics<'ctx>) { + intrinsics.m.expect("M gate must be defined"); + intrinsics.r_x.expect("Rx gate must be defined"); + intrinsics.r_y.expect("Ry gate must be 
defined"); + intrinsics.r_z.expect("Rz gate must be defined"); + intrinsics.reset.expect("Reset gate must be defined"); + intrinsics.h.expect("H gate must be defined"); + intrinsics.x.expect("X gate must be defined"); + intrinsics.x_ctl.expect("X_ctl function must be defined"); + intrinsics.x_ctl_ins.expect("X_ctl gate must be defined"); + intrinsics.y.expect("Y gate must be defined"); + intrinsics.z.expect("Z gate must be defined"); + intrinsics.z_ctl.expect("Z_ctl function must be defined"); + intrinsics.z_ctl_ins.expect("Z_ctl gate must be defined"); + intrinsics.s.expect("S gate must be defined"); + intrinsics.s_adj.expect("S_adj gate must be defined"); + intrinsics.t.expect("T gate must be defined"); + intrinsics.t_adj.expect("T_adj gate must be defined"); + intrinsics.dumpmachine.expect("dumpmachine must be defined"); + } +} + +#[cfg(test)] +mod tests { + use crate::context::{Context, ContextType}; + + use super::*; + + #[test] + fn intrinsics_can_be_loaded() { + let ctx = inkwell::context::Context::create(); + let name = String::from("temp"); + let context = Context::new(&ctx, ContextType::Template(&name)).unwrap(); + let _ = Intrinsics::new(&context.module); + } +} diff --git a/qirlib/src/lib.rs b/qirlib/src/lib.rs new file mode 100644 index 00000000..7cff4361 --- /dev/null +++ b/qirlib/src/lib.rs @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#![deny(clippy::all, clippy::pedantic)] + +pub mod constants; +pub mod context; +pub mod intrinsics; +pub mod runtime_library; +pub mod passes; +pub mod types; \ No newline at end of file diff --git a/qirlib/src/module.bc b/qirlib/src/module.bc new file mode 100644 index 0000000000000000000000000000000000000000..a064d0d20d1e4805af7f0cb88dca606d75435e08 GIT binary patch literal 8556 zcmbtZ3s@7^o%m70YUYbY(6HyBkZ9udjs0jf@r8Xe#mDV;15CkuQ7=o`>nIvFn z#WpHZY`eSYYODRMja7TKdMg3ch}2Tw7H@@$)po5P*P`osW!W>C2}sn7y?Z{GIgj7@ zpa1*(2U3+yKTZh&zy|=3hs(yz&HbkEjei_?**}ZSMBtnh2jCbN0DqPgu;6$U{6)=? z8x$_i9IhZfX(LjFkN3eu;hc4Oq&S_Gy3s$iM-U&9hzaNU#T#of46V}0v>;opK(Uck z;KKHX5XxCK#7QLHSS5^!im}JN?O3#dIN^eH-yr~)2H){xUA{y?HD^=g_A2)h4xDej+Eoto|- zigMQauu^x7H4vsTtf-c;KGIi2i5O?E-(Ikt==m@bYXjE|QH30TYvK6Dtj{sr|8H|0 zjaNjsLZs{CAjLhHfB4;ZoANup!2RE>w&=S4z9J!(^}h0$8T=%$5DP$Bq<TVD#Fif6<;xnFG(ZFfcJW-^%j-iZ8`sro$u)7{nEwj zgF6A318Gop3ASib3HYAN#{xGh*4EYmBq6oo<4Jq|^pBVJ>^n7S>RUFBu&EJcLkw&J zVH5A9jMVa>X{koYNJ&sWU`If6Q%$Y`$>Z3qhXBA9X8%ik`xF(}w)M7iroK39=o9t~3+E*D7;so|%S>ZEjoS%~Oo! z`HLI9VRLs)RsNsi2%j{0N_xHE&VU=L$AMifiqkcF+qnX5S|m^8_M`wgSU(^ zr`t6w(qj-3Ky{t}yrRTQt1+t%U-4G{^A{`Rk5Gp)rXHTiBOjT9GEP}lC_|b&b8^bu zG5u}cCvNuVJ_dW2XN~vP*yeR+QKpw-ADn2!+W=tV1qL1E7zLsRCkHSw1popaBj!B* zn53a^b9E|j6MXhPz<-7A>tPyD1S`g@Snj2h&vFCyq`-(A#8IC1HTTsB;L#>rf%5tl zuy6CpK@^}&e{3HTR>ksiB-M+UHOtF&UwOb)M-L-NwsOtQhoQK+1m&CY;SEAjV5Ys+ zcf#!B93`tx9*~0s%08=bFOCxN{%kT*fxWQ-KIc8d!`km)tb#HyCsR1#ge0xfS? 
zivJAwzX^DKByTdF8s6-boZc!fl=-+}*06T=beD2^Eqi+VidvHaeG5ZZX4nh{bd`a< zY=zB0*hKAtyjyl&w~SvZ<6rCL^{RN21+9`Fr7%TGnziADvUie?Sd%-HN&DF;N31DT z?Bw>|kYyV5ts?ZrBAXFDNt?lLGicCdQRuS!K9KBk(x%32ONBOLgKcGlIrerh?{*jO zZWHg8jGgD&D(=}TZg)SgXvJ+(ajRLo8T-WjfJoKR@ z`5Zgh(Sn~Swiy=Mj0UTz2wkDEnS?e|qfK;r0sl7O-%;^;?7VAoemB9p1*IUOBv+i_ z%`oFjVa5-?(k1Tcmi&r@7Yefy9m;7RTa(*W;}}6Sw&g3(6%ARLeRe*jf$$~aZCde7 zspLneWDOEdF)WqzYPmVp*2MOfWQTG(RLH~TlpbYr4IH-A9`l2e+Ll9pFmzQEYHTns zx+dV=YT{j2@qU*@I?pJ=d%8zo>XyJus4zp6?66MXZw;$zNvu^SwYQ`kbSAl4po}T) z8T*%FDCBA>RNz9JsPj1QmWKaZ7q^$-UzhQ(6a0HBUa5e0O~!-6-@CZ*#_Cb=?#q(REs6V)7KPj`3J$R zAd<@g7LB0aS%V-XAxvwO<;ekoakHXog#v(C6yy9D$ol3QGX+_+4NQ;Za^Wt+`qaK! zp^!pN^>c1QjI(hJaGZ^i)q!j_P{{6QNNd&xU{Zpl$F58>885$0pJe(}mZzakx*E+n z=Rl+!4l~Y;5odE{?LWHD)SNMFM(Ec`R_xtNptw=Z)@=;|#ZFtDEp4 z$q#!ZpTsGWmB=v!VYGO`zg1K)$mQD%tY0g;oaGF{qLoD%8N7e>sU$ZZ{ik0X2;Qo;_{y(6tGS1Tw-V^XNpy{Y>U?(ps-_CgCPoo{BQ=*@;cg8U^JxnMVOi>#vdfjSMYg zcv#ZmZ|vfF7LEu|J+pID?BLG5y%697H7|2o!fJ&f(BYhYeuKd02Z6f>fu9coMI>A7 zQ7`Y$c2q=sz~YH$6MZLPr7*I7C#jh(oJw#k(82sDhr?Z3Ms{MI?Zr!0+&&MnF&^<8 zABh<3wnjkbU@4$;P%Ij_H#Ll$_VX>``wFgW2dQL~xX%VcJY6&|1jdFL#)g&h$wc-t zmq)*Mx_R@Z?4Il3uMZzoF4uf zj}~W=%^qL}dKm4N0{{-fU+%Jkw4R41{hR2Wvz45Mqm~hs{j*730eYts4nJ|PCX}wh zl^`fI5dS{&ufI&hk&K))S=!{w=cNztMIiNelUl1Yg3kymePTnj79_v}t8r;>t zRU;qhgK#M~wz+?HRocX}m7N+e_R>-6yzF;--BT~WzIp7!K=>upck!Mn4T3fmEM%6- zI_sIGtFuP;AMHw5&=U(kt2Xrw9RCZ}7ZDhAAakZZfA*}j)9TI(ZoVr@EAo2>EN3$} z?Aw_`W}X>1TbY(|F1|nb_lJ;?3rI-X0uwPd`LrC zXI~Y9z|mu0%E*5h>QJTW!!%7}&(Vw>=$EGUUxZughe6hV6g)QZ+mIxGy$z``4E#4d ze|5BrkvPxqj+;~U>su_e<~q5d*esy~yXQt6)xUw{Emq_$Kl7-VUvzekqBM5KjTvZ7 zp&8hJOizhFu78*k)SY#dm}d)hl&>7ni9~@(NA-(lK~-0FUU?FouXaSAWcXXT_$elE zA941`UItmIH!h^CGiT67ly4o<4(Er`C)buvIYysb?3r=jQHBsfU%U(P>5^t>LCuy6 z(uYt@)E@_6(-HtAz_;hFq&^pNqLDA_nlSK+-!^v%O^21CTcKc*#99>DpwOHQ0a}>M zEx?lso7iKSq_K{V7tq&5{0z)a=fMC^1{dCPHqnHTutsMbLsQs`Y#i!t@N{nu&E7;d zln8i`K@{?css!JLXzwkML+7n8pq?8;LlGj-ePP3OEdhW8zJobRcMU+*1v$Hr!3?yR zVD9b{>*?EBY)EC&ZG;4Gja3pHPd-(|%_E8uVIe`)*7Uj0Ik5SYr2**=EA=yd(tKPd 
z#AOl$^bbsd2>NUudx|@~&U^rg;2l1Leh*vjKN&4COdGinA zOGe~pG?d&EGc4H8i8hacFQ)tvaE)?|4jUD>RF2bKYL+vG#~Jd0d%?q!HSH1<8MhA% z*XfcGc^Zx{9ufZsWcc&HD#J%mmS9enI|eEK3ZiR*gb48i%r0}uqLz?yIl4`?o|PM0 zoR%X(2qz{FQ0;(OC(>PW+pCzHxmFR*8J!SM7fj0?KB9$pYMv|fwV3@*vA@zDlnTyh z>J@|~c*3EvB;GKMu9`1L$EI^n?f?q9T+!f}(u2e%x|TS|PQcJfcs2{uE2iNtmzMf((hT?V@`tb_CLym~^k(~+S#_+Ix~Fvpt9W&6&LVjmh$ z;zH)kEtAsHnR3Qm=U-#${2FVv!14=sa+DR8sfiZQFg~$#DZ62O6jMs^9>@dY8}%Or zAmZIl2NGDs?kKbhUk3I3%L-PT~X(AS# zV#@mF&{*c$;JzhW+y^X>^YQ|Y5t&P+- z+i?(D>+gUr_TEWNJt1U}a_bTrIjbB0cjlZ_X#k;T_DcgeL*1yM?gTwe^?@e9vgsgn zDsIgECh2Fo#^y8ZZLh5jvqT0IiUbpwDq#1haJnvU378ek-G{#uL|^63s3zy%nOi-| zg~W6BaoXfQ5twfktwcq1D-uV??SINp&t1G#g<3_PUFXaixa(AO5sZWrbjAMADXC?2 zS!467jSVQwO-zX*+-bF-`#3LCwTKM* z<8w>t_zUzzhRQL$RIRf= zF#X}t)q~NA(kCAE1m(bL&ih>n>>CNeXH;;@r@e5Cfv%8_u4d8kYGdM+HyE!{_>UFp z4)8n_C;m$PayM z@;)S^zx3Vt(?{gkXMB_(B$6od@!9$GZ&N_5(C?=`K_s9mYfH|nTuaRpvG z>D9$LlNxT;>q<>(gUR?f%&04`C^4xi+$x>1%!>%KT45-l&M@iULmqlgB- zRGe=tD=#aADqvhet)5eAG8UJX7w4?V(6+* z(hr>07A)~rCd2p>rXhF)1~Q&}kRG~th4JwiaU0M0*d8i69&z3>D!PV+4zs9(lO-c? 
zwBVV{08t|gm-9@dAZlb%3!jM;L_tR#LBQN+BK6v{qTqpy-#?xLVI06bV(>DCUZ*cJ zt{lvAPu)Gtf_hzfxpuL)UCXIBTkOeu!)?y2jhHEpSVaUfF}1C5KD Y6gNpW=>>gRK}CsfQb~#XwgBM&06&F(Y5)KL literal 0 HcmV?d00001 diff --git a/qirlib/src/module.ll b/qirlib/src/module.ll new file mode 100644 index 00000000..d9280fda --- /dev/null +++ b/qirlib/src/module.ll @@ -0,0 +1,579 @@ + +%Array = type opaque +%Qubit = type opaque +%Result = type opaque +%String = type opaque + +@PauliX = internal constant i2 1 +@PauliY = internal constant i2 -1 +@PauliZ = internal constant i2 -2 +@0 = internal constant [3 x i8] c", \00" +@1 = internal constant [2 x i8] c"[\00" +@2 = internal constant [3 x i8] c", \00" +@3 = internal constant [2 x i8] c"[\00" +@4 = internal constant [2 x i8] c"]\00" +@5 = internal constant [2 x i8] c"]\00" + +define internal %Array* @QuantumApplication__Run__body() { +entry: + %q0 = call %Qubit* @__quantum__rt__qubit_allocate() + %q1 = call %Qubit* @__quantum__rt__qubit_allocate() + %q2 = call %Qubit* @__quantum__rt__qubit_allocate() + %q3 = call %Qubit* @__quantum__rt__qubit_allocate() + %control0 = call %Qubit* @__quantum__rt__qubit_allocate() + %results = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 3) + %c = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %c_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 0) + %c_result_0 = bitcast i8* %c_0_raw to %Result** + %zero_0 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_0, i32 1) + store %Result* %zero_0, %Result** %c_result_0, align 8 + %c_1_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 1) + %c_result_1 = bitcast i8* %c_1_raw to %Result** + %zero_1 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_1, i32 1) + store %Result* %zero_1, %Result** %c_result_1, align 8 + %c_2_raw = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 2) + %c_result_2 = bitcast i8* %c_2_raw to %Result** + %zero_2 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_2, i32 1) + store %Result* %zero_2, %Result** %c_result_2, align 8 + %c_3_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 3) + %c_result_3 = bitcast i8* %c_3_raw to %Result** + %zero_3 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_3, i32 1) + store %Result* %zero_3, %Result** %c_result_3, align 8 + %i = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 3) + %i_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %i, i64 0) + %i_result_0 = bitcast i8* %i_0_raw to %Result** + %zero_01 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_01, i32 1) + store %Result* %zero_01, %Result** %i_result_0, align 8 + %i_1_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %i, i64 1) + %i_result_1 = bitcast i8* %i_1_raw to %Result** + %zero_12 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_12, i32 1) + store %Result* %zero_12, %Result** %i_result_1, align 8 + %i_2_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %i, i64 2) + %i_result_2 = bitcast i8* %i_2_raw to %Result** + %zero_23 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_23, i32 1) + store %Result* %zero_23, %Result** %i_result_2, align 8 + %j = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %j_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %j, i64 0) + %j_result_0 = bitcast i8* %j_0_raw to %Result** + %zero_04 = call %Result* @__quantum__rt__result_get_zero() + call void 
@__quantum__rt__result_update_reference_count(%Result* %zero_04, i32 1) + store %Result* %zero_04, %Result** %j_result_0, align 8 + %j_1_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %j, i64 1) + %j_result_1 = bitcast i8* %j_1_raw to %Result** + %zero_15 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %zero_15, i32 1) + store %Result* %zero_15, %Result** %j_result_1, align 8 + %results_result_tmp_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %results, i64 0) + %results_result_tmp_result_0 = bitcast i8* %results_result_tmp_0_raw to %Array** + store %Array* %c, %Array** %results_result_tmp_result_0, align 8 + %results_result_tmp_1_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %results, i64 1) + %results_result_tmp_result_1 = bitcast i8* %results_result_tmp_1_raw to %Array** + store %Array* %i, %Array** %results_result_tmp_result_1, align 8 + %results_result_tmp_2_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %results, i64 2) + %results_result_tmp_result_2 = bitcast i8* %results_result_tmp_2_raw to %Array** + store %Array* %j, %Array** %results_result_tmp_result_2, align 8 + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %__controlQubits__0_result_tmp_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %__controlQubits__0_result_tmp_result_0 = bitcast i8* %__controlQubits__0_result_tmp_0_raw to %Qubit** + store %Qubit* %q0, %Qubit** %__controlQubits__0_result_tmp_result_0, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %control0) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + %__controlQubits__6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %__controlQubits__0_result_tmp_0_raw7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%__controlQubits__6, i64 0) + %__controlQubits__0_result_tmp_result_08 = bitcast i8* %__controlQubits__0_result_tmp_0_raw7 to %Qubit** + store %Qubit* %q1, %Qubit** %__controlQubits__0_result_tmp_result_08, align 8 + call void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__6, %Qubit* %control0) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__6, i32 -1) + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %q0) + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %q0) + call void @Microsoft__Quantum__Intrinsic__Rx__body(double 1.500000e+01, %Qubit* %q1) + call void @Microsoft__Quantum__Intrinsic__Ry__body(double 1.600000e+01, %Qubit* %q2) + call void @Microsoft__Quantum__Intrinsic__Rz__body(double 1.700000e+01, %Qubit* %q3) + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %q0) + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %q1) + call void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %q2) + call void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %q3) + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %q0) + call void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %q1) + call void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %q2) + %measurement = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q0) + %c0_0_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 0) + %c0_result_0 = bitcast i8* %c0_0_raw to %Result** + %existing_value = load %Result*, %Result** %c0_result_0, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %existing_value, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %measurement, i32 1) + store %Result* %measurement, %Result** %c0_result_0, align 8 + %measurement9 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q1) + %c1_1_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 1) + %c1_result_1 = bitcast i8* %c1_1_raw to 
%Result** + %existing_value10 = load %Result*, %Result** %c1_result_1, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %existing_value10, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %measurement9, i32 1) + store %Result* %measurement9, %Result** %c1_result_1, align 8 + %measurement11 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q2) + %c2_2_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 2) + %c2_result_2 = bitcast i8* %c2_2_raw to %Result** + %existing_value12 = load %Result*, %Result** %c2_result_2, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %existing_value12, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %measurement11, i32 1) + store %Result* %measurement11, %Result** %c2_result_2, align 8 + %measurement13 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %q3) + %c3_3_raw = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %c, i64 3) + %c3_result_3 = bitcast i8* %c3_3_raw to %Result** + %existing_value14 = load %Result*, %Result** %c3_result_3, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %existing_value14, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %measurement13, i32 1) + store %Result* %measurement13, %Result** %c3_result_3, align 8 + call void @__quantum__rt__qubit_release(%Qubit* %control0) + call void @__quantum__rt__qubit_release(%Qubit* %q1) + call void @__quantum__rt__qubit_release(%Qubit* %q2) + call void @__quantum__rt__qubit_release(%Qubit* %q0) + call void @__quantum__rt__qubit_release(%Qubit* %q3) + ret %Array* %results +} + +declare void @__quantum__qis__dumpmachine__body(i8*) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, 
i64) + +declare %Result* @__quantum__rt__result_get_zero() + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__y__body(%Qubit*) + +declare void @__quantum__qis__z__body(%Qubit*) + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__h__body(%Qubit*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + %2 = load i2, i2* @PauliZ, align 1 + store i2 %2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %4 = bitcast i8* %3 to %Qubit** + store %Qubit* %qubit, %Qubit** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %5 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %5 +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* 
@__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) { +entry: + %pauli = load i2, i2* @PauliX, align 1 + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Ry__body(double %theta, %Qubit* %qubit) { +entry: + %pauli = load i2, i2* @PauliY, align 1 + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { +entry: + %pauli = load i2, i2* @PauliZ, align 1 + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s__adj(%Qubit*) + +declare void @__quantum__qis__t__body(%Qubit*) + +declare void @__quantum__qis__t__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +declare void @__quantum__qis__r__body(i2, double, %Qubit*) + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void 
@__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define { i64, i8* }* @QuantumApplication__Run__Interop() #0 { +entry: + %0 = call %Array* @QuantumApplication__Run__body() + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = mul i64 %1, 8 + %3 = call i8* @__quantum__rt__memory_allocate(i64 %2) + %4 = ptrtoint i8* %3 to i64 + %5 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 
= phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = mul i64 %6, 8 + %9 = add i64 %4, %8 + %10 = inttoptr i64 %9 to { i64, i8* }** + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %6) + %12 = bitcast i8* %11 to %Array** + %13 = load %Array*, %Array** %12, align 8 + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %13) + %15 = mul i64 %14, 1 + %16 = call i8* @__quantum__rt__memory_allocate(i64 %15) + %17 = ptrtoint i8* %16 to i64 + %18 = sub i64 %14, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %19 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__memory_allocate(i64 ptrtoint ({ i64, i8* }* getelementptr ({ i64, i8* }, { i64, i8* }* null, i32 1) to i64)) + %21 = bitcast i8* %20 to { i64, i8* }* + %22 = getelementptr { i64, i8* }, { i64, i8* }* %21, i64 0, i32 0 + store i64 %1, i64* %22, align 4 + %23 = getelementptr { i64, i8* }, { i64, i8* }* %21, i64 0, i32 1 + store i8* %3, i8** %23, align 8 + %24 = sub i64 %1, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %body__1 + %25 = phi i64 [ 0, %body__1 ], [ %36, %exiting__2 ] + %26 = icmp sle i64 %25, %18 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = mul i64 %25, 1 + %28 = add i64 %17, %27 + %29 = inttoptr i64 %28 to i8* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %25) + %31 = bitcast i8* %30 to %Result** + %32 = load %Result*, %Result** %31, align 8 + %33 = call %Result* @__quantum__rt__result_get_zero() + %34 = call i1 @__quantum__rt__result_equal(%Result* %32, %Result* %33) + %35 = select i1 %34, i8 0, i8 -1 + store i8 %35, i8* %29, align 1 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %37 = call i8* @__quantum__rt__memory_allocate(i64 ptrtoint 
({ i64, i8* }* getelementptr ({ i64, i8* }, { i64, i8* }* null, i32 1) to i64)) + %38 = bitcast i8* %37 to { i64, i8* }* + %39 = getelementptr { i64, i8* }, { i64, i8* }* %38, i64 0, i32 0 + store i64 %14, i64* %39, align 4 + %40 = getelementptr { i64, i8* }, { i64, i8* }* %38, i64 0, i32 1 + store i8* %16, i8** %40, align 8 + store { i64, i8* }* %38, { i64, i8* }** %10, align 8 + br label %exiting__1 + +header__3: ; preds = %exiting__3, %exit__1 + %41 = phi i64 [ 0, %exit__1 ], [ %48, %exiting__3 ] + %42 = icmp sle i64 %41, %24 + br i1 %42, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %41) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call i64 @__quantum__rt__array_get_size_1d(%Array* %45) + %47 = sub i64 %46, 1 + br label %header__4 + +exiting__3: ; preds = %exit__4 + %48 = add i64 %41, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret { i64, i8* }* %21 + +header__4: ; preds = %exiting__4, %body__3 + %49 = phi i64 [ 0, %body__3 ], [ %54, %exiting__4 ] + %50 = icmp sle i64 %49, %47 + br i1 %50, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 %49) + %52 = bitcast i8* %51 to %Result** + %53 = load %Result*, %Result** %52, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %53, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %54 = add i64 %49, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + br label %exiting__3 +} + +declare i8* @__quantum__rt__memory_allocate(i64) + +define void @QuantumApplication__Run() #1 { +entry: + %0 = call %Array* @QuantumApplication__Run__body() + %1 = call %String* @__quantum__rt__string_create(i8* 
getelementptr inbounds ([3 x i8], [3 x i8]* @0, i32 0, i32 0)) + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi %String* [ %2, %entry ], [ %36, %exiting__1 ] + %6 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %7 = icmp sle i64 %6, %4 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %6) + %9 = bitcast i8* %8 to %Array** + %10 = load %Array*, %Array** %9, align 8 + %11 = icmp ne %String* %5, %2 + br i1 %11, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %body__1 + %12 = call %String* @__quantum__rt__string_concatenate(%String* %5, %String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %body__1 + %13 = phi %String* [ %12, %condTrue__1 ], [ %5, %body__1 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @2, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %17 = sub i64 %16, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %18 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @5, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %5, %String* %19) + call void 
@__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__message(%String* %20) + %21 = sub i64 %3, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %condContinue__1 + %22 = phi %String* [ %15, %condContinue__1 ], [ %32, %exiting__2 ] + %23 = phi i64 [ 0, %condContinue__1 ], [ %33, %exiting__2 ] + %24 = icmp sle i64 %23, %17 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %23) + %26 = bitcast i8* %25 to %Result** + %27 = load %Result*, %Result** %26, align 8 + %28 = icmp ne %String* %22, %15 + br i1 %28, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %body__2 + %29 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %14) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %body__2 + %30 = phi %String* [ %29, %condTrue__2 ], [ %22, %body__2 ] + %31 = call %String* @__quantum__rt__result_to_string(%Result* %27) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %condContinue__2 + %33 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @4, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %34) + call void 
@__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %13, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %exit__1 + %37 = phi i64 [ 0, %exit__1 ], [ %44, %exiting__3 ] + %38 = icmp sle i64 %37, %21 + br i1 %38, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %37) + %40 = bitcast i8* %39 to %Array** + %41 = load %Array*, %Array** %40, align 8 + %42 = call i64 @__quantum__rt__array_get_size_1d(%Array* %41) + %43 = sub i64 %42, 1 + br label %header__4 + +exiting__3: ; preds = %exit__4 + %44 = add i64 %37, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + ret void + +header__4: ; preds = %exiting__4, %body__3 + %45 = phi i64 [ 0, %body__3 ], [ %50, %exiting__4 ] + %46 = icmp sle i64 %45, %43 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %45) + %48 = bitcast i8* %47 to %Result** + %49 = load %Result*, %Result** %48, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %49, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %50 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + br 
label %exiting__3 +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare %String* @__quantum__rt__result_to_string(%Result*) + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/qirlib/src/passes.rs b/qirlib/src/passes.rs new file mode 100644 index 00000000..1919636f --- /dev/null +++ b/qirlib/src/passes.rs @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use crate::context::Context; +use inkwell::passes::PassManager; +use inkwell::{ + passes::PassManagerBuilder, + OptimizationLevel, +}; + +pub fn run_basic_passes_on<'ctx>(context: &Context<'ctx>) -> bool { + let pass_manager_builder = PassManagerBuilder::create(); + pass_manager_builder.set_optimization_level(OptimizationLevel::None); + let fpm = PassManager::create(()); + fpm.add_global_dce_pass(); + fpm.add_strip_dead_prototypes_pass(); + pass_manager_builder.populate_module_pass_manager(&fpm); + fpm.run_on(&context.module) +} diff --git a/qirlib/src/runtime_library.rs b/qirlib/src/runtime_library.rs new file mode 100644 index 00000000..389863dc --- /dev/null +++ b/qirlib/src/runtime_library.rs @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use inkwell::module::Module; +use inkwell::values::FunctionValue; + +pub struct RuntimeLibrary<'ctx> { + pub result_get_zero: FunctionValue<'ctx>, + pub result_get_one: FunctionValue<'ctx>, + pub result_update_reference_count: FunctionValue<'ctx>, + pub result_equal: FunctionValue<'ctx>, + pub array_create_1d: FunctionValue<'ctx>, + pub array_get_size_1d: FunctionValue<'ctx>, + pub array_get_element_ptr_1d: FunctionValue<'ctx>, + pub array_update_alias_count: FunctionValue<'ctx>, + pub array_update_reference_count: FunctionValue<'ctx>, + pub memory_allocate: Option<FunctionValue<'ctx>>, + pub message: FunctionValue<'ctx>, + pub string_create: FunctionValue<'ctx>, + pub string_update_reference_count: FunctionValue<'ctx>, + pub string_concatenate: FunctionValue<'ctx>, + pub result_to_string: FunctionValue<'ctx>, + pub qubit_allocate: FunctionValue<'ctx>, + pub qubit_allocate_array: Option<FunctionValue<'ctx>>, + pub qubit_release: FunctionValue<'ctx>, + pub qubit_release_array: Option<FunctionValue<'ctx>>, +} + +impl<'ctx> RuntimeLibrary<'ctx> { + pub fn new(module: &Module<'ctx>) -> Self { + RuntimeLibrary { + result_get_zero: RuntimeLibrary::get_function(module, "result_get_zero") + .expect("__quantum__rt__result_get_zero function must be defined"), + result_get_one: RuntimeLibrary::get_function(module, "result_get_one") + .expect("__quantum__rt__result_get_one function must be defined"), + result_update_reference_count: RuntimeLibrary::get_function( + module, + "result_update_reference_count", + ) + .expect("__quantum__rt__result_update_reference_count function must be defined"), + result_equal: RuntimeLibrary::get_function(module, "result_equal") + .expect("__quantum__rt__result_equal function must be defined"), + + array_create_1d: RuntimeLibrary::get_function(module, "array_create_1d") + .expect("__quantum__rt__array_create_1d function must be defined"), + array_get_size_1d: RuntimeLibrary::get_function(module, "array_get_size_1d") + .expect("__quantum__rt__array_get_size_1d function must be defined"), + 
array_get_element_ptr_1d: RuntimeLibrary::get_function( + module, + "array_get_element_ptr_1d", + ) + .expect("__quantum__rt__array_get_element_ptr_1d function must be defined"), + array_update_alias_count: RuntimeLibrary::get_function( + module, + "array_update_alias_count", + ) + .expect("__quantum__rt__array_update_alias_count function must be defined"), + array_update_reference_count: RuntimeLibrary::get_function( + module, + "array_update_reference_count", + ) + .expect("__quantum__rt__array_update_reference_count function must be defined"), + memory_allocate: RuntimeLibrary::get_function(module, "memory_allocate"), + message: RuntimeLibrary::get_function(module, "message") + .expect("__quantum__rt__message function must be defined"), + string_create: RuntimeLibrary::get_function(module, "string_create") + .expect("__quantum__rt__string_create function must be defined"), + string_update_reference_count: RuntimeLibrary::get_function( + module, + "string_update_reference_count", + ) + .expect("__quantum__rt__string_update_reference_count function must be defined"), + string_concatenate: RuntimeLibrary::get_function(module, "string_concatenate") + .expect("__quantum__rt__string_concatenate function must be defined"), + result_to_string: RuntimeLibrary::get_function(module, "result_to_string") + .expect("__quantum__rt__result_to_string function must be defined"), + qubit_allocate: RuntimeLibrary::get_function(module, "qubit_allocate") + .expect("__quantum__rt__qubit_allocate function must be defined"), + qubit_allocate_array: RuntimeLibrary::get_function(module, "qubit_allocate_array"), + qubit_release: RuntimeLibrary::get_function(module, "qubit_release") + .expect("__quantum__rt__qubit_release function must be defined"), + qubit_release_array: RuntimeLibrary::get_function(module, "qubit_release_array"), + } + } + + fn get_function(module: &Module<'ctx>, name: &str) -> Option<FunctionValue<'ctx>> { + let function_name = format!("__quantum__rt__{}", name); + let defined_function = 
module.get_function(&function_name[..]); + + match defined_function { + None => { + log::debug!("{} was not defined in the module", function_name); + None + } + Some(value) => Some(value), + } + } +} + +#[cfg(test)] +mod tests { + use crate::context::{Context, ContextType}; + + use super::*; + + #[test] + fn runtime_library_can_be_loaded() { + let ctx = inkwell::context::Context::create(); + let name = String::from("temp"); + let context = Context::new(&ctx, ContextType::Template(&name)).unwrap(); + let _ = RuntimeLibrary::new(&context.module); + } +} diff --git a/qirlib/src/types.rs b/qirlib/src/types.rs new file mode 100644 index 00000000..3596d1fa --- /dev/null +++ b/qirlib/src/types.rs @@ -0,0 +1,84 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use log; + +use inkwell::context::Context; +use inkwell::module::Module; +use inkwell::types::FloatType; +use inkwell::types::IntType; +use inkwell::types::PointerType; +use inkwell::types::StructType; +use inkwell::AddressSpace; + +pub struct Types<'ctx> { + pub int: IntType<'ctx>, + pub double: FloatType<'ctx>, + pub bool: IntType<'ctx>, + pub pauli: IntType<'ctx>, + pub range: Option<StructType<'ctx>>, + pub result: PointerType<'ctx>, + pub qubit: PointerType<'ctx>, + pub string: PointerType<'ctx>, + pub big_int: Option<PointerType<'ctx>>, + pub tuple: Option<PointerType<'ctx>>, + pub array: PointerType<'ctx>, + pub callable: Option<PointerType<'ctx>>, +} + +impl<'ctx> Types<'ctx> { + pub fn new(context: &'ctx Context, module: &Module<'ctx>) -> Self { + Types { + int: context.i64_type(), + double: context.f64_type(), + bool: context.bool_type(), + pauli: context.custom_width_int_type(2), + + range: Types::get_struct(module, "Range"), + result: Types::get_struct_pointer(module, "Result").expect("Result must be defined"), + qubit: Types::get_struct_pointer(module, "Qubit").expect("Qubit must be defined"), + string: Types::get_struct_pointer(module, "String").expect("String must be defined"), + big_int: Types::get_struct_pointer(module, "BigInt"), + 
tuple: Types::get_struct_pointer(module, "Tuple"), + array: Types::get_struct_pointer(module, "Array").expect("Array must be defined"), + callable: Types::get_struct_pointer(module, "Callable"), + } + } + + fn get_struct(module: &Module<'ctx>, name: &str) -> Option<StructType<'ctx>> { + let defined_struct = module.get_struct_type(name); + match defined_struct { + None => { + log::debug!("{} was not defined in the module", name); + None + } + Some(value) => Some(value), + } + } + + fn get_struct_pointer(module: &Module<'ctx>, name: &str) -> Option<PointerType<'ctx>> { + let defined_struct = module.get_struct_type(name); + match defined_struct { + None => { + log::debug!("{} struct was not defined in the module", name); + None + } + Some(value) => Some(value.ptr_type(AddressSpace::Generic)), + } + } +} + +#[cfg(test)] +mod tests { + use crate::context::{Context, ContextType}; + + use super::*; + + #[test] + fn types_can_be_loaded() { + let ctx = inkwell::context::Context::create(); + let name = String::from("temp"); + let context = Context::new(&ctx, ContextType::Template(&name)).unwrap(); + let _ = Types::new(&context.context, &context.module); + } +}