[GHA] test install #3080

Merged: 16 commits, Nov 25, 2024
93 changes: 93 additions & 0 deletions .github/workflows/install.yml
@@ -0,0 +1,93 @@
name: Test install
permissions: read-all

on:
  workflow_dispatch:
    inputs:
      pull_request_number:
        description: 'The pull request number'
        default: ''
  schedule:
    - cron: '0 0 * * *'

jobs:
  install-cpu:
    name: Test install [${{ matrix.backend }} - ${{ matrix.runner }}]
    runs-on: ${{ matrix.runner }}
    strategy:
      fail-fast: false
      matrix:
        backend: ["torch", "tf", "onnx", "openvino"]
        runner: ["windows-latest", "ubuntu-22.04"]
    defaults:
      run:
        shell: bash
    steps:
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          lfs: true
          fetch-depth: 0 # Fetch full history to allow checking out any branch or PR
      - name: Fetch and Checkout the Pull Request Branch
        if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.pull_request_number != '' }}
        run: |
          git fetch origin pull/${{ github.event.inputs.pull_request_number }}/head:pr-${{ github.event.inputs.pull_request_number }}
          git checkout pr-${{ github.event.inputs.pull_request_number }}
      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:
          python-version: "3.10"
          cache: pip
      - name: Install test requirements
        run: |
          pip install -r tests/cross_fw/examples/requirements.txt
      - name: Print installed modules
        run: pip list
      - name: Run install test scope
        run: pytest tests/cross_fw/install -rA -s --host-configuration cpu --backend ${{ matrix.backend }}

  install-torch-gpu:
    name: Test install [torch - ubuntu-gpu]
    defaults:
      run:
        shell: bash
    runs-on: aks-linux-4-cores-28gb-gpu-tesla-t4
    env:
      DEBIAN_FRONTEND: noninteractive
    steps:
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get --assume-yes install build-essential ninja-build libgl1-mesa-dev libglib2.0-0 wget make virtualenv
      - name: Download CUDA
        run: |
          wget -q https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run
          sudo sh cuda_12.4.0_550.54.14_linux.run --toolkit --silent
      - name: Runner info
        continue-on-error: true
        run: |
          export PATH=/usr/local/cuda-12.4/bin${PATH:+:${PATH}}
          export LD_LIBRARY_PATH=/usr/local/cuda-12.4/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
          nvidia-smi
          cat /proc/cpuinfo
          nvcc --version
      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
        with:
          lfs: true
      - name: Fetch and Checkout the Pull Request Branch
        if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.pull_request_number != '' }}
        run: |
          git fetch origin pull/${{ github.event.inputs.pull_request_number }}/head:pr-${{ github.event.inputs.pull_request_number }}
          git checkout pr-${{ github.event.inputs.pull_request_number }}
      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
        with:
          python-version: 3.10.14
          cache: pip
      - name: Install test requirements
        run: |
          pip install -r tests/cross_fw/examples/requirements.txt
      - name: Print installed modules
        run: pip list
      - name: Run install test scope
        run: |
          export PATH=/usr/local/cuda-12.4/bin${PATH:+:${PATH}}
          export LD_LIBRARY_PATH=/usr/local/cuda-12.4/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
          pytest tests/cross_fw/install -rA -s --host-configuration gpu --backend torch
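Note: the workflow_dispatch trigger above can also be invoked programmatically; the checkout steps then fetch and test the pull request given by the pull_request_number input. Below is a minimal sketch using GitHub's workflow-dispatch REST endpoint — the repository slug, ref, token source, and PR number are placeholders, not values taken from this PR.

import os

import requests  # assumes the third-party `requests` package is available

OWNER_REPO = "openvinotoolkit/nncf"  # placeholder repository slug
TOKEN = os.environ["GITHUB_TOKEN"]  # token with permission to run workflows

# POST /repos/{owner}/{repo}/actions/workflows/{workflow_file}/dispatches
resp = requests.post(
    f"https://api.github.com/repos/{OWNER_REPO}/actions/workflows/install.yml/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {TOKEN}",
    },
    json={
        "ref": "develop",  # branch whose workflow definition is used (placeholder)
        "inputs": {"pull_request_number": "1234"},  # placeholder PR number, passed as a string
    },
    timeout=30,
)
resp.raise_for_status()  # the API returns 204 No Content on success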
2 changes: 1 addition & 1 deletion tests/cross_fw/examples/test_examples.py
@@ -22,7 +22,7 @@
from tests.cross_fw.shared.helpers import create_venv_with_nncf
from tests.cross_fw.shared.helpers import get_pip_executable_with_venv
from tests.cross_fw.shared.helpers import get_python_executable_with_venv
from tests.cross_fw.shared.helpers import load_json
from tests.cross_fw.shared.json import load_json
from tests.cross_fw.shared.paths import PROJECT_ROOT
from tests.cross_fw.shared.paths import TEST_ROOT

70 changes: 13 additions & 57 deletions tests/cross_fw/install/test_install.py
@@ -17,9 +17,6 @@

import pytest

import nncf
from nncf.common.utils.os import is_linux
from nncf.common.utils.os import is_windows
from tests.cross_fw.install.conftest import TESTED_BACKENDS
from tests.cross_fw.shared.case_collection import skip_if_backend_not_selected
from tests.cross_fw.shared.helpers import create_venv_with_nncf
@@ -31,27 +28,9 @@

def run_install_checks(venv_path: Path, tmp_path: Path, package_type: str, backend: str, install_type: str):
    if install_type.lower() not in ["cpu", "gpu"]:
        raise nncf.ValidationError("Unknown installation mode - must be either 'cpu' or 'gpu'")
        raise ValueError("Unknown installation mode - must be either 'cpu' or 'gpu'")

    python_executable_with_venv = get_python_executable_with_venv(venv_path)
    pip_with_venv = get_pip_executable_with_venv(venv_path)

    if package_type in ["build_s", "build_w"]:
        # Do additional install step for sdist/bdist packages
        def find_file_by_extension(directory: Path, extension: str) -> str:
            for file_path in directory.iterdir():
                file_path_str = str(file_path)
                if file_path_str.endswith(extension):
                    return file_path_str
            raise FileNotFoundError("NNCF package not found")

        if package_type == "build_s":
            package_path = find_file_by_extension(PROJECT_ROOT / "dist", ".tar.gz")
        elif package_type == "build_w":
            package_path = find_file_by_extension(PROJECT_ROOT / "dist", ".whl")

        run_cmd_line = f"{pip_with_venv} install {package_path}"
        subprocess.run(run_cmd_line, check=True, shell=True)

    run_path = tmp_path / "run"
    install_checks_py_name = f"install_checks_{backend}.py"
@@ -69,16 +48,17 @@ def find_file_by_extension(directory: Path, extension: str) -> str:
    )


@pytest.fixture(name="venv_type", params=["virtualenv", "venv"])
def venv_type_(request):
@pytest.fixture(name="package_type", params=["pip_local", "pip_git_develop", "pip_pypi", "build_s", "build_w"])
def package_type_(request):
    return request.param


@pytest.fixture(
    name="package_type", params=["pip_local", "pip_e_local", "pip_git_develop", "pip_pypi", "build_s", "build_w"]
)
def package_type_(request):
    return request.param
@pytest.fixture
def removable_tmp_path(tmp_path: Path):
    # The default tmp_path is automatically removed after some time,
    # but we need to remove the venv after each test to avoid exceeding the space limit.
    yield tmp_path
    shutil.rmtree(tmp_path)


@pytest.fixture(name="backend_to_test")
@@ -96,9 +76,8 @@ def backend_to_test_(request, backend_clopt: List[str]):
class TestInstall:
    @staticmethod
    def test_install(
        tmp_path: Path,
        removable_tmp_path: Path,
        backend: str,
        venv_type: str,
        package_type: str,
        backend_clopt: List[str],
        host_configuration_clopt: str,
@@ -107,34 +86,11 @@ def test_install(
        skip_if_backend_not_selected(backend, backend_clopt)
        if "pypi" in package_type:
            pytest.xfail("Disabled until NNCF is exposed in a release")
        venv_path = create_venv_with_nncf(tmp_path, package_type, venv_type, {backend})
        venv_path = create_venv_with_nncf(removable_tmp_path, package_type, "venv", {backend})
        if ov_version_override is not None:
            pip_with_venv = get_pip_executable_with_venv(venv_path)
            ov_version_cmd_line = f"{pip_with_venv} install {ov_version_override}"
            subprocess.run(ov_version_cmd_line, check=True, shell=True)
        run_install_checks(venv_path, tmp_path, package_type, backend=backend, install_type=host_configuration_clopt)

    @staticmethod
    def test_install_with_tests_requirements(
        tmp_path: Path,
        backend: str,
        venv_type: str,
        package_type: str,
        backend_clopt: List[str],
        host_configuration_clopt: str,
    ):
        skip_if_backend_not_selected(backend, backend_clopt)
        if "pypi" in package_type:
            pytest.xfail("Disabled until NNCF is exposed in a release")
        venv_path = create_venv_with_nncf(tmp_path, package_type, venv_type, backends={backend})

        if is_linux():
            pip_with_venv = f". {venv_path}/bin/activate && {venv_path}/bin/pip"
        elif is_windows():
            pip_with_venv = f" {venv_path}\\Scripts\\activate && python -m pip"

        backend_name = "tensorflow" if backend == "tf" else backend
        subprocess.check_call(
            f"{pip_with_venv} install -r {PROJECT_ROOT}/tests/{backend_name}/requirements.txt", shell=True
        run_install_checks(
            venv_path, removable_tmp_path, package_type, backend=backend, install_type=host_configuration_clopt
        )
        run_install_checks(venv_path, tmp_path, package_type, backend=backend, install_type=host_configuration_clopt)
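Note: the new removable_tmp_path fixture relies on pytest's yield-fixture teardown: everything after the yield runs as soon as the test finishes, so the per-test venv is removed immediately instead of waiting for pytest's delayed tmp_path cleanup. A minimal standalone sketch of the same pattern follows; the heavy_dir fixture and the test are illustrative only, not part of this PR.

import shutil
from pathlib import Path

import pytest


@pytest.fixture
def heavy_dir(tmp_path: Path):
    # Setup phase: hand the built-in per-test tmp_path to the test.
    yield tmp_path
    # Teardown phase: runs right after the test, unlike pytest's own tmp_path
    # retention, which keeps the directories of the last few runs on disk.
    shutil.rmtree(tmp_path, ignore_errors=True)


def test_creates_large_artifact(heavy_dir: Path):
    (heavy_dir / "payload.bin").write_bytes(b"\0" * 1024)
    assert (heavy_dir / "payload.bin").exists()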
92 changes: 92 additions & 0 deletions tests/cross_fw/shared/comparator.py
@@ -0,0 +1,92 @@
# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC
from abc import abstractmethod
from typing import Callable, Dict, List, TypeVar, Union

import numpy as np

TensorType = TypeVar("TensorType")


class BaseTensorListComparator(ABC):
    @classmethod
    @abstractmethod
    def _to_numpy(cls, tensor: TensorType) -> np.ndarray:
        pass

    @classmethod
    def _check_assertion(
        cls,
        test: Union[TensorType, List[TensorType]],
        reference: Union[TensorType, List[TensorType]],
        assert_fn: Callable[[np.ndarray, np.ndarray], bool],
    ):
        if not isinstance(test, list):
            test = [test]
        if not isinstance(reference, list):
            reference = [reference]
        assert len(test) == len(reference)

        for x, y in zip(test, reference):
            x = cls._to_numpy(x)
            y = cls._to_numpy(y)
            assert_fn(x, y)

    @classmethod
    def check_equal(
        cls,
        test: Union[TensorType, List[TensorType]],
        reference: Union[TensorType, List[TensorType]],
        rtol: float = 1e-1,
        atol=0,
    ):
        cls._check_assertion(test, reference, lambda x, y: np.testing.assert_allclose(x, y, rtol=rtol, atol=atol))

    @classmethod
    def check_not_equal(
        cls,
        test: Union[TensorType, List[TensorType]],
        reference: Union[TensorType, List[TensorType]],
        rtol: float = 1e-4,
    ):
        cls._check_assertion(
            test,
            reference,
            lambda x, y: np.testing.assert_raises(AssertionError, np.testing.assert_allclose, x, y, rtol=rtol),
        )

    @classmethod
    def check_less(
        cls, test: Union[TensorType, List[TensorType]], reference: Union[TensorType, List[TensorType]], rtol=1e-4
    ):
        cls.check_not_equal(test, reference, rtol=rtol)
        cls._check_assertion(test, reference, np.testing.assert_array_less)

    @classmethod
    def check_greater(
        cls, test: Union[TensorType, List[TensorType]], reference: Union[TensorType, List[TensorType]], rtol=1e-4
    ):
        cls.check_not_equal(test, reference, rtol=rtol)
        cls._check_assertion(
            test, reference, lambda x, y: np.testing.assert_raises(AssertionError, np.testing.assert_array_less, x, y)
        )


def compare_stats(expected: Dict[str, np.ndarray], actual: Dict[str, np.ndarray]):
    assert len(expected) == len(actual)
    for ref_node_name, ref_stats in expected.items():
        actual_stats = actual[ref_node_name]
        for param_name, ref_param in ref_stats.items():
            actual_param = actual_stats.get(param_name)
            assert np.array(ref_param).shape == np.array(actual_param).shape
            assert np.allclose(ref_param, actual_param, atol=1e-5)
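Note: a brief usage sketch of the new comparator module follows. It is illustrative only: the NumpyComparator subclass is not part of this PR, and it assumes the file is importable as tests.cross_fw.shared.comparator.

import numpy as np

from tests.cross_fw.shared.comparator import BaseTensorListComparator
from tests.cross_fw.shared.comparator import compare_stats


class NumpyComparator(BaseTensorListComparator):
    """Concrete comparator for plain numpy inputs; only _to_numpy must be provided."""

    @classmethod
    def _to_numpy(cls, tensor) -> np.ndarray:
        return np.asarray(tensor)


# Single tensors or lists of tensors are accepted; non-list inputs are wrapped into one-element lists.
NumpyComparator.check_equal(np.ones(3) * 1.05, np.ones(3), rtol=1e-1)
NumpyComparator.check_not_equal(np.zeros(3), np.ones(3))
NumpyComparator.check_less([np.zeros(3)], [np.ones(3)])

# compare_stats checks that two {node_name: {stat_name: array}} dicts match in shape and value.
expected = {"conv_1": {"min": np.zeros((1, 3)), "max": np.ones((1, 3))}}
actual = {"conv_1": {"min": np.zeros((1, 3)), "max": np.ones((1, 3))}}
compare_stats(expected, actual)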