diff --git a/.github/actions/setup_python/action.yml b/.github/actions/setup_python/action.yml index d067842135cd9d..c076c5156da039 100644 --- a/.github/actions/setup_python/action.yml +++ b/.github/actions/setup_python/action.yml @@ -29,9 +29,9 @@ runs: run: apt-get update && apt-get install -y ca-certificates software-properties-common - if: ${{ runner.os == 'Linux' && runner.arch == 'ARM64' }} - name: Setup sudo + name: Setup sudo and python3 shell: bash - run: apt-get update && apt-get install -y sudo # Needed for the deadsnakes action + run: apt-get update && apt-get install -y sudo python3 # Needed for the deadsnakes action - if: ${{ runner.os == 'Linux' && runner.arch == 'ARM64' }} name: Setup Python ${{ inputs.version }} diff --git a/.github/workflows/android_arm64.yml b/.github/workflows/android_arm64.yml index 1b648d780268d3..33f9c7c97b56e5 100644 --- a/.github/workflows/android_arm64.yml +++ b/.github/workflows/android_arm64.yml @@ -45,7 +45,7 @@ jobs: container: image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 volumes: - - /mount/caches:/mount/caches + - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input @@ -169,3 +169,17 @@ jobs: - name: Show ccache stats run: ${SCCACHE_PATH} --show-stats + + Overall_Status: + name: ci/gha_overall_status_android + needs: [Smart_CI, Build] + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - name: Check status of all jobs + if: >- + ${{ + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') + }} + run: exit 1 diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index 8baa49922369cd..40e12d507cba54 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -25,7 +25,7 @@ jobs: packages: graphviz texlive liblua5.2-0 libclang1-9 libclang-cpp9 version: 3.0 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 id: cp310 with: python-version: '3.10' diff --git a/.github/workflows/cleanup_pip_cache.yml b/.github/workflows/cleanup_pip_cache.yml index cd66e1150c3ef1..355d0d68d7cf93 100644 --- a/.github/workflows/cleanup_pip_cache.yml +++ b/.github/workflows/cleanup_pip_cache.yml @@ -11,7 +11,7 @@ jobs: container: image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 volumes: - - /mount/caches:/mount/caches + - /mount:/mount env: PIP_CACHE_PATH: /mount/caches/pip diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d89666b898fa80..50114d986b23c6 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Setup python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.10.10' architecture: 'x64' diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml new file mode 100644 index 00000000000000..efcdb154c9dbd0 --- /dev/null +++ b/.github/workflows/coverity.yml @@ -0,0 +1,140 @@ +name: Coverity (Ubuntu 20.04, Python 3.11) +on: + schedule: + # run daily at 00:00 + - cron: '0 0 * * *' + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-linux-coverity + cancel-in-progress: true + +env: + PIP_CACHE_PATH: /mount/caches/pip/linux + PYTHON_VERSION: '3.11' + +jobs: + Build: + timeout-minutes: 150 + defaults: + run: + shell: bash + runs-on: aks-linux-16-cores-32gb + container: + image: 
openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + volumes: + - /mount/caches:/mount/caches + options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja Multi-Config' + CMAKE_CXX_COMPILER_LAUNCHER: sccache + CMAKE_C_COMPILER_LAUNCHER: sccache + GITHUB_WORKSPACE: '/__w/openvino/openvino' + OPENVINO_REPO: /__w/openvino/openvino/openvino + OPENVINO_CONTRIB_REPO: /__w/openvino/openvino/openvino_contrib + BUILD_DIR: /__w/openvino/openvino/openvino_build + SCCACHE_AZURE_KEY_PREFIX: coverity_ubuntu20_x86_64 + COVERITY_TOOL_DIR: /__w/openvino/openvino/coverity_tool + + steps: + - name: Install git + run: | + apt-get update + apt-get install --assume-yes --no-install-recommends git ca-certificates + + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: ${{ env.OPENVINO_REPO }} + submodules: 'true' + + - name: Clone OpenVINO Contrib + uses: actions/checkout@v4 + with: + repository: 'openvinotoolkit/openvino_contrib' + path: ${{ env.OPENVINO_CONTRIB_REPO }} + submodules: 'true' + ref: 'master' + + # + # Dependencies + # + + - name: Install build dependencies + run: | + bash ${OPENVINO_REPO}/install_build_dependencies.sh + # default-jdk - Java API + apt install --assume-yes --no-install-recommends default-jdk + + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - name: Setup Python ${{ env.PYTHON_VERSION }} + uses: ./openvino/.github/actions/setup_python + with: + version: ${{ env.PYTHON_VERSION }} + pip-cache-path: ${{ env.PIP_CACHE_PATH }} + should-setup-pip-paths: 'true' + self-hosted-runner: 'true' + + # + # Build + # + + - name: CMake configure - OpenVINO + run: | + cmake \ + -G "${{ env.CMAKE_GENERATOR }}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DENABLE_SYSTEM_TBB=ON \ + -DENABLE_SYSTEM_OPENCL=ON \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -DCPACK_GENERATOR=TGZ \ + -DBUILD_nvidia_plugin=OFF \ + -DOPENVINO_EXTRA_MODULES=${OPENVINO_CONTRIB_REPO}/modules \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -S ${OPENVINO_REPO} \ + -B ${BUILD_DIR} + + - name: Clean sccache stats + run: ${SCCACHE_PATH} --zero-stats + + - name: Install Coverity tool + run: | + rm -rf ${COVERITY_TOOL_DIR} && mkdir -p ${COVERITY_TOOL_DIR} + pushd ${COVERITY_TOOL_DIR} + wget https://scan.coverity.com/download/linux64 --progress=bar:force:noscroll --post-data "token=${{ secrets.COVERITY_TOKEN }}&project=openvino" -O coverity_tool.tgz + tar xvf coverity_tool.tgz && rm coverity_tool.tgz + popd + + - name: Cmake build - OpenVINO with Coverity + run: | + ${COVERITY_TOOL_DIR}/cov-analysis*/bin/cov-build --dir ${BUILD_DIR}/cov-int \ + cmake --build ${BUILD_DIR} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + + - name: Show sccache stats + run: ${SCCACHE_PATH} --show-stats + + - name: Pack Artefacts + run: | + pushd ${BUILD_DIR} + tar -C ${BUILD_DIR} -czvf openvino.tgz cov-int + popd + + - name: Submit artefacts + run: | + apt-get update && apt-get install -y curl + pushd ${BUILD_DIR} + curl --form token=${{ secrets.COVERITY_TOKEN }} \ + --form email=${{ secrets.COVERITY_USER }} \ + --form file=@openvino.tgz \ + --form version="${{ github.sha }}" \ + --form description="https://github.com/openvinotoolkit/openvino/runs/${{ github.run_number }}" \ + 
https://scan.coverity.com/builds?project=openvino + popd diff --git a/.github/workflows/fedora.yml b/.github/workflows/fedora.yml index 6308d07910ecac..19d32ef74e07c9 100644 --- a/.github/workflows/fedora.yml +++ b/.github/workflows/fedora.yml @@ -46,7 +46,7 @@ jobs: container: image: fedora:33 volumes: - - /mount/caches:/mount/caches + - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: CMAKE_BUILD_TYPE: 'Release' @@ -242,3 +242,17 @@ jobs: python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6' benchmark_app --help ovc --help + + Overall_Status: + name: ci/gha_overall_status_fedora + needs: [Smart_CI, Build, RPM_Packages] + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - name: Check status of all jobs + if: >- + ${{ + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') + }} + run: exit 1 diff --git a/.github/workflows/job_cpu_functional_tests.yml b/.github/workflows/job_cpu_functional_tests.yml new file mode 100644 index 00000000000000..b1f2e6bbf08b59 --- /dev/null +++ b/.github/workflows/job_cpu_functional_tests.yml @@ -0,0 +1,124 @@ +name: CPU functional tests + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + image: + description: 'Docker image in which the tests would run' + type: string + required: false + default: null + +jobs: + CPU_Functional_Tests: + name: CPU functional tests + timeout-minutes: 25 + runs-on: ${{ inputs.runner }} + container: + image: ${{ inputs.image }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + PARALLEL_TEST_SCRIPT: ${{ github.workspace }}/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py + PARALLEL_TEST_CACHE: ${{ github.workspace }}/install/tests/test_cache.lst + steps: + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "PARALLEL_TEST_SCRIPT=$GITHUB_WORKSPACE/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py" >> "$GITHUB_ENV" + echo "PARALLEL_TEST_CACHE=$GITHUB_WORKSPACE/install/tests/test_cache.lst" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd $INSTALL_DIR + tar -xzf openvino_package.tar.gz -C $INSTALL_DIR + popd + + pushd $INSTALL_TEST_DIR + tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR + popd + + - name: Install OpenVINO dependencies (Linux) + if: runner.os == 'Linux' + run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -c=gpu -y + + - name: Fetch setup_python action + uses: actions/checkout@v4 + with: + sparse-checkout: | + .github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Setup Python 3.11 + uses: ./openvino/.github/actions/setup_python + with: + version: '3.11' + should-setup-pip-paths: 'false' + 
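+          # Assumption (not stated in this diff): 'self-hosted-runner' switches the
+          # composite action to its deadsnakes-based install path; the setup_python
+          # action diff above adds sudo and python3 on Linux ARM64 for that path.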
self-hosted-runner: ${{ runner.os == 'Linux' }}
+
+      - name: Install python dependencies for run_parallel.py
+        run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt
+
+      - name: Restore tests execution time
+        uses: actions/cache/restore@v3
+        with:
+          path: ${{ env.PARALLEL_TEST_CACHE }}
+          key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp
+
+      - name: Intel CPU plugin func tests (parallel)
+        run: |
+          # Needed as the Linux CC does not require setupvars to work
+          if [[ -f "${INSTALL_DIR}/setupvars.sh" ]]; then
+            source ${INSTALL_DIR}/setupvars.sh
+          fi
+
+          python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
+        timeout-minutes: 20
+
+      - name: Save tests execution time
+        uses: actions/cache/save@v3
+        if: github.ref_name == 'master'
+        with:
+          path: ${{ env.PARALLEL_TEST_CACHE }}
+          key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }}
+
+      - name: Upload Test Results
+        uses: actions/upload-artifact@v3
+        if: ${{ !cancelled() }}
+        with:
+          name: test-results-functional-cpu
+          path: |
+            ${{ env.INSTALL_TEST_DIR }}/temp/*.log
+            ${{ env.INSTALL_TEST_DIR }}/logs/*.log
+            ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log
+            ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log
+            ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log
+            ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log
+            ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv
+            ${{ env.PARALLEL_TEST_CACHE }}
+          if-no-files-found: 'error'
diff --git a/.github/workflows/job_cxx_unit_tests.yml b/.github/workflows/job_cxx_unit_tests.yml
new file mode 100644
index 00000000000000..bc6157d3478a8c
--- /dev/null
+++ b/.github/workflows/job_cxx_unit_tests.yml
@@ -0,0 +1,279 @@
+name: C++ unit tests
+
+on:
+  workflow_call:
+    inputs:
+      runner:
+        description: 'Machine on which the tests would run'
+        type: string
+        required: true
+      image:
+        description: 'Docker image in which the tests would run'
+        type: string
+        required: false
+        default: null
+      affected-components:
+        description: 'Components that are affected by changes in the commit defined by the Smart CI Action'
+        type: string
+        required: true
+
+jobs:
+  CXX_Unit_Tests:
+    name: C++ unit tests
+    timeout-minutes: 30
+    runs-on: ${{ inputs.runner }}
+    container:
+      image: ${{ inputs.image }}
+    defaults:
+      run:
+        shell: bash
+    env:
+      DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting for user input
+      INSTALL_DIR: ${{ github.workspace }}/install
+      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
+    steps:
+      - name: Download OpenVINO package
+        uses: actions/download-artifact@v3
+        with:
+          name: openvino_package
+          path: ${{ env.INSTALL_DIR }}
+
+      - name: Download OpenVINO tests package
+        uses: actions/download-artifact@v3
+        with:
+          name: openvino_tests
+          path: ${{ env.INSTALL_TEST_DIR }}
+
+      # Needed as ${{ github.workspace }} is not working correctly when using Docker
+      - name: Setup Variables
+        run: |
+          echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
+          echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
+
+      - name: Extract OpenVINO packages
+        run: |
+          pushd $INSTALL_DIR
+            tar -xzf openvino_package.tar.gz -C $INSTALL_DIR
+          popd
+          pushd $INSTALL_TEST_DIR
+            tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR
+          popd
+
+      - name: Install OpenVINO dependencies (Linux)
+        if: runner.os == 'Linux'
+        run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -c=gpu -y
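+
+      # 'affected-components' is the JSON emitted by the Smart CI action; each step below
+      # gates on its component's 'test' flag via fromJSON(). A minimal sketch of the
+      # assumed shape, with illustrative component names taken from the conditions below:
+      #   {"Core": {"test": true}, "inference": {"test": true}, "ONNX_FE": {"test": false}}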
+
+      #
+      # Tests
+      #
+
+      - name: OpenVINO Core Unit Tests
+        if: fromJSON(inputs.affected-components).Core.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVCoreUT.xml
+
+      - name: OpenVINO Inference Functional Tests
+        if: fromJSON(inputs.affected-components).inference.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_inference_functional_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceFunc.xml
+
+      - name: OpenVINO Inference Unit Tests
+        if: fromJSON(inputs.affected-components).inference.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_inference_unit_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceUnit.xml
+
+      - name: Low Precision Transformations Tests
+        if: fromJSON(inputs.affected-components).LP_transformations.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+
+          ${INSTALL_TEST_DIR}/ov_lp_transformations_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LpTransformations.xml
+
+      - name: OpenVINO Conditional compilation tests
+        if: fromJSON(inputs.affected-components).Core.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_conditional_compilation_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ConditionalCompilation.xml
+
+      - name: IR frontend tests
+        if: fromJSON(inputs.affected-components).IR_FE.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_ir_frontend_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml
+
+      - name: PaddlePaddle frontend tests
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-PaddleTests.xml
+
+      - name: ONNX frontend tests
+        if: ${{ fromJSON(inputs.affected-components).ONNX_FE.test && runner.arch != 'ARM64' }} # Ticket for macOS ARM64: 122663, for Linux ARM64: 126280
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_onnx_frontend_tests --gtest_print_time=1 \
+                --gtest_filter=-*IE_GPU* \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ONNXFrontend.xml
+
+      - name: TensorFlow Common frontend tests
+        if: (fromJSON(inputs.affected-components).TF_FE.test ||
+            fromJSON(inputs.affected-components).TFL_FE.test) &&
+            (runner.os != 'macOS' && runner.arch != 'ARM64')
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_tensorflow_common_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowCommonFrontend.xml
+
+      - name: TensorFlow frontend tests
+        if: fromJSON(inputs.affected-components).TF_FE.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+
+          ${INSTALL_TEST_DIR}/ov_tensorflow_frontend_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowFrontend.xml
+
+      - name: TensorFlow Lite frontend tests
+        if: fromJSON(inputs.affected-components).TFL_FE.test
+        run: |
+          source ${INSTALL_DIR}/setupvars.sh
+          ${INSTALL_TEST_DIR}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \
+                --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowLiteFrontend.xml
+
+      - name: Transformations func tests
+        if: ${{ fromJSON(inputs.affected-components).transformations.test && runner.arch != 
'ARM64' }} # Ticket: 126281 + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml + + - name: Legacy Transformations func tests + if: fromJSON(inputs.affected-components).GNA.test && + (runner.os != 'macOS' && runner.arch != 'ARM64') + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_legacy_transformations_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LegacyTransformations.xml + + - name: Inference Engine 1.0 unit tests + if: fromJSON(inputs.affected-components).GNA.test && + (runner.os != 'macOS' && runner.arch != 'ARM64') + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/InferenceEngineUnitTests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineUnitTests.xml + + - name: Common test utils tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CommonUtilTests.xml + + - name: Snippets func tests + if: fromJSON(inputs.affected-components).CPU.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_snippets_func_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SnippetsFuncTests.xml + + - name: CPU plugin unit tests + if: fromJSON(inputs.affected-components).CPU.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_cpu_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CPUUnitTests.xml + + - name: ov_subgraphs_dumper_tests tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_subgraphs_dumper_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml + + - name: Template OpImpl tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter=*OpImpl*\ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpImplTests.xml + + - name: AUTO unit tests + if: fromJSON(inputs.affected-components).AUTO.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml + + - name: AUTO func Tests + if: fromJSON(inputs.affected-components).AUTO.test + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \ + --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml + + - name: Template plugin func tests + if: fromJSON(inputs.affected-components).TEMPLATE.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_template_func_tests --gtest_print_time=1 \ + --gtest_filter=*smoke* \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml + + - name: Inference Engine C API tests + if: fromJSON(inputs.affected-components).C_API.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/InferenceEngineCAPITests --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineCAPITests.xml + + - name: OpenVINO C API tests + if: fromJSON(inputs.affected-components).C_API.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_capi_test --gtest_print_time=1 \ + --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpenVINOCAPITests.xml + + - name: AutoBatch unit tests + if: 
fromJSON(inputs.affected-components).AUTO_BATCH.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_batch_unit_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_unit_tests.xml + + - name: AutoBatch func tests + if: fromJSON(inputs.affected-components).AUTO_BATCH.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_auto_batch_func_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_func_tests.xml + + - name: Proxy Plugin func tests + if: fromJSON(inputs.affected-components).PROXY.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVProxyTests.xml + + - name: Hetero unit tests + if: fromJSON(inputs.affected-components).HETERO.test + run: | + source ${{ env.INSTALL_DIR }}/setupvars.sh + ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml + + - name: Hetero func tests + if: fromJSON(inputs.affected-components).HETERO.test + run: | + source ${INSTALL_DIR}/setupvars.sh + ${INSTALL_TEST_DIR}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVHeteroFuncTests.xml + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-cpp + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'warn' diff --git a/.github/workflows/job_debian_packages.yml b/.github/workflows/job_debian_packages.yml new file mode 100644 index 00000000000000..f063a7734b8aec --- /dev/null +++ b/.github/workflows/job_debian_packages.yml @@ -0,0 +1,83 @@ +name: Debian Packages + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + image: + description: 'Docker image in which the tests would run' + type: string + required: false + default: null + +jobs: + Debian_Packages: + name: Debian Packages + runs-on: ${{ inputs.runner }} + container: + image: ${{ inputs.image }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + DEBIAN_PACKAGES_DIR: ${{ github.workspace }}/packages + steps: + + - name: Download OpenVINO debian packages + uses: actions/download-artifact@v3 + with: + name: openvino_debian_packages + path: ${{ env.DEBIAN_PACKAGES_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: echo "DEBIAN_PACKAGES_DIR=$GITHUB_WORKSPACE/packages" >> "$GITHUB_ENV" + + - name: Install debian packages & check conflicts + run: | + apt-get update -y + + if [[ "${{ runner.arch }}" == "X64" ]]; then + # Install debian packages from previous release + apt-get install --no-install-recommends -y gnupg wget ca-certificates + wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | tee /etc/apt/sources.list.d/intel-openvino-2023.list + apt-get update -y + apt-get install -y openvino + fi + + # install our local one and make sure the conflicts are resolved + apt-get install --no-install-recommends -y dpkg-dev + dpkg-scanpackages . 
/dev/null | gzip -9c > Packages.gz + echo "deb [trusted=yes] file:${DEBIAN_PACKAGES_DIR} ./" | tee /etc/apt/sources.list.d/openvino-local.list + apt-get update -y + apt-get install openvino -y + working-directory: ${{ env.DEBIAN_PACKAGES_DIR }} + + - name: Test debian packages + run: | + /usr/share/openvino/samples/cpp/build_samples.sh + /usr/share/openvino/samples/c/build_samples.sh + + [[ "${{ runner.arch }}" == "X64" ]] && path_by_arch="intel64" || path_by_arch="aarch64" + ~/openvino_cpp_samples_build/$path_by_arch/Release/hello_query_device + + python3 /usr/share/openvino/samples/python/hello_query_device/hello_query_device.py + python3 -c 'from openvino import Core; Core().get_property("CPU", "AVAILABLE_DEVICES")' + + if [[ "${{ runner.arch }}" == "X64" ]]; then + python3 -c 'from openvino import Core; Core().get_property("GPU", "AVAILABLE_DEVICES")' + fi + + python3 -c 'from openvino import Core; Core().get_property("AUTO", "SUPPORTED_METRICS")' + python3 -c 'from openvino import Core; Core().get_property("MULTI", "SUPPORTED_METRICS")' + python3 -c 'from openvino import Core; Core().get_property("HETERO", "SUPPORTED_METRICS")' + python3 -c 'from openvino import Core; Core().get_property("BATCH", "SUPPORTED_METRICS")' + python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6' + benchmark_app --help + ovc --help diff --git a/.github/workflows/job_onnx_runtime.yml b/.github/workflows/job_onnx_runtime.yml new file mode 100644 index 00000000000000..5a6f5cb27eceae --- /dev/null +++ b/.github/workflows/job_onnx_runtime.yml @@ -0,0 +1,157 @@ +name: ONNX Runtime Integration + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + container: + description: 'JSON to be converted to the value of the "container" configuration for the job' + type: string + required: false + default: '{"image": null}' + sccache-azure-key-prefix: + description: 'Key prefix for the cache folder on the Azure' + type: string + required: true + +jobs: + ONNX_Runtime: + name: ONNX Runtime Integration + timeout-minutes: 60 + runs-on: ${{ inputs.runner }} + container: ${{ fromJSON(inputs.container) }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + OPENVINO_REPO: ${{ github.workspace }}/openvino + INSTALL_DIR: ${{ github.workspace }}/install + CMAKE_GENERATOR: 'Ninja Multi-Config' + CMAKE_CXX_COMPILER_LAUNCHER: sccache + CMAKE_C_COMPILER_LAUNCHER: sccache + SCCACHE_AZURE_KEY_PREFIX: ${{ inputs.sccache-azure-key-prefix }} + ONNX_RUNTIME_REPO: ${{ github.workspace }}/onnxruntime + ONNX_RUNTIME_UTILS: ${{ github.workspace }}/install/onnxruntime + ONNX_RUNTIME_BUILD_DIR: ${{ github.workspace }}/onnxruntime/build + steps: + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV" + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "ONNX_RUNTIME_REPO=$GITHUB_WORKSPACE/onnxruntime" >> "$GITHUB_ENV" + echo "ONNX_RUNTIME_UTILS=$GITHUB_WORKSPACE/install/onnxruntime" >> "$GITHUB_ENV" + echo "ONNX_RUNTIME_BUILD_DIR=$GITHUB_WORKSPACE/onnxruntime/build" >> "$GITHUB_ENV" + + - name: Fetch install_build_dependencies.sh and setup_python action + uses: 
actions/checkout@v4 + with: + sparse-checkout: | + install_build_dependencies.sh + .github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Install git + run: | + apt-get update + apt-get install --assume-yes --no-install-recommends git ca-certificates + + - name: Setup Python ${{ env.PYTHON_VERSION }} + uses: ./openvino/.github/actions/setup_python + with: + version: '3.11' + should-setup-pip-paths: 'false' + + - name: Extract OpenVINO package + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + + - name: Install OpenVINO dependencies + run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + + - name: Clone ONNX Runtime + run: | + branch=`tr -s '\n ' < ${ONNX_RUNTIME_UTILS}/version` + git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${ONNX_RUNTIME_REPO} + + # + # Tests + # + + - name: Install Build Dependencies + run: bash ${OPENVINO_REPO}/install_build_dependencies.sh + + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + + - name: Build Lin ONNX Runtime + run: | + source ${INSTALL_DIR}/setupvars.sh + + ${ONNX_RUNTIME_REPO}/build.sh \ + --config RelWithDebInfo \ + --use_openvino CPU_FP32 \ + --build_shared_lib \ + --parallel \ + --skip_tests \ + --compile_no_warning_as_error \ + --build_dir ${ONNX_RUNTIME_BUILD_DIR} + env: + CXXFLAGS: "-Wno-error=deprecated-declarations" + + - name: Show sccache stats + run: ${SCCACHE_PATH} --show-stats + + - name: Run onnxruntime_test_all + if: ${{ runner.arch != 'ARM64' }} # Ticket: 126277 + run: | + source ${INSTALL_DIR}/setupvars.sh + skip_tests=$(tr -s '\n ' ':' < ${ONNX_RUNTIME_UTILS}/skip_tests) + + ./onnxruntime_test_all --gtest_filter=-$skip_tests + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run onnxruntime_shared_lib_test + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run onnxruntime_global_thread_pools_test + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnxruntime_global_thread_pools_test + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run onnxruntime_api_tests_without_env + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnxruntime_api_tests_without_env + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run pytorch-converted tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + + - name: Run pytorch-operator tests + run: | + source ${INSTALL_DIR}/setupvars.sh + ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" + working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo diff --git a/.github/workflows/job_python_unit_tests.yml b/.github/workflows/job_python_unit_tests.yml new file mode 100644 index 00000000000000..38b35d17623f38 --- /dev/null +++ b/.github/workflows/job_python_unit_tests.yml @@ -0,0 +1,323 @@ +name: Python unit tests + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests 
would run' + type: string + required: true + container: + description: 'JSON to be converted to the value of the "container" configuration for the job' + type: string + required: false + default: '{"image": null}' + affected-components: + description: 'Components that are affected by changes in the commit defined by the Smart CI Action' + type: string + required: true + +env: + PIP_CACHE_PATH: /mount/caches/pip/linux + PYTHON_VERSION: '3.11' + +jobs: + Python_Unit_Tests: + name: Python unit tests + timeout-minutes: 60 + runs-on: ${{ inputs.runner }} + container: ${{ fromJSON(inputs.container) }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + OPENVINO_REPO: ${{ github.workspace }}/openvino + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests + steps: + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV" + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "LAYER_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/layer_tests" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd $INSTALL_DIR + tar -xzf openvino_package.tar.gz -C $INSTALL_DIR + popd + pushd $INSTALL_TEST_DIR + tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR + popd + + - name: Install OpenVINO dependencies (Linux) + if: runner.os == 'Linux' + run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + + - name: Fetch setup_python action + uses: actions/checkout@v4 + with: + sparse-checkout: | + .github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Setup Python ${{ env.PYTHON_VERSION }} + uses: ./openvino/.github/actions/setup_python + with: + version: ${{ env.PYTHON_VERSION }} + pip-cache-path: ${{ runner.os == 'Linux' && env.PIP_CACHE_PATH || '' }} + should-setup-pip-paths: ${{ runner.os == 'Linux' }} + self-hosted-runner: ${{ runner.os == 'Linux' }} + + # + # Tests + # + + - name: Install OpenVINO Python wheels + run: | + # Install the core OV wheel + python3 -m pip install ${INSTALL_DIR}/tools/openvino-*.whl + + extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch" + + if [[ "${{ runner.arch }}" != "ARM64" ]]; then + extras_to_install="mxnet,$extras_to_install" + fi + + # Find and install OV dev wheel + pushd ${INSTALL_DIR}/tools + ov_dev_wheel_name=$(find . 
-name 'openvino_dev*.whl')
+          python3 -m pip install $ov_dev_wheel_name[$extras_to_install]
+          popd
+
+      - name: Install Python API tests dependencies
+        run: |
+          # To enable pytest parallel features
+          python3 -m pip install pytest-xdist[psutil]
+          # For torchvision to OpenVINO preprocessing converter
+          python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt
+
+          # TODO: replace with Python API tests requirements
+          python3 -m pip install -r ${INSTALL_TEST_DIR}/mo/requirements_dev.txt
+
+      #
+      # Tests
+      #
+
+      - name: Python API 1.0 Tests
+        # if: fromJSON(inputs.affected-components).Python_API.test # Ticket: 127101
+        run: |
+          python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \
+            --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \
+            --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \
+            --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py
+
+      - name: Python API 2.0 Tests
+        # if: ${{ fromJSON(inputs.affected-components).Python_API.test && runner.arch != 'ARM64' }} # Ticket: 126380, 127101
+        run: |
+          # for 'template' extension
+          export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH
+          python3 -m pytest -sv ${INSTALL_TEST_DIR}/pyopenvino \
+            --junitxml=${INSTALL_TEST_DIR}/TEST-Pyopenvino.xml \
+            --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py
+
+      - name: Model Optimizer unit tests
+        if: fromJSON(inputs.affected-components).MO.test
+        run: |
+          skip_filter=''
+          if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" != "ARM64" ]]; then
+            # required for MxNet
+            apt-get install -y libgomp1 libquadmath0
+          else
+            # Skips under Ticket: 122666
+            skip_filter='--ignore-glob=**/mo/unit_tests/mo/front/mxnet/**'
+          fi
+
+          python3 -m pytest -s ${INSTALL_TEST_DIR}/mo/unit_tests \
+              --junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml \
+              ${skip_filter:+"$skip_filter"}
+
+      - name: Python ONNX operators tests
+        if: (fromJSON(inputs.affected-components).Python_API.test ||
+            fromJSON(inputs.affected-components).ONNX_FE.test) && runner.os != 'macOS' # Ticket: 123325
+        run: |
+          # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately
+          python3 -m pytest -sv ${INSTALL_TEST_DIR}/onnx -k 'not cuda' \
+            --junitxml=${INSTALL_TEST_DIR}/TEST-onnx_frontend.xml \
+            --ignore=${INSTALL_TEST_DIR}/onnx/test_python/test_zoo_models.py
+
+      - name: OVC unit tests
+        if: fromJSON(inputs.affected-components).MO.test
+        run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml
+
+      - name: Install Python Layer tests dependencies
+        run: |
+          # layer test requirements
+          python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt
+
+      - name: MO Python API Tests
+        if: fromJSON(inputs.affected-components).MO.test
+        run: |
+          # Import 'test_utils' installed in '/tests/python/openvino'
+          export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
+          export PYTHONPATH=${INSTALL_TEST_DIR}/python
+
+          if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" == "ARM64" ]]; then
+            # Find gomp lib
+            GOMP_LIB=$(find "${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/" -name '*libgomp-*so*')
+            export LD_PRELOAD=${GOMP_LIB}
+          fi
+
+          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/mo_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_mo_convert.xml
+        env:
+          TEST_DEVICE: CPU
+          TEST_PRECISION: FP16
+
+      - name: OVC Python API Tests
+        if: 
fromJSON(inputs.affected-components).MO.test + run: | + # Import 'test_utils' installed in '/tests/python/openvino' + export PYTHONPATH=${INSTALL_TEST_DIR}/python + export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH + + if [[ "${{ runner.os }}" == "Linux" ]] && [[ "${{ runner.arch }}" == "ARM64" ]]; then + # Find gomp lib + GOMP_LIB=$(find "${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/" -name '*libgomp-*so*') + export LD_PRELOAD=${GOMP_LIB} + fi + + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/ovc_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_ovc_convert.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: Python Frontend tests + if: fromJSON(inputs.affected-components).PyTorch_FE.test || + fromJSON(inputs.affected-components).PDPD_FE.test + run: | + # to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so' + export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml + + - name: PyTorch Layer Tests + if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.arch != 'ARM64' }} # Ticket: 126287 + run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP32 + + - name: PyTorch torch.compile TORCHFX Layer Tests + if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.os != 'macOS' }} + run: | + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_fx_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP32 + PYTORCH_TRACING_MODE: TORCHFX + + - name: PyTorch torch.compile TORCHSCRIPT Layer Tests + if: ${{ fromJSON(inputs.affected-components).PyTorch_FE.test && runner.os != 'macOS' }} + run: | + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_ts_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP32 + PYTORCH_TRACING_MODE: TORCHSCRIPT + + - name: ONNX Layer Tests + if: fromJSON(inputs.affected-components).ONNX_FE.test + run: | + # requires 'unit_tests' from 'tools/mo' + export PYTHONPATH=${INSTALL_TEST_DIR}/mo:$PYTHONPATH + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: TensorFlow 1 Layer Tests - TF FE + if: fromJSON(inputs.affected-components).TF_FE.test + run: | + # requires 'unit_tests' from 'mo' + export PYTHONPATH=${INSTALL_TEST_DIR}/mo + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: TensorFlow 2 Layer Tests - TF FE + if: fromJSON(inputs.affected-components).TF_FE.test && runner.os != 'macOS' # Ticket: 123322 + run: | + # requires 'unit_tests' from 'mo' + export PYTHONPATH=${INSTALL_TEST_DIR}/mo + python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: JAX Layer Tests - TF FE + if: ${{ fromJSON(inputs.affected-components).TF_FE.test && runner.arch != 'ARM64' }} + run: python3 -m pytest 
${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-jax.xml + env: + TEST_DEVICE: CPU + + - name: TensorFlow 1 Layer Tests - Legacy FE + if: fromJSON(inputs.affected-components).TF_FE.test + run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${INSTALL_TEST_DIR}/TEST-tf_Roll.xml + + - name: TensorFlow 2 Layer Tests - Legacy FE + if: fromJSON(inputs.affected-components).TF_FE.test + run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/test_tf2_keras_activation.py --ir_version=11 -k "sigmoid" --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_Activation.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: TensorFlow Lite Layer Tests - TFL FE + if: fromJSON(inputs.affected-components).TFL_FE.test + run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_lite_tests/ --junitxml=${INSTALL_TEST_DIR}/TEST-tfl_fe.xml + env: + TEST_DEVICE: CPU + TEST_PRECISION: FP16 + + - name: Clone API snippets + if: runner.os != 'macOS' + uses: actions/checkout@v4 + with: + sparse-checkout: openvino/docs/snippets + path: ${{ env.OPENVINO_REPO }} + submodules: 'false' + + - name: Docs Python snippets + if: runner.os != 'macOS' + run: | + # to find 'snippets' module in docs + export PYTHONPATH=${OPENVINO_REPO}/docs + # for 'template' extension + export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH + python3 ${OPENVINO_REPO}/docs/snippets/main.py + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-python + path: | + ${{ env.INSTALL_TEST_DIR }}/TEST*.html + ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'warn' diff --git a/.github/workflows/job_pytorch_models_tests.yml b/.github/workflows/job_pytorch_models_tests.yml new file mode 100644 index 00000000000000..8904b2212e9e33 --- /dev/null +++ b/.github/workflows/job_pytorch_models_tests.yml @@ -0,0 +1,132 @@ +name: PyTorch Models tests + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + container: + description: 'JSON to be converted to the value of the "container" configuration for the job' + type: string + required: false + default: '{"image": null}' + event: + description: 'Event that triggered the workflow. 
E.g., "schedule" for nightly runs' + type: string + required: true + +jobs: + PyTorch_Models_Tests: + name: PyTorch Models tests + timeout-minutes: ${{ inputs.event == 'schedule' && 400 || 30 }} + runs-on: ${{ inputs.runner }} + container: ${{ fromJSON(inputs.container) }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + OPENVINO_REPO: ${{ github.workspace }}/openvino + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests + steps: + + - name: Check sudo + if: ${{ runner.os == 'Linux' }} + run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV" + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "MODEL_HUB_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/model_hub_tests" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + + pushd ${INSTALL_TEST_DIR} + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} + popd + + - name: Fetch setup_python action + uses: actions/checkout@v4 + with: + sparse-checkout: | + .github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Install dependencies + if: ${{ runner.os == 'Linux' }} + run: | + # install git (required to build pip deps from the sources) + # install 'g++' to build 'detectron2' and 'natten' wheels + sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates + + - name: Setup Python 3.11 + uses: ./openvino/.github/actions/setup_python + with: + version: '3.11' + should-setup-pip-paths: 'false' + self-hosted-runner: ${{ contains(inputs.runner, 'aks') }} + + - name: Install OpenVINO Python wheels + run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-* + + - name: Install PyTorch tests requirements + run: | + python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements.txt + python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements_secondary.txt + echo "Available storage:" + df -h + env: + CPLUS_INCLUDE_PATH: ${{ env.Python_ROOT_DIR }}/include/python${{ env.PYTHON_VERSION }} + + - name: PyTorch Models Tests + run: | + export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH + python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v + env: + TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}} + TEST_DEVICE: CPU + USE_SYSTEM_CACHE: False + OP_REPORT_FILE: ${{ env.INSTALL_TEST_DIR }}/TEST-torch_unsupported_ops.log + + - name: Reformat unsupported ops file + if: '!cancelled()' + run: | + python3 ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/scripts/process_op_report.py ${INSTALL_TEST_DIR}/TEST-torch_unsupported_ops.log + + - name: Available 
storage after tests + run: | + echo "Available storage:" + df -h + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-torch-models + path: | + ${{ env.INSTALL_TEST_DIR }}/TEST-torch* + if-no-files-found: 'error' diff --git a/.github/workflows/job_samples_tests.yml b/.github/workflows/job_samples_tests.yml new file mode 100644 index 00000000000000..8c1401a819d845 --- /dev/null +++ b/.github/workflows/job_samples_tests.yml @@ -0,0 +1,132 @@ +name: Samples + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + image: + description: 'Docker image in which the tests would run' + type: string + required: false + default: null + affected-components: + description: 'Components that are affected by changes in the commit defined by the Smart CI Action' + type: string + required: true + +jobs: + Samples: + runs-on: ${{ inputs.runner }} + container: + image: ${{ inputs.image }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + BUILD_DIR: ${{ github.workspace }}/build + steps: + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "BUILD_DIR=$GITHUB_WORKSPACE/build" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd $INSTALL_DIR + tar -xzf openvino_package.tar.gz -C $INSTALL_DIR + popd + pushd $INSTALL_TEST_DIR + tar -xzf openvino_tests.tar.gz -C $INSTALL_DIR + popd + + - name: Install OpenVINO dependencies (Linux) + if: runner.os == 'Linux' + run: $INSTALL_DIR/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y + + - name: Install OpenVINO dependencies (mac) + if: runner.os == 'macOS' + run: brew install coreutils + + - name: Fetch setup_python action + uses: actions/checkout@v4 + with: + sparse-checkout: | + .github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Setup Python 3.11 + uses: ./openvino/.github/actions/setup_python + with: + version: '3.11' + should-setup-pip-paths: 'false' + self-hosted-runner: ${{ runner.os == 'Linux' }} + + - name: Build cpp samples - GCC + run: $INSTALL_DIR/samples/cpp/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/cpp_samples + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + + - name: Build cpp samples - Clang + if: runner.os == 'Linux' + run: | + apt-get install -y clang + $INSTALL_DIR/samples/cpp/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/cpp_samples_clang + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + CC: clang + CXX: clang++ + + - name: Build c samples + run: $INSTALL_DIR/samples/c/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/c_samples + env: + CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' + + # + # Tests + # + + - name: Samples tests + if: fromJSON(inputs.affected-components).samples.test + run: | + export WORKSPACE=$INSTALL_DIR + export IE_APP_PATH=$INSTALL_DIR/samples_bin + 
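+          # (assumption: these variables, together with SHARE below, are how the smoke
+          #  tests resolve sample locations, via the env_config.yml passed to pytest)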
export IE_APP_PYTHON_PATH=$INSTALL_DIR/samples/python + export SHARE=$INSTALL_TEST_DIR/smoke_tests/samples_smoke_tests_data + + python3 -m pip install --ignore-installed PyYAML -r $INSTALL_TEST_DIR/smoke_tests/requirements.txt + export LD_LIBRARY_PATH=${IE_APP_PATH}:$LD_LIBRARY_PATH + + source ${INSTALL_DIR}/setupvars.sh + + python3 -m pytest -sv $INSTALL_TEST_DIR/smoke_tests \ + --env_conf $INSTALL_TEST_DIR/smoke_tests/env_config.yml \ + --junitxml=$INSTALL_TEST_DIR/TEST-SamplesSmokeTests.xml + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-samples + path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml + if-no-files-found: 'warn' diff --git a/.github/workflows/job_tensorflow_hub_models_tests.yml b/.github/workflows/job_tensorflow_hub_models_tests.yml new file mode 100644 index 00000000000000..6dcecaa267e8c4 --- /dev/null +++ b/.github/workflows/job_tensorflow_hub_models_tests.yml @@ -0,0 +1,113 @@ +name: TensorFlow Hub Models tests + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + container: + description: 'JSON to be converted to the value of the "container" configuration for the job' + type: string + required: false + default: '{"image": null}' + event: + description: 'Event that triggered the workflow. E.g., "schedule" for nightly runs' + type: string + required: true + +jobs: + TensorFlow_Hub_Models_Tests: + name: TensorFlow Hub Models tests + timeout-minutes: ${{ inputs.event == 'schedule' && 400 || 25 }} + runs-on: ${{ inputs.runner }} + container: ${{ fromJSON(inputs.container) }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + OPENVINO_REPO: ${{ github.workspace }}/openvino + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests + steps: + + - name: Check sudo + if: ${{ runner.os == 'Linux' }} + run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV" + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "MODEL_HUB_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/model_hub_tests" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + + pushd ${INSTALL_TEST_DIR} + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} + popd + + - name: Fetch setup_python action + uses: actions/checkout@v4 + with: + sparse-checkout: | + .github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Install dependencies + if: ${{ runner.os == 'Linux' }} + run: | + # install git (required to build pip deps from the sources) + sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates + + - name: Setup Python 3.11 + uses: 
./openvino/.github/actions/setup_python + with: + version: '3.11' + should-setup-pip-paths: 'false' + self-hosted-runner: ${{ contains(inputs.runner, 'aks') }} + + - name: Install OpenVINO Python wheels + run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-* + + - name: Install TF Hub tests requirements + run: python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt + + - name: TensorFlow Hub Tests - TF FE + run: | + export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH + python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_tf_fe.html --self-contained-html -v + env: + TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}} + TEST_DEVICE: CPU + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-tensorflow-hub-models + path: | + ${{ env.INSTALL_TEST_DIR }}/TEST*.html + if-no-files-found: 'error' diff --git a/.github/workflows/job_tensorflow_hub_performance_models_tests.yml b/.github/workflows/job_tensorflow_hub_performance_models_tests.yml new file mode 100644 index 00000000000000..6e00f25e0c0ebb --- /dev/null +++ b/.github/workflows/job_tensorflow_hub_performance_models_tests.yml @@ -0,0 +1,116 @@ +name: TensorFlow Hub Performance Models tests + +on: + workflow_call: + inputs: + runner: + description: 'Machine on which the tests would run' + type: string + required: true + container: + description: 'JSON to be converted to the value of the "container" configuration for the job' + type: string + required: false + default: '{"image": null}' + event: + description: 'Event that triggered the workflow. E.g., "schedule" for nightly runs' + type: string + required: true + +jobs: + TensorFlow_Hub_Performance_Models_Tests: + name: TensorFlow Hub Performance Models tests + timeout-minutes: ${{ inputs.event == 'schedule' && 400 || 25 }} + runs-on: ${{ inputs.runner }} + container: ${{ fromJSON(inputs.container) }} + defaults: + run: + shell: bash + env: + DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input + OPENVINO_REPO: ${{ github.workspace }}/openvino + INSTALL_DIR: ${{ github.workspace }}/install + INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests + MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests + steps: + + - name: Check sudo + if: ${{ runner.os == 'Linux' }} + run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi + + - name: Download OpenVINO package + uses: actions/download-artifact@v3 + with: + name: openvino_package + path: ${{ env.INSTALL_DIR }} + + - name: Download OpenVINO tests package + uses: actions/download-artifact@v3 + with: + name: openvino_tests + path: ${{ env.INSTALL_TEST_DIR }} + + # Needed as ${{ github.workspace }} is not working correctly when using Docker + - name: Setup Variables + run: | + echo "OPENVINO_REPO=$GITHUB_WORKSPACE/openvino" >> "$GITHUB_ENV" + echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV" + echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV" + echo "MODEL_HUB_TESTS_INSTALL_DIR=$GITHUB_WORKSPACE/install/tests/model_hub_tests" >> "$GITHUB_ENV" + + - name: Extract OpenVINO packages + run: | + pushd ${INSTALL_DIR} + tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} + popd + + pushd ${INSTALL_TEST_DIR} + tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} + popd + + - name: Fetch setup_python action + uses: actions/checkout@v4 + with: + sparse-checkout: | + 
.github/actions/setup_python/action.yml + sparse-checkout-cone-mode: false + path: 'openvino' + + - name: Install dependencies + if: ${{ runner.os == 'Linux' }} + run: | + # install git (required to build pip deps from the sources) + sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates + + - name: Setup Python 3.11 + uses: ./openvino/.github/actions/setup_python + with: + version: '3.11' + should-setup-pip-paths: 'false' + self-hosted-runner: ${{ contains(inputs.runner, 'aks') }} + + - name: Install OpenVINO Python wheels + run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-* + + - name: Install TF Hub tests requirements + run: python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt + + - name: Install Hub Performance tests requirements + run: python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/performance_tests/requirements.txt + + - name: Performance Hub Tests + run: | + export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH + python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/performance_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_performance.html --self-contained-html -v + env: + TYPE: ${{ inputs.event == 'schedule' && 'nightly' || 'precommit'}} + TEST_DEVICE: CPU + + - name: Upload Test Results + uses: actions/upload-artifact@v3 + if: ${{ !cancelled() }} + with: + name: test-results-tensorflow-hub-performance-models + path: | + ${{ env.INSTALL_TEST_DIR }}/TEST*.html + if-no-files-found: 'error' diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 900d29a34d7fd7..54330a974d8266 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -53,7 +53,7 @@ jobs: container: image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 volumes: - - /mount/caches:/mount/caches + - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input @@ -261,164 +261,22 @@ jobs: Debian_Packages: name: Debian Packages needs: Build - timeout-minutes: 5 - defaults: - run: - shell: bash - runs-on: ubuntu-20.04 - container: - image: ubuntu:20.04 - env: - DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input - DEBIAN_PACKAGES_DIR: /__w/openvino/packages/ - - steps: - - name: Download OpenVINO debian packages - uses: actions/download-artifact@v3 - with: - name: openvino_debian_packages - path: ${{ env.DEBIAN_PACKAGES_DIR }} - - - name: Install debian packages & check conflicts - run: | - apt-get update -y - # Install debian packages from previous release - apt-get install --no-install-recommends -y gnupg wget ca-certificates - wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB - echo "deb https://apt.repos.intel.com/openvino/2023 ubuntu20 main" | tee /etc/apt/sources.list.d/intel-openvino-2023.list - apt-get update -y - apt-get install -y openvino - # install our local one and make sure the conflicts are resolved - apt-get install --no-install-recommends -y dpkg-dev - dpkg-scanpackages . 
/dev/null | gzip -9c > Packages.gz - echo "deb [trusted=yes] file:${DEBIAN_PACKAGES_DIR} ./" | tee /etc/apt/sources.list.d/openvino-local.list - apt-get update -y - apt-get install openvino -y - working-directory: ${{ env.DEBIAN_PACKAGES_DIR }} - - - name: Test debian packages - run: | - /usr/share/openvino/samples/cpp/build_samples.sh - /usr/share/openvino/samples/c/build_samples.sh - ~/openvino_cpp_samples_build/intel64/Release/hello_query_device - python3 /usr/share/openvino/samples/python/hello_query_device/hello_query_device.py - python3 -c 'from openvino import Core; Core().get_property("CPU", "AVAILABLE_DEVICES")' - python3 -c 'from openvino import Core; Core().get_property("GPU", "AVAILABLE_DEVICES")' - python3 -c 'from openvino import Core; Core().get_property("AUTO", "SUPPORTED_METRICS")' - python3 -c 'from openvino import Core; Core().get_property("MULTI", "SUPPORTED_METRICS")' - python3 -c 'from openvino import Core; Core().get_property("HETERO", "SUPPORTED_METRICS")' - python3 -c 'from openvino import Core; Core().get_property("BATCH", "SUPPORTED_METRICS")' - python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6' - benchmark_app --help - ovc --help + uses: ./.github/workflows/job_debian_packages.yml + with: + runner: 'aks-linux-4-cores-16gb' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' Samples: - needs: [Build, Smart_CI] - timeout-minutes: 20 - defaults: - run: - shell: bash - runs-on: ubuntu-20.04 - container: - image: ubuntu:20.04 - env: - DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input - INSTALL_DIR: /__w/openvino/openvino/install - INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests - BUILD_DIR: /__w/openvino/openvino/build + needs: [ Build, Smart_CI ] if: fromJSON(needs.smart_ci.outputs.affected_components).samples - - steps: - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: 'openvino' - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - self-hosted-runner: 'false' - - - name: Build cpp samples - GCC - run: ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples - env: - CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - - - name: Build cpp samples - Clang - run: | - apt-get install -y clang - ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples_clang - env: - CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - CC: clang - CXX: clang++ - - - name: Build c samples - run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples - env: - CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - - # - # Tests - # - - - name: 
Samples tests - if: fromJSON(needs.smart_ci.outputs.affected_components).samples.test - run: | - export WORKSPACE=${INSTALL_DIR} - export IE_APP_PATH=${INSTALL_DIR}/samples_bin - export IE_APP_PYTHON_PATH=${INSTALL_DIR}/samples/python - export SHARE=${INSTALL_TEST_DIR}/smoke_tests/samples_smoke_tests_data - - python3 -m pip install --ignore-installed PyYAML -r ${INSTALL_TEST_DIR}/smoke_tests/requirements.txt - export LD_LIBRARY_PATH=${IE_APP_PATH}:$LD_LIBRARY_PATH - - source ${INSTALL_DIR}/setupvars.sh - - python3 -m pytest -sv ${INSTALL_TEST_DIR}/smoke_tests \ - --env_conf ${INSTALL_TEST_DIR}/smoke_tests/env_config.yml \ - --junitxml=${INSTALL_TEST_DIR}/TEST-SamplesSmokeTests.xml - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: test-results-samples - path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml - if-no-files-found: 'warn' + uses: ./.github/workflows/job_samples_tests.yml + with: + runner: 'aks-linux-4-cores-16gb' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} Conformance: - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] timeout-minutes: ${{ matrix.TEST_TYPE == 'API' && 5 || 15 }} defaults: run: @@ -522,1027 +380,88 @@ jobs: ONNX_Runtime: name: ONNX Runtime Integration - needs: [Build, Smart_CI] - timeout-minutes: 20 - defaults: - run: - shell: bash - runs-on: aks-linux-16-cores-32gb - container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - volumes: - - /mount/caches:/mount/caches - options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING - env: - DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input - CMAKE_GENERATOR: 'Ninja Multi-Config' - CMAKE_CXX_COMPILER_LAUNCHER: sccache - CMAKE_C_COMPILER_LAUNCHER: sccache - OPENVINO_REPO: /__w/openvino/openvino/openvino - INSTALL_DIR: /__w/openvino/openvino/install - SCCACHE_AZURE_KEY_PREFIX: ubuntu20_x86_64_onnxruntime - ONNX_RUNTIME_REPO: /__w/openvino/openvino/onnxruntime - ONNX_RUNTIME_UTILS: /__w/openvino/openvino/install/onnxruntime - ONNX_RUNTIME_BUILD_DIR: /__w/openvino/openvino/onnxruntime/build if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT - - steps: - - name: Fetch install_build_dependencies.sh and setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - install_build_dependencies.sh - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: ${{ env.OPENVINO_REPO }} - - - name: Install git - run: | - apt-get update - apt-get install --assume-yes --no-install-recommends git ca-certificates - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - - # - # Initialize OpenVINO - # - - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Extract OpenVINO package - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - - - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y - - - name: Clone ONNX Runtime - run: | - branch=`tr -s '\n ' < ${ONNX_RUNTIME_UTILS}/version` - git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${ONNX_RUNTIME_REPO} - - # - # Tests - # - - - name: Install Build 
Dependencies - run: bash ${OPENVINO_REPO}/install_build_dependencies.sh - - - name: Install sccache - uses: mozilla-actions/sccache-action@v0.0.3 - with: - version: "v0.5.4" - - - name: Build Lin ONNX Runtime - run: | - source ${INSTALL_DIR}/setupvars.sh - - ${ONNX_RUNTIME_REPO}/build.sh \ - --config RelWithDebInfo \ - --use_openvino CPU_FP32 \ - --build_shared_lib \ - --parallel \ - --skip_tests \ - --compile_no_warning_as_error \ - --build_dir ${ONNX_RUNTIME_BUILD_DIR} - env: - CXXFLAGS: "-Wno-error=deprecated-declarations" - - - name: Show sccache stats - run: ${SCCACHE_PATH} --show-stats - - - name: Run onnxruntime_test_all - run: | - source ${INSTALL_DIR}/setupvars.sh - skip_tests=$(tr -s '\n ' ':' < ${ONNX_RUNTIME_UTILS}/skip_tests) - - ./onnxruntime_test_all --gtest_filter=-$skip_tests - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - - name: Run onnxruntime_shared_lib_test - run: | - source ${INSTALL_DIR}/setupvars.sh - ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - - name: Run onnxruntime_global_thread_pools_test - run: | - source ${INSTALL_DIR}/setupvars.sh - ./onnxruntime_global_thread_pools_test - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - - name: Run onnxruntime_api_tests_without_env - run: | - source ${INSTALL_DIR}/setupvars.sh - ./onnxruntime_api_tests_without_env - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - - name: Run pytorch-converted tests - run: | - source ${INSTALL_DIR}/setupvars.sh - ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted" - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo - - - name: Run pytorch-operator tests - run: | - source ${INSTALL_DIR}/setupvars.sh - ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator" - working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_onnx_runtime.yml + with: + runner: 'aks-linux-16-cores-32gb' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}' + sccache-azure-key-prefix: 'ubuntu20_x86_64_onnxruntime' CXX_Unit_Tests: name: C++ unit tests - needs: [Build, Smart_CI] - timeout-minutes: 20 - defaults: - run: - shell: bash - runs-on: aks-linux-4-cores-16gb - container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - env: - INSTALL_DIR: /__w/openvino/openvino/install - INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests - - steps: - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_cxx_unit_tests.yml + with: + runner: 'aks-linux-4-cores-16gb' + image: 
'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} - - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=gpu -y - - # - # Tests - # - - - name: OpenVINO Core Unit Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVCoreUT.xml - - - name: OpenVINO Inference Functional Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_inference_functional_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceFunc.xml - - - name: OpenVINO Inference Unit Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_inference_unit_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceUnit.xml - - - name: Low Precision Transformations Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_lp_transformations_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LpTransformations.xml - - - name: OpenVINO Conditional compilation tests - if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_conditional_compilation_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ConditionalCompilation.xml - - - name: IR frontend tests - if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_ir_frontend_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml - - - name: PaddlePaddle frontend tests - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-PaddleTests.xml - - - name: ONNX frontend tests - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_onnx_frontend_tests --gtest_print_time=1 \ - --gtest_filter=-*IE_GPU* \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ONNXFrontend.xml - - - name: TensorFlow Common frontend tests - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || - fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_tensorflow_common_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowCommonFrontend.xml - - - name: TensorFlow frontend tests - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_tensorflow_frontend_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowFrontend.xml - - - name: TensorFlow Lite frontend tests - if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \ - 
--gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowLiteFrontend.xml - - - name: Transformations func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml - - - name: Legacy Transformations func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_legacy_transformations_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LegacyTransformations.xml - - - name: Inference Engine 1.0 unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/InferenceEngineUnitTests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineUnitTests.xml - - - name: Common test utils tests - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CommonUtilTests.xml - - - name: Snippets func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_snippets_func_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SnippetsFuncTests.xml - - - name: CPU plugin unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_cpu_unit_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CPUUnitTests.xml - - - name: ov_subgraphs_dumper_tests tests - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_subgraphs_dumper_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml - - - name: Template OpImpl tests - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter=*OpImpl*\ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpImplTests.xml - - - name: AUTO unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml - - - name: AUTO func Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \ - --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml - - - name: Template plugin func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_template_func_tests --gtest_print_time=1 \ - --gtest_filter=*smoke* \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml - - - name: Inference Engine C API tests - if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/InferenceEngineCAPITests --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineCAPITests.xml - - - name: OpenVINO C API tests - if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test - run: | - 
source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_capi_test --gtest_print_time=1 \ - --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpenVINOCAPITests.xml - - - name: AutoBatch unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_auto_batch_unit_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_unit_tests.xml - - - name: AutoBatch func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_auto_batch_func_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_func_tests.xml - - - name: Proxy Plugin func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVProxyTests.xml - - - name: Hetero unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml - - - name: Hetero func tests - if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test - run: | - source ${INSTALL_DIR}/setupvars.sh - ${INSTALL_TEST_DIR}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVHeteroFuncTests.xml - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: test-results-cpp - path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml - if-no-files-found: 'warn' - - Python_Unit_Tests: - name: Python unit tests - needs: [Build, Smart_CI] - timeout-minutes: 40 - defaults: - run: - shell: bash - runs-on: aks-linux-4-cores-16gb - container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - volumes: - - /mount/caches:/mount/caches - env: - OPENVINO_REPO: /__w/openvino/openvino/openvino - INSTALL_DIR: /__w/openvino/openvino/install - INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests - LAYER_TESTS_INSTALL_DIR: /__w/openvino/openvino/install/tests/layer_tests - - steps: - # - # Initialize OpenVINO - # - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: ${{ env.OPENVINO_REPO }} - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - pip-cache-path: ${{ env.PIP_CACHE_PATH }} - should-setup-pip-paths: 'true' - - - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y - - - name: Install OpenVINO Python wheels - run: | - # Install the core OV wheel - python3 -m pip install ${INSTALL_DIR}/tools/openvino-*.whl - - # Find and install 
OV dev wheel - pushd ${INSTALL_DIR}/tools - ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl') - python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch] - popd - - - name: Install Python API tests dependencies - run: | - # To enable pytest parallel features - python3 -m pip install pytest-xdist[psutil] - # For torchvision to OpenVINO preprocessing converter - python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt - - # TODO: replace with Python API tests requirements - python3 -m pip install -r ${INSTALL_TEST_DIR}/mo/requirements_dev.txt - - # - # Tests - # - - - name: Python API 1.0 Tests - #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test - run: | - python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \ - --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \ - --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py - - - name: Python API 2.0 Tests - #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test - run: | - # for 'template' extension - export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH - python3 -m pytest -sv ${INSTALL_TEST_DIR}/pyopenvino \ - --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \ - --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py - - - name: Model Optimizer unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test - run: | - # required for MxNet - apt-get install -y libgomp1 libquadmath0 - - python3 -m pytest -s ${INSTALL_TEST_DIR}/mo/unit_tests \ - --junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml - - - name: Python ONNX operators tests - if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test || - fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test - run: | - # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately - python3 -m pytest -sv ${INSTALL_TEST_DIR}/onnx -k 'not cuda' \ - --junitxml=${INSTALL_TEST_DIR}/TEST-onnx_frontend.xml \ - --ignore=${INSTALL_TEST_DIR}/onnx/test_python/test_zoo_models.py - - - name: OVC unit tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test - run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml - - - name: Install Python Layer tests dependencies - run: | - # layer test requirements - python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt - - - name: MO Python API Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test - run: | - # Import 'test_utils' installed in '/tests/python/openvino' - export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH - export PYTHONPATH=${INSTALL_TEST_DIR}/python - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/mo_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_mo_convert.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - - name: OVC Python API Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test - run: | - # Import 'test_utils' installed in '/tests/python/openvino' - export PYTHONPATH=${INSTALL_TEST_DIR}/python - export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/ovc_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_ovc_convert.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 
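
The deleted inline steps above are representative of this patch's central refactor: each job body is stripped out of linux.yml and replaced by a thin caller that delegates to a shared workflow via `uses:`/`with:`. A condensed sketch of that caller/callee pattern follows, using hypothetical names (`Example_Tests`, `job_example_tests.yml`) rather than any file actually touched here:

# Caller side (in linux.yml): the job keeps only the wiring -- needs,
# Smart CI gating, and inputs -- and delegates all steps to the
# reusable workflow.
Example_Tests:
  name: Example tests
  needs: [ Build, Smart_CI ]
  if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
  uses: ./.github/workflows/job_example_tests.yml
  with:
    runner: 'aks-linux-4-cores-16gb'
    affected-components: ${{ needs.smart_ci.outputs.affected_components }}

# Callee side (.github/workflows/job_example_tests.yml): declares the
# same names as workflow_call inputs, so every OS pipeline can reuse
# one job body.
name: Example tests
on:
  workflow_call:
    inputs:
      runner:
        description: 'Machine on which the tests would run'
        type: string
        required: true
      affected-components:
        description: 'Components affected by the change, from Smart CI'
        type: string
        required: true
jobs:
  Example_Tests:
    runs-on: ${{ inputs.runner }}
    steps:
      - name: Run tests
        run: echo "the former inline steps live here"

One design detail visible throughout the diff: a caller of a reusable workflow cannot set `container:` directly, so the patch passes the container configuration as a JSON string input and the callee expands it with `container: ${{ fromJSON(inputs.container) }}`.
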
- - - name: Python Frontend tests - if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test || - fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test - run: | - # to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so' - export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml - - - name: PyTorch Layer Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test - run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP32 - - - name: PyTorch torch.compile TORCHFX Layer Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test - run: | - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_fx_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP32 - PYTORCH_TRACING_MODE: TORCHFX - - - name: PyTorch torch.compile TORCHSCRIPT Layer Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test - run: | - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_ts_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP32 - PYTORCH_TRACING_MODE: TORCHSCRIPT - - - name: ONNX Layer Tests - if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test - run: | - # requires 'unit_tests' from 'tools/mo' - export PYTHONPATH=${INSTALL_TEST_DIR}/mo:$PYTHONPATH - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - - name: TensorFlow 1 Layer Tests - TF FE - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - run: | - # requires 'unit_tests' from 'mo' - export PYTHONPATH=${INSTALL_TEST_DIR}/mo - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - - name: TensorFlow 2 Layer Tests - TF FE - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - run: | - # requires 'unit_tests' from 'mo' - export PYTHONPATH=${INSTALL_TEST_DIR}/mo - python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - - name: JAX Layer Tests - TF FE - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-jax.xml - env: - TEST_DEVICE: CPU - - - name: TensorFlow 1 Layer Tests - Legacy FE - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${INSTALL_TEST_DIR}/TEST-tf_Roll.xml - - - name: TensorFlow 2 Layer Tests - Legacy FE - if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test - run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/test_tf2_keras_activation.py --ir_version=11 -k "sigmoid" --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_Activation.xml - env: - TEST_DEVICE: 
CPU - TEST_PRECISION: FP16 - - - name: TensorFlow Lite Layer Tests - TFL FE - if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_lite_tests/ --junitxml=${INSTALL_TEST_DIR}/TEST-tfl_fe.xml - env: - TEST_DEVICE: CPU - TEST_PRECISION: FP16 - - - name: Clone API snippets - uses: actions/checkout@v4 - with: - sparse-checkout: openvino/docs/snippets - path: ${{ env.OPENVINO_REPO }} - submodules: 'false' - - - name: Docs Python snippets - run: | - # to find 'snippets' module in docs - export PYTHONPATH=${OPENVINO_REPO}/docs - # for 'template' extension - export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH - python3 ${OPENVINO_REPO}/docs/snippets/main.py - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: test-results-python - path: | - ${{ env.INSTALL_TEST_DIR }}/TEST*.html - ${{ env.INSTALL_TEST_DIR }}/TEST*.xml - if-no-files-found: 'warn' + Python_Unit_Tests: + name: Python unit tests + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_python_unit_tests.yml + with: + runner: 'aks-linux-4-cores-16gb' + container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"]}' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} CPU_Functional_Tests: name: CPU functional tests - needs: [Build, Smart_CI] - timeout-minutes: 25 - defaults: - run: - shell: bash - runs-on: aks-linux-8-cores-32gb - container: - image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - env: - OPENVINO_REPO: /__w/openvino/openvino/openvino - INSTALL_DIR: /__w/openvino/openvino/install - INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests - PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py - PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test - steps: - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Install OpenVINO dependencies - run: bash ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: ${{ env.OPENVINO_REPO }} - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - - - name: Install python dependencies for run_parallel.py - run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt - - - name: Restore tests execution time - uses: actions/cache/restore@v3 - with: - path: ${{ env.PARALLEL_TEST_CACHE }} - key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} - restore-keys: | - ${{ runner.os }}-tests-functional-cpu-stamp - - - name: Intel CPU plugin func tests (parallel) - run: | - source 
${INSTALL_DIR}/setupvars.sh - python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke* - timeout-minutes: 20 - - - name: Save tests execution time - uses: actions/cache/save@v3 - if: github.ref_name == 'master' - with: - path: ${{ env.PARALLEL_TEST_CACHE }} - key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: test-results-functional-cpu - path: | - ${{ env.INSTALL_TEST_DIR }}/temp/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log - ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv - ${{ env.PARALLEL_TEST_CACHE }} - if-no-files-found: 'error' - + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_cpu_functional_tests.yml + with: + runner: 'aks-linux-8-cores-32gb' + image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' + + # TODO: Switch back to self-hosted runners + # container: + # image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + # volumes: + # - /mount:/mount TensorFlow_Hub_Models_Tests: name: TensorFlow Hub Models tests - needs: [Build, Smart_CI] - defaults: - run: - shell: bash - runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores'}} - timeout-minutes: ${{ github.event_name == 'schedule' && 400 || 25 }} - # TODO: Switch back to self-hosted runners - # container: - # image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - # volumes: - # - /mount/caches:/mount/caches - env: - OPENVINO_REPO: ${{ github.workspace }}/openvino - INSTALL_DIR: ${{ github.workspace }}/install - INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests - MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - - steps: - - name: Check sudo - run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi - - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: 'openvino' - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - self-hosted-runner: 'false' - - - name: Install OpenVINO Python wheels - run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-* - - - name: Install TF Hub tests requirements - run: | - python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt - - - name: TensorFlow Hub Tests - TF FE - run: | - export 
PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_tf_fe.html --self-contained-html -v - env: - TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}} - TEST_DEVICE: CPU - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: test-results-tensorflow-hub-models - path: | - ${{ env.INSTALL_TEST_DIR }}/TEST*.html - if-no-files-found: 'error' - + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_tensorflow_hub_models_tests.yml + with: + runner: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores' }} + event: ${{ github.event_name }} + + # TODO: Switch back to self-hosted runners + # container: + # image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + # volumes: + # - /mount:/mount TensorFlow_Hub_Performance_Models_Tests: name: TensorFlow Hub Performance Models tests - needs: [Build, Smart_CI] - defaults: - run: - shell: bash - runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores'}} - timeout-minutes: ${{ github.event_name == 'schedule' && 400 || 25 }} - env: - OPENVINO_REPO: ${{ github.workspace }}/openvino - INSTALL_DIR: ${{ github.workspace }}/install - INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests - MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test - - steps: - - name: Check sudo - run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi - - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: 'openvino' - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - self-hosted-runner: 'false' - - - name: Install OpenVINO Python wheels - run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-* - - - name: Install TF Hub tests requirements - run: | - python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt - - - name: Install Hub Performance tests requirements - run: | - python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/performance_tests/requirements.txt - - - name: Performance Hub Tests - run: | - export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/performance_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_performance.html --self-contained-html -v - env: - TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}} - TEST_DEVICE: CPU - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: 
test-results-tensorflow-hub-performance-models - path: | - ${{ env.INSTALL_TEST_DIR }}/TEST*.html - if-no-files-found: 'error' - + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_tensorflow_hub_performance_models_tests.yml + with: + runner: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores' }} + event: ${{ github.event_name }} + + # TODO: Switch back to self-hosted runners + # container: + # image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 + # volumes: + # - /mount:/mount PyTorch_Models_Tests: name: PyTorch Models tests - needs: [Build, Smart_CI] - timeout-minutes: ${{ github.event_name == 'schedule' && 400 || 30 }} - defaults: - run: - shell: bash - runs-on: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores'}} - # TODO: Switch back to self-hosted runners - # container: - # image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 - # volumes: - # - /mount/caches:/mount/caches - env: - OPENVINO_REPO: ${{ github.workspace }}/openvino - INSTALL_DIR: ${{ github.workspace }}/install - INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests - MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test - - steps: - - name: Check sudo - run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi - - - name: Install dependencies - run: | - # install git (required to build pip deps from the sources) - # install 'g++' to build 'detectron2' and 'natten' wheels - sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates - - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: 'openvino' - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - self-hosted-runner: 'false' - - - name: Install OpenVINO Python wheels - run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-* - - - name: Install PyTorch tests requirements - run: | - python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements.txt - python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements_secondary.txt - echo "Available storage:" - df -h - env: - CPLUS_INCLUDE_PATH: ${{ env.Python_ROOT_DIR }}/include/python${{ env.PYTHON_VERSION }} - - - name: PyTorch Models Tests - run: | - export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH - python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v - env: - TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}} - TEST_DEVICE: CPU - USE_SYSTEM_CACHE: False - - - name: Available storage after tests - run: | - echo "Available storage:" - df -h - - - name: Upload 
Test Results - uses: actions/upload-artifact@v3 - if: ${{ !cancelled() }} - with: - name: test-results-torch-models - path: | - ${{ env.INSTALL_TEST_DIR }}/TEST*.html - if-no-files-found: 'error' + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_pytorch_models_tests.yml + with: + runner: ${{ github.event_name == 'schedule' && 'ubuntu-20.04-16-cores' || 'ubuntu-20.04-8-cores' }} + event: ${{ github.event_name }} NVIDIA_Plugin: name: NVIDIA plugin - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] timeout-minutes: 15 defaults: run: @@ -1551,7 +470,7 @@ jobs: container: image: openvinogithubactions.azurecr.io/dockerhub/nvidia/cuda:11.8.0-runtime-ubuntu20.04 volumes: - - /mount/caches:/mount/caches + - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: CMAKE_BUILD_TYPE: 'Release' @@ -1659,7 +578,7 @@ jobs: run: ${SCCACHE_PATH} --show-stats GPU_Stub: - needs: [Build, Smart_CI] + needs: [ Build, Smart_CI ] runs-on: ubuntu-latest if: fromJSON(needs.smart_ci.outputs.affected_components).GPU steps: diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index a611f59db5a55c..1b485611d91dec 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -1,7 +1,7 @@ name: Linux ARM64 (Ubuntu 20.04, Python 3.11) on: workflow_dispatch: - # pull_request: + pull_request: push: branches: - master @@ -52,7 +52,7 @@ jobs: container: image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04 volumes: - - /mount/caches:/mount/caches + - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input @@ -117,7 +117,6 @@ jobs: version: ${{ env.PYTHON_VERSION }} pip-cache-path: ${{ env.PIP_CACHE_PATH }} should-setup-pip-paths: 'true' - self-hosted-runner: 'true' show-cache-info: 'true' - name: Install python dependencies @@ -150,7 +149,7 @@ jobs: -DENABLE_NCC_STYLE=OFF \ -DENABLE_TESTS=ON \ -DENABLE_STRICT_DEPENDENCIES=OFF \ - -DENABLE_SYSTEM_TBB=ON \ + -DENABLE_SYSTEM_TBB=OFF \ -DENABLE_SYSTEM_OPENCL=ON \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DCPACK_GENERATOR=TGZ \ @@ -197,6 +196,7 @@ jobs: popd - name: Build Debian packages + if: ${{ 'false' }} run: | /usr/bin/python3.8 -m pip install -U pip /usr/bin/python3.8 -m pip install -r ${OPENVINO_REPO}/src/bindings/python/wheel/requirements-dev.txt @@ -240,7 +240,7 @@ jobs: if-no-files-found: 'error' - name: Upload openvino debian packages - if: ${{ always() }} + if: ${{ 'false' }} uses: actions/upload-artifact@v3 with: name: openvino_debian_packages @@ -258,1208 +258,97 @@ jobs: Debian_Packages: name: Debian Packages needs: Build - timeout-minutes: 10 - defaults: - run: - shell: bash - runs-on: 'aks-linux-16-cores-arm' - container: + if: ${{ 'false' }} + uses: ./.github/workflows/job_debian_packages.yml + with: + runner: 'aks-linux-16-cores-arm' image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' - env: - DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input - DEBIAN_PACKAGES_DIR: /__w/openvino/packages/ - - steps: - - name: Download OpenVINO debian packages - uses: actions/download-artifact@v3 - with: - name: openvino_debian_packages - path: ${{ env.DEBIAN_PACKAGES_DIR }} - - - name: Install debian packages & check conflicts - run: | - apt-get update -y - # install our local one - apt-get install --no-install-recommends -y dpkg-dev - dpkg-scanpackages . 
/dev/null | gzip -9c > Packages.gz - echo "deb [trusted=yes] file:${DEBIAN_PACKAGES_DIR} ./" | tee /etc/apt/sources.list.d/openvino-local.list - apt-get update -y - apt-get install openvino -y - working-directory: ${{ env.DEBIAN_PACKAGES_DIR }} - - - name: Test debian packages - run: | - /usr/share/openvino/samples/cpp/build_samples.sh - /usr/share/openvino/samples/c/build_samples.sh - - ~/openvino_cpp_samples_build/aarch64/Release/hello_query_device - - python3 /usr/share/openvino/samples/python/hello_query_device/hello_query_device.py - python3 -c 'from openvino import Core; Core().get_property("CPU", "AVAILABLE_DEVICES")' - python3 -c 'from openvino import Core; Core().get_property("AUTO", "SUPPORTED_METRICS")' - python3 -c 'from openvino import Core; Core().get_property("MULTI", "SUPPORTED_METRICS")' - python3 -c 'from openvino import Core; Core().get_property("HETERO", "SUPPORTED_METRICS")' - python3 -c 'from openvino import Core; Core().get_property("BATCH", "SUPPORTED_METRICS")' - python3 -c 'from openvino.frontend import FrontEndManager; assert len(FrontEndManager().get_available_front_ends()) == 6' - benchmark_app --help - ovc --help Samples: - needs: [Build, Smart_CI] - timeout-minutes: 20 - defaults: - run: - shell: bash - runs-on: 'aks-linux-16-cores-arm' - container: - image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04' - env: - DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input - INSTALL_DIR: /__w/openvino/openvino/install - INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests - BUILD_DIR: /__w/openvino/openvino/build + needs: [ Build, Smart_CI ] if: fromJSON(needs.smart_ci.outputs.affected_components).samples - - steps: - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${INSTALL_DIR} - tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR} - popd - pushd ${INSTALL_TEST_DIR} - tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR} - popd - - - name: Install OpenVINO dependencies - run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y - - - name: Fetch setup_python action - uses: actions/checkout@v4 - with: - sparse-checkout: | - .github/actions/setup_python/action.yml - sparse-checkout-cone-mode: false - path: 'openvino' - - - name: Setup Python ${{ env.PYTHON_VERSION }} - uses: ./openvino/.github/actions/setup_python - with: - version: ${{ env.PYTHON_VERSION }} - should-setup-pip-paths: 'false' - self-hosted-runner: 'true' - - - name: Build cpp samples - GCC - run: ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples - env: - CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - - - name: Build cpp samples - Clang - run: | - apt-get install -y clang - ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples_clang - env: - CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - CC: clang - CXX: clang++ - - - name: Build c samples - run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples - env: - CMAKE_COMPILE_WARNING_AS_ERROR: 'ON' - - # - # Tests - # - - - name: Samples tests - if: fromJSON(needs.smart_ci.outputs.affected_components).samples.test - run: | - export WORKSPACE=${INSTALL_DIR} - export IE_APP_PATH=${INSTALL_DIR}/samples_bin - export 
IE_APP_PYTHON_PATH=${INSTALL_DIR}/samples/python
-          export SHARE=${INSTALL_TEST_DIR}/smoke_tests/samples_smoke_tests_data
-
-          python3 -m pip install --ignore-installed PyYAML -r ${INSTALL_TEST_DIR}/smoke_tests/requirements.txt
-          export LD_LIBRARY_PATH=${IE_APP_PATH}:$LD_LIBRARY_PATH
-
-          source ${INSTALL_DIR}/setupvars.sh
-
-          python3 -m pytest -sv ${INSTALL_TEST_DIR}/smoke_tests \
-            --env_conf ${INSTALL_TEST_DIR}/smoke_tests/env_config.yml \
-            --junitxml=${INSTALL_TEST_DIR}/TEST-SamplesSmokeTests.xml
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-samples
-          path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'warn'
+    uses: ./.github/workflows/job_samples_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04'
+      affected-components: ${{ needs.smart_ci.outputs.affected_components }}

   ONNX_Runtime:
     name: ONNX Runtime Integration
-    needs: [Build, Smart_CI]
-    timeout-minutes: 30
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
-      volumes:
-        - /mount/caches:/mount/caches
-      options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
-    env:
-      DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
-      CMAKE_GENERATOR: 'Ninja Multi-Config'
-      CMAKE_CXX_COMPILER_LAUNCHER: sccache
-      CMAKE_C_COMPILER_LAUNCHER: sccache
-      OPENVINO_REPO: /__w/openvino/openvino/openvino
-      INSTALL_DIR: /__w/openvino/openvino/install
-      SCCACHE_AZURE_KEY_PREFIX: 'ubuntu20_aarch64_onnxruntime'
-      ONNX_RUNTIME_REPO: /__w/openvino/openvino/onnxruntime
-      ONNX_RUNTIME_UTILS: /__w/openvino/openvino/install/onnxruntime
-      ONNX_RUNTIME_BUILD_DIR: /__w/openvino/openvino/onnxruntime/build
     if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_RT
-
-    steps:
-      - name: Fetch install_build_dependencies.sh and setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            install_build_dependencies.sh
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: ${{ env.OPENVINO_REPO }}
-
-      - name: Install git
-        run: |
-          apt-get update
-          apt-get install --assume-yes --no-install-recommends git ca-certificates
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-
-      #
-      # Initialize OpenVINO
-      #
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Extract OpenVINO package
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Install OpenVINO dependencies
-        run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=dev -y
-
-      - name: Clone ONNX Runtime
-        run: |
-          branch=`tr -s '\n ' < ${ONNX_RUNTIME_UTILS}/version`
-          git clone --branch $branch --single-branch --recursive https://github.com/microsoft/onnxruntime.git ${ONNX_RUNTIME_REPO}
-
-      #
-      # Tests
-      #
-
-      - name: Install Build Dependencies
-        run: bash ${OPENVINO_REPO}/install_build_dependencies.sh
-
-      - name: Install sccache
-        uses: mozilla-actions/sccache-action@v0.0.3
-        with:
-          version: "v0.5.4"
-
-      - name: Build Lin ONNX Runtime
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-
-          ${ONNX_RUNTIME_REPO}/build.sh \
-            --config RelWithDebInfo \
-            --use_openvino CPU_FP32 \
-            --build_shared_lib \
-            --parallel \
-            --skip_tests \
-            --compile_no_warning_as_error \
-            --build_dir ${ONNX_RUNTIME_BUILD_DIR}
-        env:
-          CXXFLAGS: "-Wno-error=deprecated-declarations"
-
-      - name: Show sccache stats
-        run: ${SCCACHE_PATH} --show-stats
-
-      - name: Run onnxruntime_test_all
-        if: ${{ 'false' }} # Ticket: 126277
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          skip_tests=$(tr -s '\n ' ':' < ${ONNX_RUNTIME_UTILS}/skip_tests)
-
-          ./onnxruntime_test_all --gtest_filter=-$skip_tests
-        working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
-
-      - name: Run onnxruntime_shared_lib_test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ./onnxruntime_shared_lib_test --gtest_filter=-CApiTest.test_custom_op_openvino_wrapper_library
-        working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
-
-      - name: Run onnxruntime_global_thread_pools_test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ./onnxruntime_global_thread_pools_test
-        working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
-
-      - name: Run onnxruntime_api_tests_without_env
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ./onnxruntime_api_tests_without_env
-        working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
-
-      - name: Run pytorch-converted tests
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-converted"
-        working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
-
-      - name: Run pytorch-operator tests
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ./onnx_test_runner "${ONNX_RUNTIME_REPO}/cmake/external/onnx/onnx/backend/test/data/pytorch-operator"
-        working-directory: ${{ env.ONNX_RUNTIME_BUILD_DIR }}/RelWithDebInfo/RelWithDebInfo
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_onnx_runtime.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"], "options": "-e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING"}'
+      sccache-azure-key-prefix: 'ubuntu20_aarch64_onnxruntime'

   CXX_Unit_Tests:
     name: C++ unit tests
-    needs: [Build, Smart_CI]
-    timeout-minutes: 20
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
-    env:
-      INSTALL_DIR: /__w/openvino/openvino/install
-      INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests
-
-    steps:
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Install OpenVINO dependencies
-        run: ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -c=gpu -y
-
-      #
-      # Tests
-      #
-
-      - name: OpenVINO Core Unit Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVCoreUT.xml
-
-      - name: OpenVINO Inference Functional Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_inference_functional_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceFunc.xml
-
-      - name: OpenVINO Inference Unit Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_inference_unit_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceUnit.xml
-
-      - name: Low Precision Transformations Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-
-          # Skip filter ticket: 126279
-          ${INSTALL_TEST_DIR}/ov_lp_transformations_tests --gtest_print_time=1 \
-            --gtest_filter=-*smoke_LPT/FoldFakeQuantizeInTransformations.CompareFunctions* \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-LpTransformations.xml
-
-      - name: OpenVINO Conditional compilation tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_conditional_compilation_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ConditionalCompilation.xml
-
-      - name: IR frontend tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_ir_frontend_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml
-
-      - name: PaddlePaddle frontend tests
-        if: ${{ 'false' }}
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-PaddleTests.xml
-
-      - name: ONNX frontend tests
-        if: ${{ 'false' }} # Ticket: 126280
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_onnx_frontend_tests --gtest_print_time=1 \
-            --gtest_filter=-*IE_GPU* \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ONNXFrontend.xml
-
-      - name: TensorFlow Common frontend tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test ||
-            fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_tensorflow_common_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowCommonFrontend.xml
-
-      - name: TensorFlow frontend tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_tensorflow_frontend_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowFrontend.xml
-
-      - name: TensorFlow Lite frontend tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TensorFlowLiteFrontend.xml
-
-      - name: Transformations func tests
-        if: ${{ 'false' }} # Ticket: 126281
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_transformations_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-Transformations.xml
-
-      - name: Common test utils tests
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_util_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CommonUtilTests.xml
-
-      - name: Snippets func tests
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_snippets_func_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-SnippetsFuncTests.xml
-
-      - name: CPU plugin unit tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_cpu_unit_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-CPUUnitTests.xml
-
-      - name: ov_subgraphs_dumper_tests tests
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_subgraphs_dumper_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml
-
-      - name: Template OpImpl tests
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter=*OpImpl*\
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpImplTests.xml
-
-      - name: AUTO unit tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_auto_unit_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_unit_tests.xml
-
-      - name: AUTO func Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
-
-      - name: Template plugin func tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_template_func_tests --gtest_print_time=1 \
-            --gtest_filter=*smoke* \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateFuncTests.xml
-
-      - name: Inference Engine C API tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/InferenceEngineCAPITests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-InferenceEngineCAPITests.xml
-
-      - name: OpenVINO C API tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-
-          # Skip filter ticket: 126283
-          ${INSTALL_TEST_DIR}/ov_capi_test --gtest_print_time=1 \
-            --gtest_filter=-*ov_core/ov_core_test.ov_core_compile_model_with_property* \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OpenVINOCAPITests.xml
-
-      - name: AutoBatch unit tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_auto_batch_unit_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_unit_tests.xml
-
-      - name: AutoBatch func tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_auto_batch_func_tests --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_auto_batch_func_tests.xml
-
-      - name: Proxy Plugin func tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVProxyTests.xml
-
-      - name: Hetero unit tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml
-
-      - name: Hetero func tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          ${INSTALL_TEST_DIR}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-OVHeteroFuncTests.xml
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-cpp
-          path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'warn'
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_cxx_unit_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04'
+      affected-components: ${{ needs.smart_ci.outputs.affected_components }}

   Python_Unit_Tests:
     name: Python unit tests
-    needs: [Build, Smart_CI]
-    timeout-minutes: 180
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
-      volumes:
-        - /mount/caches:/mount/caches
-    env:
-      OPENVINO_REPO: /__w/openvino/openvino/openvino
-      INSTALL_DIR: /__w/openvino/openvino/install
-      INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests
-      LAYER_TESTS_INSTALL_DIR: /__w/openvino/openvino/install/tests/layer_tests
-
-    steps:
-      #
-      # Initialize OpenVINO
-      #
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: ${{ env.OPENVINO_REPO }}
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          pip-cache-path: ${{ env.PIP_CACHE_PATH }}
-          should-setup-pip-paths: 'true'
-
-      - name: Install OpenVINO dependencies
-        run: |
-          apt-get update && apt-get install -y gcc python3-dev # Needed for building `psutil`
-          ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y
-
-      - name: Install OpenVINO Python wheels
-        run: |
-          # Install the core OV wheel
-          python3 -m pip install ${INSTALL_DIR}/tools/openvino-*.whl
-
-          # Find and install OV dev wheel
-          pushd ${INSTALL_DIR}/tools
-            ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl')
-            python3 -m pip install $ov_dev_wheel_name[mxnet,caffe,kaldi,onnx,tensorflow2,pytorch]
-          popd
-
-      - name: Install Python API tests dependencies
-        run: |
-          # To enable pytest parallel features
-          python3 -m pip install pytest-xdist[psutil]
-          # For torchvision to OpenVINO preprocessing converter
-          python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt
-
-          # TODO: replace with Python API tests requirements
-          python3 -m pip install -r ${INSTALL_TEST_DIR}/mo/requirements_dev.txt
-
-      #
-      # Tests
-      #
-
-      - name: Python API 1.0 Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test
-        run: |
-          python3 -m pytest -s ${INSTALL_TEST_DIR}/pyngraph \
-            --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \
-            --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \
-            --ignore=${INSTALL_TEST_DIR}/pyngraph/tests_compatibility/test_onnx/test_backend.py
-
-      - name: Python API 2.0 Tests
-        timeout-minutes: 30
-        if: ${{ 'false' }} # Ticket: 126380
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test
-        run: |
-          # for 'template' extension
-          export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH
-          python3 -m pytest -sv ${INSTALL_TEST_DIR}/pyopenvino \
-            --junitxml=${INSTALL_TEST_DIR}/TEST-Pyngraph.xml \
-            --ignore=${INSTALL_TEST_DIR}/pyopenvino/tests/test_utils/test_utils.py
-
-      - name: Model Optimizer unit tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
-        run: |
-          python3 -m pytest -s ${INSTALL_TEST_DIR}/mo/unit_tests \
-            --junitxml=${INSTALL_TEST_DIR}/TEST-ModelOptimizer.xml \
-            --ignore-glob="**/mo/front/mxnet/**"
-
-      - name: Python ONNX operators tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test ||
-            fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test
-        run: |
-          # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately
-          python3 -m pytest -sv ${INSTALL_TEST_DIR}/onnx -k 'not cuda' \
-            --junitxml=${INSTALL_TEST_DIR}/TEST-onnx_frontend.xml \
-            --ignore=${INSTALL_TEST_DIR}/onnx/test_python/test_zoo_models.py
-
-      - name: OVC unit tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
-        run: python3 -m pytest -s ${INSTALL_TEST_DIR}/ovc/unit_tests --junitxml=${INSTALL_TEST_DIR}/TEST-OpenVinoConversion.xml
-
-      - name: Install Python Layer tests dependencies
-        if: ${{ always() }}
-        run: |
-          # layer test requirements
-          python3 -m pip install -r ${LAYER_TESTS_INSTALL_DIR}/requirements.txt
-
-      - name: MO Python API Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
-        run: |
-          # Import 'test_utils' installed in '/tests/python/openvino'
-          export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
-          export PYTHONPATH=${INSTALL_TEST_DIR}/python
-          export LD_PRELOAD=${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/libgomp-d22c30c5.so.1.0.0
-
-          echo ${PIP_INSTALL_PATH}
-          echo ${PIP_INSTALL_PATH}
-          echo ${PIP_INSTALL_PATH}
-
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/mo_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_mo_convert.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: OVC Python API Tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test
-        run: |
-          # Import 'test_utils' installed in '/tests/python/openvino'
-          export PYTHONPATH=${INSTALL_TEST_DIR}/python
-          export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
-          export LD_PRELOAD=${PIP_INSTALL_PATH}/torch/lib/../../torch.libs/libgomp-d22c30c5.so.1.0.0
-
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/ovc_python_api_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_ovc_convert.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: Python Frontend tests
-        if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test ||
-            fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test
-        run: |
-          # to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so'
-          export LD_LIBRARY_PATH=${PIP_INSTALL_PATH}/openvino/libs:$LD_LIBRARY_PATH
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml
-
-      - name: PyTorch Layer Tests
-        timeout-minutes: 20
-        if: ${{ 'false' }} # Ticket: 126287
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test
-        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP32
-
-      - name: PyTorch torch.compile TORCHFX Layer Tests
-        if: ${{ 'false' }} # RuntimeError: Python 3.11+ not yet supported for torch.compile, torch 2.0.1 is installed on Linux ARM64, it works in torch 2.1.1
-        timeout-minutes: 20
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test
-        run: |
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_fx_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP32
-          PYTORCH_TRACING_MODE: TORCHFX
-
-      - name: PyTorch torch.compile TORCHSCRIPT Layer Tests
-        if: ${{ 'false' }} # RuntimeError: Python 3.11+ not yet supported for torch.compile, torch 2.0.1 is installed on Linux ARM64, it works in torch 2.1.1
-        timeout-minutes: 20
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test
-        run: |
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit_ts_backend --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP32
-          PYTORCH_TRACING_MODE: TORCHSCRIPT
-
-      - name: ONNX Layer Tests
-        timeout-minutes: 30
-        if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test
-        run: |
-          # requires 'unit_tests' from 'tools/mo'
-          export PYTHONPATH=${INSTALL_TEST_DIR}/mo:$PYTHONPATH
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${INSTALL_TEST_DIR}/TEST-onnx.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: TensorFlow 1 Layer Tests - TF FE
-        timeout-minutes: 30
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
-        run: |
-          # requires 'unit_tests' from 'mo'
-          export PYTHONPATH=${INSTALL_TEST_DIR}/mo
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf_fe.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: TensorFlow 2 Layer Tests - TF FE
-        timeout-minutes: 30
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
-        run: |
-          # requires 'unit_tests' from 'mo'
-          export PYTHONPATH=${INSTALL_TEST_DIR}/mo
-          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_fe.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: JAX Layer Tests - TF FE
-        timeout-minutes: 30
-        if: ${{ 'false' }}
-        #if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
-        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/jax_tests/ -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-jax.xml
-        env:
-          TEST_DEVICE: CPU
-
-      - name: TensorFlow 1 Layer Tests - Legacy FE
-        timeout-minutes: 30
-        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${INSTALL_TEST_DIR}/TEST-tf_Roll.xml
-
-      - name: TensorFlow 2 Layer Tests - Legacy FE
-        timeout-minutes: 30
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
-        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${INSTALL_TEST_DIR}/TEST-tf_Roll.xml
-
-      - name: TensorFlow 2 Layer Tests - Legacy FE
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test
-        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow2_keras_tests/test_tf2_keras_activation.py --ir_version=11 -k "sigmoid" --junitxml=${INSTALL_TEST_DIR}/TEST-tf2_Activation.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: TensorFlow Lite Layer Tests - TFL FE
-        timeout-minutes: 30
-        if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
-        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/tensorflow_lite_tests/ --junitxml=${INSTALL_TEST_DIR}/TEST-tfl_fe.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: Clone API snippets
-        if: ${{ always() }}
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: openvino/docs/snippets
-          path: ${{ env.OPENVINO_REPO }}
-          submodules: 'false'
-
-      - name: Docs Python snippets
-        if: ${{ always() }}
-        run: |
-          # to find 'snippets' module in docs
-          export PYTHONPATH=${OPENVINO_REPO}/docs
-          # for 'template' extension
-          export LD_LIBRARY_PATH=${INSTALL_TEST_DIR}:$LD_LIBRARY_PATH
-          python3 ${OPENVINO_REPO}/docs/snippets/main.py
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-python
-          path: |
-            ${{ env.INSTALL_TEST_DIR }}/TEST*.html
-            ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'warn'
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_python_unit_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04", "volumes": ["/mount:/mount"]}'
+      affected-components: ${{ needs.smart_ci.outputs.affected_components }}

   CPU_Functional_Tests:
     name: CPU functional tests
-    needs: [Build, Smart_CI]
-    timeout-minutes: 60
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
-    env:
-      OPENVINO_REPO: /__w/openvino/openvino/openvino
-      INSTALL_DIR: /__w/openvino/openvino/install
-      INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests
-      PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/functional_test_utils/layer_tests_summary/run_parallel.py
-      PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst
-    # if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
-    if: ${{ 'false' }} # Ticket: 126379
-    steps:
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Install OpenVINO dependencies
-        run: bash ${INSTALL_DIR}/install_dependencies/install_openvino_dependencies.sh -c=core -y
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: ${{ env.OPENVINO_REPO }}
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-
-      - name: Install python dependencies for run_parallel.py
-        run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt
-
-      - name: Restore tests execution time
-        uses: actions/cache/restore@v3
-        with:
-          path: ${{ env.PARALLEL_TEST_CACHE }}
-          key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp
-
-      - name: Intel CPU plugin func tests (parallel)
-        run: |
-          source ${INSTALL_DIR}/setupvars.sh
-          python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
-        timeout-minutes: 40
-
-      - name: Save tests execution time
-        uses: actions/cache/save@v3
-        if: github.ref_name == 'master'
-        with:
-          path: ${{ env.PARALLEL_TEST_CACHE }}
-          key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }}
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-functional-cpu
-          path: |
-            ${{ env.INSTALL_TEST_DIR }}/temp/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv
-            ${{ env.PARALLEL_TEST_CACHE }}
-          if-no-files-found: 'error'
+    if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_cpu_functional_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04'

   TensorFlow_Hub_Models_Tests:
     name: TensorFlow Hub Models tests
-    needs: [Build, Smart_CI]
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    timeout-minutes: ${{ github.event_name == 'schedule' && 400 || 5 }}
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
-      volumes:
-        - /mount/caches:/mount/caches
-    env:
-      OPENVINO_REPO: ${{ github.workspace }}/openvino
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-      MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
+    if: ${{ 'false' }} # TODO: Enable once the dependencies are ready for arm (no tensorflow-text available for arm from PyPI)
     # if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test ||
     #     fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
-    if: ${{ 'false' }} # TODO: Enable once the self-hosted runners are ready for them
-
-    steps:
-      - name: Check sudo
-        run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_x86_64
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_x86_64
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: 'openvino'
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-          self-hosted-runner: 'false'
-
-      - name: Install OpenVINO Python wheels
-        run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
-
-      - name: Install TF Hub tests requirements
-        run: |
-          python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt
-
-      - name: TensorFlow Hub Tests - TF FE
-        run: |
-          export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
-          python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_tf_fe.html --self-contained-html -v
-        env:
-          TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}}
-          TEST_DEVICE: CPU
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-tensorflow-hub-models
-          path: |
-            ${{ env.INSTALL_TEST_DIR }}/TEST*.html
-          if-no-files-found: 'error'
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_tensorflow_hub_models_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04"}'
+      event: ${{ github.event_name }}

   TensorFlow_Hub_Performance_Models_Tests:
     name: TensorFlow Hub Performance Models tests
-    needs: [Build, Smart_CI]
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    timeout-minutes: ${{ github.event_name == 'schedule' && 400 || 5 }}
-    env:
-      OPENVINO_REPO: ${{ github.workspace }}/openvino
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-      MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
+    if: ${{ 'false' }} # TODO: Enable once the dependencies are ready for arm (no tensorflow-text available for arm from PyPI)
     # if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test ||
     #     fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test
-    if: ${{ 'false' }} # TODO: Enable once the self-hosted runners are ready for them
-
-    steps:
-      - name: Check sudo
-        run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_x86_64
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_x86_64
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: 'openvino'
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-          self-hosted-runner: 'false'
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_tensorflow_hub_performance_models_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04"}'
+      event: ${{ github.event_name }}

-      - name: Install OpenVINO Python wheels
-        run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
-
-      - name: Install TF Hub tests requirements
-        run: |
-          python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/tf_hub_tests/requirements.txt
-
-      - name: Install Hub Performance tests requirements
-        run: |
-          python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/performance_tests/requirements.txt
-
-      - name: Performance Hub Tests
-        run: |
-          export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
-          python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/performance_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-tf_hub_performance.html --self-contained-html -v
-        env:
-          TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}}
-          TEST_DEVICE: CPU
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-tensorflow-hub-performance-models
-          path: |
-            ${{ env.INSTALL_TEST_DIR }}/TEST*.html
-          if-no-files-found: 'error'
-
-  # TODO: Enable once they are ready for self-hosted runners
   PyTorch_Models_Tests:
     name: PyTorch Models tests
-    needs: [Build, Smart_CI]
-    timeout-minutes: ${{ github.event_name == 'schedule' && 400 || 30 }}
-    defaults:
-      run:
-        shell: bash
-    runs-on: 'aks-linux-16-cores-arm'
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04
-      volumes:
-        - /mount/caches:/mount/caches
-    env:
-      OPENVINO_REPO: ${{ github.workspace }}/openvino
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-      MODEL_HUB_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/model_hub_tests
+    if: ${{ 'false' }} # TODO: Enable once the dependencies are ready for arm (no tensorflow-text available for arm from PyPI)
     # if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test
-    if: ${{ 'false' }} # TODO: Enable once the self-hosted runners are ready for them
-
-    steps:
-      - name: Check sudo
-        run: if [ "$(id -u)" -eq 0 ]; then apt update && apt --assume-yes install sudo; fi
-
-      - name: Install dependencies
-        run: |
-          # install git (required to build pip deps from the sources)
-          # install 'g++' to build 'detectron2' and 'natten' wheels
-          sudo apt-get install --assume-yes --no-install-recommends g++ git ca-certificates
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_x86_64
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_x86_64
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: 'openvino'
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-          self-hosted-runner: 'false'
-
-      - name: Install OpenVINO Python wheels
-        run: python3 -m pip install ${INSTALL_DIR}/tools/openvino-*
-
-      - name: Install PyTorch tests requirements
-        run: |
-          python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements.txt
-          python3 -m pip install -r ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/requirements_secondary.txt
-          echo "Available storage:"
-          df -h
-        env:
-          CPLUS_INCLUDE_PATH: ${{ env.Python_ROOT_DIR }}/include/python${{ env.PYTHON_VERSION }}
-
-      - name: PyTorch Models Tests
-        run: |
-          export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
-          python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
-        env:
-          TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}}
-          TEST_DEVICE: CPU
-          USE_SYSTEM_CACHE: False
-
-      - name: Available storage after tests
-        run: |
-          echo "Available storage:"
-          df -h
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-torch-models
-          path: |
-            ${{ env.INSTALL_TEST_DIR }}/TEST*.html
-          if-no-files-found: 'error'
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_pytorch_models_tests.yml
+    with:
+      runner: 'aks-linux-16-cores-arm'
+      container: '{"image": "openvinogithubactions.azurecr.io/dockerhub/ubuntu:20.04"}'
+      event: ${{ github.event_name }}

   Overall_Status:
-    name: ci/gha_overall_status
-    needs: [Smart_CI, Build, Debian_Packages, Samples, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests]
+    name: ci/gha_overall_status_linux_arm64
+    needs: [Smart_CI, Build, Debian_Packages, Samples, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests,
+            TensorFlow_Hub_Models_Tests, TensorFlow_Hub_Performance_Models_Tests, PyTorch_Models_Tests]
     if: ${{ always() }}
     runs-on: ubuntu-latest
     steps:
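Editor's note: the hunks above all apply one refactoring — each inline job body is deleted and replaced by a thin call into a shared `job_*.yml` reusable workflow, parameterized by runner label, image, and the Smart CI component data. The callee files are not part of this diff, so the following is only a hedged sketch of the interface they would have to expose for the `with:` blocks above to work: every key must be declared as a `workflow_call` input, and a JSON-encoded `container` input (as passed by ONNX_Runtime and Python_Unit_Tests above) has to be decoded with `fromJSON` before it can configure the job. The input names mirror the callers; everything else here is an assumption.

```yaml
# Hypothetical callee sketch -- not part of this diff.
name: Reusable test job
on:
  workflow_call:
    inputs:
      runner:
        description: 'Label of the runner to execute the job on'
        type: string
        required: true
      container:
        description: 'JSON-serialized container block, e.g. {"image": "ubuntu:20.04"}'
        type: string
        required: false
        default: '{"image": null}'
      affected-components:
        description: 'Smart CI output listing the components affected by this change'
        type: string
        required: true

jobs:
  Tests:
    runs-on: ${{ inputs.runner }}
    # The caller passes the container description as a string; fromJSON turns
    # it back into the object the `container` key expects. With the assumed
    # default above, "image": null would mean "run directly on the runner".
    container: ${{ fromJSON(inputs.container) }}
    steps:
      - name: Run component tests
        # Same Smart CI gating idiom as in the callers, applied to an input.
        if: fromJSON(inputs.affected-components).CPU.test
        run: echo "CPU component affected -- running its tests"
```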
diff --git a/.github/workflows/linux_conditional_compilation.yml b/.github/workflows/linux_conditional_compilation.yml
index c5d09112d2e7e7..cf69a3e0f51ba9 100644
--- a/.github/workflows/linux_conditional_compilation.yml
+++ b/.github/workflows/linux_conditional_compilation.yml
@@ -49,7 +49,7 @@ jobs:
     container:
       image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
       volumes:
-        - /mount/caches:/mount/caches
+        - /mount:/mount
       options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
     env:
       DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
@@ -60,6 +60,7 @@ jobs:
       GITHUB_WORKSPACE: '/__w/openvino/openvino'
       OPENVINO_REPO: /__w/openvino/openvino/openvino
       INSTALL_DIR: /__w/openvino/openvino/openvino_install
+      INSTALL_TEST_DIR: /__w/openvino/openvino/tests_install
       BUILD_DIR: /__w/openvino/openvino/openvino_build
       SELECTIVE_BUILD_STAT_DIR: /__w/openvino/openvino/selective_build_stat
       MODELS_PATH: /__w/openvino/openvino/testdata
@@ -163,7 +164,9 @@
         run: ${SCCACHE_PATH} --show-stats

       - name: Cmake install - OpenVINO
-        run: cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake
+        run: |
+          cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_DIR} -P ${BUILD_DIR}/cmake_install.cmake
+          cmake -DCMAKE_INSTALL_PREFIX=${INSTALL_TEST_DIR} -DCOMPONENT=tests -P ${BUILD_DIR}/cmake_install.cmake

       - name: Build C++ samples - OpenVINO build tree
         run: |
@@ -189,13 +192,26 @@
           tar -czvf ${BUILD_DIR}/openvino_selective_build_stat.tar.gz *
           popd

-          pushd ${OPENVINO_REPO}
+          pushd ${INSTALL_DIR}
+            tar -czvf ${BUILD_DIR}/openvino_package.tar.gz \
+              install_dependencies/install_openvino_dependencies.sh
+          popd
+
+          pushd ${INSTALL_TEST_DIR}
             tar -czvf ${BUILD_DIR}/openvino_tests.tar.gz \
-              bin/intel64/Release/ov_cpu_func_tests \
-              src/tests/test_utils/functional_test_utils/layer_tests_summary/* \
-              scripts/install_dependencies/*
+              tests/ov_cpu_func_tests \
+              tests/libtemplate_extension.so \
+              tests/functional_test_utils/layer_tests_summary/*
           popd

+      - name: Upload openvino package
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: openvino_package
+          path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz
+          if-no-files-found: 'error'
+
       - name: Upload selective build statistics package
         if: ${{ always() }}
         uses: actions/upload-artifact@v3
@@ -223,7 +239,7 @@ jobs:
     container:
       image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
       volumes:
-        - /mount/caches:/mount/caches
+        - /mount:/mount
       options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING
     env:
       DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
@@ -310,74 +326,23 @@ jobs:

   CPU_Functional_Tests:
     name: CPU functional tests
-    needs: [Build, Smart_CI]
-    timeout-minutes: 25
-    defaults:
-      run:
-        shell: bash
-    runs-on: aks-linux-8-cores-32gb
-    container:
-      image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
-    env:
-      OPENVINO_REPO: /__w/openvino/openvino/openvino
-      DEBIAN_FRONTEND: noninteractive # to prevent apt-get from waiting user input
-      INSTALL_TEST_DIR: /__w/openvino/openvino/install/tests
-      PARALLEL_TEST_SCRIPT: /__w/openvino/openvino/install/tests/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py
-      PARALLEL_TEST_CACHE: /__w/openvino/openvino/install/tests/test_cache.lst
     if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
-
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_cpu_functional_tests.yml
+    with:
+      runner: 'aks-linux-8-cores-32gb'
+      image: 'openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04'
+
+  Overall_Status:
+    name: ci/gha_overall_status_linux_cc
+    needs: [Smart_CI, Build, CC_Build, CPU_Functional_Tests]
+    if: ${{ always() }}
+    runs-on: ubuntu-latest
     steps:
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO tests package
-        run: tar -xvzf ${INSTALL_TEST_DIR}/openvino_tests.tar.gz -C ${INSTALL_TEST_DIR}
-
-      - name: Install OpenVINO dependencies
-        run: bash ${INSTALL_TEST_DIR}/scripts/install_dependencies/install_openvino_dependencies.sh -c=core -c=gpu -y
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: ${{ env.OPENVINO_REPO }}
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-
-      - name: Install python dependencies for run_parallel.py
-        run: python3 -m pip install -r ${INSTALL_TEST_DIR}/src/tests/test_utils/functional_test_utils/layer_tests_summary/requirements.txt
-
-      - name: Restore tests execution time
-        uses: actions/cache/restore@v3
-        with:
-          path: ${{ env.PARALLEL_TEST_CACHE }}
-          key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-tests-functional-cpu-stamp
-
-      - name: Intel CPU plugin func tests (parallel)
-        run: python3 ${PARALLEL_TEST_SCRIPT} -e ${INSTALL_TEST_DIR}/bin/intel64/Release/ov_cpu_func_tests -c ${PARALLEL_TEST_CACHE} -w ${INSTALL_TEST_DIR} -s suite -rf 0 -- --gtest_print_time=1 --gtest_filter=*smoke*
-        timeout-minutes: 20
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-functional-cpu
-          path: |
-            ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-            ${{ env.INSTALL_TEST_DIR }}/logs/failed/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/crashed/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/hanged/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/interapted/*.log
-            ${{ env.INSTALL_TEST_DIR }}/logs/disabled_tests.log
-          if-no-files-found: 'error'
+      - name: Check status of all jobs
+        if: >-
+          ${{
+            contains(needs.*.result, 'failure') ||
+            contains(needs.*.result, 'cancelled')
+          }}
+        run: exit 1
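Editor's note: the `Overall_Status` job added here (and to every other workflow in this PR) gives each workflow one stable check name — here `ci/gha_overall_status_linux_cc` — presumably so branch protection can require a single check instead of tracking each job by name. Two details of the aggregation idiom are easy to miss, so, purely as an annotated reading of the lines already shown above:

```yaml
# Annotated reading of the aggregation step above -- semantics, not new content.
Overall_Status:
  needs: [Smart_CI, Build, CC_Build, CPU_Functional_Tests]
  if: ${{ always() }}   # run even when a dependency failed or was skipped
  runs-on: ubuntu-latest
  steps:
    - name: Check status of all jobs
      # needs.*.result is an object filter: it expands to the array of all
      # dependency results, e.g. ['success', 'skipped', 'failure'].
      # 'skipped' is deliberately absent from the checks below, so jobs that
      # Smart CI switched off do not turn the aggregate status red.
      if: >-
        ${{
          contains(needs.*.result, 'failure') ||
          contains(needs.*.result, 'cancelled')
        }}
      run: exit 1
```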
diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml
index 83eebb5b54b7b8..5db7ed22a02707 100644
--- a/.github/workflows/linux_riscv.yml
+++ b/.github/workflows/linux_riscv.yml
@@ -49,7 +49,7 @@ jobs:
     container:
       image: openvinogithubactions.azurecr.io/dockerhub/ubuntu:22.04
       volumes:
-        - /mount/caches:/mount/caches
+        - /mount:/mount
     env:
       CMAKE_BUILD_TYPE: 'Release'
       CMAKE_GENERATOR: 'Ninja'
@@ -206,3 +206,17 @@ jobs:
           source ${OPENVINO_BUILD_DIR}/dependencies/deactivate_conanbuild.sh
         env:
           CMAKE_TOOLCHAIN_FILE: ${{ env.OPENVINO_BUILD_DIR }}/dependencies/conan_toolchain.cmake
+
+  Overall_Status:
+    name: ci/gha_overall_status_linux_riscv
+    needs: [Smart_CI, Build]
+    if: ${{ always() }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check status of all jobs
+        if: >-
+          ${{
+            contains(needs.*.result, 'failure') ||
+            contains(needs.*.result, 'cancelled')
+          }}
+        run: exit 1
diff --git a/.github/workflows/mac.yml b/.github/workflows/mac.yml
index d8ac800e18b7d5..2a59d9de4d50b2 100644
--- a/.github/workflows/mac.yml
+++ b/.github/workflows/mac.yml
@@ -33,27 +33,41 @@ env:
   PYTHON_VERSION: '3.11'

 jobs:
+
+  Smart_CI:
+    runs-on: ubuntu-latest
+    outputs:
+      affected_components: "${{ steps.smart_ci.outputs.affected_components }}"
+      skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}"
+    steps:
+      - name: checkout action
+        uses: actions/checkout@v4
+        with:
+          sparse-checkout: .github/actions/smart-ci
+
+      - name: Get affected components
+        id: smart_ci
+        uses: ./.github/actions/smart-ci
+        with:
+          repository: ${{ github.repository }}
+          pr: ${{ github.event.number }}
+          commit_sha: ${{ github.sha }}
+          component_pattern: "category: (.*)"
+          repo_token: ${{ secrets.GITHUB_TOKEN }}
+          skip_when_only_listed_labels_set: 'docs'
+          skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg'
+
   Build:
+    needs: Smart_CI
     timeout-minutes: 150
     defaults:
       run:
         shell: bash
-    strategy:
-      max-parallel: 2
-      fail-fast: false
-      matrix:
-        include:
-          - arhitecture: 'x86_64'
-            machine: 'macos-13-large'
-            macos_deployment_target: '10.12'
-          - arhitecture: 'arm64'
-            machine: 'macos-13-xlarge'
-            macos_deployment_target: '11.0'
-    runs-on: ${{ matrix.machine }}
+    runs-on: 'macos-13-large'
     env:
       CMAKE_BUILD_TYPE: 'Release'
       CMAKE_GENERATOR: 'Ninja Multi-Config'
-      MACOSX_DEPLOYMENT_TARGET: ${{ matrix.macos_deployment_target }}
+      MACOSX_DEPLOYMENT_TARGET: '10.12'
       CMAKE_CXX_COMPILER_LAUNCHER: ccache
       CMAKE_C_COMPILER_LAUNCHER: ccache
       OPENVINO_REPO: ${{ github.workspace }}/openvino
@@ -125,9 +139,9 @@ jobs:
           # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push
           save: ${{ github.ref_name == 'master' && 'true' || 'false' }}
           verbose: 2
-          key: ${{ runner.os }}-${{ matrix.arhitecture }}-main
+          key: ${{ runner.os }}-${{ runner.arch }}-main
          restore-keys: |
-            ${{ runner.os }}-${{ matrix.arhitecture }}-main
+            ${{ runner.os }}-${{ runner.arch }}-main

       - name: CMake configure
         run: |
@@ -184,7 +198,7 @@ jobs:
         if: ${{ always() }}
         uses: actions/upload-artifact@v3
         with:
-          name: openvino_package_${{ matrix.arhitecture }}
+          name: openvino_package
           path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz
           if-no-files-found: 'error'

@@ -192,650 +206,39 @@ jobs:
         if: ${{ always() }}
         uses: actions/upload-artifact@v3
         with:
-          name: openvino_tests_${{ matrix.arhitecture }}
+          name: openvino_tests
           path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz
           if-no-files-found: 'error'

   Samples:
-    needs: Build
-    timeout-minutes: 5
-    defaults:
-      run:
-        shell: bash
-    strategy:
-      max-parallel: 2
-      fail-fast: false
-      matrix:
-        include:
-          - arhitecture: 'x86_64'
-            machine: 'macos-13'
-          - arhitecture: 'arm64'
-            machine: 'macos-13-xlarge'
-    runs-on: ${{ matrix.machine }}
-    env:
-      OPENVINO_REPO: ${{ github.workspace }}/openvino
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-      BUILD_DIR: ${{ github.workspace }}/build
-
-    steps:
-
-      #
-      # Initialize OpenVINO
-      #
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${INSTALL_DIR}
-            tar -xzf openvino_package.tar.gz -C ${INSTALL_DIR}
-          popd
-
-          pushd ${INSTALL_TEST_DIR}
-            tar -xzf openvino_tests.tar.gz -C ${INSTALL_DIR}
-          popd
-
-      - name: Install dependencies
-        run: brew install coreutils
-
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: 'openvino'
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-          self-hosted-runner: 'false'
-
-      - name: Build cpp samples
-        run: ${INSTALL_DIR}/samples/cpp/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/cpp_samples
-        env:
-          CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
-
-      - name: Build c samples
-        run: ${INSTALL_DIR}/samples/c/build_samples.sh -i ${INSTALL_DIR} -b ${BUILD_DIR}/c_samples
-        env:
-          CMAKE_COMPILE_WARNING_AS_ERROR: 'ON'
-
-      #
-      # Tests
-      #
-
-      - name: Samples tests
-        run: |
-          export WORKSPACE=${INSTALL_DIR}
-          export IE_APP_PATH=${INSTALL_DIR}/samples_bin
-          export IE_APP_PYTHON_PATH=${INSTALL_DIR}/samples/python
-          export SHARE=${INSTALL_TEST_DIR}/smoke_tests/samples_smoke_tests_data
-
-          python3 -m pip install --ignore-installed PyYAML -r ${INSTALL_TEST_DIR}/smoke_tests/requirements.txt
-
-          source ${INSTALL_DIR}/setupvars.sh
-
-          python3 -m pytest -sv ${INSTALL_TEST_DIR}/smoke_tests \
-            --env_conf ${INSTALL_TEST_DIR}/smoke_tests/env_config.yml \
-            --junitxml=${INSTALL_TEST_DIR}/TEST-SamplesSmokeTests.xml
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ !cancelled() }}
-        with:
-          name: test-results-samples-${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'error'
+    needs: [ Build, Smart_CI ]
+    if: fromJSON(needs.smart_ci.outputs.affected_components).samples
+    uses: ./.github/workflows/job_samples_tests.yml
+    with:
+      runner: 'macos-13'
+      affected-components: ${{ needs.smart_ci.outputs.affected_components }}

   CXX_Unit_Tests:
-    name: C++ Unit tests
-    needs: Build
-    timeout-minutes: 20
-    defaults:
-      run:
-        shell: bash
-    strategy:
-      max-parallel: 2
-      fail-fast: false
-      matrix:
-        include:
-          - arhitecture: 'x86_64'
-            machine: 'macos-13'
-          - arhitecture: 'arm64'
-            machine: 'macos-13-xlarge'
-    runs-on: ${{ matrix.machine }}
-    env:
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-
-    steps:
-      #
-      # Dependencies
-      #
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${{ env.INSTALL_DIR }}
-            tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz || exit 1
-          popd
-          pushd ${{ env.INSTALL_TEST_DIR }}
-            tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz || exit 1
-          popd
-
-      #
-      # Tests
-      #
-
-      - name: OpenVINO Core Unit Tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml
-
-      - name: OpenVINO Inference Functional Tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml
-
-      - name: OpenVINO Inference Unit Tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml
-
-      - name: Low Precision Transformations Tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-
-          # Skips under Ticket: 122660
-          skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_LPT/FoldFakeQuantizeInTransformations.CompareFunctions*' || '' }}
-
-          ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 "$skip_filter" \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml
-
-      - name: OpenVINO Conditional compilation tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml
-
-      - name: IR frontend tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml
-
-      - name: PaddlePaddle frontend tests
-        if: ${{ 'false' }}
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml
-
-      - name: ONNX frontend tests
-        if: ${{ matrix.arhitecture == 'x86_64' }} # Ticket for ARM64: 122663
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-
-          ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml
-
-      - name: TensorFlow Common tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml
-
-      - name: TensorFlow frontend tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-
-          # Skips under Ticket: 122666
-          skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*CompileModelsTests.ModelWithSplitConvConcat*:*NgramCompilation*' || '' }}
-
-          ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 "$skip_filter" \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml
-
-      - name: TensorFlow Lite frontend tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml
-
-      - name: Transformations func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-
-          # Skips under Ticket: 122668
-          skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*TransformationTestsF.CompressQuantizeWeights*:*TransformationTests/CompressQuantizeWeightsTests.FusionTest*' || '' }}
-
-          ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 "$skip_filter" \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml
-
-      - name: Common test utils tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml
-
-      - name: Snippets func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml
-
-      - name: CPU plugin unit tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml
-
-      - name: ov_subgraphs_dumper_tests tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_subgraphs_dumper_tests --gtest_print_time=1 \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-ov_subgraphs_dumper_tests.xml
-
-      - name: Template OpImpl tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --device=TEMPLATE --gtest_filter="*OpImpl*" \
-            --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-TemplateOpImplTests.xml
-
-      - name: AUTO unit tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml
-
-      - name: AUTO func Tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml
-
-      - name: Template plugin func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 \
-            --gtest_filter=*smoke* \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml
-
-      - name: Inference Engine C API tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml
-
-      - name: OpenVINO C API tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 \
-            --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml
-
-      - name: AutoBatch unit tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml
-
-      - name: AutoBatch func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml
-
-      - name: Proxy Plugin func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml
-
-      - name: Hetero unit tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml
-
-      - name: Hetero func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-          ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        with:
-          name: test-results-cpp-${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'error'
+    name: C++ unit tests
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_cxx_unit_tests.yml
+    with:
+      runner: 'macos-13'
+      affected-components: ${{ needs.smart_ci.outputs.affected_components }}

   Python_Unit_Tests:
     name: Python unit tests
-    needs: Build
-    timeout-minutes: 55
-    defaults:
-      run:
-        shell: bash
-    strategy:
-      max-parallel: 2
-      fail-fast: false
-      matrix:
-        include:
-          - arhitecture: 'x86_64'
-            machine: 'macos-13'
-          - arhitecture: 'arm64'
-            machine: 'macos-13-xlarge'
-    runs-on: ${{ matrix.machine }}
-    env:
-      OPENVINO_REPO: ${{ github.workspace }}/openvino
-      OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-      LAYER_TESTS_INSTALL_DIR: ${{ github.workspace }}/install/tests/layer_tests
-    steps:
-      - name: Fetch setup_python action
-        uses: actions/checkout@v4
-        with:
-          sparse-checkout: |
-            .github/actions/setup_python/action.yml
-          sparse-checkout-cone-mode: false
-          path: 'openvino'
-
-      - name: Setup Python ${{ env.PYTHON_VERSION }}
-        uses: ./openvino/.github/actions/setup_python
-        with:
-          version: ${{ env.PYTHON_VERSION }}
-          should-setup-pip-paths: 'false'
-          self-hosted-runner: 'false'
-
-      #
-      # Dependencies
-      #
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${{ env.INSTALL_DIR }}
-            tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }}
-          popd
-
-          pushd ${{ env.INSTALL_TEST_DIR }}
-            tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }}
-          popd
-
-      - name: Install OpenVINO Python wheels
-        run: |
-          # Install the core OV wheel
-          python3 -m pip install ${{ env.INSTALL_DIR }}/tools/openvino-*.whl
-
-          # mxnet is only available on x86_64
-          extras_to_install="caffe,kaldi,onnx,tensorflow2,pytorch"
-          if [[ "${{ matrix.arhitecture }}" == "x86_64" ]]; then
-            extras_to_install="mxnet,$extras_to_install"
-          fi
-
-          # Find and install OV dev wheel
-          pushd ${{ env.INSTALL_DIR }}/tools
-            ov_dev_wheel_name=$(find . -name 'openvino_dev*.whl')
-            python3 -m pip install $ov_dev_wheel_name[$extras_to_install]
-          popd
-
-      - name: Install Python API tests dependencies
-        run: |
-          # For torchvision to OpenVINO preprocessing converter
-          python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/python/preprocess/torchvision/requirements.txt
-
-          # TODO: replace with Python API tests requirements
-          python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt
-
-      - name: Python API 1.0 Tests
-        run: |
-          python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph \
-            --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \
-            --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py \
-            --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_backend.py
-
-      - name: Python API 2.0 Tests
-        run: |
-          # For python imports to import pybind_mock_frontend
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH
-          # for 'template' extension
-          export DYLD_LIBRARY_PATH=${{ env.INSTALL_TEST_DIR }}:$DYLD_LIBRARY_PATH
-
-          python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino \
-            --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml \
-            --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py
-
-      - name: MO Python API Tests
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          # Used for 'test_utils' installed in '/python/openvino/test_utils'
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/python/openvino/test_utils:${{ env.INSTALL_TEST_DIR }}/python:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: OVC Python API Tests
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          # Used for 'test_utils' installed in '/python/openvino/test_utils'
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/python/openvino/test_utils:${{ env.INSTALL_TEST_DIR }}/python:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/ovc_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_ovc_convert.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: Model Optimizer unit tests
-        run: |
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}:$PYTHONPATH
-          python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests \
-            --ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet \
-            --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml
-
-      - name: PyTorch Layer Tests
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-          export PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }}:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/pytorch_tests -m precommit --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-pytorch.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: ONNX Layer Tests
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/onnx_tests -m "not launch_only_if_manually_specified and precommit" --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: TensorFlow 1 Layer Tests - TF FE
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_fe.xml
-        env:
-          TEST_DEVICE: CPU
-
-      - name: TensorFlow 2 Layer Tests - TF FE
-        if: ${{ 'false' }} # Ticket: 123322
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/ --use_new_frontend -m precommit_tf_fe --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_fe.xml
-        env:
-          TEST_DEVICE: CPU
-
-      - name: TensorFlow 1 Layer Tests - Legacy FE
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml
-
-      - name: TensorFlow 2 Layer Tests - Legacy FE
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow2_keras_tests/test_tf2_keras_activation.py \
-            --ir_version=11 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf2_Activation.xml -k "sigmoid"
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: TensorFlow Lite Layer Tests - TFL FE
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_lite_tests/ --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tfl_fe.xml
-        env:
-          TEST_DEVICE: CPU
-          TEST_PRECISION: FP16
-
-      - name: Python ONNX operators tests
-        if: ${{ 'false' }} # Ticket: 123325
-        run: |
-          # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately
-          python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/onnx -k 'not cuda' \
-            --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-onnx_frontend.xml \
-            --ignore=${{ env.INSTALL_TEST_DIR }}/onnx/test_python/test_zoo_models.py
-
-      - name: Python Frontend tests
-        run: |
-          python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt
-
-          export PYTHONPATH=${{ env.INSTALL_TEST_DIR }}/mo:$PYTHONPATH
-
-          # to allow 'libtest_builtin_extensions.so' to find 'libopenvino_onnx_frontend.so'
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-
-          python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml
-
-      # TODO: install to 'tests' component via cpack
-      - name: OVC unit tests
-        run: python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/ovc/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-OpenVinoConversion.xml
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        with:
-          name: test-results-python-${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'error'
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_python_unit_tests.yml
+    with:
+      runner: 'macos-13'
+      affected-components: ${{ needs.smart_ci.outputs.affected_components }}

   CPU_Functional_Tests:
     name: CPU functional tests
-    needs: Build
-    timeout-minutes: 25
-    defaults:
-      run:
-        shell: bash
-    strategy:
-      max-parallel: 2
-      fail-fast: false
-      matrix:
-        include:
-          # ticket: 122001
-          # - arhitecture: 'x86_64'
-          #   machine: 'macos-13'
-          - arhitecture: 'arm64'
-            machine: 'macos-13-xlarge'
-    runs-on: ${{ matrix.machine }}
-    env:
-      INSTALL_DIR: ${{ github.workspace }}/install
-      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
-
-    steps:
-      - name: Create Directories
-        run: mkdir -p ${{ env.INSTALL_DIR }} ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Download OpenVINO package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_package_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_DIR }}
-
-      - name: Download OpenVINO tests package
-        uses: actions/download-artifact@v3
-        with:
-          name: openvino_tests_${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}
-
-      - name: Extract OpenVINO packages
-        run: |
-          pushd ${{ env.INSTALL_DIR }}
-            tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz
-          popd
-          pushd ${{ env.INSTALL_TEST_DIR }}
-            tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz
-          popd
-
-      - name: CPU plugin func tests
-        run: |
-          source ${{ env.INSTALL_DIR }}/setupvars.sh
-
-          # Skips under Ticket: 122769
-          skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_nonzero/NonZeroLayerTest.Inference/IS*:*smoke_NormalizeL2_*:*Extension.XmlModelWithExtensionFromDSO*:*Extension.OnnxModelWithExtensionFromDSO*:*ONNXQuantizedModels/QuantizedModelsTests.MaxPool*:*ONNXQuantizedModels/QuantizedModelsTests.Convolution*:**' || '' }}
-
-          ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* "$skip_filter" --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml"
-
-      - name: Upload Test Results
-        uses: actions/upload-artifact@v3
-        if: ${{ always() }}
-        with:
-          name: test-results-functional-cpu-${{ matrix.arhitecture }}
-          path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
-          if-no-files-found: 'error'
+    # if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
+    if: ${{ 'false' }} # Ticket: 122001
+    needs: [ Build, Smart_CI ]
+    uses: ./.github/workflows/job_cpu_functional_tests.yml
+    with:
+      runner: 'macos-13'
}}/TEST-OpenVinoConversion.xml - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ always() }} - with: - name: test-results-python-${{ matrix.arhitecture }} - path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml - if-no-files-found: 'error' + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_python_unit_tests.yml + with: + runner: 'macos-13' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} CPU_Functional_Tests: name: CPU functional tests - needs: Build - timeout-minutes: 25 - defaults: - run: - shell: bash - strategy: - max-parallel: 2 - fail-fast: false - matrix: - include: - # ticket: 122001 - # - arhitecture: 'x86_64' - # machine: 'macos-13' - - arhitecture: 'arm64' - machine: 'macos-13-xlarge' - runs-on: ${{ matrix.machine }} - env: - INSTALL_DIR: ${{ github.workspace }}/install - INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests - - steps: - - name: Create Directories - run: mkdir -p ${{ env.INSTALL_DIR }} ${{ env.INSTALL_TEST_DIR }} - - - name: Download OpenVINO package - uses: actions/download-artifact@v3 - with: - name: openvino_package_${{ matrix.arhitecture }} - path: ${{ env.INSTALL_DIR }} - - - name: Download OpenVINO tests package - uses: actions/download-artifact@v3 - with: - name: openvino_tests_${{ matrix.arhitecture }} - path: ${{ env.INSTALL_TEST_DIR }} - - - name: Extract OpenVINO packages - run: | - pushd ${{ env.INSTALL_DIR }} - tar -xzf openvino_package.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_package.tar.gz - popd - pushd ${{ env.INSTALL_TEST_DIR }} - tar -xzf openvino_tests.tar.gz -C ${{ env.INSTALL_DIR }} && rm openvino_tests.tar.gz - popd - - - name: CPU plugin func tests - run: | - source ${{ env.INSTALL_DIR }}/setupvars.sh - - # Skips under Ticket: 122769 - skip_filter=${{ matrix.arhitecture == 'arm64' && '--gtest_filter=-*smoke_nonzero/NonZeroLayerTest.Inference/IS*:*smoke_NormalizeL2_*:*Extension.XmlModelWithExtensionFromDSO*:*Extension.OnnxModelWithExtensionFromDSO*:*ONNXQuantizedModels/QuantizedModelsTests.MaxPool*:*ONNXQuantizedModels/QuantizedModelsTests.Convolution*:**' || '' }} - - ${{ env.INSTALL_TEST_DIR }}/ov_cpu_func_tests --gtest_print_time=1 --gtest_filter=*smoke* "$skip_filter" --gtest_output=xml:"${{ env.INSTALL_TEST_DIR }}/TEST-CPUFuncTests.xml" - - - name: Upload Test Results - uses: actions/upload-artifact@v3 - if: ${{ always() }} - with: - name: test-results-functional-cpu-${{ matrix.arhitecture }} - path: ${{ env.INSTALL_TEST_DIR }}/TEST*.xml - if-no-files-found: 'error' + # if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test + if: ${{ 'false' }} # Ticket: 122001 + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_cpu_functional_tests.yml + with: + runner: 'macos-13' diff --git a/.github/workflows/mac_arm64.yml b/.github/workflows/mac_arm64.yml new file mode 100644 index 00000000000000..ea96b26c465a7e --- /dev/null +++ b/.github/workflows/mac_arm64.yml @@ -0,0 +1,241 @@ +name: macOS ARM64 (Python 3.11) +on: + workflow_dispatch: + schedule: + # at 00:00 on workdays + - cron: '0 0 * * 1,2,3,4,5' +# pull_request: +# paths-ignore: +# - '**/docs/**' +# - 'docs/**' +# - '**/**.md' +# - '**.md' +# - '**/layer_tests_summary/**' +# - '**/conformance/**' +# push: +# paths-ignore: +# - '**/docs/**' +# - 'docs/**' +# - '**/**.md' +# - '**.md' +# - '**/layer_tests_summary/**' +# - '**/conformance/**' +# branches: +# - master +# - 'releases/**' + +concurrency: + # github.ref is not unique in post-commit + group: ${{ github.event_name == 'push' && github.run_id || 
github.ref }}-mac-arm64 + cancel-in-progress: true + +env: + PYTHON_VERSION: '3.11' + +jobs: + Smart_CI: + runs-on: ubuntu-latest + outputs: + affected_components: "${{ steps.smart_ci.outputs.affected_components }}" + skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}" + steps: + - name: checkout action + uses: actions/checkout@v4 + with: + sparse-checkout: .github/actions/smart-ci + + - name: Get affected components + id: smart_ci + uses: ./.github/actions/smart-ci + with: + repository: ${{ github.repository }} + pr: ${{ github.event.number }} + commit_sha: ${{ github.sha }} + component_pattern: "category: (.*)" + repo_token: ${{ secrets.GITHUB_TOKEN }} + skip_when_only_listed_labels_set: 'docs' + skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg' + + Build: + needs: Smart_CI + timeout-minutes: 150 + defaults: + run: + shell: bash + runs-on: 'macos-13-xlarge' + env: + CMAKE_BUILD_TYPE: 'Release' + CMAKE_GENERATOR: 'Ninja Multi-Config' + MACOSX_DEPLOYMENT_TARGET: '11.0' + CMAKE_CXX_COMPILER_LAUNCHER: ccache + CMAKE_C_COMPILER_LAUNCHER: ccache + OPENVINO_REPO: ${{ github.workspace }}/openvino + OPENVINO_CONTRIB_REPO: ${{ github.workspace }}/openvino_contrib + INSTALL_DIR: ${{ github.workspace }}/openvino_install + INSTALL_TEST_DIR: ${{ github.workspace }}/tests_install + BUILD_DIR: ${{ github.workspace }}/build + steps: + - name: Clone OpenVINO + uses: actions/checkout@v4 + with: + path: 'openvino' + submodules: 'true' + + - name: Clone OpenVINO Contrib + uses: actions/checkout@v4 + with: + repository: 'openvinotoolkit/openvino_contrib' + path: 'openvino_contrib' + + # + # Print system info + # + + - name: System info + uses: ./openvino/.github/actions/system_info + + # + # Dependencies + # + + - name: Install build dependencies + run: brew install coreutils ninja scons + + - name: Setup Python ${{ env.PYTHON_VERSION }} + uses: ./openvino/.github/actions/setup_python + with: + version: ${{ env.PYTHON_VERSION }} + should-setup-pip-paths: 'false' + self-hosted-runner: 'false' + + - name: Install python dependencies + run: | + # For Python API + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/requirements.txt + + # For running Python API tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/src/compatibility/openvino/requirements-dev.txt + + # For running ONNX frontend unit tests + python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt + + # For running TensorFlow frontend unit tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt + + # For running Paddle frontend unit tests + python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt + + # + # Build + # + + - name: Setup ccache + uses: hendrikmuhs/ccache-action@v1.2 + with: + max-size: "2000M" + # Should save cache only if run in the master branch of the base repo + # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push + save: ${{ github.ref_name == 'master' && 'true' || 'false' }} + verbose: 2 + key: ${{ runner.os }}-${{ runner.arch }}-main + restore-keys: | + ${{ runner.os }}-${{ runner.arch }}-main + + - name: CMake configure + run: | + cmake \ + -G "${{ env.CMAKE_GENERATOR }}" \ + -DENABLE_CPPLINT=OFF \ + -DENABLE_NCC_STYLE=OFF \ + -DENABLE_TESTS=ON \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF 
\ + -DENABLE_STRICT_DEPENDENCIES=OFF \ + -DCMAKE_CXX_COMPILER_LAUNCHER=${{ env.CMAKE_CXX_COMPILER_LAUNCHER }} \ + -DCMAKE_C_COMPILER_LAUNCHER=${{ env.CMAKE_C_COMPILER_LAUNCHER }} \ + -S ${{ env.OPENVINO_REPO }} \ + -B ${{ env.BUILD_DIR }} + + - name: Cmake build - OpenVINO + run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + + - name: Show ccache stats + run: ccache --show-stats + + - name: Cmake install - OpenVINO + run: | + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_TEST_DIR }} -DCOMPONENT=tests -P ${{ env.BUILD_DIR }}/cmake_install.cmake + cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -DCOMPONENT=python_wheels -P ${{ env.BUILD_DIR }}/cmake_install.cmake + + - name: Pack Artifacts + run: | + pushd ${{ env.INSTALL_DIR }} + tar -czvf ${{ env.BUILD_DIR }}/openvino_package.tar.gz * + popd + + pushd ${{ env.INSTALL_TEST_DIR }} + tar -czvf ${{ env.BUILD_DIR }}/openvino_tests.tar.gz * + popd + + - name: Cmake & Build - OpenVINO Contrib + run: | + cmake \ + -DBUILD_nvidia_plugin=OFF \ + -DBUILD_java_api=OFF \ + -DCUSTOM_OPERATIONS="calculate_grid;complex_mul;fft;grid_sample;sparse_conv;sparse_conv_transpose" \ + -DOPENVINO_EXTRA_MODULES=${{ env.OPENVINO_CONTRIB_REPO }}/modules \ + -S ${{ env.OPENVINO_REPO }} \ + -B ${{ env.BUILD_DIR }} + cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} + + # + # Upload build artifacts + # + + - name: Upload openvino package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_package + path: ${{ env.BUILD_DIR }}/openvino_package.tar.gz + if-no-files-found: 'error' + + - name: Upload openvino tests package + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: openvino_tests + path: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz + if-no-files-found: 'error' + + Samples: + needs: Build + uses: ./.github/workflows/job_samples_tests.yml + with: + runner: 'macos-13-xlarge' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + + CXX_Unit_Tests: + name: C++ unit tests + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_cxx_unit_tests.yml + with: + runner: 'macos-13-xlarge' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + + Python_Unit_Tests: + name: Python unit tests + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_python_unit_tests.yml + with: + runner: 'macos-13-xlarge' + affected-components: ${{ needs.smart_ci.outputs.affected_components }} + + CPU_Functional_Tests: + name: CPU functional tests + if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test + needs: [ Build, Smart_CI ] + uses: ./.github/workflows/job_cpu_functional_tests.yml + with: + runner: 'macos-13-xlarge' diff --git a/.github/workflows/mo.yml b/.github/workflows/mo.yml index 69be4df6bf8fbc..f8956969b6a403 100644 --- a/.github/workflows/mo.yml +++ b/.github/workflows/mo.yml @@ -24,7 +24,7 @@ jobs: uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.10' diff --git a/.github/workflows/py_checks.yml b/.github/workflows/py_checks.yml index c97d5167e2b035..80d6cad5243af3 100644 --- a/.github/workflows/py_checks.yml +++ b/.github/workflows/py_checks.yml @@ -28,7 +28,7 @@ jobs: uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.8' diff --git 
a/.github/workflows/stale_prs_and_issues.yml b/.github/workflows/stale_prs_and_issues.yml index 0ea15bb6d4e6cb..deaf62781842e4 100644 --- a/.github/workflows/stale_prs_and_issues.yml +++ b/.github/workflows/stale_prs_and_issues.yml @@ -12,7 +12,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: stale-issue-message: 'This issue will be closed in a week because of 9 months of no activity.' stale-pr-message: 'This PR will be closed in a week because of 2 weeks of no activity.' diff --git a/.github/workflows/webassembly.yml b/.github/workflows/webassembly.yml index 60685489f6414f..75203f7215e8db 100644 --- a/.github/workflows/webassembly.yml +++ b/.github/workflows/webassembly.yml @@ -35,7 +35,7 @@ jobs: container: image: emscripten/emsdk volumes: - - /mount/caches:/mount/caches + - /mount:/mount options: -e SCCACHE_AZURE_BLOB_CONTAINER -e SCCACHE_AZURE_CONNECTION_STRING env: CMAKE_BUILD_TYPE: 'Release' diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 353a38666d7862..f4d59f63019782 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,22 +1,8 @@ name: Windows (VS 2019, Python 3.11) on: workflow_dispatch: -# pull_request: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' + pull_request: push: - paths-ignore: - - '**/docs/**' - - 'docs/**' - - '**/**.md' - - '**.md' - - '**/layer_tests_summary/**' - - '**/conformance/**' branches: - master concurrency: @@ -28,7 +14,31 @@ env: PYTHON_VERSION: '3.11' jobs: + Smart_CI: + runs-on: ubuntu-latest + outputs: + affected_components: "${{ steps.smart_ci.outputs.affected_components }}" + skip_workflow: "${{ steps.smart_ci.outputs.skip_workflow }}" + steps: + - name: checkout action + uses: actions/checkout@v4 + with: + sparse-checkout: .github/actions/smart-ci + + - name: Get affected components + id: smart_ci + uses: ./.github/actions/smart-ci + with: + repository: ${{ github.repository }} + pr: ${{ github.event.number }} + commit_sha: ${{ github.sha }} + component_pattern: "category: (.*)" + repo_token: ${{ secrets.GITHUB_TOKEN }} + skip_when_only_listed_labels_set: 'docs' + skip_when_only_listed_files_changed: '*.md,*.rst,*.png,*.jpg,*.svg,*/layer_tests_summary/*,*/conformance/*' + Build: + needs: Smart_CI timeout-minutes: 180 defaults: run: @@ -46,6 +56,8 @@ jobs: BUILD_DIR: "${{ github.workspace }}\\openvino_build" # TODO: specify version of compiler here SCCACHE_AZURE_KEY_PREFIX: windows2022_x86_64_Release + if: "!needs.smart_ci.outputs.skip_workflow" + steps: - name: Clone OpenVINO uses: actions/checkout@v4 @@ -93,6 +105,7 @@ jobs: # For running TensorFlow Lite frontend unit tests python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow_lite/tests/requirements.txt + # Disabled because of CVS-95904 # For running Paddle frontend unit tests # python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt @@ -186,7 +199,8 @@ jobs: if-no-files-found: 'error' Samples: - needs: Build + needs: [Build, Smart_CI] + if: fromJSON(needs.smart_ci.outputs.affected_components).samples timeout-minutes: 20 defaults: run: @@ -267,7 +281,7 @@ jobs: Python_Unit_Tests: name: Python unit tests - needs: Build + needs: [Build, Smart_CI] timeout-minutes: 75 defaults: run: @@ -337,17 +351,20 @@ jobs: python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/mo/requirements_dev.txt - name: Python API 1.0 Tests + #if: 
fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd run: | python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/pyngraph ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyngraph/tests_compatibility/test_onnx/test_zoo_models.py - name: Python API 2.0 Tests + #if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test # Ticket: 127101 shell: cmd run: | set PYTHONPATH=${{ env.LAYER_TESTS_INSTALL_DIR }};%PYTHONPATH% python3 -m pytest -sv ${{ env.INSTALL_TEST_DIR }}/pyopenvino ${{ env.PYTHON_STATIC_ARGS }} --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-Pyngraph.xml --ignore=${{ env.INSTALL_TEST_DIR }}/pyopenvino/tests/test_utils/test_utils.py - name: Model Optimizer UT + if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test shell: cmd run: | python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/mo/unit_tests --ignore=${{ env.INSTALL_TEST_DIR }}/mo/unit_tests/mo/front/mxnet --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-ModelOptimizer.xml @@ -363,6 +380,7 @@ jobs: TEST_DEVICE: CPU - name: ONNX Layer Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -375,6 +393,7 @@ jobs: TEST_PRECISION: FP16 - name: TensorFlow 1 Layer Tests - TF FE + if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -387,6 +406,7 @@ jobs: TEST_PRECISION: FP16 - name: TensorFlow 2 Layer Tests - TF FE + if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -399,12 +419,14 @@ jobs: TEST_DEVICE: CPU - name: TensorFlow 1 Layer Tests - Legacy FE + if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/tensorflow_tests/test_tf_Roll.py --ir_version=10 --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-tf_Roll.xml - name: TensorFlow 2 Layer Tests - Legacy FE + if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -414,6 +436,7 @@ jobs: TEST_PRECISION: FP16 - name: TensorFlow Lite Layer Tests - TFL FE + if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -423,6 +446,8 @@ jobs: TEST_PRECISION: FP16 - name: Python ONNX operators tests + if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test || + fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test shell: cmd run: | :: Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - ONNX Model Zoo tests are run separately @@ -431,26 +456,28 @@ jobs: --ignore=${{ env.INSTALL_TEST_DIR }}/onnx/test_python/test_zoo_models.py - name: MO Python API Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt :: Used for 'test_utils' installed in '\python\openvino\test_utils' set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\python\openvino\test_utils;${{ env.INSTALL_TEST_DIR 
}}\python;%PYTHONPATH% - + python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/mo_python_api_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_mo_convert.xml env: TEST_DEVICE: CPU TEST_PRECISION: FP16 - name: OVC Python API Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt :: Used for 'test_utils' installed in '\python\openvino\test_utils' set PYTHONPATH=${{ env.INSTALL_TEST_DIR }}\python\openvino\test_utils;${{ env.INSTALL_TEST_DIR }}\python;%PYTHONPATH% - + :: Skip test ticket: 126319 python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/ovc_python_api_tests -k "not test_ovc_tool_non_existng_output_dir" --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_ovc_convert.xml env: @@ -458,6 +485,8 @@ jobs: TEST_PRECISION: FP16 - name: Python Frontend tests + if: fromJSON(needs.smart_ci.outputs.affected_components).PyTorch_FE.test || + fromJSON(needs.smart_ci.outputs.affected_components).PDPD_FE.test shell: cmd run: | python3 -m pip install -r ${{ env.LAYER_TESTS_INSTALL_DIR }}/requirements.txt @@ -465,6 +494,7 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && python3 -m pytest ${{ env.LAYER_TESTS_INSTALL_DIR }}/py_frontend_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-test_py_fontend.xml - name: OVC unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).MO.test shell: cmd run: python3 -m pytest -s ${{ env.INSTALL_TEST_DIR }}/ovc/unit_tests --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-OpenVinoConversion.xml @@ -478,7 +508,7 @@ jobs: CXX_Unit_Tests: name: C++ unit tests - needs: Build + needs: [Build, Smart_CI] timeout-minutes: 25 defaults: run: @@ -511,73 +541,87 @@ jobs: popd - name: OpenVINO Core unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_core_unit_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-NGraphUT.xml - name: OpenVINO Inference functional tests + if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_inference_functional_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceFunc.xml - name: OpenVINO Inference unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).inference.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_inference_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceUnit.xml - name: Low Precision Transformations Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).LP_transformations.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_lp_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LpTransformations.xml - name: OpenVINO Conditional compilation tests + if: fromJSON(needs.smart_ci.outputs.affected_components).Core.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ConditionalCompilation.xml - name: IR frontend tests + if: fromJSON(needs.smart_ci.outputs.affected_components).IR_FE.test shell: cmd run: | call "${{ env.INSTALL_DIR 
}}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-IRFrontend.xml - - name: PaddlePaddle frontend tests # Disabled in Azure: https://github.com/openvinotoolkit/openvino/blob/master/.ci/azure/linux.yml#L403 + - name: PaddlePaddle frontend tests # Disabled because of CVS-95904 if: ${{ 'false' }} shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/paddle_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-PaddleTests.xml - name: ONNX frontend tests + if: fromJSON(needs.smart_ci.outputs.affected_components).ONNX_FE.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ONNXFrontend.xml - name: TensorFlow Common frontend tests + if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test || + fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_common_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowCommonFrontend.xml - name: TensorFlow frontend tests + if: fromJSON(needs.smart_ci.outputs.affected_components).TF_FE.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_frontend_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowFrontend.xml - name: TensorFlow Lite frontend tests + if: fromJSON(needs.smart_ci.outputs.affected_components).TFL_FE.test shell: cmd run: | :: Skip ticket: 126320 call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_tensorflow_lite_frontend_tests --gtest_print_time=1 --gtest_filter=-*test_decode_convert_equal_convert*:*test_convert_partially_equal_convert* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TensorFlowLiteFrontend.xml - name: Transformations func tests + if: fromJSON(needs.smart_ci.outputs.affected_components).transformations.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-Transformations.xml - name: Legacy Transformations func tests + if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_legacy_transformations_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-LegacyTransformations.xml - name: Inference Engine 1.0 unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineUnitTests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineUnitTests.xml @@ -588,11 +632,13 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_util_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-commonUtilsTests.xml - name: Snippets func tests + if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_snippets_func_tests --gtest_print_time=1 
--gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-SnippetsFuncTests.xml - name: CPU plugin unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_cpu_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-CPUUnitTests.xml @@ -608,26 +654,31 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_op_conformance_tests --gtest_print_time=1 --gtest_filter="*OpImpl*" --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateOpImplTests.xml - name: GNA plugin unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).GNA.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_gna_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-GNAUnitTests.xml - name: AUTO unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_unit_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_unit_tests.xml - name: AUTO func Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_func_tests.xml - name: Template plugin func tests + if: fromJSON(needs.smart_ci.outputs.affected_components).TEMPLATE.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_template_func_tests --gtest_print_time=1 --gtest_filter=*smoke* --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-TemplateFuncTests.xml - name: Inference Engine C API tests + if: fromJSON(needs.smart_ci.outputs.affected_components).C_API.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/InferenceEngineCAPITests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-InferenceEngineCAPITests.xml @@ -639,26 +690,31 @@ jobs: call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_capi_test --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OpenVINOCAPITests.xml - name: AutoBatch unit tests + if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_unit_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_unit_tests.xml - name: AutoBatch func tests + if: fromJSON(needs.smart_ci.outputs.affected_components).AUTO_BATCH.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_auto_batch_func_tests --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-ov_auto_batch_func_tests.xml - name: Proxy Plugin func tests + if: fromJSON(needs.smart_ci.outputs.affected_components).PROXY.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVProxyTests.xml - name: Hetero Unit Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_hetero_unit_tests 
--gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroUnitTests.xml - name: Hetero Func Tests + if: fromJSON(needs.smart_ci.outputs.affected_components).HETERO.test shell: cmd run: | call "${{ env.INSTALL_DIR }}\\setupvars.bat" && ${{ env.INSTALL_TEST_DIR }}/ov_hetero_func_tests --gtest_print_time=1 --gtest_output=xml:${{ env.INSTALL_TEST_DIR }}/TEST-OVHeteroFuncTests.xml @@ -673,7 +729,7 @@ jobs: CPU_Functional_Tests: name: CPU functional tests - needs: Build + needs: [Build, Smart_CI] timeout-minutes: 70 defaults: run: @@ -685,7 +741,7 @@ jobs: INSTALL_TEST_DIR: "${{ github.workspace }}\\install\\tests" PARALLEL_TEST_SCRIPT: "${{ github.workspace }}\\install\\tests\\functional_test_utils\\layer_tests_summary\\run_parallel.py" PARALLEL_TEST_CACHE: "${{ github.workspace }}\\install\\tests\\test_cache.lst" - + if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test steps: - name: Download OpenVINO package uses: actions/download-artifact@v3 @@ -763,3 +819,17 @@ jobs: ${{ env.INSTALL_TEST_DIR }}/logs/hash_table.csv ${{ env.PARALLEL_TEST_CACHE }} if-no-files-found: 'error' + + Overall_Status: + name: ci/gha_overall_status_windows + needs: [Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests] + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - name: Check status of all jobs + if: >- + ${{ + contains(needs.*.result, 'failure') || + contains(needs.*.result, 'cancelled') + }} + run: exit 1 diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index e2155ab06997f3..fcb74ab0438532 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -4,24 +4,24 @@ on: schedule: # run daily at 00:00 - cron: '0 0 * * *' -# pull_request: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' -# push: -# paths-ignore: -# - '**/docs/**' -# - 'docs/**' -# - '**/**.md' -# - '**.md' -# - '**/layer_tests_summary/**' -# - '**/conformance/**' -# branches: -# - master + # pull_request: + # paths-ignore: + # - '**/docs/**' + # - 'docs/**' + # - '**/**.md' + # - '**.md' + # - '**/layer_tests_summary/**' + # - '**/conformance/**' + push: + paths-ignore: + - '**/docs/**' + - 'docs/**' + - '**/**.md' + - '**.md' + - '**/layer_tests_summary/**' + - '**/conformance/**' + branches: + - master concurrency: # github.ref is not unique in post-commit @@ -37,7 +37,7 @@ jobs: defaults: run: shell: pwsh - runs-on: windows-latest-8-cores + runs-on: aks-win-16-cores-32gb env: CMAKE_BUILD_TYPE: 'Release' CMAKE_GENERATOR: 'Ninja Multi-Config' @@ -49,6 +49,8 @@ jobs: BUILD_DIR: "${{ github.workspace }}\\openvino_build" MODELS_PATH: "${{ github.workspace }}\\testdata" SELECTIVE_BUILD_STAT_DIR: "${{ github.workspace }}\\selective_build_stat" + # TODO: specify version of compiler here + SCCACHE_AZURE_KEY_PREFIX: windows2022_x86_64_itt_Release steps: - name: Clone OpenVINO uses: actions/checkout@v4 @@ -82,6 +84,11 @@ jobs: should-setup-pip-paths: 'false' self-hosted-runner: 'false' + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + - name: Install build dependencies run: choco install --no-progress ninja @@ -89,15 +96,16 @@ jobs: run: | # For running ONNX frontend unit tests python3 -m pip install --force-reinstall -r ${{ env.OPENVINO_REPO }}/src/frontends/onnx/tests/requirements.txt - + # For running 
TensorFlow frontend unit tests python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow/tests/requirements.txt - + # For running TensorFlow Lite frontend unit tests python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/tensorflow_lite/tests/requirements.txt - + + # Disabled because of CVS-95904 # For running Paddle frontend unit tests - python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt + # python3 -m pip install -r ${{ env.OPENVINO_REPO }}/src/frontends/paddle/tests/requirements.txt # # Build @@ -106,18 +114,6 @@ jobs: - name: Configure Developer Command Prompt for Microsoft Visual C++ uses: ilammy/msvc-dev-cmd@v1 - - name: Setup sccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - variant: sccache - max-size: "2000M" - # Should save cache only if run in the master branch of the base repo - # github.ref_name is 'ref/PR_#' in case of the PR, and 'branch_name' when executed on push - save: ${{ github.ref_name == 'master' && 'true' || 'false' }} - key: ${{ github.job }}-${{ runner.os }}-itt - restore-keys: | - ${{ github.job }}-${{ runner.os }}-itt - - name: CMake configure - CC COLLECT run: | cmake -G "${{ env.CMAKE_GENERATOR }}" ` @@ -133,10 +129,29 @@ jobs: -S ${{ env.OPENVINO_REPO }} ` -B ${{ env.BUILD_DIR }} + - name: Clean sccache stats + run: '& "$Env:SCCACHE_PATH" --zero-stats' + + # to get more information on the issue + # described in the next step + - name: Show which network ports are used + run: netstat -ban + + # the case is the following: + # sccache: error: An attempt was made to access a socket in a way forbidden by its access permissions. (os error 10013) + # This looks like the attempt to use + # a port below 1024 or a port + # which is occupied by another app + - name: Stop sccache server just in case + run: '& "$Env:SCCACHE_PATH" --stop-server' + - name: Cmake build - CC COLLECT run: | - cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} - cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib + cmake --build ${{ env.BUILD_DIR }} --parallel 8 --config ${{ env.CMAKE_BUILD_TYPE }} && ` + cmake --build ${{ env.BUILD_DIR }} --parallel 8 --config ${{ env.CMAKE_BUILD_TYPE }} --target sea_itt_lib + + - name: Show sccache stats + run: '& "$Env:SCCACHE_PATH" --show-stats' - name: Cmake install - OpenVINO run: cmake -DCMAKE_INSTALL_PREFIX=${{ env.INSTALL_DIR }} -P ${{ env.BUILD_DIR }}/cmake_install.cmake @@ -160,7 +175,7 @@ jobs: shell: cmd run: | set path=%path%;${{ env.OPENVINO_REPO }}\temp\tbb\bin - + python3 ${{ env.OPENVINO_REPO }}\thirdparty\itt_collector\runtool\sea_runtool.py ^ --bindir ${{ env.OPENVINO_REPO }}\bin\intel64\${{ env.CMAKE_BUILD_TYPE }} ^ -o ${{ env.SELECTIVE_BUILD_STAT_DIR }}\itt_stat ! 
${{ env.OPENVINO_REPO }}\bin\intel64\${{ env.CMAKE_BUILD_TYPE }}\benchmark_app.exe ^ @@ -216,7 +231,7 @@ jobs: defaults: run: shell: pwsh - runs-on: windows-latest-8-cores + runs-on: aks-win-16-cores-32gb env: CMAKE_BUILD_TYPE: 'Release' CMAKE_CXX_COMPILER_LAUNCHER: sccache @@ -225,6 +240,7 @@ jobs: BUILD_DIR: "${{ github.workspace }}\\openvino_build" MODELS_PATH: "${{ github.workspace }}\\testdata" SELECTIVE_BUILD_STAT_DIR: "${{ github.workspace }}\\selective_build_stat" + SCCACHE_AZURE_KEY_PREFIX: windows2022_x86_64_cc_Release steps: - name: Clone OpenVINO uses: actions/checkout@v4 @@ -249,6 +265,18 @@ jobs: - name: Extract selective build statistics package run: Expand-Archive ${{ env.SELECTIVE_BUILD_STAT_DIR }}/openvino_selective_build_stat.zip -DestinationPath "${{ env.SELECTIVE_BUILD_STAT_DIR }}" + - name: Setup Python ${{ env.PYTHON_VERSION }} + uses: ./openvino/.github/actions/setup_python + with: + version: ${{ env.PYTHON_VERSION }} + should-setup-pip-paths: 'false' + self-hosted-runner: 'false' + + - name: Install sccache + uses: mozilla-actions/sccache-action@v0.0.3 + with: + version: "v0.5.4" + - name: CMake configure - CC ON run: | cmake ` @@ -267,9 +295,15 @@ jobs: -S ${{ env.OPENVINO_REPO }} ` -B ${{ env.BUILD_DIR }} + - name: Clean sccache stats + run: '& "$Env:SCCACHE_PATH" --zero-stats' + - name: Cmake build - CC ON run: cmake --build ${{ env.BUILD_DIR }} --parallel --config ${{ env.CMAKE_BUILD_TYPE }} --target benchmark_app + - name: Show sccache stats + run: '& "$Env:SCCACHE_PATH" --show-stats' + - name: List bin files shell: cmd run: dir ${{ env.OPENVINO_REPO }}\bin\ /s @@ -283,10 +317,11 @@ jobs: CPU_Functional_Tests: name: CPU functional tests needs: Build + timeout-minutes: 70 defaults: run: shell: pwsh - runs-on: windows-latest-8-cores + runs-on: aks-win-8-cores-16gb env: OPENVINO_REPO: "${{ github.workspace }}\\openvino" INSTALL_TEST_DIR: "${{ github.workspace }}\\tests_install" diff --git a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake index bc512b9b229b02..fc9abc64b9e4cc 100644 --- a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake +++ b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake @@ -87,11 +87,6 @@ function(ov_set_temp_directory temp_variable source_tree_dir) endif() endfunction() -macro(set_temp_directory) - message(WARNING "'set_temp_directory' is deprecated. Please, use 'ov_set_temp_directory'") - ov_set_temp_directory(${ARGV}) -endmacro() - # # For cross-compilation # @@ -294,11 +289,6 @@ function(ov_mark_target_as_cc TARGET_NAME) add_dependencies(${TARGET_NAME} conditional_compilation_gen) endfunction() -function(ie_mark_target_as_cc TARGET_NAME) - message(WARNING "This function is deprecated. 
Please use ov_mark_target_as_cc(TARGET_NAME) instead.") - ov_mark_target_as_cc(${TARGET_NAME}) -endfunction() - include(python_requirements) # Code style utils diff --git a/cmake/developer_package/add_target_helpers.cmake b/cmake/developer_package/add_target_helpers.cmake index 92f4afbc23bbbe..238a9cde5b37eb 100644 --- a/cmake/developer_package/add_target_helpers.cmake +++ b/cmake/developer_package/add_target_helpers.cmake @@ -181,15 +181,3 @@ function(ov_add_test_target) COMPONENT ${ARG_COMPONENT} EXCLUDE_FROM_ALL) endfunction() - -# deprecated - -function(addIeTarget) - message(WARNING "'addIeTarget' is deprecated, please, use 'ov_add_target' instead") - ov_add_target(${ARGV}) -endfunction() - -function(addIeTargetTest) - message(WARNING "'addIeTargetTest' is deprecated, please, use 'ov_add_test_target' instead") - ov_add_test_target(${ARGV}) -endfunction() diff --git a/cmake/developer_package/api_validator/api_validator.cmake b/cmake/developer_package/api_validator/api_validator.cmake index 6749366a64db05..4eeb9e1e5e0b7e 100644 --- a/cmake/developer_package/api_validator/api_validator.cmake +++ b/cmake/developer_package/api_validator/api_validator.cmake @@ -196,10 +196,3 @@ endfunction() function(ov_add_api_validator_post_build_step) _ov_add_api_validator_post_build_step(${ARGN}) endfunction() - -# deprecated - -function(ie_add_api_validator_post_build_step) - message(WARNING "'ie_add_api_validator_post_build_step' is deprecated, use 'ov_add_api_validator_post_build_step' instead") - _ov_add_api_validator_post_build_step(${ARGN}) -endfunction() diff --git a/cmake/developer_package/clang_format/clang_format.cmake b/cmake/developer_package/clang_format/clang_format.cmake index 57319e48006938..b031c1e640bce9 100644 --- a/cmake/developer_package/clang_format/clang_format.cmake +++ b/cmake/developer_package/clang_format/clang_format.cmake @@ -130,8 +130,3 @@ function(ov_add_clang_format_target TARGET_NAME) add_dependencies(clang_format_check_all ${TARGET_NAME}) add_dependencies(clang_format_fix_all ${TARGET_NAME}_fix) endfunction() - -function(add_clang_format_target) - message(WARNING "add_clang_format_target is deprecated, use ov_add_clang_format_target instead") - ov_add_clang_format_target(${ARGV}) -endfunction() diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index 2c621d93425f4b..2e2e52b015c58d 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -32,11 +32,6 @@ macro(ov_disable_deprecated_warnings) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${ov_c_cxx_deprecated}") endmacro() -macro(disable_deprecated_warnings) - message(WARNING "'disable_deprecated_warnings' is deprecated, use 'ov_disable_deprecated_warnings' instead") - ov_disable_deprecated_warnings() -endmacro() - # # ov_deprecated_no_errors() # @@ -213,16 +208,6 @@ function(ov_disable_all_warnings) endforeach() endfunction() -# -# ie_enable_lto() -# -# Enables Link Time Optimization compilation -# -macro(ie_enable_lto) - message(WARNING "'ie_enable_lto' is deprecated, set 'INTERPROCEDURAL_OPTIMIZATION_RELEASE' target property instead") - set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ON) -endmacro() - # # ov_add_compiler_flags(]) # @@ -235,11 +220,6 @@ macro(ov_add_compiler_flags) endforeach() endmacro() -macro(ie_add_compiler_flags) - message(WARNING "'ie_add_compiler_flags' is deprecated, use 'ov_add_compiler_flags' instead") - ov_add_compiler_flags(${ARGN}) -endmacro() - # # 
ov_force_include(<target> <PUBLIC | PRIVATE | INTERFACE> <header file>
) # diff --git a/cmake/developer_package/faster_build.cmake b/cmake/developer_package/faster_build.cmake index f70274f465070c..b9ad18dfa98570 100644 --- a/cmake/developer_package/faster_build.cmake +++ b/cmake/developer_package/faster_build.cmake @@ -19,10 +19,3 @@ function(ov_build_target_faster TARGET_NAME) target_precompile_headers(${TARGET_NAME} ${FASTER_BUILD_PCH}) endif() endfunction() - -# deprecated - -function(ie_faster_build) - message(WARNING "ie_faster_build is deprecated, use ov_build_target_faster instead") - ov_build_target_faster(${ARGV}) -endfunction() diff --git a/cmake/developer_package/frontends/frontends.cmake b/cmake/developer_package/frontends/frontends.cmake index 1a037c5ab72309..0b14cabe54a05e 100644 --- a/cmake/developer_package/frontends/frontends.cmake +++ b/cmake/developer_package/frontends/frontends.cmake @@ -57,10 +57,10 @@ function(ov_generate_frontends_hpp) # for some reason dependency on source files does not work # so, we have to use explicit target and make it dependency for frontend_common add_custom_target(_ov_frontends_hpp DEPENDS ${ov_frontends_hpp}) - add_dependencies(frontend_common_obj _ov_frontends_hpp) + add_dependencies(openvino_frontend_common_obj _ov_frontends_hpp) # add dependency for object files - get_target_property(sources frontend_common_obj SOURCES) + get_target_property(sources openvino_frontend_common_obj SOURCES) foreach(source IN LISTS sources) if("${source}" MATCHES "\\$\\") # object library @@ -220,6 +220,7 @@ macro(ov_add_frontend) PUBLIC $ PRIVATE + $ ${frontend_root_dir}/src ${CMAKE_CURRENT_BINARY_DIR}) @@ -342,6 +343,7 @@ macro(ov_add_frontend) install(DIRECTORY ${${TARGET_NAME}_INCLUDE_DIR}/openvino DESTINATION ${FRONTEND_INSTALL_INCLUDE} COMPONENT ${dev_component} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL} FILES_MATCHING PATTERN "*.hpp") # public target name diff --git a/cmake/developer_package/options.cmake b/cmake/developer_package/options.cmake index 4506d85a027f92..7a9baa0b41de24 100644 --- a/cmake/developer_package/options.cmake +++ b/cmake/developer_package/options.cmake @@ -55,20 +55,3 @@ function (ov_print_enabled_features) endforeach() message(STATUS "") endfunction() - -# deprecated - -macro (ie_option variable description value) - message(WARNING "'ie_option' is deprecated, please, use 'ov_option' instead") - ov_option(${variable} "${description}" ${value}) -endmacro() - -macro(ie_dependent_option variable description def_value condition fallback_value) - message(WARNING "'ie_dependent_option' is deprecated, please, use 'ov_dependent_option' instead") - ov_dependent_option(${variable} "${description}" ${def_value} "${condition}" ${fallback_value}) -endmacro() - -function(print_enabled_features) - message(WARNING "'print_enabled_features' is deprecated, please, use 'ov_print_enabled_features' instead") - ov_print_enabled_features() -endfunction() diff --git a/cmake/developer_package/packaging/packaging.cmake b/cmake/developer_package/packaging/packaging.cmake index 2279580040f736..ff970e0b040cc5 100644 --- a/cmake/developer_package/packaging/packaging.cmake +++ b/cmake/developer_package/packaging/packaging.cmake @@ -241,10 +241,3 @@ macro(ov_cpack) include(CPack) endmacro() - -# deprecated - -macro(ie_cpack) - message(WARNING "'ie_cpack' is deprecated. 
Please, use 'ov_cpack'") - ov_cpack(${ARGV}) -endmacro() diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index a8ba97ad9fa27d..16a9e935a896c8 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -135,9 +135,6 @@ function(ov_add_plugin) install(TARGETS ${OV_PLUGIN_NAME} LIBRARY DESTINATION ${OV_CPACK_PLUGINSDIR} COMPONENT ${install_component}) - install(TARGETS ${OV_PLUGIN_NAME} - LIBRARY DESTINATION ${OV_CPACK_PLUGINSDIR} - COMPONENT ${install_component}) else() ov_install_static_lib(${OV_PLUGIN_NAME} ${OV_CPACK_COMP_CORE}) endif() @@ -167,11 +164,6 @@ function(ov_add_plugin) endif() endfunction() -function(ie_add_plugin) - message(WARNING "'ie_add_plugin' is deprecated. Please, use 'ov_add_plugin'") - ov_add_plugin(${ARGN}) -endfunction() - # # ov_register_in_plugins_xml(MAIN_TARGET
) # @@ -263,14 +255,6 @@ macro(ov_register_plugins) endif() endmacro() -# -# ie_register_plugins() -# -macro(ie_register_plugins) - message(WARNING "'ie_register_plugins' is deprecated. Please, use 'ov_register_plugins'") - ov_register_plugins(${ARGN}) -endmacro() - # # ov_target_link_plugins() # diff --git a/cmake/developer_package/version.cmake b/cmake/developer_package/version.cmake index effb320014452a..4ecf558225fb65 100644 --- a/cmake/developer_package/version.cmake +++ b/cmake/developer_package/version.cmake @@ -166,28 +166,6 @@ macro(ov_parse_ci_build_number repo_root) endif() endmacro() -macro (addVersionDefines FILE) - message(WARNING "'addVersionDefines' is deprecated. Please, use 'ov_add_version_defines'") - - set(__version_file ${FILE}) - if(NOT IS_ABSOLUTE ${__version_file}) - set(__version_file "${CMAKE_CURRENT_SOURCE_DIR}/${__version_file}") - endif() - if(NOT EXISTS ${__version_file}) - message(FATAL_ERROR "${FILE} does not exists in current source directory") - endif() - foreach (VAR ${ARGN}) - if (DEFINED ${VAR} AND NOT "${${VAR}}" STREQUAL "") - set_property( - SOURCE ${__version_file} - APPEND - PROPERTY COMPILE_DEFINITIONS - ${VAR}="${${VAR}}") - endif() - endforeach() - unset(__version_file) -endmacro() - macro (ov_add_version_defines FILE TARGET) set(__version_file ${FILE}) if(NOT IS_ABSOLUTE ${__version_file}) diff --git a/cmake/developer_package/whole_archive.cmake b/cmake/developer_package/whole_archive.cmake index 0ad00055fbfb0e..c15a5c378a3181 100644 --- a/cmake/developer_package/whole_archive.cmake +++ b/cmake/developer_package/whole_archive.cmake @@ -51,10 +51,3 @@ function(ov_target_link_whole_archive targetName) target_link_libraries(${targetName} PRIVATE ${libs}) endif() endfunction() - -# deprecated - -function(ieTargetLinkWholeArchive) - message(WARNING "'ieTargetLinkWholeArchive' is deprecated, use 'ov_target_link_whole_archive' instead") - ov_target_link_whole_archive(${ARGN}) -endfunction() diff --git a/cmake/extra_modules.cmake b/cmake/extra_modules.cmake index 6c392fcc6eed12..dae1cb0b2e045e 100644 --- a/cmake/extra_modules.cmake +++ b/cmake/extra_modules.cmake @@ -2,38 +2,6 @@ # SPDX-License-Identifier: Apache-2.0 # -function(ie_generate_dev_package_config) - # dummy check that OpenCV is here - find_package(OpenCV QUIET) - if(OpenCV_VERSION VERSION_LESS 3.0) - set(OpenCV_FOUND OFF) - endif() - - # export all targets with prefix and use them during extra modules build - export(TARGETS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS} NAMESPACE IE:: - APPEND FILE "${CMAKE_BINARY_DIR}/inference_engine_developer_package_targets.cmake") - add_custom_target(ie_dev_targets DEPENDS ${_OPENVINO_DEVELOPER_PACKAGE_TARGETS}) - - set(PATH_VARS "OpenVINO_SOURCE_DIR") - if(ENABLE_SAMPLES OR ENABLE_TESTS) - list(APPEND PATH_VARS "gflags_BINARY_DIR") - # if we've found system gflags - if(gflags_DIR) - set(gflags_BINARY_DIR "${gflags_DIR}") - endif() - endif() - - configure_package_config_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in" - "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig.cmake" - INSTALL_DESTINATION share # not used - PATH_VARS ${PATH_VARS} - NO_CHECK_REQUIRED_COMPONENTS_MACRO) - - configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/InferenceEngineConfig-version.cmake.in" - "${CMAKE_BINARY_DIR}/InferenceEngineDeveloperPackageConfig-version.cmake" - @ONLY) -endfunction() - function(ov_generate_dev_package_config) # dummy check that OpenCV is here find_package(OpenCV QUIET) @@ -207,7 +175,6 @@ endfunction() 
# this OpenVINODeveloperPackageConfig.cmake is not used during extra modules build # since it's generated after modules are configured -ie_generate_dev_package_config() ov_generate_dev_package_config() # extra modules must be registered after inference_engine library diff --git a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in b/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in deleted file mode 100644 index a98b4207e285d2..00000000000000 --- a/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -@PACKAGE_INIT@ - -include(CMakeFindDependencyMacro) - -message(WARNING "find_package(InferenceEngineDeveloperPackage) is deprecated and will be removed in 2024.0 release. Please, use find_package(OpenVINODeveloperPackage)") - -# TODO: remove after changing [private plugins] -set_and_check(OpenVINO_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU -set_and_check(OpenVINO_MAIN_SOURCE_DIR "@OpenVINO_SOURCE_DIR@") # NPU - -# Variables to export in plugin's projects - -set(ov_options "@OV_OPTIONS@") -list(APPEND ov_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER - CMAKE_CXX_LINKER_LAUNCHER CMAKE_C_LINKER_LAUNCHER - CMAKE_INSTALL_PREFIX CPACK_GENERATOR) - -if(APPLE) - list(APPEND ov_options CMAKE_OSX_ARCHITECTURES CMAKE_OSX_DEPLOYMENT_TARGET) -endif() - -get_property(_OV_GENERATOR_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) -if(_OV_GENERATOR_MULTI_CONFIG) - list(APPEND ov_options CMAKE_CONFIGURATION_TYPES) - if(CMAKE_GENERATOR MATCHES "^Ninja Multi-Config$") - list(APPEND ov_options CMAKE_DEFAULT_BUILD_TYPE) - endif() -else() - list(APPEND ov_options CMAKE_BUILD_TYPE) -endif() -unset(_OV_GENERATOR_MULTI_CONFIG) - -file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path) - -message(STATUS "The following CMake options are exported from Inference Engine Developer package") -message(" ") -foreach(option IN LISTS ov_options) - if(NOT DEFINED "${option}") - load_cache("${cache_path}" READ_WITH_PREFIX "" ${option}) - endif() - message(" ${option}: ${${option}}") -endforeach() -message(" ") - -# for samples in 3rd party projects -if(ENABLE_SAMPLES) - set_and_check(gflags_DIR "@gflags_BINARY_DIR@") -endif() - -# Disable warning as error for private components -set(CMAKE_COMPILE_WARNING_AS_ERROR OFF) - -# -# Content -# - -find_dependency(OpenVINODeveloperScripts - PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -find_dependency(InferenceEngine - PATHS "${CMAKE_CURRENT_LIST_DIR}" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -find_dependency(ngraph - PATHS "${CMAKE_CURRENT_LIST_DIR}" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) - -if(TARGET openvino::runtime AND NOT TARGET IE::runtime) - add_library(IE::runtime INTERFACE IMPORTED) - set_target_properties(IE::runtime PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::runtime) -endif() - -# WA for cmake: it exports ngraph as IE::ngraph in the IE export list -# while we already have ngraph export in its own export list as ngraph::ngraph -if(TARGET ngraph::ngraph AND NOT TARGET IE::ngraph) - add_library(IE::ngraph INTERFACE IMPORTED) - set_target_properties(IE::ngraph PROPERTIES INTERFACE_LINK_LIBRARIES ngraph::ngraph) -endif() - -_ov_find_tbb() - -include("${CMAKE_CURRENT_LIST_DIR}/inference_engine_developer_package_targets.cmake") - -if(TARGET IE::ov_core_dev AND NOT TARGET openvino::core::dev) - add_library(openvino::core::dev INTERFACE 
IMPORTED) - set_target_properties(openvino::core::dev PROPERTIES - INTERFACE_LINK_LIBRARIES IE::ov_core_dev) -endif() - -if(TARGET IE::runtime::dev AND NOT TARGET openvino::runtime::dev) - add_library(openvino::runtime::dev INTERFACE IMPORTED) - set_target_properties(openvino::runtime::dev PROPERTIES - INTERFACE_LINK_LIBRARIES IE::runtime::dev) -endif() - -if(TARGET IE::reference AND NOT TARGET IE::ngraph_reference) - add_library(IE::ngraph_reference INTERFACE IMPORTED) - set_target_properties(IE::ngraph_reference PROPERTIES - INTERFACE_LINK_LIBRARIES IE::reference) -endif() - -if(ENABLE_SYSTEM_PUGIXML) - set(_ov_pugixml_pkgconfig_interface "@pugixml_FOUND@") - set(_ov_pugixml_cmake_interface "@PugiXML_FOUND@") - if(_ov_pugixml_pkgconfig_interface) - find_dependency(PkgConfig) - elseif(_ov_pugixml_cmake_interface) - find_dependency(PugiXML) - endif() - if(PugiXML_FOUND) - set_property(TARGET pugixml PROPERTY IMPORTED_GLOBAL TRUE) - add_library(IE::pugixml ALIAS pugixml) - elseif(PkgConfig_FOUND) - if(${CMAKE_FIND_PACKAGE_NAME}_FIND_QUIETLY) - set(pkg_config_quiet_arg QUIET) - endif() - if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED) - set(pkg_config_required_arg REQUIRED) - endif() - - pkg_search_module(pugixml - ${pkg_config_quiet_arg} - ${pkg_config_required_arg} - IMPORTED_TARGET GLOBAL - pugixml) - - unset(pkg_config_quiet_arg) - unset(pkg_config_required_arg) - - if(pugixml_FOUND) - add_library(IE::pugixml ALIAS PkgConfig::pugixml) - - # PATCH: on Ubuntu 18.04 pugixml.pc contains incorrect include directories - get_target_property(interface_include_dir PkgConfig::pugixml INTERFACE_INCLUDE_DIRECTORIES) - if(interface_include_dir AND NOT EXISTS "${interface_include_dir}") - set_target_properties(PkgConfig::pugixml PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "") - endif() - endif() - endif() - - # debian 9 case: no cmake, no pkg-config files - if(NOT TARGET IE::pugixml) - find_library(PUGIXML_LIBRARY NAMES pugixml DOC "Path to pugixml library") - if(PUGIXML_LIBRARY) - add_library(IE::pugixml INTERFACE IMPORTED GLOBAL) - set_target_properties(IE::pugixml PROPERTIES INTERFACE_LINK_LIBRARIES "${PUGIXML_LIBRARY}") - else() - message(FATAL_ERROR "Failed to find system pugixml in OpenVINO Developer Package") - endif() - endif() -endif() - -set(_ov_nlohmann_json_FOUND "@nlohmann_json_FOUND@") -if(_ov_nlohmann_json_FOUND) - find_dependency(nlohmann_json) - set_target_properties(nlohmann_json::nlohmann_json PROPERTIES IMPORTED_GLOBAL ON) - add_library(IE::nlohmann_json ALIAS nlohmann_json::nlohmann_json) -endif() -unset(_ov_nlohmann_json_FOUND) - -# inherit OpenCV from main IE project if enabled -if("@OpenCV_FOUND@") - # Use OpenCV_DIR from cache only if user doesn't define OpenCV_DIR - if(NOT OpenCV_DIR) - load_cache("${cache_path}" READ_WITH_PREFIX "" OpenCV_DIR) - endif() - find_dependency(OpenCV) -endif() - -# -# Extra Compile Flags -# - -# don't fail on strict compilation options in 3rd party modules -ov_dev_package_no_errors() - -# Don't threat deprecated API warnings as errors in 3rd party apps -ov_deprecated_no_errors() diff --git a/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.rst b/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.rst deleted file mode 100644 index dfa8108ffc25b6..00000000000000 --- a/docs/IE_PLUGIN_DG/LowPrecisionModelRepresentation.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. 
{#openvino_docs_ie_plugin_dg_lp_representation} - -Representation of low-precision models -====================================== -The goal of this document is to describe how optimized models are represented in OpenVINO Intermediate Representation (IR) and to provide guidance on the interpretation rules for such models at runtime. -Currently, there are two groups of optimization methods that can influence the IR after they are applied to the full-precision model: -- **Sparsity**. It is represented by zeros inside the weights, and it is up to the hardware plugin how to interpret these zeros (use the weights as is or apply special compression algorithms and sparse arithmetic). No additional mask is provided with the model. -- **Quantization**. The rest of this document is dedicated to the representation of quantized models. - -Representation of quantized models ----------------------------------- -The OpenVINO Toolkit represents all quantized models using the so-called FakeQuantize operation (see the description in [this document](@ref openvino_docs_ops_quantization_FakeQuantize_1)). This operation is very expressive and allows mapping values between arbitrary input and output ranges. The idea behind it is simple: we project (discretize) the input values to the low-precision data type using an affine transformation (with clamp and rounding) and then reproject the discrete values back to the original range and data type. It can be considered an emulation of the quantization process which happens at runtime. -To execute a particular DL operation in low precision, all its inputs should be quantized, i.e. there should be a FakeQuantize between the operation and the data blobs. The figure below shows an example of a quantized Convolution, which contains two FakeQuantize nodes: one for weights and one for activations (the bias is quantized using the same parameters). -![quantized_convolution] -
Figure 1. Example of quantized Convolution operation.
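To make the projection/reprojection idea concrete, below is a minimal NumPy sketch of the FakeQuantize arithmetic described above (an illustrative emulation only, not the runtime implementation; the function name and the ``levels=256`` default for the INT8 case are assumptions):

.. code-block:: py

    import numpy as np

    def fake_quantize(x, input_low, input_high, output_low, output_high, levels=256):
        # Project: clamp values into the input range, then discretize them into
        # "levels" steps using an affine transformation with rounding.
        x = np.clip(x, input_low, input_high)
        steps = np.round((x - input_low) / (input_high - input_low) * (levels - 1))
        # Reproject: map the discrete steps back to the original range and data type.
        return steps / (levels - 1) * (output_high - output_low) + output_low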
- -Starting from the OpenVINO 2020.2 release, all quantized models are represented in the compressed form: the weights of low-precision operations are converted into the target precision (e.g. INT8), which substantially reduces the model size. The rest of the parameters can be represented in FLOAT32 or FLOAT16 precision, depending on the full-precision model used as input to the quantization process. Figure 2 below shows an example of a part of the compressed IR. -![quantized_model_example] -
Figure 2. Example of compressed quantized model.
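As a toy illustration of the compressed form (a sketch assuming an unsigned 8-bit layout with a single per-tensor scale and non-constant weights; real IRs may store per-channel quantization parameters):

.. code-block:: py

    import numpy as np

    def compress_weights(weights, levels=256):
        # Convert FLOAT32 weights into the target precision (here UINT8) together
        # with the scale and offset needed to restore approximate original values.
        low, high = float(weights.min()), float(weights.max())
        scale = (high - low) / (levels - 1)  # assumes high > low
        quantized = np.round((weights - low) / scale).astype(np.uint8)
        return quantized, scale, low

    def decompress_weights(quantized, scale, low):
        # Restore an approximation of the original FLOAT32 weights.
        return quantized.astype(np.float32) * scale + low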
- -[quantized_convolution]: images/quantized_convolution.png -[quantized_model_example]: images/quantized_model_example.png diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.rst b/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.rst deleted file mode 100644 index 28f7cd295688f6..00000000000000 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Extending_Model_Optimizer_with_Caffe_Python_Layers.rst +++ /dev/null @@ -1,110 +0,0 @@ -# [LEGACY] Extending Model Optimizer with Caffe Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers} - - -.. meta:: - :description: Learn how to extract operator attributes in Model Optimizer to - support a custom Caffe operation written only in Python. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. - -This article provides instructions on how to support a custom Caffe operation written only in Python. For example, the -`Faster-R-CNN model `__ implemented in -Caffe contains a custom proposal layer written in Python. The layer is described in the -`Faster-R-CNN prototxt `__ in the following way: - -.. code-block:: sh - - layer { - name: 'proposal' - type: 'Python' - bottom: 'rpn_cls_prob_reshape' - bottom: 'rpn_bbox_pred' - bottom: 'im_info' - top: 'rois' - python_param { - module: 'rpn.proposal_layer' - layer: 'ProposalLayer' - param_str: "'feat_stride': 16" - } - } - - -This article describes only a procedure on how to extract operator attributes in Model Optimizer. The rest of the -operation enabling pipeline and information on how to support other Caffe operations (written in C++) is described in -the :doc:`Customize Model Optimizer ` guide. - -======================================== -Writing Extractor for Caffe Python Layer -======================================== - -Custom Caffe Python layers have an attribute ``type`` (defining the type of the operation) equal to ``Python`` and two -mandatory attributes ``module`` and ``layer`` in the ``python_param`` dictionary. The ``module`` defines the Python module name -with the layer implementation, while ``layer`` value is an operation type defined by a user. In order to extract -attributes for such an operation it is necessary to implement extractor class inherited from the -``CaffePythonFrontExtractorOp`` class instead of ``FrontExtractorOp`` class, used for standard framework layers. The ``op`` -class attribute value should be set to the ``module + "." + layer`` value so the extractor is triggered for this kind of -operation. - -Below is a simplified example of the extractor for the custom operation Proposal from the mentioned Faster-R-CNN model. -The full code with additional checks can be found `here `__. - -The sample code uses operation ``ProposalOp`` which corresponds to ``Proposal`` operation described in the :doc:`Available Operations Sets ` -page. For a detailed explanation of the extractor, refer to the source code below. - -.. 
code-block:: py - :force: - - from openvino.tools.mo.ops.proposal import ProposalOp - from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp - - - class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' # module + "." + layer - enabled = True # extractor is enabled - - @staticmethod - def extract_proposal_params(node, defaults): - param = node.pb.python_param # get the protobuf message representation of the layer attributes - # parse attributes from the layer protobuf message to a Python dictionary - attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str) - update_attrs = defaults - - # the operation expects ratio and scale values to be called "ratio" and "scale" while Caffe uses different names - if 'ratios' in attrs: - attrs['ratio'] = attrs['ratios'] - del attrs['ratios'] - if 'scales' in attrs: - attrs['scale'] = attrs['scales'] - del attrs['scales'] - - update_attrs.update(attrs) - ProposalOp.update_node_stat(node, update_attrs) # update the node attributes - - @classmethod - def extract(cls, node): - # define default values for the Proposal layer attributes - defaults = { - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [0.5, 1, 2], - 'scale': [8, 16, 32], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7 - } - cls.extract_proposal_params(node, defaults) - return cls.enabled - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility ` -* :doc:`Graph Traversal and Modification Using Ports and Connections ` -* :doc:`Model Optimizer Extensions ` - diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Extensions.rst b/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Extensions.rst deleted file mode 100644 index f857940468975d..00000000000000 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Extensions.rst +++ /dev/null @@ -1,60 +0,0 @@ -# [LEGACY] Model Optimizer Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions} - - -.. meta:: - :description: Learn about deprecated extensions, which enable injecting logic - to the model conversion pipeline without changing the Model - Optimizer core code. - -.. toctree:: - :maxdepth: 1 - :hidden: - - openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation - openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor - openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. - -Model Optimizer extensions enable you to inject some logic to the model conversion pipeline without changing the Model -Optimizer core code. There are three types of the Model Optimizer extensions: - -1. :doc:`Model Optimizer operation `. -2. A :doc:`framework operation extractor `. -3. 
A :doc:`model transformation `, which can be executed during front, middle or back phase of the model conversion. - -An extension is just a plain text file with a Python code. The file should contain a class (or classes) inherited from -one of extension base classes. Extension files should be saved to a directory with the following structure: - -.. code-block:: sh - - .// - ops/ - custom operations - front/ - framework independent front transformations - / - front transformations for models only and extractors for operations - / - front transformations for models only and extractors for operations - ... - middle/ - middle transformations - back/ - back transformations - -Model Optimizer uses the same layout internally to keep built-in extensions. The only exception is that the -``mo/ops/`` directory is also used as a source of the Model Optimizer operations due to historical reasons. - -.. note:: - The name of a root directory with extensions should not be equal to "extensions" because it will result in a name conflict with the built-in Model Optimizer extensions. - -.. note:: - Model Optimizer itself is built by using these extensions, so there is a huge number of examples of their usage in the Model Optimizer code. - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility ` -* :doc:`Graph Traversal and Modification Using Ports and Connections ` -* :doc:`Extending Model Optimizer with Caffe Python Layers ` - diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Extractor.rst b/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Extractor.rst deleted file mode 100644 index 88d26a4a8fc66e..00000000000000 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Extractor.rst +++ /dev/null @@ -1,113 +0,0 @@ -# [LEGACY] Operation Extractor {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor} - - -.. meta:: - :description: Learn about a deprecated generic extension in Model Optimizer, - which provides the operation extractor usable for all model - frameworks. - - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. - -Model Optimizer runs specific extractor for each operation in the model during the model loading. - -There are several types of Model Optimizer extractor extensions: - -1. The generic one, which is described in this article. -2. The special extractor for Caffe models with Python layers. This kind of extractor is described in the :doc:`Extending Model Optimizer with Caffe Python Layers ` guide. - -Generic extension provides a generic mechanism for the operation extractor applicable for all frameworks. Model Optimizer provides the ``mo.front.extractor.FrontExtractorOp`` class as a base class to implement the extractor. It has the ``extract`` class method, which gets the only parameter ``Node``, which corresponds to the graph node to extract data from. The operation description in the original framework format is stored in the attribute ``pb`` of the node. 
The extractor goal is to parse this attribute and save necessary attributes to the corresponding node of the graph. Consider the extractor for the ``Const`` TensorFlow operation (refer to the ``extensions/front/tf/const_ext.py`` file): - -.. code-block:: py - :force: - - from openvino.tools.mo.front.extractor import FrontExtractorOp - from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, tf_tensor_content - from openvino.tools.mo.ops.const import Const - - - class ConstExtractor(FrontExtractorOp): - # The "op" class attribute defines a type of the operation in the framework (in this case it is a TensorFlow), - # for which the extractor should be triggered. - op = 'Const' - enabled = True # The flag that indicates that this extractor is enabled. - - @classmethod - def extract(cls, node): # The entry point of the extractor. - # The `node.pb` attribute stores the TensorFlow representation of the operation, which is a Protobuf message of the - # specific format. In particular, the message contains the attribute called "value" containing the description of - # the constant. The string "pb.attr["value"].tensor" is just a Python binding for Protobuf message parsing. - pb_tensor = node.pb.attr["value"].tensor - # Get the shape of the tensor from the protobuf message, using the helper function "tf_tensor_shape". - shape = tf_tensor_shape(pb_tensor.tensor_shape) - # Create a dictionary with necessary attributes. - attrs = { - 'shape': shape, - # Get the tensor value, using "tf_tensor_content" helper function. - 'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor), - # Get the tensor data type, using "tf_dtype_extractor" helper function. - 'data_type': tf_dtype_extractor(pb_tensor.dtype), - } - # Update the node attributes, using default attributes from the "Const" operation and attributes saved to the - # "attrs" dictionary. - Const.update_node_stat(node, attrs) - return cls.enabled - -Consider another example with an extractor of the ``Constant`` ONNX operation (refer to the ``extensions/front/onnx/const_ext.py`` file): - -.. code-block:: py - :force: - - from onnx import numpy_helper - from onnx.numpy_helper import to_array - - from openvino.tools.mo.front.extractor import FrontExtractorOp - from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - from openvino.tools.mo.ops.const import Const - - - class ConstantExtractor(FrontExtractorOp): - op = 'Constant' - enabled = True - - @classmethod - def extract(cls, node): - # Use "onnx_attr" helper method, which parses the Protobuf representation of the operation saved in the "node". - # Gets the value of the attribute with name "value" as "TensorProto" type (specified with a keyword "t"). - pb_value = onnx_attr(node, 'value', 't') - # Use "numpy_helper.to_array()" ONNX helper method to convert "TensorProto" object to a numpy array. - value = numpy_helper.to_array(pb_value) - - attrs = { - 'data_type': value.dtype, - 'value': value, - } - # Update the node attributes, using default attributes from the "Const" operation and attributes saved to the - # "attrs" dictionary. - Const.update_node_stat(node, attrs) - return cls.enabled - -The extractors for operations from different frameworks work similarly. The only difference is in the helper methods used to parse operation attributes encoded with a framework-specific representation. - -A common practice is to use ``update_node_stat()`` method of the dedicated ``Op`` class to update the node attributes. This method does the following: - -1. 
Sets values for common attributes like ``op``, ``type``, ``infer``, ``in_ports_count``, ``out_ports_count``, ``version`` to values specific to the dedicated operation (``Const`` operation in this case). -2. Uses ``supported_attrs()`` and ``backend_attrs()`` methods, defined in the ``Op`` class to update specific node attribute ``IE``. The IR emitter uses the value stored in the ``IE`` attribute to pre-process attribute values and save them to IR. -3. Optionally sets additional attributes provided to the ``update_node_stat()`` function as a second parameter. Usually these attributes are parsed from the particular instance of the operation. - -.. note:: - Model Optimizer uses numpy arrays to store values and numpy arrays of ``np.int64`` type to store shapes in the graph. - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility ` -* :doc:`Graph Traversal and Modification Using Ports and Connections ` -* :doc:`Model Optimizer Extensions ` -* :doc:`Extending Model Optimizer with Caffe Python Layers ` - diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Operation.rst b/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Operation.rst deleted file mode 100644 index 03dbe96e2eba4c..00000000000000 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Operation.rst +++ /dev/null @@ -1,110 +0,0 @@ -# [LEGACY] Model Optimizer Operation {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation} - - -.. meta:: - :description: Learn about the Op class, that contains operation attributes, - which are set to a node of the graph created during model - conversion with Model Optimizer. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. - -Model Optimizer defines a ``mo.ops.Op`` class (``Op`` will be used later in the document to be short), which is a base class -for an operation used in the Model Optimizer. The instance of the ``Op`` class serves several purposes: - -1. Stores the operation attributes. -2. Stores the operation shape/value and type inference functions. -3. Defines operation attributes to be saved to the corresponding IR section. -4. Contains convenient methods to create a graph node from an ``Op`` object instance and connect it with the existing graph. -5. Used in the extractors to store parsed attributes and operation specific attributes in the dedicated graph node. - -It is important to mention that there is no connection between the instance of the ``Op`` class and the ``Node`` object -created from it. The ``Op`` class is just a container for attributes describing the operation. Model Optimizer uses the ``Op`` -class during a model conversion to create a node of the graph with attributes copied from the ``Op`` class instance. Graph -manipulations are performed with graph ``Nodes`` and their attributes and does not involve ``Ops``. - -There are a number of common attributes used in the operations. Below is the list of these attributes with description. - -* ``id`` — **(Mandatory)** — unique identifier of a node in a graph. 
Generated automatically, equal to the number of nodes in the graph plus 1 if not specified. -* ``name`` — **(Mandatory)** — name of the operation. Generated automatically, equal to the ``id`` if not specified. -* ``type`` — **(Mandatory)** — type of the operation according to the :doc:`opset specification `. For the internal Model Optimizer operations, this attribute should be set to ``None``. The model conversion fails if an operation with ``type`` equal to ``None`` comes to the IR emitting phase. -* ``version`` — **(Mandatory)** — the operation set (opset) name the operation belongs to. If not specified, Model Optimizer sets it equal to ``experimental``. For more information about operation sets, refer to :doc:`OpenVINO Model Representation ` section. -* ``op`` — Model Optimizer type of the operation. In many cases, the value of ``type`` is equal to the value of ``op``. However, when Model Optimizer cannot instantiate the opset operation during model loading, it creates an instance of an internal operation. Thus, the attribute ``op`` is used as a type of this internal operation. Later in the pipeline, the node created from an internal operation will be replaced during front, middle or back phase with node(s) created from the opset. -* ``infer`` — the attribute defines a function calculating output tensor(s) shape and optional value(s). The attribute may be set to ``None`` for the internal Model Optimizer operations used during the front phase only. For more information about the shape inference function, refer to the :ref:`Partial Inference `. -* ``type_infer`` — the attribute defines a function calculating output tensor(s) data type. If the attribute is not defined, the default function is used. The function checks if the ``data_type`` node attribute is set and then propagates this type to the output tensor from the **port 0**. Otherwise, it propagates the data type of the tensor coming into the input **port 0** to the output tensor from the **port 0**. -* ``in_ports_count`` — default number of input ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods. -* ``out_ports_count`` — default number of output ports to be created for the operation. Additional ports can be created or redundant ports can be removed using dedicated ``Node`` class API methods. - -Below is an example of the Model Optimizer class for the :doc:`SoftMax ` operation from -the ``mo/ops/softmax.py`` file with the comments in code. - -.. code-block:: py - - class Softmax(Op): - # The class attribute defines a name of the operation so the operation class can be obtained using the - # "Op.get_op_class_by_name()" static method - op = 'SoftMax' - - # The operation works as an extractor by default. This is a legacy behavior, currently not recommended for use, - # thus "enabled" class attribute is set to False. The recommended approach is to use dedicated extractor extension. - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { # The constructor of the base class Op is called with additional default attributes. - 'type': __class__.op, # The operation is from the opset so the type is set to 'SoftMax'. - 'op': __class__.op, # Internal Model Optimizer operation has the same type. - 'version': 'opset1', # The operation corresponds to opset1. - 'infer': Softmax.infer, # Shape inference function is defined below. - 'axis': 1, # Default value for the "axis" attribute of the operation SoftMax. 
- 'in_ports_count': 1, # The operation has one input. - 'out_ports_count': 1, # The operation produces one output. - }, attrs) - - # The method returns operation specific attributes list. This method is important when implementing - # extractor inherited from CaffePythonFrontExtractorOp class to extract attribute for Caffe Python operation. - # However, it is currently used interchangeably with the "backend_attrs()" method. If the "backend_attrs()" is not used, - # then the "supported_attrs()" is used instead. In this particular case, the operation has just one attribute "axis". - def supported_attrs(self): - return ['axis'] - - @staticmethod - def infer(node: Node): - "some code calculating output shape and values" - -There is a dedicated method called ``backend_attrs()`` defining a list of attributes to be saved to the IR. Consider an -example from the ``mo/ops/pooling.py`` file: - -.. code-block:: py - - def backend_attrs(self): - return [ - ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))), - ('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))), - - ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))), - ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))), - - ('pool-method', 'pool_method'), - ('exclude-pad', 'exclude_pad'), - - 'rounding_type', - 'auto_pad', - ] - -The ``backend_attrs()`` function returns a list of records. A record can be of one of the following formats: -1. A string defining the attribute to be saved to the IR. If the value of the attribute is ``None``, the attribute is not saved. Examples of this case are ``rounding_type`` and ``auto_pad``. -2. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is a function to produce the value for this attribute. The function gets an instance of the ``Node`` as the only parameter and returns a string with the value to be saved to the IR. Examples of this case are ``strides``, ``kernel``, ``pads_begin`` and ``pads_end``. -3. A tuple, where the first element is a string defining the name of the attribute as it will appear in the IR and the second element is the name of the ``Node`` attribute to get the value from. Examples of this case are ``pool-method`` and ``exclude-pad``. - -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility ` -* :doc:`Graph Traversal and Modification Using Ports and Connections ` -* :doc:`Model Optimizer Extensions ` -* :doc:`Extending Model Optimizer with Caffe Python Layers ` - diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Ports_Connections.rst b/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Ports_Connections.rst deleted file mode 100644 index 985a934c14106c..00000000000000 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Ports_Connections.rst +++ /dev/null @@ -1,186 +0,0 @@ -# [LEGACY] Graph Traversal and Modification {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections} - - -.. meta:: - :description: Learn about deprecated APIs and the Port and Connection classes - in Model Optimizer used for graph traversal and transformation. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. 
It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. - -There are three APIs for a graph traversal and transformation used in the Model Optimizer: - -1. The API provided with the ``networkx`` Python library for the ``networkx.MultiDiGraph`` class, which is the base class for -the ``mo.graph.graph.Graph`` object. For example, the following methods belong to this API level: - -* ``graph.add_edges_from([list])``, -* ``graph.add_node(x, attrs)``, -* ``graph.out_edges(node_id)`` -* other methods where ``graph`` is a an instance of the ``networkx.MultiDiGraph`` class. - -**This is the lowest-level API. Avoid using it in the Model Optimizer transformations**. For more details, refer to the :ref:`Model Representation in Memory ` section. - -2. The API built around the ``mo.graph.graph.Node`` class. The ``Node`` class is the primary class to work with graph nodes -and their attributes. Examples of such methods and functions are: - -* ``node.in_node(y)``, -* ``node.out_node(x)``, -* ``node.get_outputs()``, -* ``node.insert_node_after(n1, y)``, -* ``create_edge(n1, n2)`` - -**There are some "Node" class methods not recommended for use and some functions defined in the mo.graph.graph have been deprecated**. For more details, refer to the ``mo/graph/graph.py`` file. - -3. The high-level API called Model Optimizer Graph API, which uses ``mo.graph.graph.Graph``, ``mo.graph.port.Port`` and -``mo.graph.connection.Connection`` classes. For example, the following methods belong to this API level: - -* ``node.in_port(x)``, -* ``node.out_port(y)``, -* ``port.get_connection()``, -* ``connection.get_source()``, -* ``connection.set_destination(dest_port)`` - -**This is the recommended API for the Model Optimizer transformations and operations implementation**. - -The main benefit of using the Model Optimizer Graph API is that it hides some internal implementation details (the fact that -the graph contains data nodes), provides API to perform safe and predictable graph manipulations, and adds operation -semantic to the graph. This is achieved with introduction of concepts of ports and connections. - -.. note:: - This article is dedicated to the Model Optimizer Graph API only and does not cover other two non-recommended APIs. - -.. _mo_intro_ports: - -===== -Ports -===== - -An operation semantic describes how many inputs and outputs the operation has. For example, -:doc:`Parameter ` and :doc:`Const ` operations have no -inputs and have one output, :doc:`ReLU ` operation has one input and one output, -:doc:`Split ` operation has 2 inputs and a variable number of outputs depending on the value of the -attribute ``num_splits``. - -Each operation node in the graph (an instance of the ``Node`` class) has 0 or more input and output ports (instances of -the ``mo.graph.port.Port`` class). The ``Port`` object has several attributes: - -* ``node`` - the instance of the ``Node`` object the port belongs to. -* ``idx`` - the port number. Input and output ports are numbered independently, starting from ``0``. Thus, -:doc:`ReLU ` operation has one input port (with index ``0``) and one output port (with index ``0``). -* ``type`` - the type of the port. Could be equal to either ``"in"`` or ``"out"``. 
-* ``data`` - the object that should be used to get attributes of the corresponding data node. This object has methods ``get_shape()`` / ``set_shape()`` and ``get_value()`` / ``set_value()`` to get/set shape/value of the corresponding data node. For example, ``in_port.data.get_shape()`` returns an input shape of a tensor connected to input port ``in_port`` (``in_port.type == 'in'``), ``out_port.data.get_value()`` returns a value of a tensor produced from output port ``out_port`` (``out_port.type == 'out'``). - -.. note:: - Functions ``get_shape()`` and ``get_value()`` return ``None`` until the partial inference phase. For more information about model conversion phases, refer to the :ref:`Model Conversion Pipeline `. For information about partial inference phase, see the :ref:`Partial Inference `. - -There are several methods of the ``Node`` class to get the instance of a corresponding port: - -* ``in_port(x)`` and ``out_port(x)`` to get the input/output port with number ``x``. -* ``in_ports()`` and ``out_ports()`` to get a dictionary, where key is a port number and the value is the corresponding input/output port. - -Attributes ``in_ports_count`` and ``out_ports_count`` of the ``Op`` class instance define default number of input and output -ports to be created for the ``Node``. However, additional input/output ports can be added using methods -``add_input_port()`` and ``add_output_port()``. Port also can be removed, using the ``delete_input_port()`` and -``delete_output_port()`` methods. - -The ``Port`` class is just an abstraction that works with edges incoming/outgoing to/from a specific ``Node`` instance. For -example, output port with ``idx = 1`` corresponds to the outgoing edge of a node with an attribute ``out = 1``, the input -port with ``idx = 2`` corresponds to the incoming edge of a node with an attribute ``in = 2``. - -Consider the example of a graph part with 4 operation nodes "Op1", "Op2", "Op3", and "Op4" and a number of data nodes -depicted with light green boxes. - -.. image:: _static/images/MO_ports_example_1.svg - :scale: 80 % - :align: center - -Operation nodes have input ports (yellow squares) and output ports (light purple squares). Input port may not be -connected. For example, the input **port 2** of node **Op1** does not have incoming edge, while output port always has an -associated data node (after the partial inference when the data nodes are added to the graph), which may have no -consumers. - -Ports can be used to traverse a graph. The method ``get_source()`` of an input port returns an output port producing the -tensor consumed by the input port. It is important that the method works the same during front, middle and back phases of a -model conversion even though the graph structure changes (there are no data nodes in the graph during the front phase). - -Let's assume that there are 4 instances of ``Node`` object ``op1, op2, op3``, and ``op4`` corresponding to nodes **Op1**, **Op2**, -**Op3**, and **Op4**, respectively. The result of ``op2.in_port(0).get_source()`` and ``op4.in_port(1).get_source()`` is the -same object ``op1.out_port(1)`` of type ``Port``. - -The method ``get_destination()`` of an output port returns the input port of the node consuming this tensor. If there are -multiple consumers of this tensor, the error is raised. The method ``get_destinations()`` of an output port returns a -list of input ports consuming the tensor. - -The method ``disconnect()`` removes a node incoming edge corresponding to the specific input port. 
The method removes -several edges if it is applied during the front phase for a node output port connected with multiple nodes. - -The method ``port.connect(another_port)`` connects output port ``port`` and input port ``another_port``. The method handles -situations when the graph contains data nodes (middle and back phases) and does not create an edge between two nodes -but also automatically creates data node or reuses existing data node. If the method is used during the front phase and -data nodes do not exist, the method creates edge and properly sets ``in`` and ``out`` edge attributes. - -For example, applying the following two methods to the graph above will result in the graph depicted below: - -.. code-block:: py - :force: - - op4.in_port(1).disconnect() - op3.out_port(0).connect(op4.in_port(1)) - -.. image:: _static/images/MO_ports_example_2.svg - :scale: 80 % - :align: center - -.. note:: - For a full list of available methods, refer to the ``Node`` class implementation in the ``mo/graph/graph.py`` and ``Port`` class implementation in the ``mo/graph/port.py`` files. - -=========== -Connections -=========== - -Connection is a concept introduced to easily and reliably perform graph modifications. Connection corresponds to a -link between a source output port with one or more destination input ports or a link between a destination input port -and source output port producing data. So each port is connected with one or more ports with help of a connection. -Model Optimizer uses the ``mo.graph.connection.Connection`` class to represent a connection. - -There is only one ``get_connection()`` method of the ``Port`` class to get the instance of the corresponding ``Connection`` -object. If the port is not connected, the returned value is ``None``. - -For example, the ``op3.out_port(0).get_connection()`` method returns a ``Connection`` object encapsulating edges from node -**Op3** to data node **data_3_0** and two edges from data node **data_3_0** to two ports of the node **Op4**. - -The ``Connection`` class provides methods to get source and destination(s) ports the connection corresponds to: - -* ``connection.get_source()`` - returns an output ``Port`` object producing the tensor. -* ``connection.get_destinations()`` - returns a list of input ``Port`` consuming the data. -* ``connection.get_destination()`` - returns a single input ``Port`` consuming the data. If there are multiple consumers, the exception is raised. - -The ``Connection`` class provides methods to modify a graph by changing a source or destination(s) of a connection. For -example, the function call ``op3.out_port(0).get_connection().set_source(op1.out_port(0))`` changes source port of edges -consuming data from port ``op3.out_port(0)`` to ``op1.out_port(0)``. The transformed graph from the sample above is depicted -below: - -.. image:: _static/images/MO_connection_example_1.svg - :scale: 80 % - :align: center - -Another example is the ``connection.set_destination(dest_port)`` method. It disconnects ``dest_port`` and all input ports to which -the connection is currently connected and connects the connection source port to ``dest_port``. - -Note that connection works seamlessly during front, middle, and back phases and hides the fact that the graph structure is -different. - -.. note:: - For a full list of available methods, refer to the ``Connection`` class implementation in the ``mo/graph/connection.py`` file. 
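Putting the port and connection methods together, a typical rewiring helper could look like the sketch below (``insert_between`` is a hypothetical helper built only from the ``Port`` methods described above, not a Model Optimizer API):

.. code-block:: py
    :force:

    def insert_between(consumer, port_idx, new_node):
        # Remember the output port that currently feeds the consumer's input port.
        source = consumer.in_port(port_idx).get_source()
        # Drop the old incoming edge of the consumer.
        consumer.in_port(port_idx).disconnect()
        # Rewire: producer -> new_node -> consumer.
        source.connect(new_node.in_port(0))
        new_node.out_port(0).connect(consumer.in_port(port_idx))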
- -==================== -Additional Resources -==================== - -* :doc:`Model Optimizer Extensibility ` -* :doc:`Model Optimizer Extensions ` -* :doc:`Extending Model Optimizer with Caffe Python Layers ` - diff --git a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Transformation_Extensions.rst b/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Transformation_Extensions.rst deleted file mode 100644 index 4178364c44bc3a..00000000000000 --- a/docs/MO_DG/prepare_model/customize_model_optimizer/Model_Optimizer_Transformation_Extensions.rst +++ /dev/null @@ -1,605 +0,0 @@ -# [LEGACY] Graph Transformation Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions} - - -.. meta:: - :description: Learn about various base classes for front, middle and back phase - transformations applied during model conversion with Model Optimizer. - -.. danger:: - - The code described here has been **deprecated!** Do not use it to avoid working with a legacy solution. It will be kept for some time to ensure backwards compatibility, but **you should not use** it in contemporary applications. - - This guide describes a deprecated TensorFlow conversion method. The guide on the new and recommended method, using a new frontend, can be found in the :doc:`Frontend Extensions ` article. - -Model Optimizer provides various base classes to implement :ref:`Front Phase Transformations `, -:ref:`Middle Phase Transformations `, and :ref:`Back Phase Transformations `. -All classes have the following common class attributes and methods: - -1. The ``enabled`` attribute specifies whether the transformation is enabled or not. The value can be changed during runtime to enable or disable execution of the transformation during a model conversion. Default value is ``True``. -2. The ``id`` attribute specifies a unique transformation string identifier. This transformation identifier can be used to enable (disable) the transformation by setting environment variable ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) with a comma separated list of ``ids``. The environment variables override the value of the ``enabled`` attribute of the transformation. Instead of using ``id`` attribute value you can add fully defined class name to ``MO_ENABLED_TRANSFORMS`` (``MO_DISABLED_TRANSFORMS``) variable, ``extensions.back.NonmalizeToNormalizeL2.NormalizeToNormalizeL2`` for example. It is an optional attribute. -3. The ``run_not_recursively`` attribute specifies whether the transformation should be executed in the sub-graphs, for example, body of the :doc:`TensorIterator ` and the :doc:`Loop `. Default value is ``True``. -4. The ``force_clean_up`` attribute specifies whether the graph clean up should be executed after the transformation. The graph cleanup removes nodes of the graph not reachable from the model inputs. Default value is ``False``. -5. The ``force_shape_inference`` attribute specifies whether the nodes marked with ``need_shape_inference`` attribute equal to ``True`` should be re-inferred after the transformation. Model Optimizer sets this attribute automatically for nodes, input(s) of which were changed during the transformation, or you can set this attribute manually in the transformation for the specific nodes. Default value is ``False``. -6. Attribute ``graph_condition`` specifies a list of functions with one parameter -- ``Graph`` object. 
The transformation is executed if and only if all functions return ``True``. If the attribute is not set, no check is performed. -7. Method ``run_before()`` returns a list of transformation classes which this transformation should be executed before. -8. Method ``run_after()`` returns a list of transformation classes which this transformation should be executed after. - -.. note:: - Some of the transformation types have specific class attributes and methods, which are explained in the corresponding sections of this document. - -Model Optimizer builds a graph of dependencies between registered transformations and executes them in the topological -order. To execute the transformation during a proper model conversion phase, Model Optimizer defines several -anchor transformations that do nothing. All transformations are ordered with respect to these anchor transformations. -The diagram below shows anchor transformations, some of built-in transformations and dependencies between them: - -.. image:: _static/images/MO_transformations_graph.svg - -User-defined transformations are executed after the corresponding ``Start`` and before the corresponding ``Finish`` anchor -transformations by default (if ``run_before()`` and ``run_after()`` methods have not been overridden). - -.. note:: - The ``PreMiddleStart`` and ``PostMiddleStart`` anchors were introduced due to historical reasons to refactor the Model Optimizer pipeline, which initially had a hardcoded order of transformations. - -.. _mo_front_phase_transformations: - -=========================== -Front Phase Transformations -=========================== - -There are several types of a front phase transformation: - -1. :ref:`Pattern-Defined Front Phase Transformations ` triggered for each sub-graph of the original graph isomorphic to the specified pattern. -2. :ref:`Specific Operation Front Phase Transformations ` triggered for the node with a specific ``op`` attribute value. -3. :ref:`Generic Front Phase Transformations `. -4. Manually enabled transformation, defined with a JSON configuration file (for TensorFlow, ONNX, Apache MXNet, and PaddlePaddle models), specified using the ``--transformations_config`` command-line parameter: - - 1. :ref:`Node Name Pattern Front Phase Transformations `. - 2. :ref:`Front Phase Transformations Using Start and End Points `. - 3. :ref:`Generic Front Phase Transformations Enabled with Transformations Configuration File `. - -.. _pattern_defined_front_phase_transformations: - -Pattern-Defined Front Phase Transformations -########################################### - -This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` and -``mo.front.common.replacement.FrontReplacementPattern`` as base classes and works as follows: - -1. Define a sub-graph to be matched, using a list of nodes with attributes and edges connecting them (edges may also have attributes). -2. Model Optimizer searches for all sub-graphs of the original graph, isomorphic to the specified sub-graph (pattern). -3. Model Optimizer executes the defined function performing graph transformation for each instance of a matched sub-graph. You can override different functions in the base transformation class so the Model Optimizer works differently: - - 1. The ``replace_sub_graph(self, graph, match)`` override the method. In this case Model Optimizer only executes the overridden function, pass the ``graph`` object and a dictionary describing the matched sub-graph. 
You are required to write the transformation and connect the newly created nodes to the rest of the graph. - 2. The ``generate_sub_graph(self, graph, match)`` override the method. This case is not recommended for use because it is the most complicated approach. It can be effectively replaced with one of two previous approaches. - -The sub-graph pattern is defined in the ``pattern()`` function. This function should return a dictionary with two keys: -``nodes`` and ``edges``: - -* The value for the ``nodes`` key is a list of tuples with two elements. - - * The first element is an alias name for a node that will be used to define edges between nodes and in the transformation function. - * The second element is a dictionary with attributes. The key is a name of an attribute that should exist in the node. The value for the attribute can be some specific value to match or a function that gets a single parameter - the attribute value from the node. The function should return the result of attribute comparison with a dedicated value. - -* The value for the ``edges`` key is a list of tuples with two or three elements. - - * The first element is the alias name of the node producing a tensor. - * The second element is the alias name of the node consuming the tensor. - * The third element (optional) is the dictionary with expected edge attributes. This dictionary usually contains attributes like ``in`` and ``out``, defining input and output ports. - -Consider the example of a front transformation implemented in the ``extensions/front/Mish_fusion.py`` file performing -fusing of the sub-graph defining the :doc:`Mish ` activation function into a single -operation: - -.. code-block:: py - :force: - - from openvino.tools.mo.front.Softplus_fusion import SoftplusFusion - from openvino.tools.mo.ops.activation_ops import Mish - from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph - from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch - from openvino.tools.mo.graph.graph import Graph, rename_nodes - - - class MishFusion(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with Softplus defining the Mish function: Mish(x) = x * tanh(SoftPlus(x)). - """ - enabled = True # Transformation is enabled. - - def run_after(self): # Run this transformation after "SoftplusFusion" transformation. - return [SoftplusFusion] - - def pattern(self): # Define pattern according to formulae x * tanh(SoftPlus(x)). - return dict( - nodes=[ - ('mul', dict(op='Mul')), - ('tanh', dict(op='Tanh')), - ('softplus', dict(op='SoftPlus')), - ], - edges=[ - ('softplus', 'tanh'), - ('tanh', 'mul'), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): # Entry point for the transformation. - mul = match['mul'] # Get the Node corresponding to matched "mul" node. - mul_name = mul.soft_get('name', mul.id) - softplus = match['softplus'] # Get the Node corresponding to the matched "softplus" node. - - # Determine the input port of Mul which gets the 'input' node output. - input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') == 'Tanh') - - # Check that the same tensor is provided as input to Mul and SoftPlus. - if mul.in_port(input_port_idx).get_source() != softplus.in_port(0).get_source(): - return - - mish = Mish(graph, {}).create_node() # Create Mish operation. - mish.in_port(0).connect(mul.in_port(input_port_idx).get_source()) # Connect input to the Mish. 
- mul.out_port(0).get_connection().set_source(mish.out_port(0)) # Reconnect outgoing edge from "mul" to Mish. - - # Rename the created Mish operation to have the name of the "mul" node, which produced the value equal to the - # Mish output. - rename_nodes([(mul, mul_name + '/TBR'), (mish, mul_name)]) - -.. _specific_operation_front_phase_transformations: - -Specific Operation Front Phase Transformations -############################################## - -This type of transformation is implemented using ``mo.front.common.replacement.FrontReplacementOp`` as base class and -works as follows: - -1. Define an operation type to trigger the transformation. -2. Model Optimizer searches for all nodes in the graph with the attribute ``op`` equal to the specified value. -3. Model Optimizer executes the defined function performing graph transformation for each instance of a matched node. You can override different functions in the base transformation class and Model Optimizer works differently: - - 1. The ``replace_sub_graph(self, graph, match)`` override method. In this case, Model Optimizer only executes the overridden function. Pass the ``graph`` object and a dictionary with a single key ``op`` with the matched node as value. You are required to write the transformation and connect the newly created nodes to the rest of the graph. - 2. The ``replace_op(self, graph, node)`` override method. In this case, Model Optimizer executes the overridden function. Pass the ``graph`` object and the matched node as ``node`` parameter. If the function returns an ``id`` of some node, then the ``Node`` with this ``id`` is connected to the consumers of the matched node. After applying the transformation, the matched node is removed from the graph. - -The ``FrontReplacementOp`` class provides a simpler mechanism to match a single operation with specific value of the ``op`` -(write the ``op`` attribute in the class instead of defining a ``pattern()`` function) attribute and perform the -transformation. - -Consider an example transformation from the ``extensions/front/Pack.py`` file, which replaces ``Pack`` operation from -the TensorFlow: - -.. code-block:: py - :force: - - from openvino.tools.mo.front.common.partial_infer.utils import int64_array - from openvino.tools.mo.front.common.replacement import FrontReplacementOp - from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs - from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes - from openvino.tools.mo.ops.concat import Concat - from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - - class Pack(FrontReplacementOp): - op = "Pack" # Trigger transformation for all nodes in the graph with the op = "Pack" attribute - enabled = True # Transformation is enabled. - - def replace_op(self, graph: Graph, node: Node): # Entry point for the transformation. - # Create a Concat operation with a number of inputs equal to a number of inputs to Pack. - out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node() - pack_name = node.soft_get('name', node.id) - - for ind in node.in_ports(): - # Add dimension of size 1 to all inputs of the Pack operation and add them as Concat inputs. 
- unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])}, - {'name': node.soft_get('name', node.id) + '/Unsqueeze'}) - node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0)) - unsqueeze_node.out_port(0).connect(out_node.in_port(ind)) - - # Rename the created Concat operation to have the name of the "pack" node, which produced the value equal to the - # Concat output. - rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)]) - return [out_node.id] # Reconnect the Pack operation consumers to get input from Concat instead. - - -.. _generic_front_phase_transformations: - -Generic Front Phase Transformations -################################### - -Model Optimizer provides a mechanism to implement generic front phase transformation. This type of transformation is -implemented using ``mo.front.common.replacement.FrontReplacementSubgraph`` or -``mo.front.common.replacement.FrontReplacementPattern`` as base classes. Make sure the transformation is enabled before trying to execute it. -Then, Model Optimizer executes the ``find_and_replace_pattern(self, graph)`` method and -provides a ``Graph`` object as an input. - -Consider the example of a generic front transformation from the ``extensions/front/SqueezeNormalize.py`` file performing -normalization of the :doc:`Squeeze ` operation. Older version of the operation had a list of -axes to squeeze as an attribute, but now it is a separate input. For backward compatibility, the Model Optimizer -operation supports both semantics. Before IR generation, however, the operation should be normalized according to the -specification. - -.. code-block:: py - :force: - - import logging as log - - from openvino.tools.mo.front.common.partial_infer.utils import int64_array - from openvino.tools.mo.front.common.replacement import FrontReplacementPattern - from openvino.tools.mo.graph.graph import Graph - from openvino.tools.mo.ops.const import Const - from openvino.tools.mo.utils.error import Error - - - class SqueezeNormalize(FrontReplacementPattern): - """ - Normalizes inputs of the Squeeze layers. The layers should have two inputs: the input with data and input with the - dimensions to squeeze. If the second input is omitted then all dimensions of size 1 should be removed. - """ - enabled = True # The transformation is enabled. - - def find_and_replace_pattern(self, graph: Graph): # The function is called unconditionally. - for squeeze_node in graph.get_op_nodes(op='Squeeze'): # Iterate over all nodes with op='Squeeze'. - # If the operation has only 1 input node and no 'squeeze_dims' Node attribute, then convert the attribute to - # the operation input. - if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'): - dims_node = Const(graph, {'name': squeeze_node.id + '/Dims', - 'value': int64_array(squeeze_node.squeeze_dims)}).create_node() - squeeze_node.in_port(1).connect(dims_node.out_port(0)) - del squeeze_node['squeeze_dims'] - # If two inputs already exist, that means the operation is already normalized. - elif len(squeeze_node.in_nodes()) == 2: - log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name)) - # In all other cases, raise an error. - else: - raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and an "squeeze_dims" ' - 'attribute'.format(squeeze_node.soft_get('name'))) - -For the details on implementation and how these front phase transformations work, refer to the ``mo/front/common/replacement.py`` -file. 
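For reference, the minimal skeleton such a generic transformation requires is quite small (an illustrative sketch; the class name and the annotated attribute are invented for this example):

.. code-block:: py
    :force:

    from openvino.tools.mo.front.common.replacement import FrontReplacementPattern
    from openvino.tools.mo.graph.graph import Graph


    class AnnotateReLU(FrontReplacementPattern):
        enabled = True  # The transformation is executed unconditionally.

        def find_and_replace_pattern(self, graph: Graph):
            # Iterate over all ReLU nodes and mark them with a custom attribute.
            for relu in graph.get_op_nodes(op='ReLU'):
                relu['marked_by_example'] = True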
- -.. _node_name_pattern_front_phase_transformations: - -Node Name Pattern Front Phase Transformations -############################################# - -TensorFlow uses a mechanism of scope to group related operation nodes. It is a good practice to put nodes performing -particular task into the same scope. This approach divides a graph into logical blocks that are easier to review in the -TensorBoard. The scope, in fact, just defines a common name prefix for the nodes belonging to it. - -For example, Inception topologies contain several types of so-called **Inception blocks**. Some of them are equal to each -other, but located in different places of the network. For example, Inception V4 from the -`TensorFlow-Slim image classification model library `__ has -``Mixed_5b``, ``Mixed_5c`` and ``Mixed_5d`` inception blocks with exactly the same nodes, with the same set of attributes. - -Consider a situation when these Inception blocks are implemented extremely efficiently using a single Inference -Engine operation called ``InceptionBlock`` and these blocks in the model need to be replaced with instances of this operation. -Model Optimizer provides mechanism to trigger the transformation for a sub-graph of operations defined by the node name -regular expressions (scope). In this particular case, some of the patterns are: ``.*InceptionV4/Mixed_5b``, -``.*InceptionV4/Mixed_5c`` and ``.*InceptionV4/Mixed_5d``. Each pattern starts with ``.*``, because the ``InceptionV4`` prefix -is added to all nodes names during a model freeze. - -This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a -base class and works as follows: - -1. Prepare a JSON configuration file template defining node names patterns. -2. Run Model Optimizer with the ``--tensorflow_custom_operations_config_update`` command-line parameter, and Model Optimizer adds information about input and output nodes of the specified sub-graphs. -3. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file updated in step 2 using the ``--transformations_config`` command-line parameter. - -Consider the following possible configuration file template for the Inception Block transformation: - -.. code-block:: json - - [ - { - "custom_attributes": { - "attr1_key": "attr1_value", - "attr2_key": 123456 - }, - "id": "InceptionBlockTransformation", - "instances": [ - ".*InceptionV4/Mixed_5b", - ".*InceptionV4/Mixed_5c", - ".*InceptionV4/Mixed_5d" - ], - "match_kind": "scope" - } - ] - -The configuration file contains a list of dictionaries. Each dictionary defines one transformation. Each transformation -is defined with several parameters: - -* ``id`` - **(Mandatory)** — is a unique identifier of the transformation. It is used in the Python code that implements the transformation to link the class and the transformation description from the configuration file. -* ``match_kind`` - **(Mandatory)** — is a string that specifies the matching algorithm. For the node name pattern case, the value should be equal to ``scope``. Another possible values are described in the dedicated sections below. -* ``instances`` - **(Mandatory)** — specifies instances of the sub-graph to be matched. It contains a list of node names prefixes patterns for the match kind of the ``scope`` type. -* ``custom_attributes`` - **(Optional)** — is a dictionary with attributes that can be used in the transformation code. 
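On the code side, the transformation class is linked to this configuration entry through the value of the ``id`` key. A heavily simplified skeleton might look as follows (the ``replacement_id`` attribute name and the override point are assumptions based on the built-in Object Detection API replacements; refer to those files for complete, working examples):

.. code-block:: py
    :force:

    from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph


    class InceptionBlockReplacement(FrontReplacementFromConfigFileSubGraph):
        # Must match the "id" value from the JSON configuration file.
        replacement_id = 'InceptionBlockTransformation'

        def generate_sub_graph(self, graph, match):
            # "match" exposes the matched sub-graph, e.g. its input and output
            # nodes via match.single_input_node(i) and match.output_node(i).
            ...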
-
-After running Model Optimizer with the additional ``--tensorflow_custom_operations_config_update`` parameter pointing to
-the template configuration file, the content of the file should be updated with two new sections, ``inputs`` and ``outputs``.
-The file content after the update is as follows:
-
-.. code-block:: json
-
-   [
-       {
-           "id": "InceptionBlockTransformation",
-           "custom_attributes": {
-               "attr1_key": "attr1_value",
-               "attr2_key": 123456
-           },
-           "instances": [
-               ".*InceptionV4/Mixed_5b",
-               ".*InceptionV4/Mixed_5c",
-               ".*InceptionV4/Mixed_5d"
-           ],
-           "match_kind": "scope",
-           "inputs": [
-               [
-                   {
-                       "node": "Branch_2/Conv2d_0a_1x1/Conv2D$",
-                       "port": 0
-                   },
-                   {
-                       "node": "Branch_3/AvgPool_0a_3x3/AvgPool$",
-                       "port": 0
-                   },
-                   {
-                       "node": "Branch_1/Conv2d_0a_1x1/Conv2D$",
-                       "port": 0
-                   },
-                   {
-                       "node": "Branch_0/Conv2d_0a_1x1/Conv2D$",
-                       "port": 0
-                   }
-               ]
-           ],
-           "outputs": [
-               {
-                   "node": "concat$",
-                   "port": 0
-               }
-           ]
-       }
-   ]
-
-The value of the ``inputs`` key is a list of lists describing input tensors of the sub-graph. Each element of the top-level
-list corresponds to one unique input tensor of the sub-graph. Each internal list describes the nodes consuming
-this tensor and the port numbers where the tensor is consumed. Model Optimizer generates regular expressions for the input
-node names to uniquely identify them in each instance of the sub-graph defined by ``instances``. Denote these nodes
-as input nodes of the sub-graph.
-
-In the InceptionV4 topology, the ``InceptionV4/Mixed_5b`` block has four input tensors from outside of the sub-graph,
-but all of them are produced by the ``InceptionV4/Mixed_5a/concat`` node. Therefore, the top-level list of ``inputs``
-contains one list corresponding to this tensor. Four input nodes of the sub-graph consume the tensor produced by the
-``InceptionV4/Mixed_5a/concat`` node. In this case, all four input nodes consume the input tensor at port 0.
-
-The order of items in the internal list describing nodes does not matter, but the order of elements in the top-level
-list is important. This order defines how Model Optimizer attaches input tensors to a newly generated
-node if the sub-graph is replaced with a single node. The ``i``-th input node of the sub-graph is obtained with the
-``match.single_input_node(i)`` call in the sub-graph transformation code. More information about the API is given below. If it is
-necessary to change the order of input tensors, the configuration file can be edited in a text editor.
-
-The value of the ``outputs`` key is a list describing the nodes of the sub-graph that produce tensors going outside of the
-sub-graph or that do not have child nodes. Denote these nodes as output nodes of the sub-graph. The order of elements in
-the list is important. The ``i``-th element of the list describes the ``i``-th output tensor of the sub-graph, which can be
-obtained with the ``match.output_node(i)`` call. The order of elements can be manually changed in the configuration file.
-Model Optimizer uses this order to connect output edges if the sub-graph is replaced with a single node.
-
-For more examples of this type of transformation, refer to the :doc:`Converting TensorFlow Object Detection API Models ` guide.
-
-.. _start_end_points_front_phase_transformations:
-
-Front Phase Transformations Using Start and End Points
-######################################################
-
-This type of transformation is implemented using ``mo.front.tf.replacement.FrontReplacementFromConfigFileSubGraph`` as a
-base class and works as follows:
-
-1. Prepare a JSON configuration file that defines the sub-graph to match, using two lists of node names: "start" and "end" nodes.
-2. Model Optimizer executes the defined transformation **only** when you specify the path to the configuration file using the ``--transformations_config`` command-line parameter. Model Optimizer performs the following steps to match the sub-graph:
-
-   1. Starts a graph traversal from every start node, following the direction of the graph edges. The search stops at an end node or at a node without consumers. All visited nodes are added to the matched sub-graph.
-   2. Starts another graph traversal from each non-start node of the sub-graph, i.e. every node except the nodes from the "start" list. In this step, the edges are traversed in the opposite direction. All newly visited nodes are added to the matched sub-graph. This step is needed to add the nodes required for calculating the values of internal nodes of the matched sub-graph.
-   3. Checks that all "end" nodes were reached from the "start" nodes. If not, it exits with an error.
-   4. Checks that there are no :doc:`Parameter ` operations among the added nodes. If they exist, the sub-graph depends on the inputs of the model. Such a configuration is considered incorrect, so Model Optimizer exits with an error.
-
-This algorithm finds all nodes "between" the start and end nodes, plus the nodes needed for calculating the values of
-non-input nodes of the matched sub-graph.
-
-An example of a JSON configuration file for a transformation with start and end points is
-``extensions/front/tf/ssd_support_api_v1.15.json``:
-
-.. code-block:: json
-
-   [
-       {
-           "custom_attributes": {
-               "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
-               "pad_mode": "caffe.ResizeParameter.CONSTANT",
-               "resize_mode": "caffe.ResizeParameter.WARP",
-               "clip_before_nms": false,
-               "clip_after_nms": true
-           },
-           "id": "ObjectDetectionAPISSDPostprocessorReplacement",
-           "include_inputs_to_sub_graph": true,
-           "include_outputs_to_sub_graph": true,
-           "instances": {
-               "end_points": [
-                   "detection_boxes",
-                   "detection_scores",
-                   "num_detections"
-               ],
-               "start_points": [
-                   "Postprocessor/Shape",
-                   "Postprocessor/scale_logits",
-                   "Postprocessor/Tile",
-                   "Postprocessor/Reshape_1",
-                   "Postprocessor/Cast_1"
-               ]
-           },
-           "match_kind": "points"
-       }
-   ]
-
-The format of the file is similar to the one provided as an example in the
-:ref:`Node Name Pattern Front Phase Transformations ` section. The differences are in
-the value of the ``match_kind`` parameter, which should be equal to ``points``, and in the format of the ``instances`` parameter,
-which should be a dictionary with two keys, ``start_points`` and ``end_points``, defining start and end node names
-respectively.
-
-.. note::
-   The ``include_inputs_to_sub_graph`` and ``include_outputs_to_sub_graph`` parameters are redundant and should always be equal to ``true``.
-
-.. note::
-   This sub-graph match algorithm has a limitation that each start node must have only one input. Therefore, it is not possible to specify, for example, the :doc:`Convolution ` node as input because it has two inputs: the data tensor and the tensor with weights.
-
-For other examples of transformations with points, refer to the
-:doc:`Converting TensorFlow Object Detection API Models ` guide.
-
-.. _generic_transformations_config_front_phase_transformations:
-
-Generic Front Phase Transformations Enabled with Transformations Configuration File
-###################################################################################
-
-This type of transformation works similarly to the :ref:`Generic Front Phase Transformations ` and
-:ref:`Front Phase Transformations Using Start and End Points `.
-
-The base class for this type of transformation is
-``mo.front.common.replacement.FrontReplacementFromConfigFileGeneral``. Model Optimizer executes the
-``transform_graph(self, graph, replacement_descriptions)`` method and provides the ``Graph`` object and a dictionary with
-values parsed from the ``custom_attributes`` attribute of the provided JSON configuration file.
-
-An example of the configuration file for this type of transformation is ``extensions/front/tf/yolo_v1_tiny.json``:
-
-.. code-block:: json
-
-   [
-       {
-           "id": "TFYOLO",
-           "match_kind": "general",
-           "custom_attributes": {
-               "classes": 20,
-               "coords": 4,
-               "num": 2,
-               "do_softmax": 0
-           }
-       }
-   ]
-
-and the corresponding transformation file is ``./extensions/front/YOLO.py``:
-
-.. code-block:: py
-   :force:
-
-   from openvino.tools.mo.front.no_op_eraser import NoOpEraser
-   from openvino.tools.mo.front.standalone_const_eraser import StandaloneConstEraser
-   from openvino.tools.mo.ops.regionyolo import RegionYoloOp
-   from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral
-   from openvino.tools.mo.graph.graph import Node, Graph
-   from openvino.tools.mo.ops.result import Result
-   from openvino.tools.mo.utils.error import Error
-
-
-   class YoloRegionAddon(FrontReplacementFromConfigFileGeneral):
-       """
-       Replaces all Result nodes in the graph with a YoloRegion->Result node chain.
-       YoloRegion node attributes are taken from the configuration file.
-       """
-       replacement_id = 'TFYOLO'  # The identifier matching the "id" attribute in the JSON file.
-
-       def run_after(self):
-           return [NoOpEraser, StandaloneConstEraser]
-
-       def transform_graph(self, graph: Graph, replacement_descriptions):
-           op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result']
-           for op_output in op_outputs:
-               last_node = Node(graph, op_output).in_node(0)
-               op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1)
-               op_params.update(replacement_descriptions)
-               region_layer = RegionYoloOp(graph, op_params)
-               region_layer_node = region_layer.create_node([last_node])
-               # Remove 'axis' from 'dim_attrs' to avoid permutation from axis = 1 to axis = 2.
-               region_layer_node.dim_attrs.remove('axis')
-               Result(graph).create_node([region_layer_node])
-               graph.remove_node(op_output)
-
-The configuration file has only three parameters: the ``id`` identifier of the transformation, ``match_kind`` (which should
-be equal to ``general``) and the ``custom_attributes`` dictionary with custom attributes accessible in the transformation.
-
-.. _mo_middle_phase_transformations:
-
-============================
-Middle Phase Transformations
-============================
-
-There are two types of middle phase transformations:
-
-1. :ref:`Pattern-Defined Middle Phase Transformations ` triggered for each sub-graph of the original graph, isomorphic to the specified pattern.
-2. :ref:`Generic Middle Phase Transformations `.
-
-.. _pattern_defined_middle_phase_transformations:
-
-Pattern-Defined Middle Phase Transformations
-############################################
-
-This type of transformation is implemented using ``mo.middle.replacement.MiddleReplacementPattern`` as a base class and
-works similarly to the :ref:`Pattern-Defined Front Phase Transformations `.
-There are two differences:
-
-1. The transformation entry function name is ``replace_pattern(self, graph, match)``.
-2. The pattern defining the graph should contain data nodes because the structure of the graph is different between the front and middle phases. For more information about the graph structure changes, refer to the :ref:`Partial Inference `. A sketch of such a pattern is given after the back phase overview below.
-
-For the example of a pattern-defined middle transformation, refer to the ``extensions/middle/L2NormToNorm.py`` file.
-
-.. _generic_middle_phase_transformations:
-
-Generic Middle Phase Transformations
-####################################
-
-Model Optimizer provides a mechanism to implement generic middle phase transformations. This type of transformation is
-implemented using ``mo.middle.replacement.MiddleReplacementPattern`` as a base class and works similarly to the
-:ref:`Generic Front Phase Transformations `. The only difference is that the
-transformation entry function name is ``find_and_replace_pattern(self, graph: Graph)``.
-
-For the example of this transformation, refer to the ``extensions/middle/CheckForCycle.py`` file.
-
-.. _mo_back_phase_transformations:
-
-==========================
-Back Phase Transformations
-==========================
-
-There are two types of back phase transformations:
-
-1. :ref:`Pattern-Defined Back Phase Transformations ` triggered for each sub-graph of the original graph, isomorphic to the specified pattern.
-2. :ref:`Generic Back Phase Transformations `.
-
-.. note::
-   The graph layout during the back phase is always NCHW. However, during the front and middle phases it could be NHWC if the original model was using it. For more details, refer to :ref:`Model Conversion Pipeline `.
-
-.. _pattern_defined_back_phase_transformations:
-
-Pattern-Defined Back Phase Transformations
-##########################################
-
-This type of transformation is implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and
-works the same way as :ref:`Pattern-Defined Middle Phase Transformations `.
-
-For the example of a pattern-defined back transformation, refer to the ``extensions/back/ShufflenetReLUReorder.py`` file.
-
-.. _generic_back_phase_transformations:
-
-Generic Back Phase Transformations
-##################################
-
-Model Optimizer provides a mechanism to implement generic back phase transformations. This type of transformation is
-implemented using ``mo.back.replacement.BackReplacementPattern`` as a base class and works the same way as
-:ref:`Generic Middle Phase Transformations `.
-
-For the example of this transformation, refer to the ``extensions/back/GatherNormalizer.py`` file.
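-
-To make the data-node requirement of pattern-defined middle phase transformations concrete, here is a minimal,
-hypothetical sketch (the class name and the matched operations are illustrative assumptions, not existing code):
-
-.. code-block:: py
-   :force:
-
-   from openvino.tools.mo.graph.graph import Graph
-   from openvino.tools.mo.middle.replacement import MiddleReplacementPattern
-
-
-   class HypotheticalMulAddMatcher(MiddleReplacementPattern):
-       """Shows the pattern structure only; no actual replacement is performed."""
-       enabled = True
-
-       def pattern(self):
-           # Unlike front phase patterns, middle phase patterns include
-           # data nodes (kind='data') between the operation nodes.
-           return dict(
-               nodes=[
-                   ('mul', dict(kind='op', op='Mul')),
-                   ('mul_data', dict(kind='data')),
-                   ('add', dict(kind='op', op='Add')),
-               ],
-               edges=[
-                   ('mul', 'mul_data'),
-                   ('mul_data', 'add'),
-               ])
-
-       def replace_pattern(self, graph: Graph, match: dict):
-           mul = match['mul']  # matched nodes are accessible by their pattern names
-           add = match['add']
-           # ... the transformation logic would go here ...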
-
-====================
-Additional Resources
-====================
-
-* :doc:`Model Optimizer Extensibility `
-* :doc:`Graph Traversal and Modification Using Ports and Connections `
-* :doc:`Model Optimizer Extensions `
-* :doc:`Extending Model Optimizer with Caffe Python Layers `
-
diff --git a/docs/OV_Runtime_UG/Int8Inference.md b/docs/OV_Runtime_UG/Int8Inference.md
deleted file mode 100644
index f8fabaf5d5f2d2..00000000000000
--- a/docs/OV_Runtime_UG/Int8Inference.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Low-Precision 8-bit Integer Inference
-
-## Disclaimer
-
-Low-precision 8-bit inference is optimized for:
-- Intel® architecture processors with the following instruction set architecture extensions:
-  - Intel® Advanced Vector Extensions 512 Vector Neural Network Instructions (Intel® AVX-512 VNNI)
-  - Intel® Advanced Vector Extensions 512 (Intel® AVX-512)
-  - Intel® Advanced Vector Extensions 2.0 (Intel® AVX2)
-  - Intel® Streaming SIMD Extensions 4.2 (Intel® SSE4.2)
-- Intel® processor graphics:
-  - Intel® Iris® Xe Graphics
-  - Intel® Iris® Xe MAX Graphics
-
-## Introduction
-
-For 8-bit integer computation, a model must be quantized. You can use a quantized model from [OpenVINO™ Toolkit Intel's Pre-Trained Models](@ref omz_models_group_intel) or quantize a model yourself. For more details on how to get a quantized model, refer to the [Model Optimization](@ref openvino_docs_model_optimization_guide) document.
-
-The quantization process adds [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers on activations and weights for most layers. Read more about the mathematical computations in [Uniform Quantization with Fine-Tuning](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md).
-
-When you pass the quantized IR to the OpenVINO™ plugin, the plugin automatically recognizes it as a quantized model and performs 8-bit inference. Note that if you pass a quantized model to another plugin that does not support 8-bit inference but supports all operations from the model, the model is inferred in the precision that this plugin supports.
-
-At runtime, the quantized model is loaded to the plugin. The plugin uses the `Low Precision Transformation` component to update the model to infer it in low precision:
-   - Update `FakeQuantize` layers to have quantized output tensors in the low-precision range and add dequantization layers to compensate for the update. Dequantization layers are pushed through as many layers as possible to have more layers in low precision. After that, most layers have quantized input tensors in the low-precision range and can be inferred in low precision. Ideally, dequantization layers should be fused into the next `FakeQuantize` layer.
-   - Weights are quantized and stored in `Constant` layers.
-
-## Prerequisites
-
-Let's explore the quantized [TensorFlow* implementation of the ResNet-50](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/resnet-50-tf) model. Use [Model Downloader](@ref omz_tools_downloader) to download the `FP16` model from the [OpenVINO™ Toolkit - Open Model Zoo repository](https://github.com/openvinotoolkit/open_model_zoo):
-
-```sh
-omz_downloader --name resnet-50-tf --precisions FP16-INT8
-```
-After that, quantize the model with the [Model Quantizer](@ref omz_tools_downloader) tool.
-```sh
-omz_quantizer --model_dir public/resnet-50-tf --dataset_dir --precisions=FP16-INT8
-```
-
-The simplest way to infer the model and collect performance counters is to use the [Benchmark Application](../../samples/cpp/benchmark_app/README.md):
-```sh
-./benchmark_app -m resnet-50-tf.xml -d CPU -niter 1 -api sync -report_type average_counters -report_folder pc_report_dir
-```
-If you infer the model with the OpenVINO™ CPU plugin and collect performance counters, all operations (except the last non-quantized SoftMax) are executed in INT8 precision.
-
-## Low-Precision 8-bit Integer Inference Workflow
-
-For 8-bit integer computations, a model must be quantized. Quantized models can be downloaded from the [Overview of OpenVINO™ Toolkit Intel's Pre-Trained Models](@ref omz_models_group_intel). If the model is not quantized, you can use the [Post-Training Optimization Tool](@ref pot_introduction) to quantize the model. The quantization process adds [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers on activations and weights for most layers. Read more about the mathematical computations in [Uniform Quantization with Fine-Tuning](https://github.com/openvinotoolkit/nncf/blob/develop/docs/compression_algorithms/Quantization.md).
-
-The 8-bit inference pipeline includes two stages (also refer to the figure below):
-1. *Offline stage*, or *model quantization*. During this stage, [FakeQuantize](../ops/quantization/FakeQuantize_1.md) layers are added before most layers so that tensors are quantized before those layers, in a way that keeps the accuracy drop for 8-bit integer inference within the specified threshold. The output of this stage is a quantized model. The precision of the quantized model is not changed; quantized tensors are in the original precision range (`fp32`). The `FakeQuantize` layer has a `levels` attribute which defines the number of quantization levels and, consequently, the precision used during inference. For the `int8` range, the `levels` attribute value has to be 255 or 256. To quantize the model, you can use the [Post-Training Optimization Tool](@ref pot_introduction) delivered with the Intel® Distribution of OpenVINO™ toolkit release package.
-
-   When you pass the quantized IR to the OpenVINO™ plugin, the plugin automatically recognizes it as a quantized model and performs 8-bit inference. Note that if you pass a quantized model to another plugin that does not support 8-bit inference but supports all operations from the model, the model is inferred in the precision that this plugin supports.
-
-2. *Runtime stage*. This stage is an internal procedure of the OpenVINO™ plugin. During this stage, the quantized model is loaded to the plugin. The plugin uses the `Low Precision Transformation` component to update the model to infer it in low precision:
-   - Update `FakeQuantize` layers to have quantized output tensors in the low-precision range and add dequantization layers to compensate for the update. Dequantization layers are pushed through as many layers as possible to have more layers in low precision. After that, most layers have quantized input tensors in the low-precision range and can be inferred in low precision. Ideally, dequantization layers should be fused into the next `FakeQuantize` layer.
-   - Weights are quantized and stored in `Constant` layers.
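-
-As a rough illustration only (not part of the original document), the quantize-dequantize emulation that a
-`FakeQuantize` layer performs can be sketched in plain NumPy. The function name and the ranges below are assumptions
-made for the example; the authoritative definition is the FakeQuantize operation specification:
-
-```python
-import numpy as np
-
-def fake_quantize(x, in_low, in_high, out_low, out_high, levels=256):
-    """Sketch of the FakeQuantize quantize-dequantize emulation."""
-    x = np.clip(x, in_low, in_high)                # clamp to the input range
-    step_in = (in_high - in_low) / (levels - 1)
-    q = np.round((x - in_low) / step_in)           # discretize to integer levels
-    step_out = (out_high - out_low) / (levels - 1)
-    return q * step_out + out_low                  # reproject to the output range
-
-x = np.array([-1.5, -0.2, 0.3, 1.7], dtype=np.float32)
-print(fake_quantize(x, in_low=-1.0, in_high=1.0, out_low=-1.0, out_high=1.0))
-```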
-
-![int8_flow]
-
diff --git a/docs/Legal_Information.rst b/docs/articles_en/Legal_Information.rst
similarity index 100%
rename from docs/Legal_Information.rst
rename to docs/articles_en/Legal_Information.rst
diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks.rst b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks.rst
index 762fb7cb3f77bb..277151b16b3f2c 100644
--- a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks.rst
+++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks.rst
@@ -3,7 +3,11 @@
 Quantized models compute and restrictions
 =========================================
 
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+
+   openvino_docs_ie_plugin_dg_lp_representation
 
 .. meta::
    :description: Learn about the support for quantized models with different
diff --git a/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks/LowPrecisionModelRepresentation.rst b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks/LowPrecisionModelRepresentation.rst
new file mode 100644
index 00000000000000..42da84e5271545
--- /dev/null
+++ b/docs/articles_en/documentation/openvino_extensibility/openvino_plugin_library/detailed_guides/QuantizedNetworks/LowPrecisionModelRepresentation.rst
@@ -0,0 +1,34 @@
+.. {#openvino_docs_ie_plugin_dg_lp_representation}
+
+Representation of low-precision models
+======================================
+
+The goal of this document is to describe how optimized models are represented in the OpenVINO Intermediate Representation (IR)
+and to provide guidance on interpretation rules for such models at runtime.
+
+Currently, there are two groups of optimization methods that can influence the IR after applying them to the full-precision model:
+
+- **Sparsity**. It is represented by zeros inside the weights, and it is up to the hardware plugin how to interpret these zeros
+  (use the weights as is or apply special compression algorithms and sparse arithmetic). No additional mask is provided with the model.
+- **Quantization**. The rest of this document is dedicated to the representation of quantized models.
+
+Representation of quantized models
+###################################
+
+The OpenVINO Toolkit represents all the quantized models using the so-called FakeQuantize operation (see the description in
+:doc:`this document `). This operation is very expressive and allows mapping values between
+arbitrary input and output ranges. The whole idea behind it is quite simple: we project (discretize) the input values to the
+low-precision data type using an affine transformation (with clamp and rounding) and then reproject the discrete values back
+to the original range and data type. It can be considered an emulation of the quantization process which happens at runtime.
+To execute a particular DL operation in low precision, all its inputs should be quantized, i.e., they should have FakeQuantize
+between the operation and the data blobs. The figure below shows an example of a quantized Convolution which contains two
+FakeQuantize nodes: one for weights and one for activations (bias is quantized using the same parameters).
+
+.. .. image:: _static/images/quantized_convolution.png
+
+Starting from the OpenVINO 2020.2 release, all quantized models are represented in a compressed form. It means that the weights
+of low-precision operations are converted into the target precision (e.g., INT8). It helps to substantially reduce the model size.
+The rest of the parameters can be represented in FLOAT32 or FLOAT16 precision, depending on the input full-precision model used in
+the quantization process. Fig. 2 below shows an example of a part of the compressed IR.
+
+.. .. image:: _static/images/quantized_model_example.png
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst
index 77ea0d5b177049..5037372de4cbce 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMax_1.rst
@@ -30,6 +30,8 @@ Particular cases:
 1. If ``axes`` is an empty list, *ReduceMax* corresponds to the identity operation.
 2. If ``axes`` contains all dimensions of input ``data``, a single reduction value is calculated for the entire input tensor.
 
+Reducing an empty tensor results in undefined behavior.
+
 **Attributes**
 
 * *keep_dims*
diff --git a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst
index a8c0f351fde32e..4986ddc474606f 100644
--- a/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst
+++ b/docs/articles_en/documentation/openvino_ir/operation_sets/operations_specifications/reduction/ReduceMin_1.rst
@@ -30,6 +30,8 @@ Particular cases:
 1. If ``axes`` is an empty list, *ReduceMin* corresponds to the identity operation.
 2. If ``axes`` contains all dimensions of input ``data``, a single reduction value is calculated for the entire input tensor.
 
+Reducing an empty tensor results in undefined behavior.
+
 **Attributes**
 
 * *keep_dims*
diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.rst
index 28f7cd295688f6..1665228b720149 100644
--- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.rst
+++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/extending_model_optimizer_with_caffe_python_layers.rst
@@ -1,5 +1,7 @@
-# [LEGACY] Extending Model Optimizer with Caffe Python Layers {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
+.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Extending_Model_Optimizer_With_Caffe_Python_Layers}
 
+[LEGACY] Extending Model Optimizer with Caffe Python Layers
+============================================================
 
 .. 
meta:: :description: Learn how to extract operator attributes in Model Optimizer to diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.rst index f857940468975d..17ca99f21d6e06 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions.rst @@ -1,5 +1,7 @@ -# [LEGACY] Model Optimizer Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions} +.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions} +[LEGACY] Model Optimizer Extensions +===================================== .. meta:: :description: Learn about deprecated extensions, which enable injecting logic diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.rst index 88d26a4a8fc66e..8cce1ff4ad1d76 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_extractor.rst @@ -1,5 +1,7 @@ -# [LEGACY] Operation Extractor {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor} +.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Extractor} +[LEGACY] Operation Extractor +============================= .. meta:: :description: Learn about a deprecated generic extension in Model Optimizer, diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.rst index 03dbe96e2eba4c..9b800fdbe52fcf 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_operation.rst @@ -1,5 +1,7 @@ -# [LEGACY] Model Optimizer Operation {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation} +.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Operation} +[LEGACY] Model Optimizer Operation +=================================== .. 
meta:: :description: Learn about the Op class, that contains operation attributes, diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.rst index 4178364c44bc3a..7b4d69ad072b4b 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_extensions/model_optimizer_transformation_extensions.rst @@ -1,5 +1,7 @@ -# [LEGACY] Graph Transformation Extensions {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions} +.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Model_Optimizer_Extensions_Model_Optimizer_Transformation_Extensions} +[LEGACY] Graph Transformation Extensions +========================================== .. meta:: :description: Learn about various base classes for front, middle and back phase diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.rst b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.rst index 985a934c14106c..947f61bcd1916f 100644 --- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.rst +++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition/customize_model_optimizer/model_optimizer_ports_connections.rst @@ -1,5 +1,7 @@ -# [LEGACY] Graph Traversal and Modification {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections} +.. {#openvino_docs_MO_DG_prepare_model_customize_model_optimizer_Customize_Model_Optimizer_Model_Optimizer_Ports_Connections} +[LEGACY] Graph Traversal and Modification +=========================================== .. meta:: :description: Learn about deprecated APIs and the Port and Connection classes diff --git a/docs/glossary.rst b/docs/articles_en/glossary.rst similarity index 100% rename from docs/glossary.rst rename to docs/articles_en/glossary.rst diff --git a/docs/articles_en/learn_openvino.rst b/docs/articles_en/learn_openvino.rst index db433e93272aed..a8a2a8018b4f43 100644 --- a/docs/articles_en/learn_openvino.rst +++ b/docs/articles_en/learn_openvino.rst @@ -30,6 +30,5 @@ as well as an experienced user. | :doc:`OpenVINO Samples ` | The OpenVINO samples (Python and C++) are simple console applications that show how to use specific OpenVINO API features. They can assist you in executing tasks such as loading a model, running inference, querying particular device capabilities, etc. -| :doc:`OpenVINO™ API 2.0 Transition Guide ` -| With the release of 2022.1 OpenVINO introduced its improved API 2.0 and its new OpenVINO IR model format: IR v11. This tutorial will instruct you on how to adopt the new solution, as well as show you the benefits of the new logic of working with models. 
- +| :doc:`Optimize and Deploy Generative AI Models ` +| Detailed information on how OpenVINO accelerates Generative AI use cases and what models it supports. This tutorial provides instructions for running Generative AI models using Hugging Face Optimum Intel and Native OpenVINO APIs. diff --git a/docs/articles_en/learn_openvino/tutorials.rst b/docs/articles_en/learn_openvino/tutorials.rst index f2d86fdcec4ff6..e5d8772f85a123 100644 --- a/docs/articles_en/learn_openvino/tutorials.rst +++ b/docs/articles_en/learn_openvino/tutorials.rst @@ -58,29 +58,36 @@ The Jupyter notebooks are categorized into following classes: Below you will find a selection of recommended tutorials that demonstrate inference on a particular model. These tutorials are guaranteed to provide a great experience with inference in OpenVINO: + .. showcase:: - :title: 269-film-slowmo - :img: https://github.com/googlestaging/frame-interpolation/raw/main/moment.gif + :title: 272-paint-by-example + :img: https://camo.githubusercontent.com/79d83ce8fc6813a503f372bacb7dc709c79d9560060df3dc92196b9849cc33a9/68747470733a2f2f757365722d696d616765732e67697468756275736572636f6e74656e742e636f6d2f3130333232363538302f3233363935343931382d66333634623232372d323933632d346637382d613962662d3964636562636231303334612e706e67 - Frame interpolation using FILM and OpenVINO. + Paint by Example using Stable Diffusion and OpenVINO. .. showcase:: - :title: 268-table-question-answering - :img: _static/images/notebook_eye.png + :title: 271-sdxl-turbo + :img: _images/271-sdxl-turbo-with-output_30_1.png - Table Question Answering using TAPAS and OpenVINO. + Single step image generation using SDXL-turbo and OpenVINO. .. showcase:: - :title: 267-distil-whisper-asr - :img: _static/images/notebook_eye.png + :title: 270-sound-generation-audioldm2 + :img: https://github.com/openvinotoolkit/openvino_notebooks/assets/76463150/c93a0f86-d9cf-4bd1-93b9-e27532170d75 + + Sound Generation with AudioLDM2 and OpenVINO. - Automatic speech recognition using Distil-Whisper and OpenVINO. +.. showcase:: + :title: 269-film-slowmo + :img: https://github.com/googlestaging/frame-interpolation/raw/main/moment.gif + + Frame interpolation using FILM and OpenVINO. .. showcase:: - :title: 266-speculative-sampling + :title: 267-distil-whisper-asr :img: _static/images/notebook_eye.png - Text Generation via Speculative Sampling, KV Caching, and OpenVINO. + Automatic speech recognition using Distil-Whisper and OpenVINO. .. showcase:: :title: 265-wuerstchen-image-generation @@ -101,58 +108,16 @@ Below you will find a selection of recommended tutorials that demonstrate infere Image generation with Latent Consistency Model and OpenVINO. .. showcase:: - :title: 262-softvc-voice-conversion - :img: _static/images/notebook_eye.png - - SoftVC VITS Singing Voice Conversion and OpenVINO. - -.. showcase:: - :title: 261-fast-segment-anything - :img: https://user-images.githubusercontent.com/26833433/248551984-d98f0f6d-7535-45d0-b380-2e1440b52ad7.jpg + :title: 263-lcm-lora-controlnet + :img: https://user-images.githubusercontent.com/29454499/284292122-f146e16d-7233-49f7-a401-edcb714b5288.png - Object segmentation with FastSAM and OpenVINO. + Text-to-Image Generation with LCM LoRA and ControlNet Conditioning. .. showcase:: - :title: 259-decidiffusion-image-generation - :img: https://user-images.githubusercontent.com/29454499/274927904-cd734349-9954-4656-ab96-08a903e846ef.png - - Image generation with DeciDiffusion and OpenVINO. - -.. 
showcase::
-   :title: 258-blip-diffusion-subject-generation
-   :img: https://user-images.githubusercontent.com/76161256/275485611-0ecf621f-b544-44ae-8258-8a49be704989.png
-
-   Subject-driven image generation and editing using BLIP Diffusion and OpenVINO.
-
-.. showcase::
-   :title: 257-llava-multimodal-chatbot
-   :img: https://raw.githubusercontent.com/haotian-liu/LLaVA/main/images/llava_logo.png
-
-   Visual-language assistant with LLaVA and OpenVINO.
-
-.. showcase::
-   :title: 256-bark-text-to-audio
-   :img: https://user-images.githubusercontent.com/29454499/269278630-9a770279-0045-480e-95f2-1a2f2d0a5115.png
-
-   Text-to-speech generation using Bark and OpenVINO.
-
-.. showcase::
-   :title: 254-llm-chatbot
+   :title: 262-softvc-voice-conversion
    :img: _static/images/notebook_eye.png
 
-   Create an LLM-powered Chatbot using OpenVINO.
-
-.. showcase::
-   :title: 253-zeroscope-text2video
-   :img: https://camo.githubusercontent.com/64eec6e52d060ca971c5a3be3f0d60e712907c98b4661b454d7e3e9575c2bc6b/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f68756767696e67666163652f646f63756d656e746174696f6e2d696d616765732f7265736f6c76652f6d61696e2f6469666675736572732f646172746876616465725f63657270656e73652e676966
-
-   Video generation with ZeroScope and OpenVINO.
-
-.. showcase::
-   :title: 251-tiny-sd-image-generation
-   :img: https://user-images.githubusercontent.com/29454499/260904650-274fc2f9-24d2-46a3-ac3d-d660ec3c9a19.png
-
-   Image Generation with Tiny-SD and OpenVINO.
+   SoftVC VITS Singing Voice Conversion and OpenVINO.
 
 .. note::
diff --git a/docs/articles_en/learn_openvino/tutorials/notebooks_section_1.rst b/docs/articles_en/learn_openvino/tutorials/notebooks_section_1.rst
index 7d1bb5a25d30f0..54a4b4a35e3c85 100644
--- a/docs/articles_en/learn_openvino/tutorials/notebooks_section_1.rst
+++ b/docs/articles_en/learn_openvino/tutorials/notebooks_section_1.rst
@@ -17,6 +17,12 @@ Tutorials that explain how to optimize and quantize models with OpenVINO tools.
 
    Convert TensorFlow Hub models to OpenVINO Intermediate Representation (IR).
 
+.. showcase::
+   :title: 125-lraspp-segmentation
+   :img: _static/images/notebook_eye.png
+
+   Semantic segmentation with LRASPP MobileNet v3 and OpenVINO.
+
 .. showcase::
    :title: 125-convnext-classification
    :img: _static/images/notebook_eye.png
diff --git a/docs/articles_en/learn_openvino/tutorials/notebooks_section_2.rst b/docs/articles_en/learn_openvino/tutorials/notebooks_section_2.rst
index 18ab9430db1dee..b3c03f09a55ed6 100644
--- a/docs/articles_en/learn_openvino/tutorials/notebooks_section_2.rst
+++ b/docs/articles_en/learn_openvino/tutorials/notebooks_section_2.rst
@@ -11,6 +11,22 @@ Model Demos
 
 Demos that demonstrate inference on a particular model.
 
+.. showcase::
+   :title: 272-paint-by-example
+   :img: https://camo.githubusercontent.com/79d83ce8fc6813a503f372bacb7dc709c79d9560060df3dc92196b9849cc33a9/68747470733a2f2f757365722d696d616765732e67697468756275736572636f6e74656e742e636f6d2f3130333232363538302f3233363935343931382d66333634623232372d323933632d346637382d613962662d3964636562636231303334612e706e67
+
+   Paint by Example using Stable Diffusion and OpenVINO.
+
+.. showcase::
+   :title: 271-sdxl-turbo
+   :img: _images/271-sdxl-turbo-with-output_30_1.png
+
+   Single step image generation using SDXL-turbo and OpenVINO.
+
+.. showcase::
+   :title: 270-sound-generation-audioldm2
+   :img: https://github.com/openvinotoolkit/openvino_notebooks/assets/76463150/c93a0f86-d9cf-4bd1-93b9-e27532170d75
+
+   Sound Generation with AudioLDM2 and OpenVINO.
+
 .. 
showcase:: :title: 269-film-slowmo :img: https://github.com/googlestaging/frame-interpolation/raw/main/moment.gif @@ -53,6 +69,12 @@ Demos that demonstrate inference on a particular model. Image generation with Latent Consistency Model and OpenVINO. +.. showcase:: + :title: 263-lcm-lora-controlnet + :img: https://user-images.githubusercontent.com/29454499/284292122-f146e16d-7233-49f7-a401-edcb714b5288.png + + Text-to-Image Generation with LCM LoRA and ControlNet Conditioning. + .. showcase:: :title: 262-softvc-voice-conversion :img: _static/images/notebook_eye.png @@ -125,6 +147,12 @@ Demos that demonstrate inference on a particular model. Universal segmentation with OneFormer and OpenVINO™. +.. showcase:: + :title: 248-ssd-b1 + :img: https://user-images.githubusercontent.com/29454499/258651862-28b63016-c5ff-4263-9da8-73ca31100165.jpeg + + Image generation with Stable Diffusion XL and OpenVINO™. + .. showcase:: :title: 248-stable-diffusion-xl :img: https://user-images.githubusercontent.com/29454499/258651862-28b63016-c5ff-4263-9da8-73ca31100165.jpeg diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/CPU.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/CPU.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GNA.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst similarity index 99% rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GNA.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst index 0c968f56c7172a..f53b92a27a2372 100644 --- a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GNA.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GNA.rst @@ -359,7 +359,7 @@ and *W* is limited to 87 when there are 64 input channels. :download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i16) <../../../docs/OV_Runtime_UG/supported_plugins/files/GNA_Maximum_Input_Tensor_Widths_i16.csv>` -:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/OV_Runtime_UG/supported_plugins/files/GNA_Maximum_Input_Tensor_Widths_i8.csv>` +:download:`Table of Maximum Input Tensor Widths (W) vs. Rest of Parameters (Input and Kernel Precision: i8) <../../../docs/OV_Runtime_UG/supported_plugins/files/GNA_Maximum_Input_Tensor_Widths_i8.csv>` .. 
note::
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst
similarity index 98%
rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU.rst
rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst
index 8c54eb3a5bdd17..c47b0126a8c419 100644
--- a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU.rst
+++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU.rst
@@ -452,6 +452,15 @@ Below is a list of such operations:
 The behavior depends on specific parameters of the operations and hardware configuration.
 
+.. important::
+
+   While working with a fine-tuned model, inference may show reduced accuracy and a performance
+   drop on GPU if Winograd convolutions are selected. This issue can be fixed by disabling
+   Winograd convolutions:
+
+   .. code:: python
+
+      compiled_model = core.compile_model(ov_model, device_name="GPU", config={"GPU_DISABLE_WINOGRAD_CONVOLUTION": True})
 
 GPU Performance Checklist: Summary
 #######################################
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU/GPU_RemoteTensor_API.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU/GPU_RemoteTensor_API.rst
similarity index 100%
rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/GPU/GPU_RemoteTensor_API.rst
rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/GPU/GPU_RemoteTensor_API.rst
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/NPU.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/NPU.rst
similarity index 100%
rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/NPU.rst
rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/NPU.rst
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/config_properties.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/config_properties.rst
similarity index 100%
rename from docs/articles_en/openvino_workflow/openvino_intro/Device_Plugins/config_properties.rst
rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/Device_Plugins/config_properties.rst
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/ShapeInference.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference.rst
similarity index 100%
rename from docs/articles_en/openvino_workflow/openvino_intro/ShapeInference.rst
rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference.rst
diff --git a/docs/OV_Runtime_UG/Troubleshooting_ReshapeMethod.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference/Troubleshooting_ReshapeMethod.rst
similarity index 100%
rename from docs/OV_Runtime_UG/Troubleshooting_ReshapeMethod.rst
rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/ShapeInference/Troubleshooting_ReshapeMethod.rst
diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide.rst
similarity index 100%
rename from 
docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_common.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_internals.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_internals.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_internals.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_internals.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency/Model_caching_overview.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency/Model_caching_overview.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency/Model_caching_overview.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_latency/Model_caching_overview.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst 
b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/dldt_deployment_optimization_tput_advanced.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/memory_optimization_guide.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/memory_optimization_guide.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/memory_optimization_guide.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/memory_optimization_guide.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/performance_hints.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/performance_hints.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/performance_hints.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/performance_hints.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/precision_control.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/precision_control.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/precision_control.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/layout_overview.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/layout_overview.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/layout_overview.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/layout_overview.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_details.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_details.rst similarity index 100% rename from 
docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_details.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_details.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/dldt_deployment_optimization_guide/preprocessing_overview/preprocessing_usecase_save.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/inference_modes_overview.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/inference_modes_overview.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview.rst diff --git a/docs/OV_Runtime_UG/auto_device_selection.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst similarity index 95% rename from docs/OV_Runtime_UG/auto_device_selection.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst index 832369d9611183..807944dd248506 100644 --- a/docs/OV_Runtime_UG/auto_device_selection.rst +++ b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection.rst @@ -167,6 +167,17 @@ Following the OpenVINO™ naming convention, the Automatic Device Selection mode | | | | | The default value is ``true``. | +----------------------------------------------+--------------------------------------------------------------------+ +| ``ov::intel_auto::schedule_policy`` | **Values**: | +| | | +| | ``ROUND_ROBIN`` | +| | | +| | ``DEVICE_PRIORITY`` | +| | | +| | Specify the schedule policy of infer request assigned to hardware | +| | plugin for AUTO cumulative mode (MULTI). | +| | | +| | The default value is ``DEVICE_PRIORITY``. | ++----------------------------------------------+--------------------------------------------------------------------+ Inference with AUTO is configured similarly to when device plugins are used: you compile the model on the plugin with configuration and execute inference. 
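
For example, a minimal sketch (assuming the Python API and a hypothetical ``model.xml`` file) of compiling a model on
AUTO with an explicit device candidate list and the cumulative throughput hint used by the cumulative (MULTI) mode:

.. code-block:: python

   from openvino.runtime import Core

   core = Core()
   model = core.read_model("model.xml")  # hypothetical model path

   # AUTO with an explicit device candidate list; in cumulative mode, the
   # schedule policy above controls how infer requests are assigned to devices.
   compiled_model = core.compile_model(
       model,
       device_name="AUTO:GPU,CPU",
       config={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT"},
   )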
diff --git a/docs/OV_Runtime_UG/AutoPlugin_Debugging.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection/AutoPlugin_Debugging.rst similarity index 100% rename from docs/OV_Runtime_UG/AutoPlugin_Debugging.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/auto_device_selection/AutoPlugin_Debugging.rst diff --git a/docs/OV_Runtime_UG/automatic_batching.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/automatic_batching.rst similarity index 100% rename from docs/OV_Runtime_UG/automatic_batching.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/automatic_batching.rst diff --git a/docs/OV_Runtime_UG/hetero_execution.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/hetero_execution.rst similarity index 100% rename from docs/OV_Runtime_UG/hetero_execution.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/hetero_execution.rst diff --git a/docs/OV_Runtime_UG/multi_device.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/multi_device.rst similarity index 100% rename from docs/OV_Runtime_UG/multi_device.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/inference_modes_overview/multi_device.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/integrate_with_your_application.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/integrate_with_your_application.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application.rst diff --git a/docs/OV_Runtime_UG/Python_API_exclusives.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/Python_API_exclusives.rst similarity index 100% rename from docs/OV_Runtime_UG/Python_API_exclusives.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/Python_API_exclusives.rst diff --git a/docs/OV_Runtime_UG/Python_API_inference.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/Python_API_inference.rst similarity index 100% rename from docs/OV_Runtime_UG/Python_API_inference.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/Python_API_inference.rst diff --git a/docs/OV_Runtime_UG/model_representation.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/model_representation.rst similarity index 100% rename from docs/OV_Runtime_UG/model_representation.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/model_representation.rst diff --git a/docs/OV_Runtime_UG/ov_infer_request.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/ov_infer_request.rst similarity index 100% rename from docs/OV_Runtime_UG/ov_infer_request.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/integrate_with_your_application/ov_infer_request.rst diff --git 
a/docs/articles_en/openvino_workflow/openvino_intro/model_state_intro.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/model_state_intro.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/model_state_intro.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/model_state_intro.rst diff --git a/docs/OV_Runtime_UG/lowlatency2.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/model_state_intro/lowlatency2.rst similarity index 100% rename from docs/OV_Runtime_UG/lowlatency2.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/model_state_intro/lowlatency2.rst diff --git a/docs/articles_en/openvino_workflow/openvino_intro/ov_dynamic_shapes.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes.rst similarity index 100% rename from docs/articles_en/openvino_workflow/openvino_intro/ov_dynamic_shapes.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes.rst diff --git a/docs/OV_Runtime_UG/ov_without_dynamic_shapes.rst b/docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes/ov_without_dynamic_shapes.rst similarity index 100% rename from docs/OV_Runtime_UG/ov_without_dynamic_shapes.rst rename to docs/articles_en/openvino_workflow/running_inference_with_openvino/ov_dynamic_shapes/ov_without_dynamic_shapes.rst diff --git a/docs/install_guides/pre-release-note.md b/docs/dev/pypi_publish/pre-release-note.md similarity index 100% rename from docs/install_guides/pre-release-note.md rename to docs/dev/pypi_publish/pre-release-note.md diff --git a/docs/install_guides/pypi-openvino-dev.md b/docs/dev/pypi_publish/pypi-openvino-dev.md similarity index 100% rename from docs/install_guides/pypi-openvino-dev.md rename to docs/dev/pypi_publish/pypi-openvino-dev.md diff --git a/docs/install_guides/pypi-openvino-rt.md b/docs/dev/pypi_publish/pypi-openvino-rt.md similarity index 100% rename from docs/install_guides/pypi-openvino-rt.md rename to docs/dev/pypi_publish/pypi-openvino-rt.md diff --git a/docs/nbdoc/consts.py b/docs/nbdoc/consts.py index 57d9e603b38b4a..46f8c9480258bf 100644 --- a/docs/nbdoc/consts.py +++ b/docs/nbdoc/consts.py @@ -5,7 +5,7 @@ repo_owner = "openvinotoolkit" repo_name = "openvino_notebooks" repo_branch = "tree/main" -artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/" +artifacts_link = "http://repository.toolbox.iotg.sclab.intel.com/projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/" blacklisted_extensions = ['.xml', '.bin'] notebooks_repo = "https://github.com/openvinotoolkit/openvino_notebooks/blob/main/" notebooks_binder = "https://mybinder.org/v2/gh/openvinotoolkit/openvino_notebooks/HEAD?filepath=" diff --git a/docs/notebooks/002-openvino-api-with-output.rst b/docs/notebooks/002-openvino-api-with-output.rst index 1810a90a6ee60d..466d51c361018b 100644 --- a/docs/notebooks/002-openvino-api-with-output.rst +++ b/docs/notebooks/002-openvino-api-with-output.rst @@ -1,8 +1,17 @@ OpenVINO™ Runtime API Tutorial ============================== -This notebook explains the basics of the OpenVINO Runtime API. It -covers: +This notebook explains the basics of the OpenVINO Runtime API. + +The notebook is divided into sections with headers. The next cell +contains global requirements for installation and imports. 
Each section +is standalone and does not depend on any previous sections. All models +used in this tutorial are provided as examples. These model files can be +replaced with your own models. The exact outputs will be different, but +the process is the same. + +**Table of contents:** + - `Loading OpenVINO Runtime and Showing Info <#loading-openvino-runtime-and-showing-info>`__ @@ -13,6 +22,7 @@ covers: - `PaddlePaddle Model <#paddlepaddle-model>`__ - `TensorFlow Model <#tensorflow-model>`__ - `TensorFlow Lite Model <#tensorflow-lite-model>`__ + - `PyTorch Model <#pytorch-model>`__ - `Getting Information about a Model <#getting-information-about-a-model>`__ @@ -28,19 +38,11 @@ covers: - `Caching a Model <#caching-a-model>`__ -The notebook is divided into sections with headers. The next cell -contains global requirements installation and imports. Each section is -standalone and does not depend on any previous sections. A segmentation -and classification OpenVINO IR model and a segmentation ONNX model are -provided as examples. These model files can be replaced with your own -models. The exact outputs will be different, but the process is the -same. - .. code:: ipython3 # Required imports. Please execute this cell first. - %pip install -q "openvino>=2023.1.0" - %pip install requests tqdm + %pip install -q "openvino>=2023.1.0" + %pip install requests tqdm ipywidgets # Fetch `notebook_utils` module import urllib.request @@ -55,19 +57,44 @@ same. .. parsed-literal:: Note: you may need to restart the kernel to use updated packages. - Requirement already satisfied: requests in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2.31.0) - Requirement already satisfied: tqdm in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (4.66.1) - Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (3.3.1) - Requirement already satisfied: idna<4,>=2.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (3.4) - Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (2.0.7) - Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (2023.7.22) + Requirement already satisfied: requests in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2.31.0) + Requirement already satisfied: tqdm in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (4.66.1) + Requirement already satisfied: ipywidgets in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (8.1.1) + Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (3.3.2) + Requirement already satisfied: idna<4,>=2.5 in 
/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (3.6) + Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (2.1.0) + Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests) (2023.11.17) + Requirement already satisfied: comm>=0.1.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (0.2.0) + Requirement already satisfied: ipython>=6.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (8.12.3) + Requirement already satisfied: traitlets>=4.3.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (5.14.0) + Requirement already satisfied: widgetsnbextension~=4.0.9 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (4.0.9) + Requirement already satisfied: jupyterlab-widgets~=3.0.9 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets) (3.0.9) + Requirement already satisfied: backcall in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.2.0) + Requirement already satisfied: decorator in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1) + Requirement already satisfied: jedi>=0.16 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.19.1) + Requirement already satisfied: matplotlib-inline in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.1.6) + Requirement already satisfied: pickleshare in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.7.5) + Requirement already satisfied: prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.41) + Requirement already satisfied: pygments>=2.4.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (2.17.2) + Requirement already satisfied: stack-data in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3) + Requirement already satisfied: typing-extensions in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from 
ipython>=6.1.0->ipywidgets) (4.8.0) + Requirement already satisfied: pexpect>4.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0) + Requirement already satisfied: parso<0.9.0,>=0.8.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.3) + Requirement already satisfied: ptyprocess>=0.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0) + Requirement already satisfied: wcwidth in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30->ipython>=6.1.0->ipywidgets) (0.2.12) + Requirement already satisfied: executing>=1.2.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.0.1) + Requirement already satisfied: asttokens>=2.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.4.1) + Requirement already satisfied: pure-eval in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (0.2.2) + Requirement already satisfied: six>=1.12.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets) (1.16.0) Note: you may need to restart the kernel to use updated packages. Loading OpenVINO Runtime and Showing Info ----------------------------------------- -Initialize OpenVINO Runtime with Core() + + +Initialize OpenVINO Runtime with ``ov.Core()`` .. code:: ipython3 @@ -103,6 +130,8 @@ be faster. Loading a Model --------------- + + After initializing OpenVINO Runtime, first read the model file with ``read_model()``, then compile it to the specified device with the ``compile_model()`` method. @@ -115,6 +144,8 @@ using a tool dedicated to this task. OpenVINO IR Model ~~~~~~~~~~~~~~~~~ + + An OpenVINO IR (Intermediate Representation) model consists of an ``.xml`` file, containing information about network topology, and a ``.bin`` file, containing the weights and biases binary data. Models in @@ -167,7 +198,7 @@ notebooks. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') @@ -184,6 +215,8 @@ notebooks. ONNX Model ~~~~~~~~~~ + + `ONNX `__ is an open format built to represent machine learning models. ONNX defines a common set of operators - the building blocks of machine learning and deep learning models - and a common file @@ -214,7 +247,7 @@ points to the filename of an ONNX model. .. 
parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.onnx') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.onnx') @@ -237,6 +270,8 @@ The ONNX model can be exported to OpenVINO IR with ``save_model()``: PaddlePaddle Model ~~~~~~~~~~~~~~~~~~ + + `PaddlePaddle `__ models saved for inference can also be passed to OpenVINO Runtime without any conversion step. Pass the filename with extension to @@ -268,7 +303,7 @@ without any conversion step. Pass the filename with extension to .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/inference.pdiparams') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/inference.pdiparams') @@ -289,16 +324,10 @@ without any conversion step. Pass the filename with extension to TensorFlow Model ~~~~~~~~~~~~~~~~ -TensorFlow models saved in frozen graph format can also be passed to -``read_model`` starting in OpenVINO 2022.3. - **NOTE**: Directly loading TensorFlow models is available as a - preview feature in the OpenVINO 2022.3 release. Fully functional - support will be provided in the upcoming 2023 releases. Currently - support is limited to only frozen graph inference format. Other - TensorFlow model formats must be converted to OpenVINO IR using - `model conversion - API `__. + +TensorFlow models saved in frozen graph format can also be passed to +``read_model``. .. code:: ipython3 @@ -318,7 +347,7 @@ TensorFlow models saved in frozen graph format can also be passed to .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.pb') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.pb') @@ -339,6 +368,8 @@ TensorFlow models saved in frozen graph format can also be passed to TensorFlow Lite Model ~~~~~~~~~~~~~~~~~~~~~ + + `TFLite `__ models saved for inference can also be passed to OpenVINO Runtime. Pass the filename with extension ``.tflite`` to ``read_model`` and exported an OpenVINO IR with @@ -352,7 +383,7 @@ It is pre-trained model optimized to work with TensorFlow Lite. from pathlib import Path - tflite_model_url = 'https://tfhub.dev/tensorflow/lite-model/inception_v4_quant/1/default/1?lite-format=tflite' + tflite_model_url = 'https://www.kaggle.com/models/tensorflow/inception/frameworks/tfLite/variations/v4-quant/versions/1?lite-format=tflite' tflite_model_path = Path('model/classification.tflite') download_file(tflite_model_url, filename=tflite_model_path.name, directory=tflite_model_path.parent) @@ -368,7 +399,7 @@ It is pre-trained model optimized to work with TensorFlow Lite. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.tflite') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.tflite') @@ -385,9 +416,43 @@ It is pre-trained model optimized to work with TensorFlow Lite. 
ov.save_model(model_tflite, output_model="model/exported_tflite_model.xml") +PyTorch Model +~~~~~~~~~~~~~ + + + +`PyTorch `__ models cannot be directly passed to +``core.read_model``. An ``ov.Model`` for model objects from this framework +can be obtained using the ``ov.convert_model`` API. You can find more +details in the `pytorch-to-openvino <../102-pytorch-to-openvino>`__ +notebook. In this tutorial we will use the +`resnet18 `__ +model from the torchvision library. After converting the model with +``ov.convert_model``, it can be compiled on a device using +``core.compile_model`` or saved to disk for later use with +``ov.save_model``. + +.. code:: ipython3 + + import openvino as ov + import torch + from torchvision.models import resnet18, ResNet18_Weights + + core = ov.Core() + + pt_model = resnet18(weights=ResNet18_Weights.IMAGENET1K_V1) + example_input = torch.zeros((1, 3, 224, 224)) + ov_model_pytorch = ov.convert_model(pt_model, example_input=example_input) + + compiled_model_pytorch = core.compile_model(ov_model_pytorch, device_name="CPU") + + ov.save_model(ov_model_pytorch, "model/exported_pytorch_model.xml") + Getting Information about a Model --------------------------------- + + The OpenVINO Model instance stores information about the model. Information about the inputs and outputs of the model are in ``model.inputs`` and ``model.outputs``. These are also properties of the @@ -415,13 +480,15 @@ Information about the inputs and outputs of the model are in .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') Model Inputs ~~~~~~~~~~~~ + + Information about all input layers is stored in the ``inputs`` dictionary. @@ -495,6 +562,8 @@ point) precision. Model Outputs ~~~~~~~~~~~~~ + + .. code:: ipython3 import openvino as ov @@ -559,32 +628,41 @@ classes (``C``). The output is returned as 32-bit floating point. Doing Inference on a Model -------------------------- + + **NOTE** this notebook demonstrates only the basic synchronous inference API. For an async inference example, please refer to `Async API notebook <115-async-api-with-output.html>`__ The diagram below shows a typical inference pipeline with OpenVINO -.. figure:: https://docs.openvino.ai/2023.0/_images/IMPLEMENT_PIPELINE_with_API_C.svg +.. figure:: https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/a91bc582-165b-41a2-ab08-12c812059936 :alt: image.png image.png Creating OpenVINO Core and model compilation is covered in the previous -steps. The next step is preparing an inference request. To do inference -on a model, first create an inference request by calling the -``create_infer_request()`` method of ``CompiledModel``, -``compiled_model`` that was loaded with ``compile_model()``. Then, call -the ``infer()`` method of ``InferRequest``. It expects one argument: -``inputs``. This is a dictionary that maps input layer names to input -data or list of input data in ``np.ndarray`` format, where the position -of the input tensor corresponds to input index. If a model has a single -input, wrapping to a dictionary or list can be omitted. +steps. The next step is preparing inputs. 
You can provide inputs in one +of the supported formats: a dictionary with input names as keys and +``np.arrays`` that represent input tensors as values, or a list or tuple +of ``np.arrays`` representing input tensors (their order should match the +model input order). If a model has a single input, wrapping it in a +dictionary or list can be omitted. To do inference on a model, pass the +prepared inputs to the compiled model object obtained with +``core.compile_model``. The inference result is represented as a dictionary, +where keys are model outputs and values are ``np.arrays`` with the data +they produced. .. code:: ipython3 # Install opencv package for image handling - !pip install -q opencv-python + %pip install -q opencv-python + + +.. parsed-literal:: + + Note: you may need to restart the kernel to use updated packages. + **Load the network** @@ -608,7 +686,7 @@ input, wrapping to a dictionary or list can be omitted. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') @@ -756,9 +834,13 @@ notebook <001-hello-world-with-output.html>`__. Reshaping and Resizing ---------------------- + + Change Image Size ~~~~~~~~~~~~~~~~~ + + Instead of reshaping the image to fit the model, it is also possible to reshape the model to fit the image. Be aware that not all models support reshaping, and models that do, may not support all input shapes. The @@ -793,7 +875,7 @@ input shape. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/segmentation.bin') @@ -847,6 +929,8 @@ dimensions. After reshaping, compile the network once again. Change Batch Size ~~~~~~~~~~~~~~~~~ + + Use the ``.reshape()`` method to set the batch size, by increasing the first element of ``new_shape``. For example, to set a batch size of two, set ``new_shape = (2,3,544,544)`` in the cell above. @@ -907,6 +991,8 @@ input image through the network to see the result: Caching a Model --------------- + + For some devices, like GPU, loading a model can take some time. Model Caching solves this issue by caching the model in a cache directory. If ``core.compile_model(model=net, device_name=device_name, config=config_dict)`` @@ -944,7 +1030,7 @@ the cache. .. 
parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/002-openvino-api/model/classification.bin') diff --git a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst index 81805f59100eb5..2168de355e96ff 100644 --- a/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst +++ b/docs/notebooks/102-pytorch-onnx-to-openvino-with-output.rst @@ -44,8 +44,7 @@ documentation `__ - `ONNX Model Conversion <#onnx-model-conversion>`__ - - `Convert PyTorch model to - ONNX <#convert-pytorch-model-to-onnx>`__ + - `Convert PyTorch model to ONNX <#convert-pytorch-model-to-onnx>`__ - `Convert ONNX Model to OpenVINO IR Format <#convert-onnx-model-to-openvino-ir-format>`__ @@ -78,11 +77,15 @@ documentation `__ Note: you may need to restart the kernel to use updated packages. -Preparation ------------------------------------------------------ +Preparation +----------- + + + +Imports +~~~~~~~ + -Imports -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -105,8 +108,10 @@ Imports from notebook_utils import segmentation_map_to_image, viz_result_image, SegmentationMap, Label, download_file -Settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Settings +~~~~~~~~ + + Set a name for the model, then define width and height of the image that will be used by the network during inference. According to the input @@ -127,14 +132,14 @@ transforms function, the model is pre-trained on images with a height of onnx_path.parent.mkdir() ir_path = onnx_path.with_suffix(".xml") -Load Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load Model +~~~~~~~~~~ + + Generally, PyTorch models represent an instance of ``torch.nn.Module`` class, initialized by a state dictionary with model weights. Typical -steps for getting a pre-trained model: - -1. Create instance of model class +steps for getting a pre-trained model: 1. Create instance of model class 2. Load checkpoint state dict, which contains pre-trained model weights 3. Turn model to evaluation for switching some operations to inference mode @@ -180,11 +185,15 @@ have not downloaded the model before. Loaded PyTorch LRASPP MobileNetV3 model -ONNX Model Conversion ---------------------------------------------------------------- +ONNX Model Conversion +--------------------- + + + +Convert PyTorch model to ONNX +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Convert PyTorch model to ONNX -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenVINO supports PyTorch models that are exported in ONNX format. We will use the ``torch.onnx.export`` function to obtain the ONNX model, @@ -223,8 +232,10 @@ line of the output will read: ONNX model exported to model/lraspp_mobilenet_v3_large.onnx. -Convert ONNX Model to OpenVINO IR Format -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert ONNX Model to OpenVINO IR Format +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + To convert the ONNX model to OpenVINO IR with ``FP16`` precision, use model conversion API. The models are saved inside the current directory. @@ -246,14 +257,18 @@ For more information on how to convert models, see this Exporting ONNX model to IR... This may take a few minutes. 
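The ONNX-to-IR conversion step described above can be sketched in a few lines; this is a minimal example using the file names from this notebook (the actual notebook cell is not shown in this diff):

.. code:: python

    import openvino as ov

    # Read and convert the exported ONNX model to an openvino.Model object.
    ov_model = ov.convert_model("model/lraspp_mobilenet_v3_large.onnx")

    # Save as OpenVINO IR; compress_to_fp16=True stores the weights in FP16.
    ov.save_model(ov_model, "model/lraspp_mobilenet_v3_large.xml", compress_to_fp16=True)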
-Show Results ------------------------------------------------------- +Show Results +------------ + + Confirm that the segmentation results look as expected by comparing model predictions on the ONNX, OpenVINO IR and PyTorch models. -Load and Preprocess an Input Image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load and Preprocess an Input Image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Images need to be normalized before propagating through the network. @@ -296,8 +311,10 @@ Images need to be normalized before propagating through the network. data/coco.jpg: 0%| | 0.00/202k [00:00`__ - `Pytorch ONNX diff --git a/docs/notebooks/102-pytorch-to-openvino-with-output.rst b/docs/notebooks/102-pytorch-to-openvino-with-output.rst deleted file mode 100644 index 71a724522deb89..00000000000000 --- a/docs/notebooks/102-pytorch-to-openvino-with-output.rst +++ /dev/null @@ -1,796 +0,0 @@ -Convert a PyTorch Model to OpenVINO™ IR -======================================= - -This tutorial demonstrates step-by-step instructions on how to do -inference on a PyTorch classification model using OpenVINO Runtime. -Starting from OpenVINO 2023.0 release, OpenVINO supports direct PyTorch -model conversion without an intermediate step to convert them into ONNX -format. In order, if you try to use the lower OpenVINO version or prefer -to use ONNX, please check this -`tutorial <102-pytorch-to-openvino-with-output.html>`__. - -In this tutorial, we will use the -`RegNetY_800MF `__ model from -`torchvision `__ to -demonstrate how to convert PyTorch models to OpenVINO Intermediate -Representation. - -The RegNet model was proposed in `Designing Network Design -Spaces `__ by Ilija Radosavovic, Raj -Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. The authors -design search spaces to perform Neural Architecture Search (NAS). They -first start from a high dimensional search space and iteratively reduce -the search space by empirically applying constraints based on the -best-performing models sampled by the current search space. Instead of -focusing on designing individual network instances, authors design -network design spaces that parametrize populations of networks. The -overall process is analogous to the classic manual design of networks -but elevated to the design space level. The RegNet design space provides -simple and fast networks that work well across a wide range of flop -regimes. 
- -**Table of contents:** - - -- `Prerequisites <#prerequisites>`__ -- `Load PyTorch Model <#load-pytorch-model>`__ - - - `Prepare Input Data <#prepare-input-data>`__ - - `Run PyTorch Model - Inference <#run-pytorch-model-inference>`__ - - `Benchmark PyTorch Model - Inference <#benchmark-pytorch-model-inference>`__ - -- `Convert PyTorch Model to OpenVINO Intermediate - Representation <#convert-pytorch-model-to-openvino-intermediate-representation>`__ - - - `Select inference device <#select-inference-device>`__ - - `Run OpenVINO Model - Inference <#run-openvino-model-inference>`__ - - `Benchmark OpenVINO Model - Inference <#benchmark-openvino-model-inference>`__ - -- `Convert PyTorch Model with Static Input - Shape <#convert-pytorch-model-with-static-input-shape>`__ - - - `Select inference device <#select-inference-device>`__ - - `Run OpenVINO Model Inference with Static Input - Shape <#run-openvino-model-inference-with-static-input-shape>`__ - - `Benchmark OpenVINO Model Inference with Static Input - Shape <#benchmark-openvino-model-inference-with-static-input-shape>`__ - -- `Convert TorchScript Model to OpenVINO Intermediate - Representation <#convert-torchscript-model-to-openvino-intermediate-representation>`__ - - - `Scripted Model <#scripted-model>`__ - - `Benchmark Scripted Model - Inference <#benchmark-scripted-model-inference>`__ - - `Convert PyTorch Scripted Model to OpenVINO Intermediate - Representation <#convert-pytorch-scripted-model-to-openvino-intermediate-representation>`__ - - `Benchmark OpenVINO Model Inference Converted From Scripted - Model <#benchmark-openvino-model-inference-converted-from-scripted-model>`__ - - `Traced Model <#traced-model>`__ - - `Benchmark Traced Model - Inference <#benchmark-traced-model-inference>`__ - - `Convert PyTorch Traced Model to OpenVINO Intermediate - Representation <#convert-pytorch-traced-model-to-openvino-intermediate-representation>`__ - - `Benchmark OpenVINO Model Inference Converted From Traced - Model <#benchmark-openvino-model-inference-converted-from-traced-model>`__ - -Prerequisites -------------------------------------------------------- - -Install notebook dependencies - -.. code:: ipython3 - - %pip install -q "openvino>=2023.1.0" scipy - - -.. parsed-literal:: - - Note: you may need to restart the kernel to use updated packages. - - -Download input data and label map - -.. code:: ipython3 - - import requests - from pathlib import Path - from PIL import Image - - MODEL_DIR = Path("model") - DATA_DIR = Path("data") - - MODEL_DIR.mkdir(exist_ok=True) - DATA_DIR.mkdir(exist_ok=True) - MODEL_NAME = "regnet_y_800mf" - - image = Image.open(requests.get("https://farm9.staticflickr.com/8225/8511402100_fea15da1c5_z.jpg", stream=True).raw) - - labels_file = DATA_DIR / "imagenet_2012.txt" - - if not labels_file.exists(): - resp = requests.get("https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/data/dataset_classes/imagenet_2012.txt") - with labels_file.open("wb") as f: - f.write(resp.content) - - imagenet_classes = labels_file.open("r").read().splitlines() - -Load PyTorch Model ------------------------------------------------------------- - -Generally, PyTorch models represent an instance of the -``torch.nn.Module`` class, initialized by a state dictionary with model -weights. Typical steps for getting a pre-trained model: - -1. Create an instance of a model class -2. Load checkpoint state dict, which contains pre-trained model weights -3. 
Turn the model to evaluation for switching some operations to - inference mode - -The ``torchvision`` module provides a ready-to-use set of functions for -model class initialization. We will use -``torchvision.models.regnet_y_800mf``. You can directly pass pre-trained -model weights to the model initialization function using the weights -enum ``RegNet_Y_800MF_Weights.DEFAULT``. - -.. code:: ipython3 - - import torchvision - - # get default weights using available weights Enum for model - weights = torchvision.models.RegNet_Y_800MF_Weights.DEFAULT - - # create model topology and load weights - model = torchvision.models.regnet_y_800mf(weights=weights) - - # switch model to inference mode - model.eval(); - -Prepare Input Data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The code below demonstrates how to preprocess input data using a -model-specific transforms module from ``torchvision``. After -transformation, we should concatenate images into batched tensor, in our -case, we will run the model with batch 1, so we just unsqueeze input on -the first dimension. - -.. code:: ipython3 - - import torch - - # Initialize the Weight Transforms - preprocess = weights.transforms() - - # Apply it to the input image - img_transformed = preprocess(image) - - # Add batch dimension to image tensor - input_tensor = img_transformed.unsqueeze(0) - -Run PyTorch Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The model returns a vector of probabilities in raw logits format, -softmax can be applied to get normalized values in the [0, 1] range. For -a demonstration that the output of the original model and OpenVINO -converted is the same, we defined a common postprocessing function which -can be reused later. - -.. code:: ipython3 - - import numpy as np - from scipy.special import softmax - - # Perform model inference on input tensor - result = model(input_tensor) - - # Postprocessing function for getting results in the same way for both PyTorch model inference and OpenVINO - def postprocess_result(output_tensor:np.ndarray, top_k:int = 5): - """ - Posprocess model results. This function applied sofrmax on output tensor and returns specified top_k number of labels with highest probability - Parameters: - output_tensor (np.ndarray): model output tensor with probabilities - top_k (int, *optional*, default 5): number of labels with highest probability for return - Returns: - topk_labels: label ids for selected top_k scores - topk_scores: selected top_k highest scores predicted by model - """ - softmaxed_scores = softmax(output_tensor, -1)[0] - topk_labels = np.argsort(softmaxed_scores)[-top_k:][::-1] - topk_scores = softmaxed_scores[topk_labels] - return topk_labels, topk_scores - - # Postprocess results - top_labels, top_scores = postprocess_result(result.detach().numpy()) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_11_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark PyTorch Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. 
code:: ipython3 - - %%timeit - - # Run model inference - model(input_tensor) - - -.. parsed-literal:: - - 17.5 ms ± 9.66 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - -Convert PyTorch Model to OpenVINO Intermediate Representation -------------------------------------------------------------------------------------------------------- - -Starting from the 2023.0 release OpenVINO supports direct PyTorch models -conversion to OpenVINO Intermediate Representation (IR) format. OpenVINO -model conversion API should be used for these purposes. More details -regarding PyTorch model conversion can be found in OpenVINO -`documentation `__ - - **Note**: Please, take into account that direct support PyTorch - models conversion is an experimental feature. Model coverage will be - increased in the next releases. For cases, when PyTorch model - conversion failed, you still can try to export the model to ONNX - format. Please refer to this - `tutorial <102-pytorch-to-openvino-with-output.html>`__ - which explains how to convert PyTorch model to ONNX, then to OpenVINO - -The ``convert_model`` function accepts the PyTorch model object and -returns the ``openvino.Model`` instance ready to load on a device using -``core.compile_model`` or save on disk for next usage using -``ov.save_model``. Optionally, we can provide additional parameters, -such as: - -- ``compress_to_fp16`` - flag to perform model weights compression into - FP16 data format. It may reduce the required space for model storage - on disk and give speedup for inference devices, where FP16 - calculation is supported. -- ``example_input`` - input data sample which can be used for model - tracing. -- ``input_shape`` - the shape of input tensor for conversion - -and any other advanced options supported by model conversion Python API. -More details can be found on this -`page `__ - -.. code:: ipython3 - - import openvino as ov - - # Create OpenVINO Core object instance - core = ov.Core() - - # Convert model to openvino.runtime.Model object - ov_model = ov.convert_model(model) - - # Save openvino.runtime.Model object on disk - ov.save_model(ov_model, MODEL_DIR / f"{MODEL_NAME}_dynamic.xml") - - ov_model - - - - -.. parsed-literal:: - - - ] - outputs[ - - ]> - - - -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -select device from dropdown list for running inference using OpenVINO - -.. code:: ipython3 - - import ipywidgets as widgets - - device = widgets.Dropdown( - options=core.available_devices + ["AUTO"], - value='AUTO', - description='Device:', - disabled=False, - ) - - device - - - - -.. parsed-literal:: - - Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO') - - - -.. code:: ipython3 - - # Load OpenVINO model on device - compiled_model = core.compile_model(ov_model, device.value) - compiled_model - - - - -.. parsed-literal:: - - - ] - outputs[ - - ]> - - - -Run OpenVINO Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - # Run model inference - result = compiled_model(input_tensor)[0] - - # Posptorcess results - top_labels, top_scores = postprocess_result(result) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. 
image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_20_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark OpenVINO Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - %%timeit - - compiled_model(input_tensor) - - -.. parsed-literal:: - - 3.21 ms ± 12 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - -Convert PyTorch Model with Static Input Shape ---------------------------------------------------------------------------------------- - -The default conversion path preserves dynamic input shapes, in order if -you want to convert the model with static shapes, you can explicitly -specify it during conversion using the ``input_shape`` parameter or -reshape the model into the desired shape after conversion. For the model -reshaping example please check the following -`tutorial <002-openvino-api-with-output.html>`__. - -.. code:: ipython3 - - # Convert model to openvino.runtime.Model object - ov_model = ov.convert_model(model, input=[[1,3,224,224]]) - # Save openvino.runtime.Model object on disk - ov.save_model(ov_model, MODEL_DIR / f"{MODEL_NAME}_static.xml") - ov_model - - - - -.. parsed-literal:: - - - ] - outputs[ - - ]> - - - -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -select device from dropdown list for running inference using OpenVINO - -.. code:: ipython3 - - device - - - - -.. parsed-literal:: - - Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO') - - - -.. code:: ipython3 - - # Load OpenVINO model on device - compiled_model = core.compile_model(ov_model, device.value) - compiled_model - - - - -.. parsed-literal:: - - - ] - outputs[ - - ]> - - - -Now, we can see that input of our converted model is tensor of shape [1, -3, 224, 224] instead of [?, 3, ?, ?] reported by previously converted -model. - -Run OpenVINO Model Inference with Static Input Shape -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - # Run model inference - result = compiled_model(input_tensor)[0] - - # Posptorcess results - top_labels, top_scores = postprocess_result(result) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_31_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark OpenVINO Model Inference with Static Input Shape -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - %%timeit - - compiled_model(input_tensor) - - -.. parsed-literal:: - - 2.79 ms ± 12 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - -Convert TorchScript Model to OpenVINO Intermediate Representation ------------------------------------------------------------------------------------------------------------ - -TorchScript is a way to create serializable and optimizable models from -PyTorch code. 
Any TorchScript program can be saved from a Python process -and loaded in a process where there is no Python dependency. More -details about TorchScript can be found in `PyTorch -documentation `__. - -There are 2 possible ways to convert the PyTorch model to TorchScript: - -- ``torch.jit.script`` - Scripting a function or ``nn.Module`` will - inspect the source code, compile it as TorchScript code using the - TorchScript compiler, and return a ``ScriptModule`` or - ``ScriptFunction``. -- ``torch.jit.trace`` - Trace a function and return an executable or - ``ScriptFunction`` that will be optimized using just-in-time - compilation. - -Let’s consider both approaches and their conversion into OpenVINO IR. - -Scripted Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``torch.jit.script`` inspects model source code and compiles it to -``ScriptModule``. After compilation model can be used for inference or -saved on disk using the ``torch.jit.save`` function and after that -restored with ``torch.jit.load`` in any other environment without the -original PyTorch model code definitions. - -TorchScript itself is a subset of the Python language, so not all -features in Python work, but TorchScript provides enough functionality -to compute on tensors and do control-dependent operations. For a -complete guide, see the `TorchScript Language -Reference `__. - -.. code:: ipython3 - - # Get model path - scripted_model_path = MODEL_DIR / f"{MODEL_NAME}_scripted.pth" - - # Compile and save model if it has not been compiled before or load compiled model - if not scripted_model_path.exists(): - scripted_model = torch.jit.script(model) - torch.jit.save(scripted_model, scripted_model_path) - else: - scripted_model = torch.jit.load(scripted_model_path) - - # Run scripted model inference - result = scripted_model(input_tensor) - - # Postprocess results - top_labels, top_scores = postprocess_result(result.detach().numpy()) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_35_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark Scripted Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - %%timeit - - scripted_model(input_tensor) - - -.. parsed-literal:: - - 12.9 ms ± 9.28 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - -Convert PyTorch Scripted Model to OpenVINO Intermediate Representation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The conversion step for the scripted model to OpenVINO IR is similar to -the original PyTorch model. - -.. 
code:: ipython3 - - # Convert model to openvino.runtime.Model object - ov_model = ov.convert_model(scripted_model) - - # Load OpenVINO model on device - compiled_model = core.compile_model(ov_model, device.value) - - # Run OpenVINO model inference - result = compiled_model(input_tensor, device.value)[0] - - # Postprocess results - top_labels, top_scores = postprocess_result(result) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_39_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark OpenVINO Model Inference Converted From Scripted Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - %%timeit - - compiled_model(input_tensor) - - -.. parsed-literal:: - - 3.21 ms ± 17.8 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - -Traced Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Using ``torch.jit.trace``, you can turn an existing module or Python -function into a TorchScript ``ScriptFunction`` or ``ScriptModule``. You -must provide example inputs, and model will be executed, recording the -operations performed on all the tensors. - -- The resulting recording of a standalone function produces - ``ScriptFunction``. - -- The resulting recording of ``nn.Module.forward`` or ``nn.Module`` - produces ``ScriptModule``. - -In the same way like scripted model, traced model can be used for -inference or saved on disk using ``torch.jit.save`` function and after -that restored with ``torch.jit.load`` in any other environment without -original PyTorch model code definitions. - -.. code:: ipython3 - - # Get model path - traced_model_path = MODEL_DIR / f"{MODEL_NAME}_traced.pth" - - # Trace and save model if it has not been traced before or load traced model - if not traced_model_path.exists(): - traced_model = torch.jit.trace(model, example_inputs=input_tensor) - torch.jit.save(traced_model, traced_model_path) - else: - traced_model = torch.jit.load(traced_model_path) - - # Run traced model inference - result = traced_model(input_tensor) - - # Postprocess results - top_labels, top_scores = postprocess_result(result.detach().numpy()) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_43_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark Traced Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - %%timeit - - traced_model(input_tensor) - - -.. parsed-literal:: - - 13.4 ms ± 22.3 µs per loop (mean ± std. dev. 
of 7 runs, 100 loops each) - - -Convert PyTorch Traced Model to OpenVINO Intermediate Representation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The conversion step for a traced model to OpenVINO IR is similar to the -original PyTorch model. - -.. code:: ipython3 - - # Convert model to openvino.runtime.Model object - ov_model = ov.convert_model(traced_model) - - # Load OpenVINO model on device - compiled_model = core.compile_model(ov_model, device.value) - - # Run OpenVINO model inference - result = compiled_model(input_tensor)[0] - - # Postprocess results - top_labels, top_scores = postprocess_result(result) - - # Show results - display(image) - for idx, (label, score) in enumerate(zip(top_labels, top_scores)): - _, predicted_label = imagenet_classes[label].split(" ", 1) - print(f"{idx + 1}: {predicted_label} - {score * 100 :.2f}%") - - - -.. image:: 102-pytorch-to-openvino-with-output_files/102-pytorch-to-openvino-with-output_47_0.png - - -.. parsed-literal:: - - 1: tiger cat - 25.91% - 2: Egyptian cat - 10.26% - 3: computer keyboard, keypad - 9.22% - 4: tabby, tabby cat - 9.09% - 5: hamper - 2.35% - - -Benchmark OpenVINO Model Inference Converted From Traced Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code:: ipython3 - - %%timeit - - compiled_model(input_tensor)[0] - - -.. parsed-literal:: - - 2.82 ms ± 8.37 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - diff --git a/docs/notebooks/102-pytorch-to-openvino-with-output_files/index.html b/docs/notebooks/102-pytorch-to-openvino-with-output_files/index.html index 2b4be31be6c88b..953c8eb9d0929f 100644 --- a/docs/notebooks/102-pytorch-to-openvino-with-output_files/index.html +++ b/docs/notebooks/102-pytorch-to-openvino-with-output_files/index.html @@ -1,20 +1,20 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/102-pytorch-to-openvino-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/102-pytorch-to-openvino-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/102-pytorch-to-openvino-with-output_files/


../
-102-pytorch-to-openvino-with-output_11_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_11_0.png       31-Oct-2023 00:35              542516
-102-pytorch-to-openvino-with-output_20_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_20_0.png       31-Oct-2023 00:35              542516
-102-pytorch-to-openvino-with-output_31_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_31_0.png       31-Oct-2023 00:35              542516
-102-pytorch-to-openvino-with-output_35_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_35_0.png       31-Oct-2023 00:35              542516
-102-pytorch-to-openvino-with-output_39_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_39_0.png       31-Oct-2023 00:35              542516
-102-pytorch-to-openvino-with-output_43_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_43_0.png       31-Oct-2023 00:35              542516
-102-pytorch-to-openvino-with-output_47_0.jpg       31-Oct-2023 00:35               54874
-102-pytorch-to-openvino-with-output_47_0.png       31-Oct-2023 00:35              542516
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/102-pytorch-to-openvino-with-output_files/


../
+102-pytorch-to-openvino-with-output_11_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_11_0.png       07-Dec-2023 00:49              542516
+102-pytorch-to-openvino-with-output_20_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_20_0.png       07-Dec-2023 00:49              542516
+102-pytorch-to-openvino-with-output_31_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_31_0.png       07-Dec-2023 00:49              542516
+102-pytorch-to-openvino-with-output_35_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_35_0.png       07-Dec-2023 00:49              542516
+102-pytorch-to-openvino-with-output_39_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_39_0.png       07-Dec-2023 00:49              542516
+102-pytorch-to-openvino-with-output_43_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_43_0.png       07-Dec-2023 00:49              542516
+102-pytorch-to-openvino-with-output_47_0.jpg       07-Dec-2023 00:49               54874
+102-pytorch-to-openvino-with-output_47_0.png       07-Dec-2023 00:49              542516
 

diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst index 50ec3305ec903d..53d9144842e38b 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output.rst @@ -33,42 +33,35 @@ Source of the - `Select inference device <#select-inference-device>`__ - `References <#references>`__ -Preparation ------------------------------------------------------ +Preparation +----------- -Imports -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. code:: ipython3 - - import sys - - if sys.version_info.minor > 7: - %pip install -q "paddlepaddle>=2.5.1" - else: - %pip install -q "paddlepaddle==2.4.2" +Imports +~~~~~~~ -.. parsed-literal:: - - Note: you may need to restart the kernel to use updated packages. .. code:: ipython3 - !pip install -q paddleclas --no-deps - !pip install -q "prettytable" "ujson" "visualdl>=2.2.0" "faiss-cpu>=1.7.1" + %pip install -q "paddlepaddle>=2.5.1" + %pip install -q paddleclas --no-deps + %pip install -q "prettytable" "ujson" "visualdl>=2.2.0" "faiss-cpu>=1.7.1" # Install openvino package !pip install -q "openvino>=2023.1.0" .. parsed-literal:: + Note: you may need to restart the kernel to use updated packages. + Note: you may need to restart the kernel to use updated packages. ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. paddleclas 2.5.1 requires easydict, which is not installed. paddleclas 2.5.1 requires faiss-cpu==1.7.1.post2, but you have faiss-cpu 1.7.4 which is incompatible. paddleclas 2.5.1 requires gast==0.3.3, but you have gast 0.4.0 which is incompatible. - + Note: you may need to restart the kernel to use updated packages. + .. code:: ipython3 @@ -81,11 +74,11 @@ Imports .. parsed-literal:: - --2023-10-30 22:31:22-- http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb + --2023-12-06 22:32:58-- http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb Resolving proxy-mu.intel.com (proxy-mu.intel.com)... 10.217.247.236 Connecting to proxy-mu.intel.com (proxy-mu.intel.com)|10.217.247.236|:911... connected. Proxy request sent, awaiting response... 404 Not Found - 2023-10-30 22:31:22 ERROR 404: Not Found. + 2023-12-06 22:32:59 ERROR 404: Not Found. dpkg: error: cannot access archive 'libssl1.1_1.1.1f-1ubuntu2.19_amd64.deb': No such file or directory @@ -114,12 +107,14 @@ Imports .. parsed-literal:: - 2023-10-30 22:31:24 INFO: Loading faiss with AVX2 support. - 2023-10-30 22:31:24 INFO: Successfully loaded faiss with AVX2 support. + 2023-12-06 22:33:00 INFO: Loading faiss with AVX2 support. + 2023-12-06 22:33:00 INFO: Successfully loaded faiss with AVX2 support. + + +Settings +~~~~~~~~ -Settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Set ``IMAGE_FILENAME`` to the filename of an image to use. Set ``MODEL_NAME`` to the PaddlePaddle model to download from PaddleHub. @@ -175,8 +170,10 @@ PaddleHub. This may take a while. Model Extracted to "./model". 
-Show Inference on PaddlePaddle Model ------------------------------------------------------------------------------- +Show Inference on PaddlePaddle Model +------------------------------------ + + In the next cell, we load the model, load and display an image, do inference on that image, and then show the top three prediction results. @@ -195,7 +192,7 @@ inference on that image, and then show the top three prediction results. .. parsed-literal:: - [2023/10/30 22:31:43] ppcls WARNING: The current running environment does not support the use of GPU. CPU has been used instead. + [2023/12/06 22:33:21] ppcls WARNING: The current running environment does not support the use of GPU. CPU has been used instead. Labrador retriever, 0.75138 German short-haired pointer, 0.02373 Great Dane, 0.01848 @@ -204,7 +201,7 @@ inference on that image, and then show the top three prediction results. -.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_9_1.png +.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_1.png ``classifier.predict()`` takes an image file name, reads the image, @@ -261,24 +258,24 @@ clipping values. .. parsed-literal:: - Processed image shape: (3, 224, 224) + 2023-12-06 22:33:22 WARNING: Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). .. parsed-literal:: - 2023-10-30 22:31:44 WARNING: Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers). + Processed image shape: (3, 224, 224) .. parsed-literal:: - + -.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_16_3.png +.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png To decode the labels predicted by the model to names of classes, we need @@ -297,8 +294,10 @@ OpenVINO model. partition = line.split("\n")[0].partition(" ") class_id_map[int(partition[0])] = str(partition[-1]) -Convert the Model to OpenVINO IR Format ---------------------------------------------------------------------------------- +Convert the Model to OpenVINO IR Format +--------------------------------------- + + Call the OpenVINO Model Conversion API to convert the PaddlePaddle model to OpenVINO IR, with FP32 precision. ``ov.convert_model`` function @@ -318,8 +317,10 @@ for more information about the Model Conversion API. else: print(f"{model_xml} already exists.") -Select inference device ------------------------------------------------------------------ +Select inference device +----------------------- + + select device from dropdown list for running inference using OpenVINO @@ -346,8 +347,10 @@ select device from dropdown list for running inference using OpenVINO -Show Inference on OpenVINO Model --------------------------------------------------------------------------- +Show Inference on OpenVINO Model +-------------------------------- + + Load the IR model, get model information, load the image, do inference, convert the inference to a meaningful result, and show the output. See @@ -391,11 +394,13 @@ information. -.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_24_1.png +.. 
image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png + + +Timing and Comparison +--------------------- -Timing and Comparison ---------------------------------------------------------------- Measure the time it takes to do inference on fifty images and compare the result. The timing information gives an indication of performance. @@ -448,7 +453,7 @@ Note that many optimizations are possible to improve the performance. .. parsed-literal:: - PaddlePaddle model on CPU: 0.0071 seconds per image, FPS: 141.73 + PaddlePaddle model on CPU: 0.0070 seconds per image, FPS: 142.41 PaddlePaddle result: Labrador retriever, 0.75138 @@ -459,11 +464,13 @@ Note that many optimizations are possible to improve the performance. -.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_28_1.png +.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png + + +Select inference device +----------------------- -Select inference device ------------------------------------------------------------------ select device from dropdown list for running inference using OpenVINO @@ -510,7 +517,7 @@ select device from dropdown list for running inference using OpenVINO .. parsed-literal:: - OpenVINO IR model in OpenVINO Runtime (AUTO): 0.0030 seconds per image, FPS: 328.24 + OpenVINO IR model in OpenVINO Runtime (AUTO): 0.0028 seconds per image, FPS: 352.29 OpenVINO result: Labrador retriever, 0.74909 @@ -521,11 +528,13 @@ select device from dropdown list for running inference using OpenVINO -.. image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_31_1.png +.. 
image:: 103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png + + +References +---------- -References ----------------------------------------------------- - `PaddleClas `__ - `OpenVINO PaddlePaddle diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png new file mode 100644 index 00000000000000..87f3978ae62d26 --- /dev/null +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_15_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37fe65815997e6f67c6a635a98caf7ad3d5066aee57709f650fe8ef4c8bdfe11 +size 120883 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_16_3.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_16_3.png deleted file mode 100644 index 9928929e2f69f5..00000000000000 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_16_3.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:306bb006db6e8ef4b71e12f1007cf62a782408867b1c5e2af981d0dcde7d50e7 -size 120883 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png new file mode 100644 index 00000000000000..87b70a6bf1d23f --- /dev/null +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_23_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_24_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_24_1.png deleted file mode 100644 index 1e378b726a5a10..00000000000000 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_24_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aaa7ff0a118fe7ac95479e0467f34f793d1013d972c5c850c610e39f6983ee3c -size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png new file mode 100644 index 00000000000000..87b70a6bf1d23f --- /dev/null +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_27_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +size 224886 diff --git 
a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_28_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_28_1.png deleted file mode 100644 index 1e378b726a5a10..00000000000000 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_28_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aaa7ff0a118fe7ac95479e0467f34f793d1013d972c5c850c610e39f6983ee3c -size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png new file mode 100644 index 00000000000000..87b70a6bf1d23f --- /dev/null +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_30_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_31_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_31_1.png deleted file mode 100644 index 1e378b726a5a10..00000000000000 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_31_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aaa7ff0a118fe7ac95479e0467f34f793d1013d972c5c850c610e39f6983ee3c -size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_1.png new file mode 100644 index 00000000000000..87b70a6bf1d23f --- /dev/null +++ b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_8_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22866fa0a0a063c7772d4a884812ca79fb0737c1eb2281bc825ee18eded729c5 +size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_9_1.png b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_9_1.png deleted file mode 100644 index 1e378b726a5a10..00000000000000 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/103-paddle-to-openvino-classification-with-output_9_1.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:aaa7ff0a118fe7ac95479e0467f34f793d1013d972c5c850c610e39f6983ee3c -size 224886 diff --git a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/index.html b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/index.html index 8b1207e9e33259..66c78f6a2a407e 100644 --- a/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/index.html +++ 
b/docs/notebooks/103-paddle-to-openvino-classification-with-output_files/index.html @@ -1,11 +1,11 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/103-paddle-to-openvino-classification-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/103-paddle-to-openvino-classification-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/103-paddle-to-openvino-classification-with-output_files/


../
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35              120883
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35              224886
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35              224886
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35              224886
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35              224886
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/103-paddle-to-openvino-classification-with-output_files/


../
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49              120883
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49              224886
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49              224886
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49              224886
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49              224886
 

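Taken together, the conversion and device-selection steps edited in the hunks above follow one standard OpenVINO pattern. A minimal sketch, assuming a PaddlePaddle inference file at ``model/inference.pdmodel`` and a preprocessed ``(1, 3, 224, 224)`` input (both assumptions):

.. code:: python

    import numpy as np
    import openvino as ov

    core = ov.Core()

    # Convert the PaddlePaddle model to OpenVINO IR and save it to disk.
    ov_model = ov.convert_model("model/inference.pdmodel")
    ov.save_model(ov_model, "model/inference.xml")

    # AUTO stands in for the device picked in the notebook's dropdown.
    compiled_model = core.compile_model(ov_model, device_name="AUTO")

    input_image = np.zeros((1, 3, 224, 224), dtype=np.float32)  # dummy input
    result = compiled_model([input_image])[compiled_model.output(0)]

The same compiled model is what the notebook's timing cells call in a loop to produce the FPS numbers shown in the hunks above.
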
diff --git a/docs/notebooks/105-language-quantize-bert-with-output.rst b/docs/notebooks/105-language-quantize-bert-with-output.rst index e3fddbbfb9ca35..5c089756cf15da 100644 --- a/docs/notebooks/105-language-quantize-bert-with-output.rst +++ b/docs/notebooks/105-language-quantize-bert-with-output.rst @@ -31,8 +31,7 @@ and datasets. It consists of the following steps: - `Prepare the Dataset <#prepare-the-dataset>`__ - `Optimize model using NNCF Post-training Quantization API <#optimize-model-using-nncf-post-training-quantization-api>`__ -- `Load and Test OpenVINO - Model <#load-and-test-openvino-model>`__ +- `Load and Test OpenVINO Model <#load-and-test-openvino-model>`__ - `Select inference device <#select-inference-device>`__ @@ -44,7 +43,7 @@ and datasets. It consists of the following steps: .. code:: ipython3 %pip install -q "nncf>=2.5.0" - %pip install -q "transformers" datasets evaluate + %pip install -q "transformers" datasets evaluate --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino>=2023.1.0" @@ -55,8 +54,10 @@ and datasets. It consists of the following steps: Note: you may need to restart the kernel to use updated packages. -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -87,10 +88,10 @@ Imports .. parsed-literal:: - 2023-10-30 22:33:08.247649: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-30 22:33:08.281400: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-06 22:34:55.977192: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-06 22:34:56.010680: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-30 22:33:08.912908: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-06 22:34:56.639162: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT .. parsed-literal:: @@ -98,8 +99,10 @@ Imports INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino -Settings --------------------------------------------------- +Settings +-------- + + .. code:: ipython3 @@ -113,8 +116,10 @@ Settings os.makedirs(DATA_DIR, exist_ok=True) os.makedirs(MODEL_DIR, exist_ok=True) -Prepare the Model ------------------------------------------------------------ +Prepare the Model +----------------- + + Perform the following: @@ -180,22 +185,13 @@ PyTorch model formats are supported: .. parsed-literal:: [ WARNING ] Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s. + No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' -.. 
parsed-literal:: - - WARNING:nncf:NNCF provides best results with torch==2.0.1, while current torch version is 2.1.0+cpu. If you encounter issues, consider switching to torch==2.0.1 - - -.. parsed-literal:: - - No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/jit/annotations.py:386: UserWarning: TorchScript will treat type annotations of Tensor dtype-specific subtypes as if they are normal Tensors. dtype constraints are not enforced in compilation either. - warnings.warn( +Prepare the Dataset +------------------- -Prepare the Dataset -------------------------------------------------------------- We download the `General Language Understanding Evaluation (GLUE) `__ dataset for the MRPC task from @@ -219,15 +215,10 @@ tokenizer from HuggingFace. data_source = create_data_source() +Optimize model using NNCF Post-training Quantization API +-------------------------------------------------------- -.. parsed-literal:: - - Map: 0%| | 0/408 [00:00`__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -261,21 +252,93 @@ The optimization process contains the following steps: model_type=ModelType.TRANSFORMER) + +.. parsed-literal:: + + Output() + + + +.. raw:: html + +
+    [HTML progress-bar markup stripped]
+

+.. parsed-literal::
+
+    Output()
+
+.. raw:: html
+
+    [HTML progress-bar markup stripped]
+

 .. parsed-literal::

-    Statistics collection: 100%|██████████| 300/300 [00:07<00:00, 39.50it/s]
-    Applying Smooth Quant: 100%|██████████| 50/50 [00:00<00:00, 51.91it/s]
+    INFO:nncf:36 ignored nodes were found by name in the NNCFGraph

 .. parsed-literal::

-    INFO:nncf:36 ignored nodes was found by name in the NNCFGraph
+    Output()
+
+.. raw:: html
+
+    [HTML progress-bar markup stripped]
+

 .. parsed-literal::

-    Statistics collection: 100%|██████████| 300/300 [00:25<00:00, 11.96it/s]
-    Applying Fast Bias correction: 100%|██████████| 74/74 [00:25<00:00, 2.93it/s]
+    Output()
+
+.. raw:: html
+
+    [HTML progress-bar markup stripped]
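For readability, the quantization call that produces the progress output above has this shape. A sketch: ``ov_model`` and ``data_source`` come from earlier cells of the notebook, while the ``transform_fn`` input names are assumptions about the BERT input signature:

.. code:: python

    import nncf
    from nncf.parameters import ModelType

    # Map one calibration sample to the model's named inputs (names assumed).
    def transform_fn(data_item):
        return {
            "input_ids": data_item["input_ids"],
            "attention_mask": data_item["attention_mask"],
            "token_type_ids": data_item["token_type_ids"],
        }

    calibration_dataset = nncf.Dataset(data_source, transform_fn)

    # model_type=TRANSFORMER turns on transformer-aware schemes such as
    # SmoothQuant, matching the "Applying Smooth Quant" stage shown above.
    quantized_model = nncf.quantize(ov_model, calibration_dataset,
                                    model_type=ModelType.TRANSFORMER)
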
+ .. code:: ipython3 @@ -283,8 +346,10 @@ The optimization process contains the following steps: compressed_model_xml = Path(MODEL_DIR) / "quantized_bert_mrpc.xml" ov.save_model(quantized_model, compressed_model_xml) -Load and Test OpenVINO Model ----------------------------------------------------------------------- +Load and Test OpenVINO Model +---------------------------- + + To load and test converted model, perform the following: @@ -293,8 +358,10 @@ To load and test converted model, perform the following: - Run the inference. - Get the answer from the model output. -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -352,8 +419,10 @@ changing ``sample_idx`` to another value (from 0 to 407). The same meaning: yes -Compare F1-score of FP32 and INT8 models ----------------------------------------------------------------------------------- +Compare F1-score of FP32 and INT8 models +---------------------------------------- + + .. code:: ipython3 @@ -393,11 +462,13 @@ Compare F1-score of FP32 and INT8 models Checking the accuracy of the original model: F1 score: 0.9019 Checking the accuracy of the quantized model: - F1 score: 0.8985 + F1 score: 0.8969 + + +Compare Performance of the Original, Converted and Quantized Models +------------------------------------------------------------------- -Compare Performance of the Original, Converted and Quantized Models -------------------------------------------------------------------------------------------------------------- Compare the original PyTorch model with OpenVINO converted and quantized models (``FP32``, ``INT8``) to see the difference in performance. It is @@ -454,9 +525,9 @@ Frames Per Second (FPS) for images. .. parsed-literal:: - PyTorch model on CPU: 0.073 seconds per sentence, SPS: 13.72 - IR FP32 model in OpenVINO Runtime/AUTO: 0.022 seconds per sentence, SPS: 46.40 - OpenVINO IR INT8 model in OpenVINO Runtime/AUTO: 0.010 seconds per sentence, SPS: 98.65 + PyTorch model on CPU: 0.073 seconds per sentence, SPS: 13.77 + IR FP32 model in OpenVINO Runtime/AUTO: 0.021 seconds per sentence, SPS: 48.61 + OpenVINO IR INT8 model in OpenVINO Runtime/AUTO: 0.009 seconds per sentence, SPS: 109.06 Finally, measure the inference performance of OpenVINO ``FP32`` and @@ -486,20 +557,21 @@ in OpenVINO. [Step 2/11] Loading OpenVINO Runtime [ WARNING ] Default duration 120 seconds is used for unknown device device.value [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 
2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: - [ ERROR ] Exception from src/inference/src/core.cpp:84: - Exception from src/inference/src/dev/core_impl.cpp:565: + [ INFO ] + [ INFO ] + [Step 3/11] Setting device configuration + [ ERROR ] Exception from src/inference/src/core.cpp:244: + Exception from src/inference/src/dev/core_impl.cpp:559: Device with "device" name is not registered in the OpenVINO Runtime Traceback (most recent call last): - File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 102, in main - benchmark.print_version_info() - File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/benchmark.py", line 48, in print_version_info - for device, version in self.core.get_versions(self.device).items(): - RuntimeError: Exception from src/inference/src/core.cpp:84: - Exception from src/inference/src/dev/core_impl.cpp:565: + File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 165, in main + supported_properties = benchmark.core.get_property(device, properties.supported_properties()) + RuntimeError: Exception from src/inference/src/core.cpp:244: + Exception from src/inference/src/dev/core_impl.cpp:559: Device with "device" name is not registered in the OpenVINO Runtime @@ -518,20 +590,21 @@ in OpenVINO. [Step 2/11] Loading OpenVINO Runtime [ WARNING ] Default duration 120 seconds is used for unknown device device.value [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 
2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: - [ ERROR ] Exception from src/inference/src/core.cpp:84: - Exception from src/inference/src/dev/core_impl.cpp:565: + [ INFO ] + [ INFO ] + [Step 3/11] Setting device configuration + [ ERROR ] Exception from src/inference/src/core.cpp:244: + Exception from src/inference/src/dev/core_impl.cpp:559: Device with "device" name is not registered in the OpenVINO Runtime Traceback (most recent call last): - File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 102, in main - benchmark.print_version_info() - File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/benchmark.py", line 48, in print_version_info - for device, version in self.core.get_versions(self.device).items(): - RuntimeError: Exception from src/inference/src/core.cpp:84: - Exception from src/inference/src/dev/core_impl.cpp:565: + File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 165, in main + supported_properties = benchmark.core.get_property(device, properties.supported_properties()) + RuntimeError: Exception from src/inference/src/core.cpp:244: + Exception from src/inference/src/dev/core_impl.cpp:559: Device with "device" name is not registered in the OpenVINO Runtime diff --git a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst index 1173f4b74c3a64..77ad8fdb5d173a 100644 --- a/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst +++ b/docs/notebooks/107-speech-recognition-quantization-data2vec-with-output.rst @@ -30,8 +30,7 @@ steps: Representation <#convert-model-to-openvino-intermediate-representation>`__ - `Prepare inference data <#prepare-inference-data>`__ -- `Check model inference - result <#check-model-inference-result>`__ +- `Check model inference result <#check-model-inference-result>`__ - `Validate model accuracy on dataset <#validate-model-accuracy-on-dataset>`__ - `Quantization <#quantization>`__ @@ -42,8 +41,10 @@ steps: - `Compare Accuracy of the Original and Quantized Models <#compare-accuracy-of-the-original-and-quantized-models>`__ -Download and prepare model --------------------------------------------------------------------- +Download and prepare model +-------------------------- + + data2vec is a framework for self-supervised representation learning for images, speech, and text as described in `data2vec: A General Framework @@ -61,8 +62,10 @@ In our case, we will use ``data2vec-audio-base-960h`` model, which was finetuned on 960 hours of audio from LibriSpeech Automatic Speech Recognition corpus and distributed as part of HuggingFace transformers. -Obtain Pytorch model representation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Obtain Pytorch model representation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + For instantiating PyTorch model class, we should use ``Data2VecAudioForCTC.from_pretrained`` method with providing model ID @@ -77,8 +80,8 @@ model specific pre- and post-processing steps. .. 
code:: ipython3 %pip install -q "openvino>=2023.1.0" "nncf>=2.5.0" - %pip install -q datasets "torchmetrics>=0.11.0" - %pip install -q soundfile librosa transformers + %pip install -q datasets "torchmetrics>=0.11.0" --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q soundfile librosa transformers --extra-index-url https://download.pytorch.org/whl/cpu .. code:: ipython3 @@ -96,8 +99,10 @@ model specific pre- and post-processing steps. 2023-09-12 19:27:58.411557: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT -Convert model to OpenVINO Intermediate Representation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert model to OpenVINO Intermediate Representation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -133,8 +138,10 @@ Convert model to OpenVINO Intermediate Representation Read IR model from model/data2vec-audo-base.xml -Prepare inference data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prepare inference data +~~~~~~~~~~~~~~~~~~~~~~ + + For demonstration purposes, we will use short dummy version of LibriSpeech dataset - ``patrickvonplaten/librispeech_asr_dummy`` to @@ -169,8 +176,10 @@ dataset. Loading cached processed dataset at /home/ea/.cache/huggingface/datasets/patrickvonplaten___librispeech_asr_dummy/clean/2.1.0/f2c70a4d03ab4410954901bde48c54b85ca1b7f9bf7d616e7e2a72b5ee6ddbfc/cache-5282243604a7a526.arrow -Check model inference result ----------------------------------------------------------------------- +Check model inference result +---------------------------- + + The code below is used for running model inference on a single sample from the dataset. It contains the following steps: @@ -242,8 +251,10 @@ For reference, see the same function provided for OpenVINO model. -Validate model accuracy on dataset ----------------------------------------------------------------------------- +Validate model accuracy on dataset +---------------------------------- + + For model accuracy evaluation, `Word Error Rate `__ metric can be @@ -302,8 +313,10 @@ library. [OpenVino] Word Error Rate: 0.0383 -Quantization ------------------------------------------------------- +Quantization +------------ + + `NNCF `__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -565,8 +578,10 @@ saved using ``ov.save_model`` function. quantized_model_path = Path(f"{MODEL_NAME}_openvino_model/{MODEL_NAME}_quantized.xml") ov.save_model(quantized_model, quantized_model_path) -Check INT8 model inference result ---------------------------------------------------------------------------- +Check INT8 model inference result +--------------------------------- + + ``INT8`` model is the same in usage like the original one. We need to read it, using the ``core.read_model`` method and load on the device, @@ -604,8 +619,8 @@ using ``core.compile_model``. 
After that, we can reuse the same -Compare Performance of the Original and Quantized Models --------------------------------------------------------------------------------------------------- +Compare Performance of the Original and Quantized Models +-------------------------------------------------------- `Benchmark Tool `__ @@ -771,8 +786,10 @@ is used to measure the inference performance of the ``FP16`` and [ INFO ] Throughput: 66.40 FPS -Compare Accuracy of the Original and Quantized Models ------------------------------------------------------------------------------------------------ +Compare Accuracy of the Original and Quantized Models +----------------------------------------------------- + + Finally, calculate WER metric for the ``INT8`` model representation and compare it with the ``FP16`` result. diff --git a/docs/notebooks/109-latency-tricks-with-output.rst b/docs/notebooks/109-latency-tricks-with-output.rst index 60ba8c7a7d2b62..4d93f99b2be828 100644 --- a/docs/notebooks/109-latency-tricks-with-output.rst +++ b/docs/notebooks/109-latency-tricks-with-output.rst @@ -27,8 +27,8 @@ The quantization and pre-post-processing API are not included here as they change the precision (quantization) or processing graph (prepostprocessor). You can find examples of how to apply them to optimize performance on OpenVINO IR files in -`111-detection-quantization <111-detection-quantization-with-output.html>`__ and -`118-optimize-preprocessing <118-optimize-preprocessing-with-output.html>`__. +`111-detection-quantization <../111-detection-quantization>`__ and +`118-optimize-preprocessing <../118-optimize-preprocessing>`__. |image0| @@ -44,11 +44,12 @@ optimize performance on OpenVINO IR files in result in different performance. A similar notebook focused on the throughput mode is available -`here <109-throughput-tricks-with-output.html>`__. +`here <109-throughput-tricks.ipynb>`__. **Table of contents:** +- `Prerequisites <#prerequisites>`__ - `Data <#data>`__ - `Model <#model>`__ - `Hardware <#hardware>`__ @@ -70,14 +71,16 @@ A similar notebook focused on the throughput mode is available - `Performance comparison <#performance-comparison>`__ - `Conclusions <#conclusions>`__ +.. |image0| image:: https://user-images.githubusercontent.com/4547501/229120774-01f4f972-424d-4280-8395-220dd432985a.png + Prerequisites ------------- -.. |image0| image:: https://user-images.githubusercontent.com/4547501/229120774-01f4f972-424d-4280-8395-220dd432985a.png + .. code:: ipython3 - %pip install -q "openvino>=2023.1.0" seaborn "ultralytics<=8.0.178" onnx + %pip install -q "openvino>=2023.1.0" seaborn "ultralytics<=8.0.178" onnx --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: @@ -100,8 +103,10 @@ Prerequisites ) import notebook_utils as utils -Data ----------------------------------------------- +Data +---- + + We will use the same image of the dog sitting on a bicycle for all experiments below. The image is resized and preprocessed to fulfill the @@ -129,19 +134,21 @@ requirements of this particular object detection model. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_4_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_5_0.jpg .. parsed-literal:: - + + +Model +----- + -Model ------------------------------------------------ We decided to go with `YOLOv5n `__, one of the @@ -181,15 +188,17 @@ PyTorch Hub and small enough to see the difference in performance. .. 
parsed-literal:: Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt to model/yolov5n.pt... - 100%|██████████| 3.87M/3.87M [00:01<00:00, 3.48MB/s] + 100%|██████████| 3.87M/3.87M [00:02<00:00, 1.50MB/s] Fusing layers... YOLOv5n summary: 213 layers, 1867405 parameters, 0 gradients Adding AutoShape... -Hardware --------------------------------------------------- +Hardware +-------- + + The code below lists the available hardware we will use in the benchmarking process. @@ -215,8 +224,10 @@ benchmarking process. CPU: Intel(R) Core(TM) i9-10920X CPU @ 3.50GHz -Helper functions ----------------------------------------------------------- +Helper functions +---------------- + + We’re defining a benchmark model function to use for all optimized models below. It runs inference 1000 times, averages the latency time, @@ -350,15 +361,19 @@ the image. utils.show_array(output_img) -Optimizations -------------------------------------------------------- +Optimizations +------------- + + Below, we present the performance tricks for faster inference in the latency mode. We release resources after every benchmarking to be sure the same amount of resource is available for every experiment. -PyTorch model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +PyTorch model +~~~~~~~~~~~~~ + + First, we’re benchmarking the original PyTorch model without any optimizations applied. We will treat it as our baseline. @@ -374,17 +389,19 @@ optimizations applied. We will treat it as our baseline. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_14_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_15_0.jpg .. parsed-literal:: - PyTorch model on CPU. First inference time: 0.0219 seconds - PyTorch model on CPU: 0.0199 seconds per image (50.21 FPS) + PyTorch model on CPU. First inference time: 0.0252 seconds + PyTorch model on CPU: 0.0214 seconds per image (46.73 FPS) + + +ONNX model +~~~~~~~~~~ -ONNX model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first optimization is exporting the PyTorch model to ONNX and running it in OpenVINO. It’s possible, thanks to the ONNX frontend. It @@ -423,17 +440,19 @@ Representation (IR) to leverage the OpenVINO Runtime. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_17_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_18_0.jpg .. parsed-literal:: - ONNX model on CPU. First inference time: 0.0172 seconds - ONNX model on CPU: 0.0133 seconds per image (75.13 FPS) + ONNX model on CPU. First inference time: 0.0186 seconds + ONNX model on CPU: 0.0124 seconds per image (80.72 FPS) + + +OpenVINO IR model +~~~~~~~~~~~~~~~~~ -OpenVINO IR model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Let’s convert the ONNX model to OpenVINO Intermediate Representation (IR) FP16 and run it. Reducing the precision is one of the well-known @@ -459,17 +478,19 @@ accuracy drop. That’s why we skip that step in this notebook. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_19_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_20_0.jpg .. parsed-literal:: - OpenVINO model on CPU. First inference time: 0.0166 seconds - OpenVINO model on CPU: 0.0133 seconds per image (75.29 FPS) + OpenVINO model on CPU. 
First inference time: 0.0148 seconds + OpenVINO model on CPU: 0.0123 seconds per image (81.38 FPS) + + +OpenVINO IR model on GPU +~~~~~~~~~~~~~~~~~~~~~~~~ -OpenVINO IR model on GPU -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Usually, a GPU device is faster than a CPU, so let’s run the above model on the GPU. Please note you need to have an Intel GPU and `install @@ -492,8 +513,10 @@ execution. del ov_gpu_model # release resources -OpenVINO IR model + more inference threads -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenVINO IR model + more inference threads +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + There is a possibility to add a config for any device (CPU in this case). We will increase the number of threads to an equal number of our @@ -517,17 +540,19 @@ If it is the case, don’t use it. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_23_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_24_0.jpg .. parsed-literal:: - OpenVINO model + more threads on CPU. First inference time: 0.0156 seconds - OpenVINO model + more threads on CPU: 0.0134 seconds per image (74.72 FPS) + OpenVINO model + more threads on CPU. First inference time: 0.0155 seconds + OpenVINO model + more threads on CPU: 0.0124 seconds per image (80.47 FPS) + + +OpenVINO IR model in latency mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenVINO IR model in latency mode -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenVINO offers a virtual device called `AUTO `__, @@ -547,17 +572,19 @@ devices as well. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_25_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_26_0.jpg .. parsed-literal:: - OpenVINO model on AUTO. First inference time: 0.0162 seconds - OpenVINO model on AUTO: 0.0136 seconds per image (73.76 FPS) + OpenVINO model on AUTO. First inference time: 0.0156 seconds + OpenVINO model on AUTO: 0.0125 seconds per image (79.73 FPS) + + +OpenVINO IR model in latency mode + shared memory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenVINO IR model in latency mode + shared memory -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OpenVINO is a C++ toolkit with Python wrappers (API). The default behavior in the Python API is copying the input to the additional buffer @@ -581,27 +608,31 @@ performance! -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_27_0.jpg +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_28_0.jpg .. parsed-literal:: - OpenVINO model + shared memory on AUTO. First inference time: 0.0143 seconds - OpenVINO model + shared memory on AUTO: 0.0054 seconds per image (186.06 FPS) + OpenVINO model + shared memory on AUTO. First inference time: 0.0112 seconds + OpenVINO model + shared memory on AUTO: 0.0054 seconds per image (185.55 FPS) + + +Other tricks +~~~~~~~~~~~~ -Other tricks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are other tricks for performance improvement, such as quantization and pre-post-processing or dedicated to throughput mode. 
To get even more from your model, please visit -`111-detection-quantization <111-detection-quantization-with-output.html>`__, -`118-optimize-preprocessing <118-optimize-preprocessing-with-output.html>`__, and -`109-throughput-tricks <109-throughput-tricks-with-output.html>`__. +`111-detection-quantization <../111-detection-quantization>`__, +`118-optimize-preprocessing <../118-optimize-preprocessing>`__, and +`109-throughput-tricks <109-throughput-tricks.ipynb>`__. + +Performance comparison +---------------------- + -Performance comparison ----------------------------------------------------------------- The following graphical comparison is valid for the selected model and hardware simultaneously. If you cannot see any improvement between some @@ -634,11 +665,13 @@ steps, just skip them. -.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_30_0.png +.. image:: 109-latency-tricks-with-output_files/109-latency-tricks-with-output_31_0.png + + +Conclusions +----------- -Conclusions ------------------------------------------------------ We already showed the steps needed to improve the performance of an object detection model. Even if you experience much better performance diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_14_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_15_0.jpg similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_14_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_15_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_17_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_18_0.jpg similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_17_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_18_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_19_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_20_0.jpg similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_19_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_20_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_23_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_24_0.jpg similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_23_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_24_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_25_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_26_0.jpg similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_25_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_26_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_27_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_28_0.jpg 
similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_27_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_28_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_30_0.png b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_30_0.png deleted file mode 100644 index 6fa3f77dbbf8df..00000000000000 --- a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_30_0.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:ff39f8edc5bdcd106ae6c1d49b52edb7342a5a29490b03f3de375b0315155159 -size 57006 diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_31_0.png b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_31_0.png new file mode 100644 index 00000000000000..b05a4817148c9a --- /dev/null +++ b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_31_0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:545f14f9a5a612fe9e498660b4640562ad87b9b29df063fa6b354df83c16eaa8 +size 57156 diff --git a/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_4_0.jpg b/docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_5_0.jpg similarity index 100% rename from docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_4_0.jpg rename to docs/notebooks/109-latency-tricks-with-output_files/109-latency-tricks-with-output_5_0.jpg diff --git a/docs/notebooks/109-latency-tricks-with-output_files/index.html b/docs/notebooks/109-latency-tricks-with-output_files/index.html index 75d2c7dfc5ee89..54ec123587e86c 100644 --- a/docs/notebooks/109-latency-tricks-with-output_files/index.html +++ b/docs/notebooks/109-latency-tricks-with-output_files/index.html @@ -1,14 +1,14 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/109-latency-tricks-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/109-latency-tricks-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/109-latency-tricks-with-output_files/


../
-109-latency-tricks-with-output_14_0.jpg            31-Oct-2023 00:35              162715
-109-latency-tricks-with-output_17_0.jpg            31-Oct-2023 00:35              162715
-109-latency-tricks-with-output_19_0.jpg            31-Oct-2023 00:35              162715
-109-latency-tricks-with-output_23_0.jpg            31-Oct-2023 00:35              162715
-109-latency-tricks-with-output_25_0.jpg            31-Oct-2023 00:35              162715
-109-latency-tricks-with-output_27_0.jpg            31-Oct-2023 00:35              162715
-109-latency-tricks-with-output_30_0.png            31-Oct-2023 00:35               57006
-109-latency-tricks-with-output_4_0.jpg             31-Oct-2023 00:35              155828
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/109-latency-tricks-with-output_files/


../
+109-latency-tricks-with-output_15_0.jpg            07-Dec-2023 00:49              162715
+109-latency-tricks-with-output_18_0.jpg            07-Dec-2023 00:49              162715
+109-latency-tricks-with-output_20_0.jpg            07-Dec-2023 00:49              162715
+109-latency-tricks-with-output_24_0.jpg            07-Dec-2023 00:49              162715
+109-latency-tricks-with-output_26_0.jpg            07-Dec-2023 00:49              162715
+109-latency-tricks-with-output_28_0.jpg            07-Dec-2023 00:49              162715
+109-latency-tricks-with-output_31_0.png            07-Dec-2023 00:49               57156
+109-latency-tricks-with-output_5_0.jpg             07-Dec-2023 00:49              155828
 

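Before the throughput notebook's diff begins, note that the latency-side tricks diffed above amount to very little code. A minimal sketch with assumed paths and shapes; ``share_inputs`` is the zero-copy flag in recent OpenVINO Python releases:

.. code:: python

    import numpy as np
    import openvino as ov

    core = ov.Core()
    model = core.read_model("model/yolov5n.xml")  # IR path assumed

    # AUTO device plus the LATENCY performance hint, as in the notebook.
    compiled_model = core.compile_model(model, "AUTO",
                                        {"PERFORMANCE_HINT": "LATENCY"})

    frame = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy frame

    # share_inputs=True hands the numpy buffer to the runtime without an
    # extra copy, the "shared memory" trick benchmarked above.
    result = compiled_model(frame, share_inputs=True)[compiled_model.output(0)]
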
diff --git a/docs/notebooks/109-throughput-tricks-with-output.rst b/docs/notebooks/109-throughput-tricks-with-output.rst index 446d6beac1be5d..38c1b8cd3060d1 100644 --- a/docs/notebooks/109-throughput-tricks-with-output.rst +++ b/docs/notebooks/109-throughput-tricks-with-output.rst @@ -24,8 +24,8 @@ The quantization and pre-post-processing API are not included here as they change the precision (quantization) or processing graph (prepostprocessor). You can find examples of how to apply them to optimize performance on OpenVINO IR files in -`111-detection-quantization <111-detection-quantization-with-output.html>`__ and -`118-optimize-preprocessing `__. +`111-detection-quantization <../111-detection-quantization>`__ and +`118-optimize-preprocessing <../118-optimize-preprocessing>`__. |image0| @@ -41,11 +41,12 @@ optimize performance on OpenVINO IR files in result in different performance. A similar notebook focused on the latency mode is available -`here <109-latency-tricks-with-output.html>`__. +`here <109-latency-tricks.ipynb>`__. **Table of contents:** +- `Prerequisites <#prerequisites>`__ - `Data <#data>`__ - `Model <#model>`__ - `Hardware <#hardware>`__ @@ -70,14 +71,16 @@ A similar notebook focused on the latency mode is available - `Performance comparison <#performance-comparison>`__ - `Conclusions <#conclusions>`__ +.. |image0| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/4547501/ac17148c-bee9-43aa-87fc-ead61ac75f1d + Prerequisites ------------- -.. |image0| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/4547501/ac17148c-bee9-43aa-87fc-ead61ac75f1d + .. code:: ipython3 - %pip install -q "openvino>=2023.1.0" "ultralytics<=8.0.178" seaborn ultralytics onnx + %pip install -q "openvino>=2023.1.0" "ultralytics<=8.0.178" seaborn onnx --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: @@ -99,8 +102,10 @@ Prerequisites ) import notebook_utils as utils -Data ----------------------------------------------- +Data +---- + + We will use the same image of the dog sitting on a bicycle copied 1000 times to simulate the video with 1000 frames (about 33s). The image is @@ -134,19 +139,21 @@ object detection model. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_4_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_5_0.jpg .. parsed-literal:: - + + + +Model +----- -Model ------------------------------------------------ We decided to go with `YOLOv5n `__, one of the @@ -186,8 +193,10 @@ PyTorch Hub and small enough to see the difference in performance. requirements: /opt/home/k8sworker/.cache/torch/hub/requirements.txt not found, check failed. -Hardware --------------------------------------------------- +Hardware +-------- + + The code below lists the available hardware we will use in the benchmarking process. @@ -213,8 +222,10 @@ benchmarking process. CPU: Intel(R) Core(TM) i9-10920X CPU @ 3.50GHz -Helper functions ----------------------------------------------------------- +Helper functions +---------------- + + We’re defining a benchmark model function to use for all optimizations below. It runs inference for 1000 frames and prints average frames per @@ -353,15 +364,19 @@ the image. utils.show_array(output_img) -Optimizations -------------------------------------------------------- +Optimizations +------------- + + Below, we present the performance tricks for faster inference in the throughput mode. 
We release resources after every benchmarking to be sure the same amount of resource is available for every experiment. -PyTorch model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +PyTorch model +~~~~~~~~~~~~~ + + First, we’re benchmarking the original PyTorch model without any optimizations applied. We will treat it as our baseline. @@ -377,17 +392,19 @@ optimizations applied. We will treat it as our baseline. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_14_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_15_0.jpg .. parsed-literal:: - PyTorch model on CPU. First inference time: 0.0292 seconds - PyTorch model on CPU: 0.0210 seconds per image (47.67 FPS) + PyTorch model on CPU. First inference time: 0.0220 seconds + PyTorch model on CPU: 0.0208 seconds per image (48.18 FPS) + + +OpenVINO IR model +~~~~~~~~~~~~~~~~~ -OpenVINO IR model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first optimization is exporting the PyTorch model to OpenVINO Intermediate Representation (IR) FP16 and running it. Reducing the @@ -422,17 +439,19 @@ step in this notebook. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_17_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_18_0.jpg .. parsed-literal:: - OpenVINO model on CPU. First inference time: 0.0182 seconds - OpenVINO model on CPU: 0.0073 seconds per image (136.13 FPS) + OpenVINO model on CPU. First inference time: 0.0156 seconds + OpenVINO model on CPU: 0.0071 seconds per image (141.19 FPS) + + +OpenVINO IR model + bigger batch +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenVINO IR model + bigger batch -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Batch processing often gives higher throughput as more inputs are processed at once. To use bigger batches (than 1), we must convert the @@ -478,17 +497,19 @@ hardware and model. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_20_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_21_0.jpg .. parsed-literal:: - OpenVINO model + bigger batch on CPU. First inference time: 0.0502 seconds - OpenVINO model + bigger batch on CPU: 0.0076 seconds per image (131.86 FPS) + OpenVINO model + bigger batch on CPU. First inference time: 0.0481 seconds + OpenVINO model + bigger batch on CPU: 0.0069 seconds per image (145.67 FPS) + + +Asynchronous processing +~~~~~~~~~~~~~~~~~~~~~~~ -Asynchronous processing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Asynchronous mode means that OpenVINO immediately returns from an inference call and doesn’t wait for the result. It requires more @@ -526,8 +547,10 @@ the pipeline. del infer_queue # release resources return fps -OpenVINO IR model in throughput mode -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenVINO IR model in throughput mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + OpenVINO allows specifying a performance hint changing the internal configuration of the device. There are three different hints: @@ -548,17 +571,19 @@ feature, which sets the batch size to the optimal level. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_24_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_25_0.jpg .. parsed-literal:: - OpenVINO model on CPU (THROUGHPUT). 
First inference time: 0.0274 seconds - OpenVINO model on CPU (THROUGHPUT): 0.0040 seconds per image (249.34 FPS) + OpenVINO model on CPU (THROUGHPUT). First inference time: 0.0254 seconds + OpenVINO model on CPU (THROUGHPUT): 0.0040 seconds per image (250.82 FPS) + + +OpenVINO IR model in throughput mode on GPU +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenVINO IR model in throughput mode on GPU -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Usually, a GPU device provides more frames per second than a CPU, so let’s run the above model on the GPU. Please note you need to have an @@ -581,8 +606,10 @@ execution. del ov_gpu_model # release resources -OpenVINO IR model in throughput mode on AUTO -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +OpenVINO IR model in throughput mode on AUTO +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + OpenVINO offers a virtual device called `AUTO `__, @@ -599,17 +626,19 @@ performance hint. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_28_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_29_0.jpg .. parsed-literal:: - OpenVINO model on AUTO (THROUGHPUT). First inference time: 0.0247 seconds - OpenVINO model on AUTO (THROUGHPUT): 0.0040 seconds per image (248.93 FPS) + OpenVINO model on AUTO (THROUGHPUT). First inference time: 0.0231 seconds + OpenVINO model on AUTO (THROUGHPUT): 0.0040 seconds per image (251.86 FPS) + + +OpenVINO IR model in cumulative throughput mode on AUTO +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenVINO IR model in cumulative throughput mode on AUTO -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The AUTO device in throughput mode will select the best, but one physical device to bring the highest throughput. However, if we have @@ -625,29 +654,33 @@ activate all devices. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_30_0.jpg +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_31_0.jpg .. parsed-literal:: - OpenVINO model on AUTO (CUMULATIVE THROUGHPUT). First inference time: 0.0258 seconds - OpenVINO model on AUTO (CUMULATIVE THROUGHPUT): 0.0040 seconds per image (250.04 FPS) + OpenVINO model on AUTO (CUMULATIVE THROUGHPUT). First inference time: 0.0260 seconds + OpenVINO model on AUTO (CUMULATIVE THROUGHPUT): 0.0040 seconds per image (251.22 FPS) + + +Other tricks +~~~~~~~~~~~~ -Other tricks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are other tricks for performance improvement, such as advanced options, quantization and pre-post-processing or dedicated to latency mode. To get even more from your model, please visit `advanced throughput options `__, -`109-latency-tricks <109-latency-tricks-with-output.html>`__, -`111-detection-quantization <111-detection-quantization-with-output.html>`__, and -`118-optimize-preprocessing <118-optimize-preprocessing-with-output.html>`__. +`109-latency-tricks <109-latency-tricks.ipynb>`__, +`111-detection-quantization <../111-detection-quantization>`__, and +`118-optimize-preprocessing <../118-optimize-preprocessing>`__. + +Performance comparison +---------------------- + -Performance comparison ----------------------------------------------------------------- The following graphical comparison is valid for the selected model and hardware simultaneously. 
If you cannot see any improvement between some @@ -679,11 +712,13 @@ steps, just skip them. -.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_33_0.png +.. image:: 109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_34_0.png + + +Conclusions +----------- -Conclusions ------------------------------------------------------ We already showed the steps needed to improve the throughput of an object detection model. Even if you experience much better performance diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_14_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_15_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_14_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_15_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_17_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_18_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_17_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_18_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_20_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_21_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_20_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_21_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_24_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_25_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_24_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_25_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_28_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_29_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_28_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_29_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_30_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_31_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_30_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_31_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_33_0.png b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_33_0.png deleted file mode 100644 index 631ed0cf8ecf48..00000000000000 --- 
a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_33_0.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:10e27ed40dfd078777a2cd9513a00136b6327571b82ef0af6485b8ea5234dcfa -size 62451 diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_34_0.png b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_34_0.png new file mode 100644 index 00000000000000..0696417c908865 --- /dev/null +++ b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_34_0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b94c59d38427a1ec4994131e2bbefc03b0ebdf0fde6f20956a3775faa4e5f451 +size 62516 diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_4_0.jpg b/docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_5_0.jpg similarity index 100% rename from docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_4_0.jpg rename to docs/notebooks/109-throughput-tricks-with-output_files/109-throughput-tricks-with-output_5_0.jpg diff --git a/docs/notebooks/109-throughput-tricks-with-output_files/index.html b/docs/notebooks/109-throughput-tricks-with-output_files/index.html index 1ec6286d016602..33c9880ea6a6d2 100644 --- a/docs/notebooks/109-throughput-tricks-with-output_files/index.html +++ b/docs/notebooks/109-throughput-tricks-with-output_files/index.html @@ -1,14 +1,14 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/109-throughput-tricks-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/109-throughput-tricks-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/109-throughput-tricks-with-output_files/


../
-109-throughput-tricks-with-output_14_0.jpg         31-Oct-2023 00:35              162715
-109-throughput-tricks-with-output_17_0.jpg         31-Oct-2023 00:35              162715
-109-throughput-tricks-with-output_20_0.jpg         31-Oct-2023 00:35              162715
-109-throughput-tricks-with-output_24_0.jpg         31-Oct-2023 00:35              162715
-109-throughput-tricks-with-output_28_0.jpg         31-Oct-2023 00:35              162715
-109-throughput-tricks-with-output_30_0.jpg         31-Oct-2023 00:35              162715
-109-throughput-tricks-with-output_33_0.png         31-Oct-2023 00:35               62451
-109-throughput-tricks-with-output_4_0.jpg          31-Oct-2023 00:35              155828
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/109-throughput-tricks-with-output_files/


../
+109-throughput-tricks-with-output_15_0.jpg         07-Dec-2023 00:49              162715
+109-throughput-tricks-with-output_18_0.jpg         07-Dec-2023 00:49              162715
+109-throughput-tricks-with-output_21_0.jpg         07-Dec-2023 00:49              162715
+109-throughput-tricks-with-output_25_0.jpg         07-Dec-2023 00:49              162715
+109-throughput-tricks-with-output_29_0.jpg         07-Dec-2023 00:49              162715
+109-throughput-tricks-with-output_31_0.jpg         07-Dec-2023 00:49              162715
+109-throughput-tricks-with-output_34_0.png         07-Dec-2023 00:49               62516
+109-throughput-tricks-with-output_5_0.jpg          07-Dec-2023 00:49              155828
 

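A note on how the 109 notebook's tricks compose in code. The following is a minimal sketch only, not a cell from the notebook: it assumes a converted IR at ``model.xml`` and an iterable of preprocessed inputs called ``frames`` (both placeholder names), and combines the THROUGHPUT performance hint, the AUTO device, and ``AsyncInferQueue``.

.. code:: ipython3

    import openvino as ov

    core = ov.Core()
    model = core.read_model("model.xml")  # placeholder path, not from the notebook

    # The THROUGHPUT hint lets the plugin pick streams/batching internally;
    # AUTO selects the best available physical device.
    compiled = core.compile_model(model, "AUTO", {"PERFORMANCE_HINT": "THROUGHPUT"})

    results = {}

    def on_done(request, frame_id):
        # copy the output, because the infer request's tensor will be reused
        results[frame_id] = request.get_output_tensor(0).data.copy()

    # With no size given, the queue uses the device's optimal number of infer requests
    infer_queue = ov.AsyncInferQueue(compiled)
    infer_queue.set_callback(on_done)

    for i, frame in enumerate(frames):  # `frames`: placeholder for preprocessed inputs
        infer_queue.start_async({0: frame}, userdata=i)
    infer_queue.wait_all()  # release only after all results have arrived

Passing ``CUMULATIVE_THROUGHPUT`` instead of ``THROUGHPUT`` would make AUTO activate all available devices at once, as the notebook's last trick describes.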
diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst index 4ddd722e7a1831..f8762186f0c374 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output.rst @@ -24,13 +24,13 @@ This third tutorial in the series shows how to: All notebooks in this series: - `Data Preparation for 2D Segmentation of 3D Medical - Data `__ + Data `__ - `Train a 2D-UNet Medical Imaging Model with PyTorch - Lightning `__ + Lightning `__ - Convert and Quantize a Segmentation Model and Show Live Inference (this notebook) - `Live Inference and Benchmark CT-scan - data <110-ct-scan-live-inference-with-output.html>`__ + data <110-ct-scan-live-inference.ipynb>`__ Instructions ------------ @@ -39,7 +39,7 @@ This notebook needs a trained UNet model. We provide a pre-trained model, trained for 20 epochs with the full `Kits-19 `__ frames dataset, which has an F1 score on the validation set of 0.9. The training code is -available in `this notebook `__. +available in `this notebook `__. NNCF for PyTorch models requires a C++ compiler. On Windows, install `Microsoft Visual Studio @@ -66,8 +66,7 @@ purposes, use a representative dataset for quantizing the model. - `Metric <#metric>`__ - `Quantization <#quantization>`__ -- `Compare FP32 and INT8 - Model <#compare-fp-and-int-model>`__ +- `Compare FP32 and INT8 Model <#compare-fp-and-int-model>`__ - `Compare File Size <#compare-file-size>`__ - `Compare Metrics for the original model and the quantized model to @@ -88,7 +87,7 @@ purposes, use a representative dataset for quantizing the model. .. code:: ipython3 - %pip install -q "openvino>=2023.1.0" "monai>=0.9.1,<1.0.0" "torchmetrics>=0.11.0" + %pip install -q "openvino>=2023.1.0" "monai>=0.9.1,<1.0.0" "torchmetrics>=0.11.0" "nncf>=2.6.0" --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: @@ -96,8 +95,10 @@ purposes, use a representative dataset for quantizing the model. Note: you may need to restart the kernel to use updated packages. -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -181,10 +182,10 @@ Imports .. parsed-literal:: - 2023-10-30 22:43:08.129843: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-30 22:43:08.164608: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-06 22:47:51.629108: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-06 22:47:51.662883: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 
- 2023-10-30 22:43:08.732898: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-06 22:47:52.221639: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT .. parsed-literal:: @@ -192,13 +193,15 @@ Imports INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino -Settings --------------------------------------------------- +Settings +-------- + + By default, this notebook will download one CT scan from the KITS19 dataset that will be used for quantization. To use the full dataset, set ``BASEDIR`` to the path of the dataset, as prepared according to the -`Data Preparation `__ notebook. +`Data Preparation `__ notebook. .. code:: ipython3 @@ -208,8 +211,10 @@ dataset that will be used for quantization. To use the full dataset, set MODEL_DIR = Path("model") MODEL_DIR.mkdir(exist_ok=True) -Load PyTorch Model ------------------------------------------------------------- +Load PyTorch Model +------------------ + + Download the pre-trained model weights, load the PyTorch model and the ``state_dict`` that was saved after training. The model used in this @@ -217,7 +222,7 @@ notebook is a `BasicUNet `__ model from `MONAI `__. We provide a pre-trained checkpoint. To see how this model performs, check out the `training -notebook `__. +notebook `__. .. code:: ipython3 @@ -254,8 +259,10 @@ notebook `__. -Download CT-scan Data ---------------------------------------------------------------- +Download CT-scan Data +--------------------- + + .. code:: ipython3 @@ -280,16 +287,20 @@ Download CT-scan Data Data for case_00117 exists -Configuration -------------------------------------------------------- +Configuration +------------- + + + +Dataset +~~~~~~~ + -Dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``KitsDataset`` class in the next cell expects images and masks in the *``basedir``* directory, in a folder per patient. It is a simplified version of the Dataset class in the `training -notebook `__. +notebook `__. Images are loaded with MONAI’s `LoadImage `__, @@ -381,8 +392,10 @@ kidney pixels to verify that the annotations look correct: .. image:: 110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_15_1.png -Metric -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Metric +~~~~~~ + + Define a metric to determine the performance of the model. @@ -416,8 +429,10 @@ library. metric.update(label.flatten(), prediction.flatten()) return metric.compute() -Quantization ------------------------------------------------------- +Quantization +------------ + + Before quantizing the model, we compute the F1 score on the ``FP32`` model, for comparison: @@ -454,7 +469,7 @@ this notebook. [ WARNING ] Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s. No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py:179: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! 
+ /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py:179: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if x_e.shape[-i - 1] != x_0.shape[-i - 1]: @@ -497,6 +512,49 @@ steps: ignored_scope=nncf.IgnoredScope(patterns=[".*LeakyReLU.*"]) ) + + +.. parsed-literal:: + + Output() + + + +.. raw:: html + +

+ + + + +.. parsed-literal:: + + Output() + + + +.. raw:: html + +

+ + + Export the quantized model to ONNX and then convert it to OpenVINO IR model and save it. @@ -512,11 +570,11 @@ model and save it. .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/torch/quantization/layers.py:336: TracerWarning: Converting a tensor to a Python number might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/torch/quantization/layers.py:333: TracerWarning: Converting a tensor to a Python number might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! return self._level_low.item() - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/torch/quantization/layers.py:344: TracerWarning: Converting a tensor to a Python number might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/torch/quantization/layers.py:341: TracerWarning: Converting a tensor to a Python number might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! return self._level_high.item() - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py:179: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/monai/networks/nets/basic_unet.py:179: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if x_e.shape[-i - 1] != x_0.shape[-i - 1]: @@ -527,11 +585,15 @@ than quantization. See the `NNCF documentation `__ in the NNCF repository for more information. -Compare FP32 and INT8 Model ---------------------------------------------------------------------- +Compare FP32 and INT8 Model +--------------------------- + + + +Compare File Size +~~~~~~~~~~~~~~~~~ + -Compare File Size -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -548,8 +610,10 @@ Compare File Size INT8 model size: 1940.55 KB -Compare Metrics for the original model and the quantized model to be sure that there no degradation. 
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Compare Metrics for the original model and the quantized model to be sure that there no degradation. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -568,8 +632,10 @@ Compare Metrics for the original model and the quantized model to be sure that t INT8 F1: 0.999 -Compare Performance of the FP32 IR Model and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Compare Performance of the FP32 IR Model and Quantized Models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + To measure the inference performance of the ``FP32`` and ``INT8`` models, we use `Benchmark @@ -606,18 +672,18 @@ be run in the notebook with ``! benchmark_app`` or [ INFO ] Parsing input parameters [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] CPU - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(CPU) performance hint will be set to PerformanceMode.LATENCY. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 34.85 ms + [ INFO ] Read model took 26.10 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] x (node: x) : f32 / [...] / [?,?,?,?] @@ -631,7 +697,7 @@ be run in the notebook with ``! benchmark_app`` or [ INFO ] Model outputs: [ INFO ] ***NO_NAME*** (node: __module.final_conv/aten::_convolution/Add_425) : f32 / [...] / [?,1,16..,16..] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 76.76 ms + [ INFO ] Compile model took 80.42 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: Model0 @@ -653,9 +719,9 @@ be run in the notebook with ``! benchmark_app`` or [Step 9/11] Creating infer requests and preparing input tensors [ ERROR ] Input x is dynamic. Provide data shapes! Traceback (most recent call last): - File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 485, in main + File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/main.py", line 485, in main data_queue = get_input_data(paths_to_input, app_inputs_info) - File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/utils/inputs_filling.py", line 123, in get_input_data + File "/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/tools/benchmark/utils/inputs_filling.py", line 123, in get_input_data raise Exception(f"Input {info.name} is dynamic. Provide data shapes!") Exception: Input x is dynamic. Provide data shapes! 
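The ``FP32`` benchmark above aborts at step 9 because the exported IR kept fully dynamic input shapes (``[?,?,?,?]``). ``benchmark_app`` can still measure such a model once it is told what data to generate. A hedged example, assuming the input name ``x`` reported in the log, the 512×512 single-channel slices used throughout this notebook, and a placeholder ``fp32_ir_path`` variable:

.. code:: ipython3

    # Supply shapes for the dynamic input (the model itself stays dynamic) ...
    ! benchmark_app -m $fp32_ir_path -d CPU -t 15 -api sync -data_shape "x[1,1,512,512]"
    # ... or reshape the model to a static shape before benchmarking:
    ! benchmark_app -m $fp32_ir_path -d CPU -t 15 -api sync -shape "x[1,1,512,512]"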
@@ -672,32 +738,32 @@ be run in the notebook with ``! benchmark_app`` or [ INFO ] Parsing input parameters [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] CPU - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(CPU) performance hint will be set to PerformanceMode.LATENCY. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 30.95 ms + [ INFO ] Read model took 12.76 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] x.1 (node: x.1) : f32 / [...] / [1,1,512,512] [ INFO ] Model outputs: - [ INFO ] 578 (node: 578) : f32 / [...] / [1,1,512,512] + [ INFO ] 571 (node: 571) : f32 / [...] / [1,1,512,512] [Step 5/11] Resizing model to match image sizes and given batch [ INFO ] Model batch size: 1 [Step 6/11] Configuring input of the model [ INFO ] Model inputs: [ INFO ] x.1 (node: x.1) : f32 / [N,C,H,W] / [1,1,512,512] [ INFO ] Model outputs: - [ INFO ] 578 (node: 578) : f32 / [...] / [1,1,512,512] + [ INFO ] 571 (node: 571) : f32 / [...] / [1,1,512,512] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 199.95 ms + [ INFO ] Compile model took 188.00 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: main_graph @@ -721,21 +787,23 @@ be run in the notebook with ``! benchmark_app`` or [ INFO ] Fill input 'x.1' with random values [Step 10/11] Measuring performance (Start inference synchronously, limits: 15000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 33.43 ms + [ INFO ] First inference took 30.70 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 954 iterations - [ INFO ] Duration: 15006.26 ms + [ INFO ] Count: 971 iterations + [ INFO ] Duration: 15006.86 ms [ INFO ] Latency: - [ INFO ] Median: 15.47 ms - [ INFO ] Average: 15.52 ms - [ INFO ] Min: 15.18 ms - [ INFO ] Max: 19.34 ms - [ INFO ] Throughput: 63.57 FPS + [ INFO ] Median: 15.20 ms + [ INFO ] Average: 15.24 ms + [ INFO ] Min: 14.91 ms + [ INFO ] Max: 16.90 ms + [ INFO ] Throughput: 64.70 FPS + + +Visually Compare Inference Results +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Visually Compare Inference Results -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Visualize the results of the model on four slices of the validation set. Compare the results of the ``FP32`` IR model with the results of the @@ -814,15 +882,17 @@ seed is displayed to enable reproducing specific runs of this cell. .. parsed-literal:: - Visualizing results with seed 1698702266 + Visualizing results with seed 1701899334 .. image:: 110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_37_1.png -Show Live Inference -------------------------------------------------------------- +Show Live Inference +------------------- + + To show live inference on the model in the notebook, we will use the asynchronous processing feature of OpenVINO. @@ -839,8 +909,10 @@ printed. 
**NOTE**: If you experience flickering on Firefox, consider using Chrome or Edge to run this notebook. -Load Model and List of Image Files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load Model and List of Image Files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + We load the segmentation model to OpenVINO Runtime with ``SegmentationModel``, based on the `Open Model @@ -866,8 +938,10 @@ overlay of the segmentation mask on the original image/frame. case_00117, 69 images -Show Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Show Inference +~~~~~~~~~~~~~~ + + In the next cell, we run the ``show_live_inference`` function, which loads the ``segmentation_model`` to the specified ``device`` (using @@ -891,24 +965,29 @@ performs inference, and displays the results on the frames loaded in .. parsed-literal:: - Loaded model to CPU in 0.17 seconds. - Total time for 68 frames: 3.43 seconds, fps:20.10 + Loaded model to CPU in 0.18 seconds. + Total time for 68 frames: 2.73 seconds, fps:25.30 -References ----------------------------------------------------- +References +---------- -**OpenVINO** -- `NNCF Repository `__ -- `Neural Network Compression Framework for fast model -inference `__ -- `OpenVINO API Tutorial <002-openvino-api-with-output.html>`__ -- `OpenVINO PyPI (pip install openvino-dev) `__ -**Kits19 Data** +**OpenVINO** - `NNCF +Repository `__ - `Neural +Network Compression Framework for fast model +inference `__ - `OpenVINO API +Tutorial <002-openvino-api-with-output.html>`__ - `OpenVINO +PyPI (pip install +openvino-dev) `__ -- `Kits19 Challenge Homepage `__ -- `Kits19 GitHub Repository `__ -- `The KiTS19 Challenge Data: 300 Kidney Tumor Cases with Clinical Context, CT Semantic Segmentations, and Surgical Outcomes `__ -- `The state of the art in kidney and kidney tumor segmentation in contrast-enhanced CT imaging: Results of the KiTS19 challenge `__ +**Kits19 Data** - `Kits19 Challenge +Homepage `__ - `Kits19 GitHub +Repository `__ - `The KiTS19 +Challenge Data: 300 Kidney Tumor Cases with Clinical Context, CT +Semantic Segmentations, and Surgical +Outcomes `__ - `The state of the art +in kidney and kidney tumor segmentation in contrast-enhanced CT imaging: +Results of the KiTS19 +challenge `__ diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_15_1.png b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_15_1.png index adf48a5b97d54c..c9b0c7506f38cd 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_15_1.png +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_15_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:eb2418bea000be01c041f45ac09c14c701e09a7b1ffbc551fbc9f7ce4c4fb6fd +oid sha256:8d5031a8fde68c06c9d1585a57d549809af699f8411c9021d3571718ae037908 size 158997 diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_37_1.png b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_37_1.png index 9b09719c5d40b7..696c1b8436968f 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_37_1.png +++ 
b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/110-ct-segmentation-quantize-nncf-with-output_37_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6503457981c8d2d24c46d2879b1a499fb4143c566a5196b903b50b49c4094cc2 -size 378309 +oid sha256:04761a5918e007435a0a6d8e1e44df408ba32326558935ebb4c47c48aa1d41cc +size 385394 diff --git a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/index.html b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/index.html index e8d45cd1099e5d..4c06e712a0fb91 100644 --- a/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/index.html +++ b/docs/notebooks/110-ct-segmentation-quantize-nncf-with-output_files/index.html @@ -1,9 +1,9 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/110-ct-segmentation-quantize-nncf-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/110-ct-segmentation-quantize-nncf-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/110-ct-segmentation-quantize-nncf-with-output_files/


../
-110-ct-segmentation-quantize-nncf-with-output_1..> 31-Oct-2023 00:35              158997
-110-ct-segmentation-quantize-nncf-with-output_3..> 31-Oct-2023 00:35              378309
-110-ct-segmentation-quantize-nncf-with-output_4..> 31-Oct-2023 00:35               73812
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/110-ct-segmentation-quantize-nncf-with-output_files/


../
+110-ct-segmentation-quantize-nncf-with-output_1..> 07-Dec-2023 00:49              158997
+110-ct-segmentation-quantize-nncf-with-output_3..> 07-Dec-2023 00:49              385394
+110-ct-segmentation-quantize-nncf-with-output_4..> 07-Dec-2023 00:49               73812
 

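As a compact reference for the quantization flow diffed above: the whole 110 pipeline reduces to wrapping the calibration loader in ``nncf.Dataset``, calling ``nncf.quantize`` with the ``IgnoredScope`` shown in the hunk, and exporting through ONNX to OpenVINO IR. A sketch under stated assumptions: ``model`` is the FP32 ``torch.nn.Module`` and ``data_loader`` its calibration ``DataLoader`` (placeholder names).

.. code:: ipython3

    import nncf
    import torch
    import openvino as ov

    def transform_fn(data_item):
        # quantization needs only the network input, not the segmentation mask
        return data_item[0]

    calibration_dataset = nncf.Dataset(data_loader, transform_fn)
    quantized_model = nncf.quantize(
        model,
        calibration_dataset,
        ignored_scope=nncf.IgnoredScope(patterns=[".*LeakyReLU.*"]),
    )

    # Export through ONNX, then convert and save as OpenVINO IR
    dummy_input = torch.randn(1, 1, 512, 512)  # one CT slice, as in this notebook
    torch.onnx.export(quantized_model, dummy_input, "quantized_unet.onnx")
    ov.save_model(ov.convert_model("quantized_unet.onnx"), "model/quantized_unet.xml")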
diff --git a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst index 8fdf5bea838ea4..311973516693b3 100644 --- a/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst +++ b/docs/notebooks/112-pytorch-post-training-quantization-nncf-with-output.rst @@ -31,8 +31,7 @@ quantization, not demanding the fine-tuning of the model. - `Settings <#settings>`__ - `Download and Prepare Tiny ImageNet dataset <#download-and-prepare-tiny-imagenet-dataset>`__ - - `Helpers classes and - functions <#helpers-classes-and-functions>`__ + - `Helpers classes and functions <#helpers-classes-and-functions>`__ - `Validation function <#validation-function>`__ - `Create and load original uncompressed model <#create-and-load-original-uncompressed-model>`__ @@ -42,8 +41,7 @@ quantization, not demanding the fine-tuning of the model. - `Model quantization and benchmarking <#model-quantization-and-benchmarking>`__ - - `I. Evaluate the loaded - model <#i-evaluate-the-loaded-model>`__ + - `I. Evaluate the loaded model <#i-evaluate-the-loaded-model>`__ - `II. Create and initialize quantization <#ii-create-and-initialize-quantization>`__ - `III. Convert the models to OpenVINO Intermediate Representation @@ -52,8 +50,10 @@ quantization, not demanding the fine-tuning of the model. - `IV. Compare performance of INT8 model and FP32 model in OpenVINO <#iv-compare-performance-of-int-model-and-fp-model-in-openvino>`__ -Preparations ------------------------------------------------------- +Preparations +------------ + + .. code:: ipython3 @@ -105,8 +105,10 @@ Preparations os.environ["LIB"] = os.pathsep.join(b.library_dirs) print(f"Added {vs_dir} to PATH") -Imports -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Imports +~~~~~~~ + + .. code:: ipython3 @@ -133,8 +135,10 @@ Imports INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino -Settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Settings +~~~~~~~~ + + .. code:: ipython3 @@ -174,12 +178,14 @@ Settings .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/112-pytorch-post-training-quantization-nncf/model/resnet50_fp32.pth') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/112-pytorch-post-training-quantization-nncf/model/resnet50_fp32.pth') + +Download and Prepare Tiny ImageNet dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Download and Prepare Tiny ImageNet dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - 100k images of shape 3x64x64, - 200 different classes: snake, spider, cat, truck, grasshopper, gull, @@ -238,8 +244,10 @@ Download and Prepare Tiny ImageNet dataset Successfully downloaded and extracted dataset to: output -Helpers classes and functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Helpers classes and functions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The code below will help to count accuracy and visualize validation process. @@ -304,8 +312,10 @@ process. return res -Validation function -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validation function +~~~~~~~~~~~~~~~~~~~ + + .. 
code:: ipython3 @@ -357,8 +367,10 @@ Validation function ) return top1.avg -Create and load original uncompressed model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Create and load original uncompressed model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ResNet-50 from the `torchivision repository `__ is pre-trained on @@ -385,8 +397,10 @@ values. model = create_model(MODEL_DIR / fp32_checkpoint_filename) -Create train and validation DataLoaders -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Create train and validation DataLoaders +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -435,15 +449,19 @@ Create train and validation DataLoaders train_loader, val_loader = create_dataloaders() -Model quantization and benchmarking ------------------------------------------------------------------------------ +Model quantization and benchmarking +----------------------------------- + + With the validation pipeline, model files, and data-loading procedures for model calibration now prepared, it’s time to proceed with the actual post-training quantization using NNCF. -I. Evaluate the loaded model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +I. Evaluate the loaded model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -453,20 +471,22 @@ I. Evaluate the loaded model .. parsed-literal:: - Test: [ 0/79] Time 0.260 (0.260) Acc@1 81.25 (81.25) Acc@5 92.19 (92.19) - Test: [10/79] Time 0.232 (0.238) Acc@1 56.25 (66.97) Acc@5 86.72 (87.50) - Test: [20/79] Time 0.233 (0.237) Acc@1 67.97 (64.29) Acc@5 85.16 (87.35) - Test: [30/79] Time 0.233 (0.236) Acc@1 53.12 (62.37) Acc@5 77.34 (85.33) - Test: [40/79] Time 0.236 (0.235) Acc@1 67.19 (60.86) Acc@5 90.62 (84.51) - Test: [50/79] Time 0.232 (0.235) Acc@1 60.16 (60.80) Acc@5 88.28 (84.42) - Test: [60/79] Time 0.230 (0.235) Acc@1 66.41 (60.46) Acc@5 86.72 (83.79) - Test: [70/79] Time 0.244 (0.235) Acc@1 52.34 (60.21) Acc@5 80.47 (83.33) - * Acc@1 60.740 Acc@5 83.960 Total time: 18.416 + Test: [ 0/79] Time 0.257 (0.257) Acc@1 81.25 (81.25) Acc@5 92.19 (92.19) + Test: [10/79] Time 0.233 (0.231) Acc@1 56.25 (66.97) Acc@5 86.72 (87.50) + Test: [20/79] Time 0.223 (0.231) Acc@1 67.97 (64.29) Acc@5 85.16 (87.35) + Test: [30/79] Time 0.231 (0.231) Acc@1 53.12 (62.37) Acc@5 77.34 (85.33) + Test: [40/79] Time 0.232 (0.234) Acc@1 67.19 (60.86) Acc@5 90.62 (84.51) + Test: [50/79] Time 0.226 (0.233) Acc@1 60.16 (60.80) Acc@5 88.28 (84.42) + Test: [60/79] Time 0.225 (0.233) Acc@1 66.41 (60.46) Acc@5 86.72 (83.79) + Test: [70/79] Time 0.232 (0.234) Acc@1 52.34 (60.21) Acc@5 80.47 (83.33) + * Acc@1 60.740 Acc@5 83.960 Total time: 18.296 Test accuracy of FP32 model: 60.740 -II. Create and initialize quantization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +II. Create and initialize quantization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + NNCF enables post-training quantization by adding the quantization layers into the model graph and then using a subset of the training @@ -502,32 +522,59 @@ Guide
+ + + + +.. raw:: html + +
+    
+ .. parsed-literal:: - INFO:nncf:Collecting tensor statistics |█████ | 1 / 3 - INFO:nncf:Collecting tensor statistics |██████████ | 2 / 3 - INFO:nncf:Collecting tensor statistics |████████████████| 3 / 3 INFO:nncf:Compiling and loading torch extension: quantized_functions_cpu... INFO:nncf:Finished loading torch extension: quantized_functions_cpu - INFO:nncf:BatchNorm statistics adaptation |█████ | 1 / 3 - INFO:nncf:BatchNorm statistics adaptation |██████████ | 2 / 3 - INFO:nncf:BatchNorm statistics adaptation |████████████████| 3 / 3 + + + +.. parsed-literal:: + + Output() + + + +.. raw:: html + +

+    
+ 3. Evaluate the new model on the validation set after initialization of @@ -543,16 +590,16 @@ Guide `__ - `Perform Quantization <#perform-quantization>`__ - - `Create Dataset for - Validation <#create-dataset-for-validation>`__ + - `Create Dataset for Validation <#create-dataset-for-validation>`__ - `Run nncf.quantize for Getting an Optimized Model <#run-nncfquantize-for-getting-an-optimized-model>`__ -- `Serialize an OpenVINO IR - model <#serialize-an-openvino-ir-model>`__ +- `Serialize an OpenVINO IR model <#serialize-an-openvino-ir-model>`__ - `Compare Accuracy of the Original and Quantized Models <#compare-accuracy-of-the-original-and-quantized-models>`__ @@ -66,8 +64,10 @@ This tutorial consists of the following steps: DATA_DIR.mkdir(exist_ok=True) MODEL_DIR.mkdir(exist_ok=True) -Prepare the Model ------------------------------------------------------------ +Prepare the Model +----------------- + + Model preparation stage has the following steps: @@ -91,10 +91,10 @@ Model preparation stage has the following steps: Cloning into 'pytorch-cifar-models'... remote: Enumerating objects: 282, done. remote: Counting objects: 100% (281/281), done. - remote: Compressing objects: 100% (95/95), done. - remote: Total 282 (delta 136), reused 269 (delta 129), pack-reused 1 - Receiving objects: 100% (282/282), 9.22 MiB | 3.32 MiB/s, done. - Resolving deltas: 100% (136/136), done. + remote: Compressing objects: 100% (96/96), done. + remote: Total 282 (delta 135), reused 269 (delta 128), pack-reused 1 + Receiving objects: 100% (282/282), 9.22 MiB | 3.72 MiB/s, done. + Resolving deltas: 100% (135/135), done. .. code:: ipython3 @@ -125,8 +125,10 @@ can be found on this ov.save_model(ov_model, MODEL_DIR / "mobilenet_v2.xml") -Prepare Dataset ---------------------------------------------------------- +Prepare Dataset +--------------- + + We will use `CIFAR10 `__ dataset from @@ -158,7 +160,7 @@ Preprocessing for model obtained from training .. parsed-literal:: - 100%|██████████| 170498071/170498071 [01:12<00:00, 2348008.09it/s] + 100%|██████████| 170498071/170498071 [00:48<00:00, 3527631.36it/s] .. parsed-literal:: @@ -166,8 +168,10 @@ Preprocessing for model obtained from training Extracting data/cifar-10-python.tar.gz to data -Perform Quantization --------------------------------------------------------------- +Perform Quantization +-------------------- + + `NNCF `__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -180,8 +184,10 @@ MobileNetV2. The optimization process contains the following steps: 3. Serialize an OpenVINO IR model, using the ``openvino.save_model`` function. -Create Dataset for Validation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Create Dataset for Validation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + NNCF is compatible with ``torch.utils.data.DataLoader`` interface. For performing quantization it should be passed into ``nncf.Dataset`` object @@ -205,8 +211,10 @@ model during quantization, in our case, to pick input tensor from pair INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino -Run nncf.quantize for Getting an Optimized Model ------------------------------------------------------------------------------------------- +Run nncf.quantize for Getting an Optimized Model +------------------------------------------------ + + ``nncf.quantize`` function accepts model and prepared quantization dataset for performing basic quantization. 
Optionally, additional @@ -222,16 +230,58 @@ about supported parameters can be found on this .. parsed-literal:: - 2023-10-30 22:54:06.313060: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-30 22:54:06.344685: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-06 23:00:00.245123: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-06 23:00:00.276123: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-30 22:54:06.959396: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - Statistics collection: 100%|██████████| 300/300 [00:09<00:00, 31.98it/s] - Applying Fast Bias correction: 100%|██████████| 36/36 [00:01<00:00, 20.03it/s] + 2023-12-06 23:00:00.791927: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + + + +.. parsed-literal:: + + Output() + + + +.. raw:: html + +

+

+

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/113-image-classification-quantization-with-output_files/


../
-113-image-classification-quantization-with-outp..> 31-Oct-2023 00:35               14855
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/113-image-classification-quantization-with-output_files/


../
+113-image-classification-quantization-with-outp..> 07-Dec-2023 00:49               14855
 

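One more hedged sketch tied to the 113 diff above: the accuracy comparison is a single pass over the validation data per model. Assuming ``MODEL_DIR`` and a torchvision CIFAR10 ``test_dataset`` as in the notebook (the iteration details here are illustrative, not the notebook's exact cell):

.. code:: ipython3

    import numpy as np
    import openvino as ov

    core = ov.Core()
    compiled = core.compile_model(MODEL_DIR / "quantized_mobilenet_v2.xml", "CPU")
    output = compiled.output(0)

    correct = total = 0
    for image, label in test_dataset:  # (tensor, int) pairs from torchvision CIFAR10
        logits = compiled(image.numpy()[None])[output]  # [None] adds the batch dimension
        correct += int(np.argmax(logits) == label)
        total += 1
    print(f"Accuracy: {correct / total:.2%}")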
diff --git a/docs/notebooks/115-async-api-with-output.rst b/docs/notebooks/115-async-api-with-output.rst index 51f74ce38d7b3f..3850ebc5ed1e05 100644 --- a/docs/notebooks/115-async-api-with-output.rst +++ b/docs/notebooks/115-async-api-with-output.rst @@ -13,6 +13,7 @@ requests) rather than wait for the current inference to complete first. **Table of contents:** + - `Imports <#imports>`__ - `Prepare model and data processing <#prepare-model-and-data-processing>`__ @@ -36,8 +37,7 @@ requests) rather than wait for the current inference to complete first. - `AsyncInferQueue <#asyncinferqueue>`__ - `Setting Callback <#setting-callback>`__ - - `Test the performance with - AsyncInferQueue <#test-the-performance-with-asyncinferqueue>`__ + - `Test the performance with AsyncInferQueue <#test-the-performance-with-asyncinferqueue>`__ Imports ------- @@ -46,8 +46,15 @@ Imports .. code:: ipython3 - # %pip install -q "openvino>=2023.1.0" - # %pip install -q opencv-python matplotlib + %pip install -q "openvino>=2023.1.0" + %pip install -q opencv-python matplotlib + + +.. parsed-literal:: + + Note: you may need to restart the kernel to use updated packages. + Note: you may need to restart the kernel to use updated packages. + .. code:: ipython3 @@ -306,7 +313,7 @@ Test performance in Sync Mode .. parsed-literal:: Source ended - average throuput in sync mode: 38.27 fps + average throuput in sync mode: 40.67 fps Async Mode @@ -445,7 +452,7 @@ Test the performance in Async Mode .. parsed-literal:: Source ended - average throuput in async mode: 72.15 fps + average throuput in async mode: 74.75 fps Compare the performance @@ -588,5 +595,5 @@ Test the performance with ``AsyncInferQueue`` .. parsed-literal:: - average throughput in async mode with async infer queue: 105.36 fps + average throughput in async mode with async infer queue: 111.75 fps diff --git a/docs/notebooks/115-async-api-with-output_files/115-async-api-with-output_21_0.png b/docs/notebooks/115-async-api-with-output_files/115-async-api-with-output_21_0.png index dcc102dd691908..c452c1a5664d1d 100644 --- a/docs/notebooks/115-async-api-with-output_files/115-async-api-with-output_21_0.png +++ b/docs/notebooks/115-async-api-with-output_files/115-async-api-with-output_21_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e4f523a824b6e628ef48fa654a0af2dedb2661f23bf18bc31d1e9cc37540fccd -size 30440 +oid sha256:8b8c8b61f0bbb25c280a3e72cf2172fd29bf11668231cb3d2527c1b8a05307f2 +size 30406 diff --git a/docs/notebooks/115-async-api-with-output_files/index.html b/docs/notebooks/115-async-api-with-output_files/index.html index 87539a38a88371..376097c1329249 100644 --- a/docs/notebooks/115-async-api-with-output_files/index.html +++ b/docs/notebooks/115-async-api-with-output_files/index.html @@ -1,10 +1,10 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/115-async-api-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/115-async-api-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/115-async-api-with-output_files/


../
-115-async-api-with-output_15_0.png                 15-Nov-2023 00:43                4307
-115-async-api-with-output_19_0.png                 15-Nov-2023 00:43                4307
-115-async-api-with-output_21_0.png                 15-Nov-2023 00:43               30440
-115-async-api-with-output_27_0.png                 15-Nov-2023 00:43                4307
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/115-async-api-with-output_files/


../
+115-async-api-with-output_15_0.png                 07-Dec-2023 00:49                4307
+115-async-api-with-output_19_0.png                 07-Dec-2023 00:49                4307
+115-async-api-with-output_21_0.png                 07-Dec-2023 00:49               30406
+115-async-api-with-output_27_0.png                 07-Dec-2023 00:49                4307
 

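Finally, the sync-versus-async gap in the 115 numbers above comes from overlapping host-side work with device inference. The core of the pattern, as a sketch with a placeholder ``frames`` iterator and ``preprocess`` helper (not the notebook's exact video pipeline):

.. code:: ipython3

    import openvino as ov

    core = ov.Core()
    compiled = core.compile_model("model.xml", "CPU")  # placeholder IR path
    request = compiled.create_infer_request()

    frame = preprocess(next(frames))
    while frame is not None:
        request.start_async({0: frame})      # returns immediately
        nxt = next(frames, None)             # fetch the next frame while inference runs
        next_frame = preprocess(nxt) if nxt is not None else None
        request.wait()                       # block until this frame's result is ready
        detections = request.get_output_tensor(0).data
        frame = next_frame

``AsyncInferQueue`` generalizes this to several in-flight requests, which is where the highest FPS figure above comes from.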
diff --git a/docs/notebooks/116-sparsity-optimization-with-output.rst b/docs/notebooks/116-sparsity-optimization-with-output.rst index f02e7081021741..b6eb46e54c5252 100644 --- a/docs/notebooks/116-sparsity-optimization-with-output.rst +++ b/docs/notebooks/116-sparsity-optimization-with-output.rst @@ -37,13 +37,15 @@ consists of the following steps: performance <#benchmark-quantized-sparse-inference-performance>`__ - `When this might be helpful <#when-this-might-be-helpful>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + .. code:: ipython3 %pip install -q "openvino>=2023.1.0" - %pip install -q "git+https://github.com/huggingface/optimum-intel.git" datasets onnx onnxruntime + %pip install -q "git+https://github.com/huggingface/optimum-intel.git" datasets onnx transformers>=4.33.0 --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: @@ -52,8 +54,10 @@ Prerequisites Note: you may need to restart the kernel to use updated packages. -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -73,16 +77,16 @@ Imports .. parsed-literal:: No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - 2023-10-30 22:57:12.569340: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-30 22:57:12.603049: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-06 23:02:39.282111: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-06 23:02:39.316382: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-30 22:57:13.131994: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/deepspeed.py:23: FutureWarning: transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations - warnings.warn( + 2023-12-06 23:02:40.030243: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + + +Download, quantize and sparsify the model, using Hugging Face Optimum API +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Download, quantize and sparsify the model, using Hugging Face Optimum API -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is to download a quantized sparse transformers which has been translated to OpenVINO IR. Then, it will be put through a @@ -113,7 +117,7 @@ model card on Hugging Face. .. parsed-literal:: Compiling the model to CPU ... 
- Set CACHE_DIR to /opt/home/k8sworker/.cache/huggingface/hub/models--OpenVINO--bert-base-uncased-sst2-int8-unstructured80/snapshots/dc44eb46300882463d50ee847e0f6485bad3cdad/model_cache + Setting OpenVINO CACHE_DIR to /opt/home/k8sworker/.cache/huggingface/hub/models--OpenVINO--bert-base-uncased-sst2-int8-unstructured80/snapshots/dc44eb46300882463d50ee847e0f6485bad3cdad/model_cache .. parsed-literal:: @@ -147,8 +151,10 @@ the IRs into a single folder. -Benchmark quantized dense inference performance ------------------------------------------------------------------------------------------ +Benchmark quantized dense inference performance +----------------------------------------------- + + Benchmark dense inference performance using parallel execution on four CPU cores to simulate a small instance in the cloud infrastructure. @@ -187,18 +193,18 @@ as an example. It is recommended to tune based on your applications. [ INFO ] Parsing input parameters [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] CPU - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(CPU) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 77.13 ms + [ INFO ] Read model took 60.26 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] input_ids (node: input_ids) : i64 / [...] / [?,?] @@ -209,7 +215,7 @@ as an example. It is recommended to tune based on your applications. [Step 5/11] Resizing model to match image sizes and given batch [ INFO ] Model batch size: 1 [ INFO ] Reshaping model: 'input_ids': [1,64], 'attention_mask': [1,64], 'token_type_ids': [1,64] - [ INFO ] Reshape model took 25.98 ms + [ INFO ] Reshape model took 24.75 ms [Step 6/11] Configuring input of the model [ INFO ] Model inputs: [ INFO ] input_ids (node: input_ids) : i64 / [...] / [1,64] @@ -218,7 +224,7 @@ as an example. It is recommended to tune based on your applications. [ INFO ] Model outputs: [ INFO ] logits (node: logits) : f32 / [...] / [1,2] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 970.05 ms + [ INFO ] Compile model took 1092.05 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: torch_jit @@ -246,21 +252,23 @@ as an example. It is recommended to tune based on your applications. [ INFO ] Fill input 'token_type_ids' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 4 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). 
- [ INFO ] First inference took 28.50 ms + [ INFO ] First inference took 27.56 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 9116 iterations - [ INFO ] Duration: 60051.84 ms + [ INFO ] Count: 8952 iterations + [ INFO ] Duration: 60029.00 ms [ INFO ] Latency: - [ INFO ] Median: 26.14 ms - [ INFO ] Average: 26.19 ms - [ INFO ] Min: 24.91 ms - [ INFO ] Max: 41.99 ms - [ INFO ] Throughput: 151.80 FPS + [ INFO ] Median: 26.46 ms + [ INFO ] Average: 26.52 ms + [ INFO ] Min: 25.37 ms + [ INFO ] Max: 40.49 ms + [ INFO ] Throughput: 149.13 FPS + + +Benchmark quantized sparse inference performance +------------------------------------------------ -Benchmark quantized sparse inference performance ------------------------------------------------------------------------------------------- To enable sparse weight decompression feature, users can add it to runtime config like below. ``CPU_SPARSE_WEIGHTS_DECOMPRESSION_RATE`` @@ -300,18 +308,18 @@ for which a layer will be enabled. [ INFO ] Parsing input parameters [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] CPU - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(CPU) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 83.15 ms + [ INFO ] Read model took 61.56 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] input_ids (node: input_ids) : i64 / [...] / [?,?] @@ -322,7 +330,7 @@ for which a layer will be enabled. [Step 5/11] Resizing model to match image sizes and given batch [ INFO ] Model batch size: 1 [ INFO ] Reshaping model: 'input_ids': [1,64], 'attention_mask': [1,64], 'token_type_ids': [1,64] - [ INFO ] Reshape model took 26.29 ms + [ INFO ] Reshape model took 24.68 ms [Step 6/11] Configuring input of the model [ INFO ] Model inputs: [ INFO ] input_ids (node: input_ids) : i64 / [...] / [1,64] @@ -331,7 +339,7 @@ for which a layer will be enabled. [ INFO ] Model outputs: [ INFO ] logits (node: logits) : f32 / [...] / [1,2] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 903.83 ms + [ INFO ] Compile model took 1029.24 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: torch_jit @@ -359,21 +367,23 @@ for which a layer will be enabled. [ INFO ] Fill input 'token_type_ids' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 4 inference requests, limits: 60000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). 
- [ INFO ] First inference took 29.52 ms + [ INFO ] First inference took 29.95 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 9128 iterations - [ INFO ] Duration: 60046.44 ms + [ INFO ] Count: 8984 iterations + [ INFO ] Duration: 60026.81 ms [ INFO ] Latency: - [ INFO ] Median: 26.15 ms - [ INFO ] Average: 26.18 ms - [ INFO ] Min: 25.14 ms - [ INFO ] Max: 42.73 ms - [ INFO ] Throughput: 152.02 FPS + [ INFO ] Median: 26.52 ms + [ INFO ] Average: 26.59 ms + [ INFO ] Min: 23.89 ms + [ INFO ] Max: 40.95 ms + [ INFO ] Throughput: 149.67 FPS + + +When this might be helpful +-------------------------- -When this might be helpful --------------------------------------------------------------------- This feature can improve inference performance for models with sparse weights in the scenarios when the model is deployed to handle multiple diff --git a/docs/notebooks/119-tflite-to-openvino-with-output.rst b/docs/notebooks/119-tflite-to-openvino-with-output.rst index 843d31494aae02..299489f15e64e5 100644 --- a/docs/notebooks/119-tflite-to-openvino-with-output.rst +++ b/docs/notebooks/119-tflite-to-openvino-with-output.rst @@ -27,18 +27,21 @@ and do inference with a sample image. Format <#convert-a-model-to-openvino-ir-format>`__ - `Load model using OpenVINO TensorFlow Lite Frontend <#load-model-using-openvino-tensorflow-lite-frontend>`__ -- `Run OpenVINO model - inference <#run-openvino-model-inference>`__ +- `Run OpenVINO model inference <#run-openvino-model-inference>`__ - `Select inference device <#select-inference-device>`__ - `Estimate Model Performance <#estimate-model-performance>`__ -Preparation ------------------------------------------------------ +Preparation +----------- + + + +Install requirements +~~~~~~~~~~~~~~~~~~~~ + -Install requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -59,8 +62,10 @@ Install requirements Note: you may need to restart the kernel to use updated packages. -Imports -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Imports +~~~~~~~ + + .. code:: ipython3 @@ -71,8 +76,10 @@ Imports from notebook_utils import download_file, load_image -Download TFLite model ---------------------------------------------------------------- +Download TFLite model +--------------------- + + .. code:: ipython3 @@ -80,7 +87,7 @@ Download TFLite model tflite_model_path = model_dir / "efficientnet_lite0_fp32_2.tflite" ov_model_path = tflite_model_path.with_suffix(".xml") - model_url = "https://tfhub.dev/tensorflow/lite-model/efficientnet/lite0/fp32/2?lite-format=tflite" + model_url = "https://www.kaggle.com/models/tensorflow/efficientnet/frameworks/tfLite/variations/lite0-fp32/versions/2?lite-format=tflite" download_file(model_url, tflite_model_path.name, model_dir) @@ -95,12 +102,14 @@ Download TFLite model .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/119-tflite-to-openvino/model/efficientnet_lite0_fp32_2.tflite') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/119-tflite-to-openvino/model/efficientnet_lite0_fp32_2.tflite') -Convert a Model to OpenVINO IR Format -------------------------------------------------------------------------------- +Convert a Model to OpenVINO IR Format +------------------------------------- + + To convert the TFLite model to OpenVINO IR, model conversion Python API can be used. 
``ov.convert_model`` function accepts the path to the @@ -127,13 +136,15 @@ For TensorFlow Lite models support, refer to this Model model/efficientnet_lite0_fp32_2.tflite successfully converted and saved to model/efficientnet_lite0_fp32_2.xml -Load model using OpenVINO TensorFlow Lite Frontend --------------------------------------------------------------------------------------------- +Load model using OpenVINO TensorFlow Lite Frontend +-------------------------------------------------- + + TensorFlow Lite models are supported via ``FrontEnd`` API. You may skip conversion to IR and read models directly by OpenVINO runtime API. For more examples supported formats reading via Frontend API, please look -this `tutorial <002-openvino-api-with-output.html>`__. +this `tutorial <../002-openvino-api>`__. .. code:: ipython3 @@ -141,8 +152,10 @@ this `tutorial <002-openvino-api-with-output.html>`__. ov_model = core.read_model(tflite_model_path) -Run OpenVINO model inference ----------------------------------------------------------------------- +Run OpenVINO model inference +---------------------------- + + We can find information about model input preprocessing in its `description `__ @@ -156,8 +169,10 @@ on `TensorFlow Hub `__. resized_image = image.resize((224, 224)) input_tensor = np.expand_dims((np.array(resized_image).astype(np.float32) - 127) / 128, 0) -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -216,8 +231,8 @@ select device from dropdown list for running inference using OpenVINO Predicted label: n02109047 Great Dane with probability 0.715318 -Estimate Model Performance --------------------------------------------------------------------- +Estimate Model Performance +-------------------------- `Benchmark Tool `__ @@ -247,18 +262,18 @@ GPU. [ INFO ] Parsing input parameters [Step 2/11] Loading OpenVINO Runtime [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] CPU - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(CPU) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 29.37 ms + [ INFO ] Read model took 28.35 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] images (node: images) : f32 / [...] / [1,224,224,3] @@ -272,7 +287,7 @@ GPU. [ INFO ] Model outputs: [ INFO ] Softmax (node: 63) : f32 / [...] / [1,1000] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 133.03 ms + [ INFO ] Compile model took 147.48 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: TensorFlow_Lite_Frontend_IR @@ -296,15 +311,15 @@ GPU. [ INFO ] Fill input 'images' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 6 inference requests, limits: 15000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). 
- [ INFO ] First inference took 7.30 ms + [ INFO ] First inference took 7.23 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 17562 iterations - [ INFO ] Duration: 15010.30 ms + [ INFO ] Count: 17526 iterations + [ INFO ] Duration: 15005.83 ms [ INFO ] Latency: - [ INFO ] Median: 4.98 ms - [ INFO ] Average: 4.99 ms - [ INFO ] Min: 3.69 ms - [ INFO ] Max: 15.16 ms - [ INFO ] Throughput: 1170.00 FPS + [ INFO ] Median: 5.00 ms + [ INFO ] Average: 5.00 ms + [ INFO ] Min: 2.72 ms + [ INFO ] Max: 15.43 ms + [ INFO ] Throughput: 1167.95 FPS diff --git a/docs/notebooks/119-tflite-to-openvino-with-output_files/index.html b/docs/notebooks/119-tflite-to-openvino-with-output_files/index.html index 783453225f99b3..ae8ae03dd1a6a9 100644 --- a/docs/notebooks/119-tflite-to-openvino-with-output_files/index.html +++ b/docs/notebooks/119-tflite-to-openvino-with-output_files/index.html @@ -1,8 +1,8 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/119-tflite-to-openvino-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/119-tflite-to-openvino-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/119-tflite-to-openvino-with-output_files/


../
-119-tflite-to-openvino-with-output_16_1.jpg        31-Oct-2023 00:35               68170
-119-tflite-to-openvino-with-output_16_1.png        31-Oct-2023 00:35              621006
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/119-tflite-to-openvino-with-output_files/


../
+119-tflite-to-openvino-with-output_16_1.jpg        07-Dec-2023 00:49               68170
+119-tflite-to-openvino-with-output_16_1.png        07-Dec-2023 00:49              621006
 

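For quick reference, the two loading paths shown in the TFLite notebook above, explicit conversion through ``ov.convert_model`` and direct reading through the TensorFlow Lite frontend, can be condensed into a short sketch. The paths are the ones used in the notebook; treat them as placeholders for your own layout.

.. code:: ipython3

    import openvino as ov

    core = ov.Core()

    # Path 1: convert the .tflite file explicitly and save it as OpenVINO IR.
    ov_model = ov.convert_model("model/efficientnet_lite0_fp32_2.tflite")
    ov.save_model(ov_model, "model/efficientnet_lite0_fp32_2.xml")

    # Path 2: skip explicit conversion; the TensorFlow Lite frontend lets
    # read_model open the .tflite file directly.
    ov_model = core.read_model("model/efficientnet_lite0_fp32_2.tflite")
    compiled_model = core.compile_model(ov_model, "CPU")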
diff --git a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst index 8b39f89c50ea0d..034af17d0cb6b6 100644 --- a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst +++ b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output.rst @@ -25,7 +25,6 @@ Runtime `__ - `Imports <#imports>`__ @@ -43,8 +42,10 @@ and do inference with a sample image. - `Async inference pipeline <#async-inference-pipeline>`__ - `Integration preprocessing to model <#integration-preprocessing-to-model>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + Install required packages: @@ -71,8 +72,10 @@ The notebook uses utility functions. The cell below will download the filename="notebook_utils.py", ); -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -90,8 +93,10 @@ Imports # OpenVINO modules import openvino as ov -Settings --------------------------------------------------- +Settings +-------- + + Define model related variables and create corresponding directories: @@ -113,12 +118,14 @@ Define model related variables and create corresponding directories: openvino_ir_path = ir_model_dir / f"{model_name}.xml" - tf_model_url = "https://tfhub.dev/tensorflow/mask_rcnn/inception_resnet_v2_1024x1024/1?tf-hub-format=compressed" + tf_model_url = "https://www.kaggle.com/models/tensorflow/mask-rcnn-inception-resnet-v2/frameworks/tensorFlow2/variations/1024x1024/versions/1?tf-hub-format=compressed" tf_model_archive_filename = f"{model_name}.tar.gz" -Download Model from TensorFlow Hub ----------------------------------------------------------------------------- +Download Model from TensorFlow Hub +---------------------------------- + + Download archive with TensorFlow Instance Segmentation model (`mask_rcnn_inception_resnet_v2_1024x1024 `__) @@ -149,8 +156,10 @@ archive: with tarfile.open(tf_model_dir / tf_model_archive_filename) as file: file.extractall(path=tf_model_dir) -Convert Model to OpenVINO IR ----------------------------------------------------------------------- +Convert Model to OpenVINO IR +---------------------------- + + OpenVINO Model Optimizer Python API can be used to convert the TensorFlow model to OpenVINO IR. @@ -175,11 +184,15 @@ when the model is run in the future. # Save converted OpenVINO IR model to the corresponding directory ov.save_model(ov_model, openvino_ir_path) -Test Inference on the Converted Model -------------------------------------------------------------------------------- +Test Inference on the Converted Model +------------------------------------- + + + +Select inference device +----------------------- + -Select inference device ------------------------------------------------------------------ select device from dropdown list for running inference using OpenVINO @@ -206,31 +219,35 @@ select device from dropdown list for running inference using OpenVINO -Load the Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load the Model +~~~~~~~~~~~~~~ + + .. 
code:: ipython3 openvino_ir_model = core.read_model(openvino_ir_path) compiled_model = core.compile_model(model=openvino_ir_model, device_name=device.value) -Get Model Information -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get Model Information +~~~~~~~~~~~~~~~~~~~~~ + + Mask R-CNN with Inception ResNet V2 instance segmentation model has one input - a three-channel image of variable size. The input tensor shape is ``[1, height, width, 3]`` with values in ``[0, 255]``. Model output dictionary contains a lot of tensors, we will use only 5 of -them: - -- ``num_detections``: A ``tf.int`` tensor with only one value, the number of detections ``[N]``. -- ``detection_boxes``: A ``tf.float32`` tensor of shape ``[N, 4]`` containing bounding box coordinates in the following order: ``[ymin, xmin, ymax, xmax]``. -- ``detection_classes``: A ``tf.int`` tensor of shape ``[N]`` containing detection class index from the label file. -- ``detection_scores``: A ``tf.float32`` tensor of shape ``[N]`` containing detection scores. -- ``detection_masks``: A ``[batch, max_detections, mask_height, mask_width]`` tensor. - - Note that apixel-wise sigmoid score converter is applied to the detection masks. +them: - ``num_detections``: A ``tf.int`` tensor with only one value, the +number of detections ``[N]``. - ``detection_boxes``: A ``tf.float32`` +tensor of shape ``[N, 4]`` containing bounding box coordinates in the +following order: ``[ymin, xmin, ymax, xmax]``. - ``detection_classes``: +A ``tf.int`` tensor of shape ``[N]`` containing detection class index +from the label file. - ``detection_scores``: A ``tf.float32`` tensor of +shape ``[N]`` containing detection scores. - ``detection_masks``: A +``[batch, max_detections, mask_height, mask_width]`` tensor. Note that a +pixel-wise sigmoid score converter is applied to the detection masks. For more information about model inputs, outputs and their formats, see the `model overview page on TensorFlow @@ -290,8 +307,10 @@ the first (and highest) detection score. -Get an Image for Test Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get an Image for Test Inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Load and save an image: @@ -336,7 +355,7 @@ Read the image, resize and convert it to the input shape of the network: .. parsed-literal:: - + @@ -344,8 +363,10 @@ Read the image, resize and convert it to the input shape of the network: .. image:: 120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_25_1.png -Perform Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Perform Inference +~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -391,8 +412,10 @@ be extracted from the result. For further model result visualization image_detections_num: [100.] -Inference Result Visualization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Inference Result Visualization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Define utility functions to visualize the inference results @@ -631,24 +654,29 @@ original test image: .. image:: 120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_39_0.png -Next Steps ----------------------------------------------------- +Next Steps +---------- + + This section contains suggestions on how to additionally improve the performance of your application using OpenVINO. 
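Before moving to those suggestions, the five output tensors described earlier in this notebook can be unpacked in a few lines. This is a hedged sketch: ``result`` stands for the dictionary returned by the compiled model, and the string keys assume the converted model kept the original output names.

.. code:: ipython3

    # Keep only the valid detections out of the padded output tensors.
    num_detections = int(result["num_detections"][0])
    boxes = result["detection_boxes"][0][:num_detections]    # [ymin, xmin, ymax, xmax]
    classes = result["detection_classes"][0][:num_detections].astype(int)
    scores = result["detection_scores"][0][:num_detections]
    masks = result["detection_masks"][0][:num_detections]    # pixel-wise sigmoid scores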
-Async inference pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Async inference pipeline +~~~~~~~~~~~~~~~~~~~~~~~~ -The key advantage of the Async API is that when a device is busy with -inference, the application can perform other tasks in parallel (for -example, populating inputs or scheduling other requests) rather than -wait for the current inference to complete first. To understand how to -perform async inference using openvino, refer to the `Async API +The key advantage of the Async +API is that when a device is busy with inference, the application can +perform other tasks in parallel (for example, populating inputs or +scheduling other requests) rather than wait for the current inference to +complete first. To understand how to perform async inference using +openvino, refer to the `Async API tutorial <115-async-api-with-output.html>`__. -Integration preprocessing to model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Integration preprocessing to model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Preprocessing API enables making preprocessing a part of the model reducing application code and dependency on additional image processing diff --git a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_25_1.png b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_25_1.png index 0a3d192dfb9b9a..d28e1510584f5f 100644 --- a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_25_1.png +++ b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_25_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:899a1126af7e881b5b8ad2182133a5334f3c98031a9d1b5d9285a76b44a162fc +oid sha256:62971c6546b399ce0eecbdc81a5cb327c4bebdfa1a6db658e364870db606d761 size 395346 diff --git a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_39_0.png b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_39_0.png index b9851ff336418c..99694eb8f0359f 100644 --- a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_39_0.png +++ b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/120-tensorflow-instance-segmentation-to-openvino-with-output_39_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7d8537cac1002162c69c666fe659effaf3bb23d10d3da276349abe7af2469499 -size 394617 +oid sha256:1359365695e66164137690b56f261eae3364dff68f3e155a3e6b878ff17cea3f +size 393190 diff --git a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/index.html b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/index.html index f0b5ea945592d3..9911d8830b7be5 100644 --- a/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/index.html +++ b/docs/notebooks/120-tensorflow-instance-segmentation-to-openvino-with-output_files/index.html @@ -1,8 +1,8 @@ -Index of 
/projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/120-tensorflow-instance-segmentation-to-openvino-with-output_files/
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/120-tensorflow-instance-segmentation-to-openvino-with-output_files/
-

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/120-tensorflow-instance-segmentation-to-openvino-with-output_files/


../
-120-tensorflow-instance-segmentation-to-openvin..> 31-Oct-2023 00:35              395346
-120-tensorflow-instance-segmentation-to-openvin..> 31-Oct-2023 00:35              394617
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/120-tensorflow-instance-segmentation-to-openvino-with-output_files/


../
+120-tensorflow-instance-segmentation-to-openvin..> 07-Dec-2023 00:49              395346
+120-tensorflow-instance-segmentation-to-openvin..> 07-Dec-2023 00:49              393190
 

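The Async API advantage described in the instance segmentation notebook above can be sketched with ``ov.AsyncInferQueue``. This is a minimal illustration, assuming ``compiled_model`` is the compiled model from the notebook and ``frames`` is a hypothetical iterable of preprocessed inputs.

.. code:: ipython3

    import openvino as ov

    # Keep several requests in flight; while the device runs one request,
    # the application prepares and schedules the next input.
    infer_queue = ov.AsyncInferQueue(compiled_model, jobs=4)

    results = {}

    def on_done(request, frame_id):
        # Copy the first output of the finished request before it is reused.
        results[frame_id] = request.get_output_tensor(0).data.copy()

    infer_queue.set_callback(on_done)

    for frame_id, frame in enumerate(frames):  # `frames` is an assumption
        infer_queue.start_async({0: frame}, userdata=frame_id)

    infer_queue.wait_all()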
diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst index 393738a11682dc..0a69d8cc2ff110 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output.rst @@ -31,8 +31,7 @@ and do inference with a sample image. - `Settings <#settings>`__ - `Download Model from TensorFlow Hub <#download-model-from-tensorflow-hub>`__ -- `Convert Model to OpenVINO - IR <#convert-model-to-openvino-ir>`__ +- `Convert Model to OpenVINO IR <#convert-model-to-openvino-ir>`__ - `Test Inference on the Converted Model <#test-inference-on-the-converted-model>`__ - `Select inference device <#select-inference-device>`__ @@ -51,8 +50,10 @@ and do inference with a sample image. - `Integration preprocessing to model <#integration-preprocessing-to-model>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + Install required packages: @@ -79,8 +80,10 @@ The notebook uses utility functions. The cell below will download the filename="notebook_utils.py", ); -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -97,8 +100,10 @@ Imports # Notebook utils module from notebook_utils import download_file -Settings --------------------------------------------------- +Settings +-------- + + Define model related variables and create corresponding directories: @@ -120,12 +125,14 @@ Define model related variables and create corresponding directories: openvino_ir_path = ir_model_dir / f"{model_name}.xml" - tf_model_url = "https://tfhub.dev/tensorflow/faster_rcnn/resnet50_v1_640x640/1?tf-hub-format=compressed" + tf_model_url = "https://www.kaggle.com/models/tensorflow/faster-rcnn-resnet-v1/frameworks/tensorFlow2/variations/faster-rcnn-resnet50-v1-640x640/versions/1?tf-hub-format=compressed" tf_model_archive_filename = f"{model_name}.tar.gz" -Download Model from TensorFlow Hub ----------------------------------------------------------------------------- +Download Model from TensorFlow Hub +---------------------------------- + + Download archive with TensorFlow Object Detection model (`faster_rcnn_resnet50_v1_640x640 `__) @@ -150,7 +157,7 @@ from TensorFlow Hub: .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/120-tensorflow-object-detection-to-openvino/model/tf/faster_rcnn_resnet50_v1_640x640.tar.gz') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/120-tensorflow-object-detection-to-openvino/model/tf/faster_rcnn_resnet50_v1_640x640.tar.gz') @@ -163,8 +170,10 @@ Extract TensorFlow Object Detection model from the downloaded archive: with tarfile.open(tf_model_dir / tf_model_archive_filename) as file: file.extractall(path=tf_model_dir) -Convert Model to OpenVINO IR ----------------------------------------------------------------------- +Convert Model to OpenVINO IR +---------------------------- + + OpenVINO Model Converter Python API can be used to convert the TensorFlow model to OpenVINO IR. @@ -180,7 +189,7 @@ or saved on disk using the ``save_model`` function to reduce loading time when the model is run in the future. See the `Model Converter Developer -Guide `__ +Guide `__ for more information about Model Converter and TensorFlow `models support `__. 
@@ -191,11 +200,15 @@ support -Get an Image for Test Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get an Image for Test Inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Load and save an image: @@ -324,7 +343,7 @@ Load and save an image: .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/120-tensorflow-object-detection-to-openvino/data/coco_bike.jpg') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/120-tensorflow-object-detection-to-openvino/data/coco_bike.jpg') @@ -352,7 +371,7 @@ Read the image, resize and convert it to the input shape of the network: .. parsed-literal:: - + @@ -360,8 +379,10 @@ Read the image, resize and convert it to the input shape of the network: .. image:: 120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_25_1.png -Perform Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Perform Inference +~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -471,8 +492,10 @@ outputs will be used. image_detections_num: [300.] -Inference Result Visualization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Inference Result Visualization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Define utility functions to visualize the inference results @@ -612,7 +635,7 @@ Zoo `__: .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/120-tensorflow-object-detection-to-openvino/data/coco_91cl.txt') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/120-tensorflow-object-detection-to-openvino/data/coco_91cl.txt') @@ -651,24 +674,29 @@ original test image: .. image:: 120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_38_0.png -Next Steps ----------------------------------------------------- +Next Steps +---------- + + This section contains suggestions on how to additionally improve the performance of your application using OpenVINO. -Async inference pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Async inference pipeline +~~~~~~~~~~~~~~~~~~~~~~~~ -The key advantage of the Async API is that when a device is busy with -inference, the application can perform other tasks in parallel (for -example, populating inputs or scheduling other requests) rather than -wait for the current inference to complete first. To understand how to -perform async inference using openvino, refer to the `Async API +The key advantage of the Async +API is that when a device is busy with inference, the application can +perform other tasks in parallel (for example, populating inputs or +scheduling other requests) rather than wait for the current inference to +complete first. To understand how to perform async inference using +openvino, refer to the `Async API tutorial <115-async-api-with-output.html>`__. 
-Integration preprocessing to model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Integration preprocessing to model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Preprocessing API enables making preprocessing a part of the model reducing application code and dependency on additional image processing diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_25_1.png b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_25_1.png index 0a3d192dfb9b9a..d28e1510584f5f 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_25_1.png +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_25_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:899a1126af7e881b5b8ad2182133a5334f3c98031a9d1b5d9285a76b44a162fc +oid sha256:62971c6546b399ce0eecbdc81a5cb327c4bebdfa1a6db658e364870db606d761 size 395346 diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_38_0.png b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_38_0.png index 33a6b939145c42..c767e2ad495e5a 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_38_0.png +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/120-tensorflow-object-detection-to-openvino-with-output_38_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:df9e84273cf7c5c19bf1c4ea8d6b61372f59b04228a95bacd2fac1b97dde6d4a -size 392067 +oid sha256:ec79c39b72b7a139eca9f5b7081efec33a47fa8c55c9a459340774a045f2525b +size 391797 diff --git a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/index.html b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/index.html index 1faa6594c627b4..e489a487635923 100644 --- a/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/index.html +++ b/docs/notebooks/120-tensorflow-object-detection-to-openvino-with-output_files/index.html @@ -1,8 +1,8 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/120-tensorflow-object-detection-to-openvino-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/120-tensorflow-object-detection-to-openvino-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/120-tensorflow-object-detection-to-openvino-with-output_files/


../
-120-tensorflow-object-detection-to-openvino-wit..> 31-Oct-2023 00:35              395346
-120-tensorflow-object-detection-to-openvino-wit..> 31-Oct-2023 00:35              392067
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/120-tensorflow-object-detection-to-openvino-with-output_files/


../
+120-tensorflow-object-detection-to-openvino-wit..> 07-Dec-2023 00:49              395346
+120-tensorflow-object-detection-to-openvino-wit..> 07-Dec-2023 00:49              391797
 

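The "Integration preprocessing to model" sections of the two notebooks above describe folding preprocessing into the model itself. A minimal sketch with OpenVINO's ``PrePostProcessor`` follows; the u8 NHWC input and the linear resize are illustrative assumptions, not settings taken from the notebooks.

.. code:: ipython3

    import openvino as ov
    from openvino.preprocess import PrePostProcessor, ResizeAlgorithm

    core = ov.Core()
    model = core.read_model(openvino_ir_path)

    ppp = PrePostProcessor(model)
    # Declare what the application will actually feed: a u8 NHWC image
    # of arbitrary spatial size.
    ppp.input().tensor() \
        .set_element_type(ov.Type.u8) \
        .set_layout(ov.Layout("NHWC")) \
        .set_spatial_dynamic_shape()
    # Let OpenVINO convert and resize it to what the model expects.
    ppp.input().preprocess() \
        .convert_element_type(ov.Type.f32) \
        .resize(ResizeAlgorithm.RESIZE_LINEAR)
    ppp.input().model().set_layout(ov.Layout("NHWC"))

    model_with_preprocessing = ppp.build()
    compiled_model = core.compile_model(model_with_preprocessing, device.value)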
diff --git a/docs/notebooks/121-convert-to-openvino-with-output.rst b/docs/notebooks/121-convert-to-openvino-with-output.rst index e0f554470d26a5..20b8583863daa0 100644 --- a/docs/notebooks/121-convert-to-openvino-with-output.rst +++ b/docs/notebooks/121-convert-to-openvino-with-output.rst @@ -2,7 +2,10 @@ OpenVINO™ model conversion API ============================== This notebook shows how to convert a model from original framework -format to OpenVINO Intermediate Representation (IR). Contents: +format to OpenVINO Intermediate Representation (IR). + +**Table of contents:** + - `OpenVINO IR format <#openvino-ir-format>`__ - `IR preparation with Python conversion API and Model Optimizer @@ -38,13 +41,15 @@ format to OpenVINO Intermediate Representation (IR). Contents: .. parsed-literal:: ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. - tensorflow 2.13.1 requires protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3, but you have protobuf 3.20.2 which is incompatible. + tensorflow 2.12.0 requires protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3, but you have protobuf 3.20.2 which is incompatible. Note: you may need to restart the kernel to use updated packages. OpenVINO IR format ------------------ + + OpenVINO `Intermediate Representation (IR) `__ is the proprietary model format of OpenVINO. It is produced after converting a @@ -58,6 +63,8 @@ an ``.xml`` file, containing information about network topology, and a IR preparation with Python conversion API and Model Optimizer command-line tool ------------------------------------------------------------------------------- + + There are two ways to convert a model from the original framework format to OpenVINO IR: Python conversion API and Model Optimizer command-line tool. You can choose one of them based on whichever is most convenient @@ -113,7 +120,7 @@ documentation. conversion into IR. The legacy Frontend is Python based and is available for TensorFlow*, ONNX*, MXNet*, Caffe*, and Kaldi* models. - --input_model INPUT_MODEL, -w INPUT_MODEL, -m INPUT_MODEL + --input_model INPUT_MODEL, -m INPUT_MODEL, -w INPUT_MODEL Tensorflow*: a file with a pre-trained model (binary or text .pb file after freezing). Caffe*: a model proto file with model weights. @@ -678,6 +685,8 @@ documentation. Fetching example models ----------------------- + + This notebook uses two models for conversion examples: - `Distilbert `__ @@ -733,11 +742,11 @@ NLP model from Hugging Face and export it in ONNX format: .. parsed-literal:: - 2023-10-30 23:03:34.054449: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-30 23:03:34.088016: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-06 23:09:08.345195: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. 
+ 2023-12-06 23:09:08.379671: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-30 23:03:34.718197: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py:223: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + 2023-12-06 23:09:09.007347: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py:223: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. mask, torch.tensor(torch.finfo(scores.dtype).min) @@ -976,6 +985,8 @@ Convert PyTorch model to ONNX format: Basic conversion ---------------- + + To convert a model to OpenVINO IR, use the following command: .. code:: ipython3 @@ -1000,8 +1011,8 @@ To convert a model to OpenVINO IR, use the following command: [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. - [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.xml - [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.bin + [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.xml + [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/121-convert-to-openvino/model/distilbert.bin .. code:: ipython3 @@ -1029,6 +1040,8 @@ To convert a model to OpenVINO IR, use the following command: Model conversion parameters --------------------------- + + Both Python conversion API and Model Optimizer command-line tool provide the following capabilities: \* overriding original input shapes for model conversion with ``input`` and ``input_shape`` parameters. `Setting @@ -1055,6 +1068,8 @@ mentioned above to override input shapes and cut the model. 
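The shape-override capability listed above takes one line in the Python conversion API. In this sketch the ONNX path and the static sequence length of 128 are illustrative assumptions; the input names match the DistilBERT model exported earlier in the notebook.

.. code:: ipython3

    import openvino as ov

    # Freeze the dynamic batch and sequence dimensions at conversion time.
    ov_model = ov.convert_model(
        "model/distilbert.onnx",
        input=[("input_ids", [1, 128]), ("attention_mask", [1, 128])],
    )
    ov.save_model(ov_model, "model/distilbert_static.xml")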
Setting Input Shapes ~~~~~~~~~~~~~~~~~~~~ + + Model conversion is supported for models with dynamic input shapes that contain undefined dimensions. However, if the shape of data is not going to change from one inference request to another, it is recommended to @@ -1092,8 +1107,8 @@ guide `__ -- `Get Pytorch model and OpenVINO IR model <#>`__ -- `Define validator and data loader <#>`__ -- `Prepare calibration and validation datasets <#>`__ -- `Prepare validation function <#>`__ -- `Run quantization with accuracy control <#>`__ -- `Compare Performance of the Original and Quantized Models <#>`__ + +- `Prerequisites <#prerequisites>`__ +- `Get Pytorch model and OpenVINO IR + model <#get-pytorch-model-and-openvino-ir-model>`__ + + - `Define validator and data + loader <#define-validator-and-data-loader>`__ + - `Prepare calibration and validation + datasets <#prepare-calibration-and-validation-datasets>`__ + - `Prepare validation function <#prepare-validation-function>`__ + +- `Run quantization with accuracy + control <#run-quantization-with-accuracy-control>`__ +- `Compare Accuracy and Performance of the Original and Quantized + Models <#compare-accuracy-and-performance-of-the-original-and-quantized-models>`__ Prerequisites ----------------------------------- +^^^^^^^^^^^^^ + + Install necessary packages. .. code:: ipython3 %pip install -q "openvino>=2023.1.0" - %pip install "nncf>=2.6.0" - %pip install -q "ultralytics==8.0.43" + %pip install -q "nncf>=2.6.0" + %pip install -q "ultralytics==8.0.43" --extra-index-url https://download.pytorch.org/whl/cpu + +Get Pytorch model and OpenVINO IR model +--------------------------------------- + -Get Pytorch model and OpenVINO IR Model ---------------------------------------------------- Generally, PyTorch models represent an instance of the `torch.nn.Module `__ @@ -115,18 +127,19 @@ Load model. ov_model = ov.Core().read_model(model_path) Define validator and data loader ---------------------------------------------------- - -The original model -repository uses a ``Validator`` wrapper, which represents the accuracy -validation pipeline. It creates dataloader and evaluation metrics and -updates metrics on each data batch produced by the dataloader. Besides -that, it is responsible for data preprocessing and results -postprocessing. For class initialization, the configuration should be -provided. We will use the default setup, but it can be replaced with -some parameters overriding to test on custom data. The model has -connected the ``ValidatorClass`` method, which creates a validator class -instance. +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + + +The original model repository uses a ``Validator`` wrapper, which +represents the accuracy validation pipeline. It creates dataloader and +evaluation metrics and updates metrics on each data batch produced by +the dataloader. Besides that, it is responsible for data preprocessing +and results postprocessing. For class initialization, the configuration +should be provided. We will use the default setup, but it can be +replaced with some parameters overriding to test on custom data. The +model has connected the ``ValidatorClass`` method, which creates a +validator class instance. .. code:: ipython3 @@ -150,7 +163,9 @@ instance. Prepare calibration and validation datasets ---------------------------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + We can use one dataset as calibration and validation datasets. Name it ``quantization_dataset``. 
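Conceptually, the dataset above and the accuracy-aware quantization call described in the following sections wire together as in this sketch. ``data_loader`` and ``transform_fn`` are placeholders for the loader and preprocessing that come from the validator, and ``validation_fn`` is the function prepared later in the notebook.

.. code:: ipython3

    import nncf

    # A single nncf.Dataset serves as both calibration and validation data.
    quantization_dataset = nncf.Dataset(data_loader, transform_fn)

    quantized_model = nncf.quantize_with_accuracy_control(
        ov_model,
        calibration_dataset=quantization_dataset,
        validation_dataset=quantization_dataset,
        validation_fn=validation_fn,
        max_drop=0.01,  # stop while accuracy degradation stays below this
        drop_type=nncf.DropType.ABSOLUTE,
    )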
@@ -176,7 +191,9 @@ We can use one dataset as calibration and validation datasets. Name it Prepare validation function ----------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + .. code:: ipython3 @@ -230,24 +247,22 @@ Prepare validation function validation_fn = partial(validation_ac, validator=validator, log=False) Run quantization with accuracy control ---------------------------------------------------- - -You should provide -the calibration dataset and the validation dataset. It can be the same -dataset. - -- parameter ``max_drop`` defines the accuracy drop threshold. - The quantization process stops when the degradation of accuracy metric - on the validation dataset is less than the ``max_drop``. The default - value is 0.01. NNCF will stop the quantization and report an error if - the ``max_drop`` value can’t be reached. -- ``drop_type`` defines how the - accuracy drop will be calculated: ABSOLUTE (used by default) or - RELATIVE. -- ``ranking_subset_size`` - size of a subset that is used to - rank layers by their contribution to the accuracy drop. Default value is - 300, and the more samples it has the better ranking, potentially. Here - we use the value 25 to speed up the execution. +-------------------------------------- + + + +You should provide the calibration dataset and the validation dataset. +It can be the same dataset. - parameter ``max_drop`` defines the +accuracy drop threshold. The quantization process stops when the +degradation of accuracy metric on the validation dataset is less than +the ``max_drop``. The default value is 0.01. NNCF will stop the +quantization and report an error if the ``max_drop`` value can’t be +reached. - ``drop_type`` defines how the accuracy drop will be +calculated: ABSOLUTE (used by default) or RELATIVE. - +``ranking_subset_size`` - size of a subset that is used to rank layers +by their contribution to the accuracy drop. Default value is 300, and +the more samples it has the better ranking, potentially. Here we use the +value 25 to speed up the execution. **NOTE**: Execution can take tens of minutes and requires up to 15 GB of free memory @@ -321,9 +336,11 @@ dataset. Compare Accuracy and Performance of the Original and Quantized Models --------------------------------------------------------------------- -Now we can compare metrics of the Original non-quantized -OpenVINO IR model and Quantized OpenVINO IR model to make sure that the -``max_drop`` is not exceeded. + + +Now we can compare metrics of the Original non-quantized OpenVINO IR +model and Quantized OpenVINO IR model to make sure that the ``max_drop`` +is not exceeded. .. code:: ipython3 diff --git a/docs/notebooks/123-detectron2-to-openvino-with-output.rst b/docs/notebooks/123-detectron2-to-openvino-with-output.rst index 03019a26cbc2f5..373adb96367f95 100644 --- a/docs/notebooks/123-detectron2-to-openvino-with-output.rst +++ b/docs/notebooks/123-detectron2-to-openvino-with-output.rst @@ -1,5 +1,4 @@ -Convert Detectron2 Models to OpenVINO™ -======================================== +# Convert Detectron2 Models to OpenVINO™ `Detectron2 `__ is Facebook AI Research’s library that provides state-of-the-art detection @@ -16,31 +15,44 @@ using OpenVINO™. We will use ``Faster R-CNN FPN x1`` model and detection and instance segmentation respectively. 
**Table of contents:** --- - -- `Prerequisites <#prerequisites>`__ -- `Define helpers for PyTorch model initialization and conversion <#define-helpers-for-pytorch-model-initialization-and-conversion>`__ -- `Prepare input data <#prepare-input-data>`__ -- `Object Detection <#object-detection>`__ -- `Download PyTorch Detection model <#download-pytorch-detection-model>`__ -- `Convert Detection Model to OpenVINO Intermediate Representation <#convert-detection-model-to-openvino-intermediate-representation>`__ -- `Select inference device <#select-inference-device>`__ -- `Run Detection model inference <#run-detection-model-inference>`__ -- `Instance Segmentation <#instance-segmentation>`__ -- `Download PyTorch Instance Segmentation model <#download-pytorch-instance-segmentation-model>`__ -- `Convert Instance Segmentation Model to OpenVINO Intermediate Representation <#convert-instance-segmentation-model-to-openvino-intermediate-representation>`__ -- `Select inference device <#select-inference-device>`__ -- `Run Instance Segmentation model inference <#run-instance-segmentation-model-inference>`__ + + +- `Prerequisites <#prerequisites>`__ + + - `Define helpers for PyTorch model initialization and + conversion <#define-helpers-for-pytorch-model-initialization-and-conversion>`__ + - `Prepare input data <#prepare-input-data>`__ + +- `Object Detection <#object-detection>`__ + + - `Download PyTorch Detection + model <#download-pytorch-detection-model>`__ + - `Convert Detection Model to OpenVINO Intermediate + Representation <#convert-detection-model-to-openvino-intermediate-representation>`__ + - `Select inference device <#select-inference-device>`__ + - `Run Detection model inference <#run-detection-model-inference>`__ + +- `Instance Segmentation <#instance-segmentation>`__ + + - `Download Instance Segmentation PyTorch + model <#download-instance-segmentation-pytorch-model>`__ + - `Convert Instance Segmentation Model to OpenVINO Intermediate + Representation <#convert-instance-segmentation-model-to-openvino-intermediate-representation>`__ + - `Select inference device <#select-inference-device>`__ + - `Run Instance Segmentation model + inference <#run-instance-segmentation-model-inference>`__ Prerequisites -------------------------------------------------------- +------------- + + Install required packages for running model .. code:: ipython3 %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch torchvision - %pip install -q "git+https://github.com/facebookresearch/detectron2.git" + %pip install -q "git+https://github.com/facebookresearch/detectron2.git" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino>=2023.1.0" @@ -51,8 +63,10 @@ Install required packages for running model Note: you may need to restart the kernel to use updated packages. -Define helpers for PyTorch model initialization and conversion -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Define helpers for PyTorch model initialization and conversion +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Detectron2 provides universal and configurable API for working with models, it means that all steps required for model creation, conversion @@ -139,8 +153,10 @@ simplify model’s structure making it more export-friendly. 
ov_model = ov.convert_model(traceable_model, example_input=sample_input[0]["image"]) return ov_model -Prepare input data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prepare input data +~~~~~~~~~~~~~~~~~~ + + For running model conversion and inference we need to provide example input. The cells below download sample image and apply preprocessing @@ -201,11 +217,15 @@ steps based on model specific transformations defined in model config. Now, when all components required for model conversion are prepared, we can consider how to use them on specific examples. -Object Detection ----------------------------------------------------------- +Object Detection +---------------- + + + +Download PyTorch Detection model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Download PyTorch Detection model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Download faster_rcnn_R_50_FPN_1x from Detectron Model Zoo. @@ -215,8 +235,10 @@ Download faster_rcnn_R_50_FPN_1x from Detectron Model Zoo. model, cfg = get_model_and_config(model_name) sample_input = get_sample_inputs(image_file, cfg) -Convert Detection Model to OpenVINO Intermediate Representation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert Detection Model to OpenVINO Intermediate Representation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Convert model using ``convert_detectron2_model`` function and ``sample_input`` prepared above. After conversion, model saved on disk @@ -232,8 +254,10 @@ directory. else: ov_model = model_xml_path -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -264,6 +288,8 @@ select device from dropdown list for running inference using OpenVINO Run Detection model inference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Load our converted model on selected device and run inference on sample input. @@ -362,6 +388,8 @@ provide helpers for wrapping output in original Detectron2 format. Instance Segmentation --------------------- + + As it was discussed above, Detectron2 provides generic approach for working with models for different use cases. The steps that required to convert and run models pretrained for Instance Segmentation use case @@ -370,6 +398,8 @@ will be very similar to Object Detection. Download Instance Segmentation PyTorch model ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 model_name = "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x" @@ -379,6 +409,8 @@ Download Instance Segmentation PyTorch model Convert Instance Segmentation Model to OpenVINO Intermediate Representation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. 
code:: ipython3 model_xml_path = MODEL_DIR / (model_name.split("/")[-1] + '.xml') @@ -389,8 +421,10 @@ Convert Instance Segmentation Model to OpenVINO Intermediate Representation else: ov_model = model_xml_path -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -410,6 +444,8 @@ select device from dropdown list for running inference using OpenVINO Run Instance Segmentation model inference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + In comparison with Object Detection, Instance Segmentation models have additional output that represents instance masks for each object. Our postprocessing function handle this difference. diff --git a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.jpg b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.jpg index 21179d56bf5ead..cd651b0e2b600e 100644 --- a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.jpg +++ b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.jpg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:856bd51bd1a5dd45f52f6f0f939390f11c5c35b4af4781679a617cb9fe451a41 -size 57591 +oid sha256:d6f08e4b663ac8ea6d0970804111a5ffc4e7aac44f24b2a7121f15c9900fa535 +size 59666 diff --git a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.png b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.png index 22b92c9c1d9db7..6b0bc4fdbc35ed 100644 --- a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.png +++ b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_22_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f3846096706af5b748d9e673831136adb1a5d912a91783b6c3b700edbd0d8359 -size 508592 +oid sha256:a4a06ea24c3a51aead9c90815b867910e7be07e8b8d443c89ad4003695dcf48d +size 508841 diff --git a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.jpg b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.jpg index f589819418c7ae..cbce435a3172e1 100644 --- a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.jpg +++ b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.jpg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8668a57174bdee831756b00f1cc479c75ad4515151d4cbf5adfa8a87be595d77 -size 53100 +oid sha256:cacd033467ea9cdf6ea9db93bb70af3837dd492a870bac492683badc7643ed6d +size 55922 diff --git a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.png b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.png index 9516a78e6dc303..a5e22ebe6d1698 100644 --- a/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.png +++ b/docs/notebooks/123-detectron2-to-openvino-with-output_files/123-detectron2-to-openvino-with-output_32_0.png @@ -1,3 +1,3 @@ version 
https://git-lfs.github.com/spec/v1
-oid sha256:c2913494596627acf2e77e9846889891f2dd348d6162dfdfade8ec0857698361
-size 456596
+oid sha256:0f1347884309a3631eb6eb2c63a44fa995c158b1e747c5230974eaada4fda4d0
+size 459385
diff --git a/docs/notebooks/123-detectron2-to-openvino-with-output_files/index.html b/docs/notebooks/123-detectron2-to-openvino-with-output_files/index.html
index 150ba32dd39328..6ddb39d37c3afe 100644
--- a/docs/notebooks/123-detectron2-to-openvino-with-output_files/index.html
+++ b/docs/notebooks/123-detectron2-to-openvino-with-output_files/index.html
@@ -1,12 +1,12 @@
-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/123-detectron2-to-openvino-with-output_files/
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/123-detectron2-to-openvino-with-output_files/
-

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/123-detectron2-to-openvino-with-output_files/


../
-123-detectron2-to-openvino-with-output_22_0.jpg    31-Oct-2023 00:35               57591
-123-detectron2-to-openvino-with-output_22_0.png    31-Oct-2023 00:35              508592
-123-detectron2-to-openvino-with-output_32_0.jpg    31-Oct-2023 00:35               53100
-123-detectron2-to-openvino-with-output_32_0.png    31-Oct-2023 00:35              456596
-123-detectron2-to-openvino-with-output_8_0.jpg     31-Oct-2023 00:35               46858
-123-detectron2-to-openvino-with-output_8_0.png     31-Oct-2023 00:35              503218
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/123-detectron2-to-openvino-with-output_files/


../
+123-detectron2-to-openvino-with-output_22_0.jpg    07-Dec-2023 00:49               59666
+123-detectron2-to-openvino-with-output_22_0.png    07-Dec-2023 00:49              508841
+123-detectron2-to-openvino-with-output_32_0.jpg    07-Dec-2023 00:49               55922
+123-detectron2-to-openvino-with-output_32_0.png    07-Dec-2023 00:49              459385
+123-detectron2-to-openvino-with-output_8_0.jpg     07-Dec-2023 00:49               46858
+123-detectron2-to-openvino-with-output_8_0.png     07-Dec-2023 00:49              503218
 

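As a recap of the Detectron2 flow spelled out above, the notebook's helpers compose into a few lines. ``get_model_and_config``, ``get_sample_inputs`` and ``convert_detectron2_model`` are the functions defined in the notebook; the Model Zoo identifier for the detection model is an assumption here.

.. code:: ipython3

    import openvino as ov

    model_name = "COCO-Detection/faster_rcnn_R_50_FPN_1x"  # assumed Zoo id
    model, cfg = get_model_and_config(model_name)
    sample_input = get_sample_inputs(image_file, cfg)

    # Tracing-based conversion helper defined earlier in the notebook.
    ov_model = convert_detectron2_model(model, sample_input)
    compiled_model = ov.Core().compile_model(ov_model, device.value)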
diff --git a/docs/notebooks/124-hugging-face-hub-with-output.rst b/docs/notebooks/124-hugging-face-hub-with-output.rst index ca4d046f9648ec..863d6facd0ca71 100644 --- a/docs/notebooks/124-hugging-face-hub-with-output.rst +++ b/docs/notebooks/124-hugging-face-hub-with-output.rst @@ -13,13 +13,13 @@ models, namely |image0| -Throughout this notebook we will learn: - -1. How to load a HF pipeline using the ``transformers`` package and then convert it to OpenVINO. -2. How to load the same pipeline using Optimum Intel package. +Throughout this notebook we will learn: 1. How to load a HF pipeline +using the ``transformers`` package and then convert it to OpenVINO. 2. +How to load the same pipeline using Optimum Intel package. **Table of contents:** + - `Converting a Model from the HF Transformers Package <#converting-a-model-from-the-hf-transformers-package>`__ @@ -97,7 +97,8 @@ Initializing a Model Using the HF Transformers Package -We will use `roberta text sentiment classification `__ +We will use `roberta text sentiment +classification `__ model in our example, it is a transformer-based encoder model pretrained in a special way, please refer to the model card to learn more. @@ -120,7 +121,7 @@ tutorials `__ +We use the OpenVINO `Model +conversion +API `__ to convert the model (this one is implemented in PyTorch) to OpenVINO Intermediate Representation (IR). @@ -235,11 +238,13 @@ original model. This is a rather simple example as the pipeline includes just one encoder model. Contemporary state of the art pipelines often consist of -several model, feel free to explore other OpenVINO tutorials: - -1. `Stable Diffusion v2 `__ -2. `Zero-shot Image Classification with OpenAI CLIP `__ -3. `Controllable Music Generation with MusicGen `__ +several model, feel free to explore other OpenVINO tutorials: 1. `Stable +Diffusion +v2 `__ +2. `Zero-shot Image Classification with OpenAI +CLIP `__ +3. `Controllable Music Generation with +MusicGen `__ The workflow for the ``diffusers`` package is exactly the same. The first example in the list above relies on the ``diffusers``. @@ -292,7 +297,8 @@ the full list of supported devices). For that, just replace the ``AutoModelForXxx`` class with the corresponding ``OVModelForXxx`` class. -You can find more information in `Optimum Intel documentation `__. +You can find more information in `Optimum Intel +documentation `__. .. code:: ipython3 @@ -315,10 +321,10 @@ You can find more information in `Optimum Intel documentation `__ +subpackage contains definitions of models for addressing different +tasks, including: image classification, pixelwise semantic segmentation, +object detection, instance segmentation, person keypoint detection, +video classification, and optical flow. Throughout this notebook we will +show how to use one of them. The LRASPP model is based on the `Searching +for MobileNetV3 `__ paper. According +to the paper, Searching for MobileNetV3, LR-ASPP or Lite Reduced Atrous +Spatial Pyramid Pooling has a lightweight and efficient segmentation +decoder architecture. he model is pre-trained on the `MS +COCO `__ dataset. 
diff --git a/docs/notebooks/125-lraspp-segmentation-with-output.rst b/docs/notebooks/125-lraspp-segmentation-with-output.rst
new file mode 100644
--- /dev/null
+++ b/docs/notebooks/125-lraspp-segmentation-with-output.rst
+The ``torchvision.models``
+subpackage contains definitions of models for addressing different
+tasks, including: image classification, pixelwise semantic segmentation,
+object detection, instance segmentation, person keypoint detection,
+video classification, and optical flow. Throughout this notebook we will
+show how to use one of them. The LRASPP model is based on the `Searching
+for MobileNetV3 `__ paper. According
+to the paper, LR-ASPP, or Lite Reduced Atrous
+Spatial Pyramid Pooling, is a lightweight and efficient segmentation
+decoder architecture. The model is pre-trained on the `MS
+COCO `__ dataset. Instead of training on
+all 80 classes, the segmentation model has been trained on 20 classes
+from the `PASCAL VOC `__
+dataset: **background, aeroplane, bicycle, bird, boat, bottle, bus, car,
+cat, chair, cow, dining table, dog, horse, motorbike, person, potted
+plant, sheep, sofa, train, tv monitor**
+
+More information about the model is available in the `torchvision
+documentation `__.
+
+**Table of contents:**
+
+- `Prerequisites <#prerequisites>`__
+- `Get a test image <#get-a-test-image>`__
+- `Download and prepare a model <#download-and-prepare-a-model>`__
+- `Define a preprocessing and prepare an input
+  data <#define-a-preprocessing-and-prepare-an-input-data>`__
+- `Run an inference on the PyTorch
+  model <#run-an-inference-on-the-pytorch-model>`__
+- `Convert the original model to OpenVINO IR
+  Format <#convert-the-original-model-to-openvino-ir-format>`__
+- `Run an inference on the OpenVINO
+  model <#run-an-inference-on-the-openvino-model>`__
+- `Show results <#show-results>`__
+- `Show results for the OpenVINO IR
+  model <#show-results-for-the-openvino-ir-model>`__
+
+Prerequisites
+-------------------------------------------------------
+
+.. code:: ipython3
+
+    %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch torchvision
+    %pip install -q matplotlib
+    %pip install -q "openvino>=2023.2.0"
+
+
+.. parsed-literal::
+
+    Note: you may need to restart the kernel to use updated packages.
+    Note: you may need to restart the kernel to use updated packages.
+    Note: you may need to restart the kernel to use updated packages.
+
+
+.. code:: ipython3
+
+    from pathlib import Path
+
+    import openvino as ov
+    import torch
+
+Get a test image
+----------------
+
+First of all, let's get a test
+image from an open dataset.
+
+.. code:: ipython3
+
+    import urllib.request
+
+    from torchvision.io import read_image
+    import torchvision.transforms as transforms
+
+
+    img_path = 'cats_image.jpeg'
+    urllib.request.urlretrieve(
+        url='https://huggingface.co/datasets/huggingface/cats-image/resolve/main/cats_image.jpeg',
+        filename=img_path
+    )
+    image = read_image(img_path)
+    display(transforms.ToPILImage()(image))
+
+
+
+.. image:: 125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.png
+
+
+Download and prepare a model
+----------------------------
+
+Define the width and height of the
+image that will be used by the network during inference. According to
+the input transforms function, the model is pre-trained on images with a
+height of 480 and a width of 640.
+
+.. code:: ipython3
+
+    IMAGE_WIDTH = 640
+    IMAGE_HEIGHT = 480
+
+Torchvision provides a mechanism for `listing and retrieving available
+models `__.
+
+.. code:: ipython3
+
+    import torchvision.models as models
+
+    # List available models
+    all_models = models.list_models()
+    # List of models by type
+    segmentation_models = models.list_models(module=models.segmentation)
+
+    print(segmentation_models)
+
+
+.. parsed-literal::
+
+    ['deeplabv3_mobilenet_v3_large', 'deeplabv3_resnet101', 'deeplabv3_resnet50', 'fcn_resnet101', 'fcn_resnet50', 'lraspp_mobilenet_v3_large']
+
+
+We will use ``lraspp_mobilenet_v3_large``. You can get a model by name
+using
+``models.get_model("lraspp_mobilenet_v3_large", weights='DEFAULT')`` or
+call a `corresponding
+function `__
+directly. We will use
+``torchvision.models.segmentation.lraspp_mobilenet_v3_large``.
+You can
+directly pass pre-trained model weights to the model initialization
+function using the weights enum
+``LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1``. These are
+the default weights. To get all available weights for the model you can call
+``weights_enum = models.get_model_weights("lraspp_mobilenet_v3_large")``,
+but there is only one for this model.
+
+.. code:: ipython3
+
+    weights = models.segmentation.LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1
+    model = models.segmentation.lraspp_mobilenet_v3_large(weights=weights)
+
+Define a preprocessing and prepare an input data
+------------------------------------------------
+
+You can use
+``torchvision.transforms`` to perform preprocessing or
+use `preprocessing transforms from the model
+weights `__.
+
+.. code:: ipython3
+
+    import numpy as np
+
+
+    preprocess = models.segmentation.LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1.transforms()
+    preprocess.resize_size = (IMAGE_HEIGHT, IMAGE_WIDTH)  # change to the target image size
+
+    input_data = preprocess(image)
+    input_data = np.expand_dims(input_data, axis=0)
+
+Run an inference on the PyTorch model\
+-------------------------------------------------------------------------------
+
+.. code:: ipython3
+
+    model.eval()
+    with torch.no_grad():
+        result_torch = model(torch.as_tensor(input_data).float())['out']
+
+Convert the original model to OpenVINO IR Format
+------------------------------------------------
+
+
+
+To convert the original model to OpenVINO IR with ``FP16`` precision,
+use the model conversion API. The models are saved inside the current
+directory. For more information on how to convert models, see this
+`page `__.
+
+.. code:: ipython3
+
+    ov_model_xml_path = Path('models/ov_lraspp_model.xml')
+
+
+    if not ov_model_xml_path.exists():
+        ov_model_xml_path.parent.mkdir(parents=True, exist_ok=True)
+        dummy_input = torch.randn(1, 3, IMAGE_HEIGHT, IMAGE_WIDTH)
+        ov_model = ov.convert_model(model, example_input=dummy_input)
+        ov.save_model(ov_model, ov_model_xml_path)
+    else:
+        print(f"IR model {ov_model_xml_path} already exists.")
+
+Run an inference on the OpenVINO model\
+--------------------------------------------------------------------------------
+
+Select a device from the dropdown list for running inference using OpenVINO
+
+.. code:: ipython3
+
+    import ipywidgets as widgets
+
+    core = ov.Core()
+    device = widgets.Dropdown(
+        options=core.available_devices + ["AUTO"],
+        value='AUTO',
+        description='Device:',
+        disabled=False,
+    )
+
+    device
+
+
+
+
+.. parsed-literal::
+
+    Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO')
+
+
+
+Run an inference
+
+.. code:: ipython3
+
+    compiled_model = core.compile_model(ov_model_xml_path, device_name=device.value)
+
+.. code:: ipython3
+
+    res_ir = compiled_model(input_data)[0]
+
+Show results
+------------
+
+Confirm that the segmentation
+results look as expected by comparing model predictions on the OpenVINO
+IR and PyTorch models.
+
+You can use the `PyTorch
+tutorial `__
+to visualize segmentation masks. Below is a simple example of how to
+visualize the image with a ``cat`` mask for the PyTorch model.
+
+.. code:: ipython3
+
+    import torch
+    import matplotlib.pyplot as plt
+
+    import torchvision.transforms.functional as F
+
+
+    plt.rcParams["savefig.bbox"] = 'tight'
+
+
+    def show(imgs):
+        if not isinstance(imgs, list):
+            imgs = [imgs]
+        fix, axs = plt.subplots(ncols=len(imgs), squeeze=False)
+        for i, img in enumerate(imgs):
+            img = img.detach()
+            img = F.to_pil_image(img)
+            axs[0, i].imshow(np.asarray(img))
+            axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
+
+Prepare and display a cat mask.
+
+.. code:: ipython3
+
+    sem_classes = [
+        '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
+        'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
+        'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+    ]
+    sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)}
+
+    normalized_mask = torch.nn.functional.softmax(result_torch, dim=1)
+
+    cat_mask = normalized_mask[0, sem_class_to_idx['cat']]
+
+    show(cat_mask)
+
+
+
+.. image:: 125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_28_0.png
+
+
+The
+`draw_segmentation_masks() `__ function
+can be used to plot those masks on top of the original image. This
+function expects the masks to be boolean masks, but our masks above
+contain probabilities in [0, 1]. To get boolean masks, we can do the
+following:
+
+.. code:: ipython3
+
+    class_dim = 1
+    boolean_cat_mask = (normalized_mask.argmax(class_dim) == sem_class_to_idx['cat'])
+
+And now we can plot a boolean mask on top of the original image.
+
+.. code:: ipython3
+
+    from torchvision.utils import draw_segmentation_masks
+
+    show(draw_segmentation_masks(image, masks=boolean_cat_mask, alpha=0.7, colors='yellow'))
+
+
+
+.. image:: 125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_32_0.png
+
+
+Show results for the OpenVINO IR model\
+--------------------------------------------------------------------------------
+
+.. code:: ipython3
+
+    normalized_mask = torch.nn.functional.softmax(torch.from_numpy(res_ir), dim=1)
+    boolean_cat_mask = (normalized_mask.argmax(class_dim) == sem_class_to_idx['cat'])
+    show(draw_segmentation_masks(image, masks=boolean_cat_mask, alpha=0.7, colors='yellow'))
+
+
+
+..
image:: 125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_34_0.png + diff --git a/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_28_0.png b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_28_0.png new file mode 100644 index 00000000000000..ad37eba7fa0c6e --- /dev/null +++ b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_28_0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b5a1c7f29fc97014c6bdd4eb2604ace96556a4864b7b2175b960a5ed1c62896 +size 87576 diff --git a/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_32_0.png b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_32_0.png new file mode 100644 index 00000000000000..c48d430667b625 --- /dev/null +++ b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_32_0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ae70e272473dddb3c96182bbe79ca0fe1a0d9867ea6b54e91d3322273753233 +size 385522 diff --git a/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_34_0.png b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_34_0.png new file mode 100644 index 00000000000000..25e9953ece7b27 --- /dev/null +++ b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_34_0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04f1d79111e4b949ceb3bd19a51c7453f1bdae0ce5b8b21b748717a93e89891e +size 385520 diff --git a/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.jpg b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.jpg new file mode 100644 index 00000000000000..161db4d6d6bce0 --- /dev/null +++ b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4aeb8d18432656f38f690facffbfa90ad3966dc2113a675025d1231a7c5747b +size 63187 diff --git a/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.png b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.png new file mode 100644 index 00000000000000..d86a9f8cfdfb9b --- /dev/null +++ b/docs/notebooks/125-lraspp-segmentation-with-output_files/125-lraspp-segmentation-with-output_5_0.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62c6c291107839fe807c457757e888c4804eb5ae7dc9459c590ccde8eb216fc0 +size 723717 diff --git a/docs/notebooks/125-lraspp-segmentation-with-output_files/index.html b/docs/notebooks/125-lraspp-segmentation-with-output_files/index.html new file mode 100644 index 00000000000000..e9a0c4468b5ce6 --- /dev/null +++ b/docs/notebooks/125-lraspp-segmentation-with-output_files/index.html @@ -0,0 +1,11 @@ + +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/125-lraspp-segmentation-with-output_files/ + +

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/125-lraspp-segmentation-with-output_files/


../
+125-lraspp-segmentation-with-output_28_0.png       07-Dec-2023 00:49               87576
+125-lraspp-segmentation-with-output_32_0.png       07-Dec-2023 00:49              385522
+125-lraspp-segmentation-with-output_34_0.png       07-Dec-2023 00:49              385520
+125-lraspp-segmentation-with-output_5_0.jpg        07-Dec-2023 00:49               63187
+125-lraspp-segmentation-with-output_5_0.png        07-Dec-2023 00:49              723717
+

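+As a numeric cross-check for the visual comparison in the 125-lraspp
+notebook above, a minimal sketch (``result_torch`` and ``res_ir`` are
+the notebook's variables; the tolerance is an assumption):
+
+.. code:: ipython3
+
+    import numpy as np
+
+    # Compare raw logits from the PyTorch model and the OpenVINO IR model.
+    torch_out = result_torch.numpy()
+    print("max abs difference:", np.abs(torch_out - res_ir).max())
+    # FP16 IR typically stays within a small tolerance of the FP32 original.
+    print("allclose at atol=1e-2:", np.allclose(torch_out, res_ir, atol=1e-2))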
+ diff --git a/docs/notebooks/126-tensorflow-hub-with-output.rst b/docs/notebooks/126-tensorflow-hub-with-output.rst index 2a66974a13cb5c..9949a5d4a6d2a7 100644 --- a/docs/notebooks/126-tensorflow-hub-with-output.rst +++ b/docs/notebooks/126-tensorflow-hub-with-output.rst @@ -21,7 +21,7 @@ You have the flexibility to run this tutorial notebook in its entirety or selectively execute specific sections, as each section operates independently. -**Table of contents:** +**Table of contents:** - `Image classification <#image-classification>`__ - `Install required packages <#install-required-packages>`__ @@ -38,7 +38,6 @@ independently. - `Select inference device <#select-inference-device>`__ - `Inference <#inference>`__ - Image classification -------------------- @@ -70,17 +69,15 @@ Install required packages .. code:: ipython3 %pip install -q tensorflow_hub tensorflow pillow numpy matplotlib - %pip install -q "openvino==2023.2.0.dev20230922" + %pip install -q "openvino>=2023.2.0" .. parsed-literal:: ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. - onnxconverter-common 1.14.0 requires protobuf==3.20.2, but you have protobuf 4.25.0 which is incompatible. - tf2onnx 1.15.1 requires protobuf~=3.20.2, but you have protobuf 4.25.0 which is incompatible. + onnxconverter-common 1.14.0 requires protobuf==3.20.2, but you have protobuf 4.25.1 which is incompatible. + tf2onnx 1.15.1 requires protobuf~=3.20.2, but you have protobuf 4.25.1 which is incompatible. Note: you may need to restart the kernel to use updated packages. - ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. - openvino-dev 2023.1.0 requires openvino==2023.1.0, but you have openvino 2023.2.0.dev20230922 which is incompatible. Note: you may need to restart the kernel to use updated packages. @@ -110,12 +107,12 @@ Import libraries IMAGE_SHAPE = (224, 224) IMAGE_URL, IMAGE_PATH = "https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg", "data/grace_hopper.jpg" - MODEL_URL, MODEL_PATH = "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/classification/5", "models/mobilenet_v2_100_224.xml" + MODEL_URL, MODEL_PATH = "https://www.kaggle.com/models/google/mobilenet-v1/frameworks/tensorFlow2/variations/100-224-classification/versions/2", "models/mobilenet_v2_100_224.xml" Download the classifier ~~~~~~~~~~~~~~~~~~~~~~~ - Select a MobileNetV2 +Select a MobileNetV2 pre-trained model `from TensorFlow Hub `__ and wrap it as a Keras layer with ``hub.KerasLayer``. @@ -127,14 +124,14 @@ and wrap it as a Keras layer with ``hub.KerasLayer``. .. 
parsed-literal:: - 2023-11-14 23:08:14.660883: E tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:266] failed call to cuInit: CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: forward compatibility was attempted on non supported HW - 2023-11-14 23:08:14.661058: E tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:312] kernel version 470.182.3 does not match DSO version 470.223.2 -- cannot find working devices in this configuration + 2023-12-06 23:12:55.307954: E tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:266] failed call to cuInit: CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: forward compatibility was attempted on non supported HW + 2023-12-06 23:12:55.308138: E tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:312] kernel version 470.182.3 does not match DSO version 470.223.2 -- cannot find working devices in this configuration Download a single image to try the model on ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The input ``images`` are +The input ``images`` are expected to have color values in the range [0,1], following the `common image input conventions `__. @@ -319,7 +316,7 @@ Install required packages .. code:: ipython3 %pip install -q tensorflow tensorflow_hub "opencv-python" numpy matplotlib - %pip install -q "openvino==2023.2.0.dev20230922" + %pip install -q "openvino>=2023.2.0" .. parsed-literal:: @@ -351,7 +348,7 @@ Install required packages STYLE_IMAGE_URL = "https://upload.wikimedia.org/wikipedia/commons/b/b4/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg" STYLE_IMAGE_PATH = "./data/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg" - MODEL_URL = "https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2" + MODEL_URL = "https://www.kaggle.com/models/google/arbitrary-image-stylization-v1/frameworks/tensorFlow1/variations/256/versions/2" MODEL_PATH = "./models/arbitrary-image-stylization-v1-256.xml" Load the model diff --git a/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_26_0.png b/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_26_0.png index 7ed328b470ce0f..4f1da92e5284c7 100644 --- a/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_26_0.png +++ b/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_26_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6c0078b72e3696c4265712121725b2db212016519c0e609c0ffafdd98e1dc970 +oid sha256:3050fd069390a5410693d7649a6d666fe41504c7b1543897a83170e6cd8a6a5f size 203738 diff --git a/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_45_0.png b/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_45_0.png index 90d22510a83fc0..f4139ed6d294b2 100644 --- a/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_45_0.png +++ b/docs/notebooks/126-tensorflow-hub-with-output_files/126-tensorflow-hub-with-output_45_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:da6266c3c305c2278d74aa8d9e46c57f6c7d62dfacb92d2d616fc5e00e4cce6c +oid sha256:f34ce33eec0d7a7db4163c8ef99cadbba0249073fa82e923dfc21c3c30c665d0 size 538743 diff --git a/docs/notebooks/126-tensorflow-hub-with-output_files/index.html b/docs/notebooks/126-tensorflow-hub-with-output_files/index.html index 072161cf5c7b17..9d19c72ee8b04f 100644 --- a/docs/notebooks/126-tensorflow-hub-with-output_files/index.html +++ 
b/docs/notebooks/126-tensorflow-hub-with-output_files/index.html @@ -1,10 +1,10 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/126-tensorflow-hub-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/126-tensorflow-hub-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/126-tensorflow-hub-with-output_files/


../
-126-tensorflow-hub-with-output_11_0.jpg            15-Nov-2023 00:43               10479
-126-tensorflow-hub-with-output_11_0.png            15-Nov-2023 00:43               92843
-126-tensorflow-hub-with-output_26_0.png            15-Nov-2023 00:43              203738
-126-tensorflow-hub-with-output_45_0.png            15-Nov-2023 00:43              538743
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/126-tensorflow-hub-with-output_files/


../
+126-tensorflow-hub-with-output_11_0.jpg            07-Dec-2023 00:49               10479
+126-tensorflow-hub-with-output_11_0.png            07-Dec-2023 00:49               92843
+126-tensorflow-hub-with-output_26_0.png            07-Dec-2023 00:49              203738
+126-tensorflow-hub-with-output_45_0.png            07-Dec-2023 00:49              538743
 

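+A minimal sketch of how the pieces defined in the 126-tensorflow-hub
+notebook above likely fit together (``IMAGE_SHAPE``, ``MODEL_URL``, and
+``MODEL_PATH`` are the notebook's variables; the exact cell contents are
+not shown in this hunk):
+
+.. code:: ipython3
+
+    from pathlib import Path
+
+    import tensorflow as tf
+    import tensorflow_hub as hub
+    import openvino as ov
+
+    # Wrap the hub model as a Keras layer with a concrete input shape,
+    # then convert the resulting Keras model to OpenVINO IR.
+    classifier = tf.keras.Sequential([
+        hub.KerasLayer(MODEL_URL, input_shape=IMAGE_SHAPE + (3,))
+    ])
+
+    Path(MODEL_PATH).parent.mkdir(parents=True, exist_ok=True)
+    ov_model = ov.convert_model(classifier)
+    ov.save_model(ov_model, MODEL_PATH)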
diff --git a/docs/notebooks/201-vision-monodepth-with-output.rst b/docs/notebooks/201-vision-monodepth-with-output.rst index bd1172c37c0cec..ca1b1630ed59d3 100644 --- a/docs/notebooks/201-vision-monodepth-with-output.rst +++ b/docs/notebooks/201-vision-monodepth-with-output.rst @@ -55,11 +55,15 @@ Transactions on Pattern Analysis and Machine Intelligence, doi: Video <#do-inference-on-a-video-and-create-monodepth-video>`__ - `Display Monodepth Video <#display-monodepth-video>`__ -Preparation ------------------------------------------------------ +Preparation +----------- + + + +Install requirements +~~~~~~~~~~~~~~~~~~~~ + -Install requirements -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -84,12 +88,14 @@ Install requirements .. parsed-literal:: - ('notebook_utils.py', ) + ('notebook_utils.py', ) + +Imports +~~~~~~~ + -Imports -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -113,8 +119,13 @@ Imports from notebook_utils import download_file, load_image -Download the model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Download the model +~~~~~~~~~~~~~~~~~~ + + + +The model is in the `OpenVINO Intermediate Representation +(IR) `__ format. .. code:: ipython3 @@ -142,8 +153,10 @@ Download the model model/MiDaS_small.bin: 0%| | 0.00/31.6M [00:00 -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/201-vision-monodepth-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/201-vision-monodepth-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/201-vision-monodepth-with-output_files/


../
-201-vision-monodepth-with-output_18_0.png          31-Oct-2023 00:35              959858
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/201-vision-monodepth-with-output_files/


../
+201-vision-monodepth-with-output_18_0.png          07-Dec-2023 00:49              959858
 

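+A minimal sketch of loading the downloaded MiDaS IR model from the
+201-vision-monodepth notebook above (the ``model/MiDaS_small.xml`` path
+is inferred from the download step; the device choice is an assumption):
+
+.. code:: ipython3
+
+    import openvino as ov
+
+    core = ov.Core()
+    # Read the IR (xml + bin pair) and compile it for inference.
+    model = core.read_model("model/MiDaS_small.xml")
+    compiled_model = core.compile_model(model, device_name="AUTO")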
diff --git a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst index 3a53e7b87d63fe..02d4ce98e33fef 100644 --- a/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst +++ b/docs/notebooks/204-segmenter-semantic-segmentation-with-output.rst @@ -24,23 +24,21 @@ Segmenter `__. More about the model and its details can be found in the following paper: `Segmenter: Transformer for Semantic Segmentation `__ or in the -`repository `__. #### Table of -content: - `Get and prepare PyTorch -model <#get-and-prepare-pytorch-model>`__ - -`Prerequisites <#prerequisites>`__ - `Loading PyTorch -model <#loading-pytorch-model>`__ - `Preparing preprocessing and -visualization -functions <#preparing-preprocessing-and-visualization-functions>`__ -- `Preprocessing <#preprocessing>`__ - -`Visualization <#visualization>`__ - `Validation of inference of -original model <#validation-of-inference-of-original-model>`__ - -`Convert PyTorch model to OpenVINO Intermediate Representation -(IR) <#convert-pytorch-model-to-openvino-intermediate-representation-ir>`__ -- `Verify converted model -inference <#verify-converted-model-inference>`__ - `Select -inference device <#select-inference-device>`__ - `Benchmarking -performance of converted -model <#benchmarking-performance-of-converted-model>`__ +`repository `__. + +**Table of contents:** + +- `Get and prepare PyTorch model <#get-and-prepare-pytorch-model>`__ +- `Prerequisites <#prerequisites>`__ +- `Loading PyTorch model <#loading-pytorch-model>`__ +- `Preparing preprocessing and visualization functions <#preparing-preprocessing-and-visualization-functions>`__ +- `Preprocessing <#preprocessing>`__ +- `Visualization <#visualization>`__ +- `Validation of inference of original model <#validation-of-inference-of-original-model>`__ +- `Convert PyTorch model to OpenVINO Intermediate Representation (IR) <#convert-pytorch-model-to-openvino-intermediate-representation-ir>`__ +- `Verify converted model inference <#verify-converted-model-inference>`__ +- `Select inference device <#select-inference-device>`__ +- `Benchmarking performance of converted model <#benchmarking-performance-of-converted-model>`__ .. |Segmenteer diagram| image:: https://user-images.githubusercontent.com/24582831/148507554-87eb80bd-02c7-4c31-b102-c6141e231ec8.png @@ -54,8 +52,10 @@ notebook consists of the following steps: - Validating inference of the converted model - Benchmark performance of the converted model -Get and prepare PyTorch model ------------------------------------------------------------------------ +Get and prepare PyTorch model +----------------------------- + + The first thing we’ll need to do is clone `repository `__ containing model @@ -69,14 +69,16 @@ The code from the repository already contains functions that create model and load weights, but we will need to download config and trained weights (checkpoint) file and add some additional helper functions. -Prerequisites -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prerequisites +~~~~~~~~~~~~~ + + .. code:: ipython3 # Installing requirements %pip install -q "openvino>=2023.1.0" - %pip install -q timm "mmsegmentation==0.30.0" einops "mmcv==1.7.1" "timm == 0.4.12" + %pip install -q timm "mmsegmentation==0.30.0" einops "mmcv==1.7.1" "timm == 0.4.12" --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: @@ -126,7 +128,7 @@ config for our model. Cloning into 'segmenter'... remote: Enumerating objects: 268, done.
remote: Total 268 (delta 0), reused 0 (delta 0), pack-reused 268 - Receiving objects: 100% (268/268), 15.34 MiB | 3.50 MiB/s, done. + Receiving objects: 100% (268/268), 15.34 MiB | 3.51 MiB/s, done. Resolving deltas: 100% (117/117), done. @@ -159,8 +161,10 @@ config for our model. model/variant.yml: 0%| | 0.00/940 [00:00`__ @@ -204,18 +208,22 @@ Load normalization settings from config file. .. parsed-literal:: No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/mmcv/__init__.py:20: UserWarning: On January 1, 2023, MMCV will release v2.0.0, in which it will remove components related to the training process and add a data transformation module. In addition, it will rename the package names mmcv to mmcv-lite and mmcv-full to mmcv. See https://github.com/open-mmlab/mmcv/blob/master/docs/en/compatibility.md for more details. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/mmcv/__init__.py:20: UserWarning: On January 1, 2023, MMCV will release v2.0.0, in which it will remove components related to the training process and add a data transformation module. In addition, it will rename the package names mmcv to mmcv-lite and mmcv-full to mmcv. See https://github.com/open-mmlab/mmcv/blob/master/docs/en/compatibility.md for more details. warnings.warn( -Preparing preprocessing and visualization functions ---------------------------------------------------------------------------------------------- +Preparing preprocessing and visualization functions +--------------------------------------------------- + + Now we will define utility functions for preprocessing and visualizing the results. -Preprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing +~~~~~~~~~~~~~ + + Inference input is tensor with shape ``[1, 3, H, W]`` in ``B, C, H, W`` format, where: @@ -259,8 +267,10 @@ normalized with given mean and standard deviation provided in return im -Visualization -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Visualization +~~~~~~~~~~~~~ + + Inference output contains labels assigned to each pixel, so the output in our case is ``[150, H, W]`` in ``CL, H, W`` format where: @@ -303,8 +313,10 @@ corresponding to the inferred labels. return pil_blend -Validation of inference of original model ------------------------------------------------------------------------------------ +Validation of inference of original model +----------------------------------------- + + Now that we have everything ready, we can perform segmentation on example image ``coco_hollywood.jpg``. @@ -355,8 +367,10 @@ We can see that model segments the image into meaningful parts. Since we are using tiny variant of model, the result is not as good as it is with larger models, but it already shows nice segmentation performance. -Convert PyTorch model to OpenVINO Intermediate Representation (IR) ------------------------------------------------------------------------------------------------------------- +Convert PyTorch model to OpenVINO Intermediate Representation (IR) +------------------------------------------------------------------ + + Now that we’ve verified that the inference of PyTorch model works, we will convert it to OpenVINO IR format. @@ -401,22 +415,24 @@ they are not a problem. .. 
parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:69: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:69: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if H % patch_size > 0: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:71: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:71: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if W % patch_size > 0: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/vit.py:122: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/vit.py:122: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if x.shape[1] != pos_embed.shape[1]: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/decoder.py:100: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/decoder.py:100: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. 
We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! masks = rearrange(masks, "b (h w) n -> b n h w", h=int(GS)) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:85: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:85: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if extra_h > 0: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:87: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/204-segmenter-semantic-segmentation/./segmenter/segm/model/utils.py:87: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if extra_w > 0: -Verify converted model inference --------------------------------------------------------------------------- +Verify converted model inference +-------------------------------- + + To test that model was successfully converted, we can use same inference function from original repository, but we need to make custom class. @@ -483,8 +499,10 @@ any additional custom code required to process input. Now that we have created ``SegmenterOV`` helper class, we can use it in inference function. -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -544,8 +562,10 @@ select device from dropdown list for running inference using OpenVINO As we can see, we get the same results as with original model. -Benchmarking performance of converted model -------------------------------------------------------------------------------------- +Benchmarking performance of converted model +------------------------------------------- + + Finally, use the OpenVINO `Benchmark Tool `__ @@ -590,18 +610,18 @@ to measure the inference performance of the model. [Step 2/11] Loading OpenVINO Runtime [ WARNING ] Default duration 120 seconds is used for unknown device AUTO [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.2.0-12538-e7c1344d3c3 + [ INFO ] Build ................................. 
2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] AUTO - [ INFO ] Build ................................. 2023.2.0-12538-e7c1344d3c3 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(AUTO) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 24.01 ms + [ INFO ] Read model took 22.59 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] im (node: im) : f32 / [...] / [2,3,512,512] @@ -615,7 +635,7 @@ to measure the inference performance of the model. [ INFO ] Model outputs: [ INFO ] y (node: aten::upsample_bilinear2d/Interpolate) : f32 / [...] / [2,150,512,512] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 387.83 ms + [ INFO ] Compile model took 375.79 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: Model0 @@ -647,15 +667,15 @@ to measure the inference performance of the model. [ INFO ] Fill input 'im' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 6 inference requests, limits: 120000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 208.27 ms + [ INFO ] First inference took 209.86 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 1392 iterations - [ INFO ] Duration: 120914.63 ms + [ INFO ] Count: 1692 iterations + [ INFO ] Duration: 120618.19 ms [ INFO ] Latency: - [ INFO ] Median: 520.24 ms - [ INFO ] Average: 520.33 ms - [ INFO ] Min: 364.28 ms - [ INFO ] Max: 586.27 ms - [ INFO ] Throughput: 23.02 FPS + [ INFO ] Median: 427.65 ms + [ INFO ] Average: 426.90 ms + [ INFO ] Min: 189.03 ms + [ INFO ] Max: 509.01 ms + [ INFO ] Throughput: 28.06 FPS diff --git a/docs/notebooks/204-segmenter-semantic-segmentation-with-output_files/index.html b/docs/notebooks/204-segmenter-semantic-segmentation-with-output_files/index.html index 634657998713a6..7706eac7f4d63d 100644 --- a/docs/notebooks/204-segmenter-semantic-segmentation-with-output_files/index.html +++ b/docs/notebooks/204-segmenter-semantic-segmentation-with-output_files/index.html @@ -1,10 +1,10 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/204-segmenter-semantic-segmentation-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/204-segmenter-semantic-segmentation-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/204-segmenter-semantic-segmentation-with-output_files/


../
-204-segmenter-semantic-segmentation-with-output..> 31-Oct-2023 00:35               72352
-204-segmenter-semantic-segmentation-with-output..> 31-Oct-2023 00:35              909669
-204-segmenter-semantic-segmentation-with-output..> 31-Oct-2023 00:35               72372
-204-segmenter-semantic-segmentation-with-output..> 31-Oct-2023 00:35              909654
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/204-segmenter-semantic-segmentation-with-output_files/


../
+204-segmenter-semantic-segmentation-with-output..> 07-Dec-2023 00:49               72352
+204-segmenter-semantic-segmentation-with-output..> 07-Dec-2023 00:49              909669
+204-segmenter-semantic-segmentation-with-output..> 07-Dec-2023 00:49               72372
+204-segmenter-semantic-segmentation-with-output..> 07-Dec-2023 00:49              909654
 

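+Roughly what the benchmark step in the 204-segmenter notebook above
+runs (the model path and time limit here are assumptions; the notebook
+passes its own values, and the 120 s limit matches the log output):
+
+.. code:: ipython3
+
+    # Measure throughput of the converted IR with the OpenVINO Benchmark Tool.
+    !benchmark_app -m ov_model.xml -d AUTO -t 120 -hint throughput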
diff --git a/docs/notebooks/210-slowfast-video-recognition-with-output.rst b/docs/notebooks/210-slowfast-video-recognition-with-output.rst index db2faf795143ea..0f2f5e022938e8 100644 --- a/docs/notebooks/210-slowfast-video-recognition-with-output.rst +++ b/docs/notebooks/210-slowfast-video-recognition-with-output.rst @@ -43,29 +43,34 @@ This tutorial consists of the following steps - `Prepare PyTorch Model <#prepare-pytorch-model>`__ - - `Install necessary - packages <#install-necessary-packages>`__ + - `Install necessary packages <#install-necessary-packages>`__ - `Imports and Settings <#imports-and-settings>`__ - `Export to ONNX <#export-to-onnx>`__ -- `Convert ONNX to OpenVINO™ Intermediate +- `Convert ONNX to OpenVINO Intermediate Representation <#convert-onnx-to-openvino-intermediate-representation>`__ - `Select inference device <#select-inference-device>`__ - `Verify Model Inference <#verify-model-inference>`__ -Prepare PyTorch Model ---------------------------------------------------------------- +Prepare PyTorch Model +--------------------- + + + +Install necessary packages +~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Install necessary packages -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 !pip install -q "openvino>=2023.1.0" - !pip install -q fvcore + !pip install -q fvcore --extra-index-url https://download.pytorch.org/whl/cpu + +Imports and Settings +~~~~~~~~~~~~~~~~~~~~ + -Imports and Settings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -709,8 +714,10 @@ inference using the same. The top 5 predictions can be seen below. Predicted labels: archery, throwing axe, playing paintball, golf driving, riding or walking with horse -Export to ONNX --------------------------------------------------------- +Export to ONNX +-------------- + + Now that we have obtained our trained model and checked inference with it, we export the PyTorch model to Open Neural Network Exchange(ONNX) @@ -733,8 +740,10 @@ quantization. export_params=True, ) -Convert ONNX to OpenVINO Intermediate Representation ----------------------------------------------------------------------------------------------- +Convert ONNX to OpenVINO Intermediate Representation +---------------------------------------------------- + + Now that our ONNX model is ready, we can convert it to IR format. In this format, the network is represented using two files: an ``xml`` file @@ -765,8 +774,10 @@ using the ``weights`` parameter. # read converted model conv_model = core.read_model(str(IR_PATH)) -Select inference device ------------------------------------------------------------------ +Select inference device +----------------------- + + select device from dropdown list for running inference using OpenVINO @@ -797,8 +808,10 @@ select device from dropdown list for running inference using OpenVINO # load model on device compiled_model = core.compile_model(model=conv_model, device_name=device.value) -Verify Model Inference ----------------------------------------------------------------- +Verify Model Inference +---------------------- + + Using the compiled model, we run inference on the same sample video and print the top 5 predictions again. 
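+A minimal sketch of the ONNX-to-IR step described in the 210-slowfast
+notebook above (file names are assumptions; the notebook uses its own
+``IR_PATH`` and reads the model back with ``core.read_model``):
+
+.. code:: ipython3
+
+    import openvino as ov
+
+    # Convert the exported ONNX file to OpenVINO IR and serialize xml/bin.
+    ov_model = ov.convert_model("slowfast_r50.onnx")
+    ov.save_model(ov_model, "slowfast_r50.xml")
+
+    core = ov.Core()
+    compiled_model = core.compile_model(ov_model, device_name="AUTO")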
diff --git a/docs/notebooks/216-attention-center-with-output.rst b/docs/notebooks/216-attention-center-with-output.rst
new file mode 100644
index 00000000000000..39b3c2bb2e05e6
--- /dev/null
+++ b/docs/notebooks/216-attention-center-with-output.rst
@@ -0,0 +1,347 @@
+The attention center model with OpenVINO™
+=========================================
+
+This notebook demonstrates how to use the `attention center
+model `__ with
+OpenVINO. This model is in the `TensorFlow Lite
+format `__, which is now supported in
+OpenVINO by the TFLite frontend.
+
+Eye tracking is commonly used in visual neuroscience and cognitive
+science to answer questions related to visual attention and
+decision making. Computational models that predict where to look have
+direct applications to a variety of computer vision tasks. The attention
+center model takes an RGB image as input and returns a 2D point as
+output. This 2D point is the predicted center of human attention on the
+image, i.e. the most salient part of the image, the part people pay
+attention to first. This allows finding the most visually salient regions
+and handling them as early as possible. For example, it could be used for
+the latest generation image format (such as `JPEG
+XL `__), which supports encoding the
+parts that you pay attention to first. It can help to improve user
+experience; images will appear to load faster.
+
+The attention center model architecture is: > The attention center model is
+a deep neural net, which takes an image as input, and uses a pre-trained
+classification network, e.g., ResNet, MobileNet, etc., as the backbone.
+Several intermediate layers that output from the backbone network are
+used as input for the attention center prediction module. These
+different intermediate layers contain different information e.g.,
+shallow layers often contain low level information like
+intensity/color/texture, while deeper layers usually contain higher and
+more semantic information like shape/object. All are useful for the
+attention prediction. The attention center prediction applies
+convolution, deconvolution and/or resizing operator together with
+aggregation and sigmoid function to generate a weighting map for the
+attention center. And then an operator (the Einstein summation operator
+in our case) can be applied to compute the (gravity) center from the
+weighting map. An L2 norm between the predicted attention center and the
+ground-truth attention center can be computed as the training loss.
+Source: `Google AI blog
+post `__.
+
+.. figure:: https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjxLCDJHzJNjB_von-vFlq8TJJFA41aB85T-QE3ZNxW8kshAf3HOEyIEJ4uggXjbJmZhsdj7j6i6mvvmXtyaxXJPm3JHuKILNRTPfX9KvICbFBRD8KNuDVmLABzYuhQci3BT2BqV-wM54IxaoAV1YDBbnpJC92UZfEBGvakLusiqND2AaPpWPr2gJV1/s1600/image4.png
+   :alt: drawing
+
+   drawing
+
+The attention center model has been trained with images from the `COCO
+dataset `__ annotated with saliency from
+the `SALICON dataset `__.
+
+**Table of contents:**
+
+
+- `Imports <#imports>`__
+- `Download the attention-center
+  model <#download-the-attention-center-model>`__
+
+  - `Convert Tensorflow Lite model to OpenVINO IR
+    format <#convert-tensorflow-lite-model-to-openvino-ir-format>`__
+
+- `Select inference device <#select-inference-device>`__
+- `Prepare image to use with attention-center
+  model <#prepare-image-to-use-with-attention-center-model>`__
+- `Load input image <#load-input-image>`__
+- `Get result with OpenVINO IR
+  model <#get-result-with-openvino-ir-model>`__
+
+.. code:: ipython3
+
+    %pip install "openvino>=2023.2.0"
+
+
+.. parsed-literal::
+
+    Requirement already satisfied: openvino>=2023.2.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2023.2.0)
+    Requirement already satisfied: numpy>=1.16.6 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino>=2023.2.0) (1.23.5)
+    Requirement already satisfied: openvino-telemetry>=2023.2.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino>=2023.2.0) (2023.2.1)
+    DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063
+    Note: you may need to restart the kernel to use updated packages.
+
+
+Imports
+-------
+
+
+
+.. code:: ipython3
+
+    import cv2
+
+    import numpy as np
+    import tensorflow as tf
+    from pathlib import Path
+    import matplotlib.pyplot as plt
+
+    import openvino as ov
+
+
+.. parsed-literal::
+
+    2023-12-06 23:32:10.485958: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+    2023-12-06 23:32:10.520826: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
+    To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
+    2023-12-06 23:32:11.062803: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
+
+
+Download the attention-center model
+-----------------------------------
+
+
+
+Download the model as part of the `attention-center
+repo `__. The repo
+includes the model in the folder ``./model``.
+
+.. code:: ipython3
+
+    if not Path('./attention-center').exists():
+        ! git clone https://github.com/google/attention-center
+
+
+.. parsed-literal::
+
+    Cloning into 'attention-center'...
+    remote: Enumerating objects: 168, done.
+    remote: Counting objects: 100% (168/168), done.
+    remote: Compressing objects: 100% (132/132), done.
+    remote: Total 168 (delta 73), reused 114 (delta 28), pack-reused 0
+    Receiving objects: 100% (168/168), 26.22 MiB | 3.34 MiB/s, done.
+    Resolving deltas: 100% (73/73), done.
+
+
+Convert Tensorflow Lite model to OpenVINO IR format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The attention-center model is a pre-trained model in TensorFlow Lite
+format. In this notebook the model will be converted to OpenVINO IR
+format with the model conversion API. For more information about model
+conversion, see this
+`page `__.
+This step is skipped if the model is already converted.
+
+Also, the TFLite model format is supported in OpenVINO by the TFLite frontend,
+so the model can be passed directly to ``core.read_model()``. You can
+find an example in
+`002-openvino-api `__.
+
+.. code:: ipython3
+
+    tflite_model_path = Path("./attention-center/model/center.tflite")
+
+    ir_model_path = Path("./model/ir_center_model.xml")
+
+    core = ov.Core()
+
+    if not ir_model_path.exists():
+        model = ov.convert_model(tflite_model_path, input=[('image:0', [1,480,640,3], ov.Type.f32)])
+        ov.save_model(model, ir_model_path)
+        print("IR model saved to {}".format(ir_model_path))
+    else:
+        print("Read IR model from {}".format(ir_model_path))
+        model = core.read_model(ir_model_path)
+
+
+.. parsed-literal::
+
+    IR model saved to model/ir_center_model.xml
+
+
+Select inference device
+-----------------------
+
+
+
+Select a device from the dropdown list for running inference using OpenVINO
+
+.. code:: ipython3
+
+    import ipywidgets as widgets
+
+    device = widgets.Dropdown(
+        options=core.available_devices + ["AUTO"],
+        value='AUTO',
+        description='Device:',
+        disabled=False,
+    )
+
+    device
+
+
+
+
+.. parsed-literal::
+
+    Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO')
+
+
+
+.. code:: ipython3
+
+    if "GPU" in device.value:
+        core.set_property(device_name=device.value, properties={'INFERENCE_PRECISION_HINT': ov.Type.f32})
+    compiled_model = core.compile_model(model=model, device_name=device.value)
+
+Prepare image to use with attention-center model
+------------------------------------------------
+
+
+
+The attention-center model takes an RGB image with shape (480, 640) as
+input.
+
+.. code:: ipython3
+
+    class Image():
+        def __init__(self, model_input_image_shape, image_path=None, image=None):
+            self.model_input_image_shape = model_input_image_shape
+            self.image = None
+            self.real_input_image_shape = None
+
+            if image_path is not None:
+                self.image = cv2.imread(str(image_path))
+                self.real_input_image_shape = self.image.shape
+            elif image is not None:
+                self.image = image
+                self.real_input_image_shape = self.image.shape
+            else:
+                raise Exception("Sorry, image can't be found, please, specify image_path or image")
+
+        def prepare_image_tensor(self):
+            rgb_image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
+            resized_image = cv2.resize(rgb_image, (self.model_input_image_shape[1], self.model_input_image_shape[0]))
+
+            image_tensor = tf.constant(np.expand_dims(resized_image, axis=0),
+                                       dtype=tf.float32)
+            return image_tensor
+
+        def scalt_center_to_real_image_shape(self, predicted_center):
+            new_center_y = round(predicted_center[0] * self.real_input_image_shape[1] / self.model_input_image_shape[1])
+            new_center_x = round(predicted_center[1] * self.real_input_image_shape[0] / self.model_input_image_shape[0])
+            return (int(new_center_y), int(new_center_x))
+
+        def draw_attention_center_point(self, predicted_center):
+            image_with_circle = cv2.circle(self.image,
+                                           predicted_center,
+                                           radius=10,
+                                           color=(3, 3, 255),
+                                           thickness=-1)
+            return image_with_circle
+
+        def print_image(self, predicted_center=None):
+            image_to_print = self.image
+            if predicted_center is not None:
+                image_to_print = self.draw_attention_center_point(predicted_center)
+
+            plt.imshow(cv2.cvtColor(image_to_print, cv2.COLOR_BGR2RGB))
+
+Load input image
+----------------
+
+
+
+Upload an input image using the file loading button
+
+.. code:: ipython3
+
+    import ipywidgets as widgets
+
+    load_file_widget = widgets.FileUpload(
+        accept="image/*", multiple=False, description="Image file",
+    )
+
+    load_file_widget
+
+
+
+
+.. parsed-literal::
+
+    FileUpload(value=(), accept='image/*', description='Image file')
+
+
+
+.. code:: ipython3
+
+    import io
+    import PIL
+    from urllib.request import urlretrieve
+
+    img_path = Path("data/coco.jpg")
+    img_path.parent.mkdir(parents=True, exist_ok=True)
+    urlretrieve(
+        "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg",
+        img_path,
+    )
+
+    # read the uploaded image, falling back to the downloaded sample
+    image = PIL.Image.open(io.BytesIO(list(load_file_widget.value.values())[-1]['content'])) if load_file_widget.value else PIL.Image.open(img_path)
+    image = image.convert("RGB")
+
+    input_image = Image((480, 640), image=(np.ascontiguousarray(image)[:, :, ::-1]).astype(np.uint8))
+    image_tensor = input_image.prepare_image_tensor()
+    input_image.print_image()
+
+
+.. parsed-literal::
+
+    2023-12-06 23:32:25.308665: E tensorflow/compiler/xla/stream_executor/cuda/cuda_driver.cc:266] failed call to cuInit: CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: forward compatibility was attempted on non supported HW
+    2023-12-06 23:32:25.308704: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:168] retrieving CUDA diagnostic information for host: iotg-dev-workstation-07
+    2023-12-06 23:32:25.308709: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:175] hostname: iotg-dev-workstation-07
+    2023-12-06 23:32:25.308855: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:199] libcuda reported version is: 470.223.2
+    2023-12-06 23:32:25.308869: I tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:203] kernel reported version is: 470.182.3
+    2023-12-06 23:32:25.308873: E tensorflow/compiler/xla/stream_executor/cuda/cuda_diagnostics.cc:312] kernel version 470.182.3 does not match DSO version 470.223.2 -- cannot find working devices in this configuration
+
+
+
+.. image:: 216-attention-center-with-output_files/216-attention-center-with-output_15_1.png
+
+
+Get result with OpenVINO IR model
+---------------------------------
+
+
+
+.. code:: ipython3
+
+    output_layer = compiled_model.output(0)
+
+    # run inference; the result is in the model input resolution
+    res = compiled_model([image_tensor])[output_layer]
+    # scale the point to the original image resolution
+    predicted_center = input_image.scalt_center_to_real_image_shape(res[0])
+    print(f'Predicted attention center point {predicted_center}')
+    input_image.print_image(predicted_center)
+
+
+.. parsed-literal::
+
+    Predicted attention center point (292, 277)
+
+
+
+..

diff --git a/docs/notebooks/216-attention-center-with-output_files/216-attention-center-with-output_15_1.png b/docs/notebooks/216-attention-center-with-output_files/216-attention-center-with-output_15_1.png
new file mode 100644
index 00000000000000..3f302494abcb62
--- /dev/null
+++ b/docs/notebooks/216-attention-center-with-output_files/216-attention-center-with-output_15_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dd4338c6c163e7693885ce544e8c9cd2aecedf3b136fa295e22877f37b5634c
+size 387941
diff --git a/docs/notebooks/216-attention-center-with-output_files/216-attention-center-with-output_17_1.png b/docs/notebooks/216-attention-center-with-output_files/216-attention-center-with-output_17_1.png
new file mode 100644
index 00000000000000..649b33ce6c0886
--- /dev/null
+++ b/docs/notebooks/216-attention-center-with-output_files/216-attention-center-with-output_17_1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03e8883f23b57427be0e00338ba25397c859132d9c0891ff537cc9b355a3ccca
+size 387905
diff --git a/docs/notebooks/216-attention-center-with-output_files/index.html b/docs/notebooks/216-attention-center-with-output_files/index.html
new file mode 100644
index 00000000000000..f332a8b4e9696e
--- /dev/null
+++ b/docs/notebooks/216-attention-center-with-output_files/index.html
@@ -0,0 +1,8 @@
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/216-attention-center-with-output_files/
+
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/216-attention-center-with-output_files/
+
+../
+216-attention-center-with-output_15_1.png          07-Dec-2023 00:49              387941
+216-attention-center-with-output_17_1.png          07-Dec-2023 00:49              387905
+
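**Note**: the predicted attention center is typically consumed by a
saliency-aware crop. The following is a minimal sketch of such a crop,
not part of the notebook itself; it assumes the ``image`` and
``predicted_center`` values from the cells above, and the window size is
an arbitrary choice.

.. code:: ipython3

    import numpy as np
    import PIL.Image

    def crop_around_center(image: PIL.Image.Image, center, crop_w=480, crop_h=480):
        """Crop a crop_w x crop_h window around `center`, clamped to the image bounds."""
        x, y = center
        w, h = image.size
        left = int(np.clip(x - crop_w // 2, 0, max(w - crop_w, 0)))
        top = int(np.clip(y - crop_h // 2, 0, max(h - crop_h, 0)))
        return image.crop((left, top, left + min(crop_w, w), top + min(crop_h, h)))

    # e.g. crop_around_center(image, predicted_center).save("cropped.jpg")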
+ diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst index dbf2a1ba91d468..4b7c9d1ac0818f 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output.rst @@ -22,7 +22,7 @@ The notebook guides you through the entire process of creating a parallel book: from obtaining raw texts to building a visualization of aligned sentences. Here is the pipeline diagram: -|image1| +|image0| Visualizing the result allows you to identify areas for improvement in the pipeline steps, as indicated in the diagram. @@ -37,10 +37,9 @@ Prerequisites - ``seaborn`` - for alignment matrix visualization - ``ipywidgets`` - for displaying HTML and JS output in the notebook +**Table of contents:** -**Table of contents**: - -- `Get Books <#get-books>`__ +- `Get Books <#get-books>`__ - `Clean Text <#clean-text>`__ - `Split Text <#split-text>`__ - `Get Sentence Embeddings <#get-sentence-embeddings>`__ @@ -48,23 +47,22 @@ Prerequisites - `Optimize the Model with OpenVINO <#optimize-the-model-with-openvino>`__ -- `Calculate Sentence - Alignment <#calculate-sentence-alignment>`__ -- `Postprocess Sentence - Alignment <#postprocess-sentence-alignment>`__ -- `Visualize Sentence - Alignment <#visualize-sentence-alignment>`__ +- `Calculate Sentence Alignment <#calculate-sentence-alignment>`__ +- `Postprocess Sentence Alignment <#postprocess-sentence-alignment>`__ +- `Visualize Sentence Alignment <#visualize-sentence-alignment>`__ - `Speed up Embeddings Computation <#speed-up-embeddings-computation>`__ -.. |image1| image:: https://user-images.githubusercontent.com/51917466/254582697-18f3ab38-e264-4b2c-a088-8e54b855c1b2.png +.. |image0| image:: https://user-images.githubusercontent.com/51917466/254582697-18f3ab38-e264-4b2c-a088-8e54b855c1b2.png .. code:: ipython3 - !pip install -q --extra-index-url https://download.pytorch.org/whl/cpu requests pysbd transformers[torch] "openvino>=2023.1.0" seaborn ipywidgets + !pip install -q --extra-index-url https://download.pytorch.org/whl/cpu requests pysbd transformers[torch] "openvino>=2023.1.0" matplotlib seaborn ipywidgets Get Books ---------------------------------------------------- +--------- + + The first step is to get the books that we will be working with. For this notebook, we will use English and German versions of Anna Karenina @@ -92,7 +90,9 @@ To get the texts, we will pass the IDs to the request.raise_for_status() book_metadata = request.json() - book_url = book_metadata["formats"]["text/plain"] + text_format_key = "text/plain" + text_plain = [k for k in book_metadata["formats"] if k.startswith(text_format_key)] + book_url = book_metadata["formats"][text_plain[0]] return requests.get(book_url).text @@ -135,7 +135,7 @@ Let’s check that we got the right books by showing a part of the texts: - \*\*\* START OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA \*\*\* + *** START OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA *** [Illustration] @@ -189,7 +189,7 @@ which in a raw format looks like this: .. parsed-literal:: - '\ufeffThe Project Gutenberg eBook of Anna Karenina\r\n \r\nThis ebook is for the use of anyone anywhere in the United States and\r\nmost other parts of the world at no cost and with almost no restrictions\r\nwhatsoever. You may copy it, give it away or re-use it under the terms\r\nof the Project Gutenberg License included with this ebook or online\r\nat www.gutenberg.org. 
If you are not located in the United States,\r\nyou will have to check the laws of the country where you are located\r\nbefore using this eBook.\r\n\r\nTitle: Anna Karenina\r\n\r\n\r\nAuthor: graf Leo Tolstoy\r\n\r\nTranslator: Constance Garnett\r\n\r\nRelease date: July 1, 1998 [eBook #1399]\r\n Most recently updated: April 9, 2023\r\n\r\nLanguage: English\r\n\r\n\r\n\r\n\* START OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA \*\r\n[Illustration]\r\n\r\n\r\n\r\n\r\n ANNA KARENINA \r\n\r\n by Leo Tolstoy \r\n\r\n Translated by Constance Garnett \r\n\r\nContents\r\n\r\n\r\n PART ONE\r\n PART TWO\r\n PART THREE\r\n PART FOUR\r\n PART FIVE\r\n PART SIX\r\n PART SEVEN\r\n PART EIGHT\r\n\r\n\r\n\r\n\r\nPART ONE\r\n\r\nChapter 1\r\n\r\n\r\nHappy families are all alike; every unhappy family is unhappy in its\r\nown way.\r\n\r\nEverything was in confusion in the Oblonskys’ house. The wife had\r\ndiscovered that the husband was carrying on an intrigue with a French\r\ngirl, who had been a governess in their family, and she had announced\r\nto her husband that she could not go on living in the same house with\r\nhim. This position of affairs had now lasted three days, and not only\r\nthe husband and wife themselves, but all the me' + '\ufeffThe Project Gutenberg eBook of Anna Karenina\r\n \r\nThis ebook is for the use of anyone anywhere in the United States and\r\nmost other parts of the world at no cost and with almost no restrictions\r\nwhatsoever. You may copy it, give it away or re-use it under the terms\r\nof the Project Gutenberg License included with this ebook or online\r\nat www.gutenberg.org. If you are not located in the United States,\r\nyou will have to check the laws of the country where you are located\r\nbefore using this eBook.\r\n\r\nTitle: Anna Karenina\r\n\r\n\r\nAuthor: graf Leo Tolstoy\r\n\r\nTranslator: Constance Garnett\r\n\r\nRelease date: July 1, 1998 [eBook #1399]\r\n Most recently updated: April 9, 2023\r\n\r\nLanguage: English\r\n\r\n\r\n\r\n\*\*\* START OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA \*\*\*\r\n[Illustration]\r\n\r\n\r\n\r\n\r\n ANNA KARENINA \r\n\r\n by Leo Tolstoy \r\n\r\n Translated by Constance Garnett \r\n\r\nContents\r\n\r\n\r\n PART ONE\r\n PART TWO\r\n PART THREE\r\n PART FOUR\r\n PART FIVE\r\n PART SIX\r\n PART SEVEN\r\n PART EIGHT\r\n\r\n\r\n\r\n\r\nPART ONE\r\n\r\nChapter 1\r\n\r\n\r\nHappy families are all alike; every unhappy family is unhappy in its\r\nown way.\r\n\r\nEverything was in confusion in the Oblonskys’ house. The wife had\r\ndiscovered that the husband was carrying on an intrigue with a French\r\ngirl, who had been a governess in their family, and she had announced\r\nto her husband that she could not go on living in the same house with\r\nhim. This position of affairs had now lasted three days, and not only\r\nthe husband and wife themselves, but all the me' @@ -202,12 +202,14 @@ which in a raw format looks like this: .. parsed-literal:: - '\ufeffThe Project Gutenberg eBook of Anna Karenina, 1. Band\r\n \r\nThis ebook is for the use of anyone anywhere in the United States and\r\nmost other parts of the world at no cost and with almost no restrictions\r\nwhatsoever. You may copy it, give it away or re-use it under the terms\r\nof the Project Gutenberg License included with this ebook or online\r\nat www.gutenberg.org. If you are not located in the United States,\r\nyou will have to check the laws of the country where you are located\r\nbefore using this eBook.\r\n\r\nTitle: Anna Karenina, 1. 
Band\r\n\r\n\r\nCreator: graf Leo Tolstoy\r\n\r\nRelease date: February 18, 2014 [eBook #44956]\r\n\r\nLanguage: German\r\n\r\n\r\n\r\n\*\*\* START OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA, 1. BAND \*\r\n\r\n\r\n\r\nProduced by Norbert H. Langkau, Jens Nordmann and the\r\nOnline Distributed Proofreading Team at http://www.pgdp.net\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n Anna Karenina.\r\n\r\n\r\n Roman aus dem Russischen\r\n\r\n des\r\n\r\n Grafen Leo N. Tolstoi.\r\n\r\n\r\n\r\n Nach der siebenten Auflage übersetzt\r\n\r\n von\r\n\r\n Hans Moser.\r\n\r\n\r\n Erster Band.\r\n\r\n\r\n\r\n Leipzig\r\n\r\n Druck und Verlag von Philipp Reclam jun.\r\n\r\n \* \* \* \* \*\r\n\r\n\r\n\r\n\r\n Erster Teil.\r\n\r\n »Die' + 'The Project Gutenberg EBook of Anna Karenina, 1. Band, by Leo N. Tolstoi\r\n\r\nThis eBook is for the use of anyone anywhere at no cost and with\r\nalmost no restrictions whatsoever. You may copy it, give it away or\r\nre-use it under the terms of the Project Gutenberg License included\r\nwith this eBook or online at www.gutenberg.org\r\n\r\n\r\nTitle: Anna Karenina, 1. Band\r\n\r\nAuthor: Leo N. Tolstoi\r\n\r\nRelease Date: February 18, 2014 [EBook #44956]\r\n\r\nLanguage: German\r\n\r\nCharacter set encoding: ISO-8859-1\r\n\r\n\*\*\* START OF THIS PROJECT GUTENBERG EBOOK ANNA KARENINA, 1. BAND \*\*\*\r\n\r\n\r\n\r\n\r\nProduced by Norbert H. Langkau, Jens Nordmann and the\r\nOnline Distributed Proofreading Team at http://www.pgdp.net\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n Anna Karenina.\r\n\r\n\r\n Roman aus dem Russischen\r\n\r\n des\r\n\r\n Grafen Leo N. Tolstoi.\r\n\r\n\r\n\r\n Nach der siebenten Auflage übersetzt\r\n\r\n von\r\n\r\n Hans Moser.\r\n\r\n\r\n Erster Band.\r\n\r\n\r\n\r\n Leipzig\r\n\r\n Druck und Verlag von Philipp Reclam jun.\r\n\r\n \* \* \* \* \*\r\n\r\n\r\n\r\n\r\n Erster Teil.\r\n\r\n »Die Rache ist mein, ich will vergelten.«\r\n\r\n 1.\r\n\r\n\r\nAlle glücklichen Familien sind einander ähnlich; jede unglücklich' Clean Text ----------------------------------------------------- +---------- + + The downloaded books may contain service information before and after the main text. The text might have different formatting styles and @@ -222,11 +224,11 @@ underscores for potential emphasis or italicization: The next stages of the pipeline will be difficult to complete without cleaning and normalizing the text. Since formatting may differ, manual work is required at this stage. For example, the main content in the -German version is enclosed in ``* * * * *``, so +German version is enclosed in ``* * * * *``, so it is safe to remove everything before the first occurrence and after the last occurrence of these asterisks. - Hint: There are text-cleaning libraries that clean up common + **Hint**: There are text-cleaning libraries that clean up common flaws. If the source of the text is known, you can look for a library designed for that source, for example `gutenberg_cleaner `__. @@ -243,7 +245,7 @@ the last occurrence of these asterisks. start_pattern_en = r"\nPART ONE" anna_karenina_en = re.split(start_pattern_en, anna_karenina_en)[1].strip() - end_pattern_en = "* END OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA *" + end_pattern_en = "*** END OF THE PROJECT GUTENBERG EBOOK ANNA KARENINA ***" anna_karenina_en = anna_karenina_en.split(end_pattern_en)[0].strip() .. code:: ipython3 @@ -333,7 +335,9 @@ needed. Split Text ----------------------------------------------------- +---------- + + Dividing text into sentences is a challenging task in text processing. 
The problem is called `sentence boundary @@ -345,7 +349,7 @@ code `__, as the rules for splitting text into sentences may vary for different languages. - Hint: The ``book_metadata`` obtained from the Gutendex contains + **Hint**: The ``book_metadata`` obtained from the Gutendex contains the language code as well, enabling automation of this part of the pipeline. @@ -373,7 +377,9 @@ languages. Get Sentence Embeddings ------------------------------------------------------------------ +----------------------- + + The next step is to transform sentences into vector representations. Transformer encoder models, like BERT, provide high-quality embeddings @@ -393,12 +399,12 @@ languages. It has the same architecture as the BERT model but has been trained on a different task: to produce identical embeddings for translation pairs. -|image0| +|image01| This makes LaBSE a great choice for our task and it can be reused for different language pairs still producing good results. -.. |image0| image:: https://user-images.githubusercontent.com/51917466/254582913-51531880-373b-40cb-bbf6-1965859df2eb.png%22 +.. |image01| image:: https://user-images.githubusercontent.com/51917466/254582913-51531880-373b-40cb-bbf6-1965859df2eb.png .. code:: ipython3 @@ -414,15 +420,6 @@ different language pairs still producing good results. pt_model = AutoModel.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) - -.. parsed-literal:: - - 2023-09-15 18:53:46.819925: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-09-15 18:53:46.859715: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. - To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-09-15 18:53:47.576875: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - - The model has two outputs: ``last_hidden_state`` and ``pooler_output``. For generating embeddings, you can use either the first vector from the ``last_hidden_state``, which corresponds to the special ``[CLS]`` token, @@ -447,7 +444,7 @@ best fit. return np.vstack(embeddings) else: embeddings = [ - embedding_model(tokenizer(sent, return_tensors="pt"))[ + embedding_model(**tokenizer(sent, return_tensors="pt"))[ "last_hidden_state" ][0][0] for sent in tqdm(sentences, disable=disable_tqdm) @@ -472,7 +469,9 @@ best fit. Optimize the Model with OpenVINO -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The LaBSE model is quite large and can be slow to infer on some hardware, so let’s optimize it with OpenVINO. `Model conversion Python @@ -500,40 +499,6 @@ The converted model must be compiled for the target device using the embeddings_de = get_embeddings(sentences_de, compiled_model) -.. parsed-literal:: - - WARNING:tensorflow:Please fix your imports. Module tensorflow.python.training.tracking.base has been moved to tensorflow.python.trackable.base. The old module will be deleted in version 2.11. - - -.. parsed-literal:: - - [ WARNING ] Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s. - - -.. 
parsed-literal:: - - INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino - WARNING:nncf:NNCF provides best results with torch==2.0.1, while current torch version is 1.13.1+cu117. If you encounter issues, consider switching to torch==2.0.1 - huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks... - To disable this warning, you can either: - - Avoid using `tokenizers` before the fork if possible - - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) - huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks... - To disable this warning, you can either: - - Avoid using `tokenizers` before the fork if possible - - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) - huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks... - To disable this warning, you can either: - - Avoid using `tokenizers` before the fork if possible - - Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false) - - -.. parsed-literal:: - - /home/ea/work/ov_venv/lib/python3.8/site-packages/torch/jit/annotations.py:309: UserWarning: TorchScript will treat type annotations of Tensor dtype-specific subtypes as if they are normal Tensors. dtype constraints are not enforced in compilation either. - warnings.warn("TorchScript will treat type annotations of Tensor " - - .. parsed-literal:: @@ -566,7 +531,9 @@ model predictions remain within an acceptable tolerance: Calculate Sentence Alignment ----------------------------------------------------------------------- +---------------------------- + + With the embedding matrices from the previous step, we can calculate the alignment: 1. Calculate sentence similarity between each pair of @@ -691,7 +658,9 @@ will be lists of German sentence numbers. Postprocess Sentence Alignment ------------------------------------------------------------------------- +------------------------------ + + There are several gaps in the resulting alignment, such as English sentence #14 not mapping to any German sentence. Here are some possible @@ -716,7 +685,9 @@ Most likely, English sentence 14 is part of either German sentence 17 or suitable alignment. Visualize Sentence Alignment ----------------------------------------------------------------------- +---------------------------- + + To evaluate the final alignment and choose the best way to improve the results of the pipeline, we will create an interactive table with HTML @@ -875,7 +846,9 @@ To read the model from disk, use the ``read_model`` method of the ov_model = core.read_model(ov_model_path) Speed up Embeddings Computation -------------------------------------------------------------------------- +------------------------------- + + Let’s see how we can speed up the most computationally complex part of the pipeline - getting embeddings. You might wonder why, when using @@ -960,7 +933,7 @@ advance and fill it in as the inference requests are executed. Let’s compare the models and plot the results. - Note: To get a more accurate benchmark, use the `Benchmark Python + **Note**: To get a more accurate benchmark, use the `Benchmark Python Tool `__ .. code:: ipython3 @@ -1054,12 +1027,6 @@ Let’s compare the models and plot the results. 
ylabel="Sentences Per Second", title=f"Sentence Embeddings Benchmark\n{cpu_name}" ) perf_ratio = benchmark_dataframe.mean() / benchmark_dataframe.mean()[0] - plot.bar_label( - plot.containers[0], - labels=[f"×{ratio:.2f}" for ratio in perf_ratio], - color="white", - label_type="center", - ) plot.spines["right"].set_visible(False) plot.spines["top"].set_visible(False) plot.spines["left"].set_visible(False) diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_31_0.png b/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_31_0.png index b2ceabfb965558..d22e0ec2318693 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_31_0.png +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_31_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ce28e8a2906c5681f16ca683fd2ed51f24efdf0e16b3245ddafe16b9556e28f7 +oid sha256:6eb8ff85598a36eb189f49ad757496939d0c9e57096300f63764a13bbf205cbf size 24464 diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_48_0.png b/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_48_0.png index 02bdaf9d68fca0..ecf3f13078bb95 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_48_0.png +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/220-cross-lingual-books-alignment-with-output_48_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f4597f3d5412fcd4194f85bb9cd30734052a8de052a11eba2b57b741f89bdbeb -size 32175 +oid sha256:86749919eec1fdc021522f51fb2e63fbdc25cde3c0e3e5a3dcaff82f4bf66628 +size 29527 diff --git a/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/index.html b/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/index.html index 687e04a344b339..d9e7525e5f9549 100644 --- a/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/index.html +++ b/docs/notebooks/220-cross-lingual-books-alignment-with-output_files/index.html @@ -1,8 +1,8 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/220-cross-lingual-books-alignment-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/220-cross-lingual-books-alignment-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/220-cross-lingual-books-alignment-with-output_files/
-
-../
-220-cross-lingual-books-alignment-with-output_3..> 31-Oct-2023 00:35               24464
-220-cross-lingual-books-alignment-with-output_4..> 31-Oct-2023 00:35               32175
+
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/220-cross-lingual-books-alignment-with-output_files/
+
+../
+220-cross-lingual-books-alignment-with-output_3..> 07-Dec-2023 00:49               24464
+220-cross-lingual-books-alignment-with-output_4..> 07-Dec-2023 00:49               29527
 
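**Note**: the ``Calculate Sentence Alignment`` section touched by this
diff reduces to a cosine-similarity matrix between the two embedding
matrices followed by a per-row argmax. The sketch below is a simplified
illustration, not the notebook's exact implementation; ``embeddings_en``
and ``embeddings_de`` are assumed to be the matrices computed earlier in
the notebook.

.. code:: ipython3

    import numpy as np

    def cosine_similarity_matrix(a, b):
        """result[i, j] is the cosine similarity between row i of `a` and row j of `b`."""
        a_norm = a / np.linalg.norm(a, axis=1, keepdims=True)
        b_norm = b / np.linalg.norm(b, axis=1, keepdims=True)
        return a_norm @ b_norm.T

    # greedy alignment: for each English sentence pick the most similar German one
    # sim = cosine_similarity_matrix(embeddings_en, embeddings_de)
    # alignment = sim.argmax(axis=1)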
diff --git a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst index 53fd6f3522bd0c..d976db28c19a15 100644 --- a/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst +++ b/docs/notebooks/224-3D-segmentation-point-clouds-with-output.rst @@ -23,14 +23,12 @@ effective, showing strong performance on par or even better than state of the art. **Table of contents:** ---- -- `Imports <#imports>`__ +- `Imports <#imports>`__ - `Prepare the Model <#prepare-the-model>`__ - `Data Processing Module <#data-processing-module>`__ -- `Visualize the original 3D - data <#visualize-the-original-d-data>`__ +- `Visualize the original 3D data <#visualize-the-original-d-data>`__ - `Run inference <#run-inference>`__ - `Select inference device <#select-inference-device>`__ @@ -46,8 +44,10 @@ of the art. Note: you may need to restart the kernel to use updated packages. -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -66,8 +66,10 @@ Imports from notebook_utils import download_file -Prepare the Model ------------------------------------------------------------ +Prepare the Model +----------------- + + Download the pre-trained PointNet ONNX model. This pre-trained model is provided by `axinc-ai `__, and you can @@ -109,8 +111,10 @@ API, see this model = core.read_model(model=ir_model_xml) -Data Processing Module ----------------------------------------------------------------- +Data Processing Module +---------------------- + + .. code:: ipython3 @@ -164,8 +168,10 @@ Data Processing Module return ax -Visualize the original 3D data ------------------------------------------------------------------------- +Visualize the original 3D data +------------------------------ + + The point cloud data can be downloaded from `ShapeNet `__, @@ -197,12 +203,20 @@ chair for example. data/chair.pts: 0%| | 0.00/69.2k [00:00 -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/224-3D-segmentation-point-clouds-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/224-3D-segmentation-point-clouds-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/224-3D-segmentation-point-clouds-with-output_files/
-
-../
-224-3D-segmentation-point-clouds-with-output_11..> 31-Oct-2023 00:35              209355
-224-3D-segmentation-point-clouds-with-output_16..> 31-Oct-2023 00:35              222552
+
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/224-3D-segmentation-point-clouds-with-output_files/
+
+../
+224-3D-segmentation-point-clouds-with-output_11..> 07-Dec-2023 00:49              207243
+224-3D-segmentation-point-clouds-with-output_16..> 07-Dec-2023 00:49              220281
 

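**Note**: after ``core.read_model(model=ir_model_xml)`` the PointNet IR
is used through the standard OpenVINO compile-and-infer pattern. The
sketch below illustrates that pattern only; the model path and the
``1 x 3 x 2500`` input shape are assumptions rather than values taken
from the notebook.

.. code:: ipython3

    import numpy as np
    import openvino as ov

    core = ov.Core()
    model = core.read_model(model="model/pointnet.xml")  # hypothetical IR path
    compiled_model = core.compile_model(model=model, device_name="AUTO")

    points = np.random.rand(1, 3, 2500).astype(np.float32)  # placeholder point cloud
    result = compiled_model([points])[compiled_model.output(0)]
    print(result.shape)  # per-point part scores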
diff --git a/docs/notebooks/226-yolov7-optimization-with-output.rst b/docs/notebooks/226-yolov7-optimization-with-output.rst index 9fa5d0ad5c3d62..1e45938931cbd1 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output.rst +++ b/docs/notebooks/226-yolov7-optimization-with-output.rst @@ -57,8 +57,7 @@ The tutorial consists of the following steps: - `Download dataset <#download-dataset>`__ - `Create dataloader <#create-dataloader>`__ - - `Define validation - function <#define-validation-function>`__ + - `Define validation function <#define-validation-function>`__ - `Optimize model using NNCF Post-training Quantization API <#optimize-model-using-nncf-post-training-quantization-api>`__ @@ -69,8 +68,10 @@ The tutorial consists of the following steps: - `Compare Performance of the Original and Quantized Models <#compare-performance-of-the-original-and-quantized-models>`__ -Get Pytorch model ------------------------------------------------------------ +Get Pytorch model +----------------- + + Generally, PyTorch models represent an instance of the `torch.nn.Module `__ @@ -87,8 +88,10 @@ to obtain pre-trained model: In this case, the model creators provide a tool that enables converting the YOLOv7 model to ONNX, so we do not need to do these steps manually. -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + .. code:: ipython3 @@ -119,11 +122,13 @@ Prerequisites .. parsed-literal:: Cloning into 'yolov7'... - remote: Enumerating objects: 1191, done. - remote: Total 1191 (delta 0), reused 0 (delta 0), pack-reused 1191 - Receiving objects: 100% (1191/1191), 74.23 MiB | 3.77 MiB/s, done. - Resolving deltas: 100% (516/516), done. - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/226-yolov7-optimization/yolov7 + remote: Enumerating objects: 1197, done. + remote: Counting objects: 100% (6/6), done. + remote: Compressing objects: 100% (5/5), done. + remote: Total 1197 (delta 2), reused 3 (delta 1), pack-reused 1191 + Receiving objects: 100% (1197/1197), 74.23 MiB | 3.54 MiB/s, done. + Resolving deltas: 100% (517/517), done. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/226-yolov7-optimization/yolov7 .. code:: ipython3 @@ -148,12 +153,14 @@ Prerequisites .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/226-yolov7-optimization/yolov7/model/yolov7-tiny.pt') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/226-yolov7-optimization/yolov7/model/yolov7-tiny.pt') -Check model inference ---------------------------------------------------------------- +Check model inference +--------------------- + + ``detect.py`` script run pytorch model inference and save image as result, @@ -166,7 +173,7 @@ result, .. parsed-literal:: Namespace(agnostic_nms=False, augment=False, classes=None, conf_thres=0.25, device='', exist_ok=False, img_size=640, iou_thres=0.45, name='exp', no_trace=False, nosave=False, project='runs/detect', save_conf=False, save_txt=False, source='inference/images/horses.jpg', update=False, view_img=False, weights=['model/yolov7-tiny.pt']) - YOLOR 🚀 v0.1-126-g84932d7 torch 1.13.1+cpu CPU + YOLOR 🚀 v0.1-128-ga207844 torch 1.13.1+cpu CPU Fusing layers... Model Summary: 200 layers, 6219709 parameters, 229245 gradients @@ -174,9 +181,9 @@ result, traced_script_module saved! model is traced! 
- 5 horses, Done. (73.0ms) Inference, (0.8ms) NMS + 5 horses, Done. (70.2ms) Inference, (0.8ms) NMS The image with the result is saved in: runs/detect/exp/horses.jpg - Done. (0.086s) + Done. (0.083s) .. code:: ipython3 @@ -192,8 +199,10 @@ result, -Export to ONNX --------------------------------------------------------- +Export to ONNX +-------------- + + To export an ONNX format of the model, we will use ``export.py`` script. Let us check its arguments. @@ -270,7 +279,7 @@ an end2end ONNX model, you can check this Import onnx_graphsurgeon failure: No module named 'onnx_graphsurgeon' Namespace(batch_size=1, conf_thres=0.25, device='cpu', dynamic=False, dynamic_batch=False, end2end=False, fp16=False, grid=True, img_size=[640, 640], include_nms=False, int8=False, iou_thres=0.45, max_wh=None, simplify=False, topk_all=100, weights='model/yolov7-tiny.pt') - YOLOR 🚀 v0.1-126-g84932d7 torch 1.13.1+cpu CPU + YOLOR 🚀 v0.1-128-ga207844 torch 1.13.1+cpu CPU Fusing layers... Model Summary: 200 layers, 6219709 parameters, 6219709 gradients @@ -285,16 +294,16 @@ an end2end ONNX model, you can check this Starting ONNX export with onnx 1.15.0... ONNX export success, saved as model/yolov7-tiny.onnx - Export complete (2.48s). Visualize with https://github.com/lutzroeder/netron. + Export complete (2.50s). Visualize with https://github.com/lutzroeder/netron. -Convert ONNX Model to OpenVINO Intermediate Representation (IR) ---------------------------------------------------------------------------------------------------------- +Convert ONNX Model to OpenVINO Intermediate Representation (IR) +--------------------------------------------------------------- -While ONNX models are directly supported by OpenVINO runtime, it can be -useful to convert them to IR format to take the advantage of OpenVINO -model conversion API features. The ``ov.convert_model`` python function -of `model conversion +While ONNX models are directly +supported by OpenVINO runtime, it can be useful to convert them to IR +format to take the advantage of OpenVINO model conversion API features. +The ``ov.convert_model`` python function of `model conversion API `__ can be used for converting the model. The function returns instance of OpenVINO Model class, which is ready to use in Python interface. @@ -309,15 +318,19 @@ However, it can also be save on device in OpenVINO IR format using # serialize model for saving IR ov.save_model(model, 'model/yolov7-tiny.xml') -Verify model inference ----------------------------------------------------------------- +Verify model inference +---------------------- + + To test model work, we create inference pipeline similar to ``detect.py``. The pipeline consists of preprocessing step, inference of OpenVINO model, and results post-processing to get bounding boxes. -Preprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing +~~~~~~~~~~~~~ + + Model input is a tensor with the ``[1, 3, 640, 640]`` shape in ``N, C, H, W`` format, where @@ -405,8 +418,10 @@ To keep specific shape, preprocessing automatically enables padding. COLORS = {name: [np.random.randint(0, 255) for _ in range(3)] for i, name in enumerate(NAMES)} -Postprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Postprocessing +~~~~~~~~~~~~~~ + + Model output contains detection boxes candidates. It is a tensor with the ``[1,25200,85]`` shape in the ``B, N, 85`` format, where: @@ -484,8 +499,10 @@ algorithm and rescale boxes coordinates to original image size. 
# read converted model model = core.read_model('model/yolov7-tiny.xml') -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -530,11 +547,15 @@ select device from dropdown list for running inference using OpenVINO -Verify model accuracy ---------------------------------------------------------------- +Verify model accuracy +--------------------- + + + +Download dataset +~~~~~~~~~~~~~~~~ + -Download dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ YOLOv7 tiny is pre-trained on the COCO dataset, so in order to evaluate the model accuracy, we need to download it. According to the @@ -576,8 +597,10 @@ the original model evaluation scripts. coco2017labels-segments.zip: 0%| | 0.00/169M [00:00`__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -841,16 +868,64 @@ asymmetric quantization of activations. .. parsed-literal:: - 2023-10-30 23:38:09.707478: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-30 23:38:09.738739: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-06 23:42:22.592237: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-06 23:42:22.623324: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-30 23:38:10.279255: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT - Statistics collection: 100%|██████████| 300/300 [00:38<00:00, 7.73it/s] - Applying Fast Bias correction: 100%|██████████| 58/58 [00:04<00:00, 13.46it/s] + 2023-12-06 23:42:23.162794: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + + + +.. parsed-literal:: + + Output() + + + +.. raw:: html + +

+
+    <!-- rich progress bar rendered as HTML in the original notebook output -->
+
+
+.. parsed-literal::
+
+    Output()
+
+.. parsed-literal::
+
+    /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/experimental/tensor/tensor.py:80: RuntimeWarning: invalid value encountered in multiply
+      return Tensor(self.data * unwrap_tensor_data(other))
+
+
+
+.. raw:: html
+
+    <!-- rich progress bar rendered as HTML in the original notebook output -->
+
+ + + +Validate Quantized model inference +---------------------------------- -Validate Quantized model inference ----------------------------------------------------------------------------- .. code:: ipython3 @@ -879,8 +954,10 @@ Validate Quantized model inference -Validate quantized model accuracy ---------------------------------------------------------------------------- +Validate quantized model accuracy +--------------------------------- + + .. code:: ipython3 @@ -906,15 +983,17 @@ Validate quantized model accuracy .. parsed-literal:: Class Images Labels Precision Recall mAP@.5 mAP@.5:.95 - all 5000 36335 0.637 0.508 0.54 0.353 + all 5000 36335 0.634 0.509 0.54 0.353 As we can see, model accuracy slightly changed after quantization. However, if we look at the output image, these changes are not significant. -Compare Performance of the Original and Quantized Models --------------------------------------------------------------------------------------------------- +Compare Performance of the Original and Quantized Models +-------------------------------------------------------- + + Finally, use the OpenVINO `Benchmark Tool `__ @@ -954,18 +1033,18 @@ models. [Step 2/11] Loading OpenVINO Runtime [ WARNING ] Default duration 120 seconds is used for unknown device AUTO [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] AUTO - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(AUTO) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 13.97 ms + [ INFO ] Read model took 12.83 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] images (node: images) : f32 / [...] / [1,3,640,640] @@ -979,7 +1058,7 @@ models. [ INFO ] Model outputs: [ INFO ] output (node: output) : f32 / [...] / [1,25200,85] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 265.32 ms + [ INFO ] Compile model took 265.01 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: torch_jit @@ -1011,17 +1090,17 @@ models. [ INFO ] Fill input 'images' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 6 inference requests, limits: 120000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 45.17 ms + [ INFO ] First inference took 45.58 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 11544 iterations - [ INFO ] Duration: 120105.63 ms + [ INFO ] Count: 11556 iterations + [ INFO ] Duration: 120096.78 ms [ INFO ] Latency: - [ INFO ] Median: 62.16 ms - [ INFO ] Average: 62.28 ms - [ INFO ] Min: 45.96 ms - [ INFO ] Max: 85.36 ms - [ INFO ] Throughput: 96.12 FPS + [ INFO ] Median: 61.89 ms + [ INFO ] Average: 62.22 ms + [ INFO ] Min: 32.61 ms + [ INFO ] Max: 119.36 ms + [ INFO ] Throughput: 96.22 FPS .. code:: ipython3 @@ -1037,18 +1116,18 @@ models. 
[Step 2/11] Loading OpenVINO Runtime [ WARNING ] Default duration 120 seconds is used for unknown device AUTO [ INFO ] OpenVINO: - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] Device info: [ INFO ] AUTO - [ INFO ] Build ................................. 2023.1.0-12185-9e6b00e51cd-releases/2023/1 + [ INFO ] Build ................................. 2023.2.0-13089-cfd42bd2cb0-HEAD [ INFO ] [ INFO ] [Step 3/11] Setting device configuration [ WARNING ] Performance hint was not explicitly specified in command line. Device(AUTO) performance hint will be set to PerformanceMode.THROUGHPUT. [Step 4/11] Reading model files [ INFO ] Loading model files - [ INFO ] Read model took 23.91 ms + [ INFO ] Read model took 22.04 ms [ INFO ] Original model I/O parameters: [ INFO ] Model inputs: [ INFO ] images (node: images) : f32 / [...] / [1,3,640,640] @@ -1062,7 +1141,7 @@ models. [ INFO ] Model outputs: [ INFO ] output (node: output) : f32 / [...] / [1,25200,85] [Step 7/11] Loading the model to the device - [ INFO ] Compile model took 456.30 ms + [ INFO ] Compile model took 485.86 ms [Step 8/11] Querying optimal runtime parameters [ INFO ] Model: [ INFO ] NETWORK_NAME: torch_jit @@ -1094,15 +1173,15 @@ models. [ INFO ] Fill input 'images' with random values [Step 10/11] Measuring performance (Start inference asynchronously, 6 inference requests, limits: 120000 ms duration) [ INFO ] Benchmarking in inference only mode (inputs filling are not included in measurement loop). - [ INFO ] First inference took 27.69 ms + [ INFO ] First inference took 26.73 ms [Step 11/11] Dumping statistics report [ INFO ] Execution Devices:['CPU'] - [ INFO ] Count: 32700 iterations - [ INFO ] Duration: 120025.10 ms + [ INFO ] Count: 32994 iterations + [ INFO ] Duration: 120021.82 ms [ INFO ] Latency: - [ INFO ] Median: 21.82 ms - [ INFO ] Average: 21.90 ms - [ INFO ] Min: 16.88 ms - [ INFO ] Max: 44.61 ms - [ INFO ] Throughput: 272.44 FPS + [ INFO ] Median: 21.62 ms + [ INFO ] Average: 21.71 ms + [ INFO ] Min: 16.37 ms + [ INFO ] Max: 44.70 ms + [ INFO ] Throughput: 274.90 FPS diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.jpg b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.jpg index fc548e7ce6e1d9..061ea391916f97 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.jpg +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.jpg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:1e31df95b9edc90d3ca7f1df06b6a7e752133edf83b803d5b9ef5f85007fb591 -size 64645 +oid sha256:cb7301ac14bde59c43dcdf46dd8c514c27943137b3a2df9d28bfdcd7d35b9ea7 +size 65007 diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.png b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.png index 74fbc2353dce45..6f65be57ae800d 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.png +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_10_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:5fe329db16f68795e1f73c293b698fee6c9c6e4ef606d36357cb06b805de8af2 
-size 568876 +oid sha256:e21910f8212b672dab36ea007d4777a725d6dd5772c1493743b6b9fe6c1db68a +size 567956 diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.jpg b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.jpg index 87f3623f3cb4cc..6a936d70f5100f 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.jpg +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.jpg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:535860c41070cd771ee02e2983fe7e02a37c4985a2f1f0dabfefb76352db6291 -size 63263 +oid sha256:a1b813c69fdc909df67becf01adc156b5e4f27c83634212e9844fc8610cab20b +size 64089 diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.png b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.png index bd375472d3741f..8864bcdb7e055f 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.png +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_27_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ae19e82dff227f9344c02bd32c02e207e0bb7c0d4e620f8f50013824fa9450c6 -size 574652 +oid sha256:7950e5373569d34d88b5511331eb176e23c16b5ef18b0c6fa803fce3f59fbc4b +size 573726 diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.jpg b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.jpg index 9b8e37417e856f..3396a7851fb416 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.jpg +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.jpg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ccacd9e276d177f6f50c7ea6291ae7a1ee68fceace7af1f1a5a953e46be8855d -size 63193 +oid sha256:54f7b6155834878c6410c267849bdf7847a96d481c345a8c562fc3c97d1925b9 +size 64178 diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.png b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.png index 78b7d1275c44f9..423323f69d5e20 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.png +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/226-yolov7-optimization-with-output_44_0.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d8451206e0966b9e5c60310ad856e84cf1ed9f34d59775c6f33bd1bcb7f8fbe0 -size 574615 +oid sha256:68e8f429263c9d8fe420527d299226f25be192f5aa1a3a159562a59b31525eac +size 573483 diff --git a/docs/notebooks/226-yolov7-optimization-with-output_files/index.html b/docs/notebooks/226-yolov7-optimization-with-output_files/index.html index 125d72c1e321b3..1b1414dfa70d2d 100644 --- a/docs/notebooks/226-yolov7-optimization-with-output_files/index.html +++ b/docs/notebooks/226-yolov7-optimization-with-output_files/index.html @@ -1,12 +1,12 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/226-yolov7-optimization-with-output_files/ +Index of 
/projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/226-yolov7-optimization-with-output_files/
-
-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/226-yolov7-optimization-with-output_files/
-
-../
-226-yolov7-optimization-with-output_10_0.jpg       31-Oct-2023 00:35               64645
-226-yolov7-optimization-with-output_10_0.png       31-Oct-2023 00:35              568876
-226-yolov7-optimization-with-output_27_0.jpg       31-Oct-2023 00:35               63263
-226-yolov7-optimization-with-output_27_0.png       31-Oct-2023 00:35              574652
-226-yolov7-optimization-with-output_44_0.jpg       31-Oct-2023 00:35               63193
-226-yolov7-optimization-with-output_44_0.png       31-Oct-2023 00:35              574615
+
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/226-yolov7-optimization-with-output_files/
+
+../
+226-yolov7-optimization-with-output_10_0.jpg       07-Dec-2023 00:49               65007
+226-yolov7-optimization-with-output_10_0.png       07-Dec-2023 00:49              567956
+226-yolov7-optimization-with-output_27_0.jpg       07-Dec-2023 00:49               64089
+226-yolov7-optimization-with-output_27_0.png       07-Dec-2023 00:49              573726
+226-yolov7-optimization-with-output_44_0.jpg       07-Dec-2023 00:49               64178
+226-yolov7-optimization-with-output_44_0.png       07-Dec-2023 00:49              573483
 
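**Note**: the ``Statistics collection`` and ``Applying Fast Bias
correction`` logs above are produced by NNCF post-training quantization,
and the benchmark output shows the resulting speedup (96.22 FPS for the
FP32 model vs. 274.90 FPS for the INT8 one, roughly 2.9x). The
quantization call itself is outside these hunks; the following is a
hedged sketch of the API involved, with random stand-in calibration data
where the notebook uses its COCO dataloader.

.. code:: ipython3

    import numpy as np
    import nncf
    import openvino as ov

    core = ov.Core()
    model = core.read_model("model/yolov7-tiny.xml")

    # stand-in calibration samples; the notebook feeds preprocessed COCO images instead
    dummy_images = [np.random.rand(1, 3, 640, 640).astype(np.float32) for _ in range(10)]
    calibration_dataset = nncf.Dataset(dummy_images, lambda item: item)

    quantized_model = nncf.quantize(
        model,
        calibration_dataset,
        preset=nncf.QuantizationPreset.MIXED,  # symmetric weights, asymmetric activations
    )
    ov.save_model(quantized_model, "model/yolov7-tiny_int8.xml")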
diff --git a/docs/notebooks/227-whisper-convert-with-output.rst b/docs/notebooks/227-whisper-convert-with-output.rst index d420c4b6be06fc..8998293609e811 100644 --- a/docs/notebooks/227-whisper-convert-with-output.rst +++ b/docs/notebooks/227-whisper-convert-with-output.rst @@ -26,6 +26,7 @@ Whisper pipeline with OpenVINO models. **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Instantiate model <#instantiate-model>`__ @@ -54,10 +55,10 @@ Install dependencies. .. code:: ipython3 %pip install -q "openvino>=2023.1.0" - %pip install -q "python-ffmpeg<=1.0.16" moviepy transformers + %pip install -q "python-ffmpeg<=1.0.16" moviepy transformers --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q -I "git+https://github.com/garywu007/pytube.git" %pip install -q -U gradio - %pip install -q -I "git+https://github.com/openai/whisper.git@fcfeaf1b61994c071bba62da47d7846933576ac9" + %pip install -q -I "git+https://github.com/openai/whisper.git@fcfeaf1b61994c071bba62da47d7846933576ac9" --extra-index-url https://download.pytorch.org/whl/cpu Instantiate model ----------------- @@ -563,7 +564,7 @@ Interactive demo .. .. raw:: html -..
+..
.. parsed-literal:: diff --git a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst index 4bc1bb3daf5b49..a6ee3609e825fc 100644 --- a/docs/notebooks/229-distilbert-sequence-classification-with-output.rst +++ b/docs/notebooks/229-distilbert-sequence-classification-with-output.rst @@ -20,39 +20,41 @@ sequence classification model using OpenVINO. - `Inference <#inference>`__ - - `For a single input - sentence <#for-a-single-input-sentence>`__ + - `For a single input sentence <#for-a-single-input-sentence>`__ - `Read from a text file <#read-from-a-text-file>`__ -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 - %pip install "openvino>=2023.1.0" transformers + %pip install "openvino>=2023.1.0" transformers --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: - Requirement already satisfied: openvino>=2023.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2023.1.0) - Requirement already satisfied: transformers in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (4.34.1) - Requirement already satisfied: numpy>=1.16.6 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino>=2023.1.0) (1.24.3) - Requirement already satisfied: openvino-telemetry>=2023.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino>=2023.1.0) (2023.2.1) - Requirement already satisfied: filelock in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (3.13.1) - Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (0.17.3) - Requirement already satisfied: packaging>=20.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (23.2) - Requirement already satisfied: pyyaml>=5.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (6.0.1) - Requirement already satisfied: regex!=2019.12.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (2023.10.3) - Requirement already satisfied: requests in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (2.31.0) - Requirement already satisfied: tokenizers<0.15,>=0.14 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (0.14.1) - Requirement already satisfied: safetensors>=0.3.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (0.4.0) - Requirement already satisfied: tqdm>=4.27 in 
/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (4.66.1) - Requirement already satisfied: fsspec in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (2023.10.0) - Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (4.8.0) - Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (3.3.1) - Requirement already satisfied: idna<4,>=2.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (3.4) - Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (2.0.7) - Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (2023.7.22) + Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cpu + Requirement already satisfied: openvino>=2023.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2023.2.0) + Requirement already satisfied: transformers in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (4.35.2) + Requirement already satisfied: numpy>=1.16.6 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino>=2023.1.0) (1.23.5) + Requirement already satisfied: openvino-telemetry>=2023.2.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino>=2023.1.0) (2023.2.1) + Requirement already satisfied: filelock in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (3.13.1) + Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (0.19.4) + Requirement already satisfied: packaging>=20.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (23.2) + Requirement already satisfied: pyyaml>=5.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (6.0.1) + Requirement already satisfied: regex!=2019.12.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (2023.10.3) + Requirement already satisfied: requests in 
/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (2.31.0) + Requirement already satisfied: tokenizers<0.19,>=0.14 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (0.15.0) + Requirement already satisfied: safetensors>=0.3.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (0.4.1) + Requirement already satisfied: tqdm>=4.27 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from transformers) (4.66.1) + Requirement already satisfied: fsspec>=2023.5.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (2023.10.0) + Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from huggingface-hub<1.0,>=0.16.4->transformers) (4.8.0) + Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (3.3.2) + Requirement already satisfied: idna<4,>=2.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (3.6) + Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (2.1.0) + Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests->transformers) (2023.11.17) DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 Note: you may need to restart the kernel to use updated packages. @@ -66,8 +68,10 @@ Imports import numpy as np import openvino as ov -Initializing the Model ----------------------------------------------------------------- +Initializing the Model +---------------------- + + We will use the transformer-based `DistilBERT base uncased finetuned SST-2 `__ @@ -80,8 +84,10 @@ model from Hugging Face. pretrained_model_name_or_path=checkpoint ) -Initializing the Tokenizer --------------------------------------------------------------------- +Initializing the Tokenizer +-------------------------- + + Text Preprocessing cleans the text-based input data so it can be fed into the model. @@ -100,8 +106,10 @@ understand the context of a sentence. 
Here, we will use pretrained_model_name_or_path=checkpoint ) -Convert Model to OpenVINO Intermediate Representation format ------------------------------------------------------------------------------------------------------- +Convert Model to OpenVINO Intermediate Representation format +------------------------------------------------------------ + + `Model conversion API `__ @@ -131,7 +139,7 @@ optimal execution on end-point target devices. .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py:223: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py:223: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. mask, torch.tensor(torch.finfo(scores.dtype).min) @@ -149,8 +157,10 @@ documentation. `__ - - `Convert model to OpenVINO - IR <#convert-model-to-openvino-ir>`__ + - `Convert model to OpenVINO IR <#convert-model-to-openvino-ir>`__ - `Verify model inference <#verify-model-inference>`__ - `Preprocessing <#preprocessing>`__ - `Postprocessing <#postprocessing>`__ @@ -47,8 +41,7 @@ The tutorial consists of the following steps: - `Download the validation dataset <#download-the-validation-dataset>`__ - - `Define validation - function <#define-validation-function>`__ + - `Define validation function <#define-validation-function>`__ - `Configure Validator helper and create DataLoader <#configure-validator-helper-and-create-dataloader>`__ @@ -66,15 +59,16 @@ The tutorial consists of the following steps: - `Validate quantized model accuracy <#validate-quantized-model-accuracy>`__ -- `Other ways to optimize - model <#other-ways-to-optimize-model>`__ +- `Other ways to optimize model <#other-ways-to-optimize-model>`__ - `Live demo <#live-demo>`__ - `Run Live Object Detection and Segmentation <#run-live-object-detection-and-segmentation>`__ -Get PyTorch model ------------------------------------------------------------ +Get PyTorch model +----------------- + + Generally, PyTorch models represent an instance of the `torch.nn.Module `__ @@ -92,15 +86,17 @@ In this case, the creators of the model provide an API that enables converting the YOLOv8 model to ONNX and then to OpenVINO IR. Therefore, we do not need to do these steps manually. -Prerequisites -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Prerequisites +^^^^^^^^^^^^^ + + Install necessary packages. .. code:: ipython3 %pip install -q "openvino>=2023.1.0" "nncf>=2.5.0" - %pip install "ultralytics==8.0.43" onnx + %pip install -q "torch>=2.1" "torchvision>=0.16" "ultralytics==8.0.43" onnx --extra-index-url https://download.pytorch.org/whl/cpu Import required utility functions. The lower cell will download the ``notebook_utils`` Python module from GitHub. 
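In practice that download step looks roughly like the sketch below. The raw-file URL and the ``download_file`` helper name are assumptions based on the usual layout of the openvino_notebooks repository, not something taken from this diff:

.. code:: ipython3

    # Hedged sketch: fetch the notebook_utils module the way these notebooks
    # usually do. The URL below is an assumption, not part of this diff.
    from urllib.request import urlretrieve

    urlretrieve(
        "https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py",
        filename="notebook_utils.py",
    )

    from notebook_utils import download_file  # helper used by later cells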
@@ -201,8 +197,10 @@ Define utility functions for drawing results -Instantiate model ------------------------------------------------------------ +Instantiate model +----------------- + + For loading the model, required to specify a path to the model checkpoint. It can be some local path or name available on models hub @@ -255,8 +253,10 @@ Let us consider the examples: -Convert model to OpenVINO IR -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert model to OpenVINO IR +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + YOLOv8 provides API for convenient model exporting to different formats including OpenVINO IR. ``model.export`` is responsible for model @@ -270,15 +270,19 @@ preserve dynamic shapes in the model. if not seg_model_path.exists(): seg_model.export(format="openvino", dynamic=True, half=False) -Verify model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Verify model inference +~~~~~~~~~~~~~~~~~~~~~~ + + To test model work, we create inference pipeline similar to ``model.predict`` method. The pipeline consists of preprocessing step, inference of OpenVINO model and results post-processing to get results. -Preprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing +~~~~~~~~~~~~~ + + Model input is a tensor with the ``[-1, 3, -1, -1]`` shape in the ``N, C, H, W`` format, where \* ``N`` - number of images in batch (batch @@ -391,8 +395,10 @@ To keep a specific shape, preprocessing automatically enables padding. input_tensor = np.expand_dims(input_tensor, 0) return input_tensor -Postprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Postprocessing +~~~~~~~~~~~~~~ + + The model output contains: - detection boxes candidates - proto mask candidates @@ -488,8 +494,10 @@ batch size - ``C`` - number of candidates - ``H`` - mask height - ``W`` results.append({"det": pred[:, :6].numpy(), "segment": segments}) return results -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + Select device from dropdown list for running inference using OpenVINO @@ -518,8 +526,10 @@ Select device from dropdown list for running inference using OpenVINO -Test on single image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Test on single image +~~~~~~~~~~~~~~~~~~~~ + + Now, once we have defined preprocessing and postprocessing steps, we are ready to check model prediction. @@ -569,15 +579,19 @@ ready to check model prediction. Great! The result is the same, as produced by original models. -Check model accuracy on the dataset ------------------------------------------------------------------------------ +Check model accuracy on the dataset +----------------------------------- + + For comparing the optimized model result with the original, it is good to know some measurable results in terms of model accuracy on the validation dataset. -Download the validation dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Download the validation dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + YOLOv8 is pre-trained on the COCO dataset, so to evaluate the model accuracy we need to download it. According to the instructions provided @@ -628,8 +642,10 @@ evaluation function. 
/home/ea/work/openvino_notebooks/notebooks/230-yolov8-optimization/datasets/coco.yaml: 0%| | 0.00/1… -Define validation function -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Define validation function +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -696,8 +712,10 @@ Define validation function pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format print(pf % ('all', total_images, total_objects, s_mp, s_mr, s_map50, s_mean_ap)) -Configure Validator helper and create DataLoader -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Configure Validator helper and create DataLoader +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The original model repository uses a ``Validator`` wrapper, which represents the accuracy validation pipeline. It creates dataloader and @@ -791,8 +809,10 @@ subset difference. *To validate the models on the full dataset set IOU threshold, ``mAP@.5:.95`` - is calculated on range IOU thresholds from 0.5 to 0.95 with step 0.05. -Optimize model using NNCF Post-training Quantization API --------------------------------------------------------------------------------------------------- +Optimize model using NNCF Post-training Quantization API +-------------------------------------------------------- + + `NNCF `__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -914,8 +934,10 @@ point precision, using the ``ignored_scope`` parameter. Quantized segmentation model will be saved to models/yolov8n-seg_openvino_int8_model/yolov8n-seg.xml -Validate Quantized model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate Quantized model inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ``nncf.quantize`` returns the OpenVINO Model class instance, which is suitable for loading on a device for making predictions. ``INT8`` model @@ -955,13 +977,16 @@ on the image. -Compare the Original and Quantized Models ------------------------------------------------------------------------------------ +Compare the Original and Quantized Models +----------------------------------------- + -Compare performance of the Original and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Finally, use the OpenVINO `Benchmark +Compare performance of the Original and Quantized Models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Finally, use the OpenVINO +`Benchmark Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. @@ -1158,8 +1183,10 @@ models. [ INFO ] Throughput: 293.15 FPS -Validate quantized model accuracy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate quantized model accuracy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + As we can see, there is no significant difference between ``INT8`` and float model result in a single image test. To understand how @@ -1205,8 +1232,10 @@ accuracy on a dataset. Great! Looks like accuracy was changed, but not significantly and it meets passing criteria. -Other ways to optimize model ----------------------------------------------------------------------- +Other ways to optimize model +---------------------------- + + The performance could be also improved by another OpenVINO method such as async inference pipeline or preprocessing API. @@ -1230,10 +1259,12 @@ utilization. 
For more information, refer to the overview of tutorial <118-optimize-preprocessing-with-output.html>`__. To see, how it could be used with YOLOV8 object detection model , please, see `Convert and Optimize YOLOv8 real-time object detection with -OpenVINO tutorial <230-yolov8-object-detection-with-output.html>`__ +OpenVINO tutorial <./230-yolov8-object-detection.ipynb>`__ + +Live demo +--------- + -Live demo ---------------------------------------------------- The following code runs model inference on a video: @@ -1339,8 +1370,10 @@ The following code runs model inference on a video: if use_popup: cv2.destroyAllWindows() -Run Live Object Detection and Segmentation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Live Object Detection and Segmentation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Use a webcam as the video input. By default, the primary webcam is set with \ ``source=0``. If you have multiple webcams, each one will be diff --git a/docs/notebooks/230-yolov8-instance-segmentation-with-output_files/index.html b/docs/notebooks/230-yolov8-instance-segmentation-with-output_files/index.html index 3c7b8dfd83b309..05ae4f77b6c8e5 100644 --- a/docs/notebooks/230-yolov8-instance-segmentation-with-output_files/index.html +++ b/docs/notebooks/230-yolov8-instance-segmentation-with-output_files/index.html @@ -1,13 +1,13 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/230-yolov8-instance-segmentation-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/230-yolov8-instance-segmentation-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/230-yolov8-instance-segmentation-with-output_files/


../
-230-yolov8-instance-segmentation-with-output_11..> 31-Oct-2023 00:35               81079
-230-yolov8-instance-segmentation-with-output_11..> 31-Oct-2023 00:35              790288
-230-yolov8-instance-segmentation-with-output_22..> 31-Oct-2023 00:35              104433
-230-yolov8-instance-segmentation-with-output_22..> 31-Oct-2023 00:35              919053
-230-yolov8-instance-segmentation-with-output_44..> 31-Oct-2023 00:35              103935
-230-yolov8-instance-segmentation-with-output_44..> 31-Oct-2023 00:35              918316
-230-yolov8-instance-segmentation-with-output_60..> 31-Oct-2023 00:35              495438
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/230-yolov8-instance-segmentation-with-output_files/


../
+230-yolov8-instance-segmentation-with-output_11..> 07-Dec-2023 00:49               81079
+230-yolov8-instance-segmentation-with-output_11..> 07-Dec-2023 00:49              790288
+230-yolov8-instance-segmentation-with-output_22..> 07-Dec-2023 00:49              104433
+230-yolov8-instance-segmentation-with-output_22..> 07-Dec-2023 00:49              919053
+230-yolov8-instance-segmentation-with-output_44..> 07-Dec-2023 00:49              103935
+230-yolov8-instance-segmentation-with-output_44..> 07-Dec-2023 00:49              918316
+230-yolov8-instance-segmentation-with-output_60..> 07-Dec-2023 00:49              495438
 

diff --git a/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst b/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst index b2064339cd612c..e79775191c5464 100644 --- a/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst +++ b/docs/notebooks/230-yolov8-keypoint-detection-with-output.rst @@ -12,17 +12,12 @@ optimize `PyTorch YOLOv8 Pose model `__ with OpenVINO. We consider the steps required for keypoint detection scenario. -The tutorial consists of the following steps: - -- Prepare the PyTorch model. -- Download and prepare a dataset. -- Validate the original model. -- Convert the PyTorch model to OpenVINO IR. -- Validate the converted model. -- Prepare and run optimization pipeline. -- Compare performance of the FP32 and quantized models. -- Compare accuracy of the FP32 and quantized models. -- Live demo +The tutorial consists of the following steps: - Prepare the PyTorch +model. - Download and prepare a dataset. - Validate the original model. +- Convert the PyTorch model to OpenVINO IR. - Validate the converted +model. - Prepare and run optimization pipeline. - Compare performance of +the FP32 and quantized models. - Compare accuracy of the FP32 and +quantized models. - Live demo **Table of contents:** @@ -33,8 +28,7 @@ The tutorial consists of the following steps: - `Instantiate model <#instantiate-model>`__ - - `Convert model to OpenVINO - IR <#convert-model-to-openvino-ir>`__ + - `Convert model to OpenVINO IR <#convert-model-to-openvino-ir>`__ - `Verify model inference <#verify-model-inference>`__ - `Preprocessing <#preprocessing>`__ - `Postprocessing <#postprocessing>`__ @@ -46,8 +40,7 @@ The tutorial consists of the following steps: - `Download the validation dataset <#download-the-validation-dataset>`__ - - `Define validation - function <#define-validation-function>`__ + - `Define validation function <#define-validation-function>`__ - `Configure Validator helper and create DataLoader <#configure-validator-helper-and-create-dataloader>`__ @@ -65,15 +58,16 @@ The tutorial consists of the following steps: - `Compare accuracy of the Original and Quantized Models <#compare-accuracy-of-the-original-and-quantized-models>`__ -- `Other ways to optimize - model <#other-ways-to-optimize-model>`__ +- `Other ways to optimize model <#other-ways-to-optimize-model>`__ - `Live demo <#live-demo>`__ - `Run Keypoint Detection on video <#run-keypoint-detection-on-video>`__ -Get PyTorch model ------------------------------------------------------------ +Get PyTorch model +----------------- + + Generally, PyTorch models represent an instance of the `torch.nn.Module `__ @@ -91,14 +85,16 @@ In this case, the creators of the model provide an API that enables converting the YOLOv8 model to ONNX and then to OpenVINO IR. Therefore, we do not need to do these steps manually. -Prerequisites -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Prerequisites +^^^^^^^^^^^^^ + + Install necessary packages. .. code:: ipython3 - %pip install -q "openvino>=2023.1.0" "nncf>=2.5.0" "protobuf==3.20.*" "ultralytics==8.0.159" "onnx" + %pip install -q "openvino>=2023.1.0" "nncf>=2.5.0" "protobuf==3.20.*" "torch>=2.1" "torchvision>=0.16" "ultralytics==8.0.159" "onnx" --extra-index-url https://download.pytorch.org/whl/cpu Import required utility functions. The lower cell will download the ``notebook_utils`` Python module from GitHub. 
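For orientation before the hunks that follow, instantiating the pose checkpoint with the freshly installed ``ultralytics`` package looks roughly like this; a hedged sketch, since the notebook's own cells may use different paths and variable names:

.. code:: ipython3

    from ultralytics import YOLO

    # "yolov8n-pose.pt" is the smallest pose checkpoint and matches the
    # yolov8n-pose IR filename mentioned later in this diff; ultralytics
    # downloads it automatically on first use.
    pose_model = YOLO("yolov8n-pose.pt")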
@@ -226,8 +222,10 @@ Define utility functions for drawing results -Instantiate model ------------------------------------------------------------ +Instantiate model +----------------- + + For loading the model, required to specify a path to the model checkpoint. It can be some local path or name available on models hub @@ -271,8 +269,10 @@ Let us consider the examples: -Convert model to OpenVINO IR -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert model to OpenVINO IR +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + YOLOv8 provides API for convenient model exporting to different formats including OpenVINO IR. ``model.export`` is responsible for model @@ -286,15 +286,19 @@ preserve dynamic shapes in the model. if not pose_model_path.exists(): pose_model.export(format="openvino", dynamic=True, half=False) -Verify model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Verify model inference +~~~~~~~~~~~~~~~~~~~~~~ + + To test model work, we create inference pipeline similar to ``model.predict`` method. The pipeline consists of preprocessing step, inference of OpenVINO model and results post-processing to get results. -Preprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing +~~~~~~~~~~~~~ + + Model input is a tensor with the ``[-1, 3, -1, -1]`` shape in the ``N, C, H, W`` format, where \* ``N`` - number of images in batch (batch @@ -406,8 +410,10 @@ To keep a specific shape, preprocessing automatically enables padding. input_tensor = np.expand_dims(input_tensor, 0) return input_tensor -Postprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Postprocessing +~~~~~~~~~~~~~~ + + The model output contains detection boxes candidates, it is a tensor with the ``[-1,56,-1]`` shape in the ``B,56,N`` format, where: @@ -480,8 +486,10 @@ After prediction detection box has the [``x``, ``y``, ``h``, ``w``, return results -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + Select device from dropdown list for running inference using OpenVINO @@ -510,8 +518,10 @@ Select device from dropdown list for running inference using OpenVINO -Test on single image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Test on single image +~~~~~~~~~~~~~~~~~~~~ + + Now, once we have defined preprocessing and postprocessing steps, we are ready to check model prediction. @@ -558,15 +568,19 @@ ready to check model prediction. Great! The result is the same, as produced by original models. -Check model accuracy on the dataset ------------------------------------------------------------------------------ +Check model accuracy on the dataset +----------------------------------- + + For comparing the optimized model result with the original, it is good to know some measurable results in terms of model accuracy on the validation dataset. -Download the validation dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Download the validation dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + YOLOv8 is pre-trained on the COCO dataset, so to evaluate the model accuracy we need to download it. According to the instructions provided @@ -615,8 +629,10 @@ evaluation function. datasets/coco-pose.yaml: 0%| | 0.00/781 [00:00`__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -906,8 +926,10 @@ point precision, using the ``ignored_scope`` parameter. 
Quantized keypoint detection model will be saved to models/yolov8n-pose_openvino_int8_model/yolov8n-pose.xml -Validate Quantized model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate Quantized model inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ``nncf.quantize`` returns the OpenVINO Model class instance, which is suitable for loading on a device for making predictions. ``INT8`` model @@ -947,13 +969,16 @@ on the image. -Compare the Original and Quantized Models ------------------------------------------------------------------------------------ +Compare the Original and Quantized Models +----------------------------------------- + -Compare performance of the Original and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Finally, use the OpenVINO `Benchmark +Compare performance of the Original and Quantized Models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Finally, use the OpenVINO +`Benchmark Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. @@ -1149,8 +1174,10 @@ models. [ INFO ] Throughput: 426.35 FPS -Compare accuracy of the Original and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Compare accuracy of the Original and Quantized Models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + As we can see, there is no significant difference between ``INT8`` and float model result in a single image test. To understand how @@ -1192,8 +1219,10 @@ accuracy on a dataset. Great! Looks like accuracy was changed, but not significantly and it meets passing criteria. -Other ways to optimize model ----------------------------------------------------------------------- +Other ways to optimize model +---------------------------- + + The performance could be also improved by another OpenVINO method such as async inference pipeline or preprocessing API. @@ -1217,10 +1246,12 @@ utilization. For more information, refer to the overview of tutorial <118-optimize-preprocessing-with-output.html>`__. To see, how it could be used with YOLOV8 object detection model , please, see `Convert and Optimize YOLOv8 real-time object detection with -OpenVINO tutorial <230-yolov8-object-detection-with-output.html>`__ +OpenVINO tutorial <./230-yolov8-object-detection.ipynb>`__ + +Live demo +--------- + -Live demo ---------------------------------------------------- The following code runs model inference on a video: @@ -1326,8 +1357,10 @@ The following code runs model inference on a video: if use_popup: cv2.destroyAllWindows() -Run Keypoint Detection on video -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Keypoint Detection on video +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 diff --git a/docs/notebooks/230-yolov8-keypoint-detection-with-output_files/index.html b/docs/notebooks/230-yolov8-keypoint-detection-with-output_files/index.html index f6787514b14ade..ec5f0b474d3879 100644 --- a/docs/notebooks/230-yolov8-keypoint-detection-with-output_files/index.html +++ b/docs/notebooks/230-yolov8-keypoint-detection-with-output_files/index.html @@ -1,13 +1,13 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/230-yolov8-keypoint-detection-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/230-yolov8-keypoint-detection-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/230-yolov8-keypoint-detection-with-output_files/


../
-230-yolov8-keypoint-detection-with-output_11_1.jpg 31-Oct-2023 00:35               58622
-230-yolov8-keypoint-detection-with-output_11_1.png 31-Oct-2023 00:35              581068
-230-yolov8-keypoint-detection-with-output_22_0.jpg 31-Oct-2023 00:35               58280
-230-yolov8-keypoint-detection-with-output_22_0.png 31-Oct-2023 00:35              584143
-230-yolov8-keypoint-detection-with-output_46_0.jpg 31-Oct-2023 00:35               58058
-230-yolov8-keypoint-detection-with-output_46_0.png 31-Oct-2023 00:35              584051
-230-yolov8-keypoint-detection-with-output_62_0.png 31-Oct-2023 00:35              490879
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/230-yolov8-keypoint-detection-with-output_files/


../
+230-yolov8-keypoint-detection-with-output_11_1.jpg 07-Dec-2023 00:49               58622
+230-yolov8-keypoint-detection-with-output_11_1.png 07-Dec-2023 00:49              581068
+230-yolov8-keypoint-detection-with-output_22_0.jpg 07-Dec-2023 00:49               58280
+230-yolov8-keypoint-detection-with-output_22_0.png 07-Dec-2023 00:49              584143
+230-yolov8-keypoint-detection-with-output_46_0.jpg 07-Dec-2023 00:49               58058
+230-yolov8-keypoint-detection-with-output_46_0.png 07-Dec-2023 00:49              584051
+230-yolov8-keypoint-detection-with-output_62_0.png 07-Dec-2023 00:49              490879
 

diff --git a/docs/notebooks/230-yolov8-object-detection-with-output.rst b/docs/notebooks/230-yolov8-object-detection-with-output.rst index 8a7e28c20834ca..d45e71089959df 100644 --- a/docs/notebooks/230-yolov8-object-detection-with-output.rst +++ b/docs/notebooks/230-yolov8-object-detection-with-output.rst @@ -10,18 +10,13 @@ This tutorial demonstrates step-by-step instructions on how to run and optimize PyTorch YOLOv8 with OpenVINO. We consider the steps required for object detection scenario. -The tutorial consists of the following steps: - -- Prepare the PyTorch model. -- Download and prepare a dataset. -- Validate the original model. -- Convert the PyTorch model to OpenVINO IR. -- Validate the converted model. -- Prepare and run optimization pipeline. -- Compare performance ofthe FP32 and quantized models. -- Compare accuracy of the FP32 and quantized models. -- Other optimization possibilities with OpenVINO api -- Live demo +The tutorial consists of the following steps: - Prepare the PyTorch +model. - Download and prepare a dataset. - Validate the original model. +- Convert the PyTorch model to OpenVINO IR. - Validate the converted +model. - Prepare and run optimization pipeline. - Compare performance of +the FP32 and quantized models. - Compare accuracy of the FP32 and +quantized models. - Other optimization possibilities with OpenVINO api - +Live demo **Table of contents:** @@ -32,8 +27,7 @@ The tutorial consists of the following steps: - `Instantiate model <#instantiate-model>`__ - - `Convert model to OpenVINO - IR <#convert-model-to-openvino-ir>`__ + - `Convert model to OpenVINO IR <#convert-model-to-openvino-ir>`__ - `Verify model inference <#verify-model-inference>`__ - `Preprocessing <#preprocessing>`__ - `Postprocessing <#postprocessing>`__ @@ -45,8 +39,7 @@ The tutorial consists of the following steps: - `Download the validation dataset <#download-the-validation-dataset>`__ - - `Define validation - function <#define-validation-function>`__ + - `Define validation function <#define-validation-function>`__ - `Configure Validator helper and create DataLoader <#configure-validator-helper-and-create-dataloader>`__ @@ -72,8 +65,7 @@ The tutorial consists of the following steps: - `Initialize PrePostProcessing API <#initialize-prepostprocessing-api>`__ - - `Define input data - format <#define-input-data-format>`__ + - `Define input data format <#define-input-data-format>`__ - `Describe preprocessing steps <#describe-preprocessing-steps>`__ - `Integrating Steps into a @@ -83,8 +75,10 @@ The tutorial consists of the following steps: - `Run Live Object Detection <#run-live-object-detection>`__ -Get PyTorch model ------------------------------------------------------------ +Get PyTorch model +----------------- + + Generally, PyTorch models represent an instance of the `torch.nn.Module `__ @@ -102,15 +96,17 @@ In this case, the creators of the model provide an API that enables converting the YOLOv8 model to ONNX and then to OpenVINO IR. Therefore, we do not need to do these steps manually. -Prerequisites -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Prerequisites +^^^^^^^^^^^^^ + + Install necessary packages. .. code:: ipython3 %pip install -q "openvino>=2023.1.0" "nncf>=2.5.0" - %pip install -q "ultralytics==8.0.43" onnx + %pip install -q "torch>=2.1" "torchvision>=0.16" "ultralytics==8.0.43" onnx --extra-index-url https://download.pytorch.org/whl/cpu Import required utility functions. The lower cell will download the ``notebook_utils`` Python module from GitHub. 
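The model-preparation flow that the next hunks walk through (load the checkpoint, then export it to OpenVINO IR) condenses to two calls. The sketch below reuses the exact ``export`` arguments shown later in this diff; the checkpoint name is the usual default and therefore an assumption:

.. code:: ipython3

    from ultralytics import YOLO

    det_model = YOLO("yolov8n.pt")  # object-detection checkpoint (assumed name)
    # Same arguments as the notebook cell shown below: keep dynamic shapes,
    # stay in FP32 rather than half precision.
    det_model.export(format="openvino", dynamic=True, half=False)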
@@ -205,8 +201,10 @@ Define utility functions for drawing results -Instantiate model ------------------------------------------------------------ +Instantiate model +----------------- + + There are `several models `__ available in the @@ -260,8 +258,10 @@ Let us consider the examples: -Convert model to OpenVINO IR -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert model to OpenVINO IR +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + YOLOv8 provides API for convenient model exporting to different formats including OpenVINO IR. ``model.export`` is responsible for model @@ -275,15 +275,19 @@ preserve dynamic shapes in the model. if not det_model_path.exists(): det_model.export(format="openvino", dynamic=True, half=False) -Verify model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Verify model inference +~~~~~~~~~~~~~~~~~~~~~~ + + To test model work, we create inference pipeline similar to ``model.predict`` method. The pipeline consists of preprocessing step, inference of OpenVINO model and results post-processing to get results. -Preprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing +~~~~~~~~~~~~~ + + Model input is a tensor with the ``[-1, 3, -1, -1]`` shape in the ``N, C, H, W`` format, where \* ``N`` - number of images in batch (batch @@ -396,8 +400,10 @@ To keep a specific shape, preprocessing automatically enables padding. input_tensor = np.expand_dims(input_tensor, 0) return input_tensor -Postprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Postprocessing +~~~~~~~~~~~~~~ + + The model output contains detection boxes candidates, it is a tensor with the ``[-1,84,-1]`` shape in the ``B,84,N`` format, where: @@ -461,8 +467,10 @@ Finally, detection box has the [``x``, ``y``, ``h``, ``w``, return results -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + Select device from dropdown list for running inference using OpenVINO @@ -491,8 +499,10 @@ Select device from dropdown list for running inference using OpenVINO -Test on single image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Test on single image +~~~~~~~~~~~~~~~~~~~~ + + Now, once we have defined preprocessing and postprocessing steps, we are ready to check model prediction for object detection. @@ -537,15 +547,19 @@ ready to check model prediction for object detection. -Check model accuracy on the dataset ------------------------------------------------------------------------------ +Check model accuracy on the dataset +----------------------------------- + + For comparing the optimized model result with the original, it is good to know some measurable results in terms of model accuracy on the validation dataset. -Download the validation dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Download the validation dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + YOLOv8 is pre-trained on the COCO dataset, so to evaluate the model accuracy we need to download it. According to the instructions provided @@ -594,8 +608,10 @@ evaluation function. datasets/coco.yaml: 0%| | 0.00/1.25k [00:00`__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -875,8 +895,10 @@ point precision, using the ``ignored_scope`` parameter. 
Quantized detection model will be saved to models/yolov8n_openvino_int8_model/yolov8n.xml -Validate Quantized model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate Quantized model inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + ``nncf.quantize`` returns the OpenVINO Model class instance, which is suitable for loading on a device for making predictions. ``INT8`` model @@ -916,11 +938,15 @@ on the image. -Compare the Original and Quantized Models ------------------------------------------------------------------------------------ +Compare the Original and Quantized Models +----------------------------------------- + + + +Compare performance object detection models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Compare performance object detection models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Finally, use the OpenVINO `Benchmark Tool `__ @@ -1118,8 +1144,10 @@ models. [ INFO ] Throughput: 208.39 FPS -Validate quantized model accuracy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate quantized model accuracy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + As we can see, there is no significant difference between ``INT8`` and float model result in a single image test. To understand how @@ -1161,24 +1189,28 @@ accuracy on a dataset. Great! Looks like accuracy was changed, but not significantly and it meets passing criteria. -Next steps ----------------------------------------------------- +Next steps +---------- -This section contains suggestions on how to additionally improve the -performance of your application using OpenVINO. +This section contains +suggestions on how to additionally improve the performance of your +application using OpenVINO. -Async inference pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Async inference pipeline +~~~~~~~~~~~~~~~~~~~~~~~~ -The key advantage of the Async API is that when a device is busy with -inference, the application can perform other tasks in parallel (for -example, populating inputs or scheduling other requests) rather than -wait for the current inference to complete first. To understand how to -perform async inference using openvino, refer to `Async API +The key advantage of the Async +API is that when a device is busy with inference, the application can +perform other tasks in parallel (for example, populating inputs or +scheduling other requests) rather than wait for the current inference to +complete first. To understand how to perform async inference using +openvino, refer to `Async API tutorial <115-async-api-with-output.html>`__ -Integration preprocessing to model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Integration preprocessing to model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Preprocessing API enables making preprocessing a part of the model reducing application code and dependency on additional image processing @@ -1198,8 +1230,10 @@ The integration process consists of the following steps: 1. Initialize a PrePostProcessing object. 2. Define the input data format. 3. Describe preprocessing steps. 4. Integrating Steps into a Model. -Initialize PrePostProcessing API -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Initialize PrePostProcessing API +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + The ``openvino.preprocess.PrePostProcessor`` class enables specifying preprocessing and postprocessing steps for a model. 
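Condensed into one hedged sketch, the four integration steps look as follows. The element types and layouts mirror the description above, while the model path and input index are placeholders; the notebook's own cells follow in the next hunks:

.. code:: ipython3

    import openvino as ov
    from openvino.preprocess import PrePostProcessor

    core = ov.Core()
    model = core.read_model("model.xml")  # placeholder path

    ppp = PrePostProcessor(model)  # 1. initialize the PrePostProcessing object
    # 2. define the incoming data format (u8 NHWC frames in this sketch)
    ppp.input(0).tensor().set_element_type(ov.Type.u8).set_layout(ov.Layout("NHWC"))
    # the converted model itself expects NCHW input
    ppp.input(0).model().set_layout(ov.Layout("NCHW"))
    # 3. describe the preprocessing steps: cast to f32, then NHWC -> NCHW
    ppp.input(0).preprocess().convert_element_type(ov.Type.f32).convert_layout()
    # 4. integrate the steps into the model
    model_with_preprocess = ppp.build()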
@@ -1210,8 +1244,10 @@ preprocessing and postprocessing steps for a model. ppp = PrePostProcessor(quantized_det_model) -Define input data format -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Define input data format +^^^^^^^^^^^^^^^^^^^^^^^^ + + To address particular input of a model/preprocessor, the ``input(input_id)`` method, where ``input_id`` is a positional index or @@ -1229,8 +1265,10 @@ provide this to the tensor description. To perform layout conversion, we also should provide information about layout expected by model -Describe preprocessing steps -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Describe preprocessing steps +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Our preprocessing function contains the following steps: \* Convert the data type from ``U8`` to ``FP32``. \* Convert the data layout from @@ -1259,8 +1297,10 @@ preprocessing steps: -Integrating Steps into a Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Integrating Steps into a Model +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + Once the preprocessing steps have been finished, the model can be finally built. Additionally, we can save a completed model to OpenVINO @@ -1308,8 +1348,10 @@ device. Now, we can skip these preprocessing steps in detect function: -Live demo ---------------------------------------------------- +Live demo +--------- + + The following code runs model inference on a video: @@ -1416,8 +1458,10 @@ The following code runs model inference on a video: if use_popup: cv2.destroyAllWindows() -Run Live Object Detection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Live Object Detection +~~~~~~~~~~~~~~~~~~~~~~~~~ + + Use a webcam as the video input. By default, the primary webcam is set with \ ``source=0``. If you have multiple webcams, each one will be diff --git a/docs/notebooks/230-yolov8-object-detection-with-output_files/230-yolov8-object-detection-with-output_22_0.jpg b/docs/notebooks/230-yolov8-object-detection-with-output_files/230-yolov8-object-detection-with-output_22_0.jpg deleted file mode 100644 index 54446c3246150e..00000000000000 --- a/docs/notebooks/230-yolov8-object-detection-with-output_files/230-yolov8-object-detection-with-output_22_0.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:a33a9db3664a7c515e8fe0b4cbb3d76ffd5caa1aae461c9a2f59de83337a4b69 -size 110022 diff --git a/docs/notebooks/230-yolov8-object-detection-with-output_files/index.html b/docs/notebooks/230-yolov8-object-detection-with-output_files/index.html index 7caea3730dfc26..7b6abb15729e24 100644 --- a/docs/notebooks/230-yolov8-object-detection-with-output_files/index.html +++ b/docs/notebooks/230-yolov8-object-detection-with-output_files/index.html @@ -1,15 +1,15 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/230-yolov8-object-detection-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/230-yolov8-object-detection-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/230-yolov8-object-detection-with-output_files/


../
-230-yolov8-object-detection-with-output_11_1.jpg   31-Oct-2023 00:35              110998
-230-yolov8-object-detection-with-output_11_1.png   31-Oct-2023 00:35              910454
-230-yolov8-object-detection-with-output_22_0.jpg   31-Oct-2023 00:35              110022
-230-yolov8-object-detection-with-output_22_0.png   31-Oct-2023 00:35              929067
-230-yolov8-object-detection-with-output_45_0.jpg   31-Oct-2023 00:35              110900
-230-yolov8-object-detection-with-output_45_0.png   31-Oct-2023 00:35              929911
-230-yolov8-object-detection-with-output_68_0.jpg   31-Oct-2023 00:35              110900
-230-yolov8-object-detection-with-output_68_0.png   31-Oct-2023 00:35              929911
-230-yolov8-object-detection-with-output_74_0.png   31-Oct-2023 00:35              491904
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/230-yolov8-object-detection-with-output_files/


../
+230-yolov8-object-detection-with-output_11_1.jpg   07-Dec-2023 00:49              110998
+230-yolov8-object-detection-with-output_11_1.png   07-Dec-2023 00:49              910454
+230-yolov8-object-detection-with-output_22_0.jpg   07-Dec-2023 00:49              110022
+230-yolov8-object-detection-with-output_22_0.png   07-Dec-2023 00:49              929067
+230-yolov8-object-detection-with-output_45_0.jpg   07-Dec-2023 00:49              110900
+230-yolov8-object-detection-with-output_45_0.png   07-Dec-2023 00:49              929911
+230-yolov8-object-detection-with-output_68_0.jpg   07-Dec-2023 00:49              110900
+230-yolov8-object-detection-with-output_68_0.png   07-Dec-2023 00:49              929911
+230-yolov8-object-detection-with-output_74_0.png   07-Dec-2023 00:49              491904
 

diff --git a/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst b/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst index f57eaa0c65bd5f..bdc9b80be3adb4 100644 --- a/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst +++ b/docs/notebooks/231-instruct-pix2pix-image-editing-with-output.rst @@ -34,10 +34,8 @@ Notebook contains the following steps: - `Prerequisites <#prerequisites>`__ -- `Create Pytorch Models - pipeline <#create-pytorch-models-pipeline>`__ -- `Convert Models to OpenVINO - IR <#convert-models-to-openvino-ir>`__ +- `Create Pytorch Models pipeline <#create-pytorch-models-pipeline>`__ +- `Convert Models to OpenVINO IR <#convert-models-to-openvino-ir>`__ - `Text Encoder <#text-encoder>`__ - `VAE <#vae>`__ @@ -46,27 +44,29 @@ Notebook contains the following steps: - `Prepare Inference Pipeline <#prepare-inference-pipeline>`__ - `Quantization <#quantization>`__ - - `Prepare calibration - dataset <#prepare-calibration-dataset>`__ + - `Prepare calibration dataset <#prepare-calibration-dataset>`__ - `Run quantization <#run-quantization>`__ - `Compare inference time of the FP16 and INT8 models <#compare-inference-time-of-the-fp-and-int-models>`__ -- `Interactive demo with - Gradio <#interactive-demo-with-gradio>`__ +- `Interactive demo with Gradio <#interactive-demo-with-gradio>`__ + +Prerequisites +------------- + -Prerequisites -------------------------------------------------------- Install necessary packages .. code:: ipython3 - %pip install -q "transformers>=4.25.1" accelerate gradio datasets diffusers + %pip install -q "transformers>=4.25.1" accelerate gradio datasets diffusers --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino>=2023.1.0" -Create Pytorch Models pipeline ------------------------------------------------------------------------- +Create Pytorch Models pipeline +------------------------------ + + ``StableDiffusionInstructPix2PixPipeline`` is an end-to-end inference pipeline that you can use to edit images from text instructions with @@ -95,11 +95,13 @@ First, we load the pre-trained weights of all components of the model. del pipe -Convert Models to OpenVINO IR ------------------------------------------------------------------------ +Convert Models to OpenVINO IR +----------------------------- + + OpenVINO supports PyTorch models using `Model Conversion -API `__ +API `__ to convert the model to IR format. ``ov.convert_model`` function accepts PyTorch model object and example input and then converts it to ``ov.Model`` class instance that ready to use for loading on device or @@ -121,8 +123,10 @@ The model consists of three important parts: Let us convert each part. -Text Encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text Encoder +~~~~~~~~~~~~ + + The text-encoder is responsible for transforming the input prompt, for example, “a photo of an astronaut riding a horse” into an embedding @@ -202,8 +206,10 @@ hidden states. -VAE -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +VAE +~~~ + + The VAE model consists of two parts: an encoder and a decoder. @@ -313,8 +319,10 @@ into two independent models. -Unet -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Unet +~~~~ + + The Unet model has three inputs: @@ -391,8 +399,10 @@ Model predicts the ``sample`` state for the next step. 
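Each of the three parts above is converted with the same two calls. Here is a minimal, self-contained sketch of the pattern using a toy module; the real notebook passes the pipeline's text encoder, VAE and UNet together with example inputs matching their expected shapes:

.. code:: ipython3

    import torch
    import openvino as ov

    class Toy(torch.nn.Module):
        # Stand-in for the text encoder / VAE / UNet; illustrative only.
        def forward(self, x):
            return x * 2

    ov_model = ov.convert_model(Toy(), example_input=torch.zeros(1, 4, 64, 64))
    ov.save_model(ov_model, "toy.xml")  # writes OpenVINO IR (.xml + .bin)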
-Prepare Inference Pipeline --------------------------------------------------------------------- +Prepare Inference Pipeline +-------------------------- + + Putting it all together, let us now take a closer look at how the model inference works by illustrating the logical flow. @@ -998,8 +1008,10 @@ generation. Nice. As you can see, the picture has quite a high definition 🔥. -Quantization -------------------------------------------------------- +Quantization +------------ + + `NNCF `__ enables post-training quantization by adding quantization layers into model @@ -1052,8 +1064,10 @@ Let’s load ``skip magic`` extension to skip quantization if %load_ext skip_kernel_extension -Prepare calibration dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prepare calibration dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + We use a portion of `fusing/instructpix2pix-1000-samples `__ @@ -1127,8 +1141,10 @@ model inputs for calibration we should customize ``CompiledModel``. 0%| | 0/300 [00:00 -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/231-instruct-pix2pix-image-editing-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/231-instruct-pix2pix-image-editing-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/231-instruct-pix2pix-image-editing-with-output_files/


../
-231-instruct-pix2pix-image-editing-with-output_..> 31-Oct-2023 00:35             3699096
-231-instruct-pix2pix-image-editing-with-output_..> 31-Oct-2023 00:35             3646410
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/231-instruct-pix2pix-image-editing-with-output_files/


../
+231-instruct-pix2pix-image-editing-with-output_..> 07-Dec-2023 00:49             3699096
+231-instruct-pix2pix-image-editing-with-output_..> 07-Dec-2023 00:49             3646410
 

diff --git a/docs/notebooks/233-blip-convert-with-output.rst b/docs/notebooks/233-blip-convert-with-output.rst index a2265cfed797c2..e4a3d8e7facbd0 100644 --- a/docs/notebooks/233-blip-convert-with-output.rst +++ b/docs/notebooks/233-blip-convert-with-output.rst @@ -31,8 +31,7 @@ The tutorial consists of the following parts: - `Visual Question Answering <#visual-question-answering>`__ - `Instantiate Model <#instantiate-model>`__ -- `Convert Models to OpenVINO - IR <#convert-models-to-openvino-ir>`__ +- `Convert Models to OpenVINO IR <#convert-models-to-openvino-ir>`__ - `Vision Model <#vision-model>`__ - `Text Encoder <#text-encoder>`__ @@ -40,8 +39,7 @@ The tutorial consists of the following parts: - `Run OpenVINO Model <#run-openvino-model>`__ - - `Prepare Inference - Pipeline <#prepare-inference-pipeline>`__ + - `Prepare Inference Pipeline <#prepare-inference-pipeline>`__ - `Select inference device <#select-inference-device>`__ - `Image Captioning <#image-captioning>`__ - `Question Answering <#question-answering>`__ @@ -49,8 +47,10 @@ The tutorial consists of the following parts: - `Interactive demo <#interactive-demo>`__ - `Next steps <#next-steps>`__ -Background ----------------------------------------------------- +Background +---------- + + Visual language processing is a branch of artificial intelligence that focuses on creating algorithms designed to enable computers to more @@ -79,8 +79,10 @@ context are variables requested by a user. This notebook does not focus on Text to Image retrieval. Instead, it considers Image Captioning and Visual Question Answering. -Image Captioning -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image Captioning +~~~~~~~~~~~~~~~~ + + Image Captioning is the task of describing the content of an image in words. This task lies at the intersection of computer vision and natural @@ -91,8 +93,10 @@ decoded into a descriptive text sequence. |image1| -Visual Question Answering -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Visual Question Answering +~~~~~~~~~~~~~~~~~~~~~~~~~ + + Visual Question Answering (VQA) is the task of answering text-based questions about image content. @@ -189,8 +193,10 @@ There are a lot of applications for visual question answering: .. |image4| image:: https://user-images.githubusercontent.com/29454499/222094861-3cafdf9f-d700-4741-b6c5-fb09c1a4da9a.png .. |image5| image:: https://user-images.githubusercontent.com/29454499/222095118-3d5826e4-2662-4d1c-abf2-a515f23d6d6a.png -Instantiate Model ------------------------------------------------------------ +Instantiate Model +----------------- + + The BLIP model was proposed in the `BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and @@ -239,8 +245,7 @@ text and vision modalities and postprocessing of generation results. .. code:: ipython3 - %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch torchvision - %pip install -q "transformers >= 4.26.0" gradio "openvino>=2023.1.0" matplotlib + %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch torchvision "transformers>=4.26.0" gradio "openvino>=2023.1.0" matplotlib .. code:: ipython3 @@ -315,8 +320,10 @@ text and vision modalities and postprocessing of generation results. .. 
image:: 233-blip-convert-with-output_files/233-blip-convert-with-output_7_0.png -Convert Models to OpenVINO IR ------------------------------------------------------------------------ +Convert Models to OpenVINO IR +----------------------------- + + Starting from OpenVINO 2023.0 release, OpenVINO supports direct PyTorch models conversion to OpenVINO Intermediate Representation (IR) format to @@ -337,8 +344,10 @@ The model consists of three parts: To be able to perform multiple tasks, using the same model components, you should convert each part independently. -Vision Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Vision Model +~~~~~~~~~~~~ + + The vision model accepts float input tensors with the [1,3,384,384] shape, containing RGB image pixel values normalized in the [0,1] range. @@ -375,8 +384,10 @@ shape, containing RGB image pixel values normalized in the [0,1] range. Vision model will be loaded from blip_vision_model.xml -Text Encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text Encoder +~~~~~~~~~~~~ + + The text encoder is used by visual question answering tasks to build a question embedding representation. It takes ``input_ids`` with a @@ -412,8 +423,10 @@ model and attention masks for them. Text encoder will be loaded from blip_text_encoder.xml -Text Decoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text Decoder +~~~~~~~~~~~~ + + The text decoder is responsible for generating the sequence of tokens to represent model output (answer to question or caption), using an image @@ -493,17 +506,21 @@ shapes. Text decoder will be loaded from blip_text_decoder_with_past.xml -Run OpenVINO Model ------------------------------------------------------------- +Run OpenVINO Model +------------------ + + + +Prepare Inference Pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Prepare Inference Pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As discussed before, the model consists of several blocks which can be reused for building pipelines for different tasks. In the diagram below, you can see how image captioning works: -|image21| +|image01| The visual model accepts the image preprocessed by ``BlipProcessor`` as input and produces image embeddings, which are directly passed to the @@ -517,20 +534,22 @@ tokenized by ``BlipProcessor`` are provided to the text encoder and then multimodal question embedding is passed to the text decoder for performing generation of answers. -|image31| +|image11| The next step is implementing both pipelines using OpenVINO models. -.. |image21| image:: https://user-images.githubusercontent.com/29454499/221865836-a56da06e-196d-449c-a5dc-4136da6ab5d5.png -.. |image31| image:: https://user-images.githubusercontent.com/29454499/221868167-d0081add-d9f3-4591-80e7-4753c88c1d0a.png +.. |image01| image:: https://user-images.githubusercontent.com/29454499/221865836-a56da06e-196d-449c-a5dc-4136da6ab5d5.png +.. |image11| image:: https://user-images.githubusercontent.com/29454499/221868167-d0081add-d9f3-4591-80e7-4753c88c1d0a.png .. code:: ipython3 # create OpenVINO Core object instance core = ov.Core() -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -586,8 +605,10 @@ initial token for decoder work. Now, the model is ready for generation. 
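The loading just described follows the standard ``compile_model`` pattern. A hedged sketch, reusing the ``core`` object created above and the IR filenames mentioned earlier in this diff; ``device`` is assumed to be the dropdown widget from the device-selection cell:

.. code:: ipython3

    # `device.value` is an assumption: the usual ipywidgets dropdown defined
    # in the device-selection cell of these notebooks.
    ov_vision_model = core.compile_model("blip_vision_model.xml", device.value)
    ov_text_encoder = core.compile_model("blip_text_encoder.xml", device.value)
    ov_text_decoder_with_past = core.compile_model(
        "blip_text_decoder_with_past.xml", device.value
    )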
-Image Captioning -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image Captioning +~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -600,8 +621,10 @@ Image Captioning .. image:: 233-blip-convert-with-output_files/233-blip-convert-with-output_25_0.png -Question Answering -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Question Answering +~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -626,8 +649,10 @@ Question Answering Processing time: 0.1617 -Interactive demo ----------------------------------------------------------- +Interactive demo +---------------- + + .. code:: ipython3 @@ -671,10 +696,12 @@ Interactive demo # demo.launch(server_name='your server name', server_port='server port in int') # Read more in the docs: https://gradio.app/docs/ -Next steps ----------------------------------------------------- +Next steps +---------- + + -Open the `233-blip-optimize <233-blip-optimize-with-output.html>`__ notebook to +Open the `233-blip-optimize <233-blip-optimize.ipynb>`__ notebook to quantize vision and text encoder models with the Post-training Quantization API of NNCF and compress weights of the text decoder. Then compare the converted and optimized OpenVINO models. diff --git a/docs/notebooks/233-blip-convert-with-output_files/index.html b/docs/notebooks/233-blip-convert-with-output_files/index.html index 1bbb317e912229..d5bd5b3d31260f 100644 --- a/docs/notebooks/233-blip-convert-with-output_files/index.html +++ b/docs/notebooks/233-blip-convert-with-output_files/index.html @@ -1,9 +1,9 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/233-blip-convert-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/233-blip-convert-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/233-blip-convert-with-output_files/


../
-233-blip-convert-with-output_25_0.png              31-Oct-2023 00:35              206940
-233-blip-convert-with-output_27_0.png              31-Oct-2023 00:35              210551
-233-blip-convert-with-output_7_0.png               31-Oct-2023 00:35              210551
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/233-blip-convert-with-output_files/


../
+233-blip-convert-with-output_25_0.png              07-Dec-2023 00:49              206940
+233-blip-convert-with-output_27_0.png              07-Dec-2023 00:49              210551
+233-blip-convert-with-output_7_0.png               07-Dec-2023 00:49              210551
 

diff --git a/docs/notebooks/234-encodec-audio-compression-with-output.rst b/docs/notebooks/234-encodec-audio-compression-with-output.rst index 419ccc2cbe16b6..217a132bd68e01 100644 --- a/docs/notebooks/234-encodec-audio-compression-with-output.rst +++ b/docs/notebooks/234-encodec-audio-compression-with-output.rst @@ -47,8 +47,10 @@ and original `repo `__. - `Run EnCodec with OpenVINO <#run-encodec-with-openvino>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + Install required dependencies: @@ -59,13 +61,17 @@ Install required dependencies: .. parsed-literal:: - DEPRECATION: git+https://\*\*\*\*@github.com/eaidova/encodec#egg=encodec;python_version=="3.7" contains an egg fragment with a non-PEP 508 name pip 25.0 will enforce this behaviour change. A possible replacement is to use the req @ url syntax, and remove the egg fragment. Discussion can be found at https://github.com/pypa/pip/issues/11617 - DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.\*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 + DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 + ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. + pyannote-audio 2.0.1 requires torchaudio<1.0,>=0.10, but you have torchaudio 2.1.1+cpu which is incompatible. + torchvision 0.14.1+cpu requires torch==1.13.1, but you have torch 2.1.1+cpu which is incompatible. Note: you may need to restart the kernel to use updated packages. -Instantiate audio compression pipeline --------------------------------------------------------------------------------- +Instantiate audio compression pipeline +-------------------------------------- + + `Codecs `__, which act as encoders and decoders for streams of data, help empower most of the audio @@ -122,8 +128,17 @@ bandwidth. model = EncodecModel.encodec_model_24khz() model.set_target_bandwidth(6.0) -Explore EnCodec pipeline ------------------------------------------------------------------- + +.. parsed-literal:: + + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/nn/utils/weight_norm.py:30: UserWarning: torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm. + warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.") + + +Explore EnCodec pipeline +------------------------ + + Let us explore model capabilities on example audio: @@ -173,8 +188,10 @@ Let us explore model capabilities on example audio: .. 
image:: 234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_6_2.png -Preprocessing -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing +~~~~~~~~~~~~~ + + To achieve the best result, audio should have the number of channels and sample rate expected by the model. If audio does not fulfill these @@ -201,8 +218,10 @@ number of channels using the ``convert_audio`` function. wav = convert_audio(wav, sr, model_sr, model_channels) -Encoding -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Encoding +~~~~~~~~ + + Audio waveform should be split by chunks and then encoded by Encoder model, then compressed by quantizer for reducing memory. The result of @@ -250,8 +269,10 @@ Let us compare obtained compression result: Great! Now, we see the power of hyper compression. Binary size of a file becomes 60 times smaller and more suitable for sending via network. -Decompression -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Decompression +~~~~~~~~~~~~~ + + After successful sending of the compressed audio, it should be decompressed on the recipient’s side. The decoder model is responsible @@ -262,6 +283,13 @@ similar as possible to the original. out, out_sr = decompress(out_file.read_bytes()) + +.. parsed-literal:: + + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/nn/utils/weight_norm.py:30: UserWarning: torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm. + warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.") + + .. code:: ipython3 output_file = "decopressed.wav" @@ -286,7 +314,7 @@ audio. @@ -299,8 +327,10 @@ audio. Nice! Audio sounds close to original. -Convert model to OpenVINO Intermediate Representation format ------------------------------------------------------------------------------------------------------- +Convert model to OpenVINO Intermediate Representation format +------------------------------------------------------------ + + For best results with OpenVINO, it is recommended to convert the model to OpenVINO IR format. OpenVINO supports PyTorch via ONNX conversion. We @@ -358,25 +388,19 @@ with ``ov.save_model``. .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:60: TracerWarning: Converting a tensor to a Python float might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:60: TracerWarning: Converting a tensor to a Python float might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:85: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. 
We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:85: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:87: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:87: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! max_pad = max(padding_left, padding_right) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:89: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:89: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if length <= max_pad: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/symbolic_opset9.py:4315: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/symbolic_opset9.py:4662: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. 
warnings.warn( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/_internal/jit_utils.py:258: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) - _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:687: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.) + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:702: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.) _C._jit_pass_onnx_graph_shape_type_inference( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:687: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) - _C._jit_pass_onnx_graph_shape_type_inference( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:1178: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.) - _C._jit_pass_onnx_graph_shape_type_inference( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:1178: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:1209: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.) _C._jit_pass_onnx_graph_shape_type_inference( @@ -393,16 +417,18 @@ with ``ov.save_model``. .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/quantization/core_vq.py:358: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. 
+ /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/quantization/core_vq.py:358: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. quantized_out = torch.tensor(0.0, device=q_indices.device) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/quantization/core_vq.py:359: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results). + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/quantization/core_vq.py:359: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results). for i, indices in enumerate(q_indices): - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:103: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/encodec/modules/conv.py:103: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! assert (padding_left + padding_right) <= x.shape[-1] -Integrate OpenVINO to EnCodec pipeline --------------------------------------------------------------------------------- +Integrate OpenVINO to EnCodec pipeline +-------------------------------------- + + The following steps are required for integration of OpenVINO to EnCodec pipeline: @@ -412,8 +438,10 @@ pipeline: 3. Replace the original frame processing functions with OpenVINO based algorithms. -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -476,8 +504,10 @@ select device from dropdown list for running inference using OpenVINO model._encode_frame = encode_frame model._decode_frame = decode_frame -Run EnCodec with OpenVINO -------------------------------------------------------------------- +Run EnCodec with OpenVINO +------------------------- + + The process of running encodec with OpenVINO under hood will be the same like with the original PyTorch models. @@ -501,6 +531,13 @@ like with the original PyTorch models. out, out_sr = decompress(out_file.read_bytes()) + +.. 
parsed-literal:: + + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/nn/utils/weight_norm.py:30: UserWarning: torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm. + warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.") + + .. code:: ipython3 ov_output_file = "decopressed_ov.wav" @@ -521,7 +558,7 @@ like with the original PyTorch models. diff --git a/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_19_1.png b/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_19_1.png index e87ac388104511..4d1f7be941887e 100644 --- a/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_19_1.png +++ b/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_19_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:163c03d2e54146fc13d51ca270e2b8601a8545292cf8d2e62394f818ef754548 -size 44358 +oid sha256:c99a97c7779acef829103435a9de148f7d7ec1dea72b09b694e6c039f2737c45 +size 44357 diff --git a/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_38_1.png b/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_38_1.png index e87ac388104511..4d1f7be941887e 100644 --- a/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_38_1.png +++ b/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_38_1.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:163c03d2e54146fc13d51ca270e2b8601a8545292cf8d2e62394f818ef754548 -size 44358 +oid sha256:c99a97c7779acef829103435a9de148f7d7ec1dea72b09b694e6c039f2737c45 +size 44357 diff --git a/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_6_2.png b/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_6_2.png index 1c2d79f85f39ee..3a8316bb151f77 100644 --- a/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_6_2.png +++ b/docs/notebooks/234-encodec-audio-compression-with-output_files/234-encodec-audio-compression-with-output_6_2.png @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:008a0a166593dc371f3a6d5fb865a10fa23ddc589e342eaec7da6de1a1e0241d +oid sha256:f29c7e17e50adc3a28c221231ed536f5148c7d9c5534078d6bdef078632bcd07 size 45005 diff --git a/docs/notebooks/234-encodec-audio-compression-with-output_files/index.html b/docs/notebooks/234-encodec-audio-compression-with-output_files/index.html index e5b083f2014592..c1a265b91e4d86 100644 --- a/docs/notebooks/234-encodec-audio-compression-with-output_files/index.html +++ b/docs/notebooks/234-encodec-audio-compression-with-output_files/index.html @@ -1,9 +1,9 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/234-encodec-audio-compression-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/234-encodec-audio-compression-with-output_files/ -

-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/234-encodec-audio-compression-with-output_files/
-../
-234-encodec-audio-compression-with-output_19_1.png 31-Oct-2023 00:35               44358
-234-encodec-audio-compression-with-output_38_1.png 31-Oct-2023 00:35               44358
-234-encodec-audio-compression-with-output_6_2.png  31-Oct-2023 00:35               45005
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/234-encodec-audio-compression-with-output_files/
+../
+234-encodec-audio-compression-with-output_19_1.png 07-Dec-2023 00:49               44357
+234-encodec-audio-compression-with-output_38_1.png 07-Dec-2023 00:49               44357
+234-encodec-audio-compression-with-output_6_2.png  07-Dec-2023 00:49               45005
 

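For reference, the round trip the EnCodec hunks above document, condensed into a minimal sketch. It assumes the ``encodec`` package's ``compress`` helpers; the audio file name is illustrative.

.. code:: ipython3

    import torchaudio
    from encodec import EncodecModel
    from encodec.utils import convert_audio
    from encodec.compress import compress, decompress

    # Load the 24 kHz model and set the 6 kbps target used in the notebook.
    model = EncodecModel.encodec_model_24khz()
    model.set_target_bandwidth(6.0)

    # Resample/remix the input to the rate and channel count the model expects.
    wav, sr = torchaudio.load("test_24k.wav")  # illustrative input file
    wav = convert_audio(wav, sr, model.sample_rate, model.channels)

    payload = compress(model, wav)             # compact byte stream, ~60x smaller
    restored, restored_sr = decompress(payload)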
diff --git a/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst b/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst index 55443ec56c13e4..14941bc5aa2af7 100644 --- a/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst +++ b/docs/notebooks/235-controlnet-stable-diffusion-with-output.rst @@ -141,6 +141,7 @@ discussed steps are also applicable to other annotation modes. **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Instantiating Generation Pipeline <#instantiating-generation-pipeline>`__ @@ -175,7 +176,7 @@ Prerequisites .. code:: ipython3 %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu "torch" "torchvision" - %pip install -q "diffusers>=0.14.0" "transformers>=4.30.2" "controlnet-aux>=0.0.6" "gradio>=3.36" + %pip install -q "diffusers>=0.14.0" "transformers>=4.30.2" "controlnet-aux>=0.0.6" "gradio>=3.36" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino>=2023.1.0" Instantiating Generation Pipeline diff --git a/docs/notebooks/235-controlnet-stable-diffusion-with-output_files/index.html b/docs/notebooks/235-controlnet-stable-diffusion-with-output_files/index.html index 413af7ce9ee1f0..ddfffdf435525a 100644 --- a/docs/notebooks/235-controlnet-stable-diffusion-with-output_files/index.html +++ b/docs/notebooks/235-controlnet-stable-diffusion-with-output_files/index.html @@ -1,10 +1,10 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/235-controlnet-stable-diffusion-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/235-controlnet-stable-diffusion-with-output_files/ -

-Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/235-controlnet-stable-diffusion-with-output_files/
-../
-235-controlnet-stable-diffusion-with-output_17_..> 15-Nov-2023 00:43              498463
-235-controlnet-stable-diffusion-with-output_34_..> 15-Nov-2023 00:43               30487
-235-controlnet-stable-diffusion-with-output_34_..> 15-Nov-2023 00:43              464375
-235-controlnet-stable-diffusion-with-output_8_0..> 15-Nov-2023 00:43              498463
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/235-controlnet-stable-diffusion-with-output_files/
+../
+235-controlnet-stable-diffusion-with-output_17_..> 07-Dec-2023 00:49              498463
+235-controlnet-stable-diffusion-with-output_34_..> 07-Dec-2023 00:49               30487
+235-controlnet-stable-diffusion-with-output_34_..> 07-Dec-2023 00:49              464375
+235-controlnet-stable-diffusion-with-output_8_0..> 07-Dec-2023 00:49              498463
 

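The ControlNet hunk above only pins the install cell to the CPU wheel index, but for context, a hedged sketch of the diffusers pipeline that notebook converts; the checkpoint ids are the common upstream ones and may differ from the notebook's.

.. code:: ipython3

    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
    from PIL import Image

    # Condition Stable Diffusion v1.5 on Canny-edge annotations.
    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet
    )

    edges = Image.open("edges.png")  # illustrative control image
    image = pipe("a bird", image=edges).images[0]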
diff --git a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst index 30edc7d2c08fe4..5e5411cc6f41a5 100644 --- a/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-infinite-zoom-with-output.rst @@ -22,7 +22,7 @@ In previous notebooks, we already discussed how to run `Text-to-Image generation and Image-to-Image generation using Stable Diffusion v1 <225-stable-diffusion-text-to-image-with-output.html>`__ and `controlling its generation process using -ControlNet <235-controlnet-stable-diffusion/235-controlnet-stable-diffusion-with-output.html>`__. +ControlNet <./235-controlnet-stable-diffusion/235-controlnet-stable-diffusion.ipynb>`__. Now is turn of Stable Diffusion v2. Stable Diffusion v2: What’s new? @@ -86,17 +86,17 @@ Notebook contains the following steps: library <#stable-diffusion-in-diffusers-library>`__ - `Convert models to OpenVINO Intermediate representation (IR) format <#convert-models-to-openvino-intermediate-representation-ir-format>`__ - - `Prepare Inference - pipeline <#prepare-inference-pipeline>`__ + - `Prepare Inference pipeline <#prepare-inference-pipeline>`__ - `Zoom Video Generation <#zoom-video-generation>`__ - - `Configure Inference - Pipeline <#configure-inference-pipeline>`__ + - `Configure Inference Pipeline <#configure-inference-pipeline>`__ - `Select inference device <#select-inference-device>`__ - `Run Infinite Zoom video generation <#run-infinite-zoom-video-generation>`__ -Stable Diffusion v2 Infinite Zoom Showcase ------------------------------------------------------------------------------------- +Stable Diffusion v2 Infinite Zoom Showcase +------------------------------------------ + + In this tutorial we consider how to use Stable Diffusion v2 model for generation sequence of images for infinite zoom video effect. To do @@ -104,8 +104,10 @@ this, we will need `stabilityai/stable-diffusion-2-inpainting `__ model. -Stable Diffusion Text guided Inpainting -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Stable Diffusion Text guided Inpainting +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + In image editing, inpainting is a process of restoring missing parts of pictures. Most commonly applied to reconstructing old deteriorated @@ -138,17 +140,21 @@ Using this inpainting feature, decreasing image by certain margin and masking this border for every new frame we can create interesting Zoom Out video based on our prompt. -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + install required packages .. code:: ipython3 - %pip install -q "diffusers>=0.14.0" "transformers >= 4.25.1" gradio "openvino>=2023.1.0" + %pip install -q "diffusers>=0.14.0" "transformers>=4.25.1" gradio "openvino>=2023.1.0" --extra-index-url https://download.pytorch.org/whl/cpu + +Stable Diffusion in Diffusers library +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + -Stable Diffusion in Diffusers library -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To work with Stable Diffusion v2, we will use Hugging Face `Diffusers `__ library. 
To @@ -199,11 +205,13 @@ The code below demonstrates how to create del pipe_inpaint gc.collect(); -Convert models to OpenVINO Intermediate representation (IR) format -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert models to OpenVINO Intermediate representation (IR) format +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Conversion part of model stayed remain as in `Text-to-Image generation -notebook <236-stable-diffusion-v2-text-to-image-with-output.html>`__. Except +notebook <./236-stable-diffusion-v2-text-to-image.ipynb>`__. Except U-Net now has 9 channels, which now calculated like 4 for U-Net generated latents channels + 4 for latent representation of masked image + 1 channel resized mask. @@ -428,8 +436,10 @@ generated latents channels + 4 for latent representation of masked image VAE decoder will be loaded from sd2_inpainting/vae_decoder.xml -Prepare Inference pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prepare Inference pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + As it was discussed previously, Inpainting inference pipeline is based on Text-to-Image inference pipeline with addition mask processing step. @@ -445,7 +455,7 @@ We will reuse ``OVStableDiffusionPipeline`` basic utilities in import cv2 from transformers import CLIPTokenizer - from diffusers.pipeline_utils import DiffusionPipeline + from diffusers import DiffusionPipeline from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler @@ -500,8 +510,8 @@ We will reuse ``OVStableDiffusionPipeline`` basic utilities in .. parsed-literal:: - /tmp/ipykernel_1292073/2055396221.py:8: FutureWarning: Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead. - from diffusers.pipeline_utils import DiffusionPipeline + /tmp/ipykernel_1292073/2055396221.py:8: FutureWarning: Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers is deprecated. Please import from diffusers.pipelines.pipeline_utils instead. + from diffusers import DiffusionPipeline .. code:: ipython3 @@ -860,8 +870,10 @@ We will reuse ``OVStableDiffusionPipeline`` basic utilities in return timesteps, num_inference_steps - t_start -Zoom Video Generation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Zoom Video Generation +~~~~~~~~~~~~~~~~~~~~~ + + For achieving zoom effect, we will use inpainting to expand images beyond their original borders. We run our @@ -1095,8 +1107,10 @@ generation is finished, we record frames in reversed order. loop=0, ) -Configure Inference Pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Configure Inference Pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Configuration steps: 1. Load models on device 2. Configure tokenizer and scheduler 3. 
Create instance of ``OVStableDiffusionInpaintingPipeline`` @@ -1108,8 +1122,10 @@ class tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-large-patch14') -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -1154,8 +1170,10 @@ select device from dropdown list for running inference using OpenVINO scheduler=scheduler_inpaint, ) -Run Infinite Zoom video generation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Infinite Zoom video generation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -1218,3 +1236,5 @@ Run Infinite Zoom video generation .. .. raw:: html ..
+ + diff --git a/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst index 3c7ee85bcbb054..1adc480041c019 100644 --- a/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-text-to-image-demo-with-output.rst @@ -21,25 +21,21 @@ like to see the full implementation of stable diffusion for text to image, please visit `236-stable-diffusion-v2-text-to-image `__. - **Table of contents:** ---- - `Step 0: Install and import prerequisites <#step--install-and-import-prerequisites>`__ - `Step 1: Stable Diffusion v2 Fundamental components <#step--stable-diffusion-v-fundamental-components>`__ -- `Step 1.1: Retrieve components from HuggingFace <#step--retrieve-components-from-huggingface>`__ +- `Step 1.1: Retrieve components fromHuggingFace <#step--retrieve-components-from-huggingface>`__ - `Step 2: Convert the models to OpenVINO <#step--convert-the-models-to-openvino>`__ - `Step 3: Text-to-Image Generation Inference Pipeline <#step--text-to-image-generation-inference-pipeline>`__ - `Step 3.1: Load and Understand Text to Image OpenVINO models <#step--load-and-understand-text-to-image-openvino-models>`__ - `Step 3.2: Select inference device <#step--select-inference-device>`__ - `Step 3.3: Run Text-to-Image generation <#step--run-text-to-image-generation>`__ -Step 0: Install and import prerequisites ----------------------------------------------------------------------------------- +Step 0: Install and import prerequisites +---------------------------------------- -.. code:: ipython3 - from pathlib import Path To work with Stable Diffusion v2, we will use Hugging Face’s `Diffusers `__ library. @@ -51,7 +47,7 @@ pipelines `__. .. code:: ipython3 - %pip install -q "diffusers>=0.14.0" "openvino>=2023.1.0" "transformers >= 4.31" accelerate "urllib3==1.26.15" + %pip install -q "diffusers>=0.14.0" "openvino>=2023.1.0" "transformers>=4.31" accelerate "urllib3==1.26.15" --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: @@ -68,8 +64,10 @@ pipelines `__. Note: you may need to restart the kernel to use updated packages. -Step 1: Stable Diffusion v2 Fundamental components --------------------------------------------------------------------------------------------- +Step 1: Stable Diffusion v2 Fundamental components +-------------------------------------------------- + + Stable Diffusion pipelines for both Text to Image and Inpainting consist of three important parts: @@ -83,8 +81,10 @@ of three important parts: Depending on the pipeline, the parameters for these parts can differ, which we’ll explore in this demo! -Step 1.1: Retrieve components from HuggingFace -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Step 1.1: Retrieve components from HuggingFace +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Let’s start by retrieving these components from HuggingFace! @@ -125,8 +125,10 @@ using ``stable-diffusion-2-1``. Loading pipeline components...: 0%| | 0/6 [00:00 -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/236-stable-diffusion-v2-text-to-image-demo-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/236-stable-diffusion-v2-text-to-image-demo-with-output_files/ -

-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/236-stable-diffusion-v2-text-to-image-demo-with-output_files/
-../
-236-stable-diffusion-v2-text-to-image-demo-with..> 31-Oct-2023 00:35              100414
-236-stable-diffusion-v2-text-to-image-demo-with..> 31-Oct-2023 00:35             1057851
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/236-stable-diffusion-v2-text-to-image-demo-with-output_files/
+../
+236-stable-diffusion-v2-text-to-image-demo-with..> 07-Dec-2023 00:49              100414
+236-stable-diffusion-v2-text-to-image-demo-with..> 07-Dec-2023 00:49             1057851
 

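The Stable Diffusion v2 hunks above swap the deprecated ``diffusers.pipeline_utils`` import for the public one and point the pipeline at the inpainting checkpoint the notebook names; the updated import boils down to:

.. code:: ipython3

    # diffusers.pipeline_utils now raises a FutureWarning; import from the package root.
    from diffusers import DiffusionPipeline

    pipe_inpaint = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting")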
diff --git a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst index 882d1586f300f5..bb74f68a50540b 100644 --- a/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst +++ b/docs/notebooks/236-stable-diffusion-v2-text-to-image-with-output.rst @@ -22,7 +22,7 @@ In previous notebooks, we already discussed how to run `Text-to-Image generation and Image-to-Image generation using Stable Diffusion v1 <225-stable-diffusion-text-to-image-with-output.html>`__ and `controlling its generation process using -ControlNet <235-controlnet-stable-diffusion-with-output.html>`__. +ControlNet <./235-controlnet-stable-diffusion/235-controlnet-stable-diffusion.ipynb>`__. Now is turn of Stable Diffusion v2. Stable Diffusion v2: What’s new? @@ -90,24 +90,25 @@ notebook `__ - `U-Net <#u-net>`__ - `VAE <#vae>`__ - - `Prepare Inference - Pipeline <#prepare-inference-pipeline>`__ - - `Configure Inference - Pipeline <#configure-inference-pipeline>`__ - - `Run Text-to-Image - generation <#run-text-to-image-generation>`__ + - `Prepare Inference Pipeline <#prepare-inference-pipeline>`__ + - `Configure Inference Pipeline <#configure-inference-pipeline>`__ + - `Run Text-to-Image generation <#run-text-to-image-generation>`__ + +Prerequisites +------------- + -Prerequisites -------------------------------------------------------- install required packages .. code:: ipython3 - %pip install -q "diffusers>=0.14.0" "openvino>=2023.1.0" "transformers >= 4.25.1" gradio + %pip install -q "diffusers>=0.14.0" "openvino>=2023.1.0" "transformers>=4.25.1" gradio --extra-index-url https://download.pytorch.org/whl/cpu + +Stable Diffusion v2 for Text-to-Image Generation +------------------------------------------------ + -Stable Diffusion v2 for Text-to-Image Generation ------------------------------------------------------------------------------------------- To start, let’s look on Text-to-Image process for Stable Diffusion v2. We will use `Stable Diffusion @@ -121,10 +122,11 @@ post `__ and original model `repository `__. -Stable Diffusion in Diffusers library -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Stable Diffusion in Diffusers library +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To work with Stable Diffusion v2, we will use Hugging Face +To work with Stable Diffusion +v2, we will use Hugging Face `Diffusers `__ library. To experiment with Stable Diffusion models, Diffusers exposes the `StableDiffusionPipeline `__ @@ -166,8 +168,10 @@ using ``stable-diffusion-2-1``: Fetching 13 files: 0%| | 0/13 [00:00`__ - `Image encoding <#image-encoding>`__ - `Example point input <#example-point-input>`__ - - `Example with multiple - points <#example-with-multiple-points>`__ + - `Example with multiple points <#example-with-multiple-points>`__ - `Example box and point input with negative label <#example-box-and-point-input-with-negative-label>`__ @@ -33,8 +32,7 @@ Object masks from prompts with SAM and OpenVINO - `Optimize encoder using NNCF Post-training Quantization API <#optimize-encoder-using-nncf-post-training-quantization-api>`__ - - `Prepare a calibration - dataset <#prepare-a-calibration-dataset>`__ + - `Prepare a calibration dataset <#prepare-a-calibration-dataset>`__ - `Run quantization and serialize OpenVINO IR model <#run-quantization-and-serialize-openvino-ir-model>`__ - `Validate Quantized Model @@ -66,8 +64,10 @@ zero-shot transfer). 
This notebook shows an example of how to convert and use Segment Anything Model in OpenVINO format, allowing it to run on a variety of platforms that support an OpenVINO. -Background ----------------------------------------------------- +Background +---------- + + Previously, to solve any kind of segmentation problem, there were two classes of approaches. The first, interactive segmentation, allowed for @@ -134,18 +134,24 @@ post =3.25" "openvino>=2023.1.0" "nncf>=2.5.0" + %pip install -q "segment_anything" "gradio>=3.25" "openvino>=2023.1.0" "nncf>=2.5.0" "torch>=2.1" "torchvision>=0.16" --extra-index-url https://download.pytorch.org/whl/cpu + +Convert model to OpenVINO Intermediate Representation +----------------------------------------------------- + + + +Download model checkpoint and create PyTorch model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Convert model to OpenVINO Intermediate Representation ------------------------------------------------------------------------------------------------ -Download model checkpoint and create PyTorch model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are several Segment Anything Model `checkpoints `__ @@ -195,8 +201,10 @@ into account this fact, we split model on 2 independent parts: image_encoder and mask_predictor (combination of Prompt Encoder and Mask Decoder). -Image Encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image Encoder +~~~~~~~~~~~~~ + + Image Encoder input is tensor with shape ``1x3x1024x1024`` in ``NCHW`` format, contains image for segmentation. Image Encoder output is image @@ -248,8 +256,10 @@ embeddings, tensor with shape ``1x256x64x64`` ov_encoder = core.compile_model(ov_encoder_model, device.value) -Mask predictor -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Mask predictor +~~~~~~~~~~~~~~ + + This notebook expects the model was exported with the parameter ``return_single_mask=True``. It means that model will only return the @@ -425,11 +435,15 @@ Model outputs: ov_predictor = core.compile_model(ov_model, device.value) -Run OpenVINO model in interactive segmentation mode ---------------------------------------------------------------------------------------------- +Run OpenVINO model in interactive segmentation mode +--------------------------------------------------- + + + +Example Image +~~~~~~~~~~~~~ + -Example Image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: ipython3 @@ -459,8 +473,10 @@ Example Image .. image:: 237-segment-anything-with-output_files/237-segment-anything-with-output_21_0.png -Preprocessing and visualization utilities -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Preprocessing and visualization utilities +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + To prepare input for Image Encoder we should: @@ -575,8 +591,10 @@ These steps are applicable to all available models w, h = box[2] - box[0], box[3] - box[1] ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) -Image encoding -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image encoding +~~~~~~~~~~~~~~ + + To start work with image, we should preprocess it and obtain image embeddings using ``ov_encoder``. We will use the same image for all @@ -592,8 +610,10 @@ reuse them. 
Now, we can try to provide different prompts for mask generation -Example point input -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Example point input +~~~~~~~~~~~~~~~~~~~ + + In this example we select one point. The green star symbol show its location on the image below. @@ -658,8 +678,10 @@ object). .. image:: 237-segment-anything-with-output_files/237-segment-anything-with-output_35_0.png -Example with multiple points -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Example with multiple points +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + in this example, we provide additional point for cover larger object area. @@ -725,8 +747,10 @@ Package inputs, then predict and threshold the mask. Great! Looks like now, predicted mask cover whole truck. -Example box and point input with negative label -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Example box and point input with negative label +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + In this example we define input prompt using bounding box and point inside it.The bounding box represented as set of points of its left @@ -798,8 +822,10 @@ Package inputs, then predict and threshold the mask. .. image:: 237-segment-anything-with-output_files/237-segment-anything-with-output_53_0.png -Interactive segmentation ------------------------------------------------------------------- +Interactive segmentation +------------------------ + + Now, you can try SAM on own image. Upload image to input window and click on desired point, model predict segment based on your image and @@ -852,8 +878,8 @@ point. with gr.Blocks() as demo: with gr.Row(): - input_img = gr.Image(label="Input", type="numpy").style(height=480, width=480) - output_img = gr.Image(label="Selected Segment", type="numpy").style(height=480, width=480) + input_img = gr.Image(label="Input", type="numpy", height=480, width=480) + output_img = gr.Image(label="Selected Segment", type="numpy", height=480, width=480) def on_image_change(img): segmenter.set_image(img) @@ -885,14 +911,6 @@ point. demo.launch(share=True) -.. parsed-literal:: - - /tmp/ipykernel_862585/1907223323.py:46: GradioDeprecationWarning: The `style` method is deprecated. Please set these arguments in the constructor instead. - input_img = gr.Image(label="Input", type="numpy").style(height=480, width=480) - /tmp/ipykernel_862585/1907223323.py:47: GradioDeprecationWarning: The `style` method is deprecated. Please set these arguments in the constructor instead. - output_img = gr.Image(label="Selected Segment", type="numpy").style(height=480, width=480) - - .. parsed-literal:: Running on local URL: http://127.0.0.1:7860 @@ -906,8 +924,10 @@ point. ..
-Run OpenVINO model in automatic mask generation mode ----------------------------------------------------------------------------------------------- +Run OpenVINO model in automatic mask generation mode +---------------------------------------------------- + + Since SAM can efficiently process prompts, masks for the entire image can be generated by sampling a large number of prompts over an image. @@ -1273,8 +1293,10 @@ is a dictionary containing various data about the mask. These keys are: -Optimize encoder using NNCF Post-training Quantization API ----------------------------------------------------------------------------------------------------- +Optimize encoder using NNCF Post-training Quantization API +---------------------------------------------------------- + + `NNCF `__ provides a suite of advanced algorithms for Neural Networks inference optimization in @@ -1291,8 +1313,10 @@ The optimization process contains the following steps: 3. Serialize OpenVINO IR model, using the ``openvino.save_model`` function. -Prepare a calibration dataset -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prepare a calibration dataset +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Download COCO dataset. Since the dataset is used to calibrate the model’s parameter instead of fine-tuning it, we don’t need to download @@ -1368,8 +1392,10 @@ dataset and returns data that can be passed to the model for inference. INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino -Run quantization and serialize OpenVINO IR model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run quantization and serialize OpenVINO IR model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The ``nncf.quantize`` function provides an interface for model quantization. It requires an instance of the OpenVINO Model and @@ -1431,8 +1457,10 @@ activations. ov_encoder_path_int8 = "sam_image_encoder_int8.xml" ov.save_model(quantized_model, ov_encoder_path_int8) -Validate Quantized Model Inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate Quantized Model Inference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + We can reuse the previous code to validate the output of ``INT8`` model. @@ -1495,10 +1523,11 @@ Run ``INT8`` model in automatic mask generation mode -Compare Performance of the Original and Quantized Models -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Compare Performance of the Original and Quantized Models +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Finally, use the OpenVINO `Benchmark +Finally, use the OpenVINO +`Benchmark Tool `__ to measure the inference performance of the ``FP32`` and ``INT8`` models. diff --git a/docs/notebooks/237-segment-anything-with-output_files/index.html b/docs/notebooks/237-segment-anything-with-output_files/index.html index 71c3c02fad6a0e..6d8401dae2a6d1 100644 --- a/docs/notebooks/237-segment-anything-with-output_files/index.html +++ b/docs/notebooks/237-segment-anything-with-output_files/index.html @@ -1,18 +1,18 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/237-segment-anything-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/237-segment-anything-with-output_files/ -

-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/237-segment-anything-with-output_files/
-../
-237-segment-anything-with-output_21_0.png          31-Oct-2023 00:35              467418
-237-segment-anything-with-output_28_0.png          31-Oct-2023 00:35              468529
-237-segment-anything-with-output_35_0.png          31-Oct-2023 00:35              469443
-237-segment-anything-with-output_39_0.png          31-Oct-2023 00:35              470668
-237-segment-anything-with-output_44_0.png          31-Oct-2023 00:35              468092
-237-segment-anything-with-output_48_0.png          31-Oct-2023 00:35              468088
-237-segment-anything-with-output_53_0.png          31-Oct-2023 00:35              472756
-237-segment-anything-with-output_68_1.jpg          31-Oct-2023 00:35              262203
-237-segment-anything-with-output_68_1.png          31-Oct-2023 00:35             2409333
-237-segment-anything-with-output_80_0.png          31-Oct-2023 00:35              469432
-237-segment-anything-with-output_82_1.jpg          31-Oct-2023 00:35              262535
-237-segment-anything-with-output_82_1.png          31-Oct-2023 00:35             2397126
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/237-segment-anything-with-output_files/
+../
+237-segment-anything-with-output_21_0.png          07-Dec-2023 00:49              467418
+237-segment-anything-with-output_28_0.png          07-Dec-2023 00:49              468529
+237-segment-anything-with-output_35_0.png          07-Dec-2023 00:49              469443
+237-segment-anything-with-output_39_0.png          07-Dec-2023 00:49              470668
+237-segment-anything-with-output_44_0.png          07-Dec-2023 00:49              468092
+237-segment-anything-with-output_48_0.png          07-Dec-2023 00:49              468088
+237-segment-anything-with-output_53_0.png          07-Dec-2023 00:49              472756
+237-segment-anything-with-output_68_1.jpg          07-Dec-2023 00:49              262203
+237-segment-anything-with-output_68_1.png          07-Dec-2023 00:49             2409333
+237-segment-anything-with-output_80_0.png          07-Dec-2023 00:49              469432
+237-segment-anything-with-output_82_1.jpg          07-Dec-2023 00:49              262535
+237-segment-anything-with-output_82_1.png          07-Dec-2023 00:49             2397126
 

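The Segment Anything hunks above outline NNCF post-training quantization of the image encoder: build a calibration ``nncf.Dataset``, call ``nncf.quantize``, and serialize with ``openvino.save_model``. A sketch of that flow, with random tensors standing in for the notebook's preprocessed COCO samples and an assumed name for the FP32 IR:

.. code:: ipython3

    import numpy as np
    import nncf
    import openvino as ov

    core = ov.Core()
    encoder = core.read_model("sam_image_encoder.xml")  # assumed FP32 IR path

    # The encoder expects 1x3x1024x1024 NCHW inputs; real calibration uses COCO images.
    samples = [np.random.rand(1, 3, 1024, 1024).astype(np.float32) for _ in range(10)]
    calibration_dataset = nncf.Dataset(samples, lambda x: x)

    quantized_model = nncf.quantize(encoder, calibration_dataset)
    ov.save_model(quantized_model, "sam_image_encoder_int8.xml")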
diff --git a/docs/notebooks/239-image-bind-convert-with-output.rst b/docs/notebooks/239-image-bind-convert-with-output.rst index 0fee6dbdeb7c15..2d3ea2a420a527 100644 --- a/docs/notebooks/239-image-bind-convert-with-output.rst +++ b/docs/notebooks/239-image-bind-convert-with-output.rst @@ -83,19 +83,20 @@ zero-shot classification. - `Text-Image classification <#text-image-classification>`__ - `Text-Audio classification <#text-audio-classification>`__ - - `Image-Audio - classification <#image-audio-classification>`__ + - `Image-Audio classification <#image-audio-classification>`__ - `Next Steps <#next-steps>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + .. code:: ipython3 import sys - %pip install -q soundfile pytorchvideo ftfy "timm==0.6.7" einops fvcore "openvino>=2023.1.0" + %pip install -q soundfile pytorchvideo ftfy "timm==0.6.7" einops fvcore "openvino>=2023.1.0" --extra-index-url https://download.pytorch.org/whl/cpu if sys.version_info.minor < 8: %pip install -q "decord" @@ -124,8 +125,10 @@ Prerequisites /home/ea/work/openvino_notebooks/notebooks/239-image-bind/ImageBind -Instantiate PyTorch model -------------------------------------------------------------------- +Instantiate PyTorch model +------------------------- + + To start work with the model, we should instantiate the PyTorch model class. ``imagebind_model.imagebind_huge(pretrained=True)`` downloads @@ -160,8 +163,10 @@ card `__. warnings.warn( -Prepare input data ------------------------------------------------------------- +Prepare input data +------------------ + + ImageBind works with data across 6 different modalities. Each of them requires its steps for preprocessing. ``data`` module is responsible for @@ -192,8 +197,10 @@ data reading and preprocessing for each modality. ModalityType.AUDIO: data.load_and_transform_audio_data(audio_paths, "cpu"), } -Convert Model to OpenVINO Intermediate Representation (IR) format ------------------------------------------------------------------------------------------------------------ +Convert Model to OpenVINO Intermediate Representation (IR) format +----------------------------------------------------------------- + + OpenVINO supports PyTorch through Model Conversion API. You will use `model conversion Python @@ -226,8 +233,10 @@ embeddings. core = ov.Core() -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -289,8 +298,10 @@ select device from dropdown list for running inference using OpenVINO if npatch_per_img == N: -Zero-shot classification using ImageBind and OpenVINO ------------------------------------------------------------------------------------------------ +Zero-shot classification using ImageBind and OpenVINO +----------------------------------------------------- + + In zero-shot classification, a piece of data is embedded and fed to the model to retrieve a label that corresponds with the contents of the @@ -350,8 +361,10 @@ they represent the same object. image_list = [img.split('/')[-1] for img in image_paths] audio_list = [audio.split('/')[-1] for audio in audio_paths] -Text-Image classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text-Image classification +~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -364,8 +377,10 @@ Text-Image classification .. 
image:: 239-image-bind-convert-with-output_files/239-image-bind-convert-with-output_20_0.png -Text-Audio classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text-Audio classification +~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -378,8 +393,10 @@ Text-Audio classification .. image:: 239-image-bind-convert-with-output_files/239-image-bind-convert-with-output_22_0.png -Image-Audio classification -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image-Audio classification +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -491,9 +508,11 @@ Putting all together, we can match text, image, and sound for our data. -Next Steps ----------------------------------------------------- +Next Steps +---------- + + -Open the `239-image-bind-quantize <239-image-bind-quantize-with-output.html>`__ +Open the `239-image-bind-quantize <239-image-bind-quantize.ipynb>`__ notebook to quantize the IR model with the Post-training Quantization API of NNCF and compare ``FP16`` and ``INT8`` models. diff --git a/docs/notebooks/239-image-bind-convert-with-output_files/index.html b/docs/notebooks/239-image-bind-convert-with-output_files/index.html index 3e1c221be0c6b9..748ce6aba0313c 100644 --- a/docs/notebooks/239-image-bind-convert-with-output_files/index.html +++ b/docs/notebooks/239-image-bind-convert-with-output_files/index.html @@ -1,15 +1,15 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/239-image-bind-convert-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/239-image-bind-convert-with-output_files/ -

-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/239-image-bind-convert-with-output_files/
-../
-239-image-bind-convert-with-output_20_0.png        31-Oct-2023 00:35               15474
-239-image-bind-convert-with-output_22_0.png        31-Oct-2023 00:35               13795
-239-image-bind-convert-with-output_24_0.png        31-Oct-2023 00:35               18151
-239-image-bind-convert-with-output_26_1.jpg        31-Oct-2023 00:35               36700
-239-image-bind-convert-with-output_26_1.png        31-Oct-2023 00:35              341289
-239-image-bind-convert-with-output_27_1.jpg        31-Oct-2023 00:35               71448
-239-image-bind-convert-with-output_27_1.png        31-Oct-2023 00:35              839471
-239-image-bind-convert-with-output_28_1.jpg        31-Oct-2023 00:35               54208
-239-image-bind-convert-with-output_28_1.png        31-Oct-2023 00:35              658748
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/239-image-bind-convert-with-output_files/
+../
+239-image-bind-convert-with-output_20_0.png        07-Dec-2023 00:49               15474
+239-image-bind-convert-with-output_22_0.png        07-Dec-2023 00:49               13795
+239-image-bind-convert-with-output_24_0.png        07-Dec-2023 00:49               18151
+239-image-bind-convert-with-output_26_1.jpg        07-Dec-2023 00:49               36700
+239-image-bind-convert-with-output_26_1.png        07-Dec-2023 00:49              341289
+239-image-bind-convert-with-output_27_1.jpg        07-Dec-2023 00:49               71448
+239-image-bind-convert-with-output_27_1.png        07-Dec-2023 00:49              839471
+239-image-bind-convert-with-output_28_1.jpg        07-Dec-2023 00:49               54208
+239-image-bind-convert-with-output_28_1.png        07-Dec-2023 00:49              658748
 

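The ImageBind hunks above describe zero-shot classification by comparing embeddings across modalities. A hedged sketch of that matching step, assuming embeddings already produced by the converted models; the softmax-over-similarities form and the 1024-dimensional shape are illustrative:

.. code:: ipython3

    import numpy as np

    def zero_shot_probs(query_emb, label_embs):
        # Normalize, then softmax over similarities between one query
        # embedding and a stack of label embeddings (one row per class).
        q = query_emb / np.linalg.norm(query_emb)
        labels_n = label_embs / np.linalg.norm(label_embs, axis=-1, keepdims=True)
        logits = labels_n @ q
        e = np.exp(logits - logits.max())
        return e / e.sum()

    # e.g. one image embedding scored against three text-prompt embeddings
    probs = zero_shot_probs(np.ones(1024), np.random.rand(3, 1024))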
diff --git a/docs/notebooks/240-dolly-2-instruction-following-with-output.rst b/docs/notebooks/240-dolly-2-instruction-following-with-output.rst index 3dca6e68489c62..552b8a95b99dba 100644 --- a/docs/notebooks/240-dolly-2-instruction-following-with-output.rst +++ b/docs/notebooks/240-dolly-2-instruction-following-with-output.rst @@ -83,6 +83,7 @@ and `repo `__ **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Select inference device <#select-inference-device>`__ @@ -120,7 +121,7 @@ documentation `__. .. code:: ipython3 - %pip install -q "diffusers>=0.16.1" "transformers>=4.33.0" "openvino==2023.2.0.dev20230922" "nncf>=2.6.0" datasets onnx gradio --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q "diffusers>=0.16.1" "transformers>=4.33.0" "openvino>=2023.2.0" "nncf>=2.6.0" datasets onnx gradio --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q --upgrade "git+https://github.com/huggingface/optimum-intel.git" Select inference device @@ -218,10 +219,10 @@ compatible with Optimum models. .. parsed-literal:: No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - 2023-10-09 11:07:22.234444: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-09 11:07:22.273745: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-11-17 13:10:43.359093: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-11-17 13:10:43.398436: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-09 11:07:22.903943: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-11-17 13:10:44.026743: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT Compiling the model to CPU ... @@ -282,7 +283,6 @@ accuracy drop. if to_compress.value: if not compressed_model_path.exists(): - ov_model = OVModelForCausalLM.from_pretrained(model_id, device=current_device, export=True, ov_config=ov_config) quantizer = OVQuantizer.from_pretrained(ov_model) quantizer.quantize(save_directory=compressed_model_path, weights_only=True) del quantizer @@ -295,8 +295,8 @@ accuracy drop. .. parsed-literal:: * Original IR model size: 5297.21 MB - * Compressed IR model size: 2660.29 MB - * Model compression rate: 1.991 + * Compressed IR model size: 2657.89 MB + * Model compression rate: 1.993 .. parsed-literal:: @@ -717,23 +717,3 @@ generation parameters: # If you are launching remotely, specify server_name and server_port # EXAMPLE: `demo.launch(server_name='your server name', server_port='server port in int')` # To learn more please refer to the Gradio docs: https://gradio.app/docs/ - - -.. 
parsed-literal:: - - /tmp/ipykernel_709262/2332051390.py:57: GradioDeprecationWarning: The `enable_queue` parameter has been deprecated. Please use the `.queue()` method instead. - demo.launch(enable_queue=True, share=False, height=800) - - -.. parsed-literal:: - - Running on local URL: http://127.0.0.1:7860 - - To create a public link, set `share=True` in `launch()`. - - - -.. .. raw:: html - -..
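The compression hunk above drops the redundant re-export of ``ov_model``, so Optimum Intel's ``OVQuantizer`` now reuses the model instance loaded earlier in the notebook. A minimal sketch of that 8-bit weight-compression flow, with the model id and output directory as illustrative assumptions rather than values taken from the notebook:

.. code:: python

    from optimum.intel import OVModelForCausalLM, OVQuantizer

    # Illustrative values; the notebook uses its own model id and paths
    model_id = "databricks/dolly-v2-3b"
    compressed_model_path = "dolly-v2-3b-int8"

    # Export the PyTorch checkpoint to OpenVINO IR on the fly
    ov_model = OVModelForCausalLM.from_pretrained(model_id, export=True)

    # weights_only=True compresses weights to 8 bit without quantizing
    # activations, which is what yields the roughly 2x smaller IR reported above
    quantizer = OVQuantizer.from_pretrained(ov_model)
    quantizer.quantize(save_directory=compressed_model_path, weights_only=True)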
- diff --git a/docs/notebooks/245-typo-detector-with-output.rst b/docs/notebooks/245-typo-detector-with-output.rst index 42ebf79363741a..ede76147a6889d 100644 --- a/docs/notebooks/245-typo-detector-with-output.rst +++ b/docs/notebooks/245-typo-detector-with-output.rst @@ -43,43 +43,41 @@ The model has been pretrained on the - `2. Converting the model to OpenVINO IR <#-converting-the-model-to-openvino-ir>`__ -- `Select inference device <#select-inference-device>`__ -- `1. Hugging Face Optimum Intel - library <#-hugging-face-optimum-intel-library>`__ + - `Select inference device <#select-inference-device>`__ - - `Load the model <#load-the-model>`__ - - `Load the tokenizer <#load-the-tokenizer>`__ + - `1. Hugging Face Optimum Intel + library <#-hugging-face-optimum-intel-library>`__ -- `2. Converting the model to OpenVINO - IR <#-converting-the-model-to-openvino-ir>`__ + - `Load the model <#load-the-model>`__ + - `Load the tokenizer <#load-the-tokenizer>`__ - - `Load the Pytorch model <#load-the-pytorch-model>`__ - - `Converting to OpenVINO IR <#converting-to-openvino-ir>`__ - - `Inference <#inference>`__ + - `2. Converting the model to OpenVINO + IR <#-converting-the-model-to-openvino-ir>`__ -- `Helper Functions <#helper-functions>`__ + - `Load the Pytorch model <#load-the-pytorch-model>`__ + - `Converting to OpenVINO IR <#converting-to-openvino-ir>`__ + - `Inference <#inference>`__ + + - `Helper Functions <#helper-functions>`__ .. code:: ipython3 - %pip install -q "diffusers>=0.17.1" "openvino>=2023.1.0" "nncf>=2.5.0" "gradio" "onnx>=1.11.0" "onnxruntime>=1.14.0" "transformers>=4.31.0" + %pip install -q "diffusers>=0.17.1" "openvino>=2023.1.0" "nncf>=2.5.0" "gradio" "onnx>=1.11.0" "transformers>=4.33.0" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "git+https://github.com/huggingface/optimum-intel.git" .. parsed-literal:: DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 - ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. - onnxconverter-common 1.14.0 requires protobuf==3.20.2, but you have protobuf 4.24.4 which is incompatible. - pytorch-lightning 1.6.5 requires protobuf<=3.20.1, but you have protobuf 4.24.4 which is incompatible. - tensorflow 2.13.1 requires typing-extensions<4.6.0,>=3.6.6, but you have typing-extensions 4.8.0 which is incompatible. - tf2onnx 1.15.1 requires protobuf~=3.20.2, but you have protobuf 4.24.4 which is incompatible. Note: you may need to restart the kernel to use updated packages. DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 Note: you may need to restart the kernel to use updated packages. -Imports -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Imports +~~~~~~~ + + .. code:: ipython3 @@ -93,14 +91,16 @@ Imports .. 
parsed-literal:: - 2023-10-31 00:01:48.550893: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-31 00:01:48.584584: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-07 00:04:25.014506: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-07 00:04:25.048142: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-31 00:01:49.140201: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-07 00:04:25.652741: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + + +Methods +~~~~~~~ -Methods -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The notebook provides two methods to run the inference of typo detector with OpenVINO runtime, so that you can experience both calling the API @@ -108,8 +108,10 @@ of Optimum with OpenVINO Runtime included, and loading models in other frameworks, converting them to OpenVINO IR format, and running inference with OpenVINO Runtime. -1. Using the `Hugging Face Optimum `__ library -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +1. Using the `Hugging Face Optimum `__ library +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + + The Hugging Face Optimum API is a high-level API that allows us to convert models from the Hugging Face Transformers library to the @@ -117,8 +119,10 @@ OpenVINO™ IR format. Compiled models in OpenVINO IR format can be loaded using Optimum. Optimum allows the use of optimization on targeted hardware. -2. Converting the model to OpenVINO IR -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +2. Converting the model to OpenVINO IR +'''''''''''''''''''''''''''''''''''''' + + The Pytorch model is converted to `OpenVINO IR format `__. This @@ -147,8 +151,10 @@ methods | inference with OpenVINO Runtime | | +-----------------------------------+----------------------------------+ -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -177,8 +183,10 @@ select device from dropdown list for running inference using OpenVINO -1. Hugging Face Optimum Intel library -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +1. Hugging Face Optimum Intel library +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + For this method, we need to install the ``Hugging Face Optimum Intel library`` accelerated by OpenVINO @@ -207,12 +215,12 @@ Import required model class .. 
parsed-literal:: No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/deepspeed.py:23: FutureWarning: transformers.deepspeed module is deprecated and will be removed in a future version. Please import deepspeed modules directly from transformers.integrations - warnings.warn( -Load the model -'''''''''''''''''''''''''''''''''''''''''''''''''''''''' +Load the model +'''''''''''''' + + From the ``OVModelForTokenCLassification`` class we will import the relevant pre-trained model. To load a Transformers model and convert it @@ -238,8 +246,8 @@ your model. Framework not specified. Using pt to export to ONNX. Using the export variant default. Available variants are: - - default: The default ONNX variant. - Using framework PyTorch: 1.13.1+cpu + - default: The default ONNX variant. + Using framework PyTorch: 2.1.1+cpu .. parsed-literal:: @@ -250,14 +258,15 @@ your model. .. parsed-literal:: [ WARNING ] Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s. - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/torch/dynamic_graph/wrappers.py:74: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/torch/dynamic_graph/wrappers.py:75: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. op1 = operator(\*args, \*\*kwargs) Compiling the model to AUTO ... - Set CACHE_DIR to /tmp/tmpuz_oy32n/model_cache -Load the tokenizer -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +Load the tokenizer +'''''''''''''''''' + + Text Preprocessing cleans the text-based input data so it can be fed into the model. Tokenization splits paragraphs and sentences into @@ -362,14 +371,18 @@ Let’s run a demo using the Hugging Face Optimum API. [Input]: I have been stuying for my math exam all week, but I'm stil not very confidet that I will pass it, because there are so many formuals to remeber. [Detected]: I have been stuying for my math exam all week, but I'm stil not very confidet that I will pass it, because there are so many formuals to remeber. ---------------------------------------------------------------------------------------------------------------------------------- - Time elapsed: 0.20258617401123047 + Time elapsed: 0.1588735580444336 + + +2. Converting the model to OpenVINO IR +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + -2. Converting the model to OpenVINO IR -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load the Pytorch model +'''''''''''''''''''''' + -Load the Pytorch model -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Use the ``AutoModelForTokenClassification`` class to load the pretrained pytorch model. @@ -389,8 +402,10 @@ pytorch model. 
model = AutoModelForTokenClassification.from_pretrained(model_id, config=config) model.save_pretrained(model_dir) -Converting to OpenVINO IR -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +Converting to OpenVINO IR +''''''''''''''''''''''''' + + .. code:: ipython3 @@ -400,8 +415,10 @@ Converting to OpenVINO IR ov_model = ov.convert_model(model, example_input=dict(dummy_model_input)) ov.save_model(ov_model, ov_model_path) -Inference -''''''''''''''''''''''''''''''''''''''''''''''''''' +Inference +''''''''' + + OpenVINO™ Runtime Python API is used to compile the model in OpenVINO IR format. The Core class from the ``openvino`` module is imported first. @@ -415,8 +432,10 @@ the compiled model as it is needed for inference. compiled_model = core.compile_model(ov_model, device.value) output_layer = compiled_model.output(0) -Helper Functions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Helper Functions +~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -590,5 +609,5 @@ Let’s run a demo using the converted OpenVINO IR model. [Input]: I have been stuying for my math exam all week, but I'm stil not very confidet that I will pass it, because there are so many formuals to remeber. [Detected]: I have been stuying for my math exam all week, but I'm stil not very confidet that I will pass it, because there are so many formuals to remeber. ---------------------------------------------------------------------------------------------------------------------------------- - Time elapsed: 0.10554790496826172 + Time elapsed: 0.10169100761413574 diff --git a/docs/notebooks/247-code-language-id-with-output.rst b/docs/notebooks/247-code-language-id-with-output.rst index 5b982bd26ce7e8..50d7cec212b984 100644 --- a/docs/notebooks/247-code-language-id-with-output.rst +++ b/docs/notebooks/247-code-language-id-with-output.rst @@ -16,6 +16,7 @@ navigation. **Table of contents:** + - `Introduction <#introduction>`__ - `Task <#task>`__ @@ -116,14 +117,14 @@ Install prerequisites -First, complete the `repository installation steps <../notebooks_installation.html>`__. +First, complete the `repository installation steps <../../README.md>`__. Then, the following cell will install: - HuggingFace Optimum with OpenVINO support - HuggingFace Evaluate to benchmark results .. code:: ipython3 - %pip install -q "diffusers>=0.17.1" "openvino>=2023.1.0" "nncf>=2.5.0" "gradio" "onnx>=1.11.0" "transformers>=4.33.0" "evaluate" + %pip install -q "diffusers>=0.17.1" "openvino>=2023.1.0" "nncf>=2.5.0" "gradio" "onnx>=1.11.0" "transformers>=4.33.0" "evaluate" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "git+https://github.com/huggingface/optimum-intel.git" @@ -131,9 +132,9 @@ OpenVINO support - HuggingFace Evaluate to benchmark results DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts. - onnxconverter-common 1.14.0 requires protobuf==3.20.2, but you have protobuf 4.25.0 which is incompatible. 
- pytorch-lightning 1.6.5 requires protobuf<=3.20.1, but you have protobuf 4.25.0 which is incompatible. - tf2onnx 1.15.1 requires protobuf~=3.20.2, but you have protobuf 4.25.0 which is incompatible. + onnxconverter-common 1.14.0 requires protobuf==3.20.2, but you have protobuf 4.25.1 which is incompatible. + pytorch-lightning 1.6.5 requires protobuf<=3.20.1, but you have protobuf 4.25.1 which is incompatible. + tf2onnx 1.15.1 requires protobuf~=3.20.2, but you have protobuf 4.25.1 which is incompatible. Note: you may need to restart the kernel to use updated packages. DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 Note: you may need to restart the kernel to use updated packages. @@ -163,10 +164,10 @@ equivalent to ``AutoModelForSequenceClassification`` from Transformers .. parsed-literal:: - 2023-11-15 00:06:22.342451: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-11-15 00:06:22.376717: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-07 00:07:02.218482: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-07 00:07:02.252471: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-11-15 00:06:22.962059: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-07 00:07:02.836089: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT .. parsed-literal:: @@ -260,7 +261,7 @@ Download resources .. parsed-literal:: Framework not specified. Using pt to export to ONNX. - Some weights of the model checkpoint at huggingface/CodeBERTa-language-id were not used when initializing RobertaForSequenceClassification: ['roberta.pooler.dense.weight', 'roberta.pooler.dense.bias'] + Some weights of the model checkpoint at huggingface/CodeBERTa-language-id were not used when initializing RobertaForSequenceClassification: ['roberta.pooler.dense.bias', 'roberta.pooler.dense.weight'] - This IS expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). 
Using the export variant default. Available variants are: @@ -283,7 +284,7 @@ Download resources .. parsed-literal:: - Ressources cached locally at: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/notebooks/247-code-language-id/model/CodeBERTa-language-id + Ressources cached locally at: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/247-code-language-id/model/CodeBERTa-language-id Create inference pipeline @@ -396,7 +397,7 @@ NOTE: the base model is loaded using .. parsed-literal:: - Some weights of the model checkpoint at huggingface/CodeBERTa-language-id were not used when initializing RobertaForSequenceClassification: ['roberta.pooler.dense.weight', 'roberta.pooler.dense.bias'] + Some weights of the model checkpoint at huggingface/CodeBERTa-language-id were not used when initializing RobertaForSequenceClassification: ['roberta.pooler.dense.bias', 'roberta.pooler.dense.weight'] - This IS expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing RobertaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). @@ -582,7 +583,7 @@ Inference on new input using quantized model df['speed'] = df.distance / df.time Predicted label: python - Predicted score: 0.83 + Predicted score: 0.81 Load evaluation set @@ -699,16 +700,16 @@ displayed. base 1.0 - 2.340578 - 51.269396 - 0.019505 + 2.045702 + 58.659569 + 0.017048 quantized 1.0 - 3.334829 - 35.983857 - 0.027790 + 2.602893 + 46.102553 + 0.021691 @@ -719,8 +720,11 @@ displayed. Additional resources -------------------- - - `Grammatical Error Correction with OpenVINO `__ -- `Quantize a Hugging Face Question-Answering Model with OpenVINO `__ \ \*\* +- `Grammatical Error Correction +with +OpenVINO `__ +- `Quantize a Hugging Face Question-Answering Model with +OpenVINO `__\ \*\* Clean up -------- diff --git a/docs/notebooks/248-ssd-b1-with-output.rst b/docs/notebooks/248-ssd-b1-with-output.rst new file mode 100644 index 00000000000000..0827e1ed95051b --- /dev/null +++ b/docs/notebooks/248-ssd-b1-with-output.rst @@ -0,0 +1,336 @@ +Image generation with Segmind Stable Diffusion 1B (SSD-1B) model and OpenVINO +============================================================================= + +The `Segmind Stable Diffusion Model +(SSD-1B) `__ is +a distilled 50% smaller version of the Stable Diffusion XL (SDXL), +offering a 60% speedup while maintaining high-quality text-to-image +generation capabilities. It has been trained on diverse datasets, +including Grit and Midjourney scrape data, to enhance its ability to +create a wide range of visual content based on textual prompts. This +model employs a knowledge distillation strategy, where it leverages the +teachings of several expert models in succession, including SDXL, +ZavyChromaXL, and JuggernautXL, to combine their strengths and produce +impressive visual outputs. + +.. figure:: https://user-images.githubusercontent.com/82945616/277419571-a5583e8a-6a05-4680-a540-f80502feed0b.png + :alt: image + + image + +In this tutorial, we consider how to run the SSD-1B model using +OpenVINO. 
+ +We will use a pre-trained model from the `Hugging Face +Diffusers `__ library. To +simplify the user experience, the `Hugging Face Optimum +Intel `__ library is +used to convert the models to OpenVINO™ IR format. + +**Table of contents:** + + +- `Install Prerequisites <#install-prerequisites>`__ +- `SSD-1B Base model <#ssd-b-base-model>`__ +- `Select inference device SSD-1B Base + model <#select-inference-device-ssd-b-base-model>`__ +- `Text2image Generation Interactive + Demo <#textimage-generation-interactive-demo>`__ + +Install prerequisites +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code:: ipython3 + + %pip install -q "git+https://github.com/huggingface/optimum-intel.git" + %pip install -q "openvino>=2023.1.0" + %pip install -q --upgrade-strategy eager "invisible-watermark>=0.2.0" "transformers>=4.33" "accelerate" "onnx" "onnxruntime" safetensors "diffusers>=0.22.0" + %pip install -q gradio + + +.. parsed-literal:: + + DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 + Note: you may need to restart the kernel to use updated packages. + DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 + Note: you may need to restart the kernel to use updated packages. + DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 + Note: you may need to restart the kernel to use updated packages. + DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 + Note: you may need to restart the kernel to use updated packages. + + +SSD-1B Base model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +We will start with the base model part, which is responsible for the +generation of images of the desired output size. +`SSD-1B `__ is available for +downloading via the `HuggingFace hub `__. +It already provides a ready-to-use model in OpenVINO format compatible +with `Optimum +Intel `__. + +To load an OpenVINO model and run an inference with OpenVINO Runtime, +you need to replace diffusers ``StableDiffusionXLPipeline`` with Optimum +``OVStableDiffusionXLPipeline``. In case you want to load a PyTorch +model and convert it to the OpenVINO format on the fly, you can set +``export=True``. + +You can save the model on disk using the ``save_pretrained`` method. + +.. 
code:: ipython3 + + from pathlib import Path + from optimum.intel.openvino import OVStableDiffusionXLPipeline + + + model_id = "segmind/SSD-1B" + model_dir = Path("openvino-ssd-1b") + + +.. parsed-literal:: + + INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino + + +.. parsed-literal:: + + No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' + 2023-12-07 00:09:54.638748: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-07 00:09:54.672777: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. + 2023-12-07 00:09:55.202678: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + + +Select inference device SSD-1B Base model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +select device from dropdown list for running inference using OpenVINO + +.. code:: ipython3 + + import ipywidgets as widgets + import openvino as ov + + + core = ov.Core() + + device = widgets.Dropdown( + options=core.available_devices + ["AUTO"], + value='AUTO', + description='Device:', + disabled=False, + ) + + device + + + + +.. parsed-literal:: + + Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO') + + + +.. code:: ipython3 + + import gc + + + if not model_dir.exists(): + text2image_pipe = OVStableDiffusionXLPipeline.from_pretrained(model_id, compile=False, device=device.value, export=True) + text2image_pipe.half() + text2image_pipe.save_pretrained(model_dir) + text2image_pipe.compile() + gc.collect() + else: + text2image_pipe = OVStableDiffusionXLPipeline.from_pretrained(model_dir, device=device.value) + + +.. parsed-literal:: + + Framework not specified. Using pt to export to ONNX. + Keyword arguments {'subfolder': '', 'trust_remote_code': False} are not expected by StableDiffusionXLImg2ImgPipeline and will be ignored. + + + +.. parsed-literal:: + + Loading pipeline components...: 0%| | 0/7 [00:00 1 or self.sliding_window is not None: + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/modeling_attn_mask_utils.py:137: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + if past_key_values_length > 0: + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py:273: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! 
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py:281: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py:313: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + Using framework PyTorch: 1.13.1+cpu + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/diffusers/models/unet_2d_condition.py:878: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + if dim % default_overall_up_factor != 0: + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/diffusers/models/resnet.py:265: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + assert hidden_states.shape[1] == self.channels + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/diffusers/models/resnet.py:271: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + assert hidden_states.shape[1] == self.channels + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/diffusers/models/resnet.py:173: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + assert hidden_states.shape[1] == self.channels + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/diffusers/models/resnet.py:186: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! 
+ if hidden_states.shape[0] >= 64: + Using framework PyTorch: 1.13.1+cpu + Using framework PyTorch: 1.13.1+cpu + Using framework PyTorch: 1.13.1+cpu + Compiling the vae_decoder to AUTO ... + Compiling the unet to AUTO ... + Compiling the vae_encoder to AUTO ... + Compiling the text_encoder_2 to AUTO ... + Compiling the text_encoder to AUTO ... + + +Run Text2Image generation pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now, we can run the model for the generation of images using text +prompts. To speed up evaluation and reduce the required memory, we +decrease ``num_inference_steps`` and image size (using ``height`` and +``width``). You can modify them to suit your needs, depending on the +target hardware. We also specify a ``generator`` parameter based on a +numpy random state with a specific seed so that results are reproducible. +>\ **Note**: Generating a default-size 1024x1024 image requires about +53GB of RAM for the SSD-1B model when the converted model is loaded from +disk, and up to 64GB of RAM for the SDXL model after exporting. + +.. code:: ipython3 + + prompt = "An astronaut riding a green horse" # Your prompt here + neg_prompt = "ugly, blurry, poor quality" # Negative prompt here + image = text2image_pipe(prompt=prompt, num_inference_steps=15, negative_prompt=neg_prompt).images[0] + image + + +.. parsed-literal:: + + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/optimum/intel/openvino/modeling_diffusion.py:565: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly. + outputs = self.request(inputs, shared_memory=True) + + + ..
parsed-literal:: + + 0%| | 0/15 [00:00 + diff --git a/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_11_1.jpg b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_11_1.jpg new file mode 100644 index 00000000000000..d264e8a18aac2e --- /dev/null +++ b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_11_1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19169c7496ca7e24ca07ea968be245551abd17b9286ea1cc3693237ddabe93fe +size 27761 diff --git a/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_11_1.png b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_11_1.png new file mode 100644 index 00000000000000..47a77d9d17face --- /dev/null +++ b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_11_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df277510a31c59e8b5a372faac4307a5d0190197d1414812d59fd2016dbec5d5 +size 411284 diff --git a/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_9_3.jpg b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_9_3.jpg new file mode 100644 index 00000000000000..bfb87dd12dfe14 --- /dev/null +++ b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_9_3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3cac681a4943e89f0f0e94a68b31795869f8b925fe773848eb093e90c7d062b7 +size 118423 diff --git a/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_9_3.png b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_9_3.png new file mode 100644 index 00000000000000..ad1c32f0b6397d --- /dev/null +++ b/docs/notebooks/248-ssd-b1-with-output_files/248-ssd-b1-with-output_9_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ed72e50f145826d78a2a5af3c6e4bf2e343cd3a0f1c14ba0f3ec9784e94c48f +size 1700640 diff --git a/docs/notebooks/248-ssd-b1-with-output_files/index.html b/docs/notebooks/248-ssd-b1-with-output_files/index.html new file mode 100644 index 00000000000000..085e853cbb8c3e --- /dev/null +++ b/docs/notebooks/248-ssd-b1-with-output_files/index.html @@ -0,0 +1,10 @@ + +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/248-ssd-b1-with-output_files/ + +

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/248-ssd-b1-with-output_files/


../
+248-ssd-b1-with-output_11_1.jpg                    07-Dec-2023 00:49               27761
+248-ssd-b1-with-output_11_1.png                    07-Dec-2023 00:49              411284
+248-ssd-b1-with-output_9_3.jpg                     07-Dec-2023 00:49              118423
+248-ssd-b1-with-output_9_3.png                     07-Dec-2023 00:49             1700640
+

+ diff --git a/docs/notebooks/248-stable-diffusion-xl-with-output.rst b/docs/notebooks/248-stable-diffusion-xl-with-output.rst index f868ad02114de1..34504d0646cb7f 100644 --- a/docs/notebooks/248-stable-diffusion-xl-with-output.rst +++ b/docs/notebooks/248-stable-diffusion-xl-with-output.rst @@ -68,7 +68,7 @@ The tutorial consists of the following steps: **Table of contents:** -- `Install Prerequisites <#install-prerequisites>`__ +- `Install prerequisites <#install-prerequisites>`__ - `SDXL Base model <#sdxl-base-model>`__ - `Select inference device SDXL Base @@ -79,28 +79,34 @@ The tutorial consists of the following steps: Demo <#textimage-generation-interactive-demo>`__ - `Run Image2Image generation pipeline <#run-imageimage-generation-pipeline>`__ + + - `Select inference device SDXL Refiner + model <#select-inference-device-sdxl-refiner-model>`__ + - `Image2Image Generation Interactive Demo <#imageimage-generation-interactive-demo>`__ - `SDXL Refiner model <#sdxl-refiner-model>`__ - - `Select inference device SDXL Refiner - model <#select-inference-device-sdxl-refiner-model>`__ + - `Select inference device <#select-inference-device>`__ - `Run Text2Image generation with Refinement <#run-textimage-generation-with-refinement>`__ -Install prerequisites ---------------------------------------------------------------- +Install prerequisites +--------------------- + + .. code:: ipython3 + %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu "diffusers>=0.18.0" "invisible-watermark>=0.2.0" "transformers>=4.33.0" "accelerate" "onnx" %pip install -q "git+https://github.com/huggingface/optimum-intel.git" - %pip install -q "openvino>=2023.1.0" - %pip install -q --upgrade-strategy eager "diffusers>=0.18.0" "invisible-watermark>=0.2.0" "transformers>=4.30.2" "accelerate" "onnx" "onnxruntime" - %pip install -q gradio + %pip install -q "openvino>=2023.1.0" gradio + +SDXL Base model +--------------- + -SDXL Base model ---------------------------------------------------------- We will start with the base model part, which is responsible for the generation of images of the desired output size. @@ -144,8 +150,10 @@ You can save the model on disk using the ``save_pretrained`` method. warnings.warn( -Select inference device SDXL Base model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device SDXL Base model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -194,8 +202,10 @@ select device from dropdown list for running inference using OpenVINO Compiling the text_encoder_2... -Run Text2Image generation pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Text2Image generation pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Now, we can run the model for the generation of images using text prompts. To speed up evaluation and reduce the required memory we @@ -240,8 +250,10 @@ numpy random state with a specific seed for results reproducibility. -Text2image Generation Interactive Demo -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text2image Generation Interactive Demo +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -294,6 +306,7 @@ Text2image Generation Interactive Demo ..
+ .. code:: ipython3 demo.close() @@ -306,16 +319,20 @@ Text2image Generation Interactive Demo Closing server running on port: 7860 -Run Image2Image generation pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Image2Image generation pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + We can reuse the already converted model for running the Image2Image generation pipeline. For that, we should replace ``OVStableDiffusionXLPipeline`` with ``OVStableDiffusionXLImage2ImagePipeline``. -Select inference device SDXL Refiner model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Select inference device SDXL Refiner model +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + select device from dropdown list for running inference using OpenVINO @@ -384,8 +401,10 @@ select device from dropdown list for running inference using OpenVINO -Image2Image Generation Interactive Demo -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image2Image Generation Interactive Demo +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -463,8 +482,10 @@ Image2Image Generation Interactive Demo -SDXL Refiner model ------------------------------------------------------------- +SDXL Refiner model +------------------ + + As we discussed above, Stable Diffusion XL can be used in a 2-stages approach: first, the base model is used to generate latents of the @@ -493,8 +514,10 @@ prompt for improving generated image. del refiner gc.collect() -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -511,8 +534,10 @@ select device from dropdown list for running inference using OpenVINO -Run Text2Image generation with Refinement -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Run Text2Image generation with Refinement +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 diff --git a/docs/notebooks/248-stable-diffusion-xl-with-output_files/index.html b/docs/notebooks/248-stable-diffusion-xl-with-output_files/index.html index 45534552f3d0de..c514cf1799702e 100644 --- a/docs/notebooks/248-stable-diffusion-xl-with-output_files/index.html +++ b/docs/notebooks/248-stable-diffusion-xl-with-output_files/index.html @@ -1,12 +1,12 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/248-stable-diffusion-xl-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/248-stable-diffusion-xl-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/248-stable-diffusion-xl-with-output_files/


../
-248-stable-diffusion-xl-with-output_10_3.jpg       31-Oct-2023 00:35               21574
-248-stable-diffusion-xl-with-output_10_3.png       31-Oct-2023 00:35              440317
-248-stable-diffusion-xl-with-output_18_3.jpg       31-Oct-2023 00:35               22767
-248-stable-diffusion-xl-with-output_18_3.png       31-Oct-2023 00:35              439143
-248-stable-diffusion-xl-with-output_29_2.jpg       31-Oct-2023 00:35               29349
-248-stable-diffusion-xl-with-output_29_2.png       31-Oct-2023 00:35              432689
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/248-stable-diffusion-xl-with-output_files/


../
+248-stable-diffusion-xl-with-output_10_3.jpg       07-Dec-2023 00:49               21574
+248-stable-diffusion-xl-with-output_10_3.png       07-Dec-2023 00:49              440317
+248-stable-diffusion-xl-with-output_18_3.jpg       07-Dec-2023 00:49               22767
+248-stable-diffusion-xl-with-output_18_3.png       07-Dec-2023 00:49              439143
+248-stable-diffusion-xl-with-output_29_2.jpg       07-Dec-2023 00:49               29349
+248-stable-diffusion-xl-with-output_29_2.png       07-Dec-2023 00:49              432689
 

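The Image2Image hunks above swap ``OVStableDiffusionXLPipeline`` for ``OVStableDiffusionXLImage2ImagePipeline`` so the already converted base model can be reused. A minimal sketch of that reuse, assuming the base model was previously saved locally with ``save_pretrained``; the directory name, input image, and generation parameters below are illustrative:

.. code:: python

    from pathlib import Path

    import numpy as np
    from PIL import Image
    from optimum.intel import OVStableDiffusionXLImage2ImagePipeline

    # Assumed location of the previously converted SDXL base model
    model_dir = Path("openvino-sd-xl-base-1.0")

    # Same converted weights as Text2Image, different pipeline class
    pipe = OVStableDiffusionXLImage2ImagePipeline.from_pretrained(model_dir, device="AUTO")

    init_image = Image.open("input.png")  # any starting image
    image = pipe(
        prompt="a castle on a hill, highly detailed fantasy art",
        image=init_image,
        strength=0.55,  # how far generation may drift from the source image
        generator=np.random.RandomState(42),  # numpy random state for reproducibility
    ).images[0]
    image.save("result.png")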
diff --git a/docs/notebooks/250-music-generation-with-output.rst b/docs/notebooks/250-music-generation-with-output.rst index 181a0111215d28..272c18651bbae3 100644 --- a/docs/notebooks/250-music-generation-with-output.rst +++ b/docs/notebooks/250-music-generation-with-output.rst @@ -32,6 +32,7 @@ library. **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Install requirements <#install-requirements>`__ @@ -72,8 +73,7 @@ Install requirements .. code:: ipython3 %pip install -q "openvino>=2023.1.0" - %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch onnx gradio ipywidgets - %pip install -q "transformers" + %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch onnx gradio ipywidgets transformers .. parsed-literal:: @@ -82,8 +82,6 @@ Install requirements Note: you may need to restart the kernel to use updated packages. DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 Note: you may need to restart the kernel to use updated packages. - DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 - Note: you may need to restart the kernel to use updated packages. Imports @@ -114,10 +112,10 @@ Imports .. parsed-literal:: - 2023-11-15 00:09:21.886779: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-11-15 00:09:21.920564: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-07 00:16:19.977472: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-07 00:16:20.011221: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-11-15 00:09:22.467173: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-07 00:16:20.555535: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT MusicGen in HF Transformers @@ -191,7 +189,7 @@ vocabulary. It helps the model understand the context of a sentence. @@ -401,13 +399,13 @@ wrapper class with its ``forward()`` method calling .. 
parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/symbolic_opset9.py:4315: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/symbolic_opset9.py:4315: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with LSTM can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model. warnings.warn( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/_internal/jit_utils.py:258: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/_internal/jit_utils.py:258: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:687: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:687: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) _C._jit_pass_onnx_graph_shape_type_inference( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:1178: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torch/onnx/utils.py:1178: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. 
(Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.) _C._jit_pass_onnx_graph_shape_type_inference( @@ -643,7 +641,7 @@ We can now infer the pipeline backed by OpenVINO models. diff --git a/docs/notebooks/251-tiny-sd-image-generation-with-output.rst b/docs/notebooks/251-tiny-sd-image-generation-with-output.rst index 083da6fe58081c..7589f656b5f246 100644 --- a/docs/notebooks/251-tiny-sd-image-generation-with-output.rst +++ b/docs/notebooks/251-tiny-sd-image-generation-with-output.rst @@ -43,9 +43,8 @@ The notebook contains the following steps: - `Prerequisites <#prerequisites>`__ -- `Create PyTorch Models - pipeline <#create-pytorch-models-pipeline>`__ -- `Convert models to OpenVINO Intermediate representation (IR) +- `Create PyTorch Models pipeline <#create-pytorch-models-pipeline>`__ +- `Convert models to OpenVINO Intermediate representation format <#convert-models-to-openvino-intermediate-representation-format>`__ - `Text Encoder <#text-encoder>`__ @@ -53,26 +52,27 @@ The notebook contains the following steps: - `VAE <#vae>`__ - `Prepare Inference Pipeline <#prepare-inference-pipeline>`__ -- `Configure Inference - Pipeline <#configure-inference-pipeline>`__ +- `Configure Inference Pipeline <#configure-inference-pipeline>`__ - `Text-to-Image generation <#text-to-image-generation>`__ - `Image-to-Image generation <#image-to-image-generation>`__ + - `Interactive Demo <#interactive-demo>`__ + +Prerequisites +------------- -- `Interactive Demo <#interactive-demo>`__ -Prerequisites -------------------------------------------------------- Install required dependencies .. code:: ipython3 - %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch torchvision - %pip -q install "openvino>=2023.1.0" "diffusers>=0.18.0" "transformers>=4.30.2" "gradio" + %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu torch torchvision "openvino>=2023.1.0" "diffusers>=0.18.0" "transformers>=4.30.2" "gradio" + +Create PyTorch Models pipeline +------------------------------ + -Create PyTorch Models pipeline ------------------------------------------------------------------------- ``StableDiffusionPipeline`` is an end-to-end inference pipeline that you can use to generate images from text with just a few lines of code. @@ -121,8 +121,10 @@ First, load the pre-trained weights of all components of the model. -Convert models to OpenVINO Intermediate representation format -------------------------------------------------------------------------------------------------------- +Convert models to OpenVINO Intermediate representation format +------------------------------------------------------------- + + OpenVINO supports PyTorch through conversion to OpenVINO Intermediate Representation (IR) format. To take the advantage of OpenVINO @@ -150,8 +152,10 @@ The model consists of three important parts: Let us convert each part. -Text Encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text Encoder +~~~~~~~~~~~~ + + The text-encoder is responsible for transforming the input prompt, for example, “a photo of an astronaut riding a horse” into an embedding @@ -220,8 +224,10 @@ hidden states. -U-net -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +U-net +~~~~~ + + U-net model has three inputs: @@ -297,8 +303,10 @@ Model predicts the ``sample`` state for the next step. -VAE -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +VAE +~~~ + + The VAE model has two parts, an encoder and a decoder. 
The encoder is used to convert the image into a low dimensional latent representation, @@ -409,8 +417,10 @@ of the pipeline, it will be better to convert them to separate models. -Prepare Inference Pipeline --------------------------------------------------------------------- +Prepare Inference Pipeline +-------------------------- + + Putting it all together, let us now take a closer look at how the model works in inference by illustrating the logical flow. @@ -810,8 +820,10 @@ of the variational auto encoder. return timesteps, num_inference_steps - t_start -Configure Inference Pipeline ----------------------------------------------------------------------- +Configure Inference Pipeline +---------------------------- + + First, you should create instances of OpenVINO Model. @@ -882,8 +894,10 @@ Let us define them and put all components together scheduler=lms ) -Text-to-Image generation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text-to-Image generation +~~~~~~~~~~~~~~~~~~~~~~~~ + + Now, let’s see model in action @@ -951,8 +965,10 @@ Now is show time! Nice. As you can see, the picture has quite a high definition 🔥. -Image-to-Image generation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Image-to-Image generation +~~~~~~~~~~~~~~~~~~~~~~~~~ + + One of the most amazing features of Stable Diffusion model is the ability to condition image generation from an existing image or sketch. @@ -1052,8 +1068,10 @@ found in this .. image:: 251-tiny-sd-image-generation-with-output_files/251-tiny-sd-image-generation-with-output_39_1.png -Interactive Demo -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Interactive Demo +~~~~~~~~~~~~~~~~ + + .. code:: ipython3 diff --git a/docs/notebooks/251-tiny-sd-image-generation-with-output_files/index.html b/docs/notebooks/251-tiny-sd-image-generation-with-output_files/index.html index 98e68a3c493cfe..395cb0490dd636 100644 --- a/docs/notebooks/251-tiny-sd-image-generation-with-output_files/index.html +++ b/docs/notebooks/251-tiny-sd-image-generation-with-output_files/index.html @@ -1,12 +1,12 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/251-tiny-sd-image-generation-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/251-tiny-sd-image-generation-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/251-tiny-sd-image-generation-with-output_files/


../
-251-tiny-sd-image-generation-with-output_33_1.jpg  31-Oct-2023 00:35               40294
-251-tiny-sd-image-generation-with-output_33_1.png  31-Oct-2023 00:35              434441
-251-tiny-sd-image-generation-with-output_37_1.jpg  31-Oct-2023 00:35               84339
-251-tiny-sd-image-generation-with-output_37_1.png  31-Oct-2023 00:35              770190
-251-tiny-sd-image-generation-with-output_39_1.jpg  31-Oct-2023 00:35               50437
-251-tiny-sd-image-generation-with-output_39_1.png  31-Oct-2023 00:35              699062
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/251-tiny-sd-image-generation-with-output_files/
+../
+251-tiny-sd-image-generation-with-output_33_1.jpg  07-Dec-2023 00:49               40294
+251-tiny-sd-image-generation-with-output_33_1.png  07-Dec-2023 00:49              434441
+251-tiny-sd-image-generation-with-output_37_1.jpg  07-Dec-2023 00:49               84339
+251-tiny-sd-image-generation-with-output_37_1.png  07-Dec-2023 00:49              770190
+251-tiny-sd-image-generation-with-output_39_1.jpg  07-Dec-2023 00:49               50437
+251-tiny-sd-image-generation-with-output_39_1.png  07-Dec-2023 00:49              699062
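
The conversion and inference steps diffed above all follow the same ``ov.convert_model`` pattern. A minimal sketch of that pattern, with a toy module and shapes as illustrative stand-ins (they are not the notebook's actual text encoder, U-Net, or VAE):

.. code:: ipython3

    import numpy as np
    import torch
    import openvino as ov

    # Illustrative stand-in for one pipeline component (e.g. the VAE decoder).
    class ToyModule(torch.nn.Module):
        def forward(self, x):
            return torch.nn.functional.relu(x)

    # Tracing-based conversion: example_input fixes the input rank and dtype.
    ov_model = ov.convert_model(ToyModule(), example_input=torch.zeros(1, 4, 64, 64))
    ov.save_model(ov_model, "toy_module.xml")  # weights are stored as FP16 by default

    # Compile the saved IR and run a single inference.
    compiled = ov.Core().compile_model("toy_module.xml", "CPU")
    result = compiled(np.zeros((1, 4, 64, 64), dtype=np.float32))[compiled.output(0)]
    print(result.shape)

Converting each pipeline part to a separate IR file, as the notebook does, keeps the scheduler loop in Python while every heavy model runs through the OpenVINO runtime.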
 

diff --git a/docs/notebooks/254-llm-chatbot-with-output.rst b/docs/notebooks/254-llm-chatbot-with-output.rst index 78d6200e20da66..77e30616a64be5 100644 --- a/docs/notebooks/254-llm-chatbot-with-output.rst +++ b/docs/notebooks/254-llm-chatbot-with-output.rst @@ -18,7 +18,7 @@ accuracy. Previously, we already discussed how to build an instruction-following pipeline using OpenVINO and Optimum Intel, please check out `Dolly -example <240-dolly-2-instruction-following-with-output.html>`__ for reference. In this +example <../240-dolly-2-instruction-following>`__ for reference. In this tutorial, we consider how to use the power of OpenVINO for running Large Language Models for chat. We will use a pre-trained model from the `Hugging Face @@ -40,16 +40,22 @@ The tutorial consists of the following steps: **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Select model for inference <#select-model-for-inference>`__ -- `login to huggingfacehub to get access to pretrained model <#login-to-huggingfacehub-to-get-access-to-pretrained-model>`__ -- `Instantiate Model using Optimum Intel <#instantiate-model-using-optimum-intel>`__ +- `login to huggingfacehub to get access to pretrained + model <#login-to-huggingfacehub-to-get-access-to-pretrained-model>`__ +- `Instantiate Model using Optimum + Intel <#instantiate-model-using-optimum-intel>`__ - `Compress model weights <#compress-model-weights>`__ - - `Weights Compression using Optimum Intel <#weights-compression-using-optimum-intel>`__ - - `Weights Compression using NNCF <#weights-compression-using-nncf>`__ + - `Weights Compression using Optimum + Intel <#weights-compression-using-optimum-intel>`__ + - `Weights Compression using + NNCF <#weights-compression-using-nncf>`__ -- `Select device for inference and model variant <#select-device-for-inference-and-model-variant>`__ +- `Select device for inference and model + variant <#select-device-for-inference-and-model-variant>`__ - `Run Chatbot <#run-chatbot>`__ Prerequisites @@ -62,12 +68,12 @@ Install required dependencies .. code:: ipython3 %pip uninstall -q -y openvino-dev openvino openvino-nightly - %pip install -q openvino-nightly %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu\ "git+https://github.com/huggingface/optimum-intel.git"\ - "git+https://github.com/openvinotoolkit/nncf.git@release_v270"\ + "nncf>=2.7"\ + "openvino-nightly"\ "gradio"\ - "onnx" "einops" "transformers>=4.34.0"\ + "onnx" "einops" "transformers_stream_generator" "tiktoken" "transformers>=4.34.0" Select model for inference -------------------------- @@ -81,6 +87,18 @@ from user side and at least 64GB RAM for conversion. The available options are: +- **tiny-llama-1b-chat** - This is the chat model finetuned on top of + `TinyLlama/TinyLlama-1.1B-intermediate-step-955k-2T `__. + The TinyLlama project aims to pretrain a 1.1B Llama model on 3 + trillion tokens with the adoption of the same architecture and + tokenizer as Llama 2. This means TinyLlama can be plugged and played + in many open-source projects built upon Llama. Besides, TinyLlama is + compact with only 1.1B parameters. This compactness allows it to + cater to a multitude of applications demanding a restricted + computation and memory footprint. More details about model can be + found in `model + card `__ + - **red-pajama-3b-chat** - A 2.8B parameter pre-trained language model based on GPT-NEOX architecture. It was developed by Together Computer and leaders from the open-source AI community. 
The model is @@ -109,10 +127,8 @@ The available options are: following code: .. code:: python - :force: ## login to huggingfacehub to get access to pretrained model - from huggingface_hub import notebook_login, whoami try: @@ -142,6 +158,23 @@ The available options are: `repository `__ and `HuggingFace model card `__. +- **qwen-7b-chat** - Qwen-7B is the 7B-parameter version of the large + language model series, Qwen (abbr. Tongyi Qianwen), proposed by + Alibaba Cloud. Qwen-7B is a Transformer-based large language model, + which is pretrained on a large volume of data, including web texts, + books, codes, etc. For more details about Qwen, please refer to the + `GitHub `__ code repository. +- **chatglm2-6b** - ChatGLM2-6B is the second-generation version of the + open-source bilingual (Chinese-English) chat model + `ChatGLM-6B `__. It retains the + smooth conversation flow and low deployment threshold of the + first-generation model +- **mistral-7b** - The Mistral-7B-v0.1 Large Language Model (LLM) is a + pretrained generative text model with 7 billion parameters. You can + find more details about model in the `model + card `__, + `paper `__ and `release blog + post `__. - **zephyr-7b-beta** - Zephyr is a series of language models that are trained to act as helpful assistants. Zephyr-7B-beta is the second model in the series, and is a fine-tuned version of @@ -152,6 +185,26 @@ The available options are: details about model in `technical report `__ and `HuggingFace model card `__. +- **neural-chat-7b-v3-1** - Mistral-7b model fine-tuned using Intel + Gaudi. The model fine-tuned on the open source dataset + `Open-Orca/SlimOrca `__ + and aligned with `Direct Preference Optimization (DPO) + algorithm `__. More details can be + found in `model + card `__ and `blog + post `__. +- **notus-7b-v1** - Notus is a collection of fine-tuned models using + `Direct Preference Optimization + (DPO) `__. and related + `RLHF `__ techniques. This model is + the first version, fine-tuned with DPO over zephyr-7b-sft. Following + a data-first approach, the only difference between Notus-7B-v1 and + Zephyr-7B-beta is the preference dataset used for dDPO. Proposed + approach for dataset creation helps to effectively fine-tune Notus-7b + that surpasses Zephyr-7B-beta and Claude 2 on + `AlpacaEval `__. More + details about model can be found in `model + card `__. .. code:: ipython3 @@ -164,8 +217,8 @@ The available options are: model_id = widgets.Dropdown( options=model_ids, - value=model_ids[-1], - description='Model:', + value=model_ids[0], + description="Model:", disabled=False, ) @@ -176,7 +229,7 @@ The available options are: .. parsed-literal:: - Dropdown(description='Model:', index=3, options=('red-pajama-3b-chat', 'llama-2-chat-7b', 'mpt-7b-chat', 'zeph… + Dropdown(description='Model:', options=('tiny-llama-1b-chat', 'red-pajama-3b-chat', 'llama-2-chat-7b', 'mpt-7b… @@ -188,7 +241,7 @@ The available options are: .. parsed-literal:: - Selected model zephyr-7b-beta + Selected model tiny-llama-1b-chat Instantiate Model using Optimum Intel @@ -240,99 +293,23 @@ hidden states for the current step as output. It means for all next iterations, it is enough to provide only a new token obtained from the previous step and cached key values to get the next token prediction. -In our case, MPT model currently is not covered by Optimum Intel, we -will convert it manually and create wrapper compatible with Optimum -Intel. - -Below is some code required for MPT conversion. 
+In our case, MPT, Qwen and ChatGLM model currently is not covered by +Optimum Intel, we will convert it manually and create wrapper compatible +with Optimum Intel. .. code:: ipython3 - from functools import wraps - import torch - from transformers import AutoModelForCausalLM - from nncf import compress_weights + from transformers import AutoModelForCausalLM, AutoConfig + from optimum.intel import OVQuantizer + from optimum.intel.openvino import OVModelForCausalLM import openvino as ov from pathlib import Path - from typing import Optional, Union, Dict, Tuple, List - - def flattenize_inputs(inputs): - """ - Helper function for making nested inputs flattens - """ - flatten_inputs = [] - for input_data in inputs: - if input_data is None: - continue - if isinstance(input_data, (list, tuple)): - flatten_inputs.extend(flattenize_inputs(input_data)) - else: - flatten_inputs.append(input_data) - return flatten_inputs - - def cleanup_torchscript_cache(): - """ - Helper for removing cached model representation - """ - torch._C._jit_clear_class_registry() - torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() - torch.jit._state._clear_class_state() - - def convert_mpt(pt_model:torch.nn.Module, model_path:Path): - """ - MPT model conversion function - - Params: - pt_model: PyTorch model - model_path: path for saving model - Returns: - None - """ - ov_out_path = Path(model_path) / "openvino_model.xml" - pt_model.config.save_pretrained(ov_out_path.parent) - pt_model.config.use_cache = True - outs = pt_model(input_ids=torch.ones((1, 10), dtype=torch.long), attention_mask=torch.ones((1, 10), dtype=torch.long)) - inputs = ["input_ids"] - outputs = ["logits"] - - dynamic_shapes = {"input_ids": {1: "seq_len"}, "attention_mask": {1: "seq_len"}} - for idx in range(len(outs.past_key_values)): - inputs.extend([f"past_key_values.{idx}.key", f"past_key_values.{idx}.value"]) - dynamic_shapes[inputs[-1]] = {2: "past_sequence + sequence"} - dynamic_shapes[inputs[-2]] = {3: "past_sequence + sequence"} - outputs.extend([f"present.{idx}.key", f"present.{idx}.value"]) - - inputs.append("attention_mask") - dummy_inputs = {"input_ids": torch.ones((1,2), dtype=torch.long), "past_key_values": outs.past_key_values, "attention_mask": torch.ones((1,12), dtype=torch.long)} - pt_model.config.torchscript = True - orig_forward = pt_model.forward - @wraps(orig_forward) - def ts_patched_forward(input_ids: torch.Tensor, past_key_values: Tuple[Tuple[torch.Tensor]], attention_mask: torch.Tensor): - pkv_list = list(past_key_values) - outs = orig_forward(input_ids=input_ids, past_key_values=pkv_list, attention_mask=attention_mask) - return (outs.logits, tuple(outs.past_key_values)) - pt_model.forward = ts_patched_forward - ov_model = ov.convert_model(pt_model, example_input=dummy_inputs) - pt_model.forward = orig_forward - for inp_name, m_input, input_data in zip(inputs, ov_model.inputs, flattenize_inputs(dummy_inputs.values())): - input_node = m_input.get_node() - if input_node.element_type == ov.Type.dynamic: - m_input.get_node().set_element_type(ov.Type.f32) - shape = list(input_data.shape) - if inp_name in dynamic_shapes: - for k in dynamic_shapes[inp_name]: - shape[k] = -1 - input_node.set_partial_shape(ov.PartialShape(shape)) - m_input.get_tensor().set_names({inp_name}) - - for out, out_name in zip(ov_model.outputs, outputs): - out.get_tensor().set_names({out_name}) - - ov_model.validate_nodes_and_infer_types() - ov.save_model(ov_model, ov_out_path) - del ov_model - cleanup_torchscript_cache() - del 
pt_model + import shutil + import torch + import logging + import nncf + import gc + from converter import converters .. parsed-literal:: @@ -340,8 +317,13 @@ Below is some code required for MPT conversion. INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, onnx, openvino -Compress model weights ----------------------------------------------------------------- +.. parsed-literal:: + + No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' + + +Compress model weights +---------------------- @@ -353,8 +335,8 @@ larger than the size of activations, for example, Large Language Models performance even more, but introduces a minor drop in prediction quality. -Weights Compression using Optimum Intel -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Weights Compression using Optimum Intel +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -374,8 +356,8 @@ LLAMA and Zephyr examples. **Note**: There may be no speedup for INT4/INT8 compressed models on dGPU. -Weights Compression using NNCF -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Weights Compression using NNCF +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -393,22 +375,20 @@ will consider this variant based on MPT model. from IPython.display import display - # TODO: red-pajama-3b-chat currently can't be compiled in INT4 or FP16 due to ticket 123973 - is_pajama_model = model_id.value == 'red-pajama-3b-chat' prepare_int4_model = widgets.Checkbox( - value=True and not is_pajama_model, - description='Prepare INT4 model', - disabled=is_pajama_model, + value=True, + description="Prepare INT4 model", + disabled=False, ) prepare_int8_model = widgets.Checkbox( - value=False or is_pajama_model, - description='Prepare INT8 model', + value=False, + description="Prepare INT8 model", disabled=False, ) prepare_fp16_model = widgets.Checkbox( value=False, - description='Prepare FP16 model', - disabled=is_pajama_model, + description="Prepare FP16 model", + disabled=False, ) display(prepare_int4_model) @@ -438,138 +418,231 @@ We can now save floating point and compressed model variants .. 
code:: ipython3 - from pathlib import Path - from optimum.intel import OVQuantizer - from optimum.intel.openvino import OVModelForCausalLM - import shutil - import logging - import nncf - import gc - nncf.set_log_level(logging.ERROR) pt_model_id = model_configuration["model_id"] + pt_model_name = model_id.value.split("-")[0] + model_type = AutoConfig.from_pretrained(pt_model_id, trust_remote_code=True).model_type fp16_model_dir = Path(model_id.value) / "FP16" int8_model_dir = Path(model_id.value) / "INT8_compressed_weights" int4_model_dir = Path(model_id.value) / "INT4_compressed_weights" + def convert_to_fp16(): if (fp16_model_dir / "openvino_model.xml").exists(): return - if "mpt" not in model_id.value: - ov_model = OVModelForCausalLM.from_pretrained(pt_model_id, export=True, compile=False) + if not model_configuration["remote"]: + ov_model = OVModelForCausalLM.from_pretrained( + pt_model_id, export=True, compile=False + ) ov_model.half() ov_model.save_pretrained(fp16_model_dir) del ov_model else: - model = AutoModelForCausalLM.from_pretrained(model_configuration["model_id"], torch_dtype=torch.float32, trust_remote_code=True) - convert_mpt(model, fp16_model_dir) + model_kwargs = {} + if "revision" in model_configuration: + model_kwargs["revision"] = model_configuration["revision"] + model = AutoModelForCausalLM.from_pretrained( + model_configuration["model_id"], + torch_dtype=torch.float32, + trust_remote_code=True, + **model_kwargs + ) + converters[pt_model_name](model, fp16_model_dir) del model gc.collect() + def convert_to_int8(): if (int8_model_dir / "openvino_model.xml").exists(): return - if "mpt" not in model_id.value: - if not fp16_model_dir.exists(): - ov_model = OVModelForCausalLM.from_pretrained(pt_model_id, export=True, compile=False) - ov_model.half() - else: + int8_model_dir.mkdir(parents=True, exist_ok=True) + if not model_configuration["remote"]: + if fp16_model_dir.exists(): ov_model = OVModelForCausalLM.from_pretrained(fp16_model_dir, compile=False) + else: + ov_model = OVModelForCausalLM.from_pretrained( + pt_model_id, export=True, compile=False + ) + ov_model.half() quantizer = OVQuantizer.from_pretrained(ov_model) quantizer.quantize(save_directory=int8_model_dir, weights_only=True) del quantizer del ov_model else: convert_to_fp16() - model = ov.Core().read_model(fp16_model_dir / 'openvino_model.xml') - compressed_model = compress_weights(model) + ov_model = ov.Core().read_model(fp16_model_dir / "openvino_model.xml") + shutil.copy(fp16_model_dir / "config.json", int8_model_dir / "config.json") + configuration_file = fp16_model_dir / f"configuration_{model_type}.py" + if configuration_file.exists(): + shutil.copy( + configuration_file, int8_model_dir / f"configuration_{model_type}.py" + ) + compressed_model = nncf.compress_weights(ov_model) ov.save_model(compressed_model, int8_model_dir / "openvino_model.xml") - shutil.copy(fp16_model_dir / 'config.json', int8_model_dir / 'config.json') - del model + del ov_model del compressed_model gc.collect() - def convert_to_int4(group_size, ratio): - if (int4_model_dir / "openvino_model").exists(): + def convert_to_int4(): + compression_configs = { + "zephyr-7b-beta": { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "group_size": 64, + "ratio": 0.6, + }, + "mistral-7b": { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "group_size": 64, + "ratio": 0.6, + }, + "notus-7b-v1": { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "group_size": 64, + "ratio": 0.6, + }, + "neural-chat-7b-v3-1": { + "mode": nncf.CompressWeightsMode.INT4_SYM, 
+ "group_size": 64, + "ratio": 0.6, + }, + "llama-2-chat-7b": { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "group_size": 128, + "ratio": 0.8, + }, + "chatglm2-6b": { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "group_size": 128, + "ratio": 0.72, + "ignored_scope": nncf.IgnoredScope(["__module.transformer/aten::index_67/Gather"]) + }, + "qwen-7b-chat": { + "mode": nncf.CompressWeightsMode.INT4_SYM, + "group_size": 128, + "ratio": 0.6 + }, + 'red-pajama-3b-chat': { + "mode": nncf.CompressWeightsMode.INT4_ASYM, + "group_size": 128, + "ratio": 0.5, + }, + "default": { + "mode": nncf.CompressWeightsMode.INT4_ASYM, + "group_size": 128, + "ratio": 0.8, + }, + } + + model_compression_params = compression_configs.get( + model_id.value, compression_configs["default"] + ) + if (int4_model_dir / "openvino_model.xml").exists(): return int4_model_dir.mkdir(parents=True, exist_ok=True) - if "mpt" not in model_id.value: - # TODO: remove compression via NNCF for non-MPT models when INT4 weight compression is added to optimum-intel + if not model_configuration["remote"]: if not fp16_model_dir.exists(): - model = OVModelForCausalLM.from_pretrained(pt_model_id, export=True, compile=False) - model.half() + model = OVModelForCausalLM.from_pretrained( + pt_model_id, export=True, compile=False + ).half() + model.config.save_pretrained(int4_model_dir) + ov_model = model.model + del model + gc.collect() else: - model = OVModelForCausalLM.from_pretrained(fp16_model_dir, compile=False) - model.config.save_pretrained(int4_model_dir) - ov_model = model.model - del model + ov_model = ov.Core().read_model(fp16_model_dir / "openvino_model.xml") + shutil.copy(fp16_model_dir / "config.json", int4_model_dir / "config.json") + else: convert_to_fp16() - ov_model = ov.Core().read_model(fp16_model_dir / 'openvino_model.xml') - shutil.copy(fp16_model_dir / 'config.json', int4_model_dir / 'config.json') - compressed_model = nncf.compress_weights(ov_model, mode=nncf.CompressWeightsMode.INT4_ASYM, group_size=group_size, ratio=ratio) - ov.save_model(compressed_model, int4_model_dir / 'openvino_model.xml') + ov_model = ov.Core().read_model(fp16_model_dir / "openvino_model.xml") + shutil.copy(fp16_model_dir / "config.json", int4_model_dir / "config.json") + configuration_file = fp16_model_dir / f"configuration_{model_type}.py" + if configuration_file.exists(): + shutil.copy( + configuration_file, int4_model_dir / f"configuration_{model_type}.py" + ) + compressed_model = nncf.compress_weights(ov_model, **model_compression_params) + ov.save_model(compressed_model, int4_model_dir / "openvino_model.xml") del ov_model del compressed_model gc.collect() + if prepare_fp16_model.value: - print("Apply weights compression to FP16 format") convert_to_fp16() if prepare_int8_model.value: - print("Apply weights compression to INT8 format") convert_to_int8() if prepare_int4_model.value: - print("Apply weights compression to INT4 format") - convert_to_int4(group_size=128, ratio=0.8) + convert_to_int4() + .. parsed-literal:: - No CUDA runtime is found, using CUDA_HOME='/usr/local/cuda' + config.json: 0%| | 0.00/699 [00:00 True - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/transformers/models/mistral/modeling_mistral.py:795: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! 
- if input_shape[-1] > 1: - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/transformers/models/mistral/modeling_mistral.py:91: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /home/ea/work/genai_env/lib/python3.8/site-packages/transformers/modeling_attn_mask_utils.py:94: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + if (input_shape[-1] > 1 or self.sliding_window is not None) and self.is_causal: + /home/ea/work/genai_env/lib/python3.8/site-packages/optimum/exporters/onnx/model_patcher.py:392: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if past_key_values_length > 0: - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/transformers/models/mistral/modeling_mistral.py:157: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /home/ea/work/genai_env/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py:140: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if seq_len > self.max_seq_len_cached: - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/transformers/models/mistral/modeling_mistral.py:288: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /home/ea/work/genai_env/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py:392: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/transformers/models/mistral/modeling_mistral.py:295: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /home/ea/work/genai_env/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py:399: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. 
We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/transformers/models/mistral/modeling_mistral.py:306: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /home/ea/work/genai_env/lib/python3.8/site-packages/transformers/models/llama/modeling_llama.py:409: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): @@ -624,21 +697,25 @@ Let’s compare model size for different compression types int4_weights = int4_model_dir / "openvino_model.bin" if fp16_weights.exists(): - print(f'Size of FP16 model is {fp16_weights.stat().st_size / 1024 / 1024:.2f} MB') + print(f"Size of FP16 model is {fp16_weights.stat().st_size / 1024 / 1024:.2f} MB") for precision, compressed_weights in zip([8, 4], [int8_weights, int4_weights]): if compressed_weights.exists(): - print(f'Size of model with INT{precision} compressed weights is {compressed_weights.stat().st_size / 1024 / 1024:.2f} MB') + print( + f"Size of model with INT{precision} compressed weights is {compressed_weights.stat().st_size / 1024 / 1024:.2f} MB" + ) if compressed_weights.exists() and fp16_weights.exists(): - print(f"Compression rate for INT{precision} model: {fp16_weights.stat().st_size / compressed_weights.stat().st_size:.3f}") + print( + f"Compression rate for INT{precision} model: {fp16_weights.stat().st_size / compressed_weights.stat().st_size:.3f}" + ) .. parsed-literal:: - Size of model with INT4 compressed weights is 4374.50 MB + Size of model with INT4 compressed weights is 696.99 MB -Select device for inference and model variant ---------------------------------------------------------------------------------------- +Select device for inference and model variant +--------------------------------------------- @@ -650,8 +727,8 @@ Select device for inference and model variant core = ov.Core() device = widgets.Dropdown( options=core.available_devices + ["AUTO"], - value='CPU', - description='Device:', + value="CPU", + description="Device:", disabled=False, ) @@ -662,160 +739,16 @@ Select device for inference and model variant .. parsed-literal:: - Dropdown(description='Device:', options=('CPU', 'GPU', 'AUTO'), value='CPU') + Dropdown(description='Device:', options=('CPU', 'GPU.0', 'GPU.1', 'AUTO'), value='CPU') -The cell below create ``OVMPTModel`` model wrapper based on -``OVModelForCausalLM`` model. +The cell below create ``OVMPTModel``, ``OVQWENModel`` and +``OVCHATGLM2Model`` wrapper based on ``OVModelForCausalLM`` model. .. 
code:: ipython3 - from transformers import AutoConfig, PretrainedConfig - import torch - - from optimum.utils import NormalizedTextConfig, NormalizedConfigManager - from transformers.modeling_outputs import CausalLMOutputWithPast - from optimum.intel.openvino.utils import OV_XML_FILE_NAME - import numpy as np - from pathlib import Path - - - class OVMPTModel(OVModelForCausalLM): - """ - Optimum intel compatible model wrapper for MPT - """ - def __init__( - self, - model: "Model", - config: "PretrainedConfig" = None, - device: str = "CPU", - dynamic_shapes: bool = True, - ov_config: Optional[Dict[str, str]] = None, - model_save_dir: Optional[Union[str, Path]] = None, - **kwargs, - ): - NormalizedConfigManager._conf["mpt"] = NormalizedTextConfig.with_args(num_layers="n_layers", num_attention_heads="n_heads") - super().__init__(model, config, device, dynamic_shapes, ov_config, model_save_dir, **kwargs) - - def _reshape( - self, - model: "Model", - *args, - **kwargs - ): - shapes = {} - for inputs in model.inputs: - shapes[inputs] = inputs.get_partial_shape() - if shapes[inputs].rank.get_length() in [2, 3]: - shapes[inputs][1] = -1 - else: - if ".key" in inputs.get_any_name(): - shapes[inputs][3] = -1 - else: - shapes[inputs][2] = -1 - - model.reshape(shapes) - return model - - def forward( - self, - input_ids: torch.LongTensor, - attention_mask: Optional[torch.LongTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - **kwargs, - ) -> CausalLMOutputWithPast: - self.compile() - - if self.use_cache and past_key_values is not None: - input_ids = input_ids[:, -1:] - - inputs = {} - if past_key_values is not None: - # Flatten the past_key_values - past_key_values = tuple( - past_key_value for pkv_per_layer in past_key_values for past_key_value in pkv_per_layer - ) - # Add the past_key_values to the decoder inputs - inputs = dict(zip(self.key_value_input_names, past_key_values)) - - # Create empty past_key_values for decoder_with_past first generation step - elif self.use_cache: - shape_input_ids = input_ids.shape - num_attention_heads = ( - self.normalized_config.num_attention_heads if self.config.model_type == "bloom" else 1 - ) - for input_name in self.key_value_input_names: - model_inputs = self.model.input(input_name) - shape = model_inputs.get_partial_shape() - shape[0] = shape_input_ids[0] * num_attention_heads - if shape[2].is_dynamic: - shape[2] = 0 - if shape[1].is_dynamic: - shape[1] = 0 - if shape.rank.get_length() == 4 and shape[3].is_dynamic: - shape[3] = 0 - inputs[input_name] = ov.Tensor(model_inputs.get_element_type(), shape.get_shape()) - - inputs["input_ids"] = np.array(input_ids) - - # Add the attention_mask inputs when needed - if "attention_mask" in self.input_names and attention_mask is not None: - inputs["attention_mask"] = np.array(attention_mask) - - # Run inference - self.request.start_async(inputs, shared_memory=True) - self.request.wait() - - logits = torch.from_numpy(self.request.get_tensor("logits").data).to(self.device) - - if self.use_cache: - # Tuple of length equal to : number of layer * number of past_key_value per decoder layer (2 corresponds to the self-attention layer) - past_key_values = tuple(self.request.get_tensor(key).data for key in self.key_value_output_names) - # Tuple of tuple of length `n_layers`, with each tuple of length equal to 2 (k/v of self-attention) - past_key_values = tuple( - past_key_values[i : i + self.num_pkv] for i in range(0, len(past_key_values), self.num_pkv) - ) - else: - past_key_values = None - - 
return CausalLMOutputWithPast(logits=logits, past_key_values=past_key_values) - - @classmethod - def _from_pretrained( - cls, - model_id: Union[str, Path], - config: PretrainedConfig, - use_auth_token: Optional[Union[bool, str, None]] = None, - revision: Optional[Union[str, None]] = None, - force_download: bool = False, - cache_dir: Optional[str] = None, - file_name: Optional[str] = None, - subfolder: str = "", - from_onnx: bool = False, - local_files_only: bool = False, - load_in_8bit: bool = False, - **kwargs, - ): - model_path = Path(model_id) - default_file_name = OV_XML_FILE_NAME - file_name = file_name or default_file_name - - model_cache_path = cls._cached_file( - model_path=model_path, - use_auth_token=use_auth_token, - revision=revision, - force_download=force_download, - cache_dir=cache_dir, - file_name=file_name, - subfolder=subfolder, - local_files_only=local_files_only, - ) - - model = cls.load_model(model_cache_path, load_in_8bit=load_in_8bit) - init_cls = OVMPTModel - - return init_cls(model=model, config=config, model_save_dir=model_cache_path.parent, **kwargs) + from ovmodel import model_classes The cell below demonstrates how to instantiate model based on selected variant of model weights and inference device @@ -833,8 +766,9 @@ variant of model weights and inference device model_to_run = widgets.Dropdown( options=available_models, value=available_models[0], - description='Model to run:', - disabled=False) + description="Model to run:", + disabled=False, + ) model_to_run @@ -849,8 +783,6 @@ variant of model weights and inference device .. code:: ipython3 - from pathlib import Path - from optimum.intel.openvino import OVModelForCausalLM from transformers import AutoTokenizer if model_to_run.value == "INT4": @@ -860,24 +792,34 @@ variant of model weights and inference device else: model_dir = fp16_model_dir print(f"Loading model from {model_dir}") - model_name = model_configuration["model_id"] - ov_config = {'PERFORMANCE_HINT': 'LATENCY', 'NUM_STREAMS': '1', "CACHE_DIR": ""} + model_name = model_configuration["model_id"] + class_key = model_id.value.split("-")[0] + ov_config = {"PERFORMANCE_HINT": "LATENCY", "NUM_STREAMS": "1", "CACHE_DIR": ""} tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) - model_class = OVModelForCausalLM if "mpt" not in model_id.value else OVMPTModel - ov_model = model_class.from_pretrained(model_dir, device=device.value, ov_config=ov_config, config=AutoConfig.from_pretrained(model_dir, trust_remote_code=True), trust_remote_code=True) + model_class = ( + OVModelForCausalLM + if not model_configuration["remote"] + else model_classes[class_key] + ) + ov_model = model_class.from_pretrained( + model_dir, + device=device.value, + ov_config=ov_config, + config=AutoConfig.from_pretrained(model_dir, trust_remote_code=True), + trust_remote_code=True, + ) .. parsed-literal:: - Loading model from zephyr-7b-beta/INT4_compressed_weights + Loading model from tiny-llama-1b-chat/INT4_compressed_weights .. parsed-literal:: - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. The argument `trust_remote_code` is to be used along with export=True. It will be ignored. Compiling the model to CPU ... 
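
The INT4 directory loaded above is produced by ``nncf.compress_weights`` with the per-model settings collected in ``compression_configs`` earlier in this diff. A self-contained sketch of that call, with a toy two-layer network standing in for a real LLM (the shapes, output path, and the 64/0.6 values here are illustrative only):

.. code:: ipython3

    import torch
    import nncf
    import openvino as ov

    # Toy stand-in for a converted LLM; the Linear weight matrices are
    # what compress_weights quantizes.
    toy = torch.nn.Sequential(
        torch.nn.Linear(512, 512),
        torch.nn.ReLU(),
        torch.nn.Linear(512, 512),
    )
    ov_model = ov.convert_model(toy, example_input=torch.zeros(1, 512))

    # INT4_SYM: symmetric 4-bit weights; group_size=64 gives one scale per 64
    # consecutive weights; ratio=0.6 leaves roughly 40% of weights in INT8.
    compressed = nncf.compress_weights(
        ov_model,
        mode=nncf.CompressWeightsMode.INT4_SYM,
        group_size=64,
        ratio=0.6,
    )
    ov.save_model(compressed, "toy_int4.xml")

Smaller ``group_size`` and lower ``ratio`` trade compression for accuracy, which is why the 7B chat models above use different values than the ``default`` entry.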
@@ -888,18 +830,19 @@ variant of model weights and inference device test_string = "2 + 2 =" input_tokens = tok(test_string, return_tensors="pt", **tokenizer_kwargs) answer = ov_model.generate(**input_tokens, max_new_tokens=2) - print(tok.batch_decode(answer)[0]) + print(tok.batch_decode(answer, skip_special_tokens=True)[0]) .. parsed-literal:: - /home/ea/work/openvino_notebooks/test_env/lib/python3.8/site-packages/optimum/intel/openvino/modeling_decoder.py:388: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly. + Setting `pad_token_id` to `eos_token_id`:2 for open-end generation. + /home/ea/work/genai_env/lib/python3.8/site-packages/optimum/intel/openvino/modeling_decoder.py:388: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly. self.request.start_async(inputs, shared_memory=True) .. parsed-literal:: - 2 + 2 = 4 + 2 + 2 = 4 Run Chatbot @@ -986,9 +929,8 @@ answers. from threading import Event, Thread from uuid import uuid4 - + from typing import List, Tuple import gradio as gr - import torch from transformers import ( AutoTokenizer, StoppingCriteria, @@ -1004,42 +946,78 @@ answers. stop_tokens = model_configuration.get("stop_tokens") tokenizer_kwargs = model_configuration.get("tokenizer_kwargs", {}) - + chinese_examples = [ + ["你好!"], + ["你是谁?"], + ["请介绍一下上海"], + ["请介绍一下英特尔公司"], + ["晚上睡不着怎么办?"], + ["给我讲一个年轻人奋斗创业最终取得成功的故事。"], + ["给这个故事起一个标题。"], + ] + + english_examples = [ + ["Hello there! How are you doing?"], + ["What is OpenVINO?"], + ["Who are you?"], + ["Can you explain to me briefly what is Python programming language?"], + ["Explain the plot of Cinderella in a sentence."], + ["What are some common mistakes to avoid when writing code?"], + [ + "Write a 100-word blog post on “Benefits of Artificial Intelligence and OpenVINO“" + ], + ] + + examples = ( + chinese_examples + if ("qwen" in model_id.value or "chatglm" in model_id.value) + else english_examples + ) max_new_tokens = 256 + class StopOnTokens(StoppingCriteria): def __init__(self, token_ids): self.token_ids = token_ids - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: + + def __call__( + self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs + ) -> bool: for stop_id in self.token_ids: if input_ids[0][-1] == stop_id: return True return False + if stop_tokens is not None: if isinstance(stop_tokens[0], str): stop_tokens = tok.convert_tokens_to_ids(stop_tokens) - + stop_tokens = [StopOnTokens(stop_tokens)] - def default_partial_text_processor(partial_text:str, new_text:str): + + def default_partial_text_processor(partial_text: str, new_text: str): """ helper for updating partially generated answer, used by de - + Params: partial_text: text buffer for storing previosly generated text new_text: text update for the current step Returns: updated text string - + """ partial_text += new_text return partial_text - text_processor = model_configuration.get("partial_text_processor", default_partial_text_processor) - - def convert_history_to_text(history:List[Tuple[str, str]]): + + text_processor = model_configuration.get( + "partial_text_processor", default_partial_text_processor + ) + + + def convert_history_to_text(history: List[Tuple[str, str]]): """ function for conversion history stored as list pairs of user 
and assistant messages to string according to model expected conversation template Params: @@ -1050,18 +1028,20 @@ answers. text = start_message + "".join( [ "".join( - [ - history_template.format(user=item[0], assistant=item[1]) - ] + [history_template.format(num=round, user=item[0], assistant=item[1])] ) - for item in history[:-1] + for round, item in enumerate(history[:-1]) ] ) text += "".join( [ "".join( [ - current_message_template.format(user=history[-1][0], assistant=history[-1][1]) + current_message_template.format( + num=len(history) + 1, + user=history[-1][0], + assistant=history[-1][1], + ) ] ) ] @@ -1069,11 +1049,10 @@ answers. return text - def user(message, history): """ callback function for updating user messages in interface on submit button click - + Params: message: current message history: conversation history @@ -1087,16 +1066,16 @@ answers. def bot(history, temperature, top_p, top_k, repetition_penalty, conversation_id): """ callback function for running chatbot on submit button click - + Params: history: conversation history - temperature: parameter for control the level of creativity in AI-generated text. + temperature: parameter for control the level of creativity in AI-generated text. By adjusting the `temperature`, you can influence the AI model's probability distribution, making the text more focused or diverse. top_p: parameter for control the range of tokens considered by the AI model based on their cumulative probability. top_k: parameter for control the range of tokens considered by the AI model based on their cumulative probability, selecting number of tokens with highest probability. repetition_penalty: parameter for penalizing tokens based on how frequently they occur in the text. conversation_id: unique conversation identifier. - + """ # Construct the input message string for the model by concatenating the current system message and conversation history @@ -1108,7 +1087,9 @@ answers. history = [history[-1]] messages = convert_history_to_text(history) input_ids = tok(messages, return_tensors="pt", **tokenizer_kwargs).input_ids - streamer = TextIteratorStreamer(tok, timeout=30.0, skip_prompt=True, skip_special_tokens=True) + streamer = TextIteratorStreamer( + tok, timeout=30.0, skip_prompt=True, skip_special_tokens=True + ) generate_kwargs = dict( input_ids=input_ids, max_new_tokens=max_new_tokens, @@ -1155,9 +1136,7 @@ answers. css=".disclaimer {font-variant-caps: all-small-caps;}", ) as demo: conversation_id = gr.State(get_uuid) - gr.Markdown( - f"""
<h1><center>OpenVINO {model_id.value} Chatbot</center></h1>
""" - ) + gr.Markdown(f"""
<h1><center>OpenVINO {model_id.value} Chatbot</center></h1>
""") chatbot = gr.Chatbot(height=500) with gr.Row(): with gr.Column(): @@ -1165,7 +1144,7 @@ answers. label="Chat Message Box", placeholder="Chat Message Box", show_label=False, - container=False + container=False, ) with gr.Column(): with gr.Row(): @@ -1222,17 +1201,8 @@ answers. interactive=True, info="Penalize repetition — 1.0 to disable.", ) - gr.Examples([ - ["Hello there! How are you doing?"], - ["What is OpenVINO?"], - ["Who are you?"], - ["Can you explain to me briefly what is Python programming language?"], - ["Explain the plot of Cinderella in a sentence."], - ["What are some common mistakes to avoid when writing code?"], - ["Write a 100-word blog post on “Benefits of Artificial Intelligence and OpenVINO“"] - ], - inputs=msg, - label="Click on any example and press the 'Submit' button" + gr.Examples( + examples, inputs=msg, label="Click on any example and press the 'Submit' button" ) submit_event = msg.submit( @@ -1288,7 +1258,21 @@ answers. # it creates a publicly shareable link for the interface. Read more in the docs: https://gradio.app/docs/ demo.launch() + +.. parsed-literal:: + + Running on local URL: http://127.0.0.1:7860 + + To create a public link, set `share=True` in `launch()`. + + + +.. .. raw:: html + +..
+ + .. code:: ipython3 # please run this cell for stopping gradio interface - demo.close() + # demo.close() diff --git a/docs/notebooks/256-bark-text-to-audio-with-output.rst b/docs/notebooks/256-bark-text-to-audio-with-output.rst index 4ffede0bda03fd..3075db69c00589 100644 --- a/docs/notebooks/256-bark-text-to-audio-with-output.rst +++ b/docs/notebooks/256-bark-text-to-audio-with-output.rst @@ -58,14 +58,12 @@ is used to act as an audio representation. - `Prerequisites <#prerequisites>`__ -- `Download and Convert - models <#download-and-convert-models>`__ +- `Download and Convert models <#download-and-convert-models>`__ - `Text Encoder <#text-encoder>`__ - `Coarse encoder <#coarse-encoder>`__ - `Fine encoder <#fine-encoder>`__ - - `Prepare Inference - pipeline <#prepare-inference-pipeline>`__ + - `Prepare Inference pipeline <#prepare-inference-pipeline>`__ - `Run model inference <#run-model-inference>`__ @@ -73,22 +71,26 @@ is used to act as an audio representation. - `Interactive demo <#interactive-demo>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- + + .. code:: ipython3 import sys if sys.platform == "linux": - %pip install -q "torch==1.13.1" "torchvision" "torchaudio==0.13.1" --index-url https://download.pytorch.org/whl/cpu + %pip install -q "torch==1.13.1" "torchvision==0.14.1" "torchaudio==0.13.1" --index-url https://download.pytorch.org/whl/cpu else: - %pip install -q "torch==1.13.1" "torchvision" "torchaudio==0.13.1" + %pip install -q "torch==1.13.1" "torchvision==0.14.1" "torchaudio==0.13.1" %pip install -q "openvino>=2023.1.0" gradio %pip install -q "git+https://github.com/suno-ai/bark.git" -Download and Convert models ---------------------------------------------------------------------- +Download and Convert models +--------------------------- + + .. code:: ipython3 @@ -104,8 +106,10 @@ Download and Convert models torch version does not support flash attention. You will get faster inference speed by upgrade torch to newest nightly version. -Text Encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Text Encoder +~~~~~~~~~~~~ + + Text encoder is responsible for embedding initial text prompt into high-level semantic tokens. it uses tokenizer for conversion input text @@ -163,8 +167,10 @@ models for that. del text_encoder_exportable del text_encoder_model, text_encoder -Coarse encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Coarse encoder +~~~~~~~~~~~~~~ + + Coarse encoder is a causal autoregressive transformer, that takes as input the results of the text encoder model. It aims at predicting the @@ -257,8 +263,10 @@ provide empty tensor on the first step. fine_feature_extractor_path = fine_model_dir / "bark_fine_feature_extractor.xml" -Fine encoder -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Fine encoder +~~~~~~~~~~~~ + + Fine encoder is time a non-causal autoencoder transformer, which iteratively predicts the last codebooks based on the sum of the previous @@ -286,8 +294,10 @@ codebooks embeddings obtained using Coarse encoder. fine_model_dir / f"bark_fine_lm_{i}.xml", ) -Prepare Inference pipeline -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Prepare Inference pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~ + + For better usability, classes for working with models provided below. 
@@ -859,14 +869,18 @@ consists from several steps, illustrated on the diagram below: gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end] return gen_fine_arr -Run model inference -------------------------------------------------------------- +Run model inference +------------------- + + Now is time to see model in action. We need only wrap our models to classes and run ``generate_audio`` function. -Select Inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select Inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -952,8 +966,10 @@ select device from dropdown list for running inference using OpenVINO -Interactive demo ----------------------------------------------------------- +Interactive demo +---------------- + + .. code:: ipython3 diff --git a/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst b/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst index 717605194f33e5..4db0fb97ec0ea4 100644 --- a/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst +++ b/docs/notebooks/257-llava-multimodal-chatbot-with-output.rst @@ -43,6 +43,7 @@ The tutorial consists from following steps: **Table of contents:** + - `About model <#about-model>`__ - `Prerequisites <#prerequisites>`__ - `Build model tokenizer and image @@ -113,7 +114,7 @@ Install required dependencies import sys %pip install -q "torch>=2.1.0" "torchvision" "torchaudio" --index-url https://download.pytorch.org/whl/cpu - %pip install -q "openvino-nightly==2023.2.0.dev20231102" "git+https://github.com/openvinotoolkit/nncf.git@release_v270" "sentencepiece" "tokenizers>=0.12.1" "transformers>=4.31.0,<4.35.0" "gradio" "einops" + %pip install -q "openvino>=2023.2.0" "nncf>=2.7.0" "sentencepiece" "tokenizers>=0.12.1" "transformers>=4.31.0,<4.35.0" "gradio" "einops" .. code:: ipython3 @@ -227,7 +228,7 @@ The code below preparing function for converting LLaVA model to OpenVINO Intermediate Representation format. It splits model on parts described above, prepare example inputs for each part and convert each part using `OpenVINO Model Conversion -API `__. +API `__. ``ov.convert_model`` function accepts PyTorch model instance and returns ``ov.Model`` object that represent model in OpenVINO format. It is ready to use for loading on device using ``ov.compile_model`` or can be saved @@ -484,7 +485,7 @@ Convert model to OpenVINO format and save it on disk. Let’s consider each step more deeply. -Instantiate PyTorch model +Instantiate PyTorch model `:math:`\Uparrow` <#table-of-content>`__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -495,7 +496,7 @@ from `HuggingFace hub `__ during first run. It may takes some time and requires at least 13 Gb free space on disk. -Compress Model weights to 4 and 8 bits using NNCF +Compress Model weights to 4 and 8 bits using NNCF `:math:`\Uparrow` <#table-of-content>`__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -534,11 +535,11 @@ improves performance even more, but introduces a minor drop in prediction quality. More details about weights compression, can be found in `OpenVINO -documentation `__. +documentation `__. **Note**: There is no speedup for INT4 compressed models on dGPU. 
-Convert model to OpenVINO IR format +Convert model to OpenVINO IR format `:math:`\Uparrow` <#table-of-content>`__ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/notebooks/257-llava-multimodal-chatbot-with-output_files/index.html b/docs/notebooks/257-llava-multimodal-chatbot-with-output_files/index.html index 76276b096a486f..bdda9c41894054 100644 --- a/docs/notebooks/257-llava-multimodal-chatbot-with-output_files/index.html +++ b/docs/notebooks/257-llava-multimodal-chatbot-with-output_files/index.html @@ -1,7 +1,7 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/257-llava-multimodal-chatbot-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/257-llava-multimodal-chatbot-with-output_files/ -
-Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/257-llava-multimodal-chatbot-with-output_files/
-../
-257-llava-multimodal-chatbot-with-output_20_1.png  15-Nov-2023 00:43              539244
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/257-llava-multimodal-chatbot-with-output_files/
+../
+257-llava-multimodal-chatbot-with-output_20_1.png  07-Dec-2023 00:49              539244
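
The device dropdowns in these notebooks are populated from ``core.available_devices``, and the chosen name is passed straight to ``compile_model``. A short sketch of that step (the IR path is illustrative):

.. code:: ipython3

    import openvino as ov

    core = ov.Core()
    print(core.available_devices)  # e.g. ['CPU', 'GPU.0', 'GPU.1'], host-dependent

    # "AUTO" lets OpenVINO pick the best available device with CPU fallback;
    # the LATENCY hint optimizes single-stream response time, matching the
    # ov_config used in the chatbot notebook above.
    model = core.read_model("toy_int4.xml")  # any saved IR
    compiled = core.compile_model(model, "AUTO", {"PERFORMANCE_HINT": "LATENCY"})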
 

diff --git a/docs/notebooks/260-pix2struct-docvqa-with-output.rst b/docs/notebooks/260-pix2struct-docvqa-with-output.rst index 73926ec941d0be..8cb08a85db2341 100644 --- a/docs/notebooks/260-pix2struct-docvqa-with-output.rst +++ b/docs/notebooks/260-pix2struct-docvqa-with-output.rst @@ -44,6 +44,7 @@ convert the model to OpenVINO™ IR format. **Table of contents:** + - `About Pix2Struct <#about-pixstruct>`__ - `Prerequisites <#prerequisites>`__ - `Download and Convert Model <#download-and-convert-model>`__ @@ -99,7 +100,7 @@ documentation `__. .. code:: ipython3 %pip install -q torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu - %pip install -q "git+https://github.com/huggingface/optimum-intel.git" "openvino>=2023.1.0" "transformers>=4.33.0" onnx gradio + %pip install -q "git+https://github.com/huggingface/optimum-intel.git" "openvino>=2023.1.0" "transformers>=4.33.0" onnx gradio --extra-index-url https://download.pytorch.org/whl/cpu Download and Convert Model -------------------------- @@ -230,7 +231,7 @@ by ``Pix2StructProcessor.decode`` Let’s see the model in action. For testing the model, we will use a screenshot from `OpenVINO -documentation `__ +documentation `__ .. code:: ipython3 diff --git a/docs/notebooks/260-pix2struct-docvqa-with-output_files/index.html b/docs/notebooks/260-pix2struct-docvqa-with-output_files/index.html index 1728691a60f1b0..216ad4292507f3 100644 --- a/docs/notebooks/260-pix2struct-docvqa-with-output_files/index.html +++ b/docs/notebooks/260-pix2struct-docvqa-with-output_files/index.html @@ -1,8 +1,8 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/260-pix2struct-docvqa-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/260-pix2struct-docvqa-with-output_files/ -
-Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/260-pix2struct-docvqa-with-output_files/
-../
-260-pix2struct-docvqa-with-output_11_0.jpg         15-Nov-2023 00:43              134092
-260-pix2struct-docvqa-with-output_11_0.png         15-Nov-2023 00:43              221889
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/260-pix2struct-docvqa-with-output_files/
+../
+260-pix2struct-docvqa-with-output_11_0.jpg         07-Dec-2023 00:49              134092
+260-pix2struct-docvqa-with-output_11_0.png         07-Dec-2023 00:49              221889
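
Because Pix2Struct is supported through Optimum Intel, the document-VQA flow this notebook describes reduces to a few lines. A hedged sketch — the checkpoint name, image URL, and question below are illustrative placeholders:

.. code:: ipython3

    import requests
    from PIL import Image
    from transformers import Pix2StructProcessor
    from optimum.intel.openvino import OVModelForPix2Struct

    model_id = "google/pix2struct-docvqa-base"  # illustrative DocVQA checkpoint
    processor = Pix2StructProcessor.from_pretrained(model_id)
    # export=True converts the PyTorch checkpoint to OpenVINO IR on the fly.
    model = OVModelForPix2Struct.from_pretrained(model_id, export=True)

    image = Image.open(requests.get("https://example.com/document.png", stream=True).raw)
    inputs = processor(images=image, text="What is the total amount?", return_tensors="pt")
    answer_ids = model.generate(**inputs, max_new_tokens=32)
    print(processor.decode(answer_ids[0], skip_special_tokens=True))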
 

diff --git a/docs/notebooks/261-fast-segment-anything-with-output.rst b/docs/notebooks/261-fast-segment-anything-with-output.rst index 4710119100e349..21ca1341195a77 100644 --- a/docs/notebooks/261-fast-segment-anything-with-output.rst +++ b/docs/notebooks/261-fast-segment-anything-with-output.rst @@ -30,6 +30,7 @@ the prompt. **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Install requirements <#install-requirements>`__ @@ -65,7 +66,7 @@ Install requirements .. code:: ipython3 - %pip install -q "ultralytics==8.0.200" onnx + %pip install -q "ultralytics==8.0.200" onnx --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "openvino-dev>=2023.1.0" %pip install -q "nncf>=2.6.0" %pip install -q gradio @@ -156,8 +157,8 @@ model and generate a segmentation map. .. parsed-literal:: - image 1/1 /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything/coco_bike.jpg: 768x1024 37 objects, 631.0ms - Speed: 3.8ms preprocess, 631.0ms inference, 21.8ms postprocess per image at shape (1, 3, 768, 1024) + image 1/1 /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything/coco_bike.jpg: 768x1024 37 objects, 621.3ms + Speed: 3.7ms preprocess, 621.3ms inference, 22.9ms postprocess per image at shape (1, 3, 768, 1024) The model returns segmentation maps for all the objects on the image. @@ -201,13 +202,13 @@ tracing. The FastSAM model itself is based on YOLOv8 model. PyTorch: starting from 'FastSAM-x.pt' with input shape (1, 3, 1024, 1024) BCHW and output shape(s) ((1, 37, 21504), (1, 32, 256, 256)) (138.2 MB) ONNX: starting export with onnx 1.15.0 opset 16... - ONNX: export success ✅ 3.5s, saved as 'FastSAM-x.onnx' (275.5 MB) + ONNX: export success ✅ 3.4s, saved as 'FastSAM-x.onnx' (275.5 MB) - OpenVINO: starting export with openvino 2023.1.0-12185-9e6b00e51cd-releases/2023/1... - OpenVINO: export success ✅ 1.0s, saved as 'FastSAM-x_openvino_model/' (275.9 MB) + OpenVINO: starting export with openvino 2023.2.0-13089-cfd42bd2cb0-HEAD... + OpenVINO: export success ✅ 1.1s, saved as 'FastSAM-x_openvino_model/' (275.9 MB) - Export complete (7.5s) - Results saved to /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything + Export complete (7.4s) + Results saved to /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything Predict: yolo predict task=segment model=FastSAM-x_openvino_model imgsz=1024 Validate: yolo val task=segment model=FastSAM-x_openvino_model imgsz=1024 data=ultralytics/datasets/sa.yaml Visualize: https://netron.app @@ -306,8 +307,8 @@ pipeline. .. 
parsed-literal:: - image 1/1 /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything/coco_bike.jpg: 480x640 33 objects, 353.6ms - Speed: 3.5ms preprocess, 353.6ms inference, 14.7ms postprocess per image at shape (1, 3, 480, 640) + image 1/1 /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/261-fast-segment-anything/coco_bike.jpg: 480x640 33 objects, 321.2ms + Speed: 2.3ms preprocess, 321.2ms inference, 20.1ms postprocess per image at shape (1, 3, 480, 640) One can observe the converted model outputs in the next cell, they is @@ -376,7 +377,7 @@ The quantization algorithm is based on `The YOLOv8 quantization example `__ in the NNCF repo, refer there for more details. Moreover, you can check out other quantization tutorials in the `OV notebooks -repo `__. +repo <../230-yolov8-optimization/>`__. **Note**: Model post-training quantization is time-consuming process. Be patient, it can take several minutes depending on your hardware. @@ -534,20 +535,62 @@ repo
+
+
+
+
+.. raw:: html
+
+
+
 .. parsed-literal::

-    Statistics collection: 100%|██████████| 128/128 [01:07<00:00, 1.91it/s]
-    Applying Fast Bias correction: 100%|██████████| 115/115 [00:30<00:00, 3.76it/s]
+    /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/nncf/experimental/tensor/tensor.py:80: RuntimeWarning: invalid value encountered in multiply
+      return Tensor(self.data * unwrap_tensor_data(other))
+
+
+.. parsed-literal::
+
+    Output()
+
+
+.. raw:: html
+

+
+
+
+
+.. raw:: html
+
+    
+    
+ Compare the performance of the Original and Quantized Models diff --git a/docs/notebooks/261-fast-segment-anything-with-output_files/index.html b/docs/notebooks/261-fast-segment-anything-with-output_files/index.html index 5e233240faa8f8..c6440fc6bd3500 100644 --- a/docs/notebooks/261-fast-segment-anything-with-output_files/index.html +++ b/docs/notebooks/261-fast-segment-anything-with-output_files/index.html @@ -1,10 +1,10 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/261-fast-segment-anything-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/261-fast-segment-anything-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/261-fast-segment-anything-with-output_files/


../
-261-fast-segment-anything-with-output_21_0.jpg     15-Nov-2023 00:43              116049
-261-fast-segment-anything-with-output_21_0.png     15-Nov-2023 00:43              824318
-261-fast-segment-anything-with-output_9_0.jpg      15-Nov-2023 00:43              117489
-261-fast-segment-anything-with-output_9_0.png      15-Nov-2023 00:43              815077
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/261-fast-segment-anything-with-output_files/


../
+261-fast-segment-anything-with-output_21_0.jpg     07-Dec-2023 00:49              116049
+261-fast-segment-anything-with-output_21_0.png     07-Dec-2023 00:49              824318
+261-fast-segment-anything-with-output_9_0.jpg      07-Dec-2023 00:49              117489
+261-fast-segment-anything-with-output_9_0.png      07-Dec-2023 00:49              815077
 

diff --git a/docs/notebooks/262-softvc-voice-conversion-with-output.rst b/docs/notebooks/262-softvc-voice-conversion-with-output.rst index a333dba3adb807..41946b1b94404c 100644 --- a/docs/notebooks/262-softvc-voice-conversion-with-output.rst +++ b/docs/notebooks/262-softvc-voice-conversion-with-output.rst @@ -18,8 +18,8 @@ audio are preserved. In this tutorial we will use the base model flow. -Table of contents: -^^^^^^^^^^^^^^^^^^ +**Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Use the original model to run an @@ -36,7 +36,7 @@ Prerequisites .. code:: ipython3 %pip install -q --upgrade pip setuptools - %pip install -q "openvino>=2023.2.0.dev20230922" + %pip install -q "openvino>=2023.2.0" !git clone https://github.com/svc-develop-team/so-vits-svc -b 4.1-Stable %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu tqdm librosa "torch>=2.1.0" "torchaudio>=2.1.0" faiss-cpu gradio "numpy==1.23.5" "fairseq==0.12.2" praat-parselmouth diff --git a/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst b/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst index 3caebe1dcd10f2..14293eda295b69 100644 --- a/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst +++ b/docs/notebooks/263-latent-consistency-models-image-generation-with-output.rst @@ -42,24 +42,35 @@ repository `__. In this tutorial, we consider how to convert and run LCM using OpenVINO. An additional part demonstrates how to run quantization with `NNCF `__ to speed up -pipeline. +pipeline. **Table of contents:** -- `Prerequisites <#prerequisites>`__ -- `Prepare models for OpenVINO format conversion <#prepare-models-for-openvino-format-conversion>`__ -- `Convert models to OpenVINO format <#convert-models-to-openvino-format>`__ -- `Text Encoder <#text-encoder>`__ -- `U-Net <#u-net>`__ -- `VAE <#vae>`__ -- `Prepare inference pipeline <#prepare-inference-pipeline>`__ -- `Configure Inference Pipeline <#configure-inference-pipeline>`__ -- `Text-to-image generation <#text-to-image-generation>`__ -- `Quantization <#quantization>`__ -- `Prepare calibration dataset <#prepare-calibration-dataset>`__ -- `Run quantization <#run-quantization>`__ -- `Compare inference time of the FP16 and INT8 models <#compare-inference-time-of-the-fp-and-int-models>`__ -- `Interactive demo <#interactive-demo>`__ + +- `Prerequisites <#prerequisites>`__ +- `Prepare models for OpenVINO format + conversion <#prepare-models-for-openvino-format-conversion>`__ +- `Convert models to OpenVINO + format <#convert-models-to-openvino-format>`__ + + - `Text Encoder <#text-encoder>`__ + - `U-Net <#u-net>`__ + - `VAE <#vae>`__ + +- `Prepare inference pipeline <#prepare-inference-pipeline>`__ + + - `Configure Inference Pipeline <#configure-inference-pipeline>`__ + +- `Text-to-image generation <#text-to-image-generation>`__ +- `Quantization <#quantization>`__ + + - `Prepare calibration dataset <#prepare-calibration-dataset>`__ + - `Run quantization <#run-quantization>`__ + - `Compare inference time of the FP16 and INT8 + models <#compare-inference-time-of-the-fp-and-int-models>`__ + - `Compare UNet file size <#compare-unet-file-size>`__ + +- `Interactive demo <#interactive-demo>`__ Prerequisites ------------- @@ -69,7 +80,7 @@ Prerequisites .. 
code:: ipython3 %pip install -q "torch" --index-url https://download.pytorch.org/whl/cpu - %pip install -q "openvino>=2023.1.0" transformers "diffusers>=0.22.0" pillow gradio "nncf>=2.6.0" datasets + %pip install -q "openvino>=2023.1.0" transformers "diffusers>=0.23.1" pillow gradio "nncf>=2.6.0" datasets --extra-index-url https://download.pytorch.org/whl/cpu Prepare models for OpenVINO format conversion --------------------------------------------- @@ -88,7 +99,7 @@ model is also integrated into Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. This allows us to compare running original Stable Diffusion -(from this `notebook <225-stable-diffusion-text-to-image-with-output.html>`__) and +(from this `notebook <../225-stable-diffusion-text-to-image>`__) and distilled using LCD. The distillation approach efficiently converts a pre-trained guided diffusion model into a latent consistency model by solving an augmented PF-ODE. @@ -116,7 +127,7 @@ provide which module should be loaded for initialization using VAE_DECODER_OV_PATH = Path("model/vae_decoder.xml") - def load_orginal_pytorch_pipeline_componets(skip_models=False, skip_safety_checker=True): + def load_orginal_pytorch_pipeline_componets(skip_models=False, skip_safety_checker=False): pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7") scheduler = pipe.scheduler tokenizer = pipe.tokenizer @@ -167,11 +178,6 @@ provide which module should be loaded for initialization using Loading pipeline components...: 0%| | 0/7 [00:00 x_t-1 latents, denoised = self.scheduler.step( @@ -735,7 +699,7 @@ inference using OpenVINO. .. parsed-literal:: - Dropdown(description='Device:', options=('CPU', 'GPU', 'AUTO'), value='CPU') + Dropdown(description='Device:', options=('CPU', 'AUTO'), value='CPU') @@ -890,9 +854,11 @@ model inputs for calibration we should customize ``CompiledModel``. import datasets from tqdm.notebook import tqdm - from transformers import Pipeline + from transformers import set_seed from typing import Any, Dict, List + set_seed(1) + class CompiledModelDecorator(ov.CompiledModel): def __init__(self, compiled_model, prob: float, data_cache: List[Any] = None): super().__init__(compiled_model) @@ -904,18 +870,22 @@ model inputs for calibration we should customize ``CompiledModel``. self.data_cache.append(*args) return super().__call__(*args, **kwargs) - def collect_calibration_data(lcm_pipeline: Pipeline, subset_size: int) -> List[Dict]: + def collect_calibration_data(lcm_pipeline: OVLatentConsistencyModelPipeline, subset_size: int) -> List[Dict]: original_unet = lcm_pipeline.unet lcm_pipeline.unet = CompiledModelDecorator(original_unet, prob=0.3) dataset = datasets.load_dataset("laion/laion2B-en", split="train", streaming=True).shuffle(seed=42) lcm_pipeline.set_progress_bar_config(disable=True) + safety_checker = lcm_pipeline.safety_checker + lcm_pipeline.safety_checker = None # Run inference for data collection pbar = tqdm(total=subset_size) diff = 0 for batch in dataset: prompt = batch["TEXT"] + if len(prompt) > tokenizer.model_max_length: + continue _ = lcm_pipeline( prompt, num_inference_steps=num_inference_steps, @@ -935,6 +905,7 @@ model inputs for calibration we should customize ``CompiledModel``. calibration_dataset = lcm_pipeline.unet.data_cache lcm_pipeline.set_progress_bar_config(disable=False) lcm_pipeline.unet = original_unet + lcm_pipeline.safety_checker = safety_checker return calibration_dataset .. 
code:: ipython3 @@ -954,7 +925,7 @@ model inputs for calibration we should customize ``CompiledModel``. .. parsed-literal:: - Downloading readme: 0%| | 0.00/56.0 [00:00 77). Running this sequence through the model will result in indexing errors - WARNING:__main__:The following part of your input was truncated because CLIP can only handle sequences up to 77 tokens: ['colleges harnessing technology to make education free'] - - Run quantization ~~~~~~~~~~~~~~~~ @@ -1004,25 +969,72 @@ Create a quantized model from the pre-trained converted OpenVINO model. ov.save_model(quantized_unet, UNET_INT8_OV_PATH) + .. parsed-literal:: - INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino + Output() + + + +.. raw:: html + +

+
+
+
+
+.. raw:: html
+
+    
+    
+ + .. parsed-literal:: - Statistics collection: 100%|██████████| 200/200 [03:15<00:00, 1.02it/s] - Applying Smooth Quant: 100%|██████████| 101/101 [00:07<00:00, 13.89it/s] + Output() + + + +.. raw:: html + +

+
+
+
+
+.. raw:: html
+
+    
+    
+ .. parsed-literal:: - INFO:nncf:96 ignored nodes was found by name in the NNCFGraph + INFO:nncf:96 ignored nodes were found by name in the NNCFGraph + .. parsed-literal:: - Statistics collection: 100%|██████████| 200/200 [03:57<00:00, 1.19s/it] + Output() + + + +.. raw:: html + +

+
+
+
+
+.. raw:: html
+
+    
+    
+ .. code:: ipython3 @@ -1098,8 +1110,7 @@ pipelines, we use median inference time on calibration subset. validation_size = 10 calibration_dataset = datasets.load_dataset("laion/laion2B-en", split="train", streaming=True).take(validation_size) validation_data = [] - while len(validation_data) < validation_size: - batch = next(iter(calibration_dataset)) + for batch in calibration_dataset: prompt = batch["TEXT"] validation_data.append(prompt) @@ -1122,6 +1133,13 @@ pipelines, we use median inference time on calibration subset. inference_time.append(delta) return np.median(inference_time) + + +.. parsed-literal:: + + Resolving data files: 0%| | 0/128 [00:00 -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/263-latent-consistency-models-image-generation-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/263-latent-consistency-models-image-generation-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/263-latent-consistency-models-image-generation-with-output_files/


../
-263-latent-consistency-models-image-generation-..> 15-Nov-2023 00:43              345052
-263-latent-consistency-models-image-generation-..> 15-Nov-2023 00:43              352748
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/263-latent-consistency-models-image-generation-with-output_files/


../
+263-latent-consistency-models-image-generation-..> 07-Dec-2023 00:49               20243
+263-latent-consistency-models-image-generation-..> 07-Dec-2023 00:49              390255
+263-latent-consistency-models-image-generation-..> 07-Dec-2023 00:49               20298
+263-latent-consistency-models-image-generation-..> 07-Dec-2023 00:49              393756
 

diff --git a/docs/notebooks/263-lcm-lora-controlnet-with-output.rst b/docs/notebooks/263-lcm-lora-controlnet-with-output.rst
new file mode 100644
index 00000000000000..39fb259bb019eb
--- /dev/null
+++ b/docs/notebooks/263-lcm-lora-controlnet-with-output.rst
@@ -0,0 +1,1664 @@
+Text-to-Image Generation with LCM LoRA and ControlNet Conditioning
+==================================================================
+
+Diffusion models have revolutionized AI-generated art. This technology
+enables the creation of high-quality images simply by writing a text
+prompt. Even though this technology gives very promising results, the
+diffusion process is, at its core, a process of generating images from
+random noise and text conditions, which do not always clarify how the
+desired content should look, what form it should take, or where it
+should be located in relation to other objects in the image.
+Researchers have been looking for ways to have more control over the
+results of the generation process. ControlNet provides a minimal
+interface allowing users to customize the generation process to a
+great extent.
+
+ControlNet was introduced in the `Adding Conditional Control to
+Text-to-Image Diffusion Models `__
+paper. It provides a framework that enables support for various spatial
+contexts such as a depth map, a segmentation map, a scribble, and key
+points that can serve as additional conditionings to Diffusion models
+such as Stable Diffusion.
+
+Latent Consistency Models (LCM) are a way to decrease the number of
+steps required to generate an image with Stable Diffusion (or SDXL) by
+distilling the original model into another version that requires fewer
+steps (4 to 8 instead of the original 25 to 50). Distillation is a type
+of training procedure that attempts to replicate the outputs from a
+source model using a new one. The distilled model may be designed to be
+smaller or, in this case, require fewer steps to run. It’s usually a
+lengthy and costly process that requires huge amounts of data,
+patience, and powerful training hardware.
+
+For latent consistency distillation, each model needs to be distilled
+separately. LCM LoRA makes it possible to train just a small number of
+adapters, known as LoRA layers, instead of the full model. The
+resulting LoRAs can then be applied to any fine-tuned version of the
+model without having to distill them separately. The benefit of this
+LCM LoRA distillation process is that it can be integrated into the
+existing inference pipelines without changes to the main code, for
+example, into the ControlNet-guided Stable Diffusion pipeline. More
+details about LCM LoRA can be found in the `technical
+report `__ and `blog
+post `__.
+
+This notebook explores how to speed up the ControlNet pipeline using
+LCM LoRA, OpenVINO and quantization with
+`NNCF `__. Let us get
+“controlling”!
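+
+As a quick preview of the drop-in property mentioned above, the sketch
+below applies LCM LoRA to a plain Diffusers text-to-image pipeline. It
+is a minimal illustration only, assuming the same ``diffusers`` API and
+model ids that are used later in this notebook, not the OpenVINO
+pipeline built below:
+
+.. code:: ipython3
+
+    from diffusers import DiffusionPipeline, LCMScheduler
+
+    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
+    # Swap in the LCM scheduler and attach the LCM LoRA adapter - no other changes
+    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+    pipe.load_lora_weights("latent-consistency/lcm-lora-sdv1-5")
+    pipe.fuse_lora()
+    # 4 denoising steps instead of the usual 25-50
+    image = pipe("A head full of roses", num_inference_steps=4, guidance_scale=1.0).images[0]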
+
+
+**Table of contents:**
+
+- `Background <#background>`__
+- `Stable Diffusion <#stable-diffusion>`__
+- `ControlNet <#controlnet>`__
+- `Low-Rank Adaptation of Large Language Models (LoRA) <#low-rank-adaptation-of-large-language-models-lora>`__
+- `Prerequisites <#prerequisites>`__
+- `Load Original Diffusers pipeline and prepare models for conversion <#load-original-diffusers-pipeline-and-prepare-models-for-conversion>`__
+- `Condition Image <#condition-image>`__
+- `Convert models to OpenVINO Intermediate representation (IR) format <#convert-models-to-openvino-intermediate-representation-ir-format>`__
+- `ControlNet conversion <#controlnet-conversion>`__
+- `U-Net <#u-net>`__
+- `Text Encoder <#text-encoder>`__
+- `VAE Decoder conversion <#vae-decoder-conversion>`__
+- `Prepare Inference pipeline <#prepare-inference-pipeline>`__
+- `Prepare tokenizer and LCMScheduler <#prepare-tokenizer-and-lcmscheduler>`__
+- `Select inference device for Stable Diffusion pipeline <#select-inference-device-for-stable-diffusion-pipeline>`__
+- `Running Text-to-Image Generation with ControlNet Conditioning and OpenVINO <#running-text-to-image-generation-with-controlnet-conditioning-and-openvino>`__
+- `Quantization <#quantization>`__
+- `Prepare calibration datasets <#prepare-calibration-datasets>`__
+- `Run quantization <#run-quantization>`__
+- `Compare inference time of the FP16 and INT8 models <#compare-inference-time-of-the-fp-and-int-models>`__
+- `Compare model file sizes <#compare-model-file-sizes>`__
+- `Interactive Demo <#interactive-demo>`__
+
+Background
+----------
+
+
+
+Stable Diffusion
+~~~~~~~~~~~~~~~~
+
+
+
+`Stable Diffusion `__ is a
+text-to-image latent diffusion model created by researchers and
+engineers from CompVis, Stability AI, and LAION. Diffusion models, as
+mentioned above, can generate high-quality images. Stable Diffusion is
+based on a particular type of diffusion model called Latent Diffusion,
+proposed in the `High-Resolution Image Synthesis with Latent Diffusion
+Models `__ paper. Generally speaking,
+diffusion models are machine learning systems that are trained to
+denoise random Gaussian noise step by step, to get to a sample of
+interest, such as an image. Diffusion models have been shown to achieve
+state-of-the-art results for generating image data. But one downside of
+diffusion models is that the reverse denoising process is slow because
+of its repeated, sequential nature. In addition, these models consume a
+lot of memory because they operate in pixel space, which becomes huge
+when generating high-resolution images. Latent diffusion can reduce the
+memory and compute complexity by applying the diffusion process over a
+lower dimensional latent space, instead of using the actual pixel
+space. This is the key difference between standard diffusion and latent
+diffusion models: in latent diffusion, the model is trained to generate
+latent (compressed) representations of the images.
+
+There are three main components in latent diffusion:
+
+- A text encoder, for example `CLIP’s Text
+  Encoder `__, that creates the condition for
+  generating an image from a text prompt.
+- A U-Net for step-by-step denoising of the latent image
+  representation.
+- An autoencoder (VAE) for encoding the input image to latent space (if
+  required) and decoding the latents back to an image after generation.
+
+For more details regarding Stable Diffusion work, refer to the `project
+website `__.
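+
+The memory argument above can be made concrete with the shapes used in
+this notebook: a 512x512 output and the scale factor of 8 that appears
+later as ``vae_scale_factor``. The arithmetic below is illustrative
+only:
+
+.. code:: ipython3
+
+    # A 512x512 RGB image versus its 4x64x64 latent representation
+    image_elems = 3 * 512 * 512    # pixel space
+    latent_elems = 4 * 64 * 64     # latent space (512 / 8 = 64 per side)
+    print(image_elems / latent_elems)  # 48.0 - the diffusion loop runs in a ~48x smaller space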
+There is a tutorial for Stable Diffusion Text-to-Image generation with
+OpenVINO, see the following
+`notebook <225-stable-diffusion-text-to-image-with-output.html>`__.
+
+ControlNet
+~~~~~~~~~~
+
+ControlNet is a neural network
+structure to control diffusion models by adding extra conditions. Using
+this new framework, we can capture a scene, structure, object, or
+subject pose from an input image, and then transfer that quality to the
+generation process. In practice, this enables the model to completely
+retain the original input shape, and create a novel image that
+conserves the shape, pose, or outline while using the novel features
+from the input prompt.
+
+.. figure:: https://raw.githubusercontent.com/lllyasviel/ControlNet/main/github_page/he.png
+   :alt: controlnet block
+
+   controlnet block
+
+Functionally, ControlNet operates by wrapping around an image synthesis
+process to impart attention to the shape required to operate the model
+using either its inbuilt prediction or one of many additional annotator
+models. Referring to the diagram above, we can see, on a rudimentary
+level, how ControlNet uses a trainable copy in conjunction with the
+original network to modify the final output with respect to the shape
+of the input control source.
+
+By repeating the above simple structure 14 times, we can control Stable
+Diffusion in the following way:
+
+.. figure:: https://raw.githubusercontent.com/lllyasviel/ControlNet/main/github_page/sd.png
+   :alt: sd + controlnet
+
+   sd + controlnet
+
+The input is passed through the SD blocks, represented on the left,
+while simultaneously being processed by the ControlNet blocks on the
+right. This process is almost the same during encoding. When denoising
+the image, at each step the SD decoder blocks will receive control
+adjustments from the parallel processing path from ControlNet.
+
+In the end, we are left with a very similar image synthesis pipeline
+with an additional control added for the shape of the output features
+in the final image.
+
+Low-Rank Adaptation of Large Language Models (LoRA)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+`Low-Rank Adaptation of Large Language Models
+(LoRA) `__ is a training method that
+accelerates the training of large models while consuming less memory.
+It adds pairs of rank-decomposition weight matrices (called update
+matrices) to existing weights, and only trains those newly added
+weights. This has a couple of advantages:
+
+- LoRA makes fine-tuning more efficient by drastically reducing the
+  number of trainable parameters.
+- The original pre-trained weights are kept frozen, which means you can
+  have multiple lightweight and portable LoRA models for various
+  downstream tasks built on top of them.
+- LoRA is orthogonal to many other parameter-efficient methods and can
+  be combined with many of them.
+- Performance of models fine-tuned using LoRA is comparable to the
+  performance of fully fine-tuned models.
+- LoRA does not add any inference latency because adapter weights can
+  be merged with the base model.
+
+In principle, LoRA can be applied to any subset of weight matrices in a
+neural network to reduce the number of trainable parameters. However,
+for simplicity and further parameter efficiency, in Transformer models
+LoRA is typically applied to attention blocks only.
+
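+A toy sketch of the rank-decomposition idea (the shapes below are
+purely illustrative, not the dimensions of any model in this notebook):
+
+.. code:: ipython3
+
+    import torch
+
+    d_out, d_in, r = 768, 768, 16            # rank r is much smaller than the weight dims
+    W = torch.randn(d_out, d_in)             # frozen pre-trained weight
+    A = torch.randn(r, d_in)                 # trainable update matrices
+    B = torch.zeros(d_out, r)                # B starts at zero, so training starts from W
+    W_merged = W + B @ A                     # merged once -> no extra inference latency
+    print(W.numel(), A.numel() + B.numel())  # 589824 frozen vs 24576 trainable
+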
+The resulting number of trainable parameters in a LoRA model depends on
+the size of the low-rank update matrices, which is determined mainly by
+the rank r and the shape of the original weight matrix. More details
+about LoRA can be found in the HuggingFace `conceptual
+guide `__,
+`Diffusers
+documentation `__
+and `blog post `__.
+
+Prerequisites
+-------------
+
+
+
+Install required packages
+
+.. code:: ipython3
+
+    %pip install -q "torch" transformers "diffusers>=0.22.0" "controlnet-aux>=0.0.6" accelerate --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q "openvino>=2023.2.0" pillow gradio datasets "nncf>=2.7.0"
+
+Prepare PyTorch models
+
+.. code:: ipython3
+
+    from pathlib import Path
+
+    controlnet_id = "lllyasviel/control_v11p_sd15_normalbae"
+    adapter_id = "latent-consistency/lcm-lora-sdv1-5"
+    stable_diffusion_id = "runwayml/stable-diffusion-v1-5"
+
+    TEXT_ENCODER_OV_PATH = Path('model/text_encoder.xml')
+    UNET_OV_PATH = Path('model/unet_controlnet.xml')
+    CONTROLNET_OV_PATH = Path('model/controlnet-normalbae.xml')
+    VAE_DECODER_OV_PATH = Path('model/vae_decoder.xml')
+    TOKENIZER_PATH = Path('model/tokenizer')
+    SCHEDULER_PATH = Path('model/scheduler')
+
+    skip_models = TEXT_ENCODER_OV_PATH.exists() and UNET_OV_PATH.exists() and CONTROLNET_OV_PATH.exists() and VAE_DECODER_OV_PATH.exists()
+
+Load Original Diffusers pipeline and prepare models for conversion
+------------------------------------------------------------------
+
+
+
+For working with Stable Diffusion and ControlNet models, we will use
+the Hugging Face `Diffusers `__
+library. To experiment with ControlNet, Diffusers exposes the
+`StableDiffusionControlNetPipeline `__
+similar to the `other Diffusers
+pipelines `__.
+Central to the ``StableDiffusionControlNetPipeline`` is the
+``controlnet`` argument which enables providing a specially trained
+`ControlNetModel `__
+instance while keeping the pre-trained diffusion model weights the
+same.
+
+The code below demonstrates how to create
+``StableDiffusionControlNetPipeline``. The process consists of the
+following steps:
+
+1. Create ``ControlNetModel`` for passing to the pipeline using the
+   ``from_pretrained`` method.
+2. Create ``StableDiffusionControlNetPipeline`` using the Stable
+   Diffusion and ControlNet models.
+3. Load LoRA weights into the pipeline using the ``load_lora_weights``
+   method.
+
+.. code:: ipython3
+
+    from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+    import gc
+
+
+    def load_original_pytorch_pipeline_components(controlnet_id:str, stable_diffusion_id:str, adapter_id:str):
+        """
+        Helper function for loading Stable Diffusion ControlNet pipeline and applying LCM LoRA
+
+        Parameters:
+          controlnet_id: model id from HuggingFace hub or local path for loading ControlNet model
+          stable_diffusion_id: model id from HuggingFace hub or local path for loading Stable Diffusion model
+          adapter_id: LCM LoRA id from HuggingFace hub or local path
+        Returns:
+          controlnet: ControlNet model
+          text_encoder: Stable Diffusion Text Encoder
+          unet: Stable Diffusion U-Net
+          vae: Stable Diffusion Variational Autoencoder (VAE)
+        """
+
+        # load the ControlNet model
+        controlnet = ControlNetModel.from_pretrained(controlnet_id)
+        # load the Stable Diffusion pipeline
+        pipe = StableDiffusionControlNetPipeline.from_pretrained(stable_diffusion_id, controlnet=controlnet)
+        # load LCM LoRA weights
+        pipe.load_lora_weights(adapter_id)
+        # fuse LoRA weights with the UNet
+        pipe.fuse_lora()
+        text_encoder = pipe.text_encoder
+        text_encoder.eval()
+        unet = pipe.unet
+        unet.eval()
+        vae = pipe.vae
+        vae.eval()
+        del pipe
+        gc.collect()
+        return controlnet, text_encoder, unet, vae
+
+.. code:: ipython3
+
+    controlnet, text_encoder, unet, vae = None, None, None, None
+    if not skip_models:
+        controlnet, text_encoder, unet, vae = load_original_pytorch_pipeline_components(controlnet_id, stable_diffusion_id, adapter_id)
+
+Condition Image
+---------------
+
+
+
+The process of extracting specific information from the input image is
+called an annotation. ControlNet comes pre-packaged with compatibility
+with several annotator models that help it to identify the shape/form
+of the target in the image:
+
+- Canny Edge Detection
+- M-LSD Lines
+- HED Boundary
+- Scribbles
+- Normal Map
+- Human Pose Estimation
+- Semantic Segmentation
+- Depth Estimation
+
+In this tutorial we will use `Normal
+Mapping `__ for
+controlling the diffusion process. In this case, the ControlNet
+condition image is an image with surface normal information, usually
+represented as a color-coded image.
+
+.. code:: ipython3
+
+    from controlnet_aux import NormalBaeDetector
+    from diffusers.utils import load_image
+    from urllib.request import urlretrieve
+    import matplotlib.pyplot as plt
+    from PIL import Image
+    import numpy as np
+
+    example_image_url = "https://huggingface.co/lllyasviel/control_v11p_sd15_normalbae/resolve/main/images/input.png"
+    urlretrieve(example_image_url, "example.png")
+
+    processor = NormalBaeDetector.from_pretrained("lllyasviel/Annotators")
+
+    image = load_image("example.png")
+    control_image = processor(image)
+
+
+    def visualize_results(orig_img:Image.Image, normal_img:Image.Image, result_img:Image.Image = None, save_fig:bool = False):
+        """
+        Helper function for results visualization
+
+        Parameters:
+           orig_img (Image.Image): original image
+           normal_img (Image.Image): image with surface normal information
+           result_img (Image.Image, optional, default None): generated image
+           save_fig (bool, optional, default False): allow saving visualization result on disk
+        Returns:
+           fig (matplotlib.pyplot.Figure): matplotlib figure containing the drawing result
+        """
+        orig_title = "Original image"
+        control_title = "Normal map"
+        orig_img = orig_img.resize(normal_img.size if result_img is None else result_img.size)
+        im_w, im_h = orig_img.size
+        is_horizontal = im_h <= im_w
+        figsize = (20, 20)
+        num_images = 3 if result_img is not None else 2
+        fig, axs = plt.subplots(num_images if is_horizontal else 1, 1 if is_horizontal else num_images, figsize=figsize, sharex='all', sharey='all')
+        fig.patch.set_facecolor('white')
+        list_axes = list(axs.flat)
+        for a in list_axes:
+            a.set_xticklabels([])
+            a.set_yticklabels([])
+            a.get_xaxis().set_visible(False)
+            a.get_yaxis().set_visible(False)
+            a.grid(False)
+        list_axes[0].imshow(np.array(orig_img))
+        list_axes[1].imshow(np.array(normal_img))
+        list_axes[0].set_title(orig_title, fontsize=15)
+        list_axes[1].set_title(control_title, fontsize=15)
+        if result_img is not None:
+            list_axes[2].imshow(np.array(result_img))
+            list_axes[2].set_title("Result", fontsize=15)
+
+        fig.subplots_adjust(wspace=0.01 if is_horizontal else 0.00, hspace=0.01 if is_horizontal else 0.1)
+        fig.tight_layout()
+        if save_fig:
+            fig.savefig("result.png", bbox_inches='tight')
+        return fig
+
+
+    fig = visualize_results(image, control_image)
+
+
+.. parsed-literal::
+
+    Loading base model ()...Done.
+    Removing last two layers (global_pool & classifier).
+
+
+
+.. image:: 263-lcm-lora-controlnet-with-output_files/263-lcm-lora-controlnet-with-output_10_1.png
+
+
+Convert models to OpenVINO Intermediate representation (IR) format
+------------------------------------------------------------------
+
+
+
+Starting from the 2023.0 release, OpenVINO supports PyTorch model
+conversion directly. We need to provide a model object and input data
+for model tracing to the ``ov.convert_model`` function to obtain an
+OpenVINO ``ov.Model`` object instance. The model can be saved to disk
+for later deployment using the ``ov.save_model`` function (the pattern
+is sketched on a toy module right after the list below).
+
+The pipeline consists of four important parts:
+
+- ControlNet for conditioning by image annotation.
+- Text Encoder for creating the condition to generate an image from a
+  text prompt.
+- U-Net for step-by-step denoising of the latent image representation.
+- Autoencoder (VAE) for decoding the latent space back to an image.
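+
+Each of the conversions below follows the same two-call pattern. Here
+it is on a deliberately tiny module (the ``Toy`` model is purely
+illustrative and not part of the pipeline):
+
+.. code:: ipython3
+
+    import torch
+    import openvino as ov
+
+    class Toy(torch.nn.Module):
+        def forward(self, x):
+            return torch.relu(x)
+
+    # trace the PyTorch module with an example input, then serialize the IR
+    ov_toy = ov.convert_model(Toy(), example_input=torch.randn(1, 4))
+    ov.save_model(ov_toy, "toy.xml")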
+ +Let us convert each part: + +ControlNet conversion +~~~~~~~~~~~~~~~~~~~~~ + + + +The ControlNet model accepts the same inputs like UNet in Stable +Diffusion pipeline and additional condition sample - skeleton key points +map predicted by pose estimator: + +- ``sample`` - latent image sample from the previous step, generation + process has not been started yet, so we will use random noise, +- ``timestep`` - current scheduler step, +- ``encoder_hidden_state`` - hidden state of text encoder, +- ``controlnet_cond`` - condition input annotation. + +The output of the model is attention hidden states from down and middle +blocks, which serves additional context for the UNet model. + +.. code:: ipython3 + + import torch + import openvino as ov + from functools import partial + + def cleanup_torchscript_cache(): + """ + Helper for removing cached model representation + """ + torch._C._jit_clear_class_registry() + torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() + torch.jit._state._clear_class_state() + + + def flattenize_inputs(inputs): + """ + Helper function for resolve nested input structure (e.g. lists or tuples of tensors) + """ + flatten_inputs = [] + for input_data in inputs: + if input_data is None: + continue + if isinstance(input_data, (list, tuple)): + flatten_inputs.extend(flattenize_inputs(input_data)) + else: + flatten_inputs.append(input_data) + return flatten_inputs + + + dtype_mapping = { + torch.float32: ov.Type.f32, + torch.float64: ov.Type.f64, + torch.int32: ov.Type.i32, + torch.int64: ov.Type.i64 + } + + + def prepare_input_info(input_dict): + """ + Helper function for preparing input info (shapes and data types) for conversion based on example inputs + """ + flatten_inputs = flattenize_inputs(inputs.values()) + input_info = [] + for input_data in flatten_inputs: + updated_shape = list(input_data.shape) + if updated_shape: + updated_shape[0] = -1 + if input_data.ndim == 4: + updated_shape[2] = -1 + updated_shape[3] = -1 + + input_info.append((dtype_mapping[input_data.dtype], updated_shape)) + return input_info + + + inputs = { + "sample": torch.randn((1, 4, 64, 64)), + "timestep": torch.tensor(1, dtype=torch.float32), + "encoder_hidden_states": torch.randn((1,77,768)), + "controlnet_cond": torch.randn((1,3,512,512)) + } + + + # Prepare conditional inputs for U-Net + if not UNET_OV_PATH.exists(): + controlnet.eval() + with torch.no_grad(): + down_block_res_samples, mid_block_res_sample = controlnet(**inputs, return_dict=False) + + if not CONTROLNET_OV_PATH.exists(): + input_info = prepare_input_info(inputs) + with torch.no_grad(): + controlnet.forward = partial(controlnet.forward, return_dict=False) + ov_model = ov.convert_model(controlnet, example_input=inputs, input=input_info) + ov.save_model(ov_model, CONTROLNET_OV_PATH) + del ov_model + cleanup_torchscript_cache() + print('ControlNet successfully converted to IR') + else: + print(f"ControlNet will be loaded from {CONTROLNET_OV_PATH}") + + del controlnet + gc.collect() + + +.. parsed-literal:: + + ControlNet will be loaded from model/controlnet-normalbae.xml + + + + +.. parsed-literal:: + + 9 + + + +U-Net +~~~~~ + + + +The process of U-Net model conversion remains the same, like for +original Stable Diffusion model, but with respect to the new inputs +generated by ControlNet. + +.. 
code:: ipython3 + + from typing import Tuple + + class UnetWrapper(torch.nn.Module): + def __init__( + self, + unet, + sample_dtype=torch.float32, + timestep_dtype=torch.int64, + encoder_hidden_states=torch.float32, + down_block_additional_residuals=torch.float32, + mid_block_additional_residual=torch.float32 + ): + super().__init__() + self.unet = unet + self.sample_dtype = sample_dtype + self.timestep_dtype = timestep_dtype + self.encoder_hidden_states_dtype = encoder_hidden_states + self.down_block_additional_residuals_dtype = down_block_additional_residuals + self.mid_block_additional_residual_dtype = mid_block_additional_residual + + def forward( + self, + sample:torch.Tensor, + timestep:torch.Tensor, + encoder_hidden_states:torch.Tensor, + down_block_additional_residuals:Tuple[torch.Tensor], + mid_block_additional_residual:torch.Tensor + ): + sample.to(self.sample_dtype) + timestep.to(self.timestep_dtype) + encoder_hidden_states.to(self.encoder_hidden_states_dtype) + down_block_additional_residuals = [res.to(self.down_block_additional_residuals_dtype) for res in down_block_additional_residuals] + mid_block_additional_residual.to(self.mid_block_additional_residual_dtype) + return self.unet( + sample, + timestep, + encoder_hidden_states, + down_block_additional_residuals=down_block_additional_residuals, + mid_block_additional_residual=mid_block_additional_residual + ) + + + + if not UNET_OV_PATH.exists(): + inputs.pop("controlnet_cond", None) + inputs["down_block_additional_residuals"] = down_block_res_samples + inputs["mid_block_additional_residual"] = mid_block_res_sample + input_info = prepare_input_info(inputs) + + wrapped_unet = UnetWrapper(unet) + wrapped_unet.eval() + + with torch.no_grad(): + ov_model = ov.convert_model(wrapped_unet, example_input=inputs) + + for (input_dtype, input_shape), input_tensor in zip(input_info, ov_model.inputs): + input_tensor.get_node().set_partial_shape(ov.PartialShape(input_shape)) + input_tensor.get_node().set_element_type(input_dtype) + ov_model.validate_nodes_and_infer_types() + ov.save_model(ov_model, UNET_OV_PATH) + del ov_model + cleanup_torchscript_cache() + del wrapped_unet + del unet + gc.collect() + print('Unet successfully converted to IR') + else: + del unet + print(f"Unet will be loaded from {UNET_OV_PATH}") + gc.collect() + + +.. parsed-literal:: + + Unet will be loaded from model/unet_controlnet.xml + + + + +.. parsed-literal:: + + 0 + + + +Text Encoder +~~~~~~~~~~~~ + + + +The text-encoder is responsible for transforming the input prompt, for +example, “a photo of an astronaut riding a horse” into an embedding +space that can be understood by the U-Net. It is usually a simple +transformer-based encoder that maps a sequence of input tokens to a +sequence of latent text embeddings. + +The input of the text encoder is tensor ``input_ids``, which contains +indexes of tokens from text processed by the tokenizer and padded to the +maximum length accepted by the model. Model outputs are two tensors: +``last_hidden_state`` - hidden state from the last MultiHeadAttention +layer in the model and ``pooler_out`` - pooled output for whole model +hidden states. + +.. code:: ipython3 + + def convert_encoder(text_encoder:torch.nn.Module, ir_path:Path): + """ + Convert Text Encoder model to OpenVINO IR. 
+ Function accepts text encoder model, prepares example inputs for conversion, and convert it to OpenVINO Model + Parameters: + text_encoder (torch.nn.Module): text_encoder model + ir_path (Path): File for storing model + Returns: + None + """ + if not ir_path.exists(): + input_ids = torch.ones((1, 77), dtype=torch.long) + # switch model to inference mode + text_encoder.eval() + + # disable gradients calculation for reducing memory consumption + with torch.no_grad(): + ov_model = ov.convert_model( + text_encoder, # model instance + example_input=input_ids, # inputs for model tracing + input=([1,77],) + ) + ov.save_model(ov_model, ir_path) + del ov_model + cleanup_torchscript_cache() + print('Text Encoder successfully converted to IR') + + + if not TEXT_ENCODER_OV_PATH.exists(): + convert_encoder(text_encoder, TEXT_ENCODER_OV_PATH) + else: + print(f"Text encoder will be loaded from {TEXT_ENCODER_OV_PATH}") + del text_encoder + gc.collect() + + +.. parsed-literal:: + + Text encoder will be loaded from model/text_encoder.xml + + + + +.. parsed-literal:: + + 0 + + + +VAE Decoder conversion +~~~~~~~~~~~~~~~~~~~~~~ + + + +The VAE model has two parts, an encoder, and a decoder. The encoder is +used to convert the image into a low-dimensional latent representation, +which will serve as the input to the U-Net model. The decoder, +conversely, transforms the latent representation back into an image. + +During latent diffusion training, the encoder is used to get the latent +representations (latents) of the images for the forward diffusion +process, which applies more and more noise at each step. During +inference, the denoised latents generated by the reverse diffusion +process are converted back into images using the VAE decoder. During +inference, we will see that we **only need the VAE decoder**. You can +find instructions on how to convert the encoder part in a stable +diffusion +`notebook <225-stable-diffusion-text-to-image-with-output.html>`__. + +.. code:: ipython3 + + def convert_vae_decoder(vae: torch.nn.Module, ir_path: Path): + """ + Convert VAE model to IR format. + Function accepts pipeline, creates wrapper class for export only necessary for inference part, + prepares example inputs for convert, + Parameters: + vae (torch.nn.Module): VAE model + ir_path (Path): File for storing model + Returns: + None + """ + class VAEDecoderWrapper(torch.nn.Module): + def __init__(self, vae): + super().__init__() + self.vae = vae + + def forward(self, latents): + return self.vae.decode(latents) + + if not ir_path.exists(): + vae_decoder = VAEDecoderWrapper(vae) + latents = torch.zeros((1, 4, 64, 64)) + + vae_decoder.eval() + with torch.no_grad(): + ov_model = ov.convert_model(vae_decoder, example_input=latents, input=[-1, 4, -1, -1]) + ov.save_model(ov_model, ir_path) + del ov_model + cleanup_torchscript_cache() + print('VAE decoder successfully converted to IR') + + + if not VAE_DECODER_OV_PATH.exists(): + convert_vae_decoder(vae, VAE_DECODER_OV_PATH) + else: + print(f"VAE decoder will be loaded from {VAE_DECODER_OV_PATH}") + + del vae + + +.. parsed-literal:: + + VAE decoder will be loaded from model/vae_decoder.xml + + +Prepare Inference pipeline +-------------------------- + + + +We already deeply discussed how the ControlNet-guided pipeline works on +example pose-controlled generation in `controlnet +notebook <../235-controlnet-stable-diffusion>`__. In our current +example, the pipeline remains without changes. 
Similarly to Diffusers +``StableDiffusionControlNetPipeline``, we define our own +``OVControlNetStableDiffusionPipeline`` inference pipeline based on +OpenVINO. + +.. code:: ipython3 + + from diffusers import DiffusionPipeline + from transformers import CLIPTokenizer + from typing import Union, List, Optional, Tuple + import cv2 + + + def scale_fit_to_window(dst_width:int, dst_height:int, image_width:int, image_height:int): + """ + Preprocessing helper function for calculating image size for resize with peserving original aspect ratio + and fitting image to specific window size + + Parameters: + dst_width (int): destination window width + dst_height (int): destination window height + image_width (int): source image width + image_height (int): source image height + Returns: + result_width (int): calculated width for resize + result_height (int): calculated height for resize + """ + im_scale = min(dst_height / image_height, dst_width / image_width) + return int(im_scale * image_width), int(im_scale * image_height) + + + def preprocess(image: Image.Image, dst_height:int = 512, dst_width:int = 512): + """ + Image preprocessing function. Takes image in PIL.Image format, resizes it to keep aspect ration and fits to model input window 512x512, + then converts it to np.ndarray and adds padding with zeros on right or bottom side of image (depends from aspect ratio), after that + converts data to float32 data type and change range of values from [0, 255] to [-1, 1], finally, converts data layout from planar NHWC to NCHW. + The function returns preprocessed input tensor and padding size, which can be used in postprocessing. + + Parameters: + image (Image.Image): input image + dst_width: destination image width + dst_height: destination image height + Returns: + image (np.ndarray): preprocessed image tensor + pad (Tuple[int]): pading size for each dimension for restoring image size in postprocessing + """ + src_width, src_height = image.size + res_width, res_height = scale_fit_to_window(dst_width, dst_height, src_width, src_height) + image = np.array(image.resize((res_width, res_height), resample=Image.Resampling.LANCZOS))[None, :] + pad_width = dst_width - res_width + pad_height = dst_height - res_height + pad = ((0, 0), (0, pad_height), (0, pad_width), (0, 0)) + image = np.pad(image, pad, mode="constant") + image = image.astype(np.float32) / 255.0 + image = image.transpose(0, 3, 1, 2) + return image, pad + + + def randn_tensor( + shape: Union[Tuple, List], + dtype: Optional[torch.dtype] = torch.float32, + ): + """ + Helper function for generation random values tensor with given shape and data type + + Parameters: + shape (Union[Tuple, List]): shape for filling random values + dtype (torch.dtype, *optiona*, torch.float32): data type for result + Returns: + latents (np.ndarray): tensor with random values with given data type and shape (usually represents noise in latent space) + """ + latents = torch.randn(shape, dtype=dtype) + return latents.numpy() + + + class OVControlNetStableDiffusionPipeline(DiffusionPipeline): + """ + OpenVINO inference pipeline for Stable Diffusion with ControlNet guidence + """ + def __init__( + self, + tokenizer: CLIPTokenizer, + scheduler, + core: ov.Core, + controlnet: ov.Model, + text_encoder: ov.Model, + unet: ov.Model, + vae_decoder: ov.Model, + device:str = "AUTO" + ): + super().__init__() + self.tokenizer = tokenizer + self.vae_scale_factor = 8 + self.scheduler = scheduler + self.load_models(core, device, controlnet, text_encoder, unet, vae_decoder) + + def 
load_models(self, core: ov.Core, device: str, controlnet:ov.Model, text_encoder: ov.Model, unet: ov.Model, vae_decoder: ov.Model): + """ + Function for loading models on device using OpenVINO + + Parameters: + core (Core): OpenVINO runtime Core class instance + device (str): inference device + controlnet (Model): OpenVINO Model object represents ControlNet + text_encoder (Model): OpenVINO Model object represents text encoder + unet (Model): OpenVINO Model object represents UNet + vae_decoder (Model): OpenVINO Model object represents vae decoder + Returns + None + """ + self.text_encoder = core.compile_model(text_encoder, device) + self.register_to_config(controlnet=core.compile_model(controlnet, device)) + self.register_to_config(unet=core.compile_model(unet, device)) + ov_config = {"INFERENCE_PRECISION_HINT": "f32"} if device != "CPU" else {} + self.vae_decoder = core.compile_model(vae_decoder, device, ov_config) + + def __call__( + self, + prompt: Union[str, List[str]], + image: Image.Image, + num_inference_steps: int = 4, + height:int = 512, + width:int = 512, + negative_prompt: Union[str, List[str]] = None, + guidance_scale: float = 0.5, + controlnet_conditioning_scale: float = 1.0, + latents: Optional[np.array] = None, + output_type: Optional[str] = "pil", + ): + """ + Function invoked when calling the pipeline for generation. + + Parameters: + prompt (`str` or `List[str]`): + The prompt or prompts to guide the image generation. + image (`Image.Image`): + `Image`, or tensor representing an image batch which will be repainted according to `prompt`. + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + height (int, *optional*, defaults to 512): generated image height + width (int, *optional*, defaults to 512): generated image width + negative_prompt (`str` or `List[str]`): + negative prompt or prompts for generation + guidance_scale (`float`, *optional*, defaults to 0.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. This pipeline requires a value of at least `1`. + latents (`np.ndarray`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `Image.Image` or `np.array`. + Returns: + image ([List[Union[np.ndarray, Image.Image]]): generaited images + + """ + + # 1. Define call parameters + batch_size = 1 if isinstance(prompt, str) else len(prompt) + if guidance_scale < 1 and negative_prompt: + guidance_scale += 1 + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
+ do_classifier_free_guidance = guidance_scale > 1.0 + # 2. Encode input prompt + text_embeddings = self._encode_prompt(prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt) + + # 3. Preprocess image + orig_width, orig_height = image.size + image, pad = preprocess(image, height, width) + if do_classifier_free_guidance: + image = np.concatenate(([image] * 2)) + + # 4. set timesteps + self.scheduler.set_timesteps(num_inference_steps) + timesteps = self.scheduler.timesteps + + # 5. Prepare latent variables + num_channels_latents = 4 + latents = self.prepare_latents( + batch_size, + num_channels_latents, + height, + width, + latents=latents, + ) + + # 6. Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + # Expand the latents if we are doing classifier free guidance. + # The latents are expanded 3 times because for pix2pix the guidance\ + # is applied for both the text and the input image. + latent_model_input = np.concatenate( + [latents] * 2) if do_classifier_free_guidance else latents + latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) + + result = self.controlnet([latent_model_input, t, text_embeddings, image], share_inputs=True, share_outputs=True) + down_and_mid_blok_samples = [sample * controlnet_conditioning_scale for _, sample in result.items()] + + # predict the noise residual + noise_pred = self.unet([latent_model_input, t, text_embeddings, *down_and_mid_blok_samples], share_inputs=True, share_outputs=True)[0] + + # perform guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred[0], noise_pred[1] + noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step(torch.from_numpy(noise_pred), t, torch.from_numpy(latents)).prev_sample.numpy() + progress_bar.update() + + # 7. Post-processing + image = self.decode_latents(latents, pad) + + # 8. Convert to PIL + if output_type == "pil": + image = self.numpy_to_pil(image) + image = [img.resize((orig_width, orig_height), Image.Resampling.LANCZOS) for img in image] + else: + image = [cv2.resize(img, (orig_width, orig_width)) + for img in image] + + return image + + def _encode_prompt(self, prompt:Union[str, List[str]], num_images_per_prompt:int = 1, do_classifier_free_guidance:bool = True, negative_prompt:Union[str, List[str]] = None): + """ + Encodes the prompt into text encoder hidden states. 
+ + Parameters: + prompt (str or list(str)): prompt to be encoded + num_images_per_prompt (int): number of images that should be generated per prompt + do_classifier_free_guidance (bool): whether to use classifier free guidance or not + negative_prompt (str or list(str)): negative prompt to be encoded + Returns: + text_embeddings (np.ndarray): text encoder hidden states + """ + batch_size = len(prompt) if isinstance(prompt, list) else 1 + + # tokenize input prompts + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=self.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_input_ids = text_inputs.input_ids + + text_embeddings = self.text_encoder(text_input_ids, share_inputs=True, share_outputs=True)[0] + + # duplicate text embeddings for each generation per prompt + if num_images_per_prompt != 1: + bs_embed, seq_len, _ = text_embeddings.shape + text_embeddings = np.tile( + text_embeddings, (1, num_images_per_prompt, 1)) + text_embeddings = np.reshape( + text_embeddings, (bs_embed * num_images_per_prompt, seq_len, -1)) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance: + uncond_tokens: List[str] + max_length = text_input_ids.shape[-1] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + else: + uncond_tokens = negative_prompt + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_tensors="np", + ) + + uncond_embeddings = self.text_encoder(uncond_input.input_ids, share_inputs=True, share_outputs=True)[0] + + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = uncond_embeddings.shape[1] + uncond_embeddings = np.tile(uncond_embeddings, (1, num_images_per_prompt, 1)) + uncond_embeddings = np.reshape(uncond_embeddings, (batch_size * num_images_per_prompt, seq_len, -1)) + + # For classifier free guidance, we need to do two forward passes. + # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) + + return text_embeddings + + def prepare_latents(self, batch_size:int, num_channels_latents:int, height:int, width:int, dtype:np.dtype = torch.float32, latents:np.ndarray = None): + """ + Preparing noise to image generation. 
If initial latents are not provided, they will be generated randomly, + then prepared latents scaled by the standard deviation required by the scheduler + + Parameters: + batch_size (int): input batch size + num_channels_latents (int): number of channels for noise generation + height (int): image height + width (int): image width + dtype (np.dtype, *optional*, np.float32): dtype for latents generation + latents (np.ndarray, *optional*, None): initial latent noise tensor, if not provided will be generated + Returns: + latents (np.ndarray): scaled initial noise for diffusion + """ + shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) + if latents is None: + latents = randn_tensor(shape, dtype=dtype) + else: + latents = latents + + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + def decode_latents(self, latents:np.array, pad:Tuple[int]): + """ + Decode predicted image from latent space using VAE Decoder and unpad image result + + Parameters: + latents (np.ndarray): image encoded in diffusion latent space + pad (Tuple[int]): each side padding sizes obtained on preprocessing step + Returns: + image: decoded by VAE decoder image + """ + latents = 1 / 0.18215 * latents + image = self.vae_decoder(latents)[0] + (_, end_h), (_, end_w) = pad[1:3] + h, w = image.shape[2:] + unpad_h = h - end_h + unpad_w = w - end_w + image = image[:, :, :unpad_h, :unpad_w] + image = np.clip(image / 2 + 0.5, 0, 1) + image = np.transpose(image, (0, 2, 3, 1)) + return image + +Prepare tokenizer and LCMScheduler +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + + +Tokenizer and scheduler are also important parts of the diffusion +pipeline. The tokenizer is responsible for preprocessing user-provided +prompts into token ids that then used by Text Encoder. + +The scheduler takes a model’s output (the sample which the diffusion +process is iterating on) and a timestep to return a denoised sample. The +timestep is important because it dictates where in the diffusion process +the step is; data is generated by iterating forward n timesteps and +inference occurs by propagating backward through the timesteps. There +are many +`schedulers `__ +implemented inside the diffusers library, LCM pipeline required changing +the original pipeline scheduler with +`LCMScheduler `__. + +.. code:: ipython3 + + from diffusers import LCMScheduler + from transformers import AutoTokenizer + + if not TOKENIZER_PATH.exists(): + tokenizer = AutoTokenizer.from_pretrained(stable_diffusion_id, subfolder="tokenizer") + tokenizer.save_pretrained(TOKENIZER_PATH) + else: + tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH) + if not SCHEDULER_PATH.exists(): + scheduler = LCMScheduler.from_pretrained(stable_diffusion_id, subfolder="scheduler") + scheduler.save_pretrained(SCHEDULER_PATH) + else: + scheduler = LCMScheduler.from_config(SCHEDULER_PATH) + +Select inference device for Stable Diffusion pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + + +select device from dropdown list for running inference using OpenVINO + +.. code:: ipython3 + + import ipywidgets as widgets + + core = ov.Core() + + device = widgets.Dropdown( + options=core.available_devices + ["AUTO"], + value='CPU', + description='Device:', + disabled=False, + ) + + device + + + + +.. parsed-literal:: + + Dropdown(description='Device:', options=('CPU', 'GPU.0', 'GPU.1', 'GPU.2', 'AUTO'), value='CPU') + + + +.. 
code:: ipython3 + + ov_pipe = OVControlNetStableDiffusionPipeline(tokenizer, scheduler, core, CONTROLNET_OV_PATH, TEXT_ENCODER_OV_PATH, UNET_OV_PATH, VAE_DECODER_OV_PATH, device=device.value) + +Running Text-to-Image Generation with ControlNet Conditioning and OpenVINO +-------------------------------------------------------------------------- + + + +Now, we are ready to start generation. For improving the generation +process, we also introduce an opportunity to provide a +``negative prompt``. Technically, positive prompt steers the diffusion +toward the images associated with it, while negative prompt steers the +diffusion away from it. More explanation of how it works can be found in +this +`article `__. +We can keep this field empty if we want to generate image without +negative prompting. + +`Classifier-free guidance (CFG) `__ or +guidance scale is a parameter that controls how much the image +generation process follows the text prompt. The higher the value, the +more the image sticks to a given text input. But this does not mean that +the value should always be set to maximum, as more guidance means less +diversity and quality. According to experiments, the optimal value of +guidance for LCM models is in range between 0 and 2. > Please note, that +negative prompt is applicable only when guidance scale > 1. + +Let’s see model in action + +.. code:: ipython3 + + prompt = "A head full of roses" + torch.manual_seed(4257) + + result = ov_pipe(prompt, control_image, 4) + result[0] + + + +.. parsed-literal:: + + 0%| | 0/4 [00:00`__ enables +post-training quantization by adding quantization layers into model +graph and then using a subset of the training dataset to initialize the +parameters of these additional quantization layers. Quantized operations +are executed in ``INT8`` instead of ``FP32``/``FP16`` making model +inference faster. + +According to ``OVControlNetStableDiffusionPipeline`` structure, +ControlNet and UNet are used in the cycle repeating inference on each +diffusion step, while other parts of pipeline take part only once. That +is why computation cost and speed of ControlNet and UNet become the +critical path in the pipeline. Quantizing the rest of the SD pipeline +does not significantly improve inference performance but can lead to a +substantial degradation of accuracy. + +The optimization process contains the following steps: + +1. Create a calibration dataset for quantization. +2. Run ``nncf.quantize()`` to obtain quantized model. +3. Save the ``INT8`` model using ``openvino.save_model()`` function. + +Please select below whether you would like to run quantization to +improve model inference speed. + +.. code:: ipython3 + + is_gpu_device = "GPU" in device.value + to_quantize = widgets.Checkbox( + value=not is_gpu_device, + description='Quantization', + disabled=is_gpu_device, + ) + + to_quantize + +Let’s load ``skip magic`` extension to skip quantization if +``to_quantize`` is not selected + +.. code:: ipython3 + + import sys + sys.path.append("../utils") + + int8_pipe = None + + %load_ext skip_kernel_extension + +Prepare calibration datasets +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + + +We use a portion of +`fusing/instructpix2pix-1000-samples `__ +dataset from Hugging Face as calibration data for ControlNet and UNet. + +To collect intermediate model inputs for calibration we should customize +``CompiledModel``. + +.. 
code:: ipython3 + + %%skip not $to_quantize.value + + import datasets + from tqdm.notebook import tqdm + from transformers import set_seed + from typing import Any, Dict, List + + set_seed(1) + + class CompiledModelDecorator(ov.CompiledModel): + def __init__(self, compiled_model, prob: float): + super().__init__(compiled_model) + self.data_cache = [] + self.prob = np.clip(prob, 0, 1) + + def __call__(self, *args, **kwargs): + if np.random.rand() >= self.prob: + self.data_cache.append(*args) + return super().__call__(*args, **kwargs) + + def collect_calibration_data(pipeline: OVControlNetStableDiffusionPipeline, subset_size: int) -> List[Dict]: + original_unet = pipeline.unet + pipeline.unet = CompiledModelDecorator(original_unet, prob=0.3) + + dataset = datasets.load_dataset("fusing/instructpix2pix-1000-samples", split="train", streaming=True).shuffle(seed=42) + pipeline.set_progress_bar_config(disable=True) + + # Run inference for data collection + pbar = tqdm(total=subset_size) + diff = 0 + control_images = [] + for batch in dataset: + prompt = batch["edit_prompt"] + if len(prompt) > tokenizer.model_max_length: + continue + image = batch["input_image"] + control_image = processor(image) + + _ = pipeline(prompt, image=control_image, num_inference_steps=4) + collected_subset_size = len(pipeline.unet.data_cache) + control_images.append((min(collected_subset_size, subset_size), control_image)) + if collected_subset_size >= subset_size: + pbar.update(subset_size - pbar.n) + break + pbar.update(collected_subset_size - diff) + diff = collected_subset_size + + control_calibration_dataset = pipeline.unet.data_cache + pipeline.set_progress_bar_config(disable=False) + pipeline.unet = original_unet + return control_calibration_dataset, control_images + +.. code:: ipython3 + + %%skip not $to_quantize.value + + CONTROLNET_INT8_OV_PATH = Path("model/controlnet-normalbae_int8.xml") + UNET_INT8_OV_PATH = Path("model/unet_controlnet_int8.xml") + if not (CONTROLNET_INT8_OV_PATH.exists() and UNET_INT8_OV_PATH.exists()): + subset_size = 200 + unet_calibration_data, control_images = collect_calibration_data(ov_pipe, subset_size=subset_size) + + + +.. parsed-literal:: + + 0%| | 0/200 [00:00 +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/263-lcm-lora-controlnet-with-output_files/ + +

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/263-lcm-lora-controlnet-with-output_files/


../
+263-lcm-lora-controlnet-with-output_10_1.png       07-Dec-2023 00:49             2088439
+263-lcm-lora-controlnet-with-output_27_2.jpg       07-Dec-2023 00:49               51043
+263-lcm-lora-controlnet-with-output_27_2.png       07-Dec-2023 00:49              479182
+263-lcm-lora-controlnet-with-output_28_0.png       07-Dec-2023 00:49             1877881
+263-lcm-lora-controlnet-with-output_42_1.png       07-Dec-2023 00:49             2501299
+

+ diff --git a/docs/notebooks/265-wuerstchen-image-generation-with-output.rst b/docs/notebooks/265-wuerstchen-image-generation-with-output.rst index 9e1e9e7362fe7f..1b09419feb6a99 100644 --- a/docs/notebooks/265-wuerstchen-image-generation-with-output.rst +++ b/docs/notebooks/265-wuerstchen-image-generation-with-output.rst @@ -28,6 +28,7 @@ Hub `__. **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Load the original model <#load-the-original-model>`__ @@ -51,9 +52,7 @@ Prerequisites .. code:: ipython3 - %pip install -q "diffusers>=0.21.0" transformers accelerate matplotlib gradio - %pip uninstall -q -y openvino-dev openvino openvino-nightly - %pip install -q openvino-nightly + %pip install -q "diffusers>=0.21.0" transformers accelerate matplotlib gradio "openvino>=2023.2.0" --extra-index-url https://download.pytorch.org/whl/cpu .. parsed-literal:: diff --git a/docs/notebooks/265-wuerstchen-image-generation-with-output_files/index.html b/docs/notebooks/265-wuerstchen-image-generation-with-output_files/index.html index c37effc83e1aab..b1135c65f26d1d 100644 --- a/docs/notebooks/265-wuerstchen-image-generation-with-output_files/index.html +++ b/docs/notebooks/265-wuerstchen-image-generation-with-output_files/index.html @@ -1,8 +1,8 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/265-wuerstchen-image-generation-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/265-wuerstchen-image-generation-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/265-wuerstchen-image-generation-with-output_files/


../
-265-wuerstchen-image-generation-with-output_11_..> 15-Nov-2023 00:43             1321476
-265-wuerstchen-image-generation-with-output_45_..> 15-Nov-2023 00:43             1293108
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/265-wuerstchen-image-generation-with-output_files/


../
+265-wuerstchen-image-generation-with-output_11_..> 07-Dec-2023 00:49             1321476
+265-wuerstchen-image-generation-with-output_45_..> 07-Dec-2023 00:49             1293108
 

diff --git a/docs/notebooks/266-speculative-sampling-with-output.rst b/docs/notebooks/266-speculative-sampling-with-output.rst index 42f694e151ab34..a31400677033d8 100644 --- a/docs/notebooks/266-speculative-sampling-with-output.rst +++ b/docs/notebooks/266-speculative-sampling-with-output.rst @@ -35,6 +35,7 @@ available at openvino.ai **Table of contents:** + + - `Prerequisites <#prerequisites>`__ - `Select inference device <#select-inference-device>`__ @@ -71,7 +72,7 @@ useful modules. .. code:: ipython3 %pip install -q --upgrade pip - %pip install -q --upgrade transformers torch gradio openvino accelerate onnx onnxruntime ipywidgets + %pip install -q --upgrade transformers torch gradio openvino accelerate onnx ipywidgets --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "git+https://github.com/huggingface/optimum-intel.git" Select inference device diff --git a/docs/notebooks/267-distil-whisper-asr-with-output.rst b/docs/notebooks/267-distil-whisper-asr-with-output.rst index f9fc4e58cb43a7..3378adcca8d1d9 100644 --- a/docs/notebooks/267-distil-whisper-asr-with-output.rst +++ b/docs/notebooks/267-distil-whisper-asr-with-output.rst @@ -7,9 +7,9 @@ is a distilled variant of the OpenAI. The Distil-Whisper is proposed in the paper `Robust Knowledge Distillation via Large-Scale Pseudo Labelling `__. According to authors, -compared to Whisper, Distil-Whisper runs 6x faster with 50% fewer -parameters, while performing to within 1% word error rate (WER) on -out-of-distribution evaluation data. +compared to Whisper, Distil-Whisper runs several times faster with +50% fewer parameters, while performing to within 1% word error rate +(WER) on out-of-distribution evaluation data. Whisper is a Transformer based encoder-decoder model, also referred to as a sequence-to-sequence model. It maps a sequence of audio spectrogram @@ -57,22 +57,16 @@ from `NNCF `__ is applied. - `Interactive demo <#interactive-demo>`__ Prerequisites -------------------------------------------------------- +------------- -.. code:: ipython3 - - %pip uninstall -q -y openvino-dev openvino openvino-nightly - %pip install -q openvino-nightly - %pip install -q "transformers" onnx datasets "git+https://github.com/eaidova/optimum-intel.git@ea/whisper" "gradio>=4.0" "librosa" "soundfile" - %pip install -q "nncf>=2.6.0" "jiwer" -.. parsed-literal:: - - Note: you may need to restart the kernel to use updated packages. - ERROR: tokenizers 0.14.1 has requirement huggingface_hub<0.18,>=0.16.4, but you'll have huggingface-hub 0.19.0 which is incompatible. - Note: you may need to restart the kernel to use updated packages. +.. code:: ipython3 + %pip uninstall -q -y optimum-intel optimum + %pip install -q transformers onnx "git+https://github.com/eaidova/optimum-intel.git@ea/whisper" --extra-index-url https://download.pytorch.org/whl/cpu + %pip install -q "openvino>=2023.2.0" datasets "gradio>=4.0" "librosa" "soundfile" + %pip install -q "nncf>=2.6.0" "jiwer" Load PyTorch model ------------------ @@ -238,8 +232,10 @@ OpenVINO model. It means that we can reuse initialized early processor. INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, onnx, openvino -Select Inference device\ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select Inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -267,7 +263,9 @@ Select Inference device\ Compile OpenVINO model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~ + + .. 
code:: ipython3 @@ -283,7 +281,9 @@ Compile OpenVINO model Run OpenVINO model inference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -320,8 +320,10 @@ Run OpenVINO model inference Result: Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. -Compare performance PyTorch vs OpenVINO\ ---------------------------------------------------------------------------------- +Compare performance PyTorch vs OpenVINO +--------------------------------------- + + .. code:: ipython3 @@ -375,7 +377,9 @@ Compare performance PyTorch vs OpenVINO\ Compare with OpenAI Whisper -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Since Distil-Whisper is optimized version of original OpenAI Whisper model, let’s compare performance and check benefits of using it. @@ -1225,4 +1229,3 @@ recognition. Multilingual support will be provided later. ..
- diff --git a/docs/notebooks/268-table-question-answering-with-output.rst b/docs/notebooks/268-table-question-answering-with-output.rst index 60a6495dbd03d5..e26f30061988be 100644 --- a/docs/notebooks/268-table-question-answering-with-output.rst +++ b/docs/notebooks/268-table-question-answering-with-output.rst @@ -30,6 +30,7 @@ and is trained end-to-end. **Table of contents:** + - `Prerequisites <#prerequisites>`__ - `Use the original model to run an inference <#use-the-original-model-to-run-an-inference>`__ @@ -46,19 +47,13 @@ Prerequisites .. code:: ipython3 - %pip uninstall -q -y openvino-dev openvino openvino-nightly - %pip install -q openvino-nightly - # other dependencies + %pip install -q torch "transformers>=4.31.0" --extra-index-url https://download.pytorch.org/whl/cpu - %pip install -q "gradio>=4.0.2" + %pip install -q "openvino>=2023.2.0" "gradio>=4.0.2" .. parsed-literal:: - WARNING: Skipping openvino-nightly as it is not installed. - Note: you may need to restart the kernel to use updated packages. - DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 - Note: you may need to restart the kernel to use updated packages. DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 Note: you may need to restart the kernel to use updated packages. DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063 @@ -76,10 +71,10 @@ Prerequisites .. parsed-literal:: - 2023-11-15 00:16:22.014004: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-11-15 00:16:22.047161: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-07 00:22:18.058770: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-07 00:22:18.092302: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 
- 2023-11-15 00:16:22.631876: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-07 00:22:18.675506: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT Use ``TapasForQuestionAnswering.from_pretrained`` to download a @@ -169,7 +164,7 @@ demonstrate how to make an inference. You can use ``pipeline`` from .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1785: UserWarning: scatter_reduce() is in beta and the API may change at any time. (Triggered internally at ../aten/src/ATen/native/TensorAdvancedIndexing.cpp:1615.) + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1785: UserWarning: scatter_reduce() is in beta and the API may change at any time. (Triggered internally at ../aten/src/ATen/native/TensorAdvancedIndexing.cpp:1615.) segment_means = out.scatter_reduce( @@ -221,55 +216,55 @@ function to serialize the result of conversion. .. parsed-literal:: [ WARNING ] Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s. - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1600: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1600: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. self.indices = torch.as_tensor(indices) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1601: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1601: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. 
self.num_segments = torch.as_tensor(num_segments, device=indices.device) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1703: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1703: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. batch_size = torch.prod(torch.tensor(list(index.batch_shape()))) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1779: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1779: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0 - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1782: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1782: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! flat_values = values.reshape(flattened_shape.tolist()) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1784: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! 
+ /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1784: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1792: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1792: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. torch.as_tensor(index.batch_shape(), dtype=torch.long), - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1793: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1793: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. torch.as_tensor([index.num_segments], dtype=torch.long), - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1794: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1794: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. 
torch.as_tensor(vector_shape, dtype=torch.long), - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1799: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1799: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1730: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1730: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. batch_shape = torch.as_tensor( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1734: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1734: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. num_segments = torch.as_tensor(num_segments) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1745: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! 
+ /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1745: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! new_shape = [int(x) for x in new_tensor.tolist()] - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1748: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1748: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1749: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1749: TracerWarning: Converting a tensor to a Python list might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! indices = indices.repeat(multiples.tolist()) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:316: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:316: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. 
torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1260: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1260: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)), - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1265: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1265: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)), - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1957: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1957: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1962: TracerWarning: torch.as_tensor results are registered as constants in the trace. 
You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1962: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1998: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:1998: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index) - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2021: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2021: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2028: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. 
+ /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2028: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. column_mask = torch.as_tensor( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2053: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2053: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. selected_column_id = torch.as_tensor( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2058: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/transformers/models/tapas/modeling_tapas.py:2058: TracerWarning: torch.as_tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect. selected_column_mask = torch.as_tensor( @@ -461,14 +456,6 @@ Interactive inference demo.queue().launch(share=True, debug=False) -.. parsed-literal:: - - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/gradio/blocks.py:928: UserWarning: api_name display_table already exists, using display_table_1 - warnings.warn(f"api_name {api_name} already exists, using {api_name_}") - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-545/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/gradio/blocks.py:928: UserWarning: api_name display_table already exists, using display_table_2 - warnings.warn(f"api_name {api_name} already exists, using {api_name_}") - - .. 
parsed-literal:: Running on local URL: http://127.0.0.1:7860 diff --git a/docs/notebooks/269-film-slowmo-with-output.rst b/docs/notebooks/269-film-slowmo-with-output.rst index 14841979ccc0fb..9cee0960e9c6cd 100644 --- a/docs/notebooks/269-film-slowmo-with-output.rst +++ b/docs/notebooks/269-film-slowmo-with-output.rst @@ -34,22 +34,34 @@ FILM is trained on regular video frame triplets, with the middle frame serving as the ground-truth for supervision. In this tutorial, we will use `TensorFlow Hub `__ as -a model source. +a model source. + + **NOTE**: To run this tutorial, your system is required to have a VP9 + video encoder. Ubuntu has it preinstalled, but for Windows, you + should install it manually. **Table of contents:** -- `Prerequisites <#prerequisites>`__ -- `Prepare images <#prepare-images>`__ -- `Load the model <#load-the-model>`__ -- `Infer the model <#infer-the-model>`__ -- `Single middle frame interpolation <#single-middle-frame-interpolation>`__ -- `Recursive frame generation <#recursive-frame-generation>`__ -- `Convert the model to OpenVINO IR <#convert-the-model-to-openvino-ir>`__ -- `Inference <#inference>`__ -- `Select inference device <#select-inference-device>`__ -- `Single middle frame interpolation <#single-middle-frame-interpolation>`__ -- `Recursive frame generation <#recursive-frame-generation>`__ -- `Interactive inference <#interactive-inference>`__ + +- `Prerequisites <#prerequisites>`__ +- `Prepare images <#prepare-images>`__ +- `Load the model <#load-the-model>`__ +- `Infer the model <#infer-the-model>`__ + + - `Single middle frame + interpolation <#single-middle-frame-interpolation>`__ + - `Recursive frame generation <#recursive-frame-generation>`__ + +- `Convert the model to OpenVINO + IR <#convert-the-model-to-openvino-ir>`__ +- `Inference <#inference>`__ + + - `Select inference device <#select-inference-device>`__ + - `Single middle frame + interpolation <#single-middle-frame-interpolation>`__ + - `Recursive frame generation <#recursive-frame-generation>`__ + +- `Interactive inference <#interactive-inference>`__ .. |image0| image:: https://github.com/googlestaging/frame-interpolation/raw/main/moment.gif @@ -60,9 +72,7 @@ Prerequisites .. code:: ipython3 - %pip install -q tensorflow tensorflow_hub numpy "opencv-python" tqdm matplotlib gradio Pillow - %pip uninstall -q -y openvino-dev openvino openvino-nightly - %pip install -q openvino-nightly + %pip install -q tensorflow tensorflow_hub numpy "opencv-python" tqdm matplotlib gradio Pillow "openvino>=2023.2.0" .. parsed-literal:: @@ -201,7 +211,8 @@ Hub `__. x1=tf.keras.layers.Input(shape=(None, None, 3)), time=tf.keras.layers.Input(shape=(1)), ) - film_layer = hub.KerasLayer("https://tfhub.dev/google/film/1")(inputs) + model_url = "https://www.kaggle.com/models/google/film/frameworks/tensorFlow2/variations/film/versions/1" + film_layer = hub.KerasLayer(model_url)(inputs) film_model = tf.keras.Model(inputs=inputs, outputs=film_layer) Infer the model diff --git a/docs/notebooks/269-film-slowmo-with-output_files/index.html b/docs/notebooks/269-film-slowmo-with-output_files/index.html index 48822d60d20385..d6b28a7018d889 100644 --- a/docs/notebooks/269-film-slowmo-with-output_files/index.html +++ b/docs/notebooks/269-film-slowmo-with-output_files/index.html @@ -1,9 +1,9 @@ -Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/269-film-slowmo-with-output_files/ +Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/269-film-slowmo-with-output_files/ -

Index of /projects/ov-notebook/0.1.0-latest/20231114220808/dist/rst_files/269-film-slowmo-with-output_files/


../
-269-film-slowmo-with-output_14_0.png               15-Nov-2023 00:43              550928
-269-film-slowmo-with-output_29_0.png               15-Nov-2023 00:43              550615
-269-film-slowmo-with-output_7_0.png                15-Nov-2023 00:43              794884
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/269-film-slowmo-with-output_files/


../
+269-film-slowmo-with-output_14_0.png               07-Dec-2023 00:49              550928
+269-film-slowmo-with-output_29_0.png               07-Dec-2023 00:49              550615
+269-film-slowmo-with-output_7_0.png                07-Dec-2023 00:49              794884
 

diff --git a/docs/notebooks/270-sound-generation-audioldm2-with-output.rst b/docs/notebooks/270-sound-generation-audioldm2-with-output.rst
new file mode 100644
index 00000000000000..4999c60715a7f3
--- /dev/null
+++ b/docs/notebooks/270-sound-generation-audioldm2-with-output.rst
@@ -0,0 +1,774 @@
+Sound Generation with AudioLDM2 and OpenVINO™
+=============================================
+
+`AudioLDM 2 `__ is a latent
+text-to-audio diffusion model capable of generating realistic audio
+samples given any text input.
+
+AudioLDM 2 was proposed in the paper `AudioLDM 2: Learning Holistic
+Audio Generation with Self-supervised
+Pretraining `__ by ``Haohe Liu`` et
+al.
+
+The model takes a text prompt as input and predicts the corresponding
+audio. It can generate text-conditional sound effects, human speech and
+music.
+
+|image0|
+
+In this tutorial we will try out the pipeline, convert the models
+backing it one by one, and run an interactive app with Gradio!
+
+**Table of contents:**
+
+
+- `Prerequisites <#prerequisites>`__
+- `Instantiating Generation
+  Pipeline <#instantiating-generation-pipeline>`__
+- `Convert models to OpenVINO Intermediate representation (IR)
+  format <#convert-models-to-openvino-intermediate-representation-ir-format>`__
+
+  - `Text Encoder <#text-encoder>`__
+  - `Second text encoder
+    conversion <#second-text-encoder-conversion>`__
+  - `Vocoder conversion <#vocoder-conversion>`__
+  - `GPT-2 conversion <#gpt--conversion>`__
+  - `Projection model conversion <#projection-model-conversion>`__
+  - `UNet conversion <#unet-conversion>`__
+  - `VAE Decoder conversion <#vae-decoder-conversion>`__
+
+- `Select inference device for Stable Diffusion
+  pipeline <#select-inference-device-for-stable-diffusion-pipeline>`__
+- `Adapt OpenVINO models to the original
+  pipeline <#adapt-openvino-models-to-the-original-pipeline>`__
+- `Try out the converted pipeline <#try-out-the-converted-pipeline>`__
+
+.. |image0| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/76463150/c93a0f86-d9cf-4bd1-93b9-e27532170d75
+
+Prerequisites
+-------------
+
+
+
+.. code:: ipython3
+
+    %pip install -q accelerate "diffusers>=0.21.0" transformers torch gradio --extra-index-url https://download.pytorch.org/whl/cpu
+    %pip install -q "openvino>=2023.2.0"
+
+
+.. parsed-literal::
+
+    Note: you may need to restart the kernel to use updated packages.
+    Note: you may need to restart the kernel to use updated packages.
+
+
+Instantiating Generation Pipeline
+---------------------------------
+
+
+
+To work with `AudioLDM 2 `__ by
+`Centre for Vision, Speech and Signal Processing - University of Surrey `__,
+we will use the `Hugging Face Diffusers
+package `__. The Diffusers package
+exposes the ``AudioLDM2Pipeline`` class, simplifying model
+instantiation and weight loading. The code below demonstrates how to
+create an ``AudioLDM2Pipeline`` and generate a text-conditioned sound
+sample.
+.. code:: ipython3
+
+    from collections import namedtuple
+    from functools import partial
+    import gc
+    from pathlib import Path
+
+    from diffusers import AudioLDM2Pipeline
+    from IPython.display import Audio
+    import numpy as np
+    import openvino as ov
+    import torch
+
+    MODEL_ID = "cvssp/audioldm2"
+    pipe = AudioLDM2Pipeline.from_pretrained(MODEL_ID)
+
+    prompt = "birds singing in the forest"
+    negative_prompt = "Low quality"
+    audio = pipe(
+        prompt,
+        negative_prompt=negative_prompt,
+        num_inference_steps=150,
+        audio_length_in_s=3.0
+    ).audios[0]
+
+    sampling_rate = 16000
+    Audio(audio, rate=sampling_rate)
+
+
+
+.. parsed-literal::
+
+    Loading pipeline components...:   0%|          | 0/11 [00:00<?, ?it/s]
+
+
+    Your browser does not support the audio element.
+
+
+
+
+Convert models to OpenVINO Intermediate representation (IR) format
+-------------------------------------------------------------------
+
+
+
+`Model conversion
+API `__
+enables direct conversion of PyTorch models backing the pipeline. We
+need to provide a model object and input data for model tracing to the
+``ov.convert_model`` function to obtain an OpenVINO ``ov.Model`` object
+instance. The model can be saved to disk for later deployment using the
+``ov.save_model`` function.
+
+The pipeline consists of seven important parts:
+
+- T5 and CLAP Text Encoders for creating the condition to generate a sound
+  from a text prompt.
+- Projection model to merge outputs from the two text encoders.
+- GPT-2 language model to generate a sequence of hidden-states
+  conditioned on the projected outputs from the two text encoders.
+- Vocoder to convert the mel-spectrogram latents to the final audio
+  waveform.
+- UNet for step-by-step denoising of the latent image representation.
+- Autoencoder (VAE) for decoding the latent space into an image.
+
+.. code:: ipython3
+
+    models_base_folder = Path("models")
+
+    def cleanup_torchscript_cache():
+        """
+        Helper for removing cached model representation
+        """
+        torch._C._jit_clear_class_registry()
+        torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
+        torch.jit._state._clear_class_state()
+
+CLAP Text Encoder Conversion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The first frozen text encoder. AudioLDM2 uses the joint audio-text
+embedding model
+`CLAP `__,
+specifically the
+`laion/clap-htsat-unfused `__
+variant. The text branch is used to encode the text prompt into a prompt
+embedding. The full audio-text model is used to rank generated waveforms
+against the text prompt by computing similarity scores.
+
+.. code:: ipython3
+
+    class ClapEncoderWrapper(torch.nn.Module):
+        def __init__(self, encoder):
+            super().__init__()
+            encoder.eval()
+            self.encoder = encoder
+
+        def forward(self, input_ids, attention_mask):
+            return self.encoder.get_text_features(input_ids, attention_mask)
+
+    clap_text_encoder_ir_path = models_base_folder / "clap_text_encoder.xml"
+
+    if not clap_text_encoder_ir_path.exists():
+        with torch.no_grad():
+            ov_model = ov.convert_model(
+                ClapEncoderWrapper(pipe.text_encoder),  # model instance
+                example_input={
+                    "input_ids": torch.ones((1, 512), dtype=torch.long),
+                    "attention_mask": torch.ones((1, 512), dtype=torch.long),
+                },  # inputs for model tracing
+            )
+        ov.save_model(ov_model, clap_text_encoder_ir_path)
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("Text Encoder successfully converted to IR")
+    else:
+        print(f"Text Encoder will be loaded from {clap_text_encoder_ir_path}")
+.. parsed-literal::
+
+    Text Encoder will be loaded from clap_text_encoder.xml
+
+
+T5 Text Encoder Conversion
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+As the second frozen text encoder, AudioLDM2 uses
+`T5 `__,
+specifically the
+`google/flan-t5-large `__
+variant.
+
+The text encoder is responsible for transforming the input prompt, for
+example, “birds singing in the forest”, into an embedding space that can
+be understood by the U-Net. It is usually a simple transformer-based
+encoder that maps a sequence of input tokens to a sequence of latent
+text embeddings.
+
+The input of the text encoder is the tensor ``input_ids``, which contains
+indexes of tokens from the text processed by the tokenizer and padded to
+the maximum length accepted by the model. Model outputs are two tensors:
+``last_hidden_state`` - the hidden state from the last MultiHeadAttention
+layer in the model, and ``pooler_out`` - the pooled output of the whole
+model's hidden states.
+
+.. code:: ipython3
+
+    t5_text_encoder_ir_path = models_base_folder / "t5_text_encoder.xml"
+
+    if not t5_text_encoder_ir_path.exists():
+        pipe.text_encoder_2.eval()
+        with torch.no_grad():
+            ov_model = ov.convert_model(
+                pipe.text_encoder_2,  # model instance
+                example_input=torch.ones((1, 7), dtype=torch.long),  # inputs for model tracing
+            )
+        ov.save_model(ov_model, t5_text_encoder_ir_path)
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("Text Encoder successfully converted to IR")
+    else:
+        print(f"Text Encoder will be loaded from {t5_text_encoder_ir_path}")
+
+
+.. parsed-literal::
+
+    Text Encoder will be loaded from t5_text_encoder.xml
+
+
+Projection model conversion
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The projection model is a trained model used to linearly project the
+hidden-states from the first and second text encoder models and insert
+learned Start Of Sequence and End Of Sequence token embeddings. The
+projected hidden-states from the two text encoders are concatenated to
+give the input to the language model.
+
+.. code:: ipython3
+
+    projection_model_ir_path = models_base_folder / "projection_model.xml"
+
+    projection_model_inputs = {
+        "hidden_states": torch.randn((1, 1, 512), dtype=torch.float32),
+        "hidden_states_1": torch.randn((1, 7, 1024), dtype=torch.float32),
+        "attention_mask": torch.ones((1, 1), dtype=torch.int64),
+        "attention_mask_1": torch.ones((1, 7), dtype=torch.int64),
+    }
+
+    if not projection_model_ir_path.exists():
+        pipe.projection_model.eval()
+        with torch.no_grad():
+            ov_model = ov.convert_model(
+                pipe.projection_model,  # model instance
+                example_input=projection_model_inputs,  # inputs for model tracing
+            )
+        ov.save_model(ov_model, projection_model_ir_path)
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("The Projection Model successfully converted to IR")
+    else:
+        print(f"The Projection Model will be loaded from {projection_model_ir_path}")
+
+
+.. parsed-literal::
+
+    The Projection Model will be loaded from projection_model.xml
+
+
+GPT-2 conversion
+~~~~~~~~~~~~~~~~
+
+
+
+`GPT-2 `__ is an auto-regressive language
+model used to generate a sequence of hidden-states conditioned on the
+projected outputs from the two text encoders.
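+To make the hand-off between the projection model and GPT-2 concrete,
+here is a small shape-level sketch (illustrative only, with hypothetical
+random tensors and arbitrary sequence lengths; the real projection and
+generation logic is wrapped later in this notebook):
+
+.. code:: ipython3
+
+    # The projected CLAP and T5 hidden states are concatenated along the
+    # sequence axis; the result is what GPT-2 consumes as ``inputs_embeds``.
+    clap_proj = torch.randn(1, 1, 768)  # projected CLAP stream (1 token)
+    t5_proj = torch.randn(1, 7, 768)    # projected T5 stream (7 tokens)
+    gpt2_inputs_embeds = torch.cat([clap_proj, t5_proj], dim=1)
+    print(gpt2_inputs_embeds.shape)  # torch.Size([1, 8, 768])
+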
+.. code:: ipython3
+
+    language_model_ir_path = models_base_folder / "language_model.xml"
+
+    language_model_inputs = {
+        "inputs_embeds": torch.randn((1, 12, 768), dtype=torch.float32),
+        "attention_mask": torch.ones((1, 12), dtype=torch.int64),
+    }
+
+    if not language_model_ir_path.exists():
+        pipe.language_model.config.torchscript = True
+        pipe.language_model.eval()
+        pipe.language_model.__call__ = partial(pipe.language_model.__call__, kwargs={
+            "past_key_values": None,
+            "use_cache": False,
+            "return_dict": False})
+        with torch.no_grad():
+            ov_model = ov.convert_model(
+                pipe.language_model,  # model instance
+                example_input=language_model_inputs,  # inputs for model tracing
+            )
+
+        ov_model.inputs[0].get_node().set_partial_shape(ov.PartialShape([1, -1]))
+        ov_model.inputs[0].get_node().set_element_type(ov.Type.i64)
+        ov_model.inputs[1].get_node().set_partial_shape(ov.PartialShape([1, -1, 768]))
+        ov_model.inputs[1].get_node().set_element_type(ov.Type.f32)
+
+        ov_model.validate_nodes_and_infer_types()
+
+        ov.save_model(ov_model, language_model_ir_path)
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("The GPT-2 Language Model successfully converted to IR")
+    else:
+        print(f"The GPT-2 Language Model will be loaded from {language_model_ir_path}")
+
+
+.. parsed-literal::
+
+    The GPT-2 Language Model will be loaded from language_model.xml
+
+
+Vocoder conversion
+~~~~~~~~~~~~~~~~~~
+
+
+
+`SpeechT5 HiFi-GAN Vocoder `__
+is used to convert the mel-spectrogram latents to the final audio
+waveform.
+
+.. code:: ipython3
+
+    vocoder_ir_path = models_base_folder / "vocoder.xml"
+
+    if not vocoder_ir_path.exists():
+        pipe.vocoder.eval()
+        with torch.no_grad():
+            ov_model = ov.convert_model(
+                pipe.vocoder,  # model instance
+                example_input=torch.ones((1, 700, 64), dtype=torch.float32),  # inputs for model tracing
+            )
+        ov.save_model(ov_model, vocoder_ir_path)
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("The Vocoder successfully converted to IR")
+    else:
+        print(f"The Vocoder will be loaded from {vocoder_ir_path}")
+
+
+.. parsed-literal::
+
+    The Vocoder will be loaded from vocoder.xml
+
+
+UNet conversion
+~~~~~~~~~~~~~~~
+
+
+
+The UNet model is used to denoise the encoded audio latents. The process
+of UNet model conversion remains the same as for the original Stable
+Diffusion model.
+
+.. code:: ipython3
+
+    unet_ir_path = models_base_folder / "unet.xml"
+
+    pipe.unet.eval()
+    unet_inputs = {
+        "sample": torch.randn((2, 8, 75, 16), dtype=torch.float32),
+        "timestep": torch.tensor(1, dtype=torch.int64),
+        "encoder_hidden_states": torch.randn((2, 8, 768), dtype=torch.float32),
+        "encoder_hidden_states_1": torch.randn((2, 7, 1024), dtype=torch.float32),
+        "encoder_attention_mask_1": torch.ones((2, 7), dtype=torch.int64),
+    }
+
+    if not unet_ir_path.exists():
+        with torch.no_grad():
+            ov_model = ov.convert_model(pipe.unet, example_input=unet_inputs)
+
+        ov_model.inputs[0].get_node().set_partial_shape(ov.PartialShape((2, 8, -1, 16)))
+        ov_model.inputs[2].get_node().set_partial_shape(ov.PartialShape((2, 8, 768)))
+        ov_model.inputs[3].get_node().set_partial_shape(ov.PartialShape((2, -1, 1024)))
+        ov_model.inputs[4].get_node().set_partial_shape(ov.PartialShape((2, -1)))
+        ov_model.validate_nodes_and_infer_types()
+
+        ov.save_model(ov_model, unet_ir_path)
+
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("Unet successfully converted to IR")
+    else:
+        print(f"Unet will be loaded from {unet_ir_path}")
+
+
+.. parsed-literal::
+
+    Unet will be loaded from unet.xml
+
+
+VAE Decoder conversion
+~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+The VAE model has two parts, an encoder and a decoder. The encoder is
+used to convert the image into a low-dimensional latent representation,
+which will serve as the input to the U-Net model. The decoder,
+conversely, transforms the latent representation back into an image.
+
+During latent diffusion training, the encoder is used to get the latent
+representations (latents) of the images for the forward diffusion
+process, which applies more and more noise at each step. During
+inference, the denoised latents generated by the reverse diffusion
+process are converted back into images using the VAE decoder. During
+inference, we will see that we **only need the VAE decoder**. You can
+find instructions on how to convert the encoder part in a stable
+diffusion
+`notebook <225-stable-diffusion-text-to-image-with-output.html>`__.
+
+.. code:: ipython3
+
+    vae_ir_path = models_base_folder / "vae.xml"
+
+    class VAEDecoderWrapper(torch.nn.Module):
+        def __init__(self, vae):
+            super().__init__()
+            vae.eval()
+            self.vae = vae
+
+        def forward(self, latents):
+            return self.vae.decode(latents)
+
+    if not vae_ir_path.exists():
+        vae_decoder = VAEDecoderWrapper(pipe.vae)
+        latents = torch.zeros((1, 8, 175, 16))
+
+        vae_decoder.eval()
+        with torch.no_grad():
+            ov_model = ov.convert_model(vae_decoder, example_input=latents)
+            ov.save_model(ov_model, vae_ir_path)
+        del ov_model
+        cleanup_torchscript_cache()
+        gc.collect()
+        print("VAE decoder successfully converted to IR")
+    else:
+        print(f"VAE decoder will be loaded from {vae_ir_path}")
+
+
+.. parsed-literal::
+
+    VAE decoder will be loaded from vae.xml
+
+
+Select inference device for Stable Diffusion pipeline
+-----------------------------------------------------
+
+
+
+Select the device from the dropdown list for running inference using
+OpenVINO.
+
+.. code:: ipython3
+
+    import ipywidgets as widgets
+
+    core = ov.Core()
+
+    DEVICE = widgets.Dropdown(
+        options=core.available_devices + ["AUTO"],
+        value="CPU",
+        description="Device:",
+        disabled=False,
+    )
+
+    DEVICE
+
+
+
+
+.. parsed-literal::
+
+    Dropdown(description='Device:', options=('CPU', 'AUTO'), value='CPU')
+
+
+
+Adapt OpenVINO models to the original pipeline
+----------------------------------------------
+
+
+
+Here we create wrapper classes for the OpenVINO models that we want to
+embed in the original inference pipeline. Here are some of the things to
+consider when adapting an OV model:
+
+- Make sure that parameters passed by the original pipeline are
+  forwarded to the compiled OV model properly; sometimes the OV model
+  uses only a portion of the input arguments while some are ignored, and
+  sometimes you need to convert an argument to another data type or
+  unwrap data structures such as tuples or dictionaries.
+- Guarantee that the wrapper class returns results to the pipeline in
+  the expected format. In the example below you can see how we pack OV
+  model outputs into special named tuples to adapt them for the
+  pipeline.
+- Pay attention to the model method used in the original pipeline for
+  calling the model - it may not be the ``forward`` method! Refer to
+  ``OVClapEncoderWrapper`` to see how we wrap OV model inference into
+  the ``get_text_features`` method.
+
+.. code:: ipython3
+
+    class OVClapEncoderWrapper:
+        def __init__(self, encoder_ir, config):
+            self.encoder = core.compile_model(encoder_ir, DEVICE.value)
+            self.config = config
+
+        def get_text_features(self, input_ids, attention_mask, **_):
+            last_hidden_state = self.encoder([input_ids, attention_mask])[0]
+            return torch.from_numpy(last_hidden_state)
+
+    class OVT5EncoderWrapper:
+        def __init__(self, encoder_ir, config):
+            self.encoder = core.compile_model(encoder_ir, DEVICE.value)
+            self.config = config
+            self.dtype = self.config.torch_dtype
+
+        def __call__(self, input_ids, **_):
+            last_hidden_state = self.encoder(input_ids)[0]
+            return torch.from_numpy(last_hidden_state)[None, ...]
+
+    class OVVocoderWrapper:
+        def __init__(self, vocoder_ir, config):
+            self.vocoder = core.compile_model(vocoder_ir, DEVICE.value)
+            self.config = config
+
+        def __call__(self, mel_spectrogram, **_):
+            waveform = self.vocoder(mel_spectrogram)[0]
+            return torch.from_numpy(waveform)
+
+    class OVProjectionModelWrapper:
+        def __init__(self, proj_model_ir, config):
+            self.proj_model = core.compile_model(proj_model_ir, DEVICE.value)
+            self.config = config
+            self.output_type = namedtuple("ProjectionOutput", ["hidden_states", "attention_mask"])
+
+        def __call__(
+            self, hidden_states,
+            hidden_states_1,
+            attention_mask,
+            attention_mask_1, **_
+        ):
+            output = self.proj_model({
+                "hidden_states": hidden_states,
+                "hidden_states_1": hidden_states_1,
+                "attention_mask": attention_mask,
+                "attention_mask_1": attention_mask_1,
+            })
+            return self.output_type(torch.from_numpy(output[0]), torch.from_numpy(output[1]))
+
+    class OVUnetWrapper:
+        def __init__(self, unet_ir, config):
+            self.unet = core.compile_model(unet_ir, DEVICE.value)
+            self.config = config
+
+        def __call__(
+            self, sample,
+            timestep,
+            encoder_hidden_states,
+            encoder_hidden_states_1,
+            encoder_attention_mask_1, **_
+        ):
+            output = self.unet({
+                "sample": sample,
+                "timestep": timestep,
+                "encoder_hidden_states": encoder_hidden_states,
+                "encoder_hidden_states_1": encoder_hidden_states_1,
+                "encoder_attention_mask_1": encoder_attention_mask_1,
+            })
+            return (torch.from_numpy(output[0]), )
+
+    class OVVaeDecoderWrapper:
+        def __init__(self, vae_ir, config):
+            self.vae = core.compile_model(vae_ir, DEVICE.value)
+            self.config = config
+            self.output_type = namedtuple("VaeOutput", ["sample"])
+
+        def decode(self, latents, **_):
+            last_hidden_state = self.vae(latents)[0]
+            return self.output_type(torch.from_numpy(last_hidden_state))
+
+    def generate_language_model(
+        gpt_2: ov.CompiledModel,
+        inputs_embeds: torch.Tensor,
+        attention_mask: torch.Tensor,
+        max_new_tokens: int = 8,
+        **_
+    ) -> torch.Tensor:
+        """
+        Generates a sequence of hidden states from the language model, conditioned on the embedding inputs.
+        """
+        if not max_new_tokens:
+            max_new_tokens = 8
+        inputs_embeds = inputs_embeds.cpu().numpy()
+        attention_mask = attention_mask.cpu().numpy()
+        for _ in range(max_new_tokens):
+            # forward pass to get next hidden states
+            output = gpt_2({"inputs_embeds": inputs_embeds, "attention_mask": attention_mask})
+
+            next_hidden_states = output[0]
+
+            # Update the model input
+            inputs_embeds = np.concatenate([inputs_embeds, next_hidden_states[:, -1:, :]], axis=1)
+            attention_mask = np.concatenate([attention_mask, np.ones((attention_mask.shape[0], 1))], axis=1)
+        return torch.from_numpy(inputs_embeds[:, -max_new_tokens:, :])
+
+
+Now we initialize the wrapper objects and load them into the HF pipeline.
+
+.. code:: ipython3
+
+    pipe = AudioLDM2Pipeline.from_pretrained(MODEL_ID)
+    pipe.config.torchscript = True
+    pipe.config.return_dict = False
+
+    np.random.seed(0)
+    torch.manual_seed(0)
+
+    pipe.text_encoder = OVClapEncoderWrapper(clap_text_encoder_ir_path, pipe.text_encoder.config)
+    pipe.text_encoder_2 = OVT5EncoderWrapper(t5_text_encoder_ir_path, pipe.text_encoder_2.config)
+    pipe.projection_model = OVProjectionModelWrapper(projection_model_ir_path, pipe.projection_model.config)
+    pipe.vocoder = OVVocoderWrapper(vocoder_ir_path, pipe.vocoder.config)
+    pipe.unet = OVUnetWrapper(unet_ir_path, pipe.unet.config)
+    pipe.vae = OVVaeDecoderWrapper(vae_ir_path, pipe.vae.config)
+
+    pipe.generate_language_model = partial(generate_language_model, core.compile_model(language_model_ir_path, DEVICE.value))
+
+    gc.collect()
+
+    prompt = "birds singing in the forest"
+    negative_prompt = "Low quality"
+    audio = pipe(
+        prompt,
+        negative_prompt=negative_prompt,
+        num_inference_steps=150,
+        audio_length_in_s=3.0
+    ).audios[0]
+
+    sampling_rate = 16000
+    Audio(audio, rate=sampling_rate)
+
+
+
+.. parsed-literal::
+
+    Loading pipeline components...:   0%|          | 0/11 [00:00<?, ?it/s]
+
+
+.. raw:: html
+
+    Your browser does not support the audio element.
+
+
+
+
+Try out the converted pipeline
+------------------------------
+
+
+
+Now, we are ready to start generation. To improve the generation
+process, we also provide the option of specifying a ``negative prompt``.
+Technically, the positive prompt steers the diffusion toward the output
+associated with it, while the negative prompt steers the diffusion away
+from it. The demo app below is created using the `Gradio
+package `__.
+
+.. code:: ipython3
+
+    import gradio as gr
+
+    def _generate(prompt, negative_prompt, audio_length_in_s,
+                  num_inference_steps, _=gr.Progress(track_tqdm=True)):
+        """Gradio backing function."""
+        audio_values = pipe(
+            prompt,
+            negative_prompt=negative_prompt,
+            num_inference_steps=num_inference_steps,
+            audio_length_in_s=audio_length_in_s
+        )
+        waveform = audio_values[0].squeeze() * 2**15
+        return (sampling_rate, waveform.astype(np.int16))
+
+    demo = gr.Interface(
+        _generate,
+        inputs=[
+            gr.Textbox(label="Text Prompt"),
+            gr.Textbox(label="Negative Prompt", placeholder="Example: Low quality"),
+            gr.Slider(
+                minimum=1.0,
+                maximum=15.0,
+                step=0.25,
+                value=7,
+                label="Audio Length (s)",
+            ),
+            gr.Slider(label="Inference Steps", step=5, value=150, minimum=50, maximum=250)
+        ],
+        outputs=[
+            "audio"
+        ],
+        examples=[
+            ["birds singing in the forest", "Low quality", 7, 150],
+            ["The sound of a hammer hitting a wooden surface", "", 4, 200],
+        ],
+    )
+    try:
+        demo.queue().launch(debug=False)
+    except Exception:
+        demo.queue().launch(share=True, debug=False)
+
+    # If you are launching remotely, specify server_name and server_port
+    # EXAMPLE: `demo.launch(server_name="your server name", server_port="server port in int")`
+    # To learn more please refer to the Gradio docs: https://gradio.app/docs/
diff --git a/docs/notebooks/270-sound-generation-audioldm2-with-output_files/270-sound-generation-audioldm2.png b/docs/notebooks/270-sound-generation-audioldm2-with-output_files/270-sound-generation-audioldm2.png
new file mode 100644
index 00000000000000..d865f303a4ea28
--- /dev/null
+++ b/docs/notebooks/270-sound-generation-audioldm2-with-output_files/270-sound-generation-audioldm2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:722cb1925f39a9c325b6bd8d8ea463794f3028fd9fb44247ef7bf27abb45dde4
+size 600751
diff --git a/docs/notebooks/271-sdxl-turbo-with-output.rst b/docs/notebooks/271-sdxl-turbo-with-output.rst
new file mode 100644
index 00000000000000..a632cd7181e0be
--- /dev/null
+++ b/docs/notebooks/271-sdxl-turbo-with-output.rst
@@ -0,0 +1,860 @@
+Single step image generation using SDXL-turbo and OpenVINO
+==========================================================
+
+SDXL-Turbo is a fast generative text-to-image model that can synthesize
+photorealistic images from a text prompt in a single network evaluation.
+SDXL-Turbo is a distilled version of `SDXL 1.0 `__, trained for
+real-time synthesis. SDXL Turbo is based on a novel distillation
+technique called Adversarial Diffusion Distillation (ADD), which enables
+the model to synthesize image outputs in a single step and generate
+real-time text-to-image outputs while maintaining high sampling
+fidelity. More details about this distillation approach can be found in
+the `technical report `__. More details about the model can be found
+in the `Stability AI blog post `__.
+
+Previously, we already discussed how to launch the Stable Diffusion XL
+model using OpenVINO in the following
+`notebook <../248-stable-diffusion-xl>`__. In this tutorial we will
+focus on the `SDXL-turbo `__ version. Additionally, to improve image
+decoding speed, we will use the `Tiny Autoencoder `__, which is useful
+for real-time previewing of the SDXL generation process.
+
+We will use a pre-trained model from the `Hugging Face
+Diffusers `__ library. To simplify the user experience, the `Hugging
+Face Optimum Intel `__ library is used to convert the models to
+OpenVINO™ IR format.
+
+Prerequisites
+-------------
+
+.. code:: ipython3
+
+    %pip uninstall -q -y openvino-dev openvino openvino-nightly
+    %pip install -q --extra-index-url https://download.pytorch.org/whl/cpu\
+    torch transformers diffusers "git+https://github.com/huggingface/optimum-intel.git" gradio openvino-nightly
+
+Convert model to OpenVINO format
+--------------------------------
+
+`sdxl-turbo `__ is available for downloading via the `HuggingFace
+hub `__. We will use the optimum-cli interface for exporting it into
+OpenVINO Intermediate Representation (IR) format.
+
+The Optimum CLI interface for converting models supports export to
+OpenVINO (supported starting from optimum-intel version 1.12). General
+command format:
+
+.. code:: bash
+
+   optimum-cli export openvino --model <model_id_or_path> --task <task> <output_dir>
+
+where ``task`` is the task to export the model for; if not specified,
+the task will be auto-inferred based on the model. Available tasks
+depend on the model; for SDXL, ``stable-diffusion-xl`` should be
+selected.
+
+You can find a mapping between tasks and model classes in the Optimum
+TaskManager `documentation `__.
+
+Additionally, you can specify weight compression: ``--fp16`` to compress
+the model to FP16 and ``--int8`` to compress it to INT8. Please note
+that for INT8 it is necessary to install nncf.
+
+The full list of supported arguments is available via ``--help``. For
+more details and examples of usage, please check the `optimum
+documentation `__.
+
+For the Tiny Autoencoder, we will use the ``ov.convert_model`` function
+to obtain an ``ov.Model`` and save it using ``ov.save_model``. The model
+consists of 2 parts that are used in the pipeline separately:
+``vae_encoder`` for encoding the input image into latent space in the
+image-to-image generation task, and ``vae_decoder``, which is
+responsible for decoding the diffusion result back to image format.
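+
+For reference, with the model id and output directory used later in this
+notebook (``stabilityai/sdxl-turbo`` exported to ``model``), the export
+command that the code cell below runs expands to:
+
+.. code:: bash
+
+   optimum-cli export openvino --model stabilityai/sdxl-turbo --task stable-diffusion-xl model --fp16
+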
+.. code:: ipython3
+
+    from pathlib import Path
+
+    model_dir = Path("./model")
+    sdxl_model_id = "stabilityai/sdxl-turbo"
+    tae_id = "madebyollin/taesdxl"
+    skip_convert_model = model_dir.exists()
+
+.. code:: ipython3
+
+    import torch
+    import openvino as ov
+    from diffusers import AutoencoderTiny
+    import gc
+
+    class VAEEncoder(torch.nn.Module):
+        def __init__(self, vae):
+            super().__init__()
+            self.vae = vae
+
+        def forward(self, sample):
+            return self.vae.encode(sample)
+
+    class VAEDecoder(torch.nn.Module):
+        def __init__(self, vae):
+            super().__init__()
+            self.vae = vae
+
+        def forward(self, latent_sample):
+            return self.vae.decode(latent_sample)
+
+    def convert_tiny_vae(model_id, output_path):
+        tiny_vae = AutoencoderTiny.from_pretrained(model_id)
+        tiny_vae.eval()
+        vae_encoder = VAEEncoder(tiny_vae)
+        ov_model = ov.convert_model(vae_encoder, example_input=torch.zeros((1, 3, 512, 512)))
+        ov.save_model(ov_model, output_path / "vae_encoder/openvino_model.xml")
+        tiny_vae.save_config(output_path / "vae_encoder")
+        vae_decoder = VAEDecoder(tiny_vae)
+        ov_model = ov.convert_model(vae_decoder, example_input=torch.zeros((1, 4, 64, 64)))
+        ov.save_model(ov_model, output_path / "vae_decoder/openvino_model.xml")
+        tiny_vae.save_config(output_path / "vae_decoder")
+
+
+    if not skip_convert_model:
+        !optimum-cli export openvino --model $sdxl_model_id --task stable-diffusion-xl $model_dir --fp16
+        convert_tiny_vae(tae_id, model_dir)
+
+Text-to-image generation
+------------------------
+
+Text-to-image generation lets you create images using a text
+description. To start generating images, we need to load the models
+first. To load an OpenVINO model and run inference with Optimum and
+OpenVINO Runtime, you need to replace the diffusers
+``StableDiffusionXLPipeline`` with the Optimum
+``OVStableDiffusionXLPipeline``. Pipeline initialization starts with the
+``from_pretrained`` method, where a directory with OpenVINO models
+should be passed. Additionally, you can specify an inference device.
+
+Select inference device for text-to-image generation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code:: ipython3
+
+    import ipywidgets as widgets
+
+    core = ov.Core()
+
+    device = widgets.Dropdown(
+        options=core.available_devices + ["AUTO"],
+        value='AUTO',
+        description='Device:',
+        disabled=False,
+    )
+
+    device
+
+
+
+
+.. parsed-literal::
+
+    Dropdown(description='Device:', index=1, options=('CPU', 'AUTO'), value='AUTO')
+
+
+
+.. code:: ipython3
+
+    from optimum.intel.openvino import OVStableDiffusionXLPipeline
+    text2image_pipe = OVStableDiffusionXLPipeline.from_pretrained(model_dir, device=device.value)
+
+
+.. parsed-literal::
+
+    INFO:nncf:NNCF initialized successfully. Supported frameworks detected: torch, tensorflow, onnx, openvino
+
+
+.. parsed-literal::
+
+    2023-12-01 11:21:33.190808: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
+    2023-12-01 11:21:33.194252: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
+    2023-12-01 11:21:33.260150: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.
+    2023-12-01 11:21:33.261916: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
+    To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
+    2023-12-01 11:21:33.994990: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
+    Compiling the vae_decoder to AUTO ...
+    Compiling the unet to AUTO ...
+    Compiling the text_encoder_2 to AUTO ...
+    Compiling the text_encoder to AUTO ...
+    Compiling the vae_encoder to AUTO ...
+
+
+The pipeline interface is similar to the original
+``StableDiffusionXLPipeline``. We should provide a text prompt. The
+default number of steps is 50, while sdxl-turbo requires only 1 step.
+According to the information provided in the model card, the model does
+not use negative prompt and guidance scale, and these parameters should
+be disabled using ``guidance_scale=0``.
+
+.. code:: ipython3
+
+    import numpy as np
+
+    prompt = "cute cat"
+    image = text2image_pipe(prompt, num_inference_steps=1, height=512, width=512, guidance_scale=0.0, generator=np.random.RandomState(987)).images[0]
+    image.save("cat.png")
+    image
+
+
+.. parsed-literal::
+
+    /home/ltalamanova/env_ci/lib/python3.8/site-packages/optimum/intel/openvino/modeling_diffusion.py:565: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly.
+      outputs = self.request(inputs, shared_memory=True)
+
+
+.. parsed-literal::
+
+    0%|          | 0/1 [00:00<?, ?it/s]
+
+
+Image-to-image generation
+-------------------------
+
+For image-to-image generation, make sure that
+``num_inference_steps * strength`` is larger or equal to 1 after
+applying the strength multiplication. E.g. in the example below, we
+will use ``num_inference_steps=2`` and ``strength=0.5``, so finally we
+get 0.5 \* 2.0 = 1 step in our pipeline.
+
+.. code:: ipython3
+
+    photo_image = image2image_pipe(photo_prompt, image=image, num_inference_steps=2, generator=np.random.RandomState(511), guidance_scale=0.0, strength=0.5).images[0]
+    photo_image.save("cat_tie.png")
+    photo_image
+
+
+.. parsed-literal::
+
+    /home/ltalamanova/env_ci/lib/python3.8/site-packages/optimum/intel/openvino/modeling_diffusion.py:636: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly.
+      outputs = self.request(inputs, shared_memory=True)
+
+
+.. parsed-literal::
+
+    0%|          | 0/1 [00:00<?, ?it/s]
+
+
+Quantization
+------------
+
+`NNCF `__ enables post-training quantization by adding quantization
+layers into the model graph and then using a subset of the training
+dataset to initialize the parameters of these additional quantization
+layers. Quantized operations are executed in ``INT8`` instead of
+``FP32``/``FP16``, making model inference faster.
+
+According to the ``SDXL-Turbo Model`` structure, the UNet model takes up
+a significant portion of the overall pipeline execution time. Now we
+will show you how to optimize the UNet part using
+`NNCF `__ to reduce computation cost and speed up the pipeline.
+Quantizing the rest of the SDXL pipeline does not significantly improve
+inference performance but can lead to a substantial degradation of
+accuracy.
+
+The optimization process contains the following steps (a condensed
+sketch of the whole flow is shown right after this list):
+
+1. Create a calibration dataset for quantization.
+2. Run ``nncf.quantize()`` to obtain a quantized model.
+3. Save the ``INT8`` model using the ``openvino.save_model()`` function.
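+
+As a compact preview, the three steps above fit together as in the
+sketch below; ``unet_calibration_data`` is a placeholder that the
+following cells build for the SDXL UNet, so this block is illustrative
+rather than a replacement for them.
+
+.. code:: ipython3
+
+    import nncf
+    import openvino as ov
+
+    core = ov.Core()
+
+    # 1. calibration dataset: UNet inputs collected from real pipeline runs
+    calibration_dataset = nncf.Dataset(unet_calibration_data)
+
+    # 2. post-training quantization of the converted UNet IR
+    quantized_unet = nncf.quantize(
+        model=core.read_model("model/unet/openvino_model.xml"),
+        calibration_dataset=calibration_dataset,
+        model_type=nncf.ModelType.TRANSFORMER,
+    )
+
+    # 3. save the INT8 model
+    ov.save_model(quantized_unet, "model/optimized_unet/openvino_model.xml")
+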
+
+Please select below whether you would like to run quantization to
+improve model inference speed.
+
+.. code:: ipython3
+
+    to_quantize = widgets.Checkbox(
+        value=True,
+        description='Quantization',
+        disabled=False,
+    )
+
+    to_quantize
+
+
+
+
+.. parsed-literal::
+
+    Checkbox(value=True, description='Quantization')
+
+
+
+.. code:: ipython3
+
+    import sys
+    sys.path.append("../utils")
+
+    int8_pipe = None
+
+    # quantization is skipped when running on GPU devices
+    if to_quantize.value and "GPU" in device.value:
+        to_quantize.value = False
+
+    %load_ext skip_kernel_extension
+
+Prepare calibration dataset
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+We use a portion of the
+`laion/laion2B-en `__
+dataset from Hugging Face as calibration data. To collect intermediate
+model inputs for calibration, we should customize ``CompiledModel``.
+
+.. code:: ipython3
+
+    UNET_INT8_OV_PATH = model_dir / "optimized_unet" / "openvino_model.xml"
+
+    def disable_progress_bar(pipeline, disable=True):
+        if not hasattr(pipeline, "_progress_bar_config"):
+            pipeline._progress_bar_config = {'disable': disable}
+        else:
+            pipeline._progress_bar_config['disable'] = disable
+
+.. code:: ipython3
+
+    %%skip not $to_quantize.value
+
+    import datasets
+    import numpy as np
+    from tqdm.notebook import tqdm
+    from transformers import set_seed
+    from typing import Any, Dict, List
+
+    set_seed(1)
+
+    class CompiledModelDecorator(ov.CompiledModel):
+        def __init__(self, compiled_model: ov.CompiledModel, data_cache: List[Any] = None):
+            super().__init__(compiled_model)
+            self.data_cache = data_cache if data_cache else []
+
+        def __call__(self, *args, **kwargs):
+            self.data_cache.append(*args)
+            return super().__call__(*args, **kwargs)
+
+    def collect_calibration_data(pipe, subset_size: int) -> List[Dict]:
+        original_unet = pipe.unet.request
+        pipe.unet.request = CompiledModelDecorator(original_unet)
+
+        dataset = datasets.load_dataset("laion/laion2B-en", split="train", streaming=True).shuffle(seed=42)
+        disable_progress_bar(pipe)
+
+        # Run inference for data collection
+        pbar = tqdm(total=subset_size)
+        diff = 0
+        for batch in dataset:
+            prompt = batch["TEXT"]
+            if len(prompt) > pipe.tokenizer.model_max_length:
+                continue
+            _ = pipe(
+                prompt,
+                num_inference_steps=1,
+                height=512,
+                width=512,
+                guidance_scale=0.0,
+                generator=np.random.RandomState(987)
+            )
+            collected_subset_size = len(pipe.unet.request.data_cache)
+            if collected_subset_size >= subset_size:
+                pbar.update(subset_size - pbar.n)
+                break
+            pbar.update(collected_subset_size - diff)
+            diff = collected_subset_size
+
+        calibration_dataset = pipe.unet.request.data_cache
+        disable_progress_bar(pipe, disable=False)
+        pipe.unet.request = original_unet
+        return calibration_dataset
+
+.. code:: ipython3
+
+    %%skip not $to_quantize.value
+
+    if not UNET_INT8_OV_PATH.exists():
+        text2image_pipe = OVStableDiffusionXLPipeline.from_pretrained(model_dir, device=device.value)
+        unet_calibration_data = collect_calibration_data(text2image_pipe, subset_size=200)
+
+
+.. parsed-literal::
+
+    Compiling the vae_decoder to AUTO ...
+    Compiling the unet to AUTO ...
+    Compiling the text_encoder_2 to AUTO ...
+    Compiling the vae_encoder to AUTO ...
+    Compiling the text_encoder to AUTO ...
+
+
+
+.. parsed-literal::
+
+    Resolving data files:   0%|          | 0/128 [00:00<?, ?it/s]
+
+
+.. parsed-literal::
+
+    <string>:17: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly.
+    /home/ltalamanova/env_ci/lib/python3.8/site-packages/optimum/intel/openvino/modeling_diffusion.py:615: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly.
+      outputs = self.request(inputs, shared_memory=True)
+
+
+Run quantization
+~~~~~~~~~~~~~~~~
+
+Create a quantized model from the pre-trained converted OpenVINO model.
+Quantization of the first and last ``Convolution`` layers impacts the
+generation results. We recommend using ``IgnoredScope`` to keep
+accuracy-sensitive ``Convolution`` layers in FP16 precision.
+
+   **NOTE**: Quantization is a time- and memory-consuming operation.
+   Running the quantization code below may take some time.
+
+.. code:: ipython3
+
+    %%skip not $to_quantize.value
+
+    import nncf
+    from nncf.scopes import IgnoredScope
+
+    UNET_OV_PATH = model_dir / "unet" / "openvino_model.xml"
+    if not UNET_INT8_OV_PATH.exists():
+        unet = core.read_model(UNET_OV_PATH)
+        quantized_unet = nncf.quantize(
+            model=unet,
+            model_type=nncf.ModelType.TRANSFORMER,
+            calibration_dataset=nncf.Dataset(unet_calibration_data),
+            ignored_scope=IgnoredScope(
+                names=[
+                    "__module.model.conv_in/aten::_convolution/Convolution",
+                    "__module.model.up_blocks.2.resnets.2.conv_shortcut/aten::_convolution/Convolution",
+                    "__module.model.conv_out/aten::_convolution/Convolution"
+                ],
+            ),
+        )
+        ov.save_model(quantized_unet, UNET_INT8_OV_PATH)
+
+
+
+.. parsed-literal::
+
+    Output()
+
+
+
+.. parsed-literal::
+
+    Output()
+
+
+
+.. parsed-literal::
+
+    INFO:nncf:3 ignored nodes were found by name in the NNCFGraph
+    INFO:nncf:420 ignored nodes were found by name in the NNCFGraph
+    INFO:nncf:Not adding activation input quantizer for operation: 5 __module.model.conv_in/aten::_convolution/Convolution
+    13 __module.model.conv_in/aten::_convolution/Add_87
+
+    INFO:nncf:Not adding activation input quantizer for operation: 460 __module.model.up_blocks.2.resnets.2.conv_shortcut/aten::_convolution/Convolution
+    899 __module.model.up_blocks.2.resnets.2.conv_shortcut/aten::_convolution/Add_16859
+
+    INFO:nncf:Not adding activation input quantizer for operation: 3911 __module.model.conv_out/aten::_convolution/Convolution
+    4032 __module.model.conv_out/aten::_convolution/Add_16873
+
+
+
+.. parsed-literal::
+
+    Output()
+
+
+
+.. parsed-literal::
+
+    Output()
+
+
+
+Let us check predictions with the quantized UNet using the same input
+data.
+
+.. code:: ipython3
+
+    %%skip not $to_quantize.value
+
+    from IPython.display import display
+
+    int8_text2image_pipe = OVStableDiffusionXLPipeline.from_pretrained(model_dir, device=device.value, compile=False)
+    int8_text2image_pipe.unet.model = core.read_model(UNET_INT8_OV_PATH)
+    int8_text2image_pipe.unet.request = None
+
+    prompt = "cute cat"
+    image = int8_text2image_pipe(prompt, num_inference_steps=1, height=512, width=512, guidance_scale=0.0, generator=np.random.RandomState(987)).images[0]
+    display(image)
+
+
+.. parsed-literal::
+
+    Compiling the text_encoder to AUTO ...
+    /home/ltalamanova/env_ci/lib/python3.8/site-packages/optimum/intel/openvino/modeling_diffusion.py:565: FutureWarning: `shared_memory` is deprecated and will be removed in 2024.0. Value of `shared_memory` is going to override `share_inputs` value. Please use only `share_inputs` explicitly.
+      outputs = self.request(inputs, shared_memory=True)
+    Compiling the text_encoder_2 to AUTO ...
+
+
+
+.. parsed-literal::
+
+    0%|          | 0/1 [00:00<?, ?it/s]
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/271-sdxl-turbo-with-output_files/


../
+271-sdxl-turbo-with-output_11_3.jpg                07-Dec-2023 00:49               28118
+271-sdxl-turbo-with-output_11_3.png                07-Dec-2023 00:49              376542
+271-sdxl-turbo-with-output_17_2.jpg                07-Dec-2023 00:49               28230
+271-sdxl-turbo-with-output_17_2.png                07-Dec-2023 00:49              398923
+271-sdxl-turbo-with-output_29_3.jpg                07-Dec-2023 00:49               27869
+271-sdxl-turbo-with-output_29_3.png                07-Dec-2023 00:49              375737
+271-sdxl-turbo-with-output_30_1.jpg                07-Dec-2023 00:49               27505
+271-sdxl-turbo-with-output_30_1.png                07-Dec-2023 00:49              386457
diff --git a/docs/notebooks/272-paint-by-example-with-output.rst b/docs/notebooks/272-paint-by-example-with-output.rst
new file mode 100644
index 00000000000000..abdc8266dad3a1
--- /dev/null
+++ b/docs/notebooks/272-paint-by-example-with-output.rst
@@ -0,0 +1,1104 @@
+Paint By Example: Exemplar-based Image Editing with Diffusion Models
+====================================================================
+
+Stable Diffusion in Diffusers library
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To work with Stable Diffusion, we will use the Hugging Face
+`Diffusers `__ library. To experiment with in-painting, we can use
+Diffusers, which exposes the
+`StableDiffusionInpaintPipeline `__ similar to the `other Diffusers
+pipelines `__. The code below demonstrates how to create
+``StableDiffusionInpaintPipeline`` using
+``stable-diffusion-2-inpainting``. To create the drawing tool, we will
+install Gradio for handling user interaction.
+
+This is the overall flow of the application: |Flow Diagram|
+
+This is the detailed flowchart for the pipeline: |pipeline-flowchart|
+
+.. |Flow Diagram| image:: https://user-images.githubusercontent.com/103226580/236954918-f364b227-293c-4f78-a9bf-9dcebcb1034a.png
+.. |pipeline-flowchart| image:: https://github.com/openvinotoolkit/openvino_notebooks/assets/103226580/cde2d5c4-2540-4a45-ad9c-339f7a69459d
+
+.. code:: ipython3
+
+    %pip install -q "gradio == 3.50.2"
+    %pip install -q "diffusers>=0.14.0" "openvino>=2023.2.0" "transformers >= 4.25.1"
+
+
+.. parsed-literal::
+
+    Collecting gradio==3.50.2
+      Downloading gradio-3.50.2-py3-none-any.whl (20.3 MB)
+         ---------------------------------------- 20.3/20.3 MB 8.5 MB/s eta 0:00:00
+    Collecting gradio-client==0.6.1 (from gradio==3.50.2)
+      Downloading gradio_client-0.6.1-py3-none-any.whl (299 kB)
+         -------------------------------------- 299.2/299.2 kB 6.3 MB/s eta 0:00:00
+    Collecting importlib-resources<7.0,>=1.3 (from gradio==3.50.2)
+      Downloading importlib_resources-6.1.1-py3-none-any.whl (33 kB)
+    Installing collected packages: importlib-resources, gradio-client, gradio
+      Attempting uninstall: gradio-client
+        Found existing installation: gradio_client 0.1.4
+        Uninstalling gradio_client-0.1.4:
+          Successfully uninstalled gradio_client-0.1.4
+      Attempting uninstall: gradio
+        Found existing installation: gradio 3.28.1
+        Uninstalling gradio-3.28.1:
+          Successfully uninstalled gradio-3.28.1
+    Successfully installed gradio-3.50.2 gradio-client-0.6.1 importlib-resources-6.1.1
+    Note: you may need to restart the kernel to use updated packages.
+
+
+.. parsed-literal::
+
+    [notice] A new release of pip is available: 23.1 -> 23.3.1
+    [notice] To update, run: python.exe -m pip install --upgrade pip
+
+
+Download the model from `HuggingFace
+Paint-by-Example `__. This might take several minutes because it is
+over 5 GB.
+
+.. code:: ipython3
+
+    from diffusers import DPMSolverMultistepScheduler, DiffusionPipeline
+
+    pipeline = DiffusionPipeline.from_pretrained("Fantasy-Studio/Paint-By-Example")
+
+    scheduler_inpaint = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
+
+
+.. parsed-literal::
+
+    Cannot initialize model with low cpu memory usage because `accelerate` was not found in the environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install `accelerate` for faster and less memory-intense model loading. You can do so with:
+    ```
+    pip install accelerate
+    ```
+    .
+    You are using a model of type clip_vision_model to instantiate a model of type clip. This is not supported for all configurations of models and can yield errors.
+.. code:: ipython3
+
+    import gc
+
+    extractor = pipeline.feature_extractor
+    image_encoder = pipeline.image_encoder
+    image_encoder.eval()
+    unet_inpaint = pipeline.unet
+    unet_inpaint.eval()
+    vae_inpaint = pipeline.vae
+    vae_inpaint.eval()
+
+    del pipeline
+    gc.collect();
+
+Download default images
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Download the default images used by the demo.
+
+.. code:: ipython3
+
+    # Fetch `notebook_utils` module
+    import urllib.request
+    urllib.request.urlretrieve(
+        url='https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/main/notebooks/utils/notebook_utils.py',
+        filename='notebook_utils.py'
+    )
+
+    from notebook_utils import download_file
+
+    download_file("https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377210-edc98e97-0e43-4796-b771-dacd074c39ea.png", "0.png", "data/image")
+
+    download_file("https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377233-b2c2d902-d379-415a-8183-5bdd37c52429.png", "1.png", "data/image")
+
+    download_file("https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377248-da1db61e-3521-4cdb-85c8-1386d360ce22.png", "2.png", "data/image")
+
+    download_file("https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377279-fa496f17-e850-4351-87c5-2552dfbc4633.jpg", "bird.jpg", "data/reference")
+
+    download_file("https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377298-06a25ff2-84d8-4d46-95cd-8c25efa690d8.jpg", "car.jpg", "data/reference")
+
+    download_file("https://github-production-user-asset-6210df.s3.amazonaws.com/103226580/286377318-8841a801-1933-4523-a433-7d2fb64c47e6.jpg", "dog.jpg", "data/reference")
+
+
+
+
+.. parsed-literal::
+
+    data\image\0.png:   0%|          | 0.00/453k [00:00<?, ?B/s]
+
+
+Convert models to OpenVINO IR format
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code:: ipython3
+
+    from pathlib import Path
+    import torch
+    import numpy as np
+    import openvino as ov
+
+    model_dir = Path("model")
+    model_dir.mkdir(exist_ok=True)
+    sd2_inpainting_model_dir = Path("model/paint_by_example")
+    sd2_inpainting_model_dir.mkdir(exist_ok=True)
+
+Functions to convert the models to OpenVINO IR format:
+
+.. code:: ipython3
+
+    def cleanup_torchscript_cache():
+        """
+        Helper for removing cached model representation
+        """
+        torch._C._jit_clear_class_registry()
+        torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
+        torch.jit._state._clear_class_state()
+
+
+    def convert_image_encoder(image_encoder: torch.nn.Module, ir_path: Path):
+        """
+        Convert Image Encoder model to IR.
+        Function accepts the image encoder model and prepares example inputs for conversion.
+        Parameters:
+            image_encoder (torch.nn.Module): image encoder PyTorch model
+            ir_path (Path): File for storing model
+        Returns:
+            None
+        """
+        class ImageEncoderWrapper(torch.nn.Module):
+            def __init__(self, image_encoder):
+                super().__init__()
+                self.image_encoder = image_encoder
+
+            def forward(self, image):
+                image_embeddings, negative_prompt_embeds = self.image_encoder(image, return_uncond_vector=True)
+                return image_embeddings, negative_prompt_embeds
+
+        if not ir_path.exists():
+            image_encoder = ImageEncoderWrapper(image_encoder)
+            # switch model to inference mode
+            image_encoder.eval()
+            input_ids = torch.randn((1, 3, 224, 224))
+            # disable gradients calculation for reducing memory consumption
+            with torch.no_grad():
+                ov_model = ov.convert_model(
+                    image_encoder,
+                    example_input=input_ids,
+                    input=([1, 3, 224, 224],)
+                )
+                ov.save_model(ov_model, ir_path)
+            del ov_model
+            cleanup_torchscript_cache()
+            print('Image Encoder successfully converted to IR')
+
+
+    def convert_unet(unet: torch.nn.Module, ir_path: Path, num_channels: int = 4, width: int = 64, height: int = 64):
+        """
+        Convert U-Net model to IR format.
+        Function accepts the U-Net model and prepares example inputs for conversion.
+        Parameters:
+            unet (torch.nn.Module): UNet PyTorch model
+            ir_path (Path): File for storing model
+            num_channels (int, optional, 4): number of input channels
+            width (int, optional, 64): input width
+            height (int, optional, 64): input height
+        Returns:
+            None
+        """
+        dtype_mapping = {
+            torch.float32: ov.Type.f32,
+            torch.float64: ov.Type.f64
+        }
+        if not ir_path.exists():
+            # prepare inputs
+            encoder_hidden_state = torch.ones((2, 1, 768))
+            latents_shape = (2, num_channels, width, height)
+            latents = torch.randn(latents_shape)
+            t = torch.from_numpy(np.array(1, dtype=np.float32))
+            unet.eval()
+            dummy_inputs = (latents, t, encoder_hidden_state)
+            input_info = []
+            for input_tensor in dummy_inputs:
+                shape = ov.PartialShape(tuple(input_tensor.shape))
+                element_type = dtype_mapping[input_tensor.dtype]
+                input_info.append((shape, element_type))
+
+            with torch.no_grad():
+                ov_model = ov.convert_model(
+                    unet,
+                    example_input=dummy_inputs,
+                    input=input_info
+                )
+                ov.save_model(ov_model, ir_path)
+            del ov_model
+            cleanup_torchscript_cache()
+            print('U-Net successfully converted to IR')
+
+
+    def convert_vae_encoder(vae: torch.nn.Module, ir_path: Path, width: int = 512, height: int = 512):
+        """
+        Convert VAE model to IR format.
+        Function accepts the VAE model, creates a wrapper class exporting only
+        the part necessary for inference, and prepares example inputs for conversion.
+        Parameters:
+            vae (torch.nn.Module): VAE PyTorch model
+            ir_path (Path): File for storing model
+            width (int, optional, 512): input width
+            height (int, optional, 512): input height
+        Returns:
+            None
+        """
+        class VAEEncoderWrapper(torch.nn.Module):
+            def __init__(self, vae):
+                super().__init__()
+                self.vae = vae
+
+            def forward(self, image):
+                latents = self.vae.encode(image).latent_dist.sample()
+                return latents
+
+        if not ir_path.exists():
+            vae_encoder = VAEEncoderWrapper(vae)
+            vae_encoder.eval()
+            image = torch.zeros((1, 3, width, height))
+            with torch.no_grad():
+                ov_model = ov.convert_model(vae_encoder, example_input=image, input=([1, 3, width, height],))
+                ov.save_model(ov_model, ir_path)
+            del ov_model
+            cleanup_torchscript_cache()
+            print('VAE encoder successfully converted to IR')
+
+
+    def convert_vae_decoder(vae: torch.nn.Module, ir_path: Path, width: int = 64, height: int = 64):
+        """
+        Convert VAE decoder model to IR format.
+        Function accepts the VAE model, creates a wrapper class exporting only
+        the part necessary for inference, and prepares example inputs for conversion.
+        Parameters:
+            vae (torch.nn.Module): VAE model
+            ir_path (Path): File for storing model
+            width (int, optional, 64): input width
+            height (int, optional, 64): input height
+        Returns:
+            None
+        """
+        class VAEDecoderWrapper(torch.nn.Module):
+            def __init__(self, vae):
+                super().__init__()
+                self.vae = vae
+
+            def forward(self, latents):
+                # rescale latents by the inverse of the VAE scaling factor (0.18215) before decoding
+                latents = 1 / 0.18215 * latents
+                return self.vae.decode(latents)
+
+        if not ir_path.exists():
+            vae_decoder = VAEDecoderWrapper(vae)
+            latents = torch.zeros((1, 4, width, height))
+
+            vae_decoder.eval()
+            with torch.no_grad():
+                ov_model = ov.convert_model(vae_decoder, example_input=latents, input=([1, 4, width, height],))
+                ov.save_model(ov_model, ir_path)
+            del ov_model
+            cleanup_torchscript_cache()
+            print('VAE decoder successfully converted to IR')
+
+Do the conversion of the in-painting model:
+
+.. code:: ipython3
+
+    IMAGE_ENCODER_OV_PATH_INPAINT = sd2_inpainting_model_dir / "image_encoder.xml"
+
+    if not IMAGE_ENCODER_OV_PATH_INPAINT.exists():
+        convert_image_encoder(image_encoder, IMAGE_ENCODER_OV_PATH_INPAINT)
+    else:
+        print(f"Image encoder will be loaded from {IMAGE_ENCODER_OV_PATH_INPAINT}")
+
+    del image_encoder
+    gc.collect();
+
+
+.. parsed-literal::
+
+    Image encoder will be loaded from model\paint_by_example\image_encoder.xml
+
+
+Do the conversion of the U-Net model:
+
+.. code:: ipython3
+
+    UNET_OV_PATH_INPAINT = sd2_inpainting_model_dir / 'unet.xml'
+    if not UNET_OV_PATH_INPAINT.exists():
+        convert_unet(unet_inpaint, UNET_OV_PATH_INPAINT, num_channels=9, width=64, height=64)
+        del unet_inpaint
+        gc.collect()
+    else:
+        del unet_inpaint
+        print(f"U-Net will be loaded from {UNET_OV_PATH_INPAINT}")
+    gc.collect();
+
+
+.. parsed-literal::
+
+    U-Net will be loaded from model\paint_by_example\unet.xml
+
+
+Do the conversion of the VAE Encoder model:
+
+.. code:: ipython3
+
+    VAE_ENCODER_OV_PATH_INPAINT = sd2_inpainting_model_dir / 'vae_encoder.xml'
+
+    if not VAE_ENCODER_OV_PATH_INPAINT.exists():
+        convert_vae_encoder(vae_inpaint, VAE_ENCODER_OV_PATH_INPAINT, 512, 512)
+    else:
+        print(f"VAE encoder will be loaded from {VAE_ENCODER_OV_PATH_INPAINT}")
+
+    VAE_DECODER_OV_PATH_INPAINT = sd2_inpainting_model_dir / 'vae_decoder.xml'
+    if not VAE_DECODER_OV_PATH_INPAINT.exists():
+        convert_vae_decoder(vae_inpaint, VAE_DECODER_OV_PATH_INPAINT, 64, 64)
+    else:
+        print(f"VAE decoder will be loaded from {VAE_DECODER_OV_PATH_INPAINT}")
+
+    del vae_inpaint
+    gc.collect();
+
+
+.. parsed-literal::
+
+    VAE encoder will be loaded from model\paint_by_example\vae_encoder.xml
+    VAE decoder will be loaded from model\paint_by_example\vae_decoder.xml
+
+
+Prepare Inference pipeline
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Function to prepare the mask and masked image.
+
+Adapted from the `236 Stable Diffusion v2 Infinite Zoom
+notebook <236-stable-diffusion-v2-with-output.html>`__.
+
+The main difference is that instead of encoding a text prompt, it will
+now encode an image as the prompt.
+
+.. code:: ipython3
+
+    import inspect
+    from typing import Optional, Union, Dict
+
+    import PIL
+    import cv2
+
+    from transformers import CLIPImageProcessor
+    from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+    from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
+    from openvino.runtime import Model
+
+
+    def prepare_mask_and_masked_image(image: PIL.Image.Image, mask: PIL.Image.Image):
+        """
+        Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
+        converted to ``np.array`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
+        ``image`` and ``1`` for the ``mask``.
+
+        The ``image`` will be converted to ``np.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
+        binarized (``mask > 0.5``) and cast to ``np.float32`` too.
+
+        Args:
+            image (Union[np.array, PIL.Image]): The image to inpaint.
+                It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array``
+            mask (Union[np.array, PIL.Image]): The mask to apply to the image, i.e. regions to inpaint.
+                It can be a ``PIL.Image``, or a ``height x width`` ``np.array``.
+
+        Returns:
+            tuple[np.array]: The pair (mask, masked_image) as ``np.array`` with 4
+                dimensions: ``batch x channels x height x width``.
+ """ + if isinstance(image, (PIL.Image.Image, np.ndarray)): + image = [image] + + if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): + image = [np.array(i.convert("RGB"))[None, :] for i in image] + image = np.concatenate(image, axis=0) + elif isinstance(image, list) and isinstance(image[0], np.ndarray): + image = np.concatenate([i[None, :] for i in image], axis=0) + + image = image.transpose(0, 3, 1, 2) + image = image.astype(np.float32) / 127.5 - 1.0 + + # preprocess mask + if isinstance(mask, (PIL.Image.Image, np.ndarray)): + mask = [mask] + + if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image): + mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) + mask = mask.astype(np.float32) / 255.0 + elif isinstance(mask, list) and isinstance(mask[0], np.ndarray): + mask = np.concatenate([m[None, None, :] for m in mask], axis=0) + + mask = 1 - mask + + mask[mask < 0.5] = 0 + mask[mask >= 0.5] = 1 + + masked_image = image * mask + + return mask, masked_image + +Class for the pipeline which will connect all the models together: VAE +decode –> image encode –> tokenizer –> Unet –> VAE model –> scheduler + +.. code:: ipython3 + + class OVStableDiffusionInpaintingPipeline(DiffusionPipeline): + def __init__( + self, + vae_decoder: Model, + image_encoder: Model, + image_processor: CLIPImageProcessor, + unet: Model, + scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], + vae_encoder: Model = None, + ): + """ + Pipeline for text-to-image generation using Stable Diffusion. + Parameters: + vae_decoder (Model): + Variational Auto-Encoder (VAE) Model to decode images to and from latent representations. + image_encoder (Model): + https://huggingface.co/Fantasy-Studio/Paint-by-Example/blob/main/image_encoder/config.json + tokenizer (CLIPTokenizer): + Tokenizer of class CLIPTokenizer(https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). + unet (Model): Conditional U-Net architecture to denoise the encoded image latents. + vae_encoder (Model): + Variational Auto-Encoder (VAE) Model to encode images to latent representation. + scheduler (SchedulerMixin): + A scheduler to be used in combination with unet to denoise the encoded image latents. Can be one of + DDIMScheduler, LMSDiscreteScheduler, or PNDMScheduler. 
+ """ + super().__init__() + self.scheduler = scheduler + self.vae_decoder = vae_decoder + self.vae_encoder = vae_encoder + self.image_encoder = image_encoder + self.unet = unet + self._unet_output = unet.output(0) + self._vae_d_output = vae_decoder.output(0) + self._vae_e_output = vae_encoder.output(0) if vae_encoder is not None else None + self.height = self.unet.input(0).shape[2] * 8 + self.width = self.unet.input(0).shape[3] * 8 + self.image_processor = image_processor + + def prepare_mask_latents( + self, + mask, + masked_image, + height=512, + width=512, + do_classifier_free_guidance=True, + ): + """ + Prepare mask as Unet nput and encode input masked image to latent space using vae encoder + + Parameters: + mask (np.array): input mask array + masked_image (np.array): masked input image tensor + heigh (int, *optional*, 512): generated image height + width (int, *optional*, 512): generated image width + do_classifier_free_guidance (bool, *optional*, True): whether to use classifier free guidance or not + Returns: + mask (np.array): resized mask tensor + masked_image_latents (np.array): masked image encoded into latent space using VAE + """ + mask = torch.nn.functional.interpolate(torch.from_numpy(mask), size=(height // 8, width // 8)) + mask = mask.numpy() + + # encode the mask image into latents space so we can concatenate it to the latents + masked_image_latents = self.vae_encoder(masked_image)[self._vae_e_output] + masked_image_latents = 0.18215 * masked_image_latents + + mask = np.concatenate([mask] * 2) if do_classifier_free_guidance else mask + masked_image_latents = ( + np.concatenate([masked_image_latents] * 2) + if do_classifier_free_guidance + else masked_image_latents + ) + return mask, masked_image_latents + + def __call__( + self, + image: PIL.Image.Image, + mask_image: PIL.Image.Image, + reference_image: PIL.Image.Image, + num_inference_steps: Optional[int] = 50, + guidance_scale: Optional[float] = 7.5, + eta: Optional[float] = 0, + output_type: Optional[str] = "pil", + seed: Optional[int] = None, + ): + """ + Function invoked when calling the pipeline for generation. + Parameters: + image (PIL.Image.Image): + Source image for inpainting. + mask_image (PIL.Image.Image): + Mask area for inpainting + reference_image (PIL.Image.Image): + Reference image to inpaint in mask area + num_inference_steps (int, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (float, *optional*, defaults to 7.5): + Guidance scale as defined in Classifier-Free Diffusion Guidance(https://arxiv.org/abs/2207.12598). + guidance_scale is defined as `w` of equation 2. + Higher guidance scale encourages to generate images that are closely linked to the text prompt, + usually at the expense of lower image quality. + eta (float, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [DDIMScheduler], will be ignored for others. + output_type (`str`, *optional*, defaults to "pil"): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): PIL.Image.Image or np.array. + seed (int, *optional*, None): + Seed for random generator state initialization. 
+            Returns:
+                Dictionary with keys:
+                    sample - the last generated image PIL.Image.Image or np.array
+            """
+            if seed is not None:
+                np.random.seed(seed)
+            # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+            # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+            # corresponds to doing no classifier free guidance.
+            do_classifier_free_guidance = guidance_scale > 1.0
+
+            # get reference image embeddings
+            image_embeddings = self._encode_image(reference_image, do_classifier_free_guidance=do_classifier_free_guidance)
+
+            # prepare mask
+            mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
+            # set timesteps
+            accepts_offset = "offset" in set(
+                inspect.signature(self.scheduler.set_timesteps).parameters.keys()
+            )
+            extra_set_kwargs = {}
+            if accepts_offset:
+                extra_set_kwargs["offset"] = 1
+
+            self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
+            timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1)
+            latent_timestep = timesteps[:1]
+
+            # get the initial random noise unless the user supplied it
+            latents, meta = self.prepare_latents(None, latent_timestep)
+            mask, masked_image_latents = self.prepare_mask_latents(
+                mask,
+                masked_image,
+                do_classifier_free_guidance=do_classifier_free_guidance,
+            )
+
+            # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+            # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+            # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+            # and should be between [0, 1]
+            accepts_eta = "eta" in set(
+                inspect.signature(self.scheduler.step).parameters.keys()
+            )
+            extra_step_kwargs = {}
+            if accepts_eta:
+                extra_step_kwargs["eta"] = eta
+
+            for t in self.progress_bar(timesteps):
+                # expand the latents if we are doing classifier free guidance
+                latent_model_input = (
+                    np.concatenate([latents] * 2)
+                    if do_classifier_free_guidance
+                    else latents
+                )
+                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+                latent_model_input = np.concatenate(
+                    [latent_model_input, masked_image_latents, mask], axis=1
+                )
+                # predict the noise residual
+                noise_pred = self.unet(
+                    [latent_model_input, np.array(t, dtype=np.float32), image_embeddings]
+                )[self._unet_output]
+                # perform guidance
+                if do_classifier_free_guidance:
+                    noise_pred_uncond, noise_pred_text = noise_pred[0], noise_pred[1]
+                    noise_pred = noise_pred_uncond + guidance_scale * (
+                        noise_pred_text - noise_pred_uncond
+                    )
+
+                # compute the previous noisy sample x_t -> x_t-1
+                latents = self.scheduler.step(
+                    torch.from_numpy(noise_pred),
+                    t,
+                    torch.from_numpy(latents),
+                    **extra_step_kwargs,
+                )["prev_sample"].numpy()
+            # scale and decode the image latents with vae
+            image = self.vae_decoder(latents)[self._vae_d_output]
+
+            image = self.postprocess_image(image, meta, output_type)
+            return {"sample": image}
+
+        def _encode_image(self, image: PIL.Image.Image, do_classifier_free_guidance: bool = True):
+            """
+            Encodes the image into image encoder hidden states.
+
+            Parameters:
+                image (PIL.Image.Image): base image to encode
+                do_classifier_free_guidance (bool): whether to use classifier free guidance or not
+            Returns:
+                image_embeddings (np.ndarray): image encoder hidden states
+            """
+            processed_image = self.image_processor(image)
+            processed_image = processed_image['pixel_values'][0]
+            processed_image = np.expand_dims(processed_image, axis=0)
+
+            output = self.image_encoder(processed_image)
+            image_embeddings = output[self.image_encoder.output(0)]
+            negative_embeddings = output[self.image_encoder.output(1)]
+
+            # stack [negative, positive] embeddings for classifier-free guidance
+            image_embeddings = np.concatenate([negative_embeddings, image_embeddings])
+
+            return image_embeddings
+
+        def prepare_latents(self, image: PIL.Image.Image = None, latent_timestep: torch.Tensor = None):
+            """
+            Function for getting initial latents for starting generation
+
+            Parameters:
+                image (PIL.Image.Image, *optional*, None):
+                    Input image for generation; if not provided, random noise will be used as the starting point
+                latent_timestep (torch.Tensor, *optional*, None):
+                    Initial step predicted by the scheduler, required for mixing the latent image with noise
+            Returns:
+                latents (np.ndarray):
+                    Image encoded in latent space
+            """
+            latents_shape = (1, 4, self.height // 8, self.width // 8)
+            noise = np.random.randn(*latents_shape).astype(np.float32)
+            if image is None:
+                # if we use LMSDiscreteScheduler, let's make sure latents are multiplied by sigmas
+                if isinstance(self.scheduler, LMSDiscreteScheduler):
+                    noise = noise * self.scheduler.sigmas[0].numpy()
+                return noise, {}
+            input_image, meta = preprocess(image)
+            # sample from the VAE posterior and apply the Stable Diffusion latent scaling factor
+            moments = self.vae_encoder(input_image)[self._vae_e_output]
+            mean, logvar = np.split(moments, 2, axis=1)
+            std = np.exp(logvar * 0.5)
+            latents = (mean + std * np.random.randn(*mean.shape)) * 0.18215
+            latents = self.scheduler.add_noise(torch.from_numpy(latents), torch.from_numpy(noise), latent_timestep).numpy()
+            return latents, meta
+
+        def postprocess_image(self, image: np.ndarray, meta: Dict, output_type: str = "pil"):
+            """
+            Postprocessing for decoded image. Takes the generated image decoded by the VAE decoder, unpads it to the
+            initial image size (if required), normalizes it to the [0, 255] pixel range and, optionally, converts it
+            from np.ndarray to PIL.Image format
+
+            Parameters:
+                image (np.ndarray):
+                    Generated image
+                meta (Dict):
+                    Metadata obtained on the latents preparation step, can be empty
+                output_type (str, *optional*, pil):
+                    Output format for result, can be pil or numpy
+            Returns:
+                image (List of np.ndarray or PIL.Image.Image):
+                    Postprocessed images
+            """
+            if "padding" in meta:
+                pad = meta["padding"]
+                (_, end_h), (_, end_w) = pad[1:3]
+                h, w = image.shape[2:]
+                unpad_h = h - end_h
+                unpad_w = w - end_w
+                image = image[:, :, :unpad_h, :unpad_w]
+            image = np.clip(image / 2 + 0.5, 0, 1)
+            image = np.transpose(image, (0, 2, 3, 1))
+            # convert to PIL if requested, resizing back to the source size when it is known
+            if output_type == "pil":
+                image = self.numpy_to_pil(image)
+                if "src_height" in meta:
+                    orig_height, orig_width = meta["src_height"], meta["src_width"]
+                    image = [img.resize((orig_width, orig_height),
+                                        PIL.Image.Resampling.LANCZOS) for img in image]
+            else:
+                if "src_height" in meta:
+                    orig_height, orig_width = meta["src_height"], meta["src_width"]
+                    # note: cv2.resize takes the target size as (width, height)
+                    image = [cv2.resize(img, (orig_width, orig_height))
+                             for img in image]
+            return image
+
+        def get_timesteps(self, num_inference_steps: int, strength: float):
+            """
+            Helper function for getting scheduler timesteps for generation.
+            In case of image-to-image generation, it updates the number of steps according to strength.
+
+            Parameters:
+                num_inference_steps (int):
+                    number of inference steps for generation
+                strength (float):
+                    value between 0.0 and 1.0 that controls the amount of noise that is added to the input image.
+                    Values that approach 1.0 allow for lots of variations but will also produce images that are not semantically consistent with the input.
+            """
+            # get the original timestep using init_timestep
+            init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+
+            t_start = max(num_inference_steps - init_timestep, 0)
+            timesteps = self.scheduler.timesteps[t_start:]
+
+            return timesteps, num_inference_steps - t_start
+
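+Inside the denoising loop, the batched U-Net output is split into an
+unconditional and a conditional prediction, and ``guidance_scale``
+blends them. A minimal numeric sketch of that formula (toy values,
+illustrative only):
+
+.. code:: ipython3
+
+    import numpy as np
+
+    guidance_scale = 7.5
+    noise_pred_uncond = np.array([0.10, 0.20], dtype=np.float32)  # toy unconditional prediction
+    noise_pred_cond = np.array([0.30, 0.25], dtype=np.float32)    # toy reference-conditioned prediction
+    guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
+    print(guided)  # [1.6 0.575] -> prediction is pushed toward the conditioned direction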
+
+Select inference device
+~~~~~~~~~~~~~~~~~~~~~~~
+
+
+
+select device from dropdown list for running inference using OpenVINO
+
+.. code:: ipython3
+
+    from openvino.runtime import Core
+    import ipywidgets as widgets
+
+    core = Core()
+
+    device = widgets.Dropdown(
+        options=core.available_devices + ["AUTO"],
+        value='AUTO',
+        description='Device:',
+        disabled=False,
+    )
+
+    device
+
+
+
+
+.. parsed-literal::
+
+    Dropdown(description='Device:', index=2, options=('CPU', 'GPU', 'AUTO'), value='AUTO')
+
+
+
+Configure Inference Pipeline
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configuration steps:
+
+1. Load models on device
+2. Configure the image processor and scheduler
+3. Create an instance of the ``OVStableDiffusionInpaintingPipeline`` class
+
+This can take a while to run.
+
+.. code:: ipython3
+
+    ov_config = {"INFERENCE_PRECISION_HINT": "f32"} if device.value != "CPU" else {}
+
+    image_encoder_inpaint = core.compile_model(IMAGE_ENCODER_OV_PATH_INPAINT, device.value)
+    unet_model_inpaint = core.compile_model(UNET_OV_PATH_INPAINT, device.value)
+    vae_decoder_inpaint = core.compile_model(VAE_DECODER_OV_PATH_INPAINT, device.value, ov_config)
+    vae_encoder_inpaint = core.compile_model(VAE_ENCODER_OV_PATH_INPAINT, device.value, ov_config)
+
+    ov_pipe_inpaint = OVStableDiffusionInpaintingPipeline(
+        image_processor=extractor,
+        image_encoder=image_encoder_inpaint,
+        unet=unet_model_inpaint,
+        vae_encoder=vae_encoder_inpaint,
+        vae_decoder=vae_decoder_inpaint,
+        scheduler=scheduler_inpaint,
+    )
+
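+With the models compiled, the pipeline can also be called directly,
+without the Gradio demo below. A minimal sketch (the file paths are
+hypothetical placeholders for your own source, mask, and reference
+images):
+
+.. code:: ipython3
+
+    # hypothetical input files - substitute your own 512x512 images
+    src = PIL.Image.open("data/image/example_1.png").convert("RGB").resize((512, 512))
+    msk = PIL.Image.open("data/mask/example_1.png").convert("L").resize((512, 512))
+    ref = PIL.Image.open("data/reference/example_1.jpg").convert("RGB")
+
+    result = ov_pipe_inpaint(
+        image=src,
+        mask_image=msk,
+        reference_image=ref,
+        num_inference_steps=15,
+        seed=66,
+    )["sample"][0]
+    result.save("direct_result.png")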
+
+.. code:: ipython3
+
+    # Code adapted from https://huggingface.co/spaces/Fantasy-Studio/Paint-by-Example/blob/main/app.py
+
+    import os
+    import gradio as gr
+
+
+    def predict(dict: gr.components.Image, reference: PIL.Image.Image, seed: int, step: int):
+        """
+        This function runs when the 'paint' button is pressed. It takes three
+        inputs: the source image with a user-drawn mask, a reference image and
+        generation parameters, and runs the inpainting pipeline on them.
+
+        Parameters:
+            dict (Dict):
+                Contains two images:
+                'image' is the image that will be painted on
+                'mask' is the black/white image specifying where to paint (white) and not to paint (black)
+            reference (PIL.Image.Image):
+                Reference image that will be used by the model to know what to paint in the specified area
+            seed (int):
+                Used to initialize the random number generator state
+            step (int):
+                The number of denoising steps to run during inference. Low = fast/low quality, High = slow/higher quality
+        Returns:
+            result (PIL.Image.Image):
+                Inpainted image
+        """
+        width, height = dict["image"].size
+
+        # resize so the shorter side becomes 512 and both sides stay multiples of 8
+        if width < height:
+            factor = width / 512.0
+            width = 512
+            height = int((height / factor) / 8.0) * 8
+        else:
+            factor = height / 512.0
+            height = 512
+            width = int((width / factor) / 8.0) * 8
+
+        init_image = dict["image"].convert("RGB").resize((width, height))
+        mask = dict["mask"].convert("RGB").resize((width, height))
+
+        # center-crop the longer side so the result is a 512x512 square
+        if width > height:
+            buffer = (width - height) / 2
+            input_image = init_image.crop((buffer, 0, width - buffer, 512))
+            mask = mask.crop((buffer, 0, width - buffer, 512))
+        elif width < height:
+            buffer = (height - width) / 2
+            input_image = init_image.crop((0, buffer, 512, height - buffer))
+            mask = mask.crop((0, buffer, 512, height - buffer))
+        else:
+            input_image = init_image
+
+        if not os.path.exists('output'):
+            os.mkdir('output')
+        input_image.save('output/init.png')
+        mask.save('output/mask.png')
+        reference.save('output/ref.png')
+
+        mask = [mask]
+
+        result = ov_pipe_inpaint(
+            image=input_image,
+            mask_image=mask,
+            reference_image=reference,
+            seed=seed,
+            num_inference_steps=step,
+        )["sample"][0]
+
+        result.save('output/result.png')
+
+        return result
+
+
+    ref_dir = 'data/reference'
+    image_dir = 'data/image'
+    ref_list = [os.path.join(ref_dir, file) for file in os.listdir(ref_dir)]
+    ref_list.sort()
+    image_list = [os.path.join(image_dir, file) for file in os.listdir(image_dir)]
+    image_list.sort()
+
+
+    image_blocks = gr.Blocks()
+    with image_blocks as demo:
+        with gr.Group():
+            with gr.Box():
+                with gr.Row():
+                    with gr.Column():
+                        image = gr.Image(source='upload', tool='sketch', elem_id="image_upload", type="pil", label="Source Image")
+                        reference = gr.Image(source='upload', elem_id="image_upload", type="pil", label="Reference Image")
+
+                    with gr.Column():
+                        image_out = gr.Image(label="Output", elem_id="output-img")
+                        steps = gr.Slider(label="Steps", value=15, minimum=2, maximum=75, step=1, interactive=True)
+
+                        seed = gr.Slider(0, 10000, label='Seed (0 = random)', value=0, step=1)
+
+                        with gr.Row(elem_id="prompt-container"):
+                            btn = gr.Button("Paint!")
+
+            with gr.Row():
+                with gr.Column():
+                    gr.Examples(image_list, inputs=[image], label="Examples - Source Image", examples_per_page=12)
+                with gr.Column():
+                    gr.Examples(ref_list, inputs=[reference], label="Examples - Reference Image", examples_per_page=12)
+
+        btn.click(fn=predict, inputs=[image, reference, seed, steps], outputs=[image_out])
+
+    # Launching the Gradio app
+    try:
+        image_blocks.launch(debug=False, height=680)
+    except Exception:
+        image_blocks.queue().launch(share=True, debug=False, height=680)
+    # if you are launching remotely, specify server_name and server_port
+    # image_blocks.launch(server_name='your server name', server_port='server port in int')
+    # 
Read more in the docs: https://gradio.app/docs/ + + +.. parsed-literal:: + + Running on local URL: http://127.0.0.1:7860 + + To create a public link, set `share=True` in `launch()`. + + + +.. .. raw:: html + +..
+ diff --git a/docs/notebooks/272-paint-by-example-with-output_files/272-paint-by-example.png b/docs/notebooks/272-paint-by-example-with-output_files/272-paint-by-example.png new file mode 100644 index 00000000000000..6e5d7b57c37464 --- /dev/null +++ b/docs/notebooks/272-paint-by-example-with-output_files/272-paint-by-example.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5c19760d8edbb1566b823983afbb85c07337cb5d50476e7dd6fd602114de649 +size 3943872 diff --git a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst index 1d30ba14ae86e6..4b79ee714072c6 100644 --- a/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst +++ b/docs/notebooks/302-pytorch-quantization-aware-training-with-output.rst @@ -26,25 +26,28 @@ notebook. Using the smaller model and dataset will speed up training and download time. To see other ResNet models, visit `PyTorch hub `__. - **NOTE**: This notebook requires a C++ compiler. + **NOTE**: This notebook requires a C++ compiler for compiling PyTorch + custom operations for quantization. For Windows we recommend to + install Visual Studio with C++ support, you can find instruction + `here `__. + For MacOS ``xcode-select --install`` command installs many developer + tools, including C++. For Linux you can install gcc with your + distribution’s package manager. **Table of contents:** - `Imports and Settings <#imports-and-settings>`__ -- `Pre-train Floating-Point - Model <#pre-train-floating-point-model>`__ +- `Pre-train Floating-Point Model <#pre-train-floating-point-model>`__ - `Train Function <#train-function>`__ - `Validate Function <#validate-function>`__ - `Helpers <#helpers>`__ - - `Get a Pre-trained FP32 - Model <#get-a-pre-trained-fp-model>`__ + - `Get a Pre-trained FP32 Model <#get-a-pre-trained-fp-model>`__ - `Create and Initialize Quantization <#create-and-initialize-quantization>`__ -- `Fine-tune the Compressed - Model <#fine-tune-the-compressed-model>`__ +- `Fine-tune the Compressed Model <#fine-tune-the-compressed-model>`__ - `Export INT8 Model to OpenVINO IR <#export-int-model-to-openvino-ir>`__ - `Benchmark Model Performance by Computing Inference @@ -64,8 +67,10 @@ hub `__. Note: you may need to restart the kernel to use updated packages. -Imports and Settings --------------------------------------------------------------- +Imports and Settings +-------------------- + + On Windows, add the required C++ directories to the system PATH. @@ -196,7 +201,7 @@ models will be stored. .. parsed-literal:: - PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/302-pytorch-quantization-aware-training/model/resnet18_fp32.pth') + PosixPath('/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/302-pytorch-quantization-aware-training/model/resnet18_fp32.pth') @@ -255,8 +260,10 @@ Download Tiny ImageNet dataset Successfully downloaded and prepared dataset at: data/tiny-imagenet-200 -Pre-train Floating-Point Model ------------------------------------------------------------------------- +Pre-train Floating-Point Model +------------------------------ + + Using NNCF for model compression assumes that a pre-trained model and a training pipeline are already in use. @@ -268,8 +275,10 @@ classes from Tiny-ImageNet. Subsequently, the training and validation functions will be reused as is for quantization-aware training. 
-Train Function -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Train Function +~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -313,8 +322,10 @@ Train Function if i % print_frequency == 0: progress.display(i) -Validate Function -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Validate Function +~~~~~~~~~~~~~~~~~ + + .. code:: ipython3 @@ -355,8 +366,10 @@ Validate Function print(" * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}".format(top1=top1, top5=top5)) return top1.avg -Helpers -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Helpers +~~~~~~~ + + .. code:: ipython3 @@ -418,8 +431,10 @@ Helpers res.append(correct_k.mul_(100.0 / batch_size)) return res -Get a Pre-trained FP32 Model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Get a Pre-trained FP32 Model +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + А pre-trained floating-point model is a prerequisite for quantization. It can be obtained by tuning from scratch with the code below. However, @@ -485,9 +500,9 @@ section at the top of this notebook. .. parsed-literal:: - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead. warnings.warn( - /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=None`. + /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=None`. warnings.warn(msg) @@ -545,8 +560,10 @@ benchmark it in comparison with the ``INT8`` model. FP32 model was exported to model/resnet18_fp32.xml. -Create and Initialize Quantization ----------------------------------------------------------------------------- +Create and Initialize Quantization +---------------------------------- + + NNCF enables compression-aware training by integrating into regular training pipelines. The framework is designed so that modifications to @@ -584,10 +601,10 @@ scenario and requires only 3 modifications. .. parsed-literal:: - 2023-10-31 00:15:43.733728: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. - 2023-10-31 00:15:43.767038: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. + 2023-12-07 00:28:30.511089: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. 
You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`. + 2023-12-07 00:28:30.544997: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. - 2023-10-31 00:15:44.314131: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT + 2023-12-07 00:28:31.224999: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT Evaluate the new model on the validation set after initialization of @@ -603,20 +620,22 @@ demonstrated here. .. parsed-literal:: - Test: [ 0/79] Time 0.168 (0.168) Loss 0.981 (0.981) Acc@1 78.91 (78.91) Acc@5 89.84 (89.84) - Test: [10/79] Time 0.156 (0.165) Loss 1.905 (1.623) Acc@1 46.88 (60.51) Acc@5 82.03 (84.09) - Test: [20/79] Time 0.155 (0.160) Loss 1.734 (1.692) Acc@1 63.28 (58.63) Acc@5 79.69 (83.04) - Test: [30/79] Time 0.154 (0.157) Loss 2.282 (1.781) Acc@1 50.00 (57.31) Acc@5 69.53 (81.50) - Test: [40/79] Time 0.152 (0.156) Loss 1.540 (1.825) Acc@1 62.50 (55.83) Acc@5 85.94 (80.96) - Test: [50/79] Time 0.152 (0.156) Loss 1.972 (1.820) Acc@1 57.03 (56.05) Acc@5 75.00 (80.73) - Test: [60/79] Time 0.152 (0.156) Loss 1.731 (1.846) Acc@1 57.81 (55.51) Acc@5 85.16 (80.21) - Test: [70/79] Time 0.154 (0.155) Loss 2.412 (1.872) Acc@1 47.66 (55.15) Acc@5 71.88 (79.61) + Test: [ 0/79] Time 0.167 (0.167) Loss 0.981 (0.981) Acc@1 78.91 (78.91) Acc@5 89.84 (89.84) + Test: [10/79] Time 0.149 (0.151) Loss 1.905 (1.623) Acc@1 46.88 (60.51) Acc@5 82.03 (84.09) + Test: [20/79] Time 0.150 (0.151) Loss 1.734 (1.692) Acc@1 63.28 (58.63) Acc@5 79.69 (83.04) + Test: [30/79] Time 0.149 (0.150) Loss 2.282 (1.781) Acc@1 50.00 (57.31) Acc@5 69.53 (81.50) + Test: [40/79] Time 0.148 (0.150) Loss 1.540 (1.825) Acc@1 62.50 (55.83) Acc@5 85.94 (80.96) + Test: [50/79] Time 0.147 (0.150) Loss 1.972 (1.820) Acc@1 57.03 (56.05) Acc@5 75.00 (80.73) + Test: [60/79] Time 0.148 (0.150) Loss 1.731 (1.846) Acc@1 57.81 (55.51) Acc@5 85.16 (80.21) + Test: [70/79] Time 0.164 (0.151) Loss 2.412 (1.872) Acc@1 47.66 (55.15) Acc@5 71.88 (79.61) * Acc@1 55.540 Acc@5 80.200 Accuracy of initialized INT8 model: 55.540 -Fine-tune the Compressed Model ------------------------------------------------------------------------- +Fine-tune the Compressed Model +------------------------------ + + At this step, a regular fine-tuning process is applied to further improve quantized model accuracy. Normally, several epochs of tuning are @@ -641,37 +660,39 @@ training pipeline are required. Here is a simple example. .. 
parsed-literal:: - Epoch:[0][ 0/782] Time 0.412 (0.412) Loss 0.740 (0.740) Acc@1 84.38 (84.38) Acc@5 96.88 (96.88) - Epoch:[0][ 50/782] Time 0.383 (0.387) Loss 0.911 (0.802) Acc@1 78.91 (80.15) Acc@5 92.97 (94.42) - Epoch:[0][100/782] Time 0.387 (0.388) Loss 0.631 (0.798) Acc@1 84.38 (80.24) Acc@5 95.31 (94.38) - Epoch:[0][150/782] Time 0.381 (0.388) Loss 0.836 (0.792) Acc@1 80.47 (80.48) Acc@5 94.53 (94.43) - Epoch:[0][200/782] Time 0.369 (0.386) Loss 0.873 (0.780) Acc@1 75.00 (80.65) Acc@5 94.53 (94.59) - Epoch:[0][250/782] Time 0.385 (0.386) Loss 0.735 (0.778) Acc@1 84.38 (80.77) Acc@5 95.31 (94.53) - Epoch:[0][300/782] Time 0.368 (0.386) Loss 0.615 (0.771) Acc@1 85.16 (80.99) Acc@5 97.66 (94.58) - Epoch:[0][350/782] Time 0.392 (0.386) Loss 0.599 (0.767) Acc@1 85.16 (81.14) Acc@5 95.31 (94.58) - Epoch:[0][400/782] Time 0.382 (0.386) Loss 0.798 (0.765) Acc@1 82.03 (81.21) Acc@5 92.97 (94.56) - Epoch:[0][450/782] Time 0.377 (0.386) Loss 0.630 (0.762) Acc@1 85.16 (81.26) Acc@5 96.88 (94.58) - Epoch:[0][500/782] Time 0.367 (0.386) Loss 0.633 (0.757) Acc@1 85.94 (81.45) Acc@5 96.88 (94.63) - Epoch:[0][550/782] Time 0.406 (0.386) Loss 0.749 (0.755) Acc@1 82.03 (81.49) Acc@5 92.97 (94.65) - Epoch:[0][600/782] Time 0.397 (0.385) Loss 0.927 (0.753) Acc@1 78.12 (81.53) Acc@5 88.28 (94.67) - Epoch:[0][650/782] Time 0.392 (0.385) Loss 0.645 (0.749) Acc@1 84.38 (81.60) Acc@5 95.31 (94.71) - Epoch:[0][700/782] Time 0.399 (0.386) Loss 0.816 (0.749) Acc@1 82.03 (81.62) Acc@5 91.41 (94.69) - Epoch:[0][750/782] Time 0.404 (0.386) Loss 0.811 (0.746) Acc@1 80.47 (81.69) Acc@5 94.53 (94.72) - Test: [ 0/79] Time 0.166 (0.166) Loss 1.092 (1.092) Acc@1 75.00 (75.00) Acc@5 86.72 (86.72) - Test: [10/79] Time 0.150 (0.140) Loss 1.917 (1.526) Acc@1 48.44 (62.64) Acc@5 78.12 (83.88) - Test: [20/79] Time 0.137 (0.138) Loss 1.631 (1.602) Acc@1 64.06 (60.68) Acc@5 81.25 (83.71) - Test: [30/79] Time 0.136 (0.138) Loss 2.037 (1.691) Acc@1 57.81 (59.25) Acc@5 71.09 (82.23) - Test: [40/79] Time 0.134 (0.137) Loss 1.563 (1.743) Acc@1 64.84 (58.02) Acc@5 82.81 (81.33) - Test: [50/79] Time 0.137 (0.137) Loss 1.926 (1.750) Acc@1 52.34 (57.77) Acc@5 76.56 (81.04) - Test: [60/79] Time 0.137 (0.137) Loss 1.559 (1.781) Acc@1 67.19 (57.24) Acc@5 84.38 (80.58) - Test: [70/79] Time 0.137 (0.137) Loss 2.353 (1.806) Acc@1 46.88 (56.81) Acc@5 72.66 (80.08) + Epoch:[0][ 0/782] Time 0.407 (0.407) Loss 0.740 (0.740) Acc@1 84.38 (84.38) Acc@5 96.88 (96.88) + Epoch:[0][ 50/782] Time 0.383 (0.393) Loss 0.911 (0.802) Acc@1 78.91 (80.15) Acc@5 92.97 (94.42) + Epoch:[0][100/782] Time 0.406 (0.393) Loss 0.631 (0.798) Acc@1 84.38 (80.24) Acc@5 95.31 (94.38) + Epoch:[0][150/782] Time 0.408 (0.392) Loss 0.836 (0.792) Acc@1 80.47 (80.48) Acc@5 94.53 (94.43) + Epoch:[0][200/782] Time 0.413 (0.395) Loss 0.873 (0.780) Acc@1 75.00 (80.65) Acc@5 94.53 (94.59) + Epoch:[0][250/782] Time 0.384 (0.395) Loss 0.735 (0.778) Acc@1 84.38 (80.77) Acc@5 95.31 (94.53) + Epoch:[0][300/782] Time 0.372 (0.395) Loss 0.615 (0.771) Acc@1 85.16 (80.99) Acc@5 97.66 (94.58) + Epoch:[0][350/782] Time 0.398 (0.394) Loss 0.599 (0.767) Acc@1 85.16 (81.14) Acc@5 95.31 (94.58) + Epoch:[0][400/782] Time 0.386 (0.394) Loss 0.798 (0.765) Acc@1 82.03 (81.21) Acc@5 92.97 (94.56) + Epoch:[0][450/782] Time 0.397 (0.394) Loss 0.630 (0.762) Acc@1 85.16 (81.26) Acc@5 96.88 (94.58) + Epoch:[0][500/782] Time 0.368 (0.393) Loss 0.633 (0.757) Acc@1 85.94 (81.45) Acc@5 96.88 (94.63) + Epoch:[0][550/782] Time 0.416 (0.393) Loss 0.749 (0.755) Acc@1 82.03 (81.49) Acc@5 92.97 (94.65) + 
Epoch:[0][600/782] Time 0.388 (0.393) Loss 0.927 (0.753) Acc@1 78.12 (81.53) Acc@5 88.28 (94.67) + Epoch:[0][650/782] Time 0.424 (0.392) Loss 0.645 (0.749) Acc@1 84.38 (81.60) Acc@5 95.31 (94.71) + Epoch:[0][700/782] Time 0.376 (0.392) Loss 0.816 (0.749) Acc@1 82.03 (81.62) Acc@5 91.41 (94.69) + Epoch:[0][750/782] Time 0.362 (0.392) Loss 0.811 (0.746) Acc@1 80.47 (81.69) Acc@5 94.53 (94.72) + Test: [ 0/79] Time 0.157 (0.157) Loss 1.092 (1.092) Acc@1 75.00 (75.00) Acc@5 86.72 (86.72) + Test: [10/79] Time 0.139 (0.141) Loss 1.917 (1.526) Acc@1 48.44 (62.64) Acc@5 78.12 (83.88) + Test: [20/79] Time 0.135 (0.140) Loss 1.631 (1.602) Acc@1 64.06 (60.68) Acc@5 81.25 (83.71) + Test: [30/79] Time 0.133 (0.138) Loss 2.037 (1.691) Acc@1 57.81 (59.25) Acc@5 71.09 (82.23) + Test: [40/79] Time 0.138 (0.138) Loss 1.563 (1.743) Acc@1 64.84 (58.02) Acc@5 82.81 (81.33) + Test: [50/79] Time 0.136 (0.138) Loss 1.926 (1.750) Acc@1 52.34 (57.77) Acc@5 76.56 (81.04) + Test: [60/79] Time 0.139 (0.137) Loss 1.559 (1.781) Acc@1 67.19 (57.24) Acc@5 84.38 (80.58) + Test: [70/79] Time 0.140 (0.138) Loss 2.353 (1.806) Acc@1 46.88 (56.81) Acc@5 72.66 (80.08) * Acc@1 57.320 Acc@5 80.730 Accuracy of tuned INT8 model: 57.320 Accuracy drop of tuned INT8 model over pre-trained FP32 model: -1.800 -Export INT8 Model to OpenVINO IR --------------------------------------------------------------------------- +Export INT8 Model to OpenVINO IR +-------------------------------- + + .. code:: ipython3 @@ -690,8 +711,10 @@ Export INT8 Model to OpenVINO IR INT8 Omodel exported to model/resnet18_int8.xml. -Benchmark Model Performance by Computing Inference Time -------------------------------------------------------------------------------------------------- +Benchmark Model Performance by Computing Inference Time +------------------------------------------------------- + + Finally, measure the inference performance of the ``FP32`` and ``INT8`` models, using `Benchmark @@ -729,9 +752,9 @@ throughput (frames per second) values. .. parsed-literal:: Benchmark FP32 model (IR) - [ INFO ] Throughput: 2952.65 FPS + [ INFO ] Throughput: 2930.00 FPS Benchmark INT8 model (IR) - [ INFO ] Throughput: 11986.34 FPS + [ INFO ] Throughput: 11839.41 FPS Show CPU Information for reference. diff --git a/docs/notebooks/406-3D-pose-estimation-with-output.rst b/docs/notebooks/406-3D-pose-estimation-with-output.rst index 851fbbbc926006..c55447a9e76387 100644 --- a/docs/notebooks/406-3D-pose-estimation-with-output.rst +++ b/docs/notebooks/406-3D-pose-estimation-with-output.rst @@ -8,9 +8,11 @@ from `Open Model Zoo `__. At the end of this notebook, you will see live inference results from your webcam (if available). Alternatively, you can also upload a video file to test -out the algorithms. **Make sure you have properly installed the** -`Jupyter extension `__ -**and been using JupyterLab to run the demo as suggested in the README.md** +out the algorithms. **Make sure you have properly installed +the**\ `Jupyter +extension `__\ **and +been using JupyterLab to run the demo as suggested in the +``README.md``** **NOTE**: *To use a webcam, you must run this Jupyter notebook on a computer with a webcam. 
If you run on a remote server, the webcam @@ -49,10 +51,12 @@ Windows: Chrome* *macOS: Safari* - `Run Pose Estimation on a Video File <#run-pose-estimation-on-a-video-file>`__ -Prerequisites -------------------------------------------------------- +Prerequisites +------------- -**The "pythreejs" extension may not display properly when using the + + +**The ``pythreejs`` extension may not display properly when using the latest Jupyter Notebook release (2.4.1). Therefore, it is recommended to use Jupyter Lab instead.** @@ -65,61 +69,75 @@ use Jupyter Lab instead.** Collecting pythreejs Using cached pythreejs-2.4.2-py3-none-any.whl (3.4 MB) - Requirement already satisfied: openvino-dev==2023.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (2023.1.0) - Requirement already satisfied: addict>=2.4.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2.4.0) - Requirement already satisfied: defusedxml>=0.7.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (0.7.1) - Requirement already satisfied: jstyleson>=0.0.2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (0.0.2) - Requirement already satisfied: networkx<=3.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2.8.2) - Requirement already satisfied: numpy>=1.16.6 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (1.24.3) - Requirement already satisfied: opencv-python in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (4.8.1.78) - Requirement already satisfied: openvino-telemetry>=2022.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2023.2.1) - Requirement already satisfied: pillow>=8.1.2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (10.0.1) - Requirement already satisfied: pyyaml>=5.4.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (6.0.1) - Requirement already satisfied: requests>=2.25.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2.31.0) - Requirement already satisfied: texttable>=1.6.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (1.7.0) - Requirement already satisfied: tqdm>=4.54.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (4.66.1) - Requirement already satisfied: openvino==2023.1.0 in 
/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2023.1.0) - Requirement already satisfied: scipy<1.11,>=1.8 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (1.10.1) - Requirement already satisfied: ipywidgets>=7.2.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pythreejs) (8.1.1) + Collecting openvino-dev==2023.1.0 + Using cached openvino_dev-2023.1.0-12185-py3-none-any.whl.metadata (17 kB) + Requirement already satisfied: addict>=2.4.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2.4.0) + Requirement already satisfied: defusedxml>=0.7.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (0.7.1) + Requirement already satisfied: jstyleson>=0.0.2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (0.0.2) + Requirement already satisfied: networkx<=3.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2.8.2) + Requirement already satisfied: numpy>=1.16.6 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (1.23.5) + Requirement already satisfied: opencv-python in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (4.8.1.78) + Requirement already satisfied: openvino-telemetry>=2022.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2023.2.1) + Requirement already satisfied: pillow>=8.1.2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (10.1.0) + Requirement already satisfied: pyyaml>=5.4.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (6.0.1) + Requirement already satisfied: requests>=2.25.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (2.31.0) + Requirement already satisfied: texttable>=1.6.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (1.7.0) + Requirement already satisfied: tqdm>=4.54.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (4.66.1) + Collecting openvino==2023.1.0 (from openvino-dev==2023.1.0) + Using cached openvino-2023.1.0-12185-cp38-cp38-manylinux2014_x86_64.whl.metadata (6.3 kB) + Requirement already satisfied: scipy<1.11,>=1.8 in 
/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from openvino-dev==2023.1.0) (1.10.1) + Requirement already satisfied: ipywidgets>=7.2.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pythreejs) (8.1.1) Collecting ipydatawidgets>=1.1.1 (from pythreejs) Using cached ipydatawidgets-4.3.5-py2.py3-none-any.whl.metadata (1.4 kB) - Requirement already satisfied: traitlets in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pythreejs) (5.13.0) + Requirement already satisfied: traitlets in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pythreejs) (5.14.0) Collecting traittypes>=0.2.0 (from ipydatawidgets>=1.1.1->pythreejs) Using cached traittypes-0.2.1-py2.py3-none-any.whl (8.6 kB) - Requirement already satisfied: comm>=0.1.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (0.1.4) - Requirement already satisfied: ipython>=6.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (8.12.3) - Requirement already satisfied: widgetsnbextension~=4.0.9 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (4.0.9) - Requirement already satisfied: jupyterlab-widgets~=3.0.9 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (3.0.9) - Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (3.3.1) - Requirement already satisfied: idna<4,>=2.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (3.4) - Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (2.0.7) - Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (2023.7.22) - Requirement already satisfied: backcall in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.2.0) - Requirement already satisfied: decorator in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (5.1.1) - Requirement already satisfied: jedi>=0.16 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from 
ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.19.1) - Requirement already satisfied: matplotlib-inline in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.1.6) - Requirement already satisfied: pickleshare in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.7.5) - Requirement already satisfied: prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (3.0.39) - Requirement already satisfied: pygments>=2.4.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (2.16.1) - Requirement already satisfied: stack-data in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.6.3) - Requirement already satisfied: typing-extensions in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (4.5.0) - Requirement already satisfied: pexpect>4.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (4.8.0) - Requirement already satisfied: parso<0.9.0,>=0.8.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.8.3) - Requirement already satisfied: ptyprocess>=0.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.7.0) - Requirement already satisfied: wcwidth in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.2.9) - Requirement already satisfied: executing>=1.2.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (2.0.1) - Requirement already satisfied: asttokens>=2.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (2.4.1) - Requirement already satisfied: pure-eval in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.2.2) - Requirement already satisfied: six>=1.12.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (1.16.0) + Requirement already 
satisfied: comm>=0.1.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (0.2.0) + Requirement already satisfied: ipython>=6.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (8.12.3) + Requirement already satisfied: widgetsnbextension~=4.0.9 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (4.0.9) + Requirement already satisfied: jupyterlab-widgets~=3.0.9 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipywidgets>=7.2.1->pythreejs) (3.0.9) + Requirement already satisfied: charset-normalizer<4,>=2 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (3.3.2) + Requirement already satisfied: idna<4,>=2.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (3.6) + Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (2.1.0) + Requirement already satisfied: certifi>=2017.4.17 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from requests>=2.25.1->openvino-dev==2023.1.0) (2023.11.17) + Requirement already satisfied: backcall in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.2.0) + Requirement already satisfied: decorator in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (5.1.1) + Requirement already satisfied: jedi>=0.16 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.19.1) + Requirement already satisfied: matplotlib-inline in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.1.6) + Requirement already satisfied: pickleshare in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.7.5) + Requirement already satisfied: prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (3.0.41) + Requirement already satisfied: pygments>=2.4.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (2.17.2) + Requirement already satisfied: stack-data in 
/opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.6.3) + Requirement already satisfied: typing-extensions in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (4.8.0) + Requirement already satisfied: pexpect>4.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (4.9.0) + Requirement already satisfied: parso<0.9.0,>=0.8.3 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.8.3) + Requirement already satisfied: ptyprocess>=0.5 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.7.0) + Requirement already satisfied: wcwidth in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.2.12) + Requirement already satisfied: executing>=1.2.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (2.0.1) + Requirement already satisfied: asttokens>=2.1.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (2.4.1) + Requirement already satisfied: pure-eval in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (0.2.2) + Requirement already satisfied: six>=1.12.0 in /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets>=7.2.1->pythreejs) (1.16.0) + Using cached openvino_dev-2023.1.0-12185-py3-none-any.whl (5.8 MB) + Using cached openvino-2023.1.0-12185-cp38-cp38-manylinux2014_x86_64.whl (35.2 MB) Using cached ipydatawidgets-4.3.5-py2.py3-none-any.whl (271 kB) DEPRECATION: pytorch-lightning 1.6.5 has a non-standard dependency specifier torch>=1.8.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. 
Discussion can be found at https://github.com/pypa/pip/issues/12063 - Installing collected packages: traittypes, ipydatawidgets, pythreejs - Successfully installed ipydatawidgets-4.3.5 pythreejs-2.4.2 traittypes-0.2.1 + Installing collected packages: traittypes, openvino, openvino-dev, ipydatawidgets, pythreejs + Attempting uninstall: openvino + Found existing installation: openvino 2023.2.0 + Uninstalling openvino-2023.2.0: + Successfully uninstalled openvino-2023.2.0 + Attempting uninstall: openvino-dev + Found existing installation: openvino-dev 2023.2.0 + Uninstalling openvino-dev-2023.2.0: + Successfully uninstalled openvino-dev-2023.2.0 + Successfully installed ipydatawidgets-4.3.5 openvino-2023.1.0 openvino-dev-2023.1.0 pythreejs-2.4.2 traittypes-0.2.1 Note: you may need to restart the kernel to use updated packages. -Imports -------------------------------------------------- +Imports +------- + + .. code:: ipython3 @@ -141,11 +159,15 @@ Imports import engine.engine3js as engine from engine.parse_poses import parse_poses -The model ---------------------------------------------------- +The model +--------- + + + +Download the model +~~~~~~~~~~~~~~~~~~ + -Download the model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We use ``omz_downloader``, which is a command line tool from the ``openvino-dev`` package. ``omz_downloader`` automatically creates a @@ -186,8 +208,10 @@ directory structure and downloads the selected model. -Convert Model to OpenVINO IR format -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Convert Model to OpenVINO IR format +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + The selected model comes from the public directory, which means it must be converted into OpenVINO Intermediate Representation (OpenVINO IR). We @@ -210,23 +234,25 @@ IR format. .. parsed-literal:: ========== Converting human-pose-estimation-3d-0001 to ONNX - Conversion to ONNX command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/model_zoo/internal_scripts/pytorch_to_onnx.py --model-path=model/public/human-pose-estimation-3d-0001 --model-name=PoseEstimationWithMobileNet --model-param=is_convertible_by_mo=True --import-module=model --weights=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.pth --input-shape=1,3,256,448 --input-names=data --output-names=features,heatmaps,pafs --output-file=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.onnx + Conversion to ONNX command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/lib/python3.8/site-packages/openvino/model_zoo/internal_scripts/pytorch_to_onnx.py --model-path=model/public/human-pose-estimation-3d-0001 --model-name=PoseEstimationWithMobileNet --model-param=is_convertible_by_mo=True --import-module=model --weights=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.pth --input-shape=1,3,256,448 --input-names=data --output-names=features,heatmaps,pafs --output-file=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.onnx ONNX check passed successfully. 
========== Converting human-pose-estimation-3d-0001 to IR (FP32) - Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/human-pose-estimation-3d-0001/FP32 --model_name=human-pose-estimation-3d-0001 --input=data '--mean_values=data[128.0,128.0,128.0]' '--scale_values=data[255.0,255.0,255.0]' --output=features,heatmaps,pafs --input_model=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 256, 448]' --compress_to_fp16=False + Conversion command: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/bin/python -- /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/.venv/bin/mo --framework=onnx --output_dir=model/public/human-pose-estimation-3d-0001/FP32 --model_name=human-pose-estimation-3d-0001 --input=data '--mean_values=data[128.0,128.0,128.0]' '--scale_values=data[255.0,255.0,255.0]' --output=features,heatmaps,pafs --input_model=model/public/human-pose-estimation-3d-0001/human-pose-estimation-3d-0001.onnx '--layout=data(NCHW)' '--input_shape=[1, 3, 256, 448]' --compress_to_fp16=False [ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11. Find more information about API v2.0 and IR v11 at https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html [ SUCCESS ] Generated IR version 11 model. - [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/406-3D-pose-estimation-webcam/model/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.xml - [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/406-3D-pose-estimation-webcam/model/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.bin + [ SUCCESS ] XML file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/406-3D-pose-estimation-webcam/model/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.xml + [ SUCCESS ] BIN file: /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/406-3D-pose-estimation-webcam/model/public/human-pose-estimation-3d-0001/FP32/human-pose-estimation-3d-0001.bin -Select inference device -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Select inference device +~~~~~~~~~~~~~~~~~~~~~~~ + + select device from dropdown list for running inference using OpenVINO @@ -252,8 +278,10 @@ select device from dropdown list for running inference using OpenVINO -Load the model -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Load the model +~~~~~~~~~~~~~~ + + Converted models are located in a fixed structure, which indicates vendor, model name and precision. @@ -294,11 +322,15 @@ heat maps, PAF (part affinity fields) and features. 
-Processing
----------------------------------------------------
+Processing
+----------
+
+
+
+Model Inference
+~~~~~~~~~~~~~~~
+
-Model Inference
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Frames captured from video files or the live webcam are used as the
 input for the 3D model. This is how you obtain the output heat maps, PAF
@@ -335,8 +367,10 @@ input for the 3D model. This is how you obtain the output heat maps, PAF
 
         return results
 
-Draw 2D Pose Overlays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Draw 2D Pose Overlays
+~~~~~~~~~~~~~~~~~~~~~
+
+
 
 We need to define some connections between the joints in advance, so
 that we can draw the structure of the human body in the resulting image
@@ -417,8 +451,10 @@ from Open Model Zoo.
 
         return frame
 
-Main Processing Function
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Main Processing Function
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+
 
 Run 3D pose estimation on the specified source. It could be either a
 webcam feed or a video file.
 
@@ -581,11 +617,15 @@ webcam feed or a video file.
             if skeleton_set:
                 engine3D.scene_remove(skeleton_set)
 
-Run
----------------------------------------------
+Run
+---
+
+
+
+Run Live Pose Estimation
+~~~~~~~~~~~~~~~~~~~~~~~~
+
-Run Live Pose Estimation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Run, using a webcam as the video input. By default, the primary webcam
 is set with ``source=0``. If you have multiple webcams, each one will be
@@ -610,8 +650,10 @@ picture on the left to interact.
 
     run_pose_estimation(source=0, flip=True, use_popup=False)
 
-Run Pose Estimation on a Video File
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Run Pose Estimation on a Video File
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
 
 If you do not have a webcam, you can still run this demo with a video
 file.
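For the ``Model Inference`` step above, the per-frame preprocessing follows from the conversion command earlier in this diff (``--input_shape=[1, 3, 256, 448]``, NCHW layout, with mean/scale normalization already folded into the IR). A minimal sketch of one inference call; the helper and variable names are illustrative assumptions, not the notebook's own:

.. code:: ipython3

    import cv2
    import numpy as np

    def infer_one_frame(compiled_model, frame):
        # Resize to the 448x256 (width x height) input baked into the IR,
        # then reorder HWC -> NCHW and add the batch dimension.
        resized = cv2.resize(frame, (448, 256))
        input_tensor = np.expand_dims(resized.transpose(2, 0, 1), 0).astype(np.float32)
        results = compiled_model([input_tensor])
        # Conversion declared three outputs: features, heatmaps, pafs.
        return [results[output] for output in compiled_model.outputs]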
Any `format supported by
diff --git a/docs/notebooks/all_notebooks_paths.txt b/docs/notebooks/all_notebooks_paths.txt
index f5932dc140a303..030465414f91ef 100644
--- a/docs/notebooks/all_notebooks_paths.txt
+++ b/docs/notebooks/all_notebooks_paths.txt
@@ -95,6 +95,7 @@ notebooks/244-named-entity-recognition/244-named-entity-recognition.ipynb
 notebooks/245-typo-detector/245-typo-detector.ipynb
 notebooks/246-depth-estimation-videpth/246-depth-estimation-videpth.ipynb
 notebooks/247-code-language-id/247-code-language-id.ipynb
+notebooks/248-stable-diffusion-xl/248-ssd-b1.ipynb
 notebooks/248-stable-diffusion-xl/248-stable-diffusion-xl.ipynb
 notebooks/249-oneformer-segmentation/249-oneformer-segmentation.ipynb
 notebooks/250-music-generation/250-music-generation.ipynb
@@ -111,12 +112,16 @@ notebooks/260-pix2struct-docvqa/260-pix2struct-docvqa.ipynb
 notebooks/261-fast-segment-anything/261-fast-segment-anything.ipynb
 notebooks/262-softvc-voice-conversion/262-softvc-voice-conversion.ipynb
 notebooks/263-latent-consistency-models-image-generation/263-latent-consistency-models-image-generation.ipynb
+notebooks/263-latent-consistency-models-image-generation/263-lcm-lora-controlnet.ipynb
 notebooks/264-qrcode-monster/264-qrcode-monster.ipynb
 notebooks/265-wuerstchen-image-generation/265-wuerstchen-image-generation.ipynb
 notebooks/266-speculative-sampling/266-speculative-sampling.ipynb
 notebooks/267-distil-whisper-asr/267-distil-whisper-asr.ipynb
 notebooks/268-table-question-answering/268-table-question-answering.ipynb
 notebooks/269-film-slowmo/269-film-slowmo.ipynb
+notebooks/270-sound-generation-audioldm2/270-sound-generation-audioldm2.ipynb
+notebooks/271-sdxl-turbo/271-sdxl-turbo.ipynb
+notebooks/272-paint-by-example/272-paint-by-example.ipynb
 notebooks/301-tensorflow-training-openvino/301-tensorflow-training-openvino.ipynb
 notebooks/301-tensorflow-training-openvino/301-tensorflow-training-openvino-nncf.ipynb
 notebooks/302-pytorch-quantization-aware-training/302-pytorch-quantization-aware-training.ipynb
diff --git a/docs/notebooks/index.html b/docs/notebooks/index.html
index 9ee8ff8112511e..7e753ff14f60f2 100644
--- a/docs/notebooks/index.html
+++ b/docs/notebooks/index.html
@@ -1,207 +1,229 @@
-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/
-

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/


../
-001-hello-world-with-output_files/                 31-Oct-2023 00:35                   -
-003-hello-segmentation-with-output_files/          31-Oct-2023 00:35                   -
-004-hello-detection-with-output_files/             31-Oct-2023 00:35                   -
-101-tensorflow-classification-to-openvino-with-..> 31-Oct-2023 00:35                   -
-102-pytorch-onnx-to-openvino-with-output_files/    31-Oct-2023 00:35                   -
-102-pytorch-to-openvino-with-output_files/         31-Oct-2023 00:35                   -
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35                   -
-106-auto-device-with-output_files/                 31-Oct-2023 00:35                   -
-109-latency-tricks-with-output_files/              31-Oct-2023 00:35                   -
-109-throughput-tricks-with-output_files/           31-Oct-2023 00:35                   -
-110-ct-scan-live-inference-with-output_files/      31-Oct-2023 00:35                   -
-110-ct-segmentation-quantize-nncf-with-output_f..> 31-Oct-2023 00:35                   -
-111-yolov5-quantization-migration-with-output_f..> 31-Oct-2023 00:35                   -
-113-image-classification-quantization-with-outp..> 31-Oct-2023 00:35                   -
-115-async-api-with-output_files/                   31-Oct-2023 00:35                   -
-117-model-server-with-output_files/                31-Oct-2023 00:35                   -
-118-optimize-preprocessing-with-output_files/      31-Oct-2023 00:35                   -
-119-tflite-to-openvino-with-output_files/          31-Oct-2023 00:35                   -
-120-tensorflow-instance-segmentation-to-openvin..> 31-Oct-2023 00:35                   -
-120-tensorflow-object-detection-to-openvino-wit..> 31-Oct-2023 00:35                   -
-123-detectron2-to-openvino-with-output_files/      31-Oct-2023 00:35                   -
-125-convnext-classification-with-output_files/     31-Oct-2023 00:35                   -
-126-tensorflow-hub-with-output_files/              31-Oct-2023 00:35                   -
-201-vision-monodepth-with-output_files/            31-Oct-2023 00:35                   -
-202-vision-superresolution-image-with-output_files 31-Oct-2023 00:35                   -
-203-meter-reader-with-output_files/                31-Oct-2023 00:35                   -
-204-segmenter-semantic-segmentation-with-output..> 31-Oct-2023 00:35                   -
-205-vision-background-removal-with-output_files/   31-Oct-2023 00:35                   -
-206-vision-paddlegan-anime-with-output_files/      31-Oct-2023 00:35                   -
-207-vision-paddlegan-superresolution-with-outpu..> 31-Oct-2023 00:35                   -
-208-optical-character-recognition-with-output_f..> 31-Oct-2023 00:35                   -
-209-handwritten-ocr-with-output_files/             31-Oct-2023 00:35                   -
-211-speech-to-text-with-output_files/              31-Oct-2023 00:35                   -
-212-pyannote-speaker-diarization-with-output_files 31-Oct-2023 00:35                   -
-215-image-inpainting-with-output_files/            31-Oct-2023 00:35                   -
-217-vision-deblur-with-output_files/               31-Oct-2023 00:35                   -
-218-vehicle-detection-and-recognition-with-outp..> 31-Oct-2023 00:35                   -
-220-cross-lingual-books-alignment-with-output_f..> 31-Oct-2023 00:35                   -
-222-vision-image-colorization-with-output_files/   31-Oct-2023 00:35                   -
-224-3D-segmentation-point-clouds-with-output_files 31-Oct-2023 00:35                   -
-225-stable-diffusion-text-to-image-with-output_..> 31-Oct-2023 00:35                   -
-226-yolov7-optimization-with-output_files/         31-Oct-2023 00:35                   -
-228-clip-zero-shot-convert-with-output_files/      31-Oct-2023 00:35                   -
-228-clip-zero-shot-quantize-with-output_files/     31-Oct-2023 00:35                   -
-230-yolov8-instance-segmentation-with-output_files 31-Oct-2023 00:35                   -
-230-yolov8-keypoint-detection-with-output_files/   31-Oct-2023 00:35                   -
-230-yolov8-object-detection-with-output_files/     31-Oct-2023 00:35                   -
-231-instruct-pix2pix-image-editing-with-output_..> 31-Oct-2023 00:35                   -
-232-clip-language-saliency-map-with-output_files/  31-Oct-2023 00:35                   -
-233-blip-convert-with-output_files/                31-Oct-2023 00:35                   -
-233-blip-optimize-with-output_files/               31-Oct-2023 00:35                   -
-234-encodec-audio-compression-with-output_files/   31-Oct-2023 00:35                   -
-235-controlnet-stable-diffusion-with-output_files/ 31-Oct-2023 00:35                   -
-236-stable-diffusion-v2-optimum-demo-comparison..> 31-Oct-2023 00:35                   -
-236-stable-diffusion-v2-optimum-demo-with-outpu..> 31-Oct-2023 00:35                   -
-236-stable-diffusion-v2-text-to-image-demo-with..> 31-Oct-2023 00:35                   -
-237-segment-anything-with-output_files/            31-Oct-2023 00:35                   -
-238-deep-floyd-if-optimize-with-output_files/      31-Oct-2023 00:35                   -
-239-image-bind-convert-with-output_files/          31-Oct-2023 00:35                   -
-241-riffusion-text-to-music-with-output_files/     31-Oct-2023 00:35                   -
-243-tflite-selfie-segmentation-with-output_files/  31-Oct-2023 00:35                   -
-246-depth-estimation-videpth-with-output_files/    31-Oct-2023 00:35                   -
-248-stable-diffusion-xl-with-output_files/         31-Oct-2023 00:35                   -
-249-oneformer-segmentation-with-output_files/      31-Oct-2023 00:35                   -
-251-tiny-sd-image-generation-with-output_files/    31-Oct-2023 00:35                   -
-257-llava-multimodal-chatbot-with-output_files/    31-Oct-2023 00:35                   -
-258-blip-diffusion-subject-generation-with-outp..> 31-Oct-2023 00:35                   -
-259-decidiffusion-image-generation-with-output_..> 31-Oct-2023 00:35                   -
-260-pix2struct-docvqa-with-output_files/           31-Oct-2023 00:35                   -
-261-fast-segment-anything-with-output_files/       31-Oct-2023 00:35                   -
-263-latent-consistency-models-image-generation-..> 31-Oct-2023 00:35                   -
-301-tensorflow-training-openvino-nncf-with-outp..> 31-Oct-2023 00:35                   -
-301-tensorflow-training-openvino-with-output_files 31-Oct-2023 00:35                   -
-305-tensorflow-quantization-aware-training-with..> 31-Oct-2023 00:35                   -
-401-object-detection-with-output_files/            31-Oct-2023 00:35                   -
-402-pose-estimation-with-output_files/             31-Oct-2023 00:35                   -
-403-action-recognition-webcam-with-output_files/   31-Oct-2023 00:35                   -
-404-style-transfer-with-output_files/              31-Oct-2023 00:35                   -
-405-paddle-ocr-webcam-with-output_files/           31-Oct-2023 00:35                   -
-407-person-tracking-with-output_files/             31-Oct-2023 00:35                   -
-notebook_utils-with-output_files/                  31-Oct-2023 00:35                   -
-001-hello-world-with-output.rst                    31-Oct-2023 00:35                5528
-002-openvino-api-with-output.rst                   31-Oct-2023 00:35               31911
-003-hello-segmentation-with-output.rst             31-Oct-2023 00:35                7250
-004-hello-detection-with-output.rst                31-Oct-2023 00:35                8354
-101-tensorflow-classification-to-openvino-with-..> 31-Oct-2023 00:35               10299
-102-pytorch-onnx-to-openvino-with-output.rst       31-Oct-2023 00:35               20461
-102-pytorch-to-openvino-with-output.rst            31-Oct-2023 00:35               25916
-103-paddle-to-openvino-classification-with-outp..> 31-Oct-2023 00:35               17486
-104-model-tools-with-output.rst                    31-Oct-2023 00:35               21502
-105-language-quantize-bert-with-output.rst         31-Oct-2023 00:35               20417
-106-auto-device-with-output.rst                    31-Oct-2023 00:35               24736
-107-speech-recognition-quantization-data2vec-wi..> 31-Oct-2023 00:35              970503
-108-gpu-device-with-output.rst                     31-Oct-2023 00:35               56803
-109-latency-tricks-with-output.rst                 31-Oct-2023 00:35               24713
-109-throughput-tricks-with-output.rst              31-Oct-2023 00:35               26977
-110-ct-scan-live-inference-with-output.rst         31-Oct-2023 00:35               18376
-110-ct-segmentation-quantize-nncf-with-output.rst  31-Oct-2023 00:35               38113
-111-yolov5-quantization-migration-with-output.rst  31-Oct-2023 00:35               50833
-112-pytorch-post-training-quantization-nncf-wit..> 31-Oct-2023 00:35               29881
-113-image-classification-quantization-with-outp..> 31-Oct-2023 00:35               22860
-115-async-api-with-output.rst                      31-Oct-2023 00:35               20748
-116-sparsity-optimization-with-output.rst          31-Oct-2023 00:35               18408
-117-model-server-with-output.rst                   31-Oct-2023 00:35               22391
-118-optimize-preprocessing-with-output.rst         31-Oct-2023 00:35               21598
-119-tflite-to-openvino-with-output.rst             31-Oct-2023 00:35               11796
-120-tensorflow-instance-segmentation-to-openvin..> 31-Oct-2023 00:35               25913
-120-tensorflow-object-detection-to-openvino-wit..> 31-Oct-2023 00:35               28132
-121-convert-to-openvino-with-output.rst            31-Oct-2023 00:35               90175
-122-speech-recognition-quantization-wav2vec2-wi..> 31-Oct-2023 00:35              482510
-122-yolov8-quantization-with-accuracy-control-w..> 31-Oct-2023 00:35               21676
-123-detectron2-to-openvino-with-output.rst         31-Oct-2023 00:35               16703
-124-hugging-face-hub-with-output.rst               31-Oct-2023 00:35               16482
-125-convnext-classification-with-output.rst        31-Oct-2023 00:35               10593
-126-tensorflow-hub-with-output.rst                 31-Oct-2023 00:35               15512
-201-vision-monodepth-with-output.rst               31-Oct-2023 00:35              969651
-202-vision-superresolution-image-with-output.rst   31-Oct-2023 00:35               28083
-202-vision-superresolution-video-with-output.rst   31-Oct-2023 00:35             9642402
-203-meter-reader-with-output.rst                   31-Oct-2023 00:35               27185
-204-segmenter-semantic-segmentation-with-output..> 31-Oct-2023 00:35               28012
-205-vision-background-removal-with-output.rst      31-Oct-2023 00:35               14676
-206-vision-paddlegan-anime-with-output.rst         31-Oct-2023 00:35               22317
-207-vision-paddlegan-superresolution-with-outpu..> 31-Oct-2023 00:35               18189
-208-optical-character-recognition-with-output.rst  31-Oct-2023 00:35               28750
-209-handwritten-ocr-with-output.rst                31-Oct-2023 00:35               13321
-210-slowfast-video-recognition-with-output.rst     31-Oct-2023 00:35              769241
-211-speech-to-text-with-output.rst                 31-Oct-2023 00:35               89249
-212-pyannote-speaker-diarization-with-output.rst   31-Oct-2023 00:35             1294876
-213-question-answering-with-output.rst             31-Oct-2023 00:35               23057
-214-grammar-correction-with-output.rst             31-Oct-2023 00:35               32428
-215-image-inpainting-with-output.rst               31-Oct-2023 00:35               10044
-217-vision-deblur-with-output.rst                  31-Oct-2023 00:35               12552
-218-vehicle-detection-and-recognition-with-outp..> 31-Oct-2023 00:35               18290
-219-knowledge-graphs-conve-with-output.rst         31-Oct-2023 00:35               26242
-220-cross-lingual-books-alignment-with-output.rst  31-Oct-2023 00:35               53274
-221-machine-translation-with-output.rst            31-Oct-2023 00:35               10182
-222-vision-image-colorization-with-output.rst      31-Oct-2023 00:35               19493
-224-3D-segmentation-point-clouds-with-output.rst   31-Oct-2023 00:35                9960
-225-stable-diffusion-text-to-image-with-output.rst 31-Oct-2023 00:35               56071
-226-yolov7-optimization-with-output.rst            31-Oct-2023 00:35               45974
-227-whisper-convert-with-output.rst                31-Oct-2023 00:35               18077
-227-whisper-nncf-quantize-with-output.rst          31-Oct-2023 00:35               21850
-228-clip-zero-shot-convert-with-output.rst         31-Oct-2023 00:35               15239
-228-clip-zero-shot-quantize-with-output.rst        31-Oct-2023 00:35               15739
-229-distilbert-sequence-classification-with-out..> 31-Oct-2023 00:35               13454
-230-yolov8-instance-segmentation-with-output.rst   31-Oct-2023 00:35               58539
-230-yolov8-keypoint-detection-with-output.rst      31-Oct-2023 00:35               55065
-230-yolov8-object-detection-with-output.rst        31-Oct-2023 00:35               58903
-231-instruct-pix2pix-image-editing-with-output.rst 31-Oct-2023 00:35               52684
-232-clip-language-saliency-map-with-output.rst     31-Oct-2023 00:35               34908
-233-blip-convert-with-output.rst                   31-Oct-2023 00:35               29348
-233-blip-optimize-with-output.rst                  31-Oct-2023 00:35               17131
-234-encodec-audio-compression-with-output.rst      31-Oct-2023 00:35             3864226
-235-controlnet-stable-diffusion-with-output.rst    31-Oct-2023 00:35               58531
-236-stable-diffusion-v2-infinite-zoom-with-outp..> 31-Oct-2023 00:35               52546
-236-stable-diffusion-v2-optimum-demo-comparison..> 31-Oct-2023 00:35                8059
-236-stable-diffusion-v2-optimum-demo-with-outpu..> 31-Oct-2023 00:35                8407
-236-stable-diffusion-v2-text-to-image-demo-with..> 31-Oct-2023 00:35               13860
-236-stable-diffusion-v2-text-to-image-with-outp..> 31-Oct-2023 00:35               44177
-237-segment-anything-with-output.rst               31-Oct-2023 00:35               64142
-238-deep-floyd-if-optimize-with-output.rst         31-Oct-2023 00:35               31042
-239-image-bind-convert-with-output.rst             31-Oct-2023 00:35             2397351
-240-dolly-2-instruction-following-with-output.rst  31-Oct-2023 00:35               31788
-241-riffusion-text-to-music-with-output.rst        31-Oct-2023 00:35              623769
-242-freevc-voice-conversion-with-output.rst        31-Oct-2023 00:35              653238
-243-tflite-selfie-segmentation-with-output.rst     31-Oct-2023 00:35               22673
-244-named-entity-recognition-with-output.rst       31-Oct-2023 00:35               23869
-245-typo-detector-with-output.rst                  31-Oct-2023 00:35               30340
-246-depth-estimation-videpth-with-output.rst       31-Oct-2023 00:35               53677
-247-code-language-id-with-output.rst               31-Oct-2023 00:35               38729
-248-stable-diffusion-xl-with-output.rst            31-Oct-2023 00:35               23133
-249-oneformer-segmentation-with-output.rst         31-Oct-2023 00:35               28917
-250-music-generation-with-output.rst               31-Oct-2023 00:35             1390751
-251-tiny-sd-image-generation-with-output.rst       31-Oct-2023 00:35               47330
-252-fastcomposer-image-generation-with-output.rst  31-Oct-2023 00:35               46698
-253-zeroscope-text2video-with-output.rst           31-Oct-2023 00:35             1875395
-254-llm-chatbot-with-output.rst                    31-Oct-2023 00:35               43239
-255-mms-massively-multilingual-speech-with-outp..> 31-Oct-2023 00:35             1388124
-256-bark-text-to-audio-with-output.rst             31-Oct-2023 00:35              698945
-257-llava-multimodal-chatbot-with-output.rst       31-Oct-2023 00:35               52293
-258-blip-diffusion-subject-generation-with-outp..> 31-Oct-2023 00:35               52630
-259-decidiffusion-image-generation-with-output.rst 31-Oct-2023 00:35               51032
-260-pix2struct-docvqa-with-output.rst              31-Oct-2023 00:35               14557
-261-fast-segment-anything-with-output.rst          31-Oct-2023 00:35               22553
-262-softvc-voice-conversion-with-output.rst        31-Oct-2023 00:35               10075
-263-latent-consistency-models-image-generation-..> 31-Oct-2023 00:35               34965
-301-tensorflow-training-openvino-nncf-with-outp..> 31-Oct-2023 00:35               32073
-301-tensorflow-training-openvino-with-output.rst   31-Oct-2023 00:35               34000
-302-pytorch-quantization-aware-training-with-ou..> 31-Oct-2023 00:35               30565
-305-tensorflow-quantization-aware-training-with..> 31-Oct-2023 00:35               22473
-401-object-detection-with-output.rst               31-Oct-2023 00:35               20565
-402-pose-estimation-with-output.rst                31-Oct-2023 00:35               17581
-403-action-recognition-webcam-with-output.rst      31-Oct-2023 00:35               27886
-404-style-transfer-with-output.rst                 31-Oct-2023 00:35               17562
-405-paddle-ocr-webcam-with-output.rst              31-Oct-2023 00:35               26070
-406-3D-pose-estimation-with-output.rst             31-Oct-2023 00:35               33937
-407-person-tracking-with-output.rst                31-Oct-2023 00:35               28861
-notebook_utils-with-output.rst                     31-Oct-2023 00:35               12520
-notebooks_tags.json                                31-Oct-2023 00:35               10406
-notebooks_with_binder_buttons.txt                  31-Oct-2023 00:35                1166
-notebooks_with_colab_buttons.txt                   31-Oct-2023 00:35                1569
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/


../
+001-hello-world-with-output_files/                 07-Dec-2023 00:49                   -
+003-hello-segmentation-with-output_files/          07-Dec-2023 00:49                   -
+004-hello-detection-with-output_files/             07-Dec-2023 00:49                   -
+101-tensorflow-classification-to-openvino-with-..> 07-Dec-2023 00:49                   -
+102-pytorch-onnx-to-openvino-with-output_files/    07-Dec-2023 00:49                   -
+102-pytorch-to-openvino-with-output_files/         07-Dec-2023 00:49                   -
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49                   -
+106-auto-device-with-output_files/                 07-Dec-2023 00:49                   -
+109-latency-tricks-with-output_files/              07-Dec-2023 00:49                   -
+109-throughput-tricks-with-output_files/           07-Dec-2023 00:49                   -
+110-ct-scan-live-inference-with-output_files/      07-Dec-2023 00:49                   -
+110-ct-segmentation-quantize-nncf-with-output_f..> 07-Dec-2023 00:49                   -
+111-yolov5-quantization-migration-with-output_f..> 07-Dec-2023 00:49                   -
+113-image-classification-quantization-with-outp..> 07-Dec-2023 00:49                   -
+115-async-api-with-output_files/                   07-Dec-2023 00:49                   -
+117-model-server-with-output_files/                07-Dec-2023 00:49                   -
+118-optimize-preprocessing-with-output_files/      07-Dec-2023 00:49                   -
+119-tflite-to-openvino-with-output_files/          07-Dec-2023 00:49                   -
+120-tensorflow-instance-segmentation-to-openvin..> 07-Dec-2023 00:49                   -
+120-tensorflow-object-detection-to-openvino-wit..> 07-Dec-2023 00:49                   -
+123-detectron2-to-openvino-with-output_files/      07-Dec-2023 00:49                   -
+125-convnext-classification-with-output_files/     07-Dec-2023 00:49                   -
+125-lraspp-segmentation-with-output_files/         07-Dec-2023 00:49                   -
+126-tensorflow-hub-with-output_files/              07-Dec-2023 00:49                   -
+201-vision-monodepth-with-output_files/            07-Dec-2023 00:49                   -
+202-vision-superresolution-image-with-output_files 07-Dec-2023 00:49                   -
+203-meter-reader-with-output_files/                07-Dec-2023 00:49                   -
+204-segmenter-semantic-segmentation-with-output..> 07-Dec-2023 00:49                   -
+205-vision-background-removal-with-output_files/   07-Dec-2023 00:49                   -
+206-vision-paddlegan-anime-with-output_files/      07-Dec-2023 00:49                   -
+207-vision-paddlegan-superresolution-with-outpu..> 07-Dec-2023 00:49                   -
+208-optical-character-recognition-with-output_f..> 07-Dec-2023 00:49                   -
+209-handwritten-ocr-with-output_files/             07-Dec-2023 00:49                   -
+211-speech-to-text-with-output_files/              07-Dec-2023 00:49                   -
+212-pyannote-speaker-diarization-with-output_files 07-Dec-2023 00:49                   -
+215-image-inpainting-with-output_files/            07-Dec-2023 00:49                   -
+216-attention-center-with-output_files/            07-Dec-2023 00:49                   -
+217-vision-deblur-with-output_files/               07-Dec-2023 00:49                   -
+218-vehicle-detection-and-recognition-with-outp..> 07-Dec-2023 00:49                   -
+220-cross-lingual-books-alignment-with-output_f..> 07-Dec-2023 00:49                   -
+222-vision-image-colorization-with-output_files/   07-Dec-2023 00:49                   -
+224-3D-segmentation-point-clouds-with-output_files 07-Dec-2023 00:49                   -
+225-stable-diffusion-text-to-image-with-output_..> 07-Dec-2023 00:49                   -
+226-yolov7-optimization-with-output_files/         07-Dec-2023 00:49                   -
+228-clip-zero-shot-convert-with-output_files/      07-Dec-2023 00:49                   -
+228-clip-zero-shot-quantize-with-output_files/     07-Dec-2023 00:49                   -
+230-yolov8-instance-segmentation-with-output_files 07-Dec-2023 00:49                   -
+230-yolov8-keypoint-detection-with-output_files/   07-Dec-2023 00:49                   -
+230-yolov8-object-detection-with-output_files/     07-Dec-2023 00:49                   -
+231-instruct-pix2pix-image-editing-with-output_..> 07-Dec-2023 00:49                   -
+232-clip-language-saliency-map-with-output_files/  07-Dec-2023 00:49                   -
+233-blip-convert-with-output_files/                07-Dec-2023 00:49                   -
+233-blip-optimize-with-output_files/               07-Dec-2023 00:49                   -
+234-encodec-audio-compression-with-output_files/   07-Dec-2023 00:49                   -
+235-controlnet-stable-diffusion-with-output_files/ 07-Dec-2023 00:49                   -
+236-stable-diffusion-v2-optimum-demo-comparison..> 07-Dec-2023 00:49                   -
+236-stable-diffusion-v2-optimum-demo-with-outpu..> 07-Dec-2023 00:49                   -
+236-stable-diffusion-v2-text-to-image-demo-with..> 07-Dec-2023 00:49                   -
+237-segment-anything-with-output_files/            07-Dec-2023 00:49                   -
+238-deep-floyd-if-optimize-with-output_files/      07-Dec-2023 00:49                   -
+239-image-bind-convert-with-output_files/          07-Dec-2023 00:49                   -
+241-riffusion-text-to-music-with-output_files/     07-Dec-2023 00:49                   -
+243-tflite-selfie-segmentation-with-output_files/  07-Dec-2023 00:49                   -
+246-depth-estimation-videpth-with-output_files/    07-Dec-2023 00:49                   -
+248-ssd-b1-with-output_files/                      07-Dec-2023 00:49                   -
+248-stable-diffusion-xl-with-output_files/         07-Dec-2023 00:49                   -
+249-oneformer-segmentation-with-output_files/      07-Dec-2023 00:49                   -
+251-tiny-sd-image-generation-with-output_files/    07-Dec-2023 00:49                   -
+257-llava-multimodal-chatbot-with-output_files/    07-Dec-2023 00:49                   -
+258-blip-diffusion-subject-generation-with-outp..> 07-Dec-2023 00:49                   -
+259-decidiffusion-image-generation-with-output_..> 07-Dec-2023 00:49                   -
+260-pix2struct-docvqa-with-output_files/           07-Dec-2023 00:49                   -
+261-fast-segment-anything-with-output_files/       07-Dec-2023 00:49                   -
+263-latent-consistency-models-image-generation-..> 07-Dec-2023 00:49                   -
+263-lcm-lora-controlnet-with-output_files/         07-Dec-2023 00:49                   -
+265-wuerstchen-image-generation-with-output_files/ 07-Dec-2023 00:49                   -
+269-film-slowmo-with-output_files/                 07-Dec-2023 00:49                   -
+271-sdxl-turbo-with-output_files/                  07-Dec-2023 00:49                   -
+301-tensorflow-training-openvino-nncf-with-outp..> 07-Dec-2023 00:49                   -
+301-tensorflow-training-openvino-with-output_files 07-Dec-2023 00:49                   -
+305-tensorflow-quantization-aware-training-with..> 07-Dec-2023 00:49                   -
+401-object-detection-with-output_files/            07-Dec-2023 00:49                   -
+402-pose-estimation-with-output_files/             07-Dec-2023 00:49                   -
+403-action-recognition-webcam-with-output_files/   07-Dec-2023 00:49                   -
+404-style-transfer-with-output_files/              07-Dec-2023 00:49                   -
+405-paddle-ocr-webcam-with-output_files/           07-Dec-2023 00:49                   -
+407-person-tracking-with-output_files/             07-Dec-2023 00:49                   -
+notebook_utils-with-output_files/                  07-Dec-2023 00:49                   -
+001-hello-world-with-output.rst                    07-Dec-2023 00:49                5254
+002-openvino-api-with-output.rst                   07-Dec-2023 00:49               38674
+003-hello-segmentation-with-output.rst             07-Dec-2023 00:49                6880
+004-hello-detection-with-output.rst                07-Dec-2023 00:49                8034
+101-tensorflow-classification-to-openvino-with-..> 07-Dec-2023 00:49               11407
+102-pytorch-onnx-to-openvino-with-output.rst       07-Dec-2023 00:49               19675
+102-pytorch-to-openvino-with-output.rst            07-Dec-2023 00:49               24458
+103-paddle-to-openvino-classification-with-outp..> 07-Dec-2023 00:49               17004
+104-model-tools-with-output.rst                    07-Dec-2023 00:49               21016
+105-language-quantize-bert-with-output.rst         07-Dec-2023 00:49               20138
+106-auto-device-with-output.rst                    07-Dec-2023 00:49               24070
+107-speech-recognition-quantization-data2vec-wi..> 07-Dec-2023 00:49              970151
+107-speech-recognition-quantization-wav2vec2-wi..> 07-Dec-2023 00:49              223348
+108-gpu-device-with-output.rst                     07-Dec-2023 00:49               55430
+109-latency-tricks-with-output.rst                 07-Dec-2023 00:49               24164
+109-throughput-tricks-with-output.rst              07-Dec-2023 00:49               26370
+110-ct-scan-live-inference-with-output.rst         07-Dec-2023 00:49               17892
+110-ct-segmentation-quantize-nncf-with-output.rst  07-Dec-2023 00:49               38096
+111-yolov5-quantization-migration-with-output.rst  07-Dec-2023 00:49               50502
+112-pytorch-post-training-quantization-nncf-wit..> 07-Dec-2023 00:49               29261
+113-image-classification-quantization-with-outp..> 07-Dec-2023 00:49               22890
+115-async-api-with-output.rst                      07-Dec-2023 00:49               19965
+116-sparsity-optimization-with-output.rst          07-Dec-2023 00:49               17816
+117-model-server-with-output.rst                   07-Dec-2023 00:49               21790
+118-optimize-preprocessing-with-output.rst         07-Dec-2023 00:49               21592
+119-tflite-to-openvino-with-output.rst             07-Dec-2023 00:49               11397
+120-tensorflow-instance-segmentation-to-openvin..> 07-Dec-2023 00:49               25278
+120-tensorflow-object-detection-to-openvino-wit..> 07-Dec-2023 00:49               27517
+121-convert-to-openvino-with-output.rst            07-Dec-2023 00:49               90848
+122-speech-recognition-quantization-wav2vec2-wi..> 07-Dec-2023 00:49              483195
+122-yolov8-quantization-with-accuracy-control-w..> 07-Dec-2023 00:49               22498
+123-detectron2-to-openvino-with-output.rst         07-Dec-2023 00:49               16684
+124-hugging-face-hub-with-output.rst               07-Dec-2023 00:49               24923
+125-convnext-classification-with-output.rst        07-Dec-2023 00:49               11040
+125-lraspp-segmentation-with-output.rst            07-Dec-2023 00:49               11034
+126-tensorflow-hub-with-output.rst                 07-Dec-2023 00:49               15099
+201-vision-monodepth-with-output.rst               07-Dec-2023 00:49              969377
+202-vision-superresolution-image-with-output.rst   07-Dec-2023 00:49               27153
+202-vision-superresolution-video-with-output.rst   07-Dec-2023 00:49             9641843
+203-meter-reader-with-output.rst                   07-Dec-2023 00:49               26832
+204-segmenter-semantic-segmentation-with-output..> 07-Dec-2023 00:49               27572
+205-vision-background-removal-with-output.rst      07-Dec-2023 00:49               14003
+206-vision-paddlegan-anime-with-output.rst         07-Dec-2023 00:49               21580
+207-vision-paddlegan-superresolution-with-outpu..> 07-Dec-2023 00:49               17587
+208-optical-character-recognition-with-output.rst  07-Dec-2023 00:49               27964
+209-handwritten-ocr-with-output.rst                07-Dec-2023 00:49               12725
+210-slowfast-video-recognition-with-output.rst     07-Dec-2023 00:49              768967
+211-speech-to-text-with-output.rst                 07-Dec-2023 00:49               88279
+212-pyannote-speaker-diarization-with-output.rst   07-Dec-2023 00:49             1294686
+213-question-answering-with-output.rst             07-Dec-2023 00:49               22507
+214-grammar-correction-with-output.rst             07-Dec-2023 00:49               31853
+215-image-inpainting-with-output.rst               07-Dec-2023 00:49                9781
+216-attention-center-with-output.rst               07-Dec-2023 00:49               14112
+217-vision-deblur-with-output.rst                  07-Dec-2023 00:49               11904
+218-vehicle-detection-and-recognition-with-outp..> 07-Dec-2023 00:49               17774
+219-knowledge-graphs-conve-with-output.rst         07-Dec-2023 00:49               25536
+220-cross-lingual-books-alignment-with-output.rst  07-Dec-2023 00:49               50005
+221-machine-translation-with-output.rst            07-Dec-2023 00:49                9859
+222-vision-image-colorization-with-output.rst      07-Dec-2023 00:49               19076
+223-text-prediction-with-output.rst                07-Dec-2023 00:49               27142
+224-3D-segmentation-point-clouds-with-output.rst   07-Dec-2023 00:49               10121
+225-stable-diffusion-text-to-image-with-output.rst 07-Dec-2023 00:49               55735
+226-yolov7-optimization-with-output.rst            07-Dec-2023 00:49               46096
+227-whisper-convert-with-output.rst                07-Dec-2023 00:49               17645
+227-whisper-nncf-quantize-with-output.rst          07-Dec-2023 00:49               23414
+228-clip-zero-shot-convert-with-output.rst         07-Dec-2023 00:49               14962
+228-clip-zero-shot-quantize-with-output.rst        07-Dec-2023 00:49               15461
+229-distilbert-sequence-classification-with-out..> 07-Dec-2023 00:49               13235
+230-yolov8-instance-segmentation-with-output.rst   07-Dec-2023 00:49               57650
+230-yolov8-keypoint-detection-with-output.rst      07-Dec-2023 00:49               54173
+230-yolov8-object-detection-with-output.rst        07-Dec-2023 00:49               57728
+231-instruct-pix2pix-image-editing-with-output.rst 07-Dec-2023 00:49               52168
+232-clip-language-saliency-map-with-output.rst     07-Dec-2023 00:49               34489
+233-blip-convert-with-output.rst                   07-Dec-2023 00:49               28629
+233-blip-optimize-with-output.rst                  07-Dec-2023 00:49               16575
+234-encodec-audio-compression-with-output.rst      07-Dec-2023 00:49             3863698
+235-controlnet-stable-diffusion-with-output.rst    07-Dec-2023 00:49               55588
+236-stable-diffusion-v2-infinite-zoom-with-outp..> 07-Dec-2023 00:49               52084
+236-stable-diffusion-v2-optimum-demo-comparison..> 07-Dec-2023 00:49                7935
+236-stable-diffusion-v2-optimum-demo-with-outpu..> 07-Dec-2023 00:49                8283
+236-stable-diffusion-v2-text-to-image-demo-with..> 07-Dec-2023 00:49               13530
+236-stable-diffusion-v2-text-to-image-with-outp..> 07-Dec-2023 00:49               43738
+237-segment-anything-with-output.rst               07-Dec-2023 00:49               62765
+238-deep-floyd-if-optimize-with-output.rst         07-Dec-2023 00:49               30528
+239-image-bind-convert-with-output.rst             07-Dec-2023 00:49             2396943
+240-dolly-2-instruction-following-with-output.rst  07-Dec-2023 00:49               30765
+241-riffusion-text-to-music-with-output.rst        07-Dec-2023 00:49              623693
+243-tflite-selfie-segmentation-with-output.rst     07-Dec-2023 00:49               22157
+244-named-entity-recognition-with-output.rst       07-Dec-2023 00:49               20326
+245-typo-detector-with-output.rst                  07-Dec-2023 00:49               28788
+246-depth-estimation-videpth-with-output.rst       07-Dec-2023 00:49               53035
+247-code-language-id-with-output.rst               07-Dec-2023 00:49               36862
+248-ssd-b1-with-output.rst                         07-Dec-2023 00:49               18947
+248-stable-diffusion-xl-with-output.rst            07-Dec-2023 00:49               22703
+249-oneformer-segmentation-with-output.rst         07-Dec-2023 00:49               28293
+250-music-generation-with-output.rst               07-Dec-2023 00:49             1393573
+251-tiny-sd-image-generation-with-output.rst       07-Dec-2023 00:49               46798
+252-fastcomposer-image-generation-with-output.rst  07-Dec-2023 00:49               46119
+253-zeroscope-text2video-with-output.rst           07-Dec-2023 00:49             1876054
+254-llm-chatbot-with-output.rst                    07-Dec-2023 00:49               48901
+255-mms-massively-multilingual-speech-with-outp..> 07-Dec-2023 00:49             1390002
+256-bark-text-to-audio-with-output.rst             07-Dec-2023 00:49              698540
+257-llava-multimodal-chatbot-with-output.rst       07-Dec-2023 00:49               55772
+258-blip-diffusion-subject-generation-with-outp..> 07-Dec-2023 00:49               53393
+259-decidiffusion-image-generation-with-output.rst 07-Dec-2023 00:49               50338
+260-pix2struct-docvqa-with-output.rst              07-Dec-2023 00:49               14327
+261-fast-segment-anything-with-output.rst          07-Dec-2023 00:49               36293
+262-softvc-voice-conversion-with-output.rst        07-Dec-2023 00:49               10132
+263-latent-consistency-models-image-generation-..> 07-Dec-2023 00:49               45989
+263-lcm-lora-controlnet-with-output.rst            07-Dec-2023 00:49               66944
+264-qrcode-monster-with-output.rst                 07-Dec-2023 00:49               40878
+265-wuerstchen-image-generation-with-output.rst    07-Dec-2023 00:49               15781
+266-speculative-sampling-with-output.rst           07-Dec-2023 00:49               14848
+267-distil-whisper-asr-with-output.rst             07-Dec-2023 00:49             3460038
+268-table-question-answering-with-output.rst       07-Dec-2023 00:49               28733
+269-film-slowmo-with-output.rst                    07-Dec-2023 00:49             2090052
+270-sound-generation-audioldm2-with-output.rst     07-Dec-2023 00:49              367989
+271-sdxl-turbo-with-output.rst                     07-Dec-2023 00:49               31083
+272-paint-by-example-with-output.rst               07-Dec-2023 00:49               52442
+301-tensorflow-training-openvino-nncf-with-outp..> 07-Dec-2023 00:49               53590
+301-tensorflow-training-openvino-with-output.rst   07-Dec-2023 00:49               44409
+302-pytorch-quantization-aware-training-with-ou..> 07-Dec-2023 00:49               30524
+305-tensorflow-quantization-aware-training-with..> 07-Dec-2023 00:49               24552
+401-object-detection-with-output.rst               07-Dec-2023 00:49               19275
+402-pose-estimation-with-output.rst                07-Dec-2023 00:49               17019
+403-action-recognition-webcam-with-output.rst      07-Dec-2023 00:49               27306
+404-style-transfer-with-output.rst                 07-Dec-2023 00:49               16956
+405-paddle-ocr-webcam-with-output.rst              07-Dec-2023 00:49               25566
+406-3D-pose-estimation-with-output.rst             07-Dec-2023 00:49               33773
+407-person-tracking-with-output.rst                07-Dec-2023 00:49               28265
+all_notebooks_paths.txt                            07-Dec-2023 00:49                9708
+notebook_utils-with-output.rst                     07-Dec-2023 00:49               13124
+notebooks_tags.json                                07-Dec-2023 00:49               11338
+notebooks_with_binder_buttons.txt                  07-Dec-2023 00:49                1166
+notebooks_with_colab_buttons.txt                   07-Dec-2023 00:49                1598
 

diff --git a/docs/notebooks/notebook_utils-with-output.rst b/docs/notebooks/notebook_utils-with-output.rst
index 7d23c0ccae3320..e4311baee76d5a 100644
--- a/docs/notebooks/notebook_utils-with-output.rst
+++ b/docs/notebooks/notebook_utils-with-output.rst
@@ -5,13 +5,12 @@ This notebook contains helper functions and classes for use with OpenVINO™
 Notebooks. The code is synchronized with the ``notebook_utils.py`` file
 in the same directory as this notebook.
 
-There are five categories:
+There are four categories:
 
 - `Files <#files>`__
 - `Images <#images>`__
 - `Videos <#videos>`__
 - `Visualization <#visualization>`__
-- `OpenVINO Tools <#openvino-tools>`__
 - `Checks and Alerts <#checks-and-alerts>`__
 
 Each category contains a test cell that also shows how to use the
@@ -33,6 +32,8 @@ functions in the section.
 Files
 -----
 
+
+
 Load an image, download a file, download an OpenVINO IR model, and
 create a progress bar to show download progress.
 
@@ -58,6 +59,8 @@ create a progress bar to show download progress.
 Test File Functions
 ~~~~~~~~~~~~~~~~~~~
 
+
+
 .. code:: ipython3
 
     model_url = "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/models/002-example-models/segmentation.xml"
@@ -91,7 +94,7 @@ Test File Functions
 
 .. parsed-literal::
 
-    /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-534/.workspace/scm/ov-notebook/notebooks/utils/Safety_Full_Hat_and_Vest.mp4
+    /opt/home/k8sworker/ci-ai/cibuilds/ov-notebook/OVNotebookOps-561/.workspace/scm/ov-notebook/notebooks/utils/Safety_Full_Hat_and_Vest.mp4
 
 .. code:: ipython3
 
@@ -108,12 +111,12 @@ Test File Functions
 
 .. parsed-literal::
 
-    openvino_notebooks_readme.md:   0%|          | 0.00/13.8k [00:00
-Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/notebook_utils-with-output_files/
+Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/notebook_utils-with-output_files/
-

Index of /projects/ov-notebook/0.1.0-latest/20231030220807/dist/rst_files/notebook_utils-with-output_files/


../
-notebook_utils-with-output_12_0.jpg                31-Oct-2023 00:35              121563
-notebook_utils-with-output_12_0.png                31-Oct-2023 00:35              869307
-notebook_utils-with-output_26_0.png                31-Oct-2023 00:35               45498
-notebook_utils-with-output_41_0.png                31-Oct-2023 00:35               10059
-notebook_utils-with-output_41_1.png                31-Oct-2023 00:35               37584
-notebook_utils-with-output_41_2.png                31-Oct-2023 00:35               16690
-notebook_utils-with-output_41_3.png                31-Oct-2023 00:35               38992
+

Index of /projects/ov-notebook/0.1.0-latest/20231206220809/dist/rst_files/notebook_utils-with-output_files/


../
+notebook_utils-with-output_12_0.jpg                07-Dec-2023 00:49              121563
+notebook_utils-with-output_12_0.png                07-Dec-2023 00:49              869307
+notebook_utils-with-output_26_0.png                07-Dec-2023 00:49               43773
+notebook_utils-with-output_41_0.png                07-Dec-2023 00:49               10059
+notebook_utils-with-output_41_1.png                07-Dec-2023 00:49               37584
+notebook_utils-with-output_41_2.png                07-Dec-2023 00:49               16690
+notebook_utils-with-output_41_3.png                07-Dec-2023 00:49               38992
 

diff --git a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_26_0.png b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_26_0.png
index bfbb21457a5712..d6b5e4a29bf621 100644
--- a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_26_0.png
+++ b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_26_0.png
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a447faf6463b3efa6f6d4c8bcedd6731e29c3807e9aa8dff94973e5e6c0263f5
-size 45498
+oid sha256:0b1d0537d8cbfb960b949e320a014f87a4c5d8b6db7b1714bdd0603f561e2503
+size 43773
diff --git a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_0.png b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_0.png
index 6395be6a3a3cd3..ce444cbd6d247d 100644
--- a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_0.png
+++ b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_0.png
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5b51a1aaed162de717fc3e4d8c60add72da2c425c61a1d0c78b81284306559c7
+oid sha256:2f7387728cbddbbf39ef89389cb6bf5f4f3629b61c3c0a46079e6458dd969fc7
 size 10059
diff --git a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_1.png b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_1.png
index 8293bf48530916..4366e20c569995 100644
--- a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_1.png
+++ b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_1.png
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9f5bf5d56666a54aefb7230259cc93c7ab32b775d19c70a37c3f057f002e9780
+oid sha256:1977fd5fca0211a53997161d8532792b4f4477ba74026a2cf706dc4d073d440f
 size 37584
diff --git a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_2.png b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_2.png
index 117bfd26781441..4d411422f3c836 100644
--- a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_2.png
+++ b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_2.png
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8cd96d054ba6bbe07433f6d913e7a2113cb19b689403ed8bdd1e632088a9e029
+oid sha256:7e626d98855cd395c6a5fa82e32e40ccb77fa5e3763cc4952fffe288dee82678
 size 16690
diff --git a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_3.png b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_3.png
index b715839e69dbff..4ac8d5477bcf4f 100644
--- a/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_3.png
+++ b/docs/notebooks/notebook_utils-with-output_files/notebook_utils-with-output_41_3.png
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bd7d398d3061ea2c89b816639925d342909012cb03786d00bef4100943ea24fa
+oid sha256:eee1b0a122156df270e09ae5a6b2749f6c6182a1fcd31bdf00c62dafcdf7c941
 size 38992
diff --git a/docs/notebooks/notebooks_tags.json b/docs/notebooks/notebooks_tags.json
index 3979ba34021132..7cf917b57cf8cb 100644
--- a/docs/notebooks/notebooks_tags.json
+++ b/docs/notebooks/notebooks_tags.json
@@ -1,7 +1,9 @@
 {
     "002-openvino-api": [
         "ONNX",
-        "Reshape Model"
+        "Pytorch",
+        "Reshape Model",
+        "Torchvision"
     ],
     "101-tensorflow-classification-to-openvino": [
"Tensorflow" @@ -389,7 +391,6 @@ "Async Inference", "NNCF", "Pytorch", - "Reshape Model", "Transformers" ], "255-mms-massively-multilingual-speech": [ @@ -433,6 +434,12 @@ "Pytorch", "Transformers" ], + "263-lcm-lora-controlnet": [ + "Benchmark Model", + "NNCF", + "Pytorch", + "Transformers" + ], "264-qrcode-monster": [ "Pytorch", "Transformers" @@ -456,6 +463,19 @@ "269-film-slowmo": [ "Tensorflow" ], + "270-sound-generation-audioldm2": [ + "Pytorch" + ], + "271-sdxl-turbo": [ + "Benchmark Model", + "NNCF", + "Pytorch", + "Transformers" + ], + "272-paint-by-example": [ + "Pytorch", + "Transformers" + ], "301-tensorflow-training-openvino-nncf": [ "Benchmark Model", "NNCF", diff --git a/docs/snippets/ov_auto.py b/docs/snippets/ov_auto.py index 47d8d877ecda24..d912401e85f453 100644 --- a/docs/snippets/ov_auto.py +++ b/docs/snippets/ov_auto.py @@ -8,6 +8,7 @@ import openvino.properties.device as device import openvino.properties.hint as hints import openvino.properties.streams as streams +import openvino.properties.intel_auto as intel_auto #! [py_ov_property_import_header] import openvino.properties.log as log @@ -96,11 +97,13 @@ def part3(): }, ) # To use the “CUMULATIVE_THROUGHPUT” mode: + # To use the ROUND_ROBIN schedule policy: compiled_model = core.compile_model( model=model, device_name="AUTO", config={ - hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT + hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT, + intel_auto.schedule_policy: intel_auto.SchedulePolicy.ROUND_ROBIN }, ) #! [part3] diff --git a/docs/snippets/ov_model_with_state_infer.cpp b/docs/snippets/ov_model_with_state_infer.cpp index e7720c77123f4f..b4aa8e350d0b7e 100644 --- a/docs/snippets/ov_model_with_state_infer.cpp +++ b/docs/snippets/ov_model_with_state_infer.cpp @@ -57,9 +57,7 @@ int main(int argc, char* argv[]) { } // 7. Initialize memory state before starting - for (auto&& state : infer_request.query_state()) { - state.reset(); - } + infer_request.reset_state(); //! 
 
     // input data
diff --git a/src/bindings/c/src/CMakeLists.txt b/src/bindings/c/src/CMakeLists.txt
index edc29574fe1a3b..8d8facf42179a6 100644
--- a/src/bindings/c/src/CMakeLists.txt
+++ b/src/bindings/c/src/CMakeLists.txt
@@ -52,17 +52,19 @@ ov_cpack_add_component(${OV_CPACK_COMP_CORE_C} HIDDEN)
 ov_cpack_add_component(${OV_CPACK_COMP_CORE_C_DEV} HIDDEN)
 
 install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets
-        RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${OV_CPACK_COMP_CORE_C}
-        ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE_C}
-        LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT ${OV_CPACK_COMP_CORE_C}
+        RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${OV_CPACK_COMP_CORE_C} ${OV_CPACK_COMP_CORE_C_EXCLUDE_ALL}
+        ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE_C} ${OV_CPACK_COMP_CORE_C_EXCLUDE_ALL}
+        LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT ${OV_CPACK_COMP_CORE_C} ${OV_CPACK_COMP_CORE_C_EXCLUDE_ALL}
         NAMELINK_COMPONENT ${OV_CPACK_COMP_CORE_C_DEV}
         INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR}
                              ${OV_CPACK_INCLUDEDIR}/ie)
 
 install(DIRECTORY ${OpenVINO_C_API_SOURCE_DIR}/include/c_api
         DESTINATION ${OV_CPACK_INCLUDEDIR}/ie
-        COMPONENT ${OV_CPACK_COMP_CORE_C_DEV})
+        COMPONENT ${OV_CPACK_COMP_CORE_C_DEV}
+        ${OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL})
 
 install(DIRECTORY ${OpenVINO_C_API_SOURCE_DIR}/include/openvino/
         DESTINATION ${OV_CPACK_INCLUDEDIR}/openvino
-        COMPONENT ${OV_CPACK_COMP_CORE_C_DEV})
+        COMPONENT ${OV_CPACK_COMP_CORE_C_DEV}
+        ${OV_CPACK_COMP_CORE_C_DEV_EXCLUDE_ALL})
diff --git a/src/bindings/c/tests/ov_core_test.cpp b/src/bindings/c/tests/ov_core_test.cpp
index 90ed036cec32b4..6579c9268418fe 100644
--- a/src/bindings/c/tests/ov_core_test.cpp
+++ b/src/bindings/c/tests/ov_core_test.cpp
@@ -154,7 +154,12 @@ TEST_P(ov_core_test, ov_core_compile_model) {
     ov_core_free(core);
 }
 
+#ifdef OPENVINO_ARCH_ARM64
+// Ticket: 126283
+TEST_P(ov_core_test, DISABLED_ov_core_compile_model_with_property) {
+#else
 TEST_P(ov_core_test, ov_core_compile_model_with_property) {
+#endif
     auto device_name = GetParam();
     ov_core_t* core = nullptr;
     OV_EXPECT_OK(ov_core_create(&core));
diff --git a/src/bindings/c/tests/test_model_repo.cpp b/src/bindings/c/tests/test_model_repo.cpp
index b7af72817d3b77..4f722b3adebf1d 100644
--- a/src/bindings/c/tests/test_model_repo.cpp
+++ b/src/bindings/c/tests/test_model_repo.cpp
@@ -6,6 +6,8 @@
 
 #include
 
+#include "common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp"
+
 namespace TestDataHelpers {
 
 const std::string model_bin_name = "test_model.bin";
@@ -15,7 +17,7 @@ const std::string model_exported_name = "test_exported_model.blob";
 void generate_test_model() {
     ov::pass::Manager manager;
     manager.register_pass(model_xml_name, model_bin_name);
-    auto function = ngraph::builder::subgraph::makeConvPoolReluNoReshapes({1, 3, 227, 227});
+    auto function = ov::test::utils::make_conv_pool_relu_no_reshapes({1, 3, 227, 227});
     manager.run_passes(function);
 }
 
diff --git a/src/bindings/python/CMakeLists.txt b/src/bindings/python/CMakeLists.txt
index bd2e82d50b779e..30abedbe2c30b7 100644
--- a/src/bindings/python/CMakeLists.txt
+++ b/src/bindings/python/CMakeLists.txt
@@ -325,11 +325,11 @@ macro(ov_define_setup_py_dependencies)
         "${OpenVINO_SOURCE_DIR}/licensing/onednn_third-party-programs.txt"
         "${OpenVINO_SOURCE_DIR}/licensing/runtime-third-party-programs.txt"
         "${OpenVINO_SOURCE_DIR}/licensing/tbb_third-party-programs.txt"
-        "${OpenVINO_SOURCE_DIR}/docs/install_guides/pypi-openvino-rt.md")
"${OpenVINO_SOURCE_DIR}/docs/dev/pypi_publish/pypi-openvino-rt.md") if(wheel_pre_release) list(APPEND ov_setup_py_deps - "${OpenVINO_SOURCE_DIR}/docs/install_guides/pre-release-note.md") + "${OpenVINO_SOURCE_DIR}/docs/dev/pypi_publish/pre-release-note.md") endif() endmacro() diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py index 371d3fae131fe2..5f163f2302d15f 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py @@ -20,7 +20,8 @@ from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder from openvino.frontend.pytorch.torchdynamo.partition import Partitioner from openvino.frontend.pytorch.torchdynamo.execute import execute, execute_cached -from openvino.frontend.pytorch.torchdynamo.compile import cached_model_name, cache_root_path, get_device, openvino_compile_cached_model +from openvino.frontend.pytorch.torchdynamo.compile import cached_model_name, openvino_compile_cached_model +from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_model_caching from openvino.runtime import Core, Type, PartialShape @@ -44,8 +45,8 @@ @register_backend @fake_tensor_unsupported -def openvino(subgraph, example_inputs): - return fx_openvino(subgraph, example_inputs) +def openvino(subgraph, example_inputs, options=None): + return fx_openvino(subgraph, example_inputs, options) @register_backend @fake_tensor_unsupported @@ -111,18 +112,19 @@ def _call(*args): return compile_fx(subgraph, example_inputs) -def fx_openvino(subgraph, example_inputs): +def fx_openvino(subgraph, example_inputs, options): try: executor_parameters = None inputs_reversed = False - if os.getenv("OPENVINO_TORCH_MODEL_CACHING") is not None: + openvino_model_caching = _get_model_caching(options) + if openvino_model_caching is not None: # Create a hash to be used for caching model_hash_str = sha256(subgraph.code.encode('utf-8')).hexdigest() executor_parameters = {"model_hash_str": model_hash_str} # Check if the model was fully supported and already cached example_inputs.reverse() inputs_reversed = True - maybe_fs_cached_name = cached_model_name(model_hash_str + "_fs", get_device(), example_inputs, cache_root_path()) + maybe_fs_cached_name = cached_model_name(model_hash_str + "_fs", _get_device(options), example_inputs, _get_cache_dir(options)) if os.path.isfile(maybe_fs_cached_name + ".xml") and os.path.isfile(maybe_fs_cached_name + ".bin"): # Model is fully supported and already cached. Run the cached OV model directly. 
compiled_model = openvino_compile_cached_model(maybe_fs_cached_name, *example_inputs) @@ -146,7 +148,7 @@ def _call(*args): def _call(*args): res = execute(compiled_model, *args, executor="openvino", - executor_parameters=executor_parameters) + executor_parameters=executor_parameters, options=options) return res return _call except Exception as e: diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py new file mode 100644 index 00000000000000..603a1e220a8317 --- /dev/null +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py @@ -0,0 +1,60 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# flake8: noqa +# mypy: ignore-errors + +from typing import Optional, Any +import os +from openvino.runtime import Core + + +def _get_device(options) -> Optional[Any]: + core = Core() + device = "CPU" + + if options is not None and "device" in options: + device = options["device"] + elif os.getenv("OPENVINO_TORCH_BACKEND_DEVICE") is not None: + device = os.getenv("OPENVINO_TORCH_BACKEND_DEVICE") + + if device is not None: + assert device in core.available_devices, ( + "Specified device " + + device + + " is not in the list of OpenVINO Available Devices" + ) + + return device + +def _is_cache_dir_in_config(options) -> Optional[Any]: + if options is not None and "config" in options: + cfg = options["config"] + if "CACHE_DIR" in cfg: + return True + return False + + +def _get_cache_dir(options) -> Optional[Any]: + cache_dir = "./cache" + if options is not None and "cache_dir" in options: + cache_dir = options["cache_dir"] + if _is_cache_dir_in_config(options): + cache_dir = options["config"]["CACHE_DIR"] + else: + cache_dir_env = os.getenv("OPENVINO_TORCH_CACHE_DIR") + if cache_dir_env is not None: + cache_dir = cache_dir_env + return cache_dir + + +def _get_model_caching(options) -> Optional[Any]: + if options is not None and "model_caching" in options: + return options["model_caching"] + else: + return os.getenv("OPENVINO_TORCH_MODEL_CACHING") + + +def _get_config(options) -> Optional[Any]: + if options is not None and "config" in options: + return options["config"] \ No newline at end of file diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py index df8addbf68d44a..fa228c6c69e44a 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py @@ -15,6 +15,7 @@ from openvino.frontend import FrontEndManager from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder from openvino.runtime import Core, Type, PartialShape, serialize +from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_config, _is_cache_dir_in_config from typing import Callable, Optional @@ -32,33 +33,17 @@ def cached_model_name(model_hash_str, device, args, cache_root, reversed = False return None inputs_str = "" - for idx, input_data in enumerate(args): + for idx, input_data in enumerate(args): if reversed: inputs_str = "_" + str(input_data.type()) + str(input_data.size())[11:-1].replace(" ", "") + inputs_str else: inputs_str += "_" + str(input_data.type()) + str(input_data.size())[11:-1].replace(" ", "") inputs_str = sha256(inputs_str.encode('utf-8')).hexdigest() file_name += inputs_str - - return file_name - -def cache_root_path(): - cache_root = "./cache/" - if
os.getenv("OPENVINO_TORCH_CACHE_DIR") is not None: - cache_root = os.getenv("OPENVINO_TORCH_CACHE_DIR") - return cache_root - -def get_device(): - core = Core() - device = "CPU" - - if os.getenv("OPENVINO_TORCH_BACKEND_DEVICE") is not None: - device = os.getenv("OPENVINO_TORCH_BACKEND_DEVICE") - assert device in core.available_devices, "Specified device " + device + " is not in the list of OpenVINO Available Devices" - return device + return file_name -def openvino_compile_cached_model(cached_model_path, *example_inputs): +def openvino_compile_cached_model(cached_model_path, *example_inputs, options): core = Core() om = core.read_model(cached_model_path + ".xml") @@ -78,17 +63,22 @@ def openvino_compile_cached_model(cached_model_path, *example_inputs): om.inputs[idx].get_node().set_partial_shape(PartialShape(list(input_data.shape))) om.validate_nodes_and_infer_types() - core.set_property({'CACHE_DIR': cache_root_path() + '/blob'}) + config = {} + + if _is_cache_dir_in_config(options): + config = _get_config(options) + else: + config["CACHE_DIR"] = _get_cache_dir(options) - compiled_model = core.compile_model(om, get_device()) + compiled_model = core.compile_model(om, _get_device(options), config) return compiled_model -def openvino_compile(gm: GraphModule, *args, model_hash_str: str = None): +def openvino_compile(gm: GraphModule, *args, model_hash_str: str = None, options=None): core = Core() - device = get_device() - cache_root = cache_root_path() + device = _get_device(options) + cache_root = _get_cache_dir(options) file_name = cached_model_name(model_hash_str, device, args, cache_root) if file_name is not None and os.path.isfile(file_name + ".xml") and os.path.isfile(file_name + ".bin"): @@ -99,7 +89,7 @@ def openvino_compile(gm: GraphModule, *args, model_hash_str: str = None): input_shapes = [] input_types = [] - for idx, input_data in enumerate(args): + for idx, input_data in enumerate(args): input_types.append(input_data.type()) input_shapes.append(input_data.size()) @@ -128,8 +118,13 @@ def openvino_compile(gm: GraphModule, *args, model_hash_str: str = None): om.inputs[idx].get_node().set_partial_shape(PartialShape(list(input_data.shape))) om.validate_nodes_and_infer_types() + config = {} + if model_hash_str is not None: - core.set_property({'CACHE_DIR': cache_root + '/blob'}) + if _is_cache_dir_in_config(options): + config = _get_config(options) + else: + config["CACHE_DIR"] = cache_root - compiled = core.compile_model(om, device) + compiled = core.compile_model(om, device, config) return compiled diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py index 4b45c1b7a6af5a..609a7b73ea1f21 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py @@ -21,8 +21,9 @@ from openvino.frontend.pytorch.torchdynamo.partition import Partitioner from openvino.frontend.pytorch.torchdynamo.compile import openvino_compile from openvino.runtime import Core, Type, PartialShape +from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device -from typing import Callable, Optional +from typing import Callable, Optional, Any from torch.fx.experimental.proxy_tensor import make_fx, wrapper_and_args_for_make_fx @@ -44,9 +45,10 @@ def execute( *args, executor: str = "openvino", executor_parameters: Optional[dict] = None, + options: Optional[Any] = None, ): if executor 
== "openvino": - return openvino_execute_partitioned(gm, *args, executor_parameters=executor_parameters) + return openvino_execute_partitioned(gm, *args, executor_parameters=executor_parameters, options=options) elif executor == "strictly_openvino": return openvino_execute(gm, *args, executor_parameters=executor_parameters) @@ -65,7 +67,7 @@ def execute_cached(compiled_model, *args): return result -def openvino_execute(gm: GraphModule, *args, executor_parameters=None, partition_id): +def openvino_execute(gm: GraphModule, *args, executor_parameters=None, partition_id, options): executor_parameters = executor_parameters or DEFAULT_OPENVINO_PYTHON_CONFIG @@ -86,7 +88,7 @@ def openvino_execute(gm: GraphModule, *args, executor_parameters=None, partition if use_cache and (partition_id in compiled_cache): compiled = compiled_cache[partition_id] else: - compiled = openvino_compile(gm, *args, model_hash_str=model_hash_str) + compiled = openvino_compile(gm, *args, model_hash_str=model_hash_str, options=options) compiled_cache[partition_id] = compiled flat_args, _ = tree_flatten(args) @@ -101,20 +103,21 @@ def openvino_execute(gm: GraphModule, *args, executor_parameters=None, partition class OpenVINOGraphModule(torch.nn.Module): - def __init__(self, gm, partition_id, use_python_fusion_cache, model_hash_str: str = None): + def __init__(self, gm, partition_id, use_python_fusion_cache, model_hash_str: str = None, options=None): super().__init__() self.gm = gm self.partition_id = partition_id self.executor_parameters = {"use_python_fusion_cache": use_python_fusion_cache, "model_hash_str": model_hash_str} self.perm_fallback = False + self.options = options def __call__(self, *args): if self.perm_fallback: return self.gm(*args) try: - result = openvino_execute(self.gm, *args, executor_parameters=self.executor_parameters, partition_id=self.partition_id) + result = openvino_execute(self.gm, *args, executor_parameters=self.executor_parameters, partition_id=self.partition_id, options=self.options) except Exception: self.perm_fallback = True return self.gm(*args) @@ -122,7 +125,7 @@ def __call__(self, *args): return result -def partition_graph(gm: GraphModule, use_python_fusion_cache: bool, model_hash_str: str = None): +def partition_graph(gm: GraphModule, use_python_fusion_cache: bool, model_hash_str: str = None, options=None): global max_openvino_partitions partition_id = max_openvino_partitions for node in gm.graph.nodes: @@ -133,7 +136,7 @@ def partition_graph(gm: GraphModule, use_python_fusion_cache: bool, model_hash_s gm.add_submodule( node.target, OpenVINOGraphModule(openvino_submodule, partition_id, use_python_fusion_cache, - model_hash_str=model_hash_str), + model_hash_str=model_hash_str, options=options), ) partition_id = partition_id + 1 @@ -142,7 +145,7 @@ def partition_graph(gm: GraphModule, use_python_fusion_cache: bool, model_hash_s return gm -def openvino_execute_partitioned(gm: GraphModule, *args, executor_parameters=None): +def openvino_execute_partitioned(gm: GraphModule, *args, executor_parameters=None, options=None): executor_parameters = executor_parameters or DEFAULT_OPENVINO_PYTHON_CONFIG global partitioned_modules @@ -162,7 +165,7 @@ def openvino_execute_partitioned(gm: GraphModule, *args, executor_parameters=Non if signature not in partitioned_modules: partitioned_modules[signature] = partition_graph(gm, use_python_fusion_cache=use_python_fusion_cache, - model_hash_str=model_hash_str) + model_hash_str=model_hash_str, options=options) return partitioned_modules[signature](*args) diff 
--git a/src/bindings/python/src/openvino/properties/intel_auto/__init__.py b/src/bindings/python/src/openvino/properties/intel_auto/__init__.py index 2d8a52ac10920f..23486becc306b4 100644 --- a/src/bindings/python/src/openvino/properties/intel_auto/__init__.py +++ b/src/bindings/python/src/openvino/properties/intel_auto/__init__.py @@ -2,7 +2,11 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +# Enums +from openvino._pyopenvino.properties.intel_auto import SchedulePolicy + # Properties import openvino._pyopenvino.properties.intel_auto as __intel_auto from openvino.properties._properties import __make_properties + __make_properties(__intel_auto, __name__) diff --git a/src/bindings/python/src/openvino/runtime/op/__init__.py b/src/bindings/python/src/openvino/runtime/op/__init__.py index 069976998f415c..32ec2e8b909344 100644 --- a/src/bindings/python/src/openvino/runtime/op/__init__.py +++ b/src/bindings/python/src/openvino/runtime/op/__init__.py @@ -8,11 +8,10 @@ # flake8: noqa -import numpy as np - from openvino._pyopenvino.op import Constant from openvino._pyopenvino.op import assign from openvino._pyopenvino.op import Parameter from openvino._pyopenvino.op import if_op from openvino._pyopenvino.op import loop from openvino._pyopenvino.op import tensor_iterator +from openvino._pyopenvino.op import Result diff --git a/src/bindings/python/src/openvino/runtime/opset1/ops.py b/src/bindings/python/src/openvino/runtime/opset1/ops.py index 4f3c9629534adb..b5e8b63ec21dcb 100644 --- a/src/bindings/python/src/openvino/runtime/opset1/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset1/ops.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 """Factory functions for all openvino ops.""" -from typing import List, Optional, Union +from typing import List, Optional, Union, get_args import numpy as np from functools import partial @@ -352,7 +352,7 @@ def constant( @nameable_op def convert( data: NodeInput, - destination_type: Union[str, NumericType], + destination_type: Union[str, NumericType, Type], name: Optional[str] = None, ) -> Node: """Return node which casts input node values to specified type. @@ -362,12 +362,15 @@ def convert( :param name: Optional name for the output node. :return: New node performing the conversion operation. 
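    A short usage sketch (illustrative only; assumes the factory functions
    defined in this module):

        import numpy as np
        import openvino.runtime.opset1 as ops1
        from openvino import Type

        data = ops1.parameter([2, 2], dtype=np.float32)
        node_a = ops1.convert(data, Type.i32)  # Type object, enabled by this change
        node_b = ops1.convert(data, "i32")     # string form still accepted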
""" - if not isinstance(destination_type, str): - destination_type = get_element_type_str(destination_type) + _destination_type = None # type: Union[str, Type] + if isinstance(destination_type, get_args(NumericType)): + _destination_type = get_element_type_str(destination_type).lower() + else: + _destination_type = destination_type return _get_node_factory_opset1().create( "Convert", [as_node(data)], - {"destination_type": destination_type.lower()}, + {"destination_type": _destination_type}, ) diff --git a/src/bindings/python/src/openvino/runtime/opset13/__init__.py b/src/bindings/python/src/openvino/runtime/opset13/__init__.py index 9c544b0d7e775f..1abac3e07a7f2f 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/__init__.py +++ b/src/bindings/python/src/openvino/runtime/opset13/__init__.py @@ -144,7 +144,7 @@ from openvino.runtime.opset2.ops import reorg_yolo from openvino.runtime.opset1.ops import relu from openvino.runtime.opset1.ops import reshape -from openvino.runtime.opset1.ops import result +from openvino.runtime.opset13.ops import result from openvino.runtime.opset1.ops import reverse_sequence from openvino.runtime.opset3.ops import rnn_cell from openvino.runtime.opset5.ops import rnn_sequence diff --git a/src/bindings/python/src/openvino/runtime/opset13/ops.py b/src/bindings/python/src/openvino/runtime/opset13/ops.py index d323fc2e749526..4538612d0a3969 100644 --- a/src/bindings/python/src/openvino/runtime/opset13/ops.py +++ b/src/bindings/python/src/openvino/runtime/opset13/ops.py @@ -11,8 +11,8 @@ log = logging.getLogger(__name__) -from openvino.runtime import Node, Shape, Type -from openvino.runtime.op import Constant +from openvino.runtime import Node, Shape, Type, Output +from openvino.runtime.op import Constant, Result from openvino.runtime.opset_utils import _get_node_factory from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op from openvino.runtime.utils.types import ( @@ -292,11 +292,15 @@ def constant( - dtype force conversion of data. :return: The Constant node initialized with provided data. """ + def display_shared_memory_warning(warning_message: str) -> None: + if shared_memory: + log.warning(f"{warning_message}. Memory sharing is disabled by default. Set shared_memory=False to hide this warning.") + if isinstance(value, np.ndarray): _value, _shared_memory = value, shared_memory else: _value, _shared_memory = np.array(value), False - log.warning(f"Converting scalar to corresponding type of {_value.dtype}. Memory sharing is disabled by default.") + display_shared_memory_warning(f"Converting scalar to corresponding type of {_value.dtype}") # Handle type casting, when dtype is not None: if dtype: # Expect packed data, use different constructor to handle it correctly: @@ -305,30 +309,42 @@ def constant( raise RuntimeError( f"All values must be equal to 0 to initialize Constant with type of {dtype}. " "Please use `openvino.helpers` module and `pack_data`, `unpack_data` functions to fill this Constant's data.") - log.warning(f"Constant initialized with packed type of {dtype}. Memory sharing is disabled by default.") + display_shared_memory_warning(f"Constant initialized with packed type of {dtype}") return Constant(dtype, Shape(_value.shape), _value.flatten().tolist()) elif dtype in [Type.bf16]: if not np.allclose(_value, 0): raise RuntimeError( f"All values must be equal to 0 to initialize Constant with type of {dtype}. 
" "Please use `this_constant.data[:] = ...` to fill this Constant's data.") - log.warning(f"Constant initialized with OpenVINO custom {dtype}. Memory sharing is disabled by default.") + display_shared_memory_warning(f"Constant initialized with OpenVINO custom {dtype}") return Constant(dtype, Shape(_value.shape), _value.flatten().tolist()) # General use-case for all other types: else: _dtype = dtype.to_dtype() if isinstance(dtype, Type) else dtype if _dtype is int: - log.warning("Converting scalar type of undefined bitwidth to 32-bit integer. Memory sharing is disabled by default.") + display_shared_memory_warning("Converting scalar type of undefined bitwidth to 32-bit integer") _value, _shared_memory = _value.astype(np.int32), False elif _dtype is float: - log.warning("Converting scalar type of undefined bitwidth to 32-bit float. Memory sharing is disabled by default.") + display_shared_memory_warning("Converting scalar type of undefined bitwidth to 32-bit float") _value, _shared_memory = _value.astype(np.float32), False elif _dtype is bool: - log.warning("Converting bool type to numpy bool. Memory sharing is disabled by default.") + display_shared_memory_warning("Converting bool type to numpy bool") _value, _shared_memory = _value.astype(np.bool_), False else: if _dtype != _value.dtype: - log.warning(f"Converting value of {_value.dtype} to {_dtype}. Memory sharing is disabled by default.") + display_shared_memory_warning(f"Converting value of {_value.dtype} to {_dtype}") _value, _shared_memory = _value.astype(_dtype), False # Create Constant itself: return Constant(_value, shared_memory=_shared_memory) + + +@unary_op +def result(data: Union[Node, Output, NumericData], name: Optional[str] = None) -> Node: + """Return a node which represents an output of a graph (Model). 
+ + :param data: The tensor containing the input data + :return: Result node + """ + if isinstance(data, Node): + return Result(data.output(0)) + return Result(data) diff --git a/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py b/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py index 683b4bf4efbc34..f0b7e96fbf0d48 100644 --- a/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py +++ b/src/bindings/python/src/openvino/runtime/utils/data_helpers/data_dispatcher.py @@ -8,7 +8,7 @@ import numpy as np from openvino._pyopenvino import ConstOutput, Tensor, Type -from openvino.runtime.utils.data_helpers.wrappers import _InferRequestWrapper +from openvino.runtime.utils.data_helpers.wrappers import _InferRequestWrapper, OVDict ContainerTypes = Union[dict, list, tuple] ScalarTypes = Union[np.number, int, float] @@ -132,6 +132,14 @@ def _( return {k: to_c_style(v) if is_shared else v for k, v in inputs.items()} +@normalize_arrays.register(OVDict) +def _( + inputs: OVDict, + is_shared: bool = False, +) -> dict: + return {i: to_c_style(v) if is_shared else v for i, (_, v) in enumerate(inputs.items())} + + @normalize_arrays.register(list) @normalize_arrays.register(tuple) def _( @@ -174,6 +182,7 @@ def create_shared( @create_shared.register(dict) @create_shared.register(list) @create_shared.register(tuple) +@create_shared.register(OVDict) def _( inputs: ContainerTypes, request: _InferRequestWrapper, @@ -300,7 +309,7 @@ def update_inputs(inputs: dict, request: _InferRequestWrapper) -> dict: @singledispatch def create_copied( - inputs: Union[ContainerTypes, np.ndarray, ScalarTypes], + inputs: Union[ContainerTypes, OVDict, np.ndarray, ScalarTypes], request: _InferRequestWrapper, ) -> Union[dict, None]: # Check the special case of the array-interface @@ -314,8 +323,9 @@ def create_copied( @create_copied.register(dict) @create_copied.register(list) @create_copied.register(tuple) +@create_copied.register(OVDict) def _( - inputs: ContainerTypes, + inputs: Union[ContainerTypes, OVDict], request: _InferRequestWrapper, ) -> dict: return update_inputs(normalize_arrays(inputs, is_shared=False), request) @@ -346,7 +356,7 @@ def _( def _data_dispatch( request: _InferRequestWrapper, - inputs: Union[ContainerTypes, Tensor, np.ndarray, ScalarTypes] = None, + inputs: Union[ContainerTypes, OVDict, Tensor, np.ndarray, ScalarTypes] = None, is_shared: bool = False, ) -> Union[dict, Tensor]: if inputs is None: diff --git a/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py b/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py index 1bf23a7cad4f30..bb535aa31c6485 100644 --- a/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py +++ b/src/bindings/python/src/openvino/runtime/utils/data_helpers/wrappers.py @@ -4,13 +4,7 @@ import numpy as np -# TODO: remove this WA and refactor OVDict when Python3.8 -# becomes minimal supported version. 
-try: - from functools import singledispatchmethod -except ImportError: - from singledispatchmethod import singledispatchmethod # type: ignore[no-redef] - +from functools import singledispatchmethod from collections.abc import Mapping from typing import Dict, Set, Tuple, Union, Iterator, Optional from typing import KeysView, ItemsView, ValuesView diff --git a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp index 2e70b63b5851dd..23e10bedadc72a 100644 --- a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp +++ b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp @@ -282,8 +282,14 @@ void regmodule_properties(py::module m) { py::module m_intel_auto = m_properties.def_submodule("intel_auto", "openvino.runtime.properties.intel_auto submodule that simulates ov::intel_auto"); + // Submodule intel_auto - enums + py::enum_<ov::intel_auto::SchedulePolicy>(m_intel_auto, "SchedulePolicy", py::arithmetic()) + .value("ROUND_ROBIN", ov::intel_auto::SchedulePolicy::ROUND_ROBIN) + .value("DEVICE_PRIORITY", ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY) + .value("DEFAULT", ov::intel_auto::SchedulePolicy::DEFAULT); wrap_property_RW(m_intel_auto, ov::intel_auto::device_bind_buffer, "device_bind_buffer"); wrap_property_RW(m_intel_auto, ov::intel_auto::enable_startup_fallback, "enable_startup_fallback"); wrap_property_RW(m_intel_auto, ov::intel_auto::enable_runtime_fallback, "enable_runtime_fallback"); + wrap_property_RW(m_intel_auto, ov::intel_auto::schedule_policy, "schedule_policy"); } diff --git a/src/bindings/python/src/pyopenvino/graph/model.cpp b/src/bindings/python/src/pyopenvino/graph/model.cpp index 6f5e1d2ff3d13a..374bbe8704eae6 100644 --- a/src/bindings/python/src/pyopenvino/graph/model.cpp +++ b/src/bindings/python/src/pyopenvino/graph/model.cpp @@ -111,7 +111,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model. :param results: List of results. - :type results: List[openvino.runtime.Node] + :type results: List[op.Result] :param sinks: List of Nodes to be used as Sinks (e.g. Assign ops). :type sinks: List[openvino.runtime.Node] :param parameters: List of parameters. @@ -221,7 +221,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of results. - :type results: List[openvino.runtime.Node] + :type results: List[op.Result] :param sinks: List of Nodes to be used as Sinks (e.g. Assign ops). :type sinks: List[openvino.runtime.Node] :param parameters: List of parameters. @@ -274,7 +274,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of results. - :type results: List[openvino.runtime.Node] + :type results: List[op.Result] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param variables: List of variables. @@ -538,7 +538,7 @@ void regclass_graph_Model(py::module m) { Return a list of model outputs. :return: a list of model's result nodes. - :rtype: List[openvino.runtime.Node] + :rtype: List[op.Result] )"); model.def_property_readonly("results", &ov::Model::get_results, @@ -546,7 +546,7 @@ void regclass_graph_Model(py::module m) { Return a list of model outputs. :return: a list of model's result nodes.
- :rtype: List[openvino.runtime.Node] + :rtype: List[op.Result] )"); model.def("get_result", &ov::Model::get_result, @@ -554,7 +554,7 @@ void regclass_graph_Model(py::module m) { Return single result. :return: Node object representing result. - :rtype: openvino.runtime.Node + :rtype: op.Result )"); model.def_property_readonly("result", &ov::Model::get_result, @@ -562,7 +562,7 @@ void regclass_graph_Model(py::module m) { Return single result. :return: Node object representing result. - :rtype: openvino.runtime.Node + :rtype: op.Result )"); model.def("get_result_index", (int64_t(ov::Model::*)(const ov::Output&) const) & ov::Model::get_result_index, @@ -747,7 +747,7 @@ void regclass_graph_Model(py::module m) { Delete Result node from the list of results. Method will not delete node from graph. :param result: Result node to delete. - :type result: openvino.runtime.Node + :type result: op.Result )"); model.def("remove_parameter", @@ -827,7 +827,7 @@ void regclass_graph_Model(py::module m) { Method doesn't validate graph, it should be done manually after all changes. :param results: new Result nodes. - :type results: List[openvino.runtime.Node] + :type results: List[op.Result] )"); model.def( diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp index ad83891ebaa68a..10453341e38657 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp @@ -115,10 +115,10 @@ void regclass_graph_op_If(py::module m) { Sets new output from the operation associated with results of each sub-graphs. :param then_result: result from then_body. - :type then_result: openvino.runtime.Node + :type then_result: op.Result :param else_result: result from else_body. - :type else_result: openvino.runtime.Node + :type else_result: op.Result :return: output from operation. 
:rtype: openvino.runtime.Output diff --git a/src/bindings/python/src/pyopenvino/graph/ops/result.cpp b/src/bindings/python/src/pyopenvino/graph/ops/result.cpp index 022982a27999aa..c8576c8ecaf2ab 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/result.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/result.cpp @@ -20,6 +20,8 @@ void regclass_graph_op_Result(py::module m) { result.doc() = "openvino.runtime.op.Result wraps ov::op::v0::Result"; + result.def(py::init<const ov::Output<ov::Node>&>()); + result.def("get_output_partial_shape", &ov::Node::get_output_partial_shape, py::arg("index")); result.def("get_output_element_type", &ov::Node::get_output_element_type, py::arg("index")); diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index 232a12e75281ae..2d94ea21f3092e 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -172,6 +172,8 @@ py::object from_ov_any(const ov::Any& any) { return py::cast(any.as()); } else if (any.is()) { return py::cast(any.as()); + } else if (any.is<ov::intel_auto::SchedulePolicy>()) { + return py::cast(any.as<ov::intel_auto::SchedulePolicy>()); } else if (any.is()) { return py::cast(any.as()); } else if (any.is()) { @@ -357,6 +359,8 @@ ov::Any py_object_to_any(const py::object& py_obj) { return py::cast(py_obj); } else if (py::isinstance(py_obj)) { return py::cast(py_obj); + } else if (py::isinstance<ov::intel_auto::SchedulePolicy>(py_obj)) { + return py::cast<ov::intel_auto::SchedulePolicy>(py_obj); } else if (py::isinstance(py_obj)) { return py::cast(py_obj); } else if (py::isinstance(py_obj)) { diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index e16797f2c6a225..0e7aa6055e0143 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -20,6 +20,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/properties.hpp" +#include "openvino/runtime/auto/properties.hpp" #include "openvino/pass/serialize.hpp" namespace py = pybind11; diff --git a/src/bindings/python/tests/test_graph/test_basic.py b/src/bindings/python/tests/test_graph/test_basic.py index 1c5c8b10bbe538..259a8c1d73b2bc 100644 --- a/src/bindings/python/tests/test_graph/test_basic.py +++ b/src/bindings/python/tests/test_graph/test_basic.py @@ -25,6 +25,7 @@ from openvino._pyopenvino import DescriptorTensor from openvino.runtime.utils.types import get_element_type +from tests.utils.helpers import generate_model_with_memory def test_graph_api(): @@ -143,6 +144,8 @@ def test_convert_to_bool(destination_type, input_data): pytest.param(np.float64, (-16383, 16383), np.int64, np.float64), pytest.param("f32", (-8, 8), np.int32, np.float32), pytest.param("f64", (-16383, 16383), np.int64, np.float64), + pytest.param(Type.f32, (-8, 8), np.int32, np.float32), + pytest.param(Type.f64, (-16383, 16383), np.int64, np.float64), ], ) def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type): @@ -554,12 +557,7 @@ def test_multiple_outputs(): def test_sink_model_ctor(): - input_data = ops.parameter([2, 2], name="input_data", dtype=np.float32) - rv = ops.read_value(input_data, "var_id_667", np.float32, [2, 2]) - add = ops.add(rv, input_data, name="MemoryAdd") - node = ops.assign(add, "var_id_667") - res = ops.result(add, "res") - model = Model(results=[res], sinks=[node], parameters=[input_data], name="TestModel") + model = generate_model_with_memory(input_shape=[2, 2], data_type=np.float32) ordered_ops = model.get_ordered_ops() op_types =
[op.get_type_name() for op in ordered_ops] @@ -570,7 +568,7 @@ def test_sink_model_ctor(): assert len(model.get_ops()) == 5 assert model.get_output_size() == 1 assert model.get_output_op(0).get_type_name() == "Result" - assert model.get_output_element_type(0) == input_data.get_element_type() + assert model.get_output_element_type(0) == model.get_parameters()[0].get_element_type() assert list(model.get_output_shape(0)) == [2, 2] assert (model.get_parameters()[0].get_partial_shape()) == PartialShape([2, 2]) assert len(model.get_parameters()) == 1 diff --git a/src/bindings/python/tests/test_graph/test_manager.py b/src/bindings/python/tests/test_graph/test_manager.py index 3a1b7740ab840f..95be2a1df2c7ac 100644 --- a/src/bindings/python/tests/test_graph/test_manager.py +++ b/src/bindings/python/tests/test_graph/test_manager.py @@ -1,8 +1,7 @@ +# -*- coding: utf-8 -*- # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -# flake8: noqa - import os import numpy as np @@ -15,6 +14,7 @@ from tests.test_graph.util import count_ops_of_type from tests.utils.helpers import create_filename_for_test, compare_models + def create_model(): shape = [100, 100, 2] parameter_a = ops.parameter(shape, dtype=np.float32, name="A") @@ -40,7 +40,8 @@ def test_constant_folding(): assert count_ops_of_type(model, node_ceil) == 0 assert count_ops_of_type(model, node_constant) == 1 - new_const = model.get_results()[0].input(0).get_source_output().get_node() + result = model.get_results()[0] + new_const = result.input(0).get_source_output().get_node() values_out = new_const.get_vector() values_expected = [0.0, 1.0, 0.0, -2.0, 3.0, 3.0] @@ -48,14 +49,14 @@ def test_constant_folding(): # request - https://docs.pytest.org/en/7.1.x/reference/reference.html#request -@pytest.fixture +@pytest.fixture() def prepare_ir_paths(request, tmp_path): xml_path, bin_path = create_filename_for_test(request.node.name, tmp_path) yield xml_path, bin_path - + # IR Files deletion should be done after `Model` is destructed. 
- # It may be achieved by splitting scopes (`Model` will be destructed + # It may be achieved by splitting scopes (`Model` will be destructed # just after test scope finished), or by calling `del Model` os.remove(xml_path) os.remove(bin_path) @@ -104,7 +105,7 @@ def test_serialize_separate_paths_args(prepare_ir_paths): def test_serialize_pass_mixed_args_kwargs(prepare_ir_paths): core = Core() - + shape = [3, 2] parameter_a = ops.parameter(shape, dtype=np.float32, name="A") parameter_b = ops.parameter(shape, dtype=np.float32, name="B") @@ -123,7 +124,7 @@ def test_serialize_pass_mixed_args_kwargs(prepare_ir_paths): def test_serialize_pass_mixed_args_kwargs_v2(prepare_ir_paths): core = Core() - + xml_path, bin_path = prepare_ir_paths model = create_model() pass_manager = Manager() @@ -175,7 +176,7 @@ def test_default_version(prepare_ir_paths): assert compare_models(model, res_model) -def test_default_version_IR_V11_separate_paths(prepare_ir_paths): +def test_default_version_ir_v11_separate_paths(prepare_ir_paths): core = Core() xml_path, bin_path = prepare_ir_paths diff --git a/src/bindings/python/tests/test_graph/test_ops.py b/src/bindings/python/tests/test_graph/test_ops.py index 6591cb8f577323..433b798dec0829 100644 --- a/src/bindings/python/tests/test_graph/test_ops.py +++ b/src/bindings/python/tests/test_graph/test_ops.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 @@ -12,6 +13,7 @@ from openvino.runtime import AxisSet from openvino.runtime.op import Constant, Parameter + @pytest.mark.parametrize(("ov_op", "expected_ov_str", "expected_type"), [ (lambda a, b: a + b, "Add", Type.f32), (ov.add, "Add", Type.f32), @@ -34,9 +36,9 @@ def test_binary_op(ov_op, expected_ov_str, expected_type): element_type = Type.f32 shape = Shape([2, 2]) - A = Parameter(element_type, shape) - B = Parameter(element_type, shape) - node = ov_op(A, B) + param1 = Parameter(element_type, shape) + param2 = Parameter(element_type, shape) + node = ov_op(param1, param2) assert node.get_type_name() == expected_ov_str assert node.get_output_size() == 1 @@ -48,10 +50,10 @@ def test_add_with_mul(): element_type = Type.f32 shape = Shape([4]) - A = Parameter(element_type, shape) - B = Parameter(element_type, shape) - C = Parameter(element_type, shape) - node = ov.multiply(ov.add(A, B), C) + param1 = Parameter(element_type, shape) + param2 = Parameter(element_type, shape) + param3 = Parameter(element_type, shape) + node = ov.multiply(ov.add(param1, param2), param3) assert node.get_type_name() == "Multiply" assert node.get_output_size() == 1 @@ -85,8 +87,8 @@ def test_unary_op(ov_op, expected_ov_str): element_type = Type.f32 shape = Shape([4]) - A = Parameter(element_type, shape) - node = ov_op(A) + param1 = Parameter(element_type, shape) + node = ov_op(param1) assert node.get_type_name() == expected_ov_str assert node.get_output_size() == 1 @@ -97,8 +99,8 @@ def test_unary_op(ov_op, expected_ov_str): def test_reshape(): element_type = Type.f32 shape = Shape([2, 3]) - A = Parameter(element_type, shape) - node = ov.reshape(A, Shape([3, 2]), special_zero=False) + param1 = Parameter(element_type, shape) + node = ov.reshape(param1, Shape([3, 2]), special_zero=False) assert node.get_type_name() == "Reshape" assert node.get_output_size() == 1 @@ -108,8 +110,8 @@ def test_reshape(): def test_broadcast(): element_type = Type.f32 - A = Parameter(element_type, Shape([3])) - node = ov.broadcast(A, [3, 3]) + param1 = Parameter(element_type, Shape([3])) + node = 
ov.broadcast(param1, [3, 3]) assert node.get_type_name() == "Broadcast" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 3] @@ -134,10 +136,10 @@ def test_constant(const, args, expectation): def test_concat(): element_type = Type.f32 - A = Parameter(element_type, Shape([1, 2])) - B = Parameter(element_type, Shape([1, 2])) - C = Parameter(element_type, Shape([1, 2])) - node = ov.concat([A, B, C], axis=0) + param1 = Parameter(element_type, Shape([1, 2])) + param2 = Parameter(element_type, Shape([1, 2])) + param3 = Parameter(element_type, Shape([1, 2])) + node = ov.concat([param1, param2, param3], axis=0) assert node.get_type_name() == "Concat" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [3, 2] @@ -162,10 +164,10 @@ def test_axisset(): def test_select(): element_type = Type.f32 - A = Parameter(Type.boolean, Shape([1, 2])) - B = Parameter(element_type, Shape([1, 2])) - C = Parameter(element_type, Shape([1, 2])) - node = ov.select(A, B, C) + param1 = Parameter(Type.boolean, Shape([1, 2])) + param2 = Parameter(element_type, Shape([1, 2])) + param3 = Parameter(element_type, Shape([1, 2])) + node = ov.select(param1, param2, param3) assert node.get_type_name() == "Select" assert node.get_output_size() == 1 assert list(node.get_output_shape(0)) == [1, 2] @@ -175,7 +177,7 @@ def test_select(): def test_max_pool_1d(): element_type = Type.f32 shape = Shape([1, 1, 10]) - A = Parameter(element_type, shape) + param1 = Parameter(element_type, shape) window_shape = [3] strides = [1] * len(window_shape) @@ -187,7 +189,7 @@ def test_max_pool_1d(): idx_elem_type = "i32" model = ov.max_pool( - A, + param1, strides, dilations, pads_begin, @@ -204,10 +206,11 @@ def test_max_pool_1d(): assert model.get_output_element_type(0) == element_type assert model.get_output_element_type(1) == Type.i32 + def test_max_pool_1d_with_strides(): element_type = Type.f32 shape = Shape([1, 1, 10]) - A = Parameter(element_type, shape) + param1 = Parameter(element_type, shape) window_shape = [3] strides = [2] pads_begin = [0] * len(window_shape) @@ -218,7 +221,7 @@ def test_max_pool_1d_with_strides(): idx_elem_type = "i32" model = ov.max_pool( - A, + param1, strides, dilations, pads_begin, @@ -236,10 +239,11 @@ def test_max_pool_1d_with_strides(): assert model.get_output_element_type(0) == element_type assert model.get_output_element_type(1) == Type.i32 + def test_max_pool_2d(): element_type = Type.f32 shape = Shape([1, 1, 10, 10]) - A = Parameter(element_type, shape) + param1 = Parameter(element_type, shape) window_shape = [3, 3] rounding_type = "floor" auto_pad = "explicit" @@ -251,7 +255,7 @@ def test_max_pool_2d(): pads_end = [0, 0] model = ov.max_pool( - A, + param1, strides, dilations, pads_begin, @@ -272,7 +276,7 @@ def test_max_pool_2d(): def test_max_pool_2d_with_strides(): element_type = Type.f32 shape = Shape([1, 1, 10, 10]) - A = Parameter(element_type, shape) + param1 = Parameter(element_type, shape) strides = [2, 2] dilations = [1, 1] pads_begin = [0, 0] @@ -283,7 +287,7 @@ def test_max_pool_2d_with_strides(): idx_elem_type = "i32" model = ov.max_pool( - A, + param1, strides, dilations, pads_begin, diff --git a/src/bindings/python/tests/test_graph/test_ops_result.py b/src/bindings/python/tests/test_graph/test_ops_result.py new file mode 100644 index 00000000000000..105ce81849ac86 --- /dev/null +++ b/src/bindings/python/tests/test_graph/test_ops_result.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2023 Intel Corporation +# 
SPDX-License-Identifier: Apache-2.0 + +import numpy as np + +from openvino import PartialShape, Model, Type +import openvino.runtime.opset13 as ops +from openvino.runtime.op import Result + + +def test_result(): + param = ops.parameter(PartialShape([1]), dtype=np.float32, name="param") + relu1 = ops.relu(param, name="relu1") + result = Result(relu1.output(0)) + assert result.get_output_element_type(0) == Type.f32 + assert result.get_output_partial_shape(0) == PartialShape([1]) + model = Model([result], [param], "test_model") + + result2 = ops.result(relu1, "res2") + model.add_results([result2]) + + results = model.get_results() + assert len(results) == 2 + assert results[1].get_output_element_type(0) == Type.f32 + assert results[1].get_output_partial_shape(0) == PartialShape([1]) + model.remove_result(result) + assert len(model.results) == 1 diff --git a/src/bindings/python/tests/test_graph/test_pad.py b/src/bindings/python/tests/test_graph/test_pad.py index e5d993e59f3746..1a6b96c0c964cb 100644 --- a/src/bindings/python/tests/test_graph/test_pad.py +++ b/src/bindings/python/tests/test_graph/test_pad.py @@ -1,24 +1,18 @@ +# -*- coding: utf-8 -*- # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -# flake8: noqa import numpy as np import pytest import openvino.runtime.opset13 as ov -from openvino import Type - - -@pytest.mark.parametrize( - "pad_mode", - [ - "constant", - "edge", - "reflect", - "symmetric", - ] -) +from openvino import Type + + +@pytest.mark.parametrize("pad_mode", [ + "constant", "edge", "reflect", "symmetric", +]) def test_pad_mode(pad_mode): pads_begin = np.array([0, 1], dtype=np.int32) pads_end = np.array([2, 3], dtype=np.int32) @@ -32,13 +26,10 @@ def test_pad_mode(pad_mode): assert model.get_output_element_type(0) == Type.i32 -@pytest.mark.parametrize( - ("pads_begin", "pads_end", "output_shape"), - [ - ([-1, -1], [-1, -1], [1, 2]), - ([2, -1], [-1, 3], [4, 6]), - ] -) +@pytest.mark.parametrize(("pads_begin", "pads_end", "output_shape"), [ + ([-1, -1], [-1, -1], [1, 2]), + ([2, -1], [-1, 3], [4, 6]), +]) def test_pad_being_and_end(pads_begin, pads_end, output_shape): input_param = ov.parameter((3, 4), name="input", dtype=np.int32) model = ov.pad(input_param, pads_begin, pads_end, "constant") diff --git a/src/bindings/python/tests/test_runtime/test_async_infer_request.py b/src/bindings/python/tests/test_runtime/test_async_infer_request.py new file mode 100644 index 00000000000000..6b8140809d03f7 --- /dev/null +++ b/src/bindings/python/tests/test_runtime/test_async_infer_request.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from collections.abc import Iterable +from copy import deepcopy +import numpy as np +import pytest + +import openvino.runtime.opset13 as ops +from openvino import ( + Core, + InferRequest, + AsyncInferQueue, + Model, + Shape, + Type, + Tensor, +) +from tests import skip_need_mock_op +from tests.utils.helpers import generate_image, get_relu_model, generate_model_with_memory + + +def concat_model_with_data(device, ov_type, numpy_dtype): + core = Core() + + input_shape = [5] + + params = [] + params += [ops.parameter(input_shape, ov_type)] + if ov_type == Type.bf16: + params += [ops.parameter(input_shape, ov_type)] + else: + params += [ops.parameter(input_shape, numpy_dtype)] + + model = Model(ops.concat(params, 0), params) + compiled_model = core.compile_model(model, device) + request = compiled_model.create_infer_request() + tensor1 = 
Tensor(ov_type, input_shape) + tensor1.data[:] = np.array([6, 7, 8, 9, 0]) + array1 = np.array([1, 2, 3, 4, 5], dtype=numpy_dtype) + + return request, tensor1, array1 + + +def create_model_with_memory(input_shape, data_type): + input_data = ops.parameter(input_shape, name="input_data", dtype=data_type) + rv = ops.read_value(input_data, "var_id_667", data_type, input_shape) + add = ops.add(rv, input_data, name="MemoryAdd") + node = ops.assign(add, "var_id_667") + res = ops.result(add, "res") + model = Model(results=[res], sinks=[node], parameters=[input_data], name="name") + return model + + +def abs_model_with_data(device, ov_type, numpy_dtype): + input_shape = [1, 4] + param = ops.parameter(input_shape, ov_type) + model = Model(ops.abs(param), [param]) + core = Core() + compiled_model = core.compile_model(model, device) + + request = compiled_model.create_infer_request() + + tensor1 = Tensor(ov_type, input_shape) + tensor1.data[:] = np.array([6, -7, -8, 9]) + + array1 = np.array([[-1, 2, 5, -3]]).astype(numpy_dtype) + + return compiled_model, request, tensor1, array1 + + +@pytest.mark.parametrize("share_inputs", [True, False]) +def test_infer_queue(device, share_inputs): + jobs = 8 + num_request = 4 + core = Core() + model = get_relu_model() + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, num_request) + jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)] + + def callback(request, job_id): + jobs_done[job_id]["finished"] = True + jobs_done[job_id]["latency"] = request.latency + + img = None + + if not share_inputs: + img = generate_image() + infer_queue.set_callback(callback) + assert infer_queue.is_ready() + + for i in range(jobs): + if share_inputs: + img = generate_image() + infer_queue.start_async({"data": img}, i, share_inputs=share_inputs) + infer_queue.wait_all() + assert all(job["finished"] for job in jobs_done) + assert all(job["latency"] > 0 for job in jobs_done) + + +def test_infer_queue_iteration(device): + core = Core() + param = ops.parameter([10]) + model = Model(ops.relu(param), [param]) + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, 1) + assert isinstance(infer_queue, Iterable) + for infer_req in infer_queue: + assert isinstance(infer_req, InferRequest) + + it = iter(infer_queue) + infer_request = next(it) + assert isinstance(infer_request, InferRequest) + assert infer_request.userdata is None + with pytest.raises(StopIteration): + next(it) + + +def test_infer_queue_userdata_is_empty(device): + core = Core() + param = ops.parameter([10]) + model = Model(ops.relu(param), [param]) + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, 1) + assert infer_queue.userdata == [None] + + +def test_infer_queue_userdata_is_empty_more_jobs(device): + core = Core() + param = ops.parameter([10]) + model = Model(ops.relu(param), [param]) + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, 5) + assert infer_queue.userdata == [None, None, None, None, None] + + +def test_infer_queue_fail_on_cpp_model(device): + jobs = 6 + num_request = 4 + core = Core() + model = get_relu_model() + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, num_request) + + def callback(request, _): + request.get_tensor("Unknown") + + img = generate_image() + infer_queue.set_callback(callback) + + with pytest.raises(RuntimeError) as e: + for _ in 
range(jobs): + infer_queue.start_async({"data": img}) + infer_queue.wait_all() + + assert "Port for tensor name Unknown was not found" in str(e.value) + + +def test_infer_queue_fail_on_py_model(device): + jobs = 1 + num_request = 1 + core = Core() + model = get_relu_model() + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, num_request) + + def callback(request, _): + request = request + 21 + + img = generate_image() + infer_queue.set_callback(callback) + + with pytest.raises(TypeError) as e: + for _ in range(jobs): + infer_queue.start_async({"data": img}) + infer_queue.wait_all() + + assert "unsupported operand type(s) for +" in str(e.value) + + +@skip_need_mock_op +@pytest.mark.parametrize("with_callback", [False, True]) +def test_infer_queue_fail_in_inference(device, with_callback): + jobs = 6 + num_request = 4 + core = Core() + data = ops.parameter([10], dtype=np.float32, name="data") + k_op = ops.parameter(Shape([]), dtype=np.int32, name="k") + emb = ops.topk(data, k_op, axis=0, mode="max", sort="value") + model = Model(emb, [data, k_op]) + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, num_request) + + def callback(request, _): + pytest.fail("Callback should not be called") + + if with_callback: + infer_queue.set_callback(callback) + + data_tensor = Tensor(np.arange(10).astype(np.float32)) + k_tensor = Tensor(np.array(11, dtype=np.int32)) + + with pytest.raises(RuntimeError) as e: + for _ in range(jobs): + infer_queue.start_async({"data": data_tensor, "k": k_tensor}) + infer_queue.wait_all() + + assert "Can not clone with new dims" in str(e.value) + + +def test_infer_queue_get_idle_handle(device): + param = ops.parameter([10]) + model = Model(ops.relu(param), [param]) + core = Core() + compiled_model = core.compile_model(model, device) + queue = AsyncInferQueue(compiled_model, 2) + niter = 10 + + for _ in range(len(queue)): + queue.start_async() + queue.wait_all() + for request in queue: + assert request.wait_for(0) + + for _ in range(niter): + idle_id = queue.get_idle_request_id() + assert queue[idle_id].wait_for(0) + queue.start_async() + queue.wait_all() + + +@pytest.mark.parametrize("share_inputs", [True, False]) +def test_results_async_infer(device, share_inputs): + jobs = 8 + num_request = 4 + core = Core() + model = get_relu_model() + compiled_model = core.compile_model(model, device) + infer_queue = AsyncInferQueue(compiled_model, num_request) + jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)] + + def callback(request, job_id): + jobs_done[job_id]["finished"] = True + jobs_done[job_id]["latency"] = request.latency + + img = generate_image() + infer_queue.set_callback(callback) + for i in range(jobs): + infer_queue.start_async({"data": img}, i, share_inputs=share_inputs) + infer_queue.wait_all() + + request = compiled_model.create_infer_request() + outputs = request.infer({0: img}) + + for i in range(num_request): + assert np.allclose(list(outputs.values()), list( + infer_queue[i].results.values())) + + +@pytest.mark.parametrize("share_inputs", [True, False]) +def test_array_like_input_async_infer_queue(device, share_inputs): + class ArrayLikeObject: + # Array-like object accepted by np.array to test inputs similar to torch tensor and tf.Tensor + def __init__(self, array) -> None: + self.data = array + + def __array__(self): + return self.data + + jobs = 8 + ov_type = Type.f32 + input_shape = [2, 2] + input_data = np.ascontiguousarray([[-2, -1], [0, 1]]) + 
param = ops.parameter(input_shape, ov_type) + layer = ops.abs(param) + model = Model([layer], [param]) + core = Core() + compiled_model = core.compile_model(model, "CPU") + + model_input_object = ArrayLikeObject(input_data) + model_input_list = [ + [ArrayLikeObject(deepcopy(input_data))] for _ in range(jobs)] + + # Test single array-like object in AsyncInferQueue.start_async() + infer_queue_object = AsyncInferQueue(compiled_model, jobs) + for _i in range(jobs): + infer_queue_object.start_async(model_input_object) + infer_queue_object.wait_all() + + for i in range(jobs): + assert np.array_equal( + infer_queue_object[i].get_output_tensor().data, np.abs(input_data)) + + # Test list of array-like objects in AsyncInferQueue.start_async() + infer_queue_list = AsyncInferQueue(compiled_model, jobs) + for i in range(jobs): + infer_queue_list.start_async( + model_input_list[i], share_inputs=share_inputs) + infer_queue_list.wait_all() + + for i in range(jobs): + assert np.array_equal( + infer_queue_list[i].get_output_tensor().data, np.abs(input_data)) + + +@pytest.mark.parametrize("shared_flag", [True, False]) +def test_shared_memory_deprecation(device, shared_flag): + compiled, request, _, input_data = abs_model_with_data( + device, Type.f32, np.float32) + + with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): + _ = compiled(input_data, shared_memory=shared_flag) + + with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): + _ = request.infer(input_data, shared_memory=shared_flag) + + with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): + request.start_async(input_data, shared_memory=shared_flag) + request.wait() + + queue = AsyncInferQueue(compiled, jobs=1) + + with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): + queue.start_async(input_data, shared_memory=shared_flag) + queue.wait_all() diff --git a/src/bindings/python/tests/test_runtime/test_properties.py b/src/bindings/python/tests/test_runtime/test_properties.py index 6a76ccb57cb6ab..44d38fb4afeabd 100644 --- a/src/bindings/python/tests/test_runtime/test_properties.py +++ b/src/bindings/python/tests/test_runtime/test_properties.py @@ -110,6 +110,14 @@ def test_deprecation(): (log.Level.TRACE, "Level.TRACE", 4), ), ), + ( + intel_auto.SchedulePolicy, + ( + (intel_auto.SchedulePolicy.ROUND_ROBIN, "SchedulePolicy.ROUND_ROBIN", 0), + (intel_auto.SchedulePolicy.DEVICE_PRIORITY, "SchedulePolicy.DEVICE_PRIORITY", 1), + (intel_auto.SchedulePolicy.DEFAULT, "SchedulePolicy.DEVICE_PRIORITY", 1), + ), + ), ], ) def test_properties_enums(ov_enum, expected_values): diff --git a/src/bindings/python/tests/test_runtime/test_infer_request.py b/src/bindings/python/tests/test_runtime/test_sync_infer_request.py similarity index 80% rename from src/bindings/python/tests/test_runtime/test_infer_request.py rename to src/bindings/python/tests/test_runtime/test_sync_infer_request.py index 632c63355fb2bf..f223b49f1a16b8 100644 --- a/src/bindings/python/tests/test_runtime/test_infer_request.py +++ b/src/bindings/python/tests/test_runtime/test_sync_infer_request.py @@ -2,7 +2,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from collections.abc import Iterable +from contextlib import nullcontext as does_not_raise from copy import deepcopy import numpy as np import os @@ -14,19 +14,17 @@ from openvino import ( Core, CompiledModel, - 
InferRequest, - AsyncInferQueue, Model, Layout, PartialShape, Shape, Type, Tensor, + compile_model, ) from openvino.runtime import ProfilingInfo from openvino.preprocess import PrePostProcessor -from tests import skip_need_mock_op from tests.utils.helpers import generate_image, get_relu_model, generate_model_with_memory @@ -473,54 +471,6 @@ def test_async_single_input(device, ov_type, numpy_dtype, share_inputs): assert np.array_equal(request.get_output_tensor().data, np.abs(tensor1.data)) -@pytest.mark.parametrize("share_inputs", [True, False]) -def test_infer_queue(device, share_inputs): - jobs = 8 - num_request = 4 - core = Core() - model = get_relu_model() - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, num_request) - jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)] - - def callback(request, job_id): - jobs_done[job_id]["finished"] = True - jobs_done[job_id]["latency"] = request.latency - - img = None - - if not share_inputs: - img = generate_image() - infer_queue.set_callback(callback) - assert infer_queue.is_ready() - - for i in range(jobs): - if share_inputs: - img = generate_image() - infer_queue.start_async({"data": img}, i, share_inputs=share_inputs) - infer_queue.wait_all() - assert all(job["finished"] for job in jobs_done) - assert all(job["latency"] > 0 for job in jobs_done) - - -def test_infer_queue_iteration(device): - core = Core() - param = ops.parameter([10]) - model = Model(ops.relu(param), [param]) - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, 1) - assert isinstance(infer_queue, Iterable) - for infer_req in infer_queue: - assert isinstance(infer_req, InferRequest) - - it = iter(infer_queue) - infer_request = next(it) - assert isinstance(infer_request, InferRequest) - assert infer_request.userdata is None - with pytest.raises(StopIteration): - next(it) - - def test_get_compiled_model(device): core = Core() param = ops.parameter([10]) @@ -537,119 +487,6 @@ def test_get_compiled_model(device): assert np.allclose(ref[0], test[0]) -def test_infer_queue_userdata_is_empty(device): - core = Core() - param = ops.parameter([10]) - model = Model(ops.relu(param), [param]) - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, 1) - assert infer_queue.userdata == [None] - - -def test_infer_queue_userdata_is_empty_more_jobs(device): - core = Core() - param = ops.parameter([10]) - model = Model(ops.relu(param), [param]) - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, 5) - assert infer_queue.userdata == [None, None, None, None, None] - - -def test_infer_queue_fail_on_cpp_model(device): - jobs = 6 - num_request = 4 - core = Core() - model = get_relu_model() - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, num_request) - - def callback(request, _): - request.get_tensor("Unknown") - - img = generate_image() - infer_queue.set_callback(callback) - - with pytest.raises(RuntimeError) as e: - for _ in range(jobs): - infer_queue.start_async({"data": img}) - infer_queue.wait_all() - - assert "Port for tensor name Unknown was not found" in str(e.value) - - -def test_infer_queue_fail_on_py_model(device): - jobs = 1 - num_request = 1 - core = Core() - model = get_relu_model() - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, num_request) - - def callback(request, _): - request 
= request + 21 - - img = generate_image() - infer_queue.set_callback(callback) - - with pytest.raises(TypeError) as e: - for _ in range(jobs): - infer_queue.start_async({"data": img}) - infer_queue.wait_all() - - assert "unsupported operand type(s) for +" in str(e.value) - - -@skip_need_mock_op -@pytest.mark.parametrize("with_callback", [False, True]) -def test_infer_queue_fail_in_inference(device, with_callback): - jobs = 6 - num_request = 4 - core = Core() - data = ops.parameter([10], dtype=np.float32, name="data") - k_op = ops.parameter(Shape([]), dtype=np.int32, name="k") - emb = ops.topk(data, k_op, axis=0, mode="max", sort="value") - model = Model(emb, [data, k_op]) - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, num_request) - - def callback(request, _): - pytest.fail("Callback should not be called") - - if with_callback: - infer_queue.set_callback(callback) - - data_tensor = Tensor(np.arange(10).astype(np.float32)) - k_tensor = Tensor(np.array(11, dtype=np.int32)) - - with pytest.raises(RuntimeError) as e: - for _ in range(jobs): - infer_queue.start_async({"data": data_tensor, "k": k_tensor}) - infer_queue.wait_all() - - assert "Can not clone with new dims" in str(e.value) - - -def test_infer_queue_get_idle_handle(device): - param = ops.parameter([10]) - model = Model(ops.relu(param), [param]) - core = Core() - compiled_model = core.compile_model(model, device) - queue = AsyncInferQueue(compiled_model, 2) - niter = 10 - - for _ in range(len(queue)): - queue.start_async() - queue.wait_all() - for request in queue: - assert request.wait_for(0) - - for _ in range(niter): - idle_id = queue.get_idle_request_id() - assert queue[idle_id].wait_for(0) - queue.start_async() - queue.wait_all() - - @pytest.mark.parametrize("data_type", [np.float32, np.int32, @@ -713,33 +550,6 @@ def test_get_results(device, share_inputs): assert np.array_equal(results[output], request.results[output]) -@pytest.mark.parametrize("share_inputs", [True, False]) -def test_results_async_infer(device, share_inputs): - jobs = 8 - num_request = 4 - core = Core() - model = get_relu_model() - compiled_model = core.compile_model(model, device) - infer_queue = AsyncInferQueue(compiled_model, num_request) - jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)] - - def callback(request, job_id): - jobs_done[job_id]["finished"] = True - jobs_done[job_id]["latency"] = request.latency - - img = generate_image() - infer_queue.set_callback(callback) - for i in range(jobs): - infer_queue.start_async({"data": img}, i, share_inputs=share_inputs) - infer_queue.wait_all() - - request = compiled_model.create_infer_request() - outputs = request.infer({0: img}) - - for i in range(num_request): - assert np.allclose(list(outputs.values()), list(infer_queue[i].results.values())) - - @pytest.mark.skipif( os.environ.get("TEST_DEVICE") not in ["GPU"], reason="Device dependent test", @@ -979,48 +789,6 @@ def __array__(self): assert np.array_equal(request.get_output_tensor().data, np.abs(input_data)) -@pytest.mark.parametrize("share_inputs", [True, False]) -def test_array_like_input_async_infer_queue(device, share_inputs): - class ArrayLikeObject: - # Array-like object accepted by np.array to test inputs similar to torch tensor and tf.Tensor - def __init__(self, array) -> None: - self.data = array - - def __array__(self): - return self.data - - jobs = 8 - ov_type = Type.f32 - input_shape = [2, 2] - input_data = np.ascontiguousarray([[-2, -1], [0, 1]]) - param = 
ops.parameter(input_shape, ov_type) - layer = ops.abs(param) - model = Model([layer], [param]) - core = Core() - compiled_model = core.compile_model(model, "CPU") - - model_input_object = ArrayLikeObject(input_data) - model_input_list = [[ArrayLikeObject(deepcopy(input_data))] for _ in range(jobs)] - - # Test single array-like object in AsyncInferQueue.start_async() - infer_queue_object = AsyncInferQueue(compiled_model, jobs) - for _i in range(jobs): - infer_queue_object.start_async(model_input_object) - infer_queue_object.wait_all() - - for i in range(jobs): - assert np.array_equal(infer_queue_object[i].get_output_tensor().data, np.abs(input_data)) - - # Test list of array-like objects in AsyncInferQueue.start_async() - infer_queue_list = AsyncInferQueue(compiled_model, jobs) - for i in range(jobs): - infer_queue_list.start_async(model_input_list[i], share_inputs=share_inputs) - infer_queue_list.wait_all() - - for i in range(jobs): - assert np.array_equal(infer_queue_list[i].get_output_tensor().data, np.abs(input_data)) - - def test_convert_infer_request(device): request, arr_1, arr_2 = create_simple_request_and_inputs(device) inputs = [arr_1, arr_2] @@ -1166,27 +934,6 @@ def test_not_writable_inputs_infer(device, share_inputs, input_data, change_flag assert not np.shares_memory(input_data[0], input_tensor.data) -@pytest.mark.parametrize("shared_flag", [True, False]) -def test_shared_memory_deprecation(device, shared_flag): - compiled, request, _, input_data = abs_model_with_data(device, Type.f32, np.float32) - - with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): - _ = compiled(input_data, shared_memory=shared_flag) - - with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): - _ = request.infer(input_data, shared_memory=shared_flag) - - with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): - request.start_async(input_data, shared_memory=shared_flag) - request.wait() - - queue = AsyncInferQueue(compiled, jobs=1) - - with pytest.warns(FutureWarning, match="`shared_memory` is deprecated and will be removed in 2024.0"): - queue.start_async(input_data, shared_memory=shared_flag) - queue.wait_all() - - @pytest.mark.parametrize("share_inputs", [True, False]) @pytest.mark.parametrize("share_outputs", [True, False]) @pytest.mark.parametrize("is_positional", [True, False]) @@ -1242,3 +989,35 @@ def test_infer_request_share_memory(device, share_inputs, share_outputs, is_posi else: assert not out_tensor_shares assert results[0].flags["OWNDATA"] is True + + +def test_output_result_to_input(): + def create_model_1(): + param1 = ops.parameter(Shape([1, 1]), Type.i32) + param1.set_friendly_name("input_1") + add = ops.add(param1, ops.constant([1], Type.i32)) + add1 = ops.add(param1, ops.constant([[5]], Type.i32)) + model = Model([add, add1], [param1]) + model.output(0).tensor.set_names({"output_1_1"}) + model.output(1).tensor.set_names({"outputs_1_2"}) + return model + + def create_model_2(): + param1 = ops.parameter(Shape([1, 1]), Type.i32) + param1.set_friendly_name("output_1_1") + param2 = ops.parameter(Shape([1, 1]), Type.i32) + param2.set_friendly_name("outputs_1_2") + + add = ops.add(param1, param2) + model = Model([add], [param1, param2]) + model.output(0).tensor.set_names({"output_2_1"}) + return model + + model_1 = create_model_1() + model_2 = create_model_2() + compiled_1, compiled_2 = compile_model(model_1), compile_model(model_2) + input_data = 
np.array([[1]]) + result_1 = compiled_1(input_data, share_inputs=False) + with does_not_raise(): + result_2 = compiled_2(result_1, share_inputs=False) + assert np.array_equal(result_2[0], [[8]]) diff --git a/src/bindings/python/tests/test_utils/test_data_dispatch.py b/src/bindings/python/tests/test_utils/test_data_dispatch.py index 09b175f5f41d47..d78a4405177e4e 100644 --- a/src/bindings/python/tests/test_utils/test_data_dispatch.py +++ b/src/bindings/python/tests/test_utils/test_data_dispatch.py @@ -7,8 +7,8 @@ import numpy as np from tests.utils.helpers import generate_relu_compiled_model -from openvino import Model, Type, Shape, Core, Tensor -from openvino.runtime import ConstOutput + +from openvino import Type, Shape, Tensor from openvino.runtime.utils.data_helpers import _data_dispatch is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index 4b056912212de0..cdd813f60c1bcc 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -625,8 +625,8 @@ def concat_files(input_files, output_file): ext_modules = find_prebuilt_extensions(get_install_dirs_list(PY_INSTALL_CFG)) entry_points = find_entry_points(PY_INSTALL_CFG) -long_description_md = OPENVINO_SOURCE_DIR / "docs" / "install_guides" / "pypi-openvino-rt.md" -md_files = [long_description_md, OPENVINO_SOURCE_DIR / "docs" / "install_guides" / "pre-release-note.md"] +long_description_md = OPENVINO_SOURCE_DIR / "docs" / "dev" / "pypi_publish" / "pypi-openvino-rt.md" +md_files = [long_description_md, OPENVINO_SOURCE_DIR / "docs" / "dev" / "pypi_publish" / "pre-release-note.md"] docs_url = "https://docs.openvino.ai/2023.0/index.html" if os.getenv("CI_BUILD_DEV_TAG"): diff --git a/src/cmake/openvino.cmake b/src/cmake/openvino.cmake index ad73269d475748..80ff498d548182 100644 --- a/src/cmake/openvino.cmake +++ b/src/cmake/openvino.cmake @@ -21,7 +21,7 @@ endif() add_library(${TARGET_NAME} $ $ - $ + $ $ $ $ @@ -96,9 +96,9 @@ export(TARGETS ${TARGET_NAME} NAMESPACE openvino:: APPEND FILE "${CMAKE_BINARY_DIR}/OpenVINOTargets.cmake") install(TARGETS ${TARGET_NAME} EXPORT OpenVINOTargets - RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${OV_CPACK_COMP_CORE} - ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE} - LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT ${OV_CPACK_COMP_CORE} + RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT ${OV_CPACK_COMP_CORE} ${OV_CPACK_COMP_CORE_EXCLUDE_ALL} + ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE} ${OV_CPACK_COMP_CORE_EXCLUDE_ALL} + LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT ${OV_CPACK_COMP_CORE} ${OV_CPACK_COMP_CORE_EXCLUDE_ALL} NAMELINK_COMPONENT ${OV_CPACK_COMP_CORE_DEV} INCLUDES DESTINATION ${OV_CPACK_INCLUDEDIR} ${OV_CPACK_INCLUDEDIR}/ie) @@ -147,7 +147,8 @@ ov_cpack_add_component(${OV_CPACK_COMP_CORE_DEV} if(ENABLE_PLUGINS_XML) install(FILES $/plugins.xml DESTINATION ${OV_CPACK_PLUGINSDIR} - COMPONENT ${OV_CPACK_COMP_CORE}) + COMPONENT ${OV_CPACK_COMP_CORE} + ${OV_CPACK_COMP_CORE_EXCLUDE_ALL}) if(ENABLE_TESTS) # for InferenceEngineUnitTest @@ -164,7 +165,8 @@ install(EXPORT OpenVINOTargets FILE OpenVINOTargets.cmake NAMESPACE openvino:: DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV}) + COMPONENT ${OV_CPACK_COMP_CORE_DEV} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) # build tree @@ -227,12 +229,14 @@ configure_file("${OpenVINO_SOURCE_DIR}/cmake/templates/OpenVINOConfig-version.cm 
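The new test_output_result_to_input above hinges on a naming contract: the first model's output tensor names match the second model's input tensor names, so the result mapping can be fed straight back in. A hedged C++ sketch of the same chaining, assuming compiled_1 and compiled_2 are ov::CompiledModel instances whose tensor names line up as in the test:

```cpp
// Sketch: route model_1's named outputs into model_2's like-named inputs.
// compiled_1/compiled_2 are assumed to follow the naming contract of the test
// ("output_1_1", "outputs_1_2" on one side, matching input names on the other).
ov::InferRequest req1 = compiled_1.create_infer_request();
ov::InferRequest req2 = compiled_2.create_infer_request();

req1.infer();  // inputs of the first model are assumed to be preset

for (const auto& output : compiled_1.outputs()) {
    const std::string name = output.get_any_name();
    // Reuse the produced tensor directly as the second model's input.
    req2.set_tensor(name, req1.get_tensor(output));
}
req2.infer();
```

With the test's constants this computes (1 + 1) + (1 + 5) = 8, which is what the [[8]] assertion checks.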
install(FILES "${CMAKE_BINARY_DIR}/share/InferenceEngineConfig.cmake" "${CMAKE_BINARY_DIR}/InferenceEngineConfig-version.cmake" DESTINATION ${OV_CPACK_IE_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV}) + COMPONENT ${OV_CPACK_COMP_CORE_DEV} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) install(FILES "${CMAKE_BINARY_DIR}/share/OpenVINOConfig.cmake" "${CMAKE_BINARY_DIR}/OpenVINOConfig-version.cmake" DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV}) + COMPONENT ${OV_CPACK_COMP_CORE_DEV} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) # # Generate and install openvino.pc pkg-config file @@ -313,5 +317,6 @@ if(ENABLE_PKGCONFIG_GEN) install(FILES "${pkgconfig_out}" DESTINATION "${OV_CPACK_RUNTIMEDIR}/pkgconfig" - COMPONENT ${OV_CPACK_COMP_CORE_DEV}) + COMPONENT ${OV_CPACK_COMP_CORE_DEV} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) endif() diff --git a/src/common/low_precision_transformations/CMakeLists.txt b/src/common/low_precision_transformations/CMakeLists.txt index 54a5bf6da7f5d2..5ea74c9ab509a5 100644 --- a/src/common/low_precision_transformations/CMakeLists.txt +++ b/src/common/low_precision_transformations/CMakeLists.txt @@ -28,7 +28,6 @@ ov_build_target_faster(${TARGET_NAME}_obj UNITY) target_link_libraries(${TARGET_NAME}_obj PRIVATE openvino::itt) target_include_directories(${TARGET_NAME}_obj PRIVATE $ - $> $>) add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME}_obj) @@ -46,7 +45,6 @@ target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) target_include_directories(${TARGET_NAME} INTERFACE $ - $> $>) # LTO diff --git a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp index 2d6d2e3286ef25..99ed46e500ada9 100644 --- a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp +++ b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include "openvino/pass/pattern/matcher.hpp" #include "low_precision/layer_transformation.hpp" namespace ov { @@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API BatchToSpaceTransformation : public LayerTransforma OPENVINO_RTTI("BatchToSpaceTransformation", "0"); BatchToSpaceTransformation(const Params& params = Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ngraph::pattern::Matcher &m) override; + bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/lpt_visibility.hpp b/src/common/low_precision_transformations/include/low_precision/lpt_visibility.hpp index 016f34b0e3b50a..aedc2ce7616e19 100644 --- a/src/common/low_precision_transformations/include/low_precision/lpt_visibility.hpp +++ b/src/common/low_precision_transformations/include/low_precision/lpt_visibility.hpp @@ -4,7 +4,7 @@ #pragma once -#include "ngraph/visibility.hpp" +#include "openvino/core/visibility.hpp" /** * @file lpt_visibility.hpp diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp index c3db52ce5d9e9d..8c369acdb3d00d 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp @@ -4,7 +4,7 @@ #pragma once -#include +#include "openvino/pass/pattern/matcher.hpp" #include "low_precision/eltwise_base_transformation.hpp" namespace ov { @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API MultiplyPartialTransformation : public EltwiseBaseT public: OPENVINO_RTTI("MultiplyPartialTransformation", "0"); MultiplyPartialTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ngraph::pattern::Matcher &m) override; + bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp index 83e486af697ff7..5ca510079124c1 100644 --- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp @@ -126,8 +126,8 @@ class LP_TRANSFORMATIONS_API NetworkHelper { std::shared_ptr input = nullptr); static std::shared_ptr makeDequantizationSubtract( - const ngraph::Output& parent, - const ngraph::Output& subtract_constant); + const ov::Output& parent, + const ov::Output& subtract_constant); static bool areQuantizeAndDequantizeSupportedForSubtract(const std::shared_ptr& node, const std::vector& defaultPrecisions = precision_set::get_int8_support()); diff --git a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp index bacd7f1d7c5d59..86ad225177ee58 100644 --- a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp +++ b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include "openvino/pass/pattern/matcher.hpp" #include "low_precision/layer_transformation.hpp" namespace ov { @@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API SpaceToBatchTransformation : public LayerTransforma OPENVINO_RTTI("SpaceToBatchTransformation", "0"); SpaceToBatchTransformation(const Params& params = Params()); bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ngraph::pattern::Matcher &m) override; + bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp index a8999aeff8eec6..b679e0882f43e6 100644 --- a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp +++ b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp @@ -111,9 +111,9 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext& const auto weightsNode = std::make_shared(weightsPrecision, weightsShape, weightsBuffer); const size_t spatialDimsSize = pShape.rank().get_length() - 2; - ngraph::Strides strides(spatialDimsSize, 1ul); - ngraph::CoordinateDiff pads(spatialDimsSize, 0ul); - ngraph::Strides dilations(spatialDimsSize, 1ul); + ov::Strides strides(spatialDimsSize, 
1ul); + ov::CoordinateDiff pads(spatialDimsSize, 0ul); + ov::Strides dilations(spatialDimsSize, 1ul); const auto convolution = std::make_shared>( std::vector{ element::f32, element::f32 }, diff --git a/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp b/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp index 7015ede358c775..26e97fb4381e7b 100644 --- a/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp +++ b/src/common/low_precision_transformations/tests/batch_to_space_transformation.cpp @@ -11,7 +11,6 @@ #include #include -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -51,14 +50,14 @@ class BatchToSpaceTransformationTestValues { }; typedef std::tuple< - ngraph::PartialShape, + ov::PartialShape, BatchToSpaceTransformationTestValues> BatchToSpaceTransformationParams; class BatchToSpaceTransformation : public LayerTransformation, public testing::WithParamInterface { public: void SetUp() override { - const ngraph::PartialShape input_shape = std::get<0>(GetParam()); + const ov::PartialShape input_shape = std::get<0>(GetParam()); const BatchToSpaceTransformationTestValues test_values = std::get<1>(GetParam()); actualFunction = ngraph::builder::subgraph::BatchToSpaceFunction::get( @@ -85,7 +84,7 @@ class BatchToSpaceTransformation : public LayerTransformation, } static std::string getTestCaseName(testing::TestParamInfo obj) { - const ngraph::PartialShape shape = std::get<0>(obj.param); + const ov::PartialShape shape = std::get<0>(obj.param); const BatchToSpaceTransformationTestValues testValues = std::get<1>(obj.param); std::ostringstream result; @@ -109,7 +108,7 @@ TEST_P(BatchToSpaceTransformation, CompareFunctions) { } namespace testValues { -const std::vector input_shapes = { +const std::vector input_shapes = { {4, 3, 50, 86} }; diff --git a/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp b/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp index 46d9dbaff521dc..865fb8172c1baf 100644 --- a/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convert_subtract_constant_transformation.cpp @@ -29,7 +29,7 @@ class ConvertSubtractConstantTransformationTestValues { ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; ngraph::builder::subgraph::Constant weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; ov::element::Type precisionAfterOperation; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; }; diff --git a/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp b/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp index c2cc9f8d52feef..d34cf021d92988 100644 --- a/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convolution_backprop_data_transformation.cpp @@ -38,8 +38,8 @@ class ConvolutionBackpropDataTransformationTestValues { public: ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; - ngraph:: 
builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; - ngraph:: builder::subgraph::DequantizationOperations dequantizationOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; std::shared_ptr weights; callback_function_type callback; @@ -85,7 +85,7 @@ class ConvolutionBackpropDataTransformationTestValues { public: ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; - ngraph:: builder::subgraph::DequantizationOperations dequantizationOnWeights; + ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; std::shared_ptr weights; bool transformed; diff --git a/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp b/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp index 7abcc64a15f40d..a5c0970af93ffd 100644 --- a/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convolution_qdq_transformation.cpp @@ -30,7 +30,7 @@ class ConvolutionQDqTransformationTestValues { ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; ngraph::builder::subgraph::Constant weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; ov::element::Type precisionAfterOperation; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; }; diff --git a/src/common/low_precision_transformations/tests/convolution_transformation.cpp b/src/common/low_precision_transformations/tests/convolution_transformation.cpp index 87ac3856aa699a..097c277a493156 100644 --- a/src/common/low_precision_transformations/tests/convolution_transformation.cpp +++ b/src/common/low_precision_transformations/tests/convolution_transformation.cpp @@ -29,7 +29,7 @@ class ConvolutionTransformationTestValues { ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; class Expected { @@ -37,7 +37,7 @@ class ConvolutionTransformationTestValues { ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; ov::element::Type precisionAfterOperation; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; ov::element::Type precisionAfterDequantization; diff --git a/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp b/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp index 81e84aaf4132b2..96477c9d205b6d 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_on_weights_with_unsupported_child.cpp @@ -28,13 +28,13 
@@ class FakeQuantizeOnWeightsWithUnsupportedChildTestValues { class Actual { public: std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; class Expected { public: std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; TestTransformationParams params; diff --git a/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp index 1d9a25cc5d87a1..27a76bec641c74 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_precision_selection_transformation.cpp @@ -26,15 +26,15 @@ using namespace ov::pass; namespace { class ActualValues { public: - ngraph:: builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; class ExpectedValues { public: element::Type fakeQuantizeOnDataOutPrecision; - ngraph:: builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantizeOnData; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; class FakeQuantizePrecisionSelectionTransformationTestValues { diff --git a/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp index 80363c85e234b8..27911421ca6d6a 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_transformation.cpp @@ -42,8 +42,8 @@ class FakeQuantizeTransformationTestValues { addNotPrecisionPreservedOperation(addNotPrecisionPreservedOperation) {} TestTransformationParams params; - ngraph:: builder::subgraph::FakeQuantizeOnDataWithConstant actual; - ngraph:: builder::subgraph::FakeQuantizeOnDataWithConstant expected; + ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant actual; + ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant expected; ov::element::Type expectedFakeQuantizeOnDataPrecision; std::map expectedValues; // add not precision preserved operation to set output precision for FakeQuantize diff --git a/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp index edbadea9c6bfc4..10ec95eb5e89c9 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_with_dq_not_optimal_transformation.cpp @@ -31,13 +31,13 @@ class FakeQuantizeWithNotOptimalTransformationTestValues { public: class Values { public: - ngraph:: builder::subgraph::FakeQuantizeOnDataWithConstant fqOnData; - ngraph:: builder::subgraph::DequantizationOperations::Convert convertOnData; - ngraph:: 
builder::subgraph::DequantizationOperations dequantizationOnData; - ngraph:: builder::subgraph::Constant constantOnWeights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fqOnWeights; - ngraph:: builder::subgraph::DequantizationOperations dequantizationOnWeights; - ngraph:: builder::subgraph::DequantizationOperations dequantizationAfter; + ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant fqOnData; + ngraph::builder::subgraph::DequantizationOperations::Convert convertOnData; + ngraph::builder::subgraph::DequantizationOperations dequantizationOnData; + ngraph::builder::subgraph::Constant constantOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fqOnWeights; + ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; + ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; }; TestTransformationParams params; Values actual; diff --git a/src/common/low_precision_transformations/tests/fake_quantize_with_dynamic_intervals_transformation.cpp b/src/common/low_precision_transformations/tests/fake_quantize_with_dynamic_intervals_transformation.cpp index 5756d0471edac4..b890e07a7bf1b5 100644 --- a/src/common/low_precision_transformations/tests/fake_quantize_with_dynamic_intervals_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fake_quantize_with_dynamic_intervals_transformation.cpp @@ -117,9 +117,9 @@ class FakeQuantizeWithDynamicIntervalsTransformation : public LayerTransformatio auto fakeQuantize = std::make_shared(input, inputLow, inputHigh, outputLow, outputHigh, levels); fakeQuantize->set_friendly_name("fakeQuantize"); - ngraph::ResultVector results{ std::make_shared(fakeQuantize) }; + ov::ResultVector results{ std::make_shared(fakeQuantize) }; - ngraph::ParameterVector inputs{ input }; + ov::ParameterVector inputs{ input }; if (as_type_ptr(inputLow)) { inputs.push_back(as_type_ptr(inputLow)); } diff --git a/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp b/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp index 0b01123792d20c..a38afe5cda7434 100644 --- a/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp +++ b/src/common/low_precision_transformations/tests/fold_convert_transformation.cpp @@ -54,8 +54,8 @@ class FoldConvertTransformation : public LayerTransformation, public testing::Wi output->set_friendly_name("output"); return std::make_shared( - ngraph::ResultVector{ std::make_shared(output) }, - ngraph::ParameterVector{ input }, + ov::ResultVector{ std::make_shared(output) }, + ov::ParameterVector{ input }, "FoldConvertTransformation"); }; actualFunction = createFunction(testValues.precision, inputShape, testValues.dequantizationActual); diff --git a/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp b/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp index 91d101559c6b36..42975c42797476 100644 --- a/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp +++ b/src/common/low_precision_transformations/tests/fold_fake_quantize_in_transformations.cpp @@ -28,7 +28,7 @@ class FoldFakeQuantizeInTransformationsTestValues { public: std::vector constValues; ov::element::Type constPrecision; - ngraph:: builder::subgraph::FakeQuantizeOnData fakeQuantize; + ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize; ov::element::Type fqOutPrecision; }; @@ -90,10 +90,10 @@ class FoldFakeQuantizeInTransformations 
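An aside on the mechanical pattern running through these test files: the stray space in `ngraph:: builder::subgraph::` is collapsed, and core types migrate from the ngraph:: to the ov:: namespace. During this transition the legacy ngraph names are, as far as I can tell, thin aliases of the ov types, which is what makes the rename behavior-preserving; a small compile-time check under that assumption (header paths are assumptions based on the compatibility layer's usual layout):

```cpp
// Sketch: confirm the legacy ngraph type names alias the ov types on this branch.
#include <type_traits>

#include <ngraph/coordinate_diff.hpp>
#include <ngraph/strides.hpp>
#include <openvino/core/coordinate_diff.hpp>
#include <openvino/core/strides.hpp>

static_assert(std::is_same<ngraph::Strides, ov::Strides>::value,
              "ngraph::Strides should alias ov::Strides");
static_assert(std::is_same<ngraph::CoordinateDiff, ov::CoordinateDiff>::value,
              "ngraph::CoordinateDiff should alias ov::CoordinateDiff");
```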
testValues.actual.fqOutPrecision); fq = ov::pass::low_precision::NetworkHelper::fold_fake_quantize(as_type_ptr(fq), testValues.roundValues); - ngraph::ResultVector results{std::make_shared(fq)}; + ov::ResultVector results{std::make_shared(fq)}; actualFunction = std::make_shared( results, - parameter ? ngraph::ParameterVector{parameter} : ngraph::ParameterVector{}, + parameter ? ov::ParameterVector{parameter} : ov::ParameterVector{}, "FoldFakeQuantizeFunction"); referenceFunction = @@ -115,11 +115,20 @@ class FoldFakeQuantizeInTransformations } }; +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122660 +TEST_P(FoldFakeQuantizeInTransformations, DISABLED_CompareFunctions) { + actualFunction->validate_nodes_and_infer_types(); + auto res = compare_functions(actualFunction, referenceFunction, true, false); + ASSERT_TRUE(res.first) << res.second; +} +#else TEST_P(FoldFakeQuantizeInTransformations, CompareFunctions) { actualFunction->validate_nodes_and_infer_types(); auto res = compare_functions(actualFunction, referenceFunction, true, false); ASSERT_TRUE(res.first) << res.second; } +#endif const std::vector testValues = { { diff --git a/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp b/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp index 253543d4ba1e4f..d016567af28292 100644 --- a/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp +++ b/src/common/low_precision_transformations/tests/get_dequantization_below_transformation.cpp @@ -24,8 +24,8 @@ using namespace ov::pass; class GetDequantizationBelowTestValues { public: - ngraph:: builder::subgraph::FakeQuantizeOnData fakeQuantize; - ngraph:: builder::subgraph::DequantizationOperations dequantization; + ngraph::builder::subgraph::FakeQuantizeOnData fakeQuantize; + ngraph::builder::subgraph::DequantizationOperations dequantization; }; inline std::ostream& operator<<(std::ostream& os, const std::vector& values) { diff --git a/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp b/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp index ee977f566c8bb5..b98f03997a74b8 100644 --- a/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp +++ b/src/common/low_precision_transformations/tests/group_convolution_transformation.cpp @@ -29,7 +29,7 @@ class GroupConvolutionTestValues { ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantization; std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; }; @@ -38,7 +38,7 @@ class GroupConvolutionTestValues { ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; ov::element::Type precisionAfterOperation; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; diff --git a/src/common/low_precision_transformations/tests/interpolate_transformation.cpp b/src/common/low_precision_transformations/tests/interpolate_transformation.cpp index 
32e6ff474f59d7..17eb8fc22374dd 100644 --- a/src/common/low_precision_transformations/tests/interpolate_transformation.cpp +++ b/src/common/low_precision_transformations/tests/interpolate_transformation.cpp @@ -27,7 +27,7 @@ using namespace ngraph::builder::subgraph; class interpAttributes { public: - ngraph::AxisSet axes; + ov::AxisSet axes; std::string mode; bool align_corners; bool antialias; @@ -36,7 +36,7 @@ class interpAttributes { interpAttributes() = default; - interpAttributes(const ngraph::AxisSet& axes, + interpAttributes(const ov::AxisSet& axes, const std::string& mode, const bool& align_corners, const bool& antialias, @@ -206,7 +206,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, @@ -233,7 +233,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, @@ -260,7 +260,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, @@ -287,7 +287,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, @@ -314,7 +314,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "linear", false, false, @@ -341,7 +341,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{1, 2, 3}, + ov::AxisSet{1, 2, 3}, "nearest", false, false, @@ -368,7 +368,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", true, false, @@ -395,7 +395,7 @@ const std::vector testValues { ov::Shape{}, LayerTransformation::createParamsU8I8(), interpAttributes( - ngraph::AxisSet{2, 3}, + ov::AxisSet{2, 3}, "nearest", false, false, diff --git a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp index 7aa5cc54009465..17ca061df0c952 100644 --- a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp +++ b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_dequantization.cpp @@ -24,7 +24,7 @@ class IsAsymmetricOnWeightsDequantizationTestValues { ov::element::Type precisionBeforeDequantization; ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; std::shared_ptr weights; - ngraph:: builder::subgraph::DequantizationOperations dequantizationOnWeights; + ngraph::builder::subgraph::DequantizationOperations dequantizationOnWeights; bool isAsymmetricOnWeights; }; diff --git a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp index 7e5022375de4de..e229209a507054 100644 --- a/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp +++ b/src/common/low_precision_transformations/tests/is_asymmetric_on_weights_fq.cpp @@ -25,7 +25,7 @@ class IsAsymmetricOnWeightsFakeQuantizeTestValues { ov::element::Type precisionBeforeDequantization; 
ngraph::builder::subgraph::DequantizationOperations dequantizationOnActivations; std::shared_ptr weights; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnWeights fakeQuantizeOnWeights; }; typedef std::tuple< diff --git a/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp b/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp index 7c89c647bcdcf8..36f44b00f60432 100644 --- a/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp +++ b/src/common/low_precision_transformations/tests/is_constant_path_transformation.cpp @@ -112,10 +112,10 @@ TEST(LPT, isConstantPathConvParentDqTransformation) { const auto conv = std::make_shared( input, weights, - ngraph::Strides{ 1, 1 }, - ngraph::CoordinateDiff{ 0, 0 }, - ngraph::CoordinateDiff{ 0, 0 }, - ngraph::Strides{ 1, 1 }); + ov::Strides{ 1, 1 }, + ov::CoordinateDiff{ 0, 0 }, + ov::CoordinateDiff{ 0, 0 }, + ov::Strides{ 1, 1 }); const auto dqAfterConv = makeDequantization(conv, DequantizationOperations{ {}, {}, {0.1f} }); const bool result = ov::pass::low_precision::NetworkHelper::isConstantPath(dqAfterConv); @@ -129,10 +129,10 @@ TEST(LPT, isConstantPathGroupConvParentDqTransformation) { const auto groupConv = std::make_shared( input, weights, - ngraph::Strides{ 1, 1 }, - ngraph::CoordinateDiff{ 0, 0 }, - ngraph::CoordinateDiff{ 0, 0 }, - ngraph::Strides{ 1, 1 }); + ov::Strides{ 1, 1 }, + ov::CoordinateDiff{ 0, 0 }, + ov::CoordinateDiff{ 0, 0 }, + ov::Strides{ 1, 1 }); const auto dqAfterGroupConv = makeDequantization(groupConv, DequantizationOperations{ {}, {}, {0.1f} }); const bool result = ov::pass::low_precision::NetworkHelper::isConstantPath(dqAfterGroupConv); diff --git a/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp b/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp index 9d07dc94bed16a..c6de0adc2c5d5a 100644 --- a/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp +++ b/src/common/low_precision_transformations/tests/is_function_quantized_transformation.cpp @@ -21,7 +21,7 @@ class IsFunctionQuantizedTransformationValues { public: ov::Shape shape; ov::element::Type precision; - ngraph:: builder::subgraph::FakeQuantizeOnDataWithConstant fakeQuantize; + ngraph::builder::subgraph::FakeQuantizeOnDataWithConstant fakeQuantize; bool constantSubgraphOnParameters; bool inputOnParameters; @@ -44,8 +44,8 @@ class IsFunctionQuantizedTransformation : public LayerTransformation, public tes replace_node(fakeQuantize->get_input_node_shared_ptr(3), input); } - ngraph::ResultVector results{ std::make_shared(fakeQuantize) }; - model = std::make_shared(results, ngraph::ParameterVector{ input }, "IsFunctionQuantizedFunction"); + ov::ResultVector results{ std::make_shared(fakeQuantize) }; + model = std::make_shared(results, ov::ParameterVector{ input }, "IsFunctionQuantizedFunction"); model->validate_nodes_and_infer_types(); } diff --git a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp index 35a80285def102..e059d886c10839 100644 --- a/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp +++ b/src/common/low_precision_transformations/tests/lpt_avoid_shapeof_propagation_test.cpp @@ -150,10 +150,10 @@ TEST(LPT, 
AvoidDequantizationToShapeOfPropagationConvolutionTransformation) { auto convolution = std::make_shared(mul, mulOnWeights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + ov::Strides{1, 1}); auto shapeOf = std::make_shared(convolution); @@ -180,10 +180,10 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationConvolutionBackpropDataTransfor auto convolutionBackpropData = std::make_shared(mul, mulOnWeights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + ov::Strides{1, 1}); auto shapeOf = std::make_shared(convolutionBackpropData); @@ -255,10 +255,10 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationGroupConvolutionTransformation) auto groupConvolution = std::make_shared(mul, reshapeOnWeights, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}); + ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + ov::Strides{1, 1}); auto shapeOf = std::make_shared(groupConvolution); auto result1 = std::make_shared(groupConvolution); @@ -646,9 +646,9 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationStridedSliceTransformation) { auto convert = std::make_shared(input, element::f32); auto mul = std::make_shared(convert, ov::op::v0::Constant::create(element::f32, {}, {2.f})); - auto beginParam = ngraph::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 0, 0}); - auto endParam = ngraph::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 2, 1, 1}); - auto stridesParam = ngraph::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 1, 1}); + auto beginParam = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 0, 0, 0}); + auto endParam = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 2, 1, 1}); + auto stridesParam = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 1, 1}); auto stridedSlice = std::make_shared(mul, beginParam, endParam, @@ -674,7 +674,7 @@ TEST(LPT, AvoidDequantizationToShapeOfPropagationTransposeTransformation) { auto convert = std::make_shared(input, element::f32); auto mul = std::make_shared(convert, ov::op::v0::Constant::create(element::f32, {}, {2.f})); - auto constant = ngraph::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 1, 3, 2}); + auto constant = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 1, 3, 2}); auto transpose = std::make_shared(mul, constant); auto shapeOf = std::make_shared(transpose); diff --git a/src/common/low_precision_transformations/tests/multiply_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_transformation.cpp index 5fe7dec67e146f..a9aa1ecf0390aa 100644 --- a/src/common/low_precision_transformations/tests/multiply_transformation.cpp +++ b/src/common/low_precision_transformations/tests/multiply_transformation.cpp @@ -28,7 +28,7 @@ using namespace ngraph::builder::subgraph; class MultiplyBranch { public: ngraph::builder::subgraph::Constant constant; - ngraph::element::Type input_precision; + ov::element::Type input_precision; ngraph::builder::subgraph::DequantizationOperations dequantization; ngraph::builder::subgraph::FakeQuantizeOnData fake_quantize; }; diff --git a/src/common/low_precision_transformations/tests/mvn_transformation.cpp 
b/src/common/low_precision_transformations/tests/mvn_transformation.cpp index 1dbafbb445d10c..004324dd198ffb 100644 --- a/src/common/low_precision_transformations/tests/mvn_transformation.cpp +++ b/src/common/low_precision_transformations/tests/mvn_transformation.cpp @@ -41,7 +41,7 @@ class MVNTransformationTestValues { ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; }; - ngraph::AxisSet reductionAxes; + ov::AxisSet reductionAxes; bool normalizeVariance; TestTransformationParams params; Actual actual; diff --git a/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp b/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp index 78a2fb21e6a111..a9aea8fbdad3ae 100644 --- a/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp +++ b/src/common/low_precision_transformations/tests/normalize_l2_transformation.cpp @@ -47,7 +47,7 @@ class NormalizeL2TransformationTestValues { typedef std::tuple< ov::element::Type, ov::PartialShape, - ngraph::op::EpsMode, + ov::op::EpsMode, std::vector, NormalizeL2TransformationTestValues> NormalizeL2TransformationParams; @@ -56,7 +56,7 @@ class NormalizeL2Transformation : public LayerTransformation, public testing::Wi void SetUp() override { ov::element::Type precision; ov::PartialShape shape; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; std::vector axes; NormalizeL2TransformationTestValues params; std::tie(precision, shape, epsMode, axes, params) = GetParam(); @@ -88,7 +88,7 @@ class NormalizeL2Transformation : public LayerTransformation, public testing::Wi ov::element::Type precision; ov::PartialShape shape; ov::Shape axes; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; NormalizeL2TransformationTestValues params; std::tie(precision, shape, epsMode, axes, params) = obj.param; @@ -115,9 +115,9 @@ const std::vector precisions = { ov::element::f16 }; -std::vector epsMode = { - ngraph::op::EpsMode::ADD, - ngraph::op::EpsMode::MAX +std::vector epsMode = { + ov::op::EpsMode::ADD, + ov::op::EpsMode::MAX }; std::vector> axes = { diff --git a/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp b/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp index a530f76ecef052..54ad8be926ab25 100644 --- a/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp +++ b/src/common/low_precision_transformations/tests/separate_in_standalone_branch_transformation.cpp @@ -72,11 +72,11 @@ class SeparateInStandaloneBranchTransformation : reshape2->set_friendly_name("reshape2"); return std::make_shared( - ngraph::ResultVector{ + ov::ResultVector{ std::make_shared(reshape1), std::make_shared(reshape2) }, - std::vector> { input }, + std::vector> { input }, "SeparateInStandaloneBranchTransformation"); }; actualFunction = createActualFunction(testValues.precisionBefore, shape, testValues.dequantization); @@ -103,11 +103,11 @@ class SeparateInStandaloneBranchTransformation : reshape2->set_friendly_name("reshape2"); return std::make_shared( - ngraph::ResultVector{ + ov::ResultVector{ std::make_shared(reshape1), std::make_shared(reshape2) }, - std::vector> { input }, + std::vector> { input }, "SeparateInStandaloneBranchTransformation"); }; referenceFunction = createReferenceFunction(testValues.precisionBefore, shape, testValues.dequantization); diff --git a/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp 
b/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp index ed81d3b46dc9e3..cce257e05d6a15 100644 --- a/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp +++ b/src/common/low_precision_transformations/tests/space_to_batch_transformation.cpp @@ -11,7 +11,6 @@ #include #include -#include #include #include "common_test_utils/ov_test_utils.hpp" @@ -51,13 +50,13 @@ class SpaceToBatchTransformationTestValues { }; typedef std::tuple< - ngraph::PartialShape, + ov::PartialShape, SpaceToBatchTransformationTestValues> SpaceToBatchTransformationParams; class SpaceToBatchTransformation : public LayerTransformation, public testing::WithParamInterface { public: void SetUp() override { - const ngraph::PartialShape input_shape = std::get<0>(GetParam()); + const ov::PartialShape input_shape = std::get<0>(GetParam()); const SpaceToBatchTransformationTestValues test_values = std::get<1>(GetParam()); actualFunction = ngraph::builder::subgraph::SpaceToBatchFunction::get( @@ -84,7 +83,7 @@ class SpaceToBatchTransformation : public LayerTransformation, public testing::W } static std::string getTestCaseName(testing::TestParamInfo obj) { - const ngraph::PartialShape shape = std::get<0>(obj.param); + const ov::PartialShape shape = std::get<0>(obj.param); const SpaceToBatchTransformationTestValues testValues = std::get<1>(obj.param); std::ostringstream result; @@ -108,7 +107,7 @@ TEST_P(SpaceToBatchTransformation, CompareFunctions) { } namespace testValues { -const std::vector shapes = { +const std::vector shapes = { {1, 3, 100, 171}, }; diff --git a/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp b/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp index a71f50afb68468..be8f901b32f9cf 100644 --- a/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp +++ b/src/common/low_precision_transformations/tests/transformer_is_function_quantized.cpp @@ -27,8 +27,8 @@ namespace { class TestValues { public: - ngraph:: builder::subgraph::FakeQuantizeOnData fqOnData; - ngraph:: builder::subgraph::FakeQuantizeOnWeights fqOnWeights; + ngraph::builder::subgraph::FakeQuantizeOnData fqOnData; + ngraph::builder::subgraph::FakeQuantizeOnWeights fqOnWeights; }; inline std::ostream& operator<<(std::ostream& out, const TestValues& testValue) { diff --git a/src/common/preprocessing/src/CMakeLists.txt b/src/common/preprocessing/src/CMakeLists.txt index 9e3fd2d3789a02..80eafe970a6455 100644 --- a/src/common/preprocessing/src/CMakeLists.txt +++ b/src/common/preprocessing/src/CMakeLists.txt @@ -217,7 +217,8 @@ ov_developer_package_export_targets(TARGET ${TARGET_NAME}) if(BUILD_SHARED_LIBS) install(TARGETS ${TARGET_NAME} - LIBRARY DESTINATION ${OV_CPACK_PLUGINSDIR} COMPONENT ${OV_CPACK_COMP_CORE}) + LIBRARY DESTINATION ${OV_CPACK_PLUGINSDIR} COMPONENT ${OV_CPACK_COMP_CORE} + ${OV_CPACK_COMP_CORE_EXCLUDE_ALL}) else() ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) endif() diff --git a/src/common/snippets/include/snippets/lowered/linear_ir.hpp b/src/common/snippets/include/snippets/lowered/linear_ir.hpp index 55722b4c03d3b3..8dc44b2361af1f 100644 --- a/src/common/snippets/include/snippets/lowered/linear_ir.hpp +++ b/src/common/snippets/include/snippets/lowered/linear_ir.hpp @@ -63,7 +63,7 @@ class LinearIR { LinearIR() = default; LinearIR(const std::shared_ptr& m, const std::shared_ptr& factory, Config config = {}); - ExpressionPtr create_expression(const std::shared_ptr& n, 
const std::vector& inputs); + ExpressionPtr create_expression(const std::shared_ptr& n, const std::vector& inputs) const; std::shared_ptr clone() const; static LinearIR::container deep_copy_range(LinearIR::container::const_iterator begin, @@ -125,7 +125,6 @@ class LinearIR { iterator find_after(iterator it, const ExpressionPtr& target) const; void init_emitters(const std::shared_ptr& target); - void serialize(const std::string& xml, const std::string& bin) const; class LoopManager; using LoopManagerPtr = std::shared_ptr; diff --git a/src/common/snippets/include/snippets/lowered/loop_manager.hpp b/src/common/snippets/include/snippets/lowered/loop_manager.hpp index b530e96c7d7a2d..93d1620f5fdbe7 100644 --- a/src/common/snippets/include/snippets/lowered/loop_manager.hpp +++ b/src/common/snippets/include/snippets/lowered/loop_manager.hpp @@ -21,9 +21,7 @@ class LinearIR::LoopManager { struct LoopPort { LoopPort() = default; - LoopPort(const ExpressionPort& port, bool is_scheduled = true) - : expr_port(std::make_shared(port)), is_incremented(is_scheduled) {} - + LoopPort(const ExpressionPort& port, bool is_incremented = true, size_t dim_idx = 0); std::shared_ptr clone_with_new_expr(const ExpressionPtr& new_expr) const; friend bool operator==(const LoopPort& lhs, const LoopPort& rhs); @@ -37,33 +35,68 @@ class LinearIR::LoopManager { int64_t ptr_increment = 0; int64_t finalization_offset = 0; int64_t data_size = 0; + size_t dim_idx = 0; // The numeration starts from the end (dim_idx = 0 -> is the most inner dimension) }; class LoopInfo { public: + enum {UNDEFINED_DIM_IDX = std::numeric_limits::max()}; LoopInfo() = default; - LoopInfo(size_t work_amount, size_t increment, size_t dim_idx, + LoopInfo(size_t work_amount, size_t increment, const std::vector& entries, - const std::vector& exits) - : work_amount(work_amount), increment(increment), dim_idx(dim_idx), - entry_points(entries), exit_points(exits), outer_splited_loop(false) {} - LoopInfo(size_t work_amount, size_t increment, size_t dim_idx, + const std::vector& exits, + bool outer_splited_loop = false) + : m_work_amount(work_amount), m_increment(increment), + m_entry_points(entries), m_exit_points(exits), m_outer_splited_loop(outer_splited_loop) {} + LoopInfo(size_t work_amount, size_t increment, const std::vector& entries, - const std::vector& exits); + const std::vector& exits, + bool outer_splited_loop = false); std::shared_ptr clone_with_new_expr(const ExressionMap& expr_map) const; - size_t work_amount = 0; - size_t increment = 0; - size_t dim_idx = 0; // The numeration begins from the end (dim_idx = 0 -> is the most inner dimension) + // Returns dimension index if dimension indices for all entry and exit points are equal, and UNDEFINED_DIM_IDX otherwise + size_t get_dim_idx() const; + size_t get_work_amount() const; + size_t get_increment() const; + const std::vector& get_entry_points() const; + const std::vector& get_exit_points() const; + bool get_outer_splited_loop() const; + + /** + * \brief Inserts a separate body for first loop iteration processing if needed. + * Can also modify both main and first iter loop bodies. + * TODO: replace this temporary solution when ticket 119851 is implemented + * + * \param linear_ir LIR which should be modified + * \param loop_end_it iterator on LoopEnd expression for which the handler is called + * + * \return bool value which indicates whether the linear_ir was changed or not. 
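Editorial note on the LoopInfo rework in this hunk: the public fields work_amount, increment, entry_points, exit_points, and outer_splited_loop become private m_-prefixed members behind getters and setters, while dim_idx moves onto the individual LoopPort entries; LoopInfo::get_dim_idx() only reports a single index when every port agrees, and returns UNDEFINED_DIM_IDX otherwise. A hedged sketch of how a call site migrates, with loop_info assumed to be a LoopInfoPtr obtained from a LoopManager:

```cpp
// Sketch: direct field access is replaced by accessors after this change.
using LoopInfo = ov::snippets::lowered::LinearIR::LoopManager::LoopInfo;

const size_t work_amount = loop_info->get_work_amount();  // was loop_info->work_amount
const size_t increment = loop_info->get_increment();      // was loop_info->increment

// dim_idx now lives on the ports; disagreement is signalled explicitly.
if (loop_info->get_dim_idx() == LoopInfo::UNDEFINED_DIM_IDX) {
    // entry/exit points were marked with different dimension indices
}

// A first-iteration handler can be attached for later peeling (cf. ticket 119851):
loop_info->set_first_iter_handler(
    [](ov::snippets::lowered::LinearIR& lir,
       ov::snippets::lowered::LinearIR::constExprIt loop_end_it) {
        return false;  // no-op handler: report that the LIR was not changed
    });
```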
+ */ + using FirstIterHandler = std::function; + const FirstIterHandler& get_first_iter_handler() const; + + // Sets dim_idx to all entry and exit points + void set_dim_idx(size_t dim_idx); + void set_work_amount(size_t work_amount); + void set_increment(size_t increment); + void set_entry_points(std::vector entry_points); + void set_exit_points(std::vector exit_points); + void set_outer_splited_loop(bool outer_splited_loop); + void set_first_iter_handler(FirstIterHandler handler); + + private: + size_t m_work_amount = 0; + size_t m_increment = 0; // The order of entry and exit expressions is important: // - The position before first entry expr is Loop Begin position // - The position after last exit expr is Loop End position // Note: Scalars aren't entry expressions but can be before first entry expr in Linear IR - std::vector entry_points = {}; - std::vector exit_points = {}; + std::vector m_entry_points = {}; + std::vector m_exit_points = {}; // True if this Loop is outer Loop for nested Loops that splits the same dimension - bool outer_splited_loop = false; + bool m_outer_splited_loop = false; + FirstIterHandler m_first_iter_handler = nullptr; }; using LoopInfoPtr = std::shared_ptr; @@ -83,11 +116,14 @@ class LinearIR::LoopManager { // Return Loop ID template size_t mark_loop(LinearIR::constExprIt loop_begin_pos, - LinearIR::constExprIt loop_end_pos, - size_t work_amount, size_t work_amount_increment, size_t dim_idx, - const std::vector& entries, - const std::vector& exits) { - const auto loop_info = std::make_shared(work_amount, work_amount_increment, dim_idx, entries, exits); + LinearIR::constExprIt loop_end_pos, + size_t work_amount, + size_t work_amount_increment, + size_t dim_idx, + const std::vector& entries, + const std::vector& exits) { + const auto loop_info = std::make_shared(work_amount, work_amount_increment, entries, exits); + loop_info->set_dim_idx(dim_idx); const auto loop_id = this->add_loop_info(loop_info); for (auto expr_it = loop_begin_pos; expr_it != loop_end_pos; ++expr_it) { insert_loop_id(*expr_it, loop_id); @@ -95,6 +131,30 @@ class LinearIR::LoopManager { return loop_id; } + template + size_t mark_loop(LinearIR::constExprIt loop_begin_pos, + LinearIR::constExprIt loop_end_pos, + size_t work_amount, + size_t increment, + const std::vector& entries, + const std::vector& exits) { + const auto loop_info = std::make_shared(work_amount, increment, entries, exits); + const auto loop_id = this->add_loop_info(loop_info); + for (auto expr_it = loop_begin_pos; expr_it != loop_end_pos; ++expr_it) { + insert_loop_id(*expr_it, loop_id); + } + return loop_id; + } + + size_t replace_with_new_loop(const LinearIR& linear_ir, + LinearIR::constExprIt loop_begin_pos, + LinearIR::constExprIt loop_end_pos, + size_t work_amount, + size_t increment, + const std::vector& entries, + const std::vector& exits, + const size_t old_id); + void fuse_loops(const LinearIR& linear_ir, size_t loop_id_upper, size_t loop_id_lower, bool fuse_into_upper = true); void fuse_loops(LinearIR::constExprIt loop_begin_target, LinearIR::constExprIt loop_end_target, size_t loop_id_upper, size_t loop_id_lower, bool fuse_into_upper = true); diff --git a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp index 39c51f890f2f5d..e98c6caaafa49c 100644 --- a/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/init_loops.hpp @@ -25,14 +25,9 @@ class InitLoops : public Pass 
{ bool run(LinearIR& linear_ir) override; private: - static void init_ptr_increments(std::vector& loop_inputs, - std::vector& loop_outputs, - size_t work_amount, size_t dim_idx); - static void init_finalization_offsets(std::vector& loop_inputs, - std::vector& loop_outputs, - size_t work_amount); - static void init_element_type_sizes(std::vector& loop_inputs, - std::vector& loop_outputs); + static void init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& loop_info); + static void init_finalization_offsets(const LinearIR::LoopManager::LoopInfoPtr& loop_info); + static void init_element_type_sizes(const LinearIR::LoopManager::LoopInfoPtr& loop_info); }; } // namespace pass diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_tail_loop.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_tail_loop.hpp index 8801d4c7130ec4..faafd8186b8448 100644 --- a/src/common/snippets/include/snippets/lowered/pass/insert_tail_loop.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/insert_tail_loop.hpp @@ -7,6 +7,7 @@ #include "pass.hpp" #include "snippets/op/loop.hpp" +#include "snippets/lowered/loop_manager.hpp" namespace ov { namespace snippets { @@ -23,21 +24,26 @@ class InsertTailLoop : public Pass { public: OPENVINO_RTTI("InsertTailLoop", "Pass") bool run(LinearIR& linear_ir) override; + static LinearIR::container copy_loop(const LinearIR& linear_ir, const size_t loop_id); + + static constexpr size_t existing_subtensor_value = SIZE_MAX; + static void propagate_updated_subtensor_through_loop(const LinearIR& linear_ir, + const LinearIR::LoopManager::LoopInfoPtr& loop_info, + LinearIR::container::const_iterator begin, + LinearIR::container::const_iterator end, + const size_t new_dim_value = existing_subtensor_value); private: - static std::shared_ptr create_tail_loop(LinearIR& linear_ir, - LinearIR::constExprIt vector_begin, - LinearIR::constExprIt vector_end, - LinearIR::constExprIt& tail_begin, - LinearIR::constExprIt& tail_end, - const std::shared_ptr& vector_loop_end, - bool need_vector_loop, - size_t tail_size, const std::vector& tail_finalization_offsets); + static void create_tail_loop(LinearIR& linear_ir, + LinearIR::constExprIt begin, + LinearIR::constExprIt end, + const std::shared_ptr& loop_end, + bool need_vector_loop, + size_t tail_size); static void tail_transformations(LinearIR& linear_ir, LinearIR::constExprIt tail_begin, LinearIR::constExprIt tail_end, size_t tail_size); - static bool optimize_single_evaluation(const std::shared_ptr& loop); }; } // namespace pass diff --git a/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp b/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp new file mode 100644 index 00000000000000..9ac4181e61e861 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/optimize_loop_single_evaluation.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface OptimizeLoopSingleEvaluation + * @brief Does the following optimizations if the Loop body can be executed only once: + * - sets evaluate_once parameter to true + * - moves all ptr arithmetic to finalization offsets + * @ingroup snippets + */ +class OptimizeLoopSingleEvaluation : public Pass { +public: + OPENVINO_RTTI("OptimizeLoopSingleEvaluation", "Pass") + bool run(LinearIR& linear_ir) override; +}; + 
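On the new OptimizeLoopSingleEvaluation pass declared above: per its doc comment, when a loop body can only execute once the pass flips evaluate_once and folds per-iteration pointer arithmetic into the finalization offsets. A hedged sketch of wiring it into the lowered pipeline, assuming the usual PassPipeline helper and a linear_ir that already has its loops inserted:

```cpp
#include "snippets/lowered/pass/optimize_loop_single_evaluation.hpp"
#include "snippets/lowered/pass/pass.hpp"

// Sketch: the pass runs like any other lowered pass; a loop qualifies when it
// cannot iterate more than once (roughly, work_amount <= increment - an
// assumption here, the real check lives inside the pass).
ov::snippets::lowered::pass::PassPipeline pipeline;
pipeline.register_pass<ov::snippets::lowered::pass::OptimizeLoopSingleEvaluation>();
pipeline.run(linear_ir);
```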
+} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp b/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp new file mode 100644 index 00000000000000..e0c3c3e7e640ec --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_base.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" +#include "snippets/lowered/linear_ir.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface SerializeBase + * @brief Base class for LinearIR serialization passes + * @ingroup snippets + */ +class SerializeBase : public Pass { +public: + OPENVINO_RTTI("SerializeBase", "Pass") + SerializeBase(const std::string& xml_path); + +protected: + std::string get_bin_path_from_xml(const std::string& xml_path); + + const std::string m_xml_path; + const std::string m_bin_path; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp b/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp new file mode 100644 index 00000000000000..06c43258b25e79 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "serialize_base.hpp" +#include "snippets/lowered/linear_ir.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface SerializeControlFlow + * @brief Serializes control flow graph of LinearIR + * @ingroup snippets + */ +class SerializeControlFlow : public SerializeBase { +public: + OPENVINO_RTTI("SerializeControlFlow", "Pass", SerializeBase) + SerializeControlFlow(const std::string& xml_path) : SerializeBase(xml_path) {} + bool run(LinearIR& linear_ir) override; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp b/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp new file mode 100644 index 00000000000000..f1c5cfeb506333 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp @@ -0,0 +1,32 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "serialize_base.hpp" +#include "snippets/lowered/linear_ir.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface SerializeDataFlow + * @brief Serializes data flow graph of LinearIR + * @attention - This pass can not be run on the LinearIR after tail loop insertion. + * @attention - Control flow operations (e.g. 
LoopBegin/LoopEnd) are not serialized + * @ingroup snippets + */ +class SerializeDataFlow : public SerializeBase { +public: + OPENVINO_RTTI("SerializeDataFlow", "Pass", SerializeBase) + SerializeDataFlow(const std::string& xml_path) : SerializeBase(xml_path) {} + bool run(LinearIR& linear_ir) override; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/op/broadcastload.hpp b/src/common/snippets/include/snippets/op/broadcastload.hpp index 540be423bb8eb5..a46311d30151ff 100644 --- a/src/common/snippets/include/snippets/op/broadcastload.hpp +++ b/src/common/snippets/include/snippets/op/broadcastload.hpp @@ -21,7 +21,7 @@ class BroadcastLoad : public MemoryAccess { public: OPENVINO_OP("BroadcastLoad", "SnippetsOpset", ov::snippets::op::MemoryAccess); - BroadcastLoad(const Output& x, ov::PartialShape output_shape, size_t offset = 0lu); + BroadcastLoad(const Output& x, ov::Dimension bcast_dimension, size_t offset = 0lu); BroadcastLoad() = default; size_t get_offset() const { return get_input_offset(0); } @@ -29,7 +29,8 @@ class BroadcastLoad : public MemoryAccess { bool visit_attributes(AttributeVisitor& visitor) override; std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; void validate_and_infer_types() override; - ov::PartialShape get_output_shape() {return output_shape;} + const ov::Dimension& get_bcast_dimension() {return bcast_dimension;} + void set_bcast_dimension(ov::Dimension new_dim) {bcast_dimension = std::move(new_dim);} // Note:BroadcastMove and BroadcastLoad are implemented as separate classes, // but have identical shapeInfer semantics. In order to avoid code duplication, @@ -39,7 +40,7 @@ class BroadcastLoad : public MemoryAccess { explicit ShapeInfer(const std::shared_ptr& n) : BroadcastShapeInfer(n) {} }; private: - ov::PartialShape output_shape; + ov::Dimension bcast_dimension; }; } // namespace op diff --git a/src/common/snippets/include/snippets/op/broadcastmove.hpp b/src/common/snippets/include/snippets/op/broadcastmove.hpp index d915fbc286330a..95579c174841c5 100644 --- a/src/common/snippets/include/snippets/op/broadcastmove.hpp +++ b/src/common/snippets/include/snippets/op/broadcastmove.hpp @@ -20,7 +20,7 @@ class BroadcastMove : public ov::op::Op { public: OPENVINO_OP("BroadcastMove", "SnippetsOpset"); - BroadcastMove(const Output& x, ov::PartialShape output_shape); + BroadcastMove(const Output& x, ov::Dimension bcast_dimension); BroadcastMove() = default; bool visit_attributes(AttributeVisitor& visitor) override; @@ -28,7 +28,8 @@ class BroadcastMove : public ov::op::Op { std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; void validate_and_infer_types() override; - ov::PartialShape get_output_shape() {return output_shape;} + const ov::Dimension& get_bcast_dimension() {return bcast_dimension;} + void set_bcast_dimension(ov::Dimension new_dim) {bcast_dimension = std::move(new_dim);} // Note:BroadcastMove and BroadcastLoad are implemented as separate classes, // but have identical shapeInfer semantics. 
In order to avoid code duplication, // we created dummy ShapeInfer classes that are essentially instantiations @@ -38,7 +39,7 @@ class BroadcastMove : public ov::op::Op { }; protected: - ov::PartialShape output_shape; + ov::Dimension bcast_dimension; }; } // namespace op diff --git a/src/common/snippets/include/snippets/op/buffer.hpp b/src/common/snippets/include/snippets/op/buffer.hpp index 9f522ed3d45688..d0e1152b589486 100644 --- a/src/common/snippets/include/snippets/op/buffer.hpp +++ b/src/common/snippets/include/snippets/op/buffer.hpp @@ -5,6 +5,7 @@ #pragma once #include "openvino/op/op.hpp" +#include "snippets/shape_inference/shape_inference.hpp" namespace ov { namespace snippets { @@ -13,13 +14,10 @@ namespace op { /** * @interface Buffer * @brief This is a base class for memory storage. - * If Buffer has a parent, the operation is for intermediate data storage - IntermediateMemory type. - * Otherwise, the operation is for allocation of new empty memory with shape `m_shape` - NewMemory type * Notes: * - All buffers with the same ID in a graph have the same memory pointer. So if we have a few buffers, * each the corresponding MemoryAccess op for Buffer should have offset for common memory pointer of this Buffer * - Buffer should be a single consumer for operation output port - * @param m_type - type of Buffer: IntermediateMemory/NewMemory * @param m_shape - output allocation shape for Buffer with type NewMemory * @param m_offset - offset in common Buffer scratchpad * @param m_id - Buffer ID in common Buffer system @@ -29,21 +27,11 @@ class Buffer : public ov::op::Op { public: OPENVINO_OP("Buffer", "SnippetsOpset"); Buffer() = default; - Buffer(const ov::Shape& shape, ov::element::Type element_type = ov::element::u8, size_t id = 0); - Buffer(const ov::Output& arg, const ov::Shape& shape, size_t id = 0); - Buffer(const ov::Output& arg, int32_t allocation_rank = -1, size_t id = 0); + Buffer(const OutputVector& arguments, const ov::Shape& shape, size_t id, ov::element::Type element_type = ov::element::u8); bool visit_attributes(AttributeVisitor& visitor) override; - void validate_and_infer_types() override; - std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; - - enum Type { - NewMemory, - IntermediateMemory - }; size_t get_id() const { return m_id; } - Type get_type() const { return m_type; } int64_t get_offset() const { return m_offset; } void set_id(size_t id) { m_id = id; } const ov::Shape& get_allocation_shape() const { return m_shape; } @@ -51,17 +39,55 @@ class Buffer : public ov::op::Op { void set_offset(int64_t offset) { m_offset = offset; } size_t get_byte_size() const; - void set_element_type(ov::element::Type element_type); - - bool is_intermediate_memory() const { return m_type == Type::IntermediateMemory; } - bool is_new_memory() const { return m_type == Type::NewMemory; } - -private: - Type m_type = Type::IntermediateMemory; +protected: ov::Shape m_shape = {}; - int64_t m_offset = 0; size_t m_id = 0; // Default ID - 0. All Buffers are from the same set ov::element::Type m_element_type = ov::element::u8; // u8 - default 1 byte + int64_t m_offset = 0; +}; + +/** + * @interface IntermediateMemoryBuffer + * @brief Represents an intermediate memory storage operation. It always has a parent. 
+ * @ingroup snippets + * + */ +class IntermediateMemoryBuffer : public Buffer { +public: + OPENVINO_OP("IntermediateMemoryBuffer", "SnippetsOpset", Buffer); + IntermediateMemoryBuffer() = default; + IntermediateMemoryBuffer(const ov::Output& arg, const ov::Shape& shape, size_t id = 0); + IntermediateMemoryBuffer(const ov::Output& arg, int32_t allocation_rank = -1, size_t id = 0); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + ov::Shape compute_shape_from_allocation_rank(const ov::Output& arg, int32_t allocation_rank); +}; + +/** + * @interface NewMemoryBuffer + * @brief Represents a new empty memory for allocation with specified shape. It has no parent operations. + * @ingroup snippets + * + */ +class NewMemoryBuffer : public Buffer { +public: + OPENVINO_OP("NewMemoryBuffer", "SnippetsOpset", Buffer); + NewMemoryBuffer() = default; + NewMemoryBuffer(const ov::Shape& shape, size_t id = 0, ov::element::Type element_type = ov::element::u8); + + void validate_and_infer_types() override; + void set_element_type(ov::element::Type element_type); + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + class ShapeInfer : public IShapeInferSnippets { + ov::Shape m_shape; + public: + explicit ShapeInfer(const std::shared_ptr& n); + Result infer(const std::vector& input_shapes) override; + }; }; } // namespace op diff --git a/src/common/snippets/include/snippets/op/loop.hpp b/src/common/snippets/include/snippets/op/loop.hpp index 1fd51649fc65d1..1740c00ccc3a64 100644 --- a/src/common/snippets/include/snippets/op/loop.hpp +++ b/src/common/snippets/include/snippets/op/loop.hpp @@ -56,11 +56,8 @@ class LoopBegin : public LoopBase { * @param args vector of input values + LoopBegin, all values except for the LoopBegin are passed directly to output. * @param work_amount total number of evaluations to be processed by the loop * @param increment number of evaluations processed in one iteration of the loop. - * @param apply_increment describes which data pointers attributed to the loop should be incremented on every iteration. - * should be used when Loop is connected to Parameters and/or Results. If apply_increment[i] == true then i-th i/o data - * pointer will be incremented by work_amount*data_size on every iteration. - * @param ptr_increments specifies i/o pointer increment performed on every iteration. This is an alternative to - * apply_increments, which enables more flexibility. + * @param is_incremented describes which data pointers attributed to the loop should be incremented on every iteration. 
+ * @param ptr_increments specifies i/o pointer increment performed on every iteration if the corresponding is_incremented[i] is true
 * @param finalization_offsets pointer increments that are applied to i/o pointers before exiting the loop
 * @param id the identifier of Loop in the Loop system in LoopManager
 * @ingroup snippets
@@ -69,16 +66,14 @@ class LoopEnd : public LoopBase {
 public:
     OPENVINO_OP("LoopEnd", "SnippetsOpset", LoopBase);
     LoopEnd(const Output<Node>& loop_begin, size_t work_amount, size_t work_amount_increment,
-            std::vector<bool> apply_increment, std::vector<int64_t> finalization_offsets,
-            std::vector<int64_t> element_type_sizes, size_t input_num, size_t output_num, size_t id);
-    LoopEnd(const Output<Node>& loop_begin, size_t work_amount, size_t work_amount_increment,
-            std::vector<int64_t> ptr_increments, std::vector<int64_t> finalization_offsets,
+            std::vector<bool> is_incremented, std::vector<int64_t> ptr_increments, std::vector<int64_t> finalization_offsets,
             std::vector<int64_t> element_type_sizes, size_t input_num, size_t output_num, size_t id);
     LoopEnd() = default;
     std::shared_ptr<LoopBegin> get_loop_begin();
     void validate_and_infer_types() override;
     std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const override;
     const std::vector<int64_t>& get_finalization_offsets() const;
+    const std::vector<bool>& get_is_incremented() const;
     const std::vector<int64_t>& get_ptr_increments() const;
     const std::vector<int64_t>& get_element_type_sizes() const;
     size_t get_input_num() const;
@@ -91,6 +86,7 @@
     void set_work_amount(size_t new_work_amount);
     void set_increment(size_t new_increment);
     void set_evaluate_once(bool once);
+    void set_id(size_t new_id);
     // Used to propagate information about Loop structure, needed to simplify some optimizations. For example,
     // to skip pointer increments when outer Loop is empty, and work_amount == vector_size (one inner vector Loop)
     // true by default; the optimizations are enabled if it's false
@@ -102,6 +98,7 @@
     bool visit_attributes(AttributeVisitor& visitor) override;
 
 private:
+    std::vector<bool> m_is_incremented = {};
     std::vector<int64_t> m_ptr_increments = {};
     std::vector<int64_t> m_finalization_offsets = {};
     std::vector<int64_t> m_element_type_sizes = {};
diff --git a/src/common/snippets/include/snippets/op/serialization_node.hpp b/src/common/snippets/include/snippets/op/serialization_node.hpp
index 2804f4a2817c36..8910f98bd0a570 100644
--- a/src/common/snippets/include/snippets/op/serialization_node.hpp
+++ b/src/common/snippets/include/snippets/op/serialization_node.hpp
@@ -19,17 +19,28 @@ namespace op {
  */
 class SerializationNode : public ov::op::Op {
 public:
-    OPENVINO_OP("SerializationNode", "SnippetsOpset");
-
+    enum SerializationMode { DATA_FLOW, CONTROL_FLOW };
     SerializationNode() = default;
-    SerializationNode(const ov::OutputVector& args, const std::shared_ptr<lowered::Expression>& expr);
+    SerializationNode(const ov::OutputVector& args,
+                      const std::shared_ptr<lowered::Expression>& expr,
+                      SerializationMode mode = SerializationMode::CONTROL_FLOW);
 
     void validate_and_infer_types() override;
     std::shared_ptr<Node> clone_with_new_inputs(const OutputVector &new_args) const override;
     bool visit_attributes(AttributeVisitor &visitor) override;
 
+    _OPENVINO_HIDDEN_METHOD static const DiscreteTypeInfo& get_type_info_static() {
+        static ::ov::DiscreteTypeInfo type_info_static{"SerializationNode", "SnippetsOpset"};
+        return type_info_static;
+    }
+
+    const ::ov::DiscreteTypeInfo& get_type_info() const override {
+        return m_expr->get_node()->get_type_info();
+    }
+
 private:
     std::shared_ptr<lowered::Expression> m_expr;
+    SerializationMode m_mode;
 };
 
 } // namespace op
diff --git
a/src/common/snippets/include/snippets/op/subgraph.hpp b/src/common/snippets/include/snippets/op/subgraph.hpp index b642bbd7a23ccb..5d5b7f85270a55 100644 --- a/src/common/snippets/include/snippets/op/subgraph.hpp +++ b/src/common/snippets/include/snippets/op/subgraph.hpp @@ -124,7 +124,6 @@ class Subgraph : public ov::op::util::SubGraphOp { void print() const; - void serialize() const; VectorDims infer_master_shape(); static auto wrap_node_as_subgraph(const std::shared_ptr& node) -> std::shared_ptr; diff --git a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp index af69ad905111e8..43ad0aa3d5ac97 100644 --- a/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp +++ b/src/common/snippets/include/snippets/shape_inference/shape_infer_instances.hpp @@ -21,7 +21,7 @@ class NumpyBroadcastShapeInfer : public IShapeInferSnippets { template class BroadcastShapeInfer : public IShapeInferSnippets { - VectorDims::value_type m_broadcasted_dim; + std::shared_ptr broadcast_op; public: explicit BroadcastShapeInfer(const std::shared_ptr& n); Result infer(const std::vector& input_shapes) override; diff --git a/src/common/snippets/src/generator.cpp b/src/common/snippets/src/generator.cpp index 0dacee4878d598..c0a2583aef23b4 100644 --- a/src/common/snippets/src/generator.cpp +++ b/src/common/snippets/src/generator.cpp @@ -6,7 +6,9 @@ #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/pass/assign_registers.hpp" +#include "snippets/lowered/pass/cleanup_loop_offsets.hpp" #include "snippets/lowered/pass/insert_tail_loop.hpp" +#include "snippets/lowered/pass/optimize_loop_single_evaluation.hpp" #include "snippets/op/kernel.hpp" @@ -25,8 +27,17 @@ void Generator::generate(lowered::LinearIR& linear_ir, LoweringResult& result, c return get_op_reg_type(op); }; lowered::pass::PassPipeline lowered_pipeline; + // Note: the order of all passes in this pipeline must not be changed since they have hard dependencies + // 1. InsertTailLoop must be called after AssignRegisters since tail loop expressions must have the same + // assigned registers as the corresponding ops in the main body. + // 2. CleanupLoopOffsets must be called after InsertTailLoop to avoid violating the proportionality of the pointer increments + // (this might happen if tail loop and main loop have different increments) + // 3. 
OptimizeLoopSingleEvaluation must be called after CleanupLoopOffsets + // since CleanupLoopOffsets can't handle loops with evaluate_once = true lowered_pipeline.register_pass(reg_type_mapper); lowered_pipeline.register_pass(); + lowered_pipeline.register_pass(); + lowered_pipeline.register_pass(); lowered_pipeline.run(linear_ir); linear_ir.init_emitters(target); @@ -66,7 +77,8 @@ Generator::opRegType Generator::get_op_reg_type(const std::shared_ptr& op) std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || - std::dynamic_pointer_cast(op) || + std::dynamic_pointer_cast(op) || + std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op) || std::dynamic_pointer_cast(op)) diff --git a/src/common/snippets/src/lowered/linear_ir.cpp b/src/common/snippets/src/lowered/linear_ir.cpp index 8f5978638364d3..4456b6a240be4a 100644 --- a/src/common/snippets/src/lowered/linear_ir.cpp +++ b/src/common/snippets/src/lowered/linear_ir.cpp @@ -8,7 +8,6 @@ #include "snippets/lowered/loop_manager.hpp" #include "snippets/lowered/expression_factory.hpp" -#include "snippets/op/serialization_node.hpp" #include "openvino/core/graph_util.hpp" #include "openvino/core/type.hpp" @@ -67,7 +66,7 @@ ExpressionPtr LinearIR::create_expression(const std::shared_ptr& n, const return ExpressionFactory::build(n, *this, model); } -ExpressionPtr LinearIR::create_expression(const std::shared_ptr& n, const std::vector& inputs) { +ExpressionPtr LinearIR::create_expression(const std::shared_ptr& n, const std::vector& inputs) const { return ExpressionFactory::build(n, inputs, *this); } @@ -86,46 +85,15 @@ ov::NodeVector LinearIR::get_ordered_ops(const std::shared_ptr& m) { return ov::topological_sort(nodes); } -void LinearIR::serialize(const std::string& xml, const std::string& bin) const { - auto first_node = std::make_shared(element::f32, Shape{}); - first_node->set_friendly_name("Start"); - first_node->get_rt_info()["execTimeMcs"] = 0; - std::shared_ptr serialization_node = first_node; - - // This map allows to get LoopBegin serialization node by original LoopBegin node - // It is used to draw an edge between LoopBegin and LoopEnd serialization nodes - std::map, std::shared_ptr> loops_map; - for (const auto& expr : m_expressions) { - const auto node = expr->get_node(); - if (auto loop_end = ov::as_type_ptr(node)) { - OPENVINO_ASSERT(loops_map.count(loop_end->get_loop_begin()), - "Serialization can't find LoopBegin that corresponds to LoopEnd with friendly name ", - loop_end->get_friendly_name()); - auto loop_begin_serialization_node = loops_map.at(loop_end->get_loop_begin()); - serialization_node = std::make_shared(ov::OutputVector{serialization_node, loop_begin_serialization_node}, expr); - } else { - serialization_node = std::make_shared(ov::OutputVector{serialization_node}, expr); - if (auto loop_begin = ov::as_type_ptr(node)) { - loops_map[loop_begin] = serialization_node; - } - } - } - auto last_node = std::make_shared(serialization_node); - last_node->set_friendly_name("End"); - const auto tmp_model = std::make_shared(ResultVector {last_node}, - ParameterVector {first_node}, - "Lowered_IR_Serialization"); - ov::pass::Serialize(xml, bin).run_on_model(tmp_model); -} - LinearIR::container LinearIR::deep_copy_range(LinearIR::container::const_iterator begin, LinearIR::container::const_iterator end, ExressionMap& expression_map) { OPENVINO_ASSERT(expression_map.empty(), "deep_copy_range expects empty expression_map as an input"); LinearIR::container 
result; NodeVector original_nodes; - for (auto it = begin; it != end; it++) + for (auto it = begin; it != end; it++) { original_nodes.push_back((*it)->get_node()); + } // node_map and expr_map map original node pointer (expression) to a new pointer (expression) ngraph::NodeMap node_map; diff --git a/src/common/snippets/src/lowered/loop_manager.cpp b/src/common/snippets/src/lowered/loop_manager.cpp index 7747ec2d247f96..e7e83361ee0a39 100644 --- a/src/common/snippets/src/lowered/loop_manager.cpp +++ b/src/common/snippets/src/lowered/loop_manager.cpp @@ -19,21 +19,38 @@ using LoopManager = LinearIR::LoopManager; using LoopPort = LoopManager::LoopPort; using LoopInfo = LoopManager::LoopInfo; +LoopPort::LoopPort(const ExpressionPort& port, bool is_incremented, size_t dim_idx) + : expr_port(std::make_shared(port)), + is_incremented(is_incremented), + dim_idx(dim_idx) { + OPENVINO_ASSERT(dim_idx < port.get_descriptor_ptr()->get_shape().size(), + "LoopPort dim_idx (", + dim_idx, + ") must be less than the corresponding expression port shape rank (", + port.get_descriptor_ptr()->get_shape().size(), + ")"); +} + std::shared_ptr LoopPort::clone_with_new_expr(const ExpressionPtr& new_expr) const { auto new_loop_port = std::make_shared(*this); new_loop_port->expr_port = expr_port->clone_with_new_expr(new_expr); return new_loop_port; } -LoopInfo::LoopInfo(size_t work_amount, size_t increment, size_t dim_idx, - const std::vector& entries, const std::vector& exits) - : work_amount(work_amount), increment(increment), dim_idx(dim_idx), outer_splited_loop(false) { - entry_points.reserve(entries.size()); - exit_points.reserve(exits.size()); +LinearIR::LoopManager::LoopInfo::LoopInfo(size_t work_amount, + size_t increment, + const std::vector& entries, + const std::vector& exits, + bool outer_splited_loop) + : m_work_amount(work_amount), + m_increment(increment), + m_outer_splited_loop(outer_splited_loop) { + m_entry_points.reserve(entries.size()); + m_exit_points.reserve(exits.size()); for (const auto& port : entries) - entry_points.emplace_back(port); + m_entry_points.emplace_back(port); for (const auto& port : exits) - exit_points.emplace_back(port); + m_exit_points.emplace_back(port); } std::shared_ptr LoopInfo::clone_with_new_expr(const ExressionMap& expr_map) const { @@ -48,33 +65,103 @@ std::shared_ptr LoopInfo::clone_with_new_expr(const ExressionMap& expr } return cloned_port_points; }; - const auto& new_entry_points = clone_loop_ports(entry_points); - const auto& new_exit_points = clone_loop_ports(exit_points); + const auto& new_entry_points = clone_loop_ports(m_entry_points); + const auto& new_exit_points = clone_loop_ports(m_exit_points); - auto new_loop_info = std::make_shared(work_amount, increment, dim_idx, new_entry_points, new_exit_points); - new_loop_info->outer_splited_loop = outer_splited_loop; + return std::make_shared(m_work_amount, m_increment, new_entry_points, new_exit_points, m_outer_splited_loop); +} - return new_loop_info; +size_t LoopInfo::get_work_amount() const { + return m_work_amount; } -std::shared_ptr LoopManager::clone_with_new_expr(const ExressionMap& expr_map) const { - auto new_loop_manager = std::make_shared(); - for (const auto& id_info : m_map) - new_loop_manager->m_map.insert({id_info.first, id_info.second->clone_with_new_expr(expr_map)}); - new_loop_manager->next_id = next_id; - return new_loop_manager; +size_t LoopInfo::get_increment() const { + return m_increment; +} + +const std::vector& LoopInfo::get_entry_points() const { + return m_entry_points; +} + +const 
std::vector& LoopInfo::get_exit_points() const { + return m_exit_points; +} + +bool LoopInfo::get_outer_splited_loop() const { + return m_outer_splited_loop; +} + +const LoopInfo::FirstIterHandler& LoopInfo::get_first_iter_handler() const { + return m_first_iter_handler; +} + +size_t LinearIR::LoopManager::LoopInfo::get_dim_idx() const { + OPENVINO_ASSERT(!m_entry_points.empty(), "Loop info must have at least one entry point"); + auto equal_dim_idxes = [&](const LinearIR::LoopManager::LoopPort& p) { + return p.dim_idx == m_entry_points[0].dim_idx; + }; + if (std::all_of(m_entry_points.begin(), m_entry_points.end(), equal_dim_idxes) && + std::all_of(m_exit_points.begin(), m_exit_points.end(), equal_dim_idxes)) { + return m_entry_points[0].dim_idx; + } else { + return UNDEFINED_DIM_IDX; + } +} + +void LoopInfo::set_dim_idx(size_t dim_idx) { + auto set_common_dim_idx = [dim_idx](std::vector& ports) { + for (auto& port : ports) + port.dim_idx = dim_idx; + }; + set_common_dim_idx(m_entry_points); + set_common_dim_idx(m_exit_points); +} + +void LoopInfo::set_work_amount(size_t work_amount) { + m_work_amount = work_amount; +} + +void LoopInfo::set_increment(size_t increment) { + m_increment = increment; +} + +void LoopInfo::set_entry_points(std::vector entry_points) { + m_entry_points = std::move(entry_points); +} + +void LoopInfo::set_exit_points(std::vector exit_points) { + m_exit_points = std::move(exit_points);; +} + +void LoopInfo::set_outer_splited_loop(bool outer_splited_loop) { + m_outer_splited_loop = outer_splited_loop; +} + +void LoopInfo::set_first_iter_handler(LoopInfo::FirstIterHandler first_iter_handler) { + m_first_iter_handler = std::move(first_iter_handler); } bool operator==(const LinearIR::LoopManager::LoopPort& lhs, const LinearIR::LoopManager::LoopPort& rhs) { if (&lhs == &rhs) return true; - return lhs.expr_port == rhs.expr_port && lhs.is_incremented == rhs.is_incremented; + return lhs.expr_port == rhs.expr_port && lhs.is_incremented == rhs.is_incremented && lhs.dim_idx == rhs.dim_idx; } bool operator!=(const LinearIR::LoopManager::LoopPort& lhs, const LinearIR::LoopManager::LoopPort& rhs) { return !(lhs == rhs); } bool operator<(const LinearIR::LoopManager::LoopPort& lhs, const LinearIR::LoopManager::LoopPort& rhs) { - return (lhs.expr_port < rhs.expr_port) || (lhs.expr_port == rhs.expr_port && (lhs.is_incremented < rhs.is_incremented)); + return (lhs.expr_port < rhs.expr_port) || + (lhs.expr_port == rhs.expr_port && + (lhs.is_incremented < rhs.is_incremented || + (lhs.is_incremented == rhs.is_incremented && lhs.dim_idx < rhs.dim_idx))); +} + +std::shared_ptr LoopManager::clone_with_new_expr(const ExressionMap& expr_map) const { + auto new_loop_manager = std::make_shared(); + for (const auto& id_info : m_map) + new_loop_manager->m_map.insert({id_info.first, id_info.second->clone_with_new_expr(expr_map)}); + new_loop_manager->next_id = next_id; + return new_loop_manager; } size_t LinearIR::LoopManager::add_loop_info(const LoopInfoPtr &loop) { @@ -113,7 +200,7 @@ void LinearIR::LoopManager::get_loop_bounds(const LinearIR &linear_ir, LinearIR::constExprIt &loop_end_pos, bool loop_ops_inserted) const { const auto loop_info = get_loop_info(loop_id); - get_loop_bounds(linear_ir, loop_info->entry_points, loop_info->exit_points, loop_begin_pos, loop_end_pos, loop_id, loop_ops_inserted); + get_loop_bounds(linear_ir, loop_info->get_entry_points(), loop_info->get_exit_points(), loop_begin_pos, loop_end_pos, loop_id, loop_ops_inserted); } void 
LinearIR::LoopManager::get_loop_bounds(const LinearIR &linear_ir, @@ -157,8 +244,8 @@ LinearIR::LoopManager::LoopPort LinearIR::LoopManager::get_loop_port_by_expr_por return *it; }; const auto& loop_info = get_loop_info(loop_id); - return expr_port.get_type() == ExpressionPort::Input ? get_loop_port(loop_info->entry_points) - : get_loop_port(loop_info->exit_points); + return expr_port.get_type() == ExpressionPort::Input ? get_loop_port(loop_info->get_entry_points()) + : get_loop_port(loop_info->get_exit_points()); } void LinearIR::LoopManager::get_io_loop_ports(LinearIR::constExprIt loop_begin_pos, @@ -258,6 +345,31 @@ void LinearIR::LoopManager::mark_loop(LinearIR::constExprIt loop_begin_pos, } } +size_t LinearIR::LoopManager::replace_with_new_loop(const LinearIR& linear_ir, + LinearIR::constExprIt loop_begin_pos, + LinearIR::constExprIt loop_end_pos, + size_t work_amount, + size_t increment, + const std::vector& entries, + const std::vector& exits, + const size_t old_id) { + const auto loop_info = std::make_shared(work_amount, increment, entries, exits); + const auto loop_id = this->add_loop_info(loop_info); + for (auto expr_it = loop_begin_pos; expr_it != loop_end_pos; ++expr_it) { + replace_loop_id(*expr_it, old_id, loop_id); + } + + const auto old_loop_info = this->get_loop_info(old_id); + const auto old_loop_begin_pos = linear_ir.find(old_loop_info->get_entry_points().front().expr_port->get_expr()); + const auto old_loop_end_pos = linear_ir.find(old_loop_info->get_exit_points().back().expr_port->get_expr()); + // If new bounds are equal to old loop bounds, this means that old Loop is removed totally from LIR + // In this case old loop info must be completely removed from loop manager + if (loop_begin_pos == old_loop_begin_pos && loop_end_pos == old_loop_end_pos) { + this->remove_loop_info(old_id); + } + return loop_id; +} + void LinearIR::LoopManager::fuse_loops(const LinearIR& linear_ir, size_t loop_id_upper, size_t loop_id_lower, bool fuse_into_upper) { LinearIR::constExprIt loop_begin_target, loop_end_target; get_loop_bounds(linear_ir, fuse_into_upper ? loop_id_lower : loop_id_upper, loop_begin_target, loop_end_target); @@ -272,10 +384,10 @@ void LinearIR::LoopManager::fuse_loops(LinearIR::constExprIt loop_begin_target, const auto& loop_info_upper = m_map[loop_id_upper]; const auto& loop_info_lower = m_map[loop_id_lower]; - auto entry_points_upper = loop_info_upper->entry_points; - auto exit_points_upper = loop_info_upper->exit_points; - auto entry_points_lower = loop_info_lower->entry_points; - auto exit_points_lower = loop_info_lower->exit_points; + auto entry_points_upper = loop_info_upper->get_entry_points(); + auto exit_points_upper = loop_info_upper->get_exit_points(); + auto entry_points_lower = loop_info_lower->get_entry_points(); + auto exit_points_lower = loop_info_lower->get_exit_points(); fuse_loop_ports(exit_points_upper, entry_points_lower, loop_id_upper); std::vector new_entries = entry_points_upper; @@ -284,8 +396,8 @@ void LinearIR::LoopManager::fuse_loops(LinearIR::constExprIt loop_begin_target, new_exits.insert(new_exits.end(), exit_points_lower.begin(), exit_points_lower.end()); auto& loop_info = fuse_into_upper ? loop_info_upper : loop_info_lower; - loop_info->entry_points = new_entries; - loop_info->exit_points = new_exits; + loop_info->set_entry_points(new_entries); + loop_info->set_exit_points(new_exits); const auto& from = fuse_into_upper ? loop_id_lower : loop_id_upper; const auto& to = fuse_into_upper ? 
loop_id_upper : loop_id_lower; @@ -347,7 +459,7 @@ template<> void LinearIR::LoopManager::update_loop_port(size_t loop_id, const ExpressionPort& actual_port, const std::vector& target_ports, bool is_entry) { const auto& loop_info = get_loop_info(loop_id); - auto& ports = is_entry ? loop_info->entry_points : loop_info->exit_points; + auto ports = is_entry ? loop_info->get_entry_points() : loop_info->get_exit_points(); auto port_it = std::find_if(ports.begin(), ports.end(), [&actual_port](const LoopPort& point) { return *point.expr_port.get() == actual_port; }); // In some cases actual ExpressionPort may not be LoopPort. We shouldn't throw exception here since ExpressionPort is not strong condition as LoopPort @@ -364,19 +476,21 @@ void LinearIR::LoopManager::update_loop_port(size_t loop_id, const ExpressionPor return copy; }); port_it = ports.erase(port_it); - ports.insert(port_it, target_ports.cbegin(), target_ports.cend()); + ports.insert(port_it, target_loop_ports.cbegin(), target_loop_ports.cend()); + is_entry ? loop_info->set_entry_points(ports) : loop_info->set_exit_points(ports); } template<> void LinearIR::LoopManager::update_loop_port(size_t loop_id, const LoopPort& actual_port, const std::vector& target_ports, bool is_entry) { const auto& loop_info = get_loop_info(loop_id); - auto& ports = is_entry ? loop_info->entry_points : loop_info->exit_points; + auto ports = is_entry ? loop_info->get_entry_points() : loop_info->get_exit_points(); auto port_it = std::find_if(ports.begin(), ports.end(), [&actual_port](const LoopPort& point) { return point == actual_port; }); OPENVINO_ASSERT(port_it != ports.end(), "Failed update_loop_port: existing loop ports has not been found"); port_it = ports.erase(port_it); ports.insert(port_it, target_ports.cbegin(), target_ports.cend()); + is_entry ? loop_info->set_entry_points(ports) : loop_info->set_exit_points(ports); } void LinearIR::LoopManager::expression_replacement(constExprIt new_expr_begin, constExprIt new_expr_end, const ExpressionPtr& decomposed_expr, @@ -411,8 +525,8 @@ void LinearIR::LoopManager::sort_loop_ports(LinearIR::constExprIt& loop_begin_po } }; auto loop_info = get_loop_info(loop_id); - const auto& loop_entries = loop_info->entry_points; - const auto& loop_exits = loop_info->exit_points; + const auto& loop_entries = loop_info->get_entry_points(); + const auto& loop_exits = loop_info->get_exit_points(); std::vector entries, exits; entries.reserve(loop_entries.size()); exits.reserve(loop_exits.size()); @@ -421,8 +535,8 @@ void LinearIR::LoopManager::sort_loop_ports(LinearIR::constExprIt& loop_begin_po push(loop_entries, entries, expr); push(loop_exits, exits, expr); } - loop_info->entry_points = entries; - loop_info->exit_points = exits; + loop_info->set_entry_points(entries); + loop_info->set_exit_points(exits); } void LinearIR::LoopManager::insert_loop_id(const ExpressionPtr& expr, size_t new_id, bool before, size_t target_id) { diff --git a/src/common/snippets/src/lowered/pass/allocate_buffers.cpp b/src/common/snippets/src/lowered/pass/allocate_buffers.cpp index d285320701cc62..18ef0d09b9704e 100644 --- a/src/common/snippets/src/lowered/pass/allocate_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/allocate_buffers.cpp @@ -31,20 +31,18 @@ void AllocateBuffers::set_buffer_offset(const ExpressionPtr& buffer_expr, const buffer->set_offset(static_cast(offset)); // Propagate to up: in Store. 
Buffer can have only one Store - { - if (buffer->is_intermediate_memory()) { - OPENVINO_ASSERT(buffer_expr->get_input_port_connectors().size() == 1, "Buffer with intermediate memory must have one parent"); - const auto& parent_output = buffer_expr->get_input_port_connector(0)->get_source(); - const auto& parent_expr = parent_output.get_expr(); - const auto port = parent_output.get_index(); - const auto& parent_node = parent_expr->get_node(); - auto memory_access = ov::as_type_ptr(parent_node); - if (memory_access && memory_access->is_memory_access_output_port(port)) { - memory_access->set_output_offset(offset, port); - } else { - OPENVINO_THROW( - "Buffer::set_offset() was called when Buffer didn't have the corresponding MemoryAccess op for offset propagation"); - } + if (ov::is_type(buffer)) { + OPENVINO_ASSERT(buffer_expr->get_input_port_connectors().size() == 1, "Buffer with intermediate memory must have one parent"); + const auto& parent_output = buffer_expr->get_input_port_connector(0)->get_source(); + const auto& parent_expr = parent_output.get_expr(); + const auto port = parent_output.get_index(); + const auto& parent_node = parent_expr->get_node(); + auto memory_access = ov::as_type_ptr(parent_node); + if (memory_access && memory_access->is_memory_access_output_port(port)) { + memory_access->set_output_offset(offset, port); + } else { + OPENVINO_THROW( + "Buffer::set_offset() was called when Buffer didn't have the corresponding MemoryAccess op for offset propagation"); } } // Propagate to down: in Load. Buffer can have several Load diff --git a/src/common/snippets/src/lowered/pass/assign_registers.cpp b/src/common/snippets/src/lowered/pass/assign_registers.cpp index 7755cfebe7cc38..d49cf8d63155a7 100644 --- a/src/common/snippets/src/lowered/pass/assign_registers.cpp +++ b/src/common/snippets/src/lowered/pass/assign_registers.cpp @@ -64,7 +64,7 @@ bool AssignRegisters::run(LinearIR& linear_ir) { } else if (const auto& buffer = ov::as_type_ptr(op)) { const auto buffer_id = buffer->get_id(); // All buffers have one common data pointer - if (buffer->is_intermediate_memory()) { + if (ov::is_type(buffer)) { manually_assigned_gprs[expr->get_input_port_connector(0)] = static_cast(num_results + num_parameters + buffer_id); } diff --git a/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp b/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp index 8d4a529c1667ca..79c9a115718c1f 100644 --- a/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp +++ b/src/common/snippets/src/lowered/pass/cleanup_loop_offsets.cpp @@ -48,16 +48,23 @@ bool CleanupLoopOffsets::run(LinearIR& linear_ir) { const auto& found = per_port_connector_offset.find(managed_connector); if (found != per_port_connector_offset.end()) { // Since data ptr is incremented on [ptr_increment x increment], - // we should guarantee proportionality of ptr shifts + // we should guarantee proportionality of ptr shifts. 
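An aside before the new guard below: the proportional-fold rule can be sanity-checked standalone with the worked numbers from the surrounding comment (a sketch, not part of the patch):

#include <cassert>

int main() {
    // Numbers from the comment: the inner loop leaves final_offset[0] = -640,
    // the outer loop has ptr_increment[0] = 20 and increment (Inc) = 32.
    const long long outer_increment = 32;
    const long long outer_ptr_increment = 20;
    const long long inner_final_offset = -640;

    // Fold the inner finalization offset into the outer per-iteration shift:
    const long long folded = inner_final_offset + outer_ptr_increment * outer_increment; // -640 + 640 = 0
    assert(folded % outer_increment == 0);           // guard: fold only if proportionality survives
    const long long new_outer_ptr_increment = folded / outer_increment;
    assert(new_outer_ptr_increment == 0);            // matches (-640 + 20 * 32) / 32 = 0
    return 0;
}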
+            // If the data ptr can't be proportionally shifted, the optimization is not applied.
             // For example,
             // Inner Loop: WA = 32, Inc = 1, ptr_increment[0] = 20, final_offset[0] = -640
             // Outer Loop: WA = 70, Inc = 32, ptr_increment[0] = 20, final_offset[0] = -1400
             // To preserve data ptr shift proportionality, we have to calculate it as follows:
             //     outer_ptr_increment[0] = (inner_final_offset[0] + outer_ptr_increment[0] * outer_Inc) / outer_Inc
             //     outer_ptr_increment[0] = (-640 + 20 x 32) / 32 = 0
-            outer_ptr_increments[i] = (fin_offsets[found->second] + outer_ptr_increments[i] * outer_increment) / outer_increment;
-            fin_offsets[found->second] = 0;
-            is_modified = true;
+
+            const auto full_outer_increment = outer_ptr_increments[i] * outer_increment;
+            const auto new_final_outer_increment = full_outer_increment + fin_offsets[found->second];
+
+            if (new_final_outer_increment % outer_increment == 0) {
+                outer_ptr_increments[i] = new_final_outer_increment / outer_increment;
+                fin_offsets[found->second] = 0;
+                is_modified = true;
+            }
         }
     }
     outer_loop_end->set_ptr_increments(outer_ptr_increments);
diff --git a/src/common/snippets/src/lowered/pass/fuse_loops.cpp b/src/common/snippets/src/lowered/pass/fuse_loops.cpp
index 39fe42c49dd810..1738d6d8fe9574 100644
--- a/src/common/snippets/src/lowered/pass/fuse_loops.cpp
+++ b/src/common/snippets/src/lowered/pass/fuse_loops.cpp
@@ -28,7 +28,7 @@ bool FuseLoops::loop_ports_are_compatible(const LinearIR::LoopManagerPtr& loop_m
                                           const size_t loop_lower_id,
                                           const size_t loop_upper_id) {
     const auto loop_lower = loop_manager->get_loop_info(loop_lower_id);
-    for (const auto& entry : loop_lower->entry_points) {
+    for (const auto& entry : loop_lower->get_entry_points()) {
         const auto& src_port = entry.expr_port->get_port_connector_ptr()->get_source();
         if (is_loop_id_found(src_port.get_expr()->get_loop_ids(), loop_upper_id)) {
             if (!entry.is_incremented)
@@ -36,14 +36,16 @@ bool FuseLoops::loop_ports_are_compatible(const LinearIR::LoopManagerPtr& loop_m
             auto src_loop_port = loop_manager->get_loop_port_by_expr_port(src_port, loop_upper_id);
             if (!src_loop_port.is_incremented)
                 return false;
+            if (entry.dim_idx != src_loop_port.dim_idx)
+                return false;
         }
     }
     return true;
 }
 
 bool FuseLoops::can_be_fused(const LoopInfoPtr& loop_current, const LoopInfoPtr& loop_target) {
-    auto current_work_amount = loop_current->work_amount;
-    auto target_work_amount = loop_target->work_amount;
+    auto current_work_amount = loop_current->get_work_amount();
+    auto target_work_amount = loop_target->get_work_amount();
     // Loop fusion is supported only if Loops have equal increments and equal/broadcastable work amounts.
// Note: For example, Broadcastable work amounts are possible in the following case: // Relu_0 [16x1] Relu_1 [16x128] @@ -54,9 +56,8 @@ bool FuseLoops::can_be_fused(const LoopInfoPtr& loop_current, const LoopInfoPtr& // - Relu_1 and Add with work amount `128` and increment `vector size` // We can fuse them into one Loop with work amount `128` and increment `vector size` const auto supported_work_amount = current_work_amount == target_work_amount || current_work_amount == 1 || target_work_amount == 1; - const auto supported_increment = loop_current->increment == loop_target->increment; - const auto supported_dim_idxs = loop_current->dim_idx == loop_target->dim_idx; - return supported_work_amount && supported_increment && supported_dim_idxs; + const auto supported_increment = loop_current->get_increment() == loop_target->get_increment(); + return supported_work_amount && supported_increment; } void FuseLoops::move(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& loop_manager, size_t loop_id, @@ -102,8 +103,8 @@ bool FuseLoops::fuse_upper_into_current(LinearIR& linear_ir, const LinearIR::Loo // We can fuse Loop_up to Loop_down only in cases when other consumers of Loop_up are after Loop_down // Because Loop_up should be explicitly moved before Loop_down in linear IR, and we must save control dependency bool is_fusion_allowed = true; - for (size_t i = 0; i < loop_target->exit_points.size() && is_fusion_allowed; ++i) { - const auto target_exit_point = loop_target->exit_points[i]; + for (size_t i = 0; i < loop_target->get_exit_points().size() && is_fusion_allowed; ++i) { + const auto target_exit_point = loop_target->get_exit_points()[i]; const auto consumer_inputs = target_exit_point.expr_port->get_connected_ports(); for (const auto& consumer_input : consumer_inputs) { const auto& consumer = consumer_input.get_expr(); @@ -124,10 +125,10 @@ bool FuseLoops::fuse_upper_into_current(LinearIR& linear_ir, const LinearIR::Loo loop_manager->get_loop_bounds(linear_ir, target_loop_id, target_loop_begin_pos, target_loop_end_pos); loop_manager->fuse_loops(target_loop_begin_pos, target_loop_end_pos, target_loop_id, current_loop_id, false); // Update work_amount for Loop (increment is constant because increments must be the identical for fusion): - loop_current->work_amount = std::max(loop_current->work_amount, loop_target->work_amount); + loop_current->set_work_amount(std::max(loop_current->get_work_amount(), loop_target->get_work_amount())); // If one of the Loops is outer for nested loops that splits the same dimension, // after fusion new common Loop save this status - loop_current->outer_splited_loop = loop_current->outer_splited_loop || loop_target->outer_splited_loop; + loop_current->set_outer_splited_loop(loop_current->get_outer_splited_loop() || loop_target->get_outer_splited_loop()); const auto insertion_place = current_loop_begin_pos; const auto is_move_needed = target_loop_end_pos != current_loop_begin_pos; @@ -152,8 +153,8 @@ bool FuseLoops::fuse_lower_into_current(LinearIR& linear_ir, const LinearIR::Loo // We can fuse Loop_down to Loop_up only in cases when other parents of Loop_down are before Loop_up // Because Loop_down should be explicitly moved after Loop_up in linear IR, and we must save control dependency bool is_fusion_allowed = true; - for (size_t i = 0; i < loop_target->entry_points.size() && is_fusion_allowed; ++i) { - const auto target_entry_port = loop_target->entry_points[i]; + for (size_t i = 0; i < loop_target->get_entry_points().size() && is_fusion_allowed; ++i) { + const 
auto target_entry_port = loop_target->get_entry_points()[i]; const auto parent_expr_output = *target_entry_port.expr_port->get_connected_ports().begin(); const auto& parent_expr = parent_expr_output.get_expr(); if (ov::is_type(parent_expr->get_node()) || parent_expr == current_exit_point->get_expr()) @@ -169,10 +170,10 @@ bool FuseLoops::fuse_lower_into_current(LinearIR& linear_ir, const LinearIR::Loo loop_manager->get_loop_bounds(linear_ir, target_loop_id, target_loop_begin_pos, target_loop_end_pos); loop_manager->fuse_loops(target_loop_begin_pos, target_loop_end_pos, current_loop_id, target_loop_id); // Update work_amount for Loop (increment is constant because increments must be the identical for fusion): - loop_current->work_amount = std::max(loop_current->work_amount, loop_target->work_amount); + loop_current->set_work_amount(std::max(loop_current->get_work_amount(), loop_target->get_work_amount())); // If one of the Loops is outer for nested loops that splits the same dimension, // after fusion new common Loop save this status - loop_current->outer_splited_loop = loop_current->outer_splited_loop || loop_target->outer_splited_loop; + loop_current->set_outer_splited_loop(loop_current->get_outer_splited_loop() || loop_target->get_outer_splited_loop()); const auto insertion_place = current_loop_end_pos; const auto is_move_needed = insertion_place != target_loop_begin_pos; @@ -221,7 +222,7 @@ bool FuseLoops::run(LinearIR& linear_ir) { // Loop_0 (Upper) | // | => | // Loop_1 (Current) Loop_0 + Loop_1 => new `Loop_1` - auto entry_points = current_loop_info->entry_points; + auto entry_points = current_loop_info->get_entry_points(); bool was_fusion_up = false; for (size_t in_port = 0; in_port < entry_points.size() && !was_fusion_up; ++in_port) { const auto entry_point = entry_points[in_port]; @@ -259,13 +260,13 @@ bool FuseLoops::run(LinearIR& linear_ir) { } // If Loops were fused and there are new entry_points, we should check for possible fusion again - if (was_fusion_up && entry_points != current_loop_info->entry_points) + if (was_fusion_up && entry_points != current_loop_info->get_entry_points()) continue; // Loop_0 (Current) Loop_0 + Loop_1 => new `Loop_0` // | => | // Loop_1 (Lower) | - auto exit_points = current_loop_info->exit_points; + auto exit_points = current_loop_info->get_exit_points(); bool was_fusion_down = false; for (size_t out_port = 0; out_port < exit_points.size() && !was_fusion_down; ++out_port) { const auto exit_point = exit_points[out_port]; diff --git a/src/common/snippets/src/lowered/pass/identify_buffers.cpp b/src/common/snippets/src/lowered/pass/identify_buffers.cpp index a92cd875daa5a5..7b19693c7c3b7c 100644 --- a/src/common/snippets/src/lowered/pass/identify_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/identify_buffers.cpp @@ -74,48 +74,16 @@ void IdentifyBuffers::update_adj_matrix(const std::pair IdentifyBuffers::create_adjacency_matrix(const LinearIR& linear_ir, const BufferPool& pool) { - // There are several sync points for adjacency check: - // 1. Loop because only in Loop we increment pointers. So if some Buffers in the one Loop have conflict - // (cannot be inplace: the different ptr increment and data sizes) they are called as adjacent - // 2. Brgemm because its blocking implementation requires Buffers with unique memory on all inputs and outputs + // The sync point to check for adjacency is Loop because only in Loop we increment pointers. 
+    // So if some Buffers in the same Loop have a conflict (cannot be inplace: different ptr increments and data sizes),
+    // they are considered adjacent
     const auto size = pool.size();
     std::vector<bool> adj(size * size, false);
     for (size_t i = 0; i < size; ++i)
         adj[index(size, i, i)] = true;
 
-    auto is_buffer = [](const ExpressionPort& port) {
-        return ov::is_type<op::Buffer>(port.get_expr()->get_node());
-    };
-
     for (auto expr_it = linear_ir.cbegin(); expr_it != linear_ir.cend(); expr_it++) {
         const auto &expr = *expr_it;
-        if (const auto brgemm = ov::as_type_ptr<op::Brgemm>(expr->get_node())) {
-            const auto consumers = expr->get_output_port_connector(0)->get_consumers();
-
-            auto buffer_it = std::find_if(consumers.begin(), consumers.end(), is_buffer);
-            if (buffer_it == consumers.end())
-                continue;
-            OPENVINO_ASSERT(std::count_if(consumers.begin(), consumers.end(), is_buffer) == 1, "Brgemm mustn't have more than 1 consumer buffer");
-
-            BufferPool adjacency_buffers;
-            adjacency_buffers.push_back(buffer_it->get_expr());
-
-            for (const auto& input_connector : expr->get_input_port_connectors()) {
-                const auto parent_expr = input_connector->get_source().get_expr();
-                if (ov::is_type<op::Buffer>(parent_expr->get_node())) {
-                    adjacency_buffers.push_back(parent_expr);
-                }
-            }
-            for (auto buffer_it = adjacency_buffers.begin(); buffer_it != adjacency_buffers.end(); ++buffer_it) {
-                for (auto neighbour_it = std::next(buffer_it); neighbour_it != adjacency_buffers.end(); ++neighbour_it) {
-                    const auto buffer_idx = get_buffer_idx(*buffer_it, pool);
-                    const auto neighbour_idx = get_buffer_idx(*neighbour_it, pool);
-                    adj[index(size, neighbour_idx, buffer_idx)] = adj[index(size, buffer_idx, neighbour_idx)] = true;
-                }
-            }
-            continue;
-        }
-
         const auto& loop_end = ov::as_type_ptr<op::LoopEnd>(expr->get_node());
         if (!loop_end)
             continue;
@@ -234,9 +202,7 @@ auto IdentifyBuffers::coloring(BufferPool& buffers, std::vector<bool>& adj) -> s
 bool IdentifyBuffers::run(LinearIR& linear_ir) {
     OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::IdentifyBuffers")
 
-    // Unite Buffers using Graph coloring algorithm.
-    // Notes: We identify only Buffer with Intermediate memory because Buffers with new memory are used only in Brgemm case
-    //        so these Buffers are always IntermediateBuffer nonadjacent
+    // Identify Buffers using Graph coloring algorithm.
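For intuition, a standalone sketch of the coloring idea (buffers are vertices, conflicts are edges, every color becomes one shared Buffer ID). This is a generic greedy coloring under those assumptions, not necessarily the exact algorithm inside IdentifyBuffers::coloring:

#include <cstddef>
#include <vector>

// Greedy coloring over a boolean adjacency matrix: each vertex gets the
// smallest color not already used by a previously colored neighbour.
std::vector<std::size_t> greedy_coloring(const std::vector<bool>& adj, std::size_t n) {
    std::vector<std::size_t> color(n, 0);
    for (std::size_t v = 1; v < n; ++v) {
        std::vector<bool> taken(n, false);
        for (std::size_t u = 0; u < v; ++u)
            if (adj[v * n + u])
                taken[color[u]] = true;
        std::size_t c = 0;
        while (taken[c]) ++c;
        color[v] = c;
    }
    return color;
}

int main() {
    // Three buffers where only 0 and 1 conflict: they get different colors (IDs),
    // while buffer 2 can safely reuse the color of buffer 0.
    const std::size_t n = 3;
    std::vector<bool> adj(n * n, false);
    adj[0 * n + 1] = adj[1 * n + 0] = true;
    const auto color = greedy_coloring(adj, n);
    return (color[0] != color[1] && color[2] == color[0]) ? 0 : 1;
}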
BufferPool buffer_pool; for (const auto& expr : linear_ir) { diff --git a/src/common/snippets/src/lowered/pass/init_loops.cpp b/src/common/snippets/src/lowered/pass/init_loops.cpp index 8128ea0253d2a7..68e8cc7757e13f 100644 --- a/src/common/snippets/src/lowered/pass/init_loops.cpp +++ b/src/common/snippets/src/lowered/pass/init_loops.cpp @@ -37,63 +37,75 @@ int64_t get_output_stride(size_t dim, const VectorDims& shape) { InitLoops::InitLoops() : Pass() {} -void InitLoops::init_ptr_increments(std::vector& loop_inputs, std::vector& loop_outputs, size_t work_amount, size_t dim_idx) { - for (auto& loop_input : loop_inputs) { - loop_input.ptr_increment = 0; - if (loop_input.is_incremented) { - const auto& port = loop_input.expr_port; +void InitLoops::init_ptr_increments(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { + const auto work_amount = loop_info->get_work_amount(); + auto loop_entries = loop_info->get_entry_points(); + auto loop_exits = loop_info->get_exit_points(); + + for (auto& loop_entry : loop_entries) { + loop_entry.ptr_increment = 0; + if (loop_entry.is_incremented) { + const auto& port = loop_entry.expr_port; const auto source = *port->get_connected_ports().begin(); const auto loop_ids = port->get_expr()->get_loop_ids(); const auto& layout = port->get_descriptor_ptr()->get_layout(); const auto& shape = port->get_descriptor_ptr()->get_shape(); - const auto& dim = *(layout.rbegin() + dim_idx); + const auto& dim = *(layout.rbegin() + loop_entry.dim_idx); // If relevant dim is not broadcasted, then ptr_increment is the dim stride in the new layout if (!(shape[dim] == 1 && work_amount != 1)) { // Input layout shows how we should read data by which order and strides - loop_input.ptr_increment = get_input_stride(dim, source.get_descriptor_ptr()->get_layout(), shape); + loop_entry.ptr_increment = get_input_stride(dim, source.get_descriptor_ptr()->get_layout(), shape); } } } - for (auto& loop_output : loop_outputs) { - loop_output.ptr_increment = 0; - if (loop_output.is_incremented) { - const auto& port = loop_output.expr_port; + for (auto& loop_exit : loop_exits) { + loop_exit.ptr_increment = 0; + if (loop_exit.is_incremented) { + const auto& port = loop_exit.expr_port; const auto loop_ids = port->get_expr()->get_loop_ids(); const auto& layout = port->get_descriptor_ptr()->get_layout(); const auto& shape = port->get_descriptor_ptr()->get_shape(); - const auto original_dim = layout.size() - 1 - dim_idx; + const auto original_dim = layout.size() - 1 - loop_exit.dim_idx; const auto& dim = std::distance(layout.cbegin(), std::find(layout.cbegin(), layout.cend(), original_dim)); // If relevant dim is not broadcasted, then ptr_increment is the dim stride in the new layout if (!(shape[dim] == 1 && work_amount != 1)) { // Output layout shows how we already written data by which order and strides - loop_output.ptr_increment = get_output_stride(dim, shape); + loop_exit.ptr_increment = get_output_stride(dim, shape); } } } + loop_info->set_entry_points(loop_entries); + loop_info->set_exit_points(loop_exits); } -void InitLoops::init_finalization_offsets(std::vector& loop_inputs, - std::vector& loop_outputs, - size_t work_amount) { - for (auto& loop_input : loop_inputs) { - loop_input.finalization_offset = -1 * loop_input.ptr_increment * work_amount; +void InitLoops::init_finalization_offsets(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { + const auto work_amount = loop_info->get_work_amount(); + auto loop_entries = loop_info->get_entry_points(); + auto loop_exits = 
loop_info->get_exit_points(); + for (auto& loop_entry : loop_entries) { + loop_entry.finalization_offset = -1 * loop_entry.ptr_increment * work_amount; } - for (auto& loop_output : loop_outputs) { - loop_output.finalization_offset = -1 * loop_output.ptr_increment * work_amount; + for (auto& loop_exit : loop_exits) { + loop_exit.finalization_offset = -1 * loop_exit.ptr_increment * work_amount; } + loop_info->set_entry_points(loop_entries); + loop_info->set_exit_points(loop_exits); } -void InitLoops::init_element_type_sizes(std::vector& loop_inputs, - std::vector& loop_outputs) { - for (auto& loop_input : loop_inputs) { - const auto& port = loop_input.expr_port; - loop_input.data_size = static_cast(port->get_expr()->get_node()->get_input_element_type(port->get_index()).size()); +void InitLoops::init_element_type_sizes(const LinearIR::LoopManager::LoopInfoPtr& loop_info) { + auto loop_entries = loop_info->get_entry_points(); + auto loop_exits = loop_info->get_exit_points(); + for (auto& loop_entry : loop_entries) { + const auto& port = loop_entry.expr_port; + loop_entry.data_size = static_cast(port->get_expr()->get_node()->get_input_element_type(port->get_index()).size()); } - for (auto& loop_output : loop_outputs) { - const auto& port = loop_output.expr_port; - loop_output.data_size = static_cast(port->get_expr()->get_node()->get_output_element_type(port->get_index()).size()); + for (auto& loop_exit : loop_exits) { + const auto& port = loop_exit.expr_port; + loop_exit.data_size = static_cast(port->get_expr()->get_node()->get_output_element_type(port->get_index()).size()); } + loop_info->set_entry_points(loop_entries); + loop_info->set_exit_points(loop_exits); } bool InitLoops::run(LinearIR& linear_ir) { @@ -105,13 +117,9 @@ bool InitLoops::run(LinearIR& linear_ir) { const auto& loops = loop_manager->get_map(); for (const auto& loop : loops) { const auto loop_info = loop.second; - - const auto work_amount = loop_info->work_amount; - const auto dim_idx = loop_info->dim_idx; - - init_ptr_increments(loop_info->entry_points, loop_info->exit_points, work_amount, dim_idx); - init_finalization_offsets(loop_info->entry_points, loop_info->exit_points, work_amount); - init_element_type_sizes(loop_info->entry_points, loop_info->exit_points); + init_ptr_increments(loop_info); + init_finalization_offsets(loop_info); + init_element_type_sizes(loop_info); } return true; diff --git a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp index a70698580a61e3..723b97b5a25788 100644 --- a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp +++ b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp @@ -56,12 +56,7 @@ bool InsertBroadcastMove::run(LinearIR& linear_ir) { OPENVINO_ASSERT(last_dims[i] == 1, "Attempt to broadcast non-1 dimension. 
Target dim: ", broadcasted_dim, " This dim: ", last_dims[i]); - auto input_shape = descriptors[i]->get_shape(); - // Note that input_shape could be empty (aka ngraph scalar), so we can't just replace the last dim - if (input_shape.empty()) - input_shape.resize(1); - input_shape.back() = last_dims[i]; - const auto broadcast = std::make_shared(node->get_input_source_output(i), utils::vdims_to_pshape(input_shape)); + const auto broadcast = std::make_shared(node->get_input_source_output(i), broadcasted_dim); PortDescriptorUtils::set_port_descriptor_ptr(broadcast->output(0), connectors[i]->get_source().get_descriptor_ptr()->clone()); const auto broadcast_expr = linear_ir.create_expression(broadcast, {connectors[i]}); diff --git a/src/common/snippets/src/lowered/pass/insert_buffers.cpp b/src/common/snippets/src/lowered/pass/insert_buffers.cpp index aefaca42f4094e..81835a4ca390ae 100644 --- a/src/common/snippets/src/lowered/pass/insert_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/insert_buffers.cpp @@ -49,26 +49,52 @@ ov::Shape compute_allocation_shape(const LinearIR::LoopManagerPtr& loop_manager, return allocation_shape; } - auto set_rest_dims_to_ones = [&](const int filled_dims_count) { - for (int i = 0; i < static_cast(allocation_shape.size()) - filled_dims_count; ++i) { - allocation_shape[i] = 1; + // If subtensor is set, its information is used for allocation shape computation. Two situations are possible: + // 1. Buffer is outside the parent loop: the corresponding subtensor value is ignored, parent loop work amount is set instead + // 2. Buffer is inside the parent loop: the corresponding subtensor value is used in allocation shape. + // Since we can defenitely know which subtensor value corresponds to the loop only for 1st case + // (we can extract this info from loop exit port), we copy subtensor, and then replace subtensor values with parent loop work amount if needed. + // Example: + // Parent subtensor: [M_blk, N_blk] + // Buffer loop idces: [M_loop_idx], parent loop idces: [M_loop_idx, N_loop_idx] + // + // 1. Allocation shape is set to subtensor: [M_blk, N_blk] + // 2. Buffer is inside M_loop_idx loop => allocation shape is not changed + // 3. 
Buffer is outside N_loop_idx loop => the corresponding allocation shape value is replaced with N loop work amount + // So the result allocation shape is [M_blk, N_loop_work_amount] + const auto& subtensor = expr_port.get_descriptor_ptr()->get_subtensor(); + if (!subtensor.empty()) { + for (size_t i = 0; i < std::min(rank, subtensor.size()); ++i) { + auto& cur_val = *(allocation_shape.rbegin() + i); + const auto& subtensor_val = *(subtensor.rbegin() + i); + cur_val = std::min(cur_val, subtensor_val); + } + for (const auto& parent_loop : parent_loop_ids) { + if (std::find(buffer_loop_ids.begin(), buffer_loop_ids.end(), parent_loop) == buffer_loop_ids.end()) { + const auto loop_info = loop_manager->get_loop_info(parent_loop); + const auto& exit_points = loop_info->get_exit_points(); + auto it = std::find_if(exit_points.begin(), + exit_points.end(), + [&expr_port](const LinearIR::LoopManager::LoopPort& port) { + return *port.expr_port == expr_port; + }); + OPENVINO_ASSERT(it != exit_points.end(), "compute_allocation_shape: exit point of parent loop can not be found"); + const auto& loop_port = *it; + if (loop_port.is_incremented && loop_port.dim_idx < allocation_shape.size()) { + *(allocation_shape.rbegin() + loop_port.dim_idx) = loop_info->get_work_amount(); + } + } } - }; - - // In some cases it's possible to allocate less shape - // 1. Buffer and its parent are in the same loop: allocation size for the outer dimension can be extracted from loop increment - // 2. Buffer is outside the parent's loops: allocation size can be extracted from the corresponding loop work amount - // TODO: Use general logic with the help of memory counts for allocation shape computation - if (buffer_loop_ids.back() == parent_loop_ids.back()) { - const auto buffer_loop = loop_manager->get_loop_info(buffer_loop_ids.back()); - *(allocation_shape.rbegin() + 1) = buffer_loop->increment; - set_rest_dims_to_ones(2); } else { + // WA: In case of empty subtensors another information have to be used to update allocation shape. 
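
For illustration, the subtensor-based branch above can be reduced to the following standalone sketch (a simplification assuming a single parent loop and plain std::vector dims; all names are illustrative, not the actual snippets API):

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative only: clamp the trailing dims by the subtensor, then overwrite the
// dim owned by a parent loop that the buffer is *not* inside of with that loop's
// work amount, as described in the comment above.
std::vector<size_t> allocation_shape_sketch(std::vector<size_t> shape,
                                            const std::vector<size_t>& subtensor,
                                            size_t outer_dim_idx,      // dim index counted from the end
                                            size_t outer_work_amount,  // that parent loop's work amount
                                            bool buffer_inside_outer_loop) {
    for (size_t i = 0; i < std::min(shape.size(), subtensor.size()); ++i)
        *(shape.rbegin() + i) = std::min(*(shape.rbegin() + i), *(subtensor.rbegin() + i));
    if (!buffer_inside_outer_loop && outer_dim_idx < shape.size())
        *(shape.rbegin() + outer_dim_idx) = outer_work_amount;
    return shape;
}

// E.g. shape {M, N} with subtensor {M_blk, N_blk} and the buffer outside the N loop
// (dim_idx 0, work amount N_wa) yields {M_blk, N_wa}, matching the example above.
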
for (size_t i = 0; i < std::min(rank, parent_loop_ids.size()); ++i) { const auto loop = loop_manager->get_loop_info(*(parent_loop_ids.rbegin() + i)); - *(allocation_shape.rbegin() + i) = loop->work_amount; + OPENVINO_ASSERT(loop->get_dim_idx() == i, "compute_allocation_shape: eltwise loop has unexpected dimension index"); + *(allocation_shape.rbegin() + i) = loop->get_work_amount(); + } + for (int i = 0; i < allocation_rank - static_cast(parent_loop_ids.size()); ++i) { + allocation_shape[i] = 1; } - set_rest_dims_to_ones(static_cast(parent_loop_ids.size())); } return allocation_shape; } @@ -155,7 +181,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& parent_loops, parent_expr_output, m_buffer_allocation_rank); - const auto buffer = std::make_shared(parent->output(parent_port), allocation_shape); + const auto buffer = std::make_shared(parent->output(parent_port), allocation_shape); PortDescriptorUtils::set_port_descriptor_ptr(buffer->output(0), parent_expr_output.get_descriptor_ptr()->clone()); // Output connector is automatically filled from PortDescriptor const auto buffer_expr = linear_ir.create_expression(buffer, {input_connector}); @@ -248,7 +274,7 @@ void InsertBuffers::insertion(LinearIR& linear_ir, const LinearIR::constExprIt& current_loops, *exit_port, m_buffer_allocation_rank); - auto buffer = std::make_shared(node->output(port_idx), allocation_shape); + auto buffer = std::make_shared(node->output(port_idx), allocation_shape); PortDescriptorUtils::set_port_descriptor_ptr(buffer->output(0), exit_port->get_descriptor_ptr()->clone()); // We cannot insert Node output connector on Buffer output because not all consumers of Node needs Buffer // Example: @@ -275,8 +301,8 @@ bool InsertBuffers::run(LinearIR& linear_ir) { const auto loop_data_map = loop_manager->get_map(); for (const auto& loop_data : loop_data_map) { const auto loop_info = loop_data.second; - const auto loop_entries = loop_info->entry_points; - const auto loop_exits = loop_info->exit_points; + const auto loop_entries = loop_info->get_entry_points(); + const auto loop_exits = loop_info->get_exit_points(); // using begin() as expr_it because we work with LoopInfo, not expressions in Linear IR insertion(linear_ir, linear_ir.cbegin(), loop_manager, loop_entries, loop_exits); } diff --git a/src/common/snippets/src/lowered/pass/insert_load_store.cpp b/src/common/snippets/src/lowered/pass/insert_load_store.cpp index ff75a5be0e6c5c..75e70c9c553c88 100644 --- a/src/common/snippets/src/lowered/pass/insert_load_store.cpp +++ b/src/common/snippets/src/lowered/pass/insert_load_store.cpp @@ -122,9 +122,9 @@ bool InsertLoadStore::run(LinearIR& linear_ir) { modified |= insert_load(linear_ir, expr_it); } else if (ov::is_type(node)) { modified |= insert_store(linear_ir, expr_it); - } else if (auto buffer = ov::as_type_ptr(node)) { + } else if (ov::is_type(node)) { modified |= insert_load(linear_ir, expr_it); - if (buffer->is_intermediate_memory()) + if (ov::is_type(node)) modified |= insert_store(linear_ir, expr_it); } } diff --git a/src/common/snippets/src/lowered/pass/insert_loops.cpp b/src/common/snippets/src/lowered/pass/insert_loops.cpp index 32d4151cb38b95..3eab6e97df33fb 100644 --- a/src/common/snippets/src/lowered/pass/insert_loops.cpp +++ b/src/common/snippets/src/lowered/pass/insert_loops.cpp @@ -55,10 +55,10 @@ void InsertLoops::filter_ports(std::vector& loop_entries, std::vector< void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& loop_manager, size_t loop_id, bool 
has_outer_loop) { const auto loop_info = loop_manager->get_loop_info(loop_id); - auto loop_entries = loop_info->entry_points; - auto loop_exits = loop_info->exit_points; - const auto work_amount = loop_info->work_amount; - const auto work_amount_increment = loop_info->increment; + auto loop_entries = loop_info->get_entry_points(); + auto loop_exits = loop_info->get_exit_points(); + const auto work_amount = loop_info->get_work_amount(); + const auto work_amount_increment = loop_info->get_increment(); LinearIR::constExprIt loop_begin_pos, loop_end_pos; loop_manager->get_loop_bounds(linear_ir, loop_id, loop_begin_pos, loop_end_pos); @@ -67,15 +67,18 @@ void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& filter_ports(loop_entries, loop_exits); const auto in_out_num = loop_entries.size() + loop_exits.size(); + std::vector is_incremented; std::vector ptr_increments, finalization_offsets, io_data_sizes; std::vector loop_end_inputs; + is_incremented.reserve(in_out_num); ptr_increments.reserve(in_out_num); finalization_offsets.reserve(in_out_num); io_data_sizes.reserve(in_out_num); loop_end_inputs.reserve(in_out_num); - auto init_params = [&ptr_increments, &finalization_offsets, &io_data_sizes, &loop_end_inputs](const std::vector& ports) { + auto init_params = [&](const std::vector& ports) { for (const auto& port : ports) { + is_incremented.push_back(port.is_incremented); ptr_increments.push_back(port.ptr_increment); finalization_offsets.push_back(port.finalization_offset); io_data_sizes.push_back(port.data_size); @@ -90,8 +93,8 @@ void InsertLoops::insertion(LinearIR& linear_ir, const LinearIR::LoopManagerPtr& linear_ir.insert(loop_begin_pos, loop_begin_expr); const auto& loop_end = std::make_shared( - loop_begin->output(0), work_amount, work_amount_increment, ptr_increments, finalization_offsets, - io_data_sizes, loop_entries.size(), loop_exits.size(), loop_id); + loop_begin->output(0), work_amount, work_amount_increment, is_incremented, ptr_increments, + finalization_offsets, io_data_sizes, loop_entries.size(), loop_exits.size(), loop_id); loop_end->has_outer_loop = has_outer_loop; // Add LoopBegin port connector diff --git a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp index 50d684d4d7ddc3..cc685c1851157a 100644 --- a/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp +++ b/src/common/snippets/src/lowered/pass/insert_tail_loop.cpp @@ -8,48 +8,223 @@ #include "snippets/lowered/loop_manager.hpp" #include "snippets/lowered/pass/init_loops.hpp" #include "snippets/snippets_isa.hpp" +#include "snippets/utils.hpp" #include "snippets/itt.hpp" namespace ov { namespace snippets { namespace lowered { namespace pass { +void InsertTailLoop::propagate_updated_subtensor_through_loop(const LinearIR& linear_ir, + const LinearIR::LoopManager::LoopInfoPtr& loop_info, + LinearIR::container::const_iterator begin, + LinearIR::container::const_iterator end, + const size_t new_dim_value) { + std::map original_shapes; + // First step: set new dim value to the corresponding entry_points' dimensions + if (new_dim_value != existing_subtensor_value) { + for (const auto& port : loop_info->get_entry_points()) { + if (port.is_incremented) { + const auto& expr = port.expr_port->get_expr(); + const auto node = expr->get_node(); + auto desc = port.expr_port->get_descriptor_ptr(); + auto subtensor = desc->get_subtensor(); + if (port.dim_idx < subtensor.size()) { + *(subtensor.rbegin() + port.dim_idx) = new_dim_value; + 
desc->set_subtensor(subtensor);
+                }
+
+                const auto parent_desc = expr->get_input_port_connector(port.expr_port->get_index())->get_source().get_descriptor_ptr();
+                const auto& layout = parent_desc->get_layout();
+                const auto& shape = parent_desc->get_shape();
+                if (original_shapes.find(parent_desc) == original_shapes.end()) {
+                    original_shapes[parent_desc] = shape;
+                }
+                auto new_shape = shape;
+                new_shape[*(layout.rbegin() + port.dim_idx)] = new_dim_value;
+                parent_desc->set_shape(new_shape);
+            }
+        }
+    }
+
+    auto update_only_dim_idx_with_subtensor_value = [&](const LinearIR::LoopManager::LoopPort& port) {
+        if (port.is_incremented) {
+            auto desc = port.expr_port->get_descriptor_ptr();
+            const auto expr = port.expr_port->get_expr();
+            const auto parent_desc = expr->get_input_port_connector(port.expr_port->get_index())->get_source().get_descriptor_ptr();
+
+            const auto& layout = parent_desc->get_layout();
+            const auto& shape = parent_desc->get_shape();
+            const auto& desc_subtensor = desc->get_subtensor();
+            if (port.dim_idx < desc_subtensor.size()) {
+                if (original_shapes.find(parent_desc) == original_shapes.end()) {
+                    original_shapes[parent_desc] = shape;
+                }
+                auto new_shape = shape;
+                new_shape[*(layout.rbegin() + port.dim_idx)] = *(desc_subtensor.rbegin() + port.dim_idx);
+                parent_desc->set_shape(new_shape);
+            }
+        }
+    };
+
+    auto update_subtensors = [](const std::vector& descs, bool is_input) {
+        for (const auto& desc : descs) {
+            const auto& subtensor = desc->get_subtensor();
+            if (!subtensor.empty()) {
+                auto planar_dims = is_input ? snippets::utils::get_planar_vdims(desc->get_shape(), desc->get_layout())
+                                            : snippets::utils::get_preordered_vdims(desc->get_shape(), desc->get_layout());
+                const size_t subtensor_start = planar_dims.size() - subtensor.size();
+                VectorDims new_subtensor(planar_dims.begin() + subtensor_start, planar_dims.end());
+                for (size_t i = 0; i < new_subtensor.size(); ++i) {
+                    new_subtensor[i] = std::min(new_subtensor[i], subtensor[i]);
+                }
+                desc->set_subtensor(new_subtensor);
+            }
+        }
+    };
+
+    auto shape_inference_end_it = end;
+    const bool loop_by_last_dim = loop_info->get_dim_idx() == 0;
+    // Subtensors are updated using the shape inference infrastructure:
+    // for inner loops, the propagation function is called recursively
+    for (auto expr_it = begin; expr_it != end; expr_it++) {
+        const auto expr = *expr_it;
+        if (ov::is_type(expr->get_node()))
+            continue;
+        if (auto loop_begin = ov::as_type_ptr(expr->get_node())) {
+            const auto loop_end = loop_begin->get_loop_end();
+            const auto inner_loop_info = linear_ir.get_loop_manager()->get_loop_info(loop_end->get_id());
+            const auto inner_begin = std::next(expr_it);
+            const auto inner_end = linear_ir.find(linear_ir.get_expr_by_node(loop_end));
+
+            // The corresponding shapes of the inner loops' entry points must be updated using existing subtensor values
+            if (new_dim_value == existing_subtensor_value) {
+                for (const auto& port : loop_info->get_entry_points())
+                    update_only_dim_idx_with_subtensor_value(port);
+            }
+            propagate_updated_subtensor_through_loop(linear_ir, inner_loop_info, inner_begin, inner_end);
+            expr_it = inner_end;
+            continue;
+        }
+        if ((ov::is_type(expr_it->get()->get_node()) ||
+             ov::is_type(expr_it->get()->get_node())) &&
+            loop_by_last_dim) {
+            // WA: we have to break subtensor propagation if we try to propagate a new last dim through Broadcast nodes,
+            // which broadcast the last dim to its original dimension value anyway.
+            // This workaround might be avoided if blocked shapes were used for tail size propagation
+
shape_inference_end_it = expr_it; + break; + } + expr->updateShapes(); + update_subtensors(expr->get_input_port_descriptors(), true); + update_subtensors(expr->get_output_port_descriptors(), false); + } -std::shared_ptr InsertTailLoop::create_tail_loop(LinearIR& linear_ir, - LinearIR::constExprIt vector_begin, - LinearIR::constExprIt vector_end, - LinearIR::constExprIt& tail_begin, - LinearIR::constExprIt& tail_end, - const std::shared_ptr& vector_loop_end, - bool need_vector_loop, - size_t tail_size, - const std::vector& tail_finalization_offsets) { + // After subtensor propagation, the original shapes must be restored + for (const auto& elem : original_shapes) + elem.first->set_shape(elem.second); + for (auto expr_it = begin; expr_it != shape_inference_end_it; expr_it++) + (*expr_it)->updateShapes(); +} + +LinearIR::container InsertTailLoop::copy_loop(const LinearIR& linear_ir, const size_t loop_id) { + const auto& loop_manager = linear_ir.get_loop_manager(); + LinearIR::constExprIt loop_begin_pos, loop_end_pos; + loop_manager->get_loop_bounds(linear_ir, loop_id, loop_begin_pos, loop_end_pos, true); + ExressionMap expression_map; + const auto& loop_copy_range = LinearIR::deep_copy_range(loop_begin_pos, std::next(loop_end_pos), expression_map); + + const auto original_loop_info = loop_manager->get_loop_info(loop_id); + std::vector new_entry_points, new_exit_points; + // Clone loop ports from original loop info to new loop info + for (const auto& entry : original_loop_info->get_entry_points()) + new_entry_points.push_back(*entry.clone_with_new_expr(expression_map[entry.expr_port->get_expr().get()])); + for (const auto& exit : original_loop_info->get_exit_points()) + new_exit_points.push_back(*exit.clone_with_new_expr(expression_map[exit.expr_port->get_expr().get()])); + + for (const auto& elem : expression_map) { + const auto expr = elem.first->shared_from_this(); + const auto& new_expr = elem.second; + // Loop begin/end ops can't be loop ports + if (ov::is_type(expr->get_node())) + continue; + // Update loop info of all outer loops with new loop ports + const auto outer_loop_ids = LinearIR::LoopManager::get_outer_expr_loops(expr, loop_id); + for (size_t i = 0; i < expr->get_input_count(); ++i) + loop_manager->update_loops_port(outer_loop_ids, expr->get_input_port(i), {expr->get_input_port(i), new_expr->get_input_port(i)}, true); + for (size_t i = 0; i < expr->get_output_count(); ++i) + loop_manager->update_loops_port(outer_loop_ids, expr->get_output_port(i), {expr->get_output_port(i), new_expr->get_output_port(i)}, false); + } + + const auto new_loop_begin_pos = loop_copy_range.begin(); + const auto new_loop_end_pos = loop_copy_range.end(); + const auto new_id = loop_manager->replace_with_new_loop(linear_ir, + std::next(new_loop_begin_pos), + std::prev(new_loop_end_pos), + original_loop_info->get_work_amount(), + original_loop_info->get_increment(), + new_entry_points, + new_exit_points, + loop_id); + const auto loop_end = ov::as_type_ptr(std::prev(new_loop_end_pos)->get()->get_node()); + OPENVINO_ASSERT(loop_end, "Cloned Loop does not contain LoopEnd op at the expected place."); + loop_end->set_id(new_id); + return loop_copy_range; +} + +void InsertTailLoop::create_tail_loop(LinearIR& linear_ir, + LinearIR::constExprIt begin, + LinearIR::constExprIt end, + const std::shared_ptr& loop_end, + bool need_vector_loop, + size_t tail_size) { // tail is required => transform the body into a tail representation // tail loop is fake loop because for tail we should calculate only // finalization 
offsets which are supported by LoopEnd.
+    const auto& loop_manager = linear_ir.get_loop_manager();
+    const auto original_loop_id = loop_end->get_id();
+    auto original_loop_info = loop_manager->get_loop_info(original_loop_id);
+    auto tail_loop_info = original_loop_info;
     if (need_vector_loop) {
-        ExressionMap expression_map;
-        auto vector_loop_deep_copy = LinearIR::deep_copy_range(vector_begin, vector_end, expression_map);
-        tail_begin = linear_ir.insert(vector_end, vector_loop_deep_copy.begin(), vector_loop_deep_copy.end());
-        tail_end = vector_end;
-    } else {
-        tail_begin = vector_begin;
-        tail_end = vector_end;
+        const auto new_loop_range = copy_loop(linear_ir, original_loop_id);
+        const auto new_loop_end = ov::as_type_ptr(std::prev(new_loop_range.end())->get()->get_node());
+        OPENVINO_ASSERT(new_loop_end, "Cloned Loop does not contain LoopEnd op at the expected place.");
+        tail_loop_info = original_loop_info;
+        original_loop_info = loop_manager->get_loop_info(new_loop_end->get_id());
+
+        // Note: the new loop body is inserted before the original loop,
+        // so the new loop becomes the main vector loop and the original loop becomes the tail loop.
+        // This is done so that the original ops from the main body stay at the end:
+        // this allows us to conveniently interact with outer loops in further passes
+        linear_ir.insert(begin, new_loop_range.begin(), new_loop_range.end());
+
+        const auto new_vector_loop_wa = original_loop_info->get_work_amount() - tail_size;
+        original_loop_info->set_work_amount(new_vector_loop_wa);
+        new_loop_end->set_work_amount(new_vector_loop_wa);
+        original_loop_info->set_outer_splited_loop(tail_loop_info->get_outer_splited_loop());
+        // Note that finalization offsets should be applied after the last iteration.
+        // So if there is a tail, then we should apply offsets after it, but not now.
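
A minimal sketch of the arithmetic behind this split, assuming only a work amount, an increment, and a set of finalization offsets (the struct and function names are made up for illustration):

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch of the vector/tail split: the cloned (vector) loop takes the part of the
// work amount divisible by the increment, the original loop is re-purposed as the
// tail, and the vector loop's finalization offsets are zeroed so that the real
// offsets fire only after the tail iteration.
struct LoopSplitSketch {
    size_t vector_work_amount;
    size_t tail_size;  // also the tail loop's new work amount and increment
    std::vector<int64_t> vector_finalization_offsets;
};

LoopSplitSketch split_loop(size_t work_amount, size_t increment,
                           const std::vector<int64_t>& finalization_offsets) {
    const size_t tail_size = work_amount % increment;
    return {work_amount - tail_size, tail_size,
            std::vector<int64_t>(finalization_offsets.size(), 0)};
}

// E.g. work_amount = 17, increment = 8: the vector loop runs 16 elements in two
// steps of 8, and the tail loop handles the remaining 1.
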
+ new_loop_end->set_finalization_offsets(std::vector(loop_end->get_finalization_offsets().size(), 0)); } + loop_end->set_increment(tail_size); + loop_end->set_work_amount(tail_size); + tail_loop_info->set_increment(tail_size); + tail_loop_info->set_work_amount(tail_size); // We have to check the loop body for any nested loops that work on the same dimension // and rescale their work_amount and increment accordingly - const auto& loop_manager = linear_ir.get_loop_manager(); - const auto& current_loop_Info = loop_manager->get_loop_info(vector_loop_end->get_id()); - if (current_loop_Info->outer_splited_loop) { - const auto current_dim_idx = current_loop_Info->dim_idx; - for (auto it = std::next(tail_begin); it != std::prev(tail_end); ++it) { + if (original_loop_info->get_outer_splited_loop()) { + const auto current_dim_idx = original_loop_info->get_dim_idx(); + OPENVINO_ASSERT(current_dim_idx != LinearIR::LoopManager::LoopInfo::UNDEFINED_DIM_IDX, + "Outer splitted loop unexpectedly iterates by several dimension indices"); + for (auto it = std::next(begin); it != std::prev(end); ++it) { const auto& expr = *it; const auto inner_loop_end = ov::as_type_ptr(expr->get_node()); if (!inner_loop_end) continue; - const auto loop_info = loop_manager->get_loop_info(inner_loop_end->get_id()); - if (loop_info->dim_idx != current_dim_idx) + const auto inner_loop_info = loop_manager->get_loop_info(inner_loop_end->get_id()); + const auto inner_dim_idx = inner_loop_info->get_dim_idx(); + if (inner_dim_idx != current_dim_idx) continue; const auto inner_loop_begin = inner_loop_end->get_loop_begin(); const auto inner_tail_work_amount = static_cast(inner_loop_end->get_work_amount()); @@ -61,21 +236,14 @@ std::shared_ptr InsertTailLoop::create_tail_loop(LinearIR& linear_i inner_loop_end->set_work_amount(tail_size); inner_loop_end->set_increment(std::min(inner_tail_increment, tail_size)); inner_loop_end->set_finalization_offsets(inner_finalization_offsets); - const auto inner_loop_begin_it = std::find(tail_begin, it, linear_ir.get_expr_by_node(inner_loop_begin)); - const auto inner_loop_end_it = std::next(tail_end); + const auto inner_loop_begin_it = std::find(begin, it, linear_ir.get_expr_by_node(inner_loop_begin)); + const auto inner_loop_end_it = std::next(end); OPENVINO_ASSERT(inner_loop_begin_it != it, "LoopBegin has not been found!"); tail_transformations(linear_ir, inner_loop_begin_it, inner_loop_end_it, tail_size); } } - - tail_transformations(linear_ir, tail_begin, tail_end, tail_size); - std::shared_ptr tail_loop_end = ov::as_type_ptr((*tail_begin)->get_node())->get_loop_end(); - tail_loop_end->set_increment(tail_size); - // ptr increments were set to the old increment, need to update them in accordance with the new one - tail_loop_end->set_work_amount(tail_size); - tail_loop_end->set_finalization_offsets(tail_finalization_offsets); - tail_loop_end->has_outer_loop = vector_loop_end->has_outer_loop; - return tail_loop_end; + tail_transformations(linear_ir, begin, end, tail_size); + propagate_updated_subtensor_through_loop(linear_ir, tail_loop_info, std::next(begin), end, tail_size); } void InsertTailLoop::tail_transformations(LinearIR& linear_ir, @@ -106,21 +274,30 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir, // correct math calculations for ReduceMax and ReduceSum in scalar case. 
// Note: We find Maximum and Add ops because HorizonMax and HorizonSum are outside Loop,
            // so they are missed in
-            auto op = (*expr_it)->get_node();
+            const auto& expr = *expr_it;
+            const auto op = expr->get_node();
             if (config.m_need_fill_tail_register &&
                 (ov::is_type(op) ||
                  ov::is_type(op))) {
                 for (size_t i = 0; i < op->inputs().size(); ++i) {
                     if (auto fill = insertFill(op->input(i))) {
-                        const auto& input = expr_it->get()->get_input_port_connector(i);
+                        const auto& input = expr->get_input_port_connector(i);
                         const auto consumers = input->get_consumers();
+                        // If there are several consumers, the fill expression must be inserted before the first of them
+                        auto fst_consumer = std::min_element(consumers.cbegin(), consumers.cend(), [&](ExpressionPort lhs, ExpressionPort rhs) {
+                            auto lhs_it = linear_ir.find(lhs.get_expr());
+                            auto rhs_it = linear_ir.find(rhs.get_expr());
+                            return std::distance(linear_ir.cbegin(), lhs_it) < std::distance(linear_ir.cbegin(), rhs_it);
+                        });
+                        const auto insert_pos = linear_ir.find(fst_consumer->get_expr());
                         auto fill_expr = linear_ir.create_expression(fill, {input});
-                        linear_ir.insert(expr_it, fill_expr);
+                        linear_ir.insert(insert_pos, fill_expr);
                         linear_ir.replace_input(consumers, fill_expr->get_output_port_connector(0));
                         // in_reg == out_reg since we want to modify vector reg inplace
-                        const auto reg = expr_it->get()->get_input_port_descriptor(0)->get_reg();
+                        const auto reg = expr->get_input_port_descriptor(0)->get_reg();
                         fill_expr->get_input_port_descriptor(0)->set_reg(reg);
                         fill_expr->get_output_port_descriptor(0)->set_reg(reg);
+                        fill_expr->set_loop_ids(expr->get_loop_ids());
                     }
                 }
             } else if (const auto memory_access = std::dynamic_pointer_cast(op)) {
@@ -140,28 +317,6 @@ void InsertTailLoop::tail_transformations(LinearIR& linear_ir,
     }
 }
 
-bool InsertTailLoop::optimize_single_evaluation(const std::shared_ptr& loop) {
-    // *1* solo vector/tail loop + empty outer loop
-    //     => skip increments (both counter & ptr) : set evaluate_once flag
-    // *2* solo vector/tail loop + non-empty outer loop
-    //     => skip counter increments but perform ptr increments : set evaluate_once,
-    //        and perform pointer increments through finalization offsets
-    // *3* vector loop(s) + one tail loop
-    //     => vector as usual, tail depends on outer loop, see *1* and *2*
-    if (loop->get_work_amount() >= 2 * loop->get_increment())
-        return false;
-
-    std::vector new_finalization_offsets(loop->get_finalization_offsets());
-    const auto& ptr_increments = loop->get_ptr_increments();
-    const auto work_amount_incr = static_cast(loop->get_increment());
-    for (size_t i = 0; i < new_finalization_offsets.size(); i++) {
-        new_finalization_offsets[i] += ptr_increments[i] * work_amount_incr;
-    }
-    loop->set_finalization_offsets(new_finalization_offsets);
-    loop->set_evaluate_once(true);
-    return true;
-}
-
 bool InsertTailLoop::run(LinearIR& linear_ir) {
     OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::insertTailLoop")
     const auto& loop_manager = linear_ir.get_loop_manager();
@@ -174,37 +329,24 @@ bool InsertTailLoop::run(LinearIR& linear_ir) {
         if (!loop_end)
             continue;
 
+        const auto loop_info = loop_manager->get_loop_info(loop_end->get_id());
+        const auto& first_iter_handler = loop_info->get_first_iter_handler();
+        if (first_iter_handler) {
+            modified |= first_iter_handler(linear_ir, expr_it);
+        }
+
         const auto work_amount = loop_end->get_work_amount();
         const auto increment = loop_end->get_increment();
-        const auto loop_info = loop_manager->get_loop_info(loop_end->get_id());
         const auto
tail_size = work_amount % increment; - const auto need_tail = tail_size != 0; - const auto need_vector_loop = work_amount >= increment; - // Note, that finalization_offsets could be modified inside optimize_single_evaluation, - // so need to save them here to cover (evaluate_once vector with non-zero finalization_offsets + tail) - const auto tail_finalization_offsets = need_tail ? loop_end->get_finalization_offsets() : std::vector{}; - // vector loops are required => Just copy the body, original loop is already a vector one - if (need_vector_loop) { - // Note that finalization offsets should be applied after the last iteration. - // So if there is a tail, then we should apply offsets after it, but not now. - if (need_tail) - loop_end->set_finalization_offsets(std::vector(tail_finalization_offsets.size(), 0)); - - optimize_single_evaluation(loop_end); - } // tail is required => transform the body into a tail representation // tail loop is fake loop because for tail we should calculate only // finalization offsets which are supported by LoopEnd. - if (need_tail) { + if (tail_size != 0) { const auto loop_begin = loop_end->get_loop_begin(); const auto begin_it = linear_ir.find(linear_ir.get_expr_by_node(loop_begin)); - LinearIR::constExprIt tail_begin, tail_end; - const auto tail_loop_end = create_tail_loop(linear_ir, begin_it, std::next(expr_it), tail_begin, tail_end, - loop_end, need_vector_loop, tail_size, tail_finalization_offsets); - optimize_single_evaluation(tail_loop_end); - // Skip new tail loop. Note: tail_end refs to the next expression after LoopEnd of tail - expr_it = std::prev(tail_end); + const auto need_vector_loop = work_amount >= increment; + create_tail_loop(linear_ir, begin_it, std::next(expr_it), loop_end, need_vector_loop, tail_size); } modified = true; } diff --git a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp index cd4d57cfd2c941..48f86cb2092972 100644 --- a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp +++ b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp @@ -44,7 +44,7 @@ bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir) { continue; const auto& outshape = move_broadcast->get_output_partial_shape(0); - const auto broadcastload = std::make_shared(load->input_value(0), outshape, load->get_offset()); + const auto broadcastload = std::make_shared(load->input_value(0), *outshape.rbegin(), load->get_offset()); const auto move_consumers = expr->get_output_port_connector(0)->get_consumers(); PortDescriptorUtils::set_port_descriptor_ptr(broadcastload->output(0), expr->get_output_port(0).get_descriptor_ptr()->clone()); const auto broadcastload_expr = linear_ir.create_expression(broadcastload, { parent_expr->get_input_port_connector(0) }); diff --git a/src/common/snippets/src/lowered/pass/optimize_loop_single_evaluation.cpp b/src/common/snippets/src/lowered/pass/optimize_loop_single_evaluation.cpp new file mode 100644 index 00000000000000..317eb32f7ab1fe --- /dev/null +++ b/src/common/snippets/src/lowered/pass/optimize_loop_single_evaluation.cpp @@ -0,0 +1,53 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/optimize_loop_single_evaluation.hpp" + +#include "snippets/lowered/linear_ir.hpp" +#include "snippets/snippets_isa.hpp" +#include "snippets/itt.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace 
pass { + +bool OptimizeLoopSingleEvaluation::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::OptimizeLoopSingleEvaluation") + if (linear_ir.empty()) + return false; + + bool is_modified = false; + for (const auto& expr : linear_ir) { + if (auto loop_end = ov::as_type_ptr(expr->get_node())) { + // *1* solo vector/tail loop + empty outer loop + // => skip increments (both counter & ptr) : set evaluate_once flag + // *2* solo vector/tail loop + non-empty outer loop + // => skip counter increments but perform ptr increments : set evaluate_once, + // and perform pointer increments through finalization offsets + // *3* vector loop(s) + one tail loop + // => vector as usual, tail depends on outer loop, see *1* and *2* + if (loop_end->get_work_amount() >= 2 * loop_end->get_increment()) + continue; + + auto new_finalization_offsets = loop_end->get_finalization_offsets(); + const auto& ptr_increments = loop_end->get_ptr_increments(); + const auto work_amount_incr = static_cast(loop_end->get_increment()); + for (size_t i = 0; i < new_finalization_offsets.size(); i++) { + new_finalization_offsets[i] += ptr_increments[i] * work_amount_incr; + } + loop_end->set_finalization_offsets(new_finalization_offsets); + loop_end->set_ptr_increments(std::vector(new_finalization_offsets.size(), 0)); + loop_end->set_evaluate_once(true); + is_modified = true; + } + } + return is_modified; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov + diff --git a/src/common/snippets/src/lowered/pass/serialize_base.cpp b/src/common/snippets/src/lowered/pass/serialize_base.cpp new file mode 100644 index 00000000000000..1535497c90a571 --- /dev/null +++ b/src/common/snippets/src/lowered/pass/serialize_base.cpp @@ -0,0 +1,29 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/serialize_base.hpp" + +#include "snippets/itt.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +SerializeBase::SerializeBase(const std::string& xml_path) + : m_xml_path(xml_path), + m_bin_path(get_bin_path_from_xml(xml_path)) {} + +std::string SerializeBase::get_bin_path_from_xml(const std::string& xml_path) { +#if defined(__linux__) + return "/dev/null"; +#else + return ""; +#endif +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp b/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp new file mode 100644 index 00000000000000..6b2dab515b6d19 --- /dev/null +++ b/src/common/snippets/src/lowered/pass/serialize_control_flow.cpp @@ -0,0 +1,55 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/serialize_control_flow.hpp" + +#include "openvino/pass/serialize.hpp" +#include "snippets/itt.hpp" +#include "snippets/lowered/linear_ir.hpp" +#include "snippets/op/serialization_node.hpp" +#include "snippets/snippets_isa.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +bool SerializeControlFlow::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SerializeControlFlow") + if (linear_ir.empty()) + return false; + + auto first_node = std::make_shared(element::f32, Shape{}); + first_node->set_friendly_name("Start"); + first_node->get_rt_info()["execTimeMcs"] = 0; + std::shared_ptr 
serialization_node = first_node;
+
+    // This map allows getting the LoopBegin serialization node by the original LoopBegin node.
+    // It is used to draw an edge between LoopBegin and LoopEnd serialization nodes
+    std::map, std::shared_ptr> loops_map;
+    for (const auto& expr : linear_ir) {
+        const auto node = expr->get_node();
+        if (auto loop_end = ov::as_type_ptr(node)) {
+            OPENVINO_ASSERT(loops_map.count(loop_end->get_loop_begin()),
+                            "Serialization can't find LoopBegin that corresponds to LoopEnd with friendly name ",
+                            loop_end->get_friendly_name());
+            auto loop_begin_serialization_node = loops_map.at(loop_end->get_loop_begin());
+            serialization_node = std::make_shared(ov::OutputVector{serialization_node, loop_begin_serialization_node}, expr);
+        } else {
+            serialization_node = std::make_shared(ov::OutputVector{serialization_node}, expr);
+            if (auto loop_begin = ov::as_type_ptr(node)) {
+                loops_map[loop_begin] = serialization_node;
+            }
+        }
+    }
+    auto last_node = std::make_shared(serialization_node);
+    last_node->set_friendly_name("End");
+    const auto model = std::make_shared(ResultVector{last_node}, ParameterVector{first_node}, "Lowered_IR_Control_Flow");
+    return ov::pass::Serialize(m_xml_path, m_bin_path).run_on_model(model);
+}
+
+} // namespace pass
+} // namespace lowered
+} // namespace snippets
+} // namespace ov
diff --git a/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp b/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp
new file mode 100644
index 00000000000000..7ae3e7ce15e8af
--- /dev/null
+++ b/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp
@@ -0,0 +1,57 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "snippets/lowered/pass/serialize_data_flow.hpp"
+
+#include "openvino/pass/serialize.hpp"
+#include "snippets/itt.hpp"
+#include "snippets/lowered/linear_ir.hpp"
+#include "snippets/op/serialization_node.hpp"
+#include "snippets/snippets_isa.hpp"
+
+namespace ov {
+namespace snippets {
+namespace lowered {
+namespace pass {
+
+bool SerializeDataFlow::run(LinearIR& linear_ir) {
+    OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SerializeDataFlow")
+    if (linear_ir.empty())
+        return false;
+
+    ov::ResultVector results;
+    ov::ParameterVector parameters;
+    std::map> ops_map;
+    const auto serialization_mode = op::SerializationNode::SerializationMode::DATA_FLOW;
+    for (const auto& expr : linear_ir) {
+        const auto node = expr->get_node();
+        ov::OutputVector inputs(expr->get_input_count());
+        for (size_t i = 0; i < expr->get_input_count(); ++i) {
+            const auto& input_expr = expr->get_input_port_connector(i)->get_source().get_expr();
+            OPENVINO_ASSERT(ops_map.count(input_expr), "input node wasn't found during serialization");
+            inputs[i] = ops_map[input_expr]->output(expr->get_input_port_connector(i)->get_source().get_index());
+        }
+        if (auto ioexpr = std::dynamic_pointer_cast(expr)) {
+            if (ioexpr->get_type() == IOExpression::io_type::INPUT) {
+                const auto parameter = std::make_shared(element::f32, Shape{});
+                ops_map[ioexpr] = parameter;
+                parameters.push_back(parameter);
+            } else {
+                const auto result = std::make_shared(inputs[0]);
+                ops_map[ioexpr] = result;
+                results.push_back(result);
+            }
+        } else {
+            const auto serialization_node = std::make_shared(inputs, expr, serialization_mode);
+            ops_map[expr] = serialization_node;
+        }
+    }
+    const auto model = std::make_shared(results, parameters, "Lowered_IR_Data_Flow");
+    return ov::pass::Serialize(m_xml_path,
m_bin_path).run_on_model(model); +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp b/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp index b434e0f974beb3..4174f928352289 100644 --- a/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp +++ b/src/common/snippets/src/lowered/pass/softmax_decomposition.cpp @@ -52,7 +52,7 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { expr->get()->updateShapes(); return std::make_pair(expr, n); }; - const ov::PartialShape broadcasted_shape(softmax_expr->get_input_port_descriptor(0)->get_shape()); + const ov::Dimension broadcasted_dim(*(softmax_expr->get_input_port_descriptor(0)->get_shape().rbegin())); // Note: VectorBuffer is a special case, since it should go before the initial Load. So we handle it separately const auto& vector_buffer_max = push_node(std::make_shared()); // Init value of vector buffer for ReduceMax is -FLOAT_MIN. @@ -67,8 +67,7 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { std::vector{(*max.first)->get_input_port(0), (*max.first)->get_input_port(1)}, std::vector{(*max.first)->get_output_port(0)}); - const auto broadcast_horizon_max = push_node( - std::make_shared(horizon_max.second, broadcasted_shape)); + const auto broadcast_horizon_max = push_node(std::make_shared(horizon_max.second, broadcasted_dim)); const auto vector_buffer_sum = push_node(std::make_shared()); // Init value of vector buffer for ReduceSum is zero. const auto fill_sum = push_node(std::make_shared(vector_buffer_sum.second, 0, zero_constant)); @@ -90,7 +89,7 @@ bool SoftmaxDecomposition::run(LinearIR& linear_ir) { // Divide is expensive operation, so we decompose it into 1 / x * y, where 1 / x is executed outside loop const auto pow = push_node(std::make_shared(horizon_sum.second, -1.f)); - const auto broadcast_pow = push_node(std::make_shared(pow.second, broadcasted_shape)); + const auto broadcast_pow = push_node(std::make_shared(pow.second, broadcasted_dim)); // Mul (pseudo-Divide loop) const auto mul = push_node(std::make_shared(exp.second, broadcast_pow.second)); diff --git a/src/common/snippets/src/lowered/pass/split_loops.cpp b/src/common/snippets/src/lowered/pass/split_loops.cpp index af5a4d9cceda9a..ba036eca8011f9 100644 --- a/src/common/snippets/src/lowered/pass/split_loops.cpp +++ b/src/common/snippets/src/lowered/pass/split_loops.cpp @@ -15,13 +15,16 @@ namespace snippets { namespace lowered { namespace pass { using LoopManager = LinearIR::LoopManager; +using LoopInfo = LoopManager::LoopInfo; using LoopInfoPtr = LoopManager::LoopInfoPtr; SplitLoops::SplitLoops() : Pass() {} bool SplitLoops::can_be_split(const LoopInfoPtr& current, const LoopInfoPtr& parent) { - return current->work_amount == parent->work_amount && current->dim_idx == parent->dim_idx && - current->increment != parent->increment; + const auto current_dim_idx = current->get_dim_idx(); + const auto parent_dim_idx = parent->get_dim_idx(); + const bool equal_dim_idxes = current_dim_idx != LoopInfo::UNDEFINED_DIM_IDX && current_dim_idx == parent_dim_idx; + return current->get_work_amount() == parent->get_work_amount() && current->get_increment() != parent->get_increment() && equal_dim_idxes; } bool SplitLoops::run(LinearIR& linear_ir) { @@ -42,7 +45,7 @@ bool SplitLoops::run(LinearIR& linear_ir) { // be in the same set of outer loops. Otherwise they won't be fused. 
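
The splitting criterion in the can_be_split change above, and its effect below, can be summarized with a small standalone sketch (LoopParams and the worked numbers are illustrative assumptions, not the real LoopInfo API):

#include <cstddef>

struct LoopParams {
    size_t work_amount;
    size_t increment;
    size_t dim_idx;
};

// Mirrors the updated can_be_split: same work amount and same dimension index,
// but different increments. The real pass additionally treats an undefined
// dim_idx (UNDEFINED_DIM_IDX) as non-splittable.
bool can_be_split_sketch(const LoopParams& current, const LoopParams& parent) {
    return current.work_amount == parent.work_amount &&
           current.dim_idx == parent.dim_idx &&
           current.increment != parent.increment;
}

// E.g. current = {128, 32, 1} and parent = {128, 1, 1}: the parent (smaller
// increment) is split into an outer loop with work_amount 128 / increment 32
// and an inner loop with work_amount 32 / increment 1, so the outer part can
// be fused with `current`.
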
const auto& loop_id = loop_ids.front();
            const auto loop = loop_manager->get_loop_info(loop_id);
-            for (const auto& entry_point : loop->entry_points) {
+            for (const auto& entry_point : loop->get_entry_points()) {
                 const auto& parent_port = entry_point.expr_port->get_port_connector_ptr()->get_source();
                 const auto& parent_expr = parent_port.get_expr();
                 const auto parent_loop_ids = parent_expr->get_loop_ids();
@@ -58,27 +61,27 @@ bool SplitLoops::run(LinearIR& linear_ir) {
                 const auto parent_loop = loop_manager->get_loop_info(parent_loop_id);
                 if (can_be_split(loop, parent_loop)) {
                     loop_was_split = true;
-                    const bool split_parent = parent_loop->increment < loop->increment;
+                    const bool split_parent = parent_loop->get_increment() < loop->get_increment();
                     const auto& loop_to_split = split_parent ? parent_loop : loop;
                     const auto& loop_to_split_id = split_parent ? parent_loop_id : loop_id;
                     const auto& loop_to_fuse = !split_parent ? parent_loop : loop;
-                    loop_to_split->work_amount = loop_to_fuse->increment;
+                    loop_to_split->set_work_amount(loop_to_fuse->get_increment());
 
                     LinearIR::constExprIt loop_begin_pos, loop_end_pos;
                     LoopManager::get_loop_bounds(linear_ir,
-                                                 loop_to_split->entry_points,
-                                                 loop_to_split->exit_points,
+                                                 loop_to_split->get_entry_points(),
+                                                 loop_to_split->get_exit_points(),
                                                  loop_begin_pos, loop_end_pos, loop_to_split_id);
                     const auto split_loop_id = loop_manager->mark_loop(loop_begin_pos, loop_end_pos,
-                                                                       loop_to_fuse->work_amount,
-                                                                       loop_to_fuse->increment,
-                                                                       loop_to_split->dim_idx,
-                                                                       loop_to_split->entry_points,
-                                                                       loop_to_split->exit_points);
-                    loop_manager->get_loop_info(split_loop_id)->outer_splited_loop = true;
+                                                                       loop_to_fuse->get_work_amount(),
+                                                                       loop_to_fuse->get_increment(),
+                                                                       loop_to_split->get_dim_idx(),
+                                                                       loop_to_split->get_entry_points(),
+                                                                       loop_to_split->get_exit_points());
+                    loop_manager->get_loop_info(split_loop_id)->set_outer_splited_loop(true);
                     break;
                 }
             }
diff --git a/src/common/snippets/src/lowered/pass/validate_loops.cpp b/src/common/snippets/src/lowered/pass/validate_loops.cpp
index d618e84149b8fc..2377feec95c477 100644
--- a/src/common/snippets/src/lowered/pass/validate_loops.cpp
+++ b/src/common/snippets/src/lowered/pass/validate_loops.cpp
@@ -4,9 +4,10 @@
 #include "snippets/lowered/pass/validate_loops.hpp"
 
+#include "snippets/itt.hpp"
 #include "snippets/lowered/linear_ir.hpp"
 #include "snippets/lowered/loop_manager.hpp"
-#include "snippets/itt.hpp"
+#include "snippets/utils.hpp"
 
 namespace ov {
 namespace snippets {
@@ -40,8 +41,8 @@ bool ValidateLoops::run(LinearIR& linear_ir) {
     std::vector dim_indexes;
 
-    auto validate_loop_ports = [&loop_manager, &dim_indexes, &validated_nested_loops, &is_already_verified](std::vector& loop_ports) {
-        for (auto& loop_port : loop_ports) {
+    auto validate_loop_ports = [&loop_manager, &dim_indexes, &validated_nested_loops, &is_already_verified](const std::vector& loop_ports) {
+        for (const auto& loop_port : loop_ports) {
             const auto expr = loop_port.expr_port->get_expr();
             const auto loop_ids = expr->get_loop_ids();
             // If loop_ids of the current port is subsequence of already validated IDs, skip
@@ -53,27 +54,49 @@ bool ValidateLoops::run(LinearIR& linear_ir) {
             // Outer Loop -> Inner Loop
             for (size_t i = 0; i < loop_ids.size(); ++i) {
                 const auto id = loop_ids[i];
-                const auto dim_idx = loop_manager->get_loop_info(id)->dim_idx;
+                const auto dim_idx = loop_manager->get_loop_info(id)->get_dim_idx();
+                // if the loop has different dimension indices, it doesn't have to meet the split-loop related requirements
+                if (dim_idx == LinearIR::LoopManager::LoopInfo::UNDEFINED_DIM_IDX)
+                    continue;
                 if (std::find(dim_indexes.cbegin(), dim_indexes.cend(), dim_idx) != dim_indexes.cend()) {
                     OPENVINO_ASSERT(*dim_indexes.rbegin() == dim_idx,
                                     "Incorrect Loop ID configuration: the Loops with splitted dimension should be successively nested");
-                    OPENVINO_ASSERT(loop_manager->get_loop_info(loop_ids[i - 1])->increment == loop_manager->get_loop_info(id)->work_amount,
+                    OPENVINO_ASSERT(loop_manager->get_loop_info(loop_ids[i - 1])->get_increment() == loop_manager->get_loop_info(id)->get_work_amount(),
                                     "Incorrect Loop ID configuration: the Loops with splitted dimension should be successively nested");
-                    OPENVINO_ASSERT(loop_manager->get_loop_info(loop_ids[i - 1])->outer_splited_loop,
+                    OPENVINO_ASSERT(loop_manager->get_loop_info(loop_ids[i - 1])->get_outer_splited_loop(),
                                     "Incorrect Loop ID configuration: the outer Loop with splitted dimension should have `outer_splited_loop=True`");
                 }
-                OPENVINO_ASSERT(i == 0 || loop_manager->get_loop_info(loop_ids[i - 1])->dim_idx >= dim_idx,
-                                "Incorrect Loop ID configuration: dim_idx should be sorted in accordance with loop nesting");
                 dim_indexes.push_back(dim_idx);
             }
             validated_nested_loops.insert(loop_ids);
         }
     };
 
+    auto add_ports_dims_to_unique_dims = [](const std::vector& loop_ports, std::set& unique_dims, bool is_entry) {
+        for (const auto& loop_port : loop_ports) {
+            if (!loop_port.is_incremented)
+                continue;
+            const auto planar_shape = is_entry ? ov::snippets::utils::get_planar_vdims(*loop_port.expr_port)
+                                               : ov::snippets::utils::get_preordered_vdims(*loop_port.expr_port);
+            const auto& dim = *(planar_shape.rbegin() + loop_port.dim_idx);
+            // Since dim == 1 can be broadcasted to any value, it's not necessary to add it to unique dims
+            if (dim != 1)
+                unique_dims.insert(dim);
+        }
+    };
+
     for (const auto& pair : loops) {
         const auto& loop_info = pair.second;
-        validate_loop_ports(loop_info->entry_points);
-        validate_loop_ports(loop_info->exit_points);
+        const auto& entry_points = loop_info->get_entry_points();
+        const auto& exit_points = loop_info->get_exit_points();
+        validate_loop_ports(entry_points);
+        validate_loop_ports(exit_points);
+
+        std::set unique_dimensions;
+        add_ports_dims_to_unique_dims(entry_points, unique_dimensions, true);
+        add_ports_dims_to_unique_dims(exit_points, unique_dimensions, false);
+        OPENVINO_ASSERT(unique_dimensions.size() <= 1,
+                        "Loop ports have incompatible dimensions over which the loop iterates");
     }
 
     return true;
diff --git a/src/common/snippets/src/op/broadcastload.cpp b/src/common/snippets/src/op/broadcastload.cpp
index 68fd39c0610560..9775b2cc572421 100644
--- a/src/common/snippets/src/op/broadcastload.cpp
+++ b/src/common/snippets/src/op/broadcastload.cpp
@@ -11,8 +11,8 @@ namespace ov {
 namespace snippets {
 namespace op {
 
-BroadcastLoad::BroadcastLoad(const Output& x, ov::PartialShape shape, size_t offset)
-    : MemoryAccess({x}, std::set{0}, std::set{}), output_shape(std::move(shape)) {
+BroadcastLoad::BroadcastLoad(const Output& x, ov::Dimension bcast_dimension, size_t offset)
+    : MemoryAccess({x}, std::set{0}, std::set{}), bcast_dimension(std::move(bcast_dimension)) {
     set_input_port_descriptor({1, offset}, 0);
     constructor_validate_and_infer_types();
 }
@@ -25,7 +25,7 @@ bool BroadcastLoad::visit_attributes(AttributeVisitor& visitor) {
 std::shared_ptr BroadcastLoad::clone_with_new_inputs(const OutputVector& new_args) const {
     INTERNAL_OP_SCOPE(BroadcastLoad);
     check_new_args_count(this, new_args);
-    return std::make_shared(new_args.at(0), output_shape,
get_offset()); + return std::make_shared(new_args.at(0), bcast_dimension, get_offset()); } void BroadcastLoad::validate_and_infer_types() { @@ -34,7 +34,11 @@ void BroadcastLoad::validate_and_infer_types() { const auto output_ma_ports = get_memory_access_output_ports(); OPENVINO_ASSERT(input_ma_ports.size() == 1 && is_memory_access_input_port(0), "BroadcastLoad node must have memory access input port"); OPENVINO_ASSERT(output_ma_ports.size() == 0, "BroadcastLoad node mustn't have memory access output port"); - set_output_type(0, get_input_element_type(0), output_shape); + auto broadcasted_shape = get_input_partial_shape(0); + if (broadcasted_shape.size() == 0) + broadcasted_shape.resize(1); + *broadcasted_shape.rbegin() = bcast_dimension; + set_output_type(0, get_input_element_type(0), broadcasted_shape); } } // namespace op diff --git a/src/common/snippets/src/op/broadcastmove.cpp b/src/common/snippets/src/op/broadcastmove.cpp index 12242d4ba76c44..ee9b715077beef 100644 --- a/src/common/snippets/src/op/broadcastmove.cpp +++ b/src/common/snippets/src/op/broadcastmove.cpp @@ -11,23 +11,27 @@ namespace ov { namespace snippets { namespace op { -BroadcastMove::BroadcastMove(const Output& x, ov::PartialShape shape) : Op({x}), output_shape(std::move(shape)) { +BroadcastMove::BroadcastMove(const Output& x, ov::Dimension bcast_dimension) : Op({x}), bcast_dimension(std::move(bcast_dimension)) { constructor_validate_and_infer_types(); } bool BroadcastMove::visit_attributes(AttributeVisitor& visitor) { - visitor.on_attribute("output_shape", output_shape); + visitor.on_attribute("bcast_dimension", bcast_dimension); return true; } std::shared_ptr BroadcastMove::clone_with_new_inputs(const OutputVector& new_args) const { INTERNAL_OP_SCOPE(BroadcastMove); check_new_args_count(this, new_args); - return std::make_shared(new_args.at(0), output_shape); + return std::make_shared(new_args.at(0), bcast_dimension); } void BroadcastMove::validate_and_infer_types() { - set_output_type(0, get_input_element_type(0), this->output_shape); + auto broadcasted_shape = get_input_partial_shape(0); + if (broadcasted_shape.size() == 0) + broadcasted_shape.resize(1); + *broadcasted_shape.rbegin() = bcast_dimension; + set_output_type(0, get_input_element_type(0), broadcasted_shape); } } // namespace op diff --git a/src/common/snippets/src/op/buffer.cpp b/src/common/snippets/src/op/buffer.cpp index 615979ec5e3281..36bc185dd8b2a9 100644 --- a/src/common/snippets/src/op/buffer.cpp +++ b/src/common/snippets/src/op/buffer.cpp @@ -13,79 +13,96 @@ namespace ov { namespace snippets { namespace op { +Buffer::Buffer(const OutputVector& arguments, const ov::Shape& shape, size_t id, ov::element::Type element_type) + : Op(arguments), m_shape(shape), m_id(id), m_element_type(std::move(element_type)), m_offset(0) { + constructor_validate_and_infer_types(); +} + +bool Buffer::visit_attributes(AttributeVisitor& visitor) { + INTERNAL_OP_SCOPE(Buffer_visit_attributes); + visitor.on_attribute("allocation_shape", m_shape); + visitor.on_attribute("offset", m_offset); + visitor.on_attribute("id", m_id); + visitor.on_attribute("element_type", m_element_type); + return true; +} + +size_t Buffer::get_byte_size() const { + const auto shape = get_allocation_shape(); + return ov::shape_size(shape) * m_element_type.size(); +} -Buffer::Buffer(const ov::Shape& shape, ov::element::Type element_type, size_t id) - : Op(), m_type(Type::NewMemory), m_shape(shape), m_offset(0), m_id(id), m_element_type(std::move(element_type)) { 
+IntermediateMemoryBuffer::IntermediateMemoryBuffer(const ov::Output& arg, const ov::Shape& shape, size_t id) + : Buffer({arg}, shape, id) { constructor_validate_and_infer_types(); } -Buffer::Buffer(const ov::Output& arg, const ov::Shape& shape, size_t id) - : Op({arg}), m_type(Type::IntermediateMemory), m_shape(shape), m_offset(0), m_id(id) { +IntermediateMemoryBuffer::IntermediateMemoryBuffer(const ov::Output& arg, int32_t allocation_rank, size_t id) + : Buffer({arg}, compute_shape_from_allocation_rank(arg, allocation_rank), id) { constructor_validate_and_infer_types(); } -Buffer::Buffer(const ov::Output& arg, int32_t allocation_rank, size_t id) - : Op({arg}), m_type(Type::IntermediateMemory), m_offset(0), m_id(id) { +ov::Shape IntermediateMemoryBuffer::compute_shape_from_allocation_rank(const ov::Output& arg, int32_t allocation_rank) { const auto& pshape = arg.get_partial_shape(); OPENVINO_ASSERT(pshape.is_static(), "Buffer supports only static input shape"); const auto shape = pshape.get_shape(); const auto normalize_rank = utils::normalize_rank(static_cast(allocation_rank), shape.size()); const auto offset = static_cast(shape.size()) - normalize_rank; - m_shape = {shape.begin() + offset, shape.end()}; - constructor_validate_and_infer_types(); -} - -bool Buffer::visit_attributes(AttributeVisitor& visitor) { - INTERNAL_OP_SCOPE(Buffer_visit_attributes); - visitor.on_attribute("allocation_shape", m_shape); - visitor.on_attribute("offset", m_offset); - visitor.on_attribute("id", m_id); - visitor.on_attribute("element_type", m_element_type); - return true; + return ov::Shape{shape.begin() + offset, shape.end()}; } -void Buffer::validate_and_infer_types() { +void IntermediateMemoryBuffer::validate_and_infer_types() { INTERNAL_OP_SCOPE(Buffer_validate_and_infer_types); ov::PartialShape output_shape; - if (m_type == Type::NewMemory) { - OPENVINO_ASSERT(get_input_size() == 0, "Buffer with new allocated memory must to not have arguments!"); - output_shape = m_shape; - } else if (m_type == Type::IntermediateMemory) { - m_element_type = get_input_element_type(0); - output_shape = get_input_partial_shape(0); - } else { - OPENVINO_THROW("Buffer supports only the following types: NewMemory and IntermediateMemory"); - } + m_element_type = get_input_element_type(0); + output_shape = get_input_partial_shape(0); set_output_type(0, m_element_type, output_shape); } -std::shared_ptr Buffer::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr IntermediateMemoryBuffer::clone_with_new_inputs(const OutputVector& new_args) const { INTERNAL_OP_SCOPE(Buffer_clone_with_new_inputs); check_new_args_count(this, new_args); - std::shared_ptr new_buffer = nullptr; - if (m_type == Type::NewMemory) { - new_buffer = std::make_shared(m_shape, m_element_type, m_id); - } else if (m_type == Type::IntermediateMemory) { - new_buffer = std::make_shared(new_args.at(0), m_shape, m_id); - } else { - OPENVINO_THROW("Buffer supports only the following types: NewMemory and IntermediateMemory"); - } - new_buffer->m_offset = m_offset; + auto new_buffer = std::make_shared(new_args.at(0), m_shape, m_id); + new_buffer->set_offset(m_offset); return new_buffer; } -size_t Buffer::get_byte_size() const { - const auto shape = get_allocation_shape(); - return ov::shape_size(shape) * get_element_type().size(); +NewMemoryBuffer::NewMemoryBuffer(const ov::Shape& shape, size_t id, ov::element::Type element_type) + : Buffer({}, shape, id, element_type) { + constructor_validate_and_infer_types(); +} + +void 
NewMemoryBuffer::validate_and_infer_types() { + INTERNAL_OP_SCOPE(Buffer_validate_and_infer_types); + OPENVINO_ASSERT(get_input_size() == 0, "Buffer with new allocated memory mustn't have arguments!"); + set_output_type(0, m_element_type, m_shape); +} + +std::shared_ptr NewMemoryBuffer::clone_with_new_inputs(const OutputVector& new_args) const { + INTERNAL_OP_SCOPE(Buffer_clone_with_new_inputs); + check_new_args_count(this, new_args); + auto new_buffer = std::make_shared(m_shape, m_id, m_element_type); + new_buffer->set_offset(m_offset); + return new_buffer; } -void Buffer::set_element_type(ov::element::Type element_type) { - OPENVINO_ASSERT(is_new_memory(), "Only Buffer with NewMemory can change his output precision!"); +void NewMemoryBuffer::set_element_type(ov::element::Type element_type) { m_element_type = std::move(element_type); // Apply the change validate_and_infer_types(); } +NewMemoryBuffer::ShapeInfer::ShapeInfer(const std::shared_ptr& n) { + const auto& buffer = ov::as_type_ptr(n); + OPENVINO_ASSERT(buffer, "Got invalid node in NewMemoryBuffer::ShapeInfer"); + m_shape = buffer->get_shape(); +} + +IShapeInferSnippets::Result NewMemoryBuffer::ShapeInfer::infer(const std::vector& input_shapes) { + OPENVINO_ASSERT(input_shapes.empty(), "NewMemoryBuffer shape inference mustn't have input shapes"); + return {{m_shape}, ShapeInferStatus::success}; +} + } // namespace op } // namespace snippets } // namespace ov diff --git a/src/common/snippets/src/op/loop.cpp b/src/common/snippets/src/op/loop.cpp index 47e3936e388b53..32fed5a6b28ed7 100644 --- a/src/common/snippets/src/op/loop.cpp +++ b/src/common/snippets/src/op/loop.cpp @@ -49,31 +49,11 @@ bool LoopBegin::visit_attributes(AttributeVisitor &visitor) { } LoopEnd::LoopEnd(const Output& loop_begin, size_t work_amount, size_t work_amount_increment, - std::vector apply_increments, std::vector finalization_offsets, - std::vector element_type_sizes, size_t input_num, size_t output_num, size_t id) - : LoopBase({loop_begin}), - has_outer_loop(true), - m_finalization_offsets(std::move(finalization_offsets)), - m_element_type_sizes(std::move(element_type_sizes)), - m_work_amount(work_amount), - m_work_amount_increment(work_amount_increment), - m_input_num(input_num), - m_output_num(output_num), - m_id(id), - m_evaluate_once(false) { - m_ptr_increments.resize(apply_increments.size()); - std::transform(apply_increments.begin(), apply_increments.end(), m_ptr_increments.begin(), - [](bool apply) { - return apply ? 
1 : 0; - }); - constructor_validate_and_infer_types(); -} - -LoopEnd::LoopEnd(const Output& loop_begin, size_t work_amount, size_t work_amount_increment, - std::vector ptr_increments, std::vector finalization_offsets, + std::vector is_incremented, std::vector ptr_increments, std::vector finalization_offsets, std::vector element_type_sizes, size_t input_num, size_t output_num, size_t id) : LoopBase({loop_begin}), has_outer_loop(true), + m_is_incremented(std::move(is_incremented)), m_ptr_increments(std::move(ptr_increments)), m_finalization_offsets(std::move(finalization_offsets)), m_element_type_sizes(std::move(element_type_sizes)), @@ -88,7 +68,7 @@ LoopEnd::LoopEnd(const Output& loop_begin, size_t work_amount, size_t work std::shared_ptr LoopEnd::clone_with_new_inputs(const OutputVector& inputs) const { check_new_args_count(this, inputs); - const auto loop_end = std::make_shared(inputs.at(0), m_work_amount, m_work_amount_increment, m_ptr_increments, + const auto loop_end = std::make_shared(inputs.at(0), m_work_amount, m_work_amount_increment, m_is_incremented, m_ptr_increments, m_finalization_offsets, m_element_type_sizes, m_input_num, m_output_num, m_id); loop_end->m_evaluate_once = m_evaluate_once; return loop_end; @@ -105,6 +85,10 @@ const std::vector& LoopEnd::get_finalization_offsets() const { return m_finalization_offsets; } +const std::vector& LoopEnd::get_is_incremented() const { + return m_is_incremented; +} + const std::vector& LoopEnd::get_ptr_increments()const { return m_ptr_increments; } @@ -168,6 +152,10 @@ void LoopEnd::set_evaluate_once(bool once) { m_evaluate_once = once; } +void LoopEnd::set_id(size_t new_id) { + m_id = new_id; +} + void LoopEnd::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, get_input_size() == 1, "LoopEnd must have one input"); const auto loop_begin = ov::as_type_ptr(get_input_node_shared_ptr(0)); @@ -194,6 +182,7 @@ bool LoopEnd::visit_attributes(AttributeVisitor &visitor) { visitor.on_attribute("input_num", m_input_num); visitor.on_attribute("output_num", m_output_num); visitor.on_attribute("id", m_id); + visitor.on_attribute("evaluate_once", m_evaluate_once); return true; } diff --git a/src/common/snippets/src/op/serialization_node.cpp b/src/common/snippets/src/op/serialization_node.cpp index 6c521cc856f214..a91c63beb9402b 100644 --- a/src/common/snippets/src/op/serialization_node.cpp +++ b/src/common/snippets/src/op/serialization_node.cpp @@ -9,26 +9,34 @@ namespace ov { namespace snippets { namespace op { -SerializationNode::SerializationNode(const ov::OutputVector& args, const std::shared_ptr& expr) - : Op(args), m_expr(expr) { - if (!m_expr || !m_expr->get_node()) - OPENVINO_THROW("SerializationNode requires a valid expression with non-null node pointer"); - const auto &node = expr->get_node(); +SerializationNode::SerializationNode(const ov::OutputVector& args, + const std::shared_ptr& expr, + SerializationMode mode) + : Op(args), + m_expr(expr), + m_mode(mode) { + OPENVINO_ASSERT(m_expr && m_expr->get_node(), "SerializationNode requires a valid expression with non-null node pointer"); + const auto& node = expr->get_node(); + set_friendly_name(node->get_friendly_name()); std::string type = node->get_type_name(); - std::string name = node->get_friendly_name(); - // If node is a parameter, show another type name, so the node will be displayed correctly get_rt_info()["layerType"] = type == "Parameter" ? 
"ParameterLowered" : type; - set_friendly_name(name); constructor_validate_and_infer_types(); } void SerializationNode::validate_and_infer_types() { - set_output_type(0, element::f32, {}); + // If SerializationNode is used for control flow serialization, it always has one output + // (since it represents a linear execution order) + if (m_mode == SerializationMode::CONTROL_FLOW) { + set_output_type(0, element::f32, {}); + } else if (m_mode == SerializationMode::DATA_FLOW) { + for (size_t i = 0; i < m_expr->get_output_count(); ++i) + set_output_type(i, element::f32, {}); + } } std::shared_ptr SerializationNode::clone_with_new_inputs(const OutputVector &new_args) const { check_new_args_count(this, new_args); - return std::make_shared(new_args, m_expr); + return std::make_shared(new_args, m_expr, m_mode); } bool SerializationNode::visit_attributes(AttributeVisitor &visitor) { diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index adeed6e26b4473..8af95824b834d5 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -34,7 +34,6 @@ #include "snippets/lowered/pass/load_movebroadcast_to_broadcastload.hpp" #include "snippets/lowered/pass/allocate_buffers.hpp" #include "snippets/lowered/pass/propagate_layout.hpp" -#include "snippets/lowered/pass/cleanup_loop_offsets.hpp" #include "snippets/lowered/pass/softmax_decomposition.hpp" #include "snippets/lowered/pass/move_scalar_to_consumer.hpp" #include "snippets/lowered/pass/move_result_out_of_loop.hpp" @@ -429,12 +428,19 @@ void Subgraph::control_flow_transformations(lowered::LinearIR& linear_ir, const size_t vector_size = get_generator()->get_target_machine()->get_lanes(); const int32_t buffer_allocation_rank = static_cast(linear_ir.get_config().m_loop_depth); + // We have to call MarkLoops before backend markup passes + // because these passes can update subtensor but not insert Loop (e.g. 
when loop increment is equal to the corresponding dim) + // If MarkLoops is called on such LIR, it inserts Eltwise-like loops which might not reflect backend expectations + // It should be fixed by ticket 113666 + lowered::pass::PassPipeline markup_pipeline; + markup_pipeline.register_pass(vector_size); + markup_pipeline.run(linear_ir); + // Ticket: 113666 // TODO: Make pass pipeline with backend passes more flexible backend_passes_pre_common.run(linear_ir); lowered::pass::PassPipeline common_pipeline; - common_pipeline.register_pass(vector_size); common_pipeline.register_pass(vector_size); common_pipeline.register_pass(); common_pipeline.register_pass(); @@ -458,7 +464,6 @@ void Subgraph::control_flow_transformations(lowered::LinearIR& linear_ir, final_pipeline.register_pass(lowering_result.buffer_scratchpad_size, linear_ir.get_config().m_are_buffers_optimized); final_pipeline.register_pass(); final_pipeline.register_pass(); - final_pipeline.register_pass(); final_pipeline.run(linear_ir); } @@ -528,16 +533,6 @@ void Subgraph::print() const { } } - -void Subgraph::serialize() const { - std::stringstream xmlFile, binFile; - ov::pass::Serialize serializer(xmlFile, xmlFile, ov::pass::Serialize::Version::IR_V10); - serializer.run_on_model(body_ptr()); - auto m_constants = binFile.str(); - auto m_model = xmlFile.str(); - std::cout << m_model << std::endl; -} - } // namespace op } // namespace snippets } // namespace ov diff --git a/src/common/snippets/src/pass/broadcast_to_movebroadcast.cpp b/src/common/snippets/src/pass/broadcast_to_movebroadcast.cpp index cd803b163b5bbf..a3e687d44b3b38 100644 --- a/src/common/snippets/src/pass/broadcast_to_movebroadcast.cpp +++ b/src/common/snippets/src/pass/broadcast_to_movebroadcast.cpp @@ -35,9 +35,8 @@ ov::snippets::pass::BroadcastToMoveBroadcast::BroadcastToMoveBroadcast() { // will be handled by pointer arithmetics. Note that this behavior should be changed in case of full op::Boradcast support. 
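// --- Editor's note (illustrative sketch, not part of the patch) --------------
// The hunk below switches BroadcastMove from storing a full target shape to
// storing only the broadcasted *last* dimension (ov::Dimension); outer dims
// are resolved by pointer arithmetic in the enclosing loops. A minimal
// stand-alone sketch of the shape rule, with a hypothetical helper name:
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<std::size_t> broadcast_last_dim(std::vector<std::size_t> value_shape,
                                            const std::vector<std::size_t>& target_shape) {
    // Only the innermost dimension is materialized by the broadcast op.
    if (value_shape.back() != target_shape.back()) {
        assert(value_shape.back() == 1);  // numpy-style rule: only dim == 1 can be broadcast
        value_shape.back() = target_shape.back();
    }
    return value_shape;
}

int main() {
    assert(broadcast_last_dim({1, 2, 1}, {1, 2, 3}) == (std::vector<std::size_t>{1, 2, 3}));
}
// -----------------------------------------------------------------------------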
Output in_value = root->input_value(0); if (*target_shape.rbegin() != *value_shape.rbegin()) { - auto broadcasted_shape = value_shape; - *broadcasted_shape.rbegin() = *target_shape.rbegin(); - const auto& broadcast_node = std::make_shared(in_value, broadcasted_shape); + auto broadcasted_dim = ov::Dimension(*target_shape.rbegin()); + const auto& broadcast_node = std::make_shared(in_value, broadcasted_dim); in_value = broadcast_node->output(0); } diff --git a/src/common/snippets/src/pass/insert_movebroadcast.cpp b/src/common/snippets/src/pass/insert_movebroadcast.cpp index bbc2865fdb42a8..38b85d11a19c07 100644 --- a/src/common/snippets/src/pass/insert_movebroadcast.cpp +++ b/src/common/snippets/src/pass/insert_movebroadcast.cpp @@ -44,8 +44,7 @@ ov::Output ov::snippets::pass::InsertMoveBroadcast::BroadcastNodeLastD // will be handled by pointer arithmetics inside outer LoopEmitter if (*target_shape.rbegin() != *normalized_shape.rbegin()) { ov::PartialShape broadcasted_shape = normalized_shape; - *broadcasted_shape.rbegin() = *target_shape.rbegin(); - const auto broadcast_node = std::make_shared(value, broadcasted_shape); + const auto broadcast_node = std::make_shared(value, *target_shape.rbegin()); copy_runtime_info(value.get_node_shared_ptr(), broadcast_node); return broadcast_node->output(0); diff --git a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp index 61404d208fd5a7..44c1065d8260a7 100644 --- a/src/common/snippets/src/shape_inference/shape_infer_instances.cpp +++ b/src/common/snippets/src/shape_inference/shape_infer_instances.cpp @@ -103,16 +103,17 @@ BroadcastShapeInfer::BroadcastShapeInfer(const std::shared_ptr() || std::is_base_of(), "This ShapeInfer class could be used only for BroadcastMove and BroadcastLoad operations."); - const auto& broadcast = as_type_ptr(n); - OPENVINO_ASSERT(broadcast, "Invalid node passed to BroadcastShapeInfer.", + broadcast_op = as_type_ptr(n); + OPENVINO_ASSERT(broadcast_op, "Invalid node passed to BroadcastShapeInfer.", "Expected ", typeid(BroadcastOP).name(), "got ", n->get_type_name()); - const auto last_dim = *broadcast->get_output_shape().rbegin(); - m_broadcasted_dim = last_dim.is_dynamic() ? 
IShapeInferSnippets::DYNAMIC_DIMENSION : last_dim.get_length(); } + template Result BroadcastShapeInfer::infer(const std::vector& input_shapes) { auto out_shape = input_shapes[0].get(); - out_shape.back() = m_broadcasted_dim; + const auto& bcasted_dim = broadcast_op->get_bcast_dimension(); + OPENVINO_ASSERT(bcasted_dim.is_static()); + out_shape.back() = bcasted_dim.get_length(); return {{out_shape}, ShapeInferStatus::success}; } diff --git a/src/common/snippets/src/shape_inference/shape_inference.cpp b/src/common/snippets/src/shape_inference/shape_inference.cpp index f2c6be9ae0b49c..2ae7487128b978 100644 --- a/src/common/snippets/src/shape_inference/shape_inference.cpp +++ b/src/common/snippets/src/shape_inference/shape_inference.cpp @@ -39,7 +39,7 @@ const IShapeInferSnippetsFactory::TRegistry IShapeInferSnippetsFactory::registry SHAPE_INFER_PREDEFINED(op::ConvertSaturation, PassThroughShapeInfer), SHAPE_INFER_PREDEFINED(op::Load, PassThroughShapeInfer), SHAPE_INFER_PREDEFINED(op::Store, PassThroughShapeInfer), - SHAPE_INFER_PREDEFINED(op::Buffer, PassThroughShapeInfer), + SHAPE_INFER_PREDEFINED(op::IntermediateMemoryBuffer, PassThroughShapeInfer), SHAPE_INFER_PREDEFINED(op::Fill, PassThroughShapeInfer), SHAPE_INFER_PREDEFINED(ov::op::v0::Parameter, PassThroughShapeInfer), // Note: We should remove Softmax shape infers after the decomposition activity, @@ -68,6 +68,7 @@ const IShapeInferSnippetsFactory::TRegistry IShapeInferSnippetsFactory::registry SHAPE_INFER_OP_SPECIFIC(op::RankNormalization), SHAPE_INFER_OP_SPECIFIC(op::BroadcastLoad), SHAPE_INFER_OP_SPECIFIC(op::BroadcastMove), + SHAPE_INFER_OP_SPECIFIC(op::NewMemoryBuffer), }; #undef SHAPE_INFER_OP_SPECIFIC_EXTERNAL #undef SHAPE_INFER_OP_SPECIFIC diff --git a/src/common/snippets/tests/src/lowered/pass/buffer_allocation.cpp b/src/common/snippets/tests/src/lowered/pass/buffer_allocation.cpp index 4dc6ac8d365208..fe887cecd96f17 100644 --- a/src/common/snippets/tests/src/lowered/pass/buffer_allocation.cpp +++ b/src/common/snippets/tests/src/lowered/pass/buffer_allocation.cpp @@ -92,9 +92,9 @@ std::shared_ptr EltwiseBufferAllocationTest::GetModel() const { const auto parameter0 = std::make_shared(ov::element::f32, ov::PartialShape({1, 3, 100, 100})); const auto parameter1 = std::make_shared(ov::element::f32, ov::PartialShape({1, 3, 100, 100})); const auto add = std::make_shared(parameter0, parameter1); - const auto buffer0 = std::make_shared(add, static_cast(subtensor_buffer.size())); + const auto buffer0 = std::make_shared(add, static_cast(subtensor_buffer.size())); const auto relu = std::make_shared(buffer0); - const auto buffer1 = std::make_shared(relu, static_cast(subtensor_buffer.size())); + const auto buffer1 = std::make_shared(relu, static_cast(subtensor_buffer.size())); const auto exp = std::make_shared(buffer1); const auto body = std::make_shared(std::make_shared(exp), ov::ParameterVector{parameter0, parameter1}); @@ -119,7 +119,7 @@ void MHABufferAllocationTest::MarkBrgemm(const std::shared_ptr MHABufferAllocationTest::GetModel() const { - const auto subtensor_scalar = std::vector{1, 1}; + const auto subtensor_scalar = std::vector{1}; const auto subtensor_eltwise = std::vector{1, m_vector_size}; const auto subtensor_brgemm = std::vector{32, ov::snippets::lowered::PortDescriptor::ServiceDimensions::FULL_DIM}; const auto subtensor_softmax = std::vector{1, ov::snippets::lowered::PortDescriptor::ServiceDimensions::FULL_DIM}; @@ -187,7 +187,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWSplit, MHA 
::testing::Values(true), ::testing::Values(true), ::testing::Values(57344), // (Buffer before brgemm) + (between brgemms) + (after brgemm) - ::testing::Values(3)), // (Buffer before brgemm) + (between brgemms) + (after brgemm) + ::testing::Values(2)), // (Buffer before brgemm0 and after brgemm1) + (between brgemms) BufferAllocationTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHANotOptimizedWOSplit, MHABufferAllocationTest, diff --git a/src/common/snippets/tests/src/lowered/pass/loop.cpp b/src/common/snippets/tests/src/lowered/pass/loop.cpp index 27b4e3ce95bdca..1e29efa955d517 100644 --- a/src/common/snippets/tests/src/lowered/pass/loop.cpp +++ b/src/common/snippets/tests/src/lowered/pass/loop.cpp @@ -2,21 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "snippets/op/loop.hpp" -#include "subgraph_simple.hpp" +#include #include "snippets/lowered/linear_ir.hpp" +#include "snippets/lowered/pass/cleanup_loop_offsets.hpp" #include "snippets/lowered/pass/init_loops.hpp" #include "snippets/lowered/pass/insert_load_store.hpp" -#include "snippets/lowered/pass/cleanup_loop_offsets.hpp" -#include "snippets/lowered/pass/validate_loops.hpp" #include "snippets/lowered/pass/insert_loops.hpp" #include "snippets/lowered/pass/insert_tail_loop.hpp" +#include "snippets/lowered/pass/optimize_loop_single_evaluation.hpp" +#include "snippets/lowered/pass/validate_loops.hpp" #include "snippets/shape_inference/shape_inference.hpp" - -#include "snippets/op/loop.hpp" - +#include "subgraph_simple.hpp" using Snippets_TailProcessingTransformation = ::testing::Test; // [Inserted Loop number, [ptr_increments, final_offsets] @@ -47,7 +46,7 @@ static void init_linear_ir(const std::vector& in_shapes, Linea loop_manager->mark_loop(expr_it, std::next(expr_it), inner_wa, inner_inc, 0, loop_entry_points, loop_exit_points); loop_manager->mark_loop(expr_it, std::next(expr_it), blocked_wa, blocked_inc, 1, loop_entry_points, loop_exit_points); const auto loop_id = loop_manager->mark_loop(expr_it, std::next(expr_it), outer_wa, outer_inc, 1, loop_entry_points, loop_exit_points); - loop_manager->get_loop_info(loop_id)->outer_splited_loop = true; + loop_manager->get_loop_info(loop_id)->set_outer_splited_loop(true); } static void init_pipeline(pass::PassPipeline& pass_pipeline) { @@ -69,6 +68,7 @@ static void validate(const LinearIR& linear_ir, const ref_map& reference) { ASSERT_TRUE(loop_end->get_finalization_offsets() == reference.at(loop_num).second); loop_num++; } + ASSERT_EQ(loop_num, reference.size()); } TEST(Snippets_TailProcessingTransformation, BlockedWOTail_OriginalPtrShifts) { @@ -119,19 +119,20 @@ TEST(Snippets_TailProcessingTransformation, BlockedTail_OriginalPtrShifts) { pass::PassPipeline pass_pipeline; init_pipeline(pass_pipeline); pass_pipeline.register_pass(); + pass_pipeline.register_pass(); pass_pipeline.run(linear_ir); // [Inserted Loop number, [ptr_increments, final_offsets] std::map, std::vector>> reference; - reference[0] = { std::vector(3, 1), std::vector(3, 16)}; // Vector Inner - reference[1] = { std::vector(3, 1), std::vector(3, -16)}; // Blocked Inner + reference[0] = { std::vector(3, 0), std::vector(3, 16)}; // Vector Inner + reference[1] = { std::vector(3, 0), std::vector(3, -16)}; // Blocked Inner reference[2] = { std::vector(3, 20), std::vector(3, -80)}; // Vector Blocked reference[3] = { std::vector(3, 20), std::vector(3, 0)}; // Vector Outer - reference[4] = { std::vector(3, 1), std::vector(3, 16)}; // Vector Inner - reference[5] = { 
std::vector(3, 1), std::vector(3, -16)}; // Blocked Inner + reference[4] = { std::vector(3, 0), std::vector(3, 16)}; // Vector Inner + reference[5] = { std::vector(3, 0), std::vector(3, -16)}; // Blocked Inner reference[6] = { std::vector(3, 20), std::vector(3, -40)}; // Tail Blocked - reference[7] = { std::vector(3, 20), std::vector(3, -320)}; // Tail Blocked + reference[7] = { std::vector(3, 0), std::vector(3, -320)}; // Tail Blocked validate(linear_ir, reference); } @@ -144,19 +145,20 @@ TEST(Snippets_TailProcessingTransformation, BlockedTail_CleanUpPtrShifts) { pass::PassPipeline pass_pipeline; init_pipeline(pass_pipeline); - pass_pipeline.register_pass(); pass_pipeline.register_pass(); + pass_pipeline.register_pass(); + pass_pipeline.register_pass(); pass_pipeline.run(linear_ir); // [Inserted Loop number, [ptr_increments, final_offsets] std::map, std::vector>> reference; - reference[0] = { std::vector(3, 1), std::vector(3, 16)}; // Vector Inner - reference[1] = { std::vector(3, 1), std::vector(3, 4)}; // Blocked Inner - reference[2] = { std::vector(3, 0), std::vector(3, 0)}; // Vector Blocked + reference[0] = { std::vector(3, 0), std::vector(3, 16)}; // Vector Inner + reference[1] = { std::vector(3, 0), std::vector(3, 4)}; // Blocked Inner + reference[2] = {std::vector(3, 0), std::vector(3, 0)}; // Vector Blocked reference[3] = { std::vector(3, 0), std::vector(3, 0)}; // Vector Outer - reference[4] = { std::vector(3, 1), std::vector(3, 16)}; // Vector Inner - reference[5] = { std::vector(3, 1), std::vector(3, 4)}; // Blocked Inner + reference[4] = { std::vector(3, 0), std::vector(3, 16)}; // Vector Inner + reference[5] = { std::vector(3, 0), std::vector(3, 4)}; // Blocked Inner reference[6] = { std::vector(3, 0), std::vector(3, 0)}; // Tail Blocked reference[7] = { std::vector(3, 0), std::vector(3, 0)}; // Tail Blocked diff --git a/src/common/snippets/tests/src/lowering_utils.cpp b/src/common/snippets/tests/src/lowering_utils.cpp index 0fa490353e4efa..0f7c86b48028d3 100644 --- a/src/common/snippets/tests/src/lowering_utils.cpp +++ b/src/common/snippets/tests/src/lowering_utils.cpp @@ -44,7 +44,8 @@ DummyTargetMachine::DummyTargetMachine(const std::vector& jitters[ov::snippets::op::PerfCountBegin::get_type_info_static()] = dummy_functor; jitters[ov::snippets::op::PerfCountEnd::get_type_info_static()] = dummy_functor; jitters[ov::snippets::op::Brgemm::get_type_info_static()] = dummy_functor; - jitters[ov::snippets::op::Buffer::get_type_info_static()] = dummy_functor; + jitters[ov::snippets::op::IntermediateMemoryBuffer::get_type_info_static()] = dummy_functor; + jitters[ov::snippets::op::NewMemoryBuffer::get_type_info_static()] = dummy_functor; jitters[ov::snippets::op::VectorBuffer::get_type_info_static()] = dummy_functor; jitters[ov::snippets::op::Fill::get_type_info_static()] = dummy_functor; diff --git a/src/common/snippets/tests/src/pass/movebroadcast.cpp b/src/common/snippets/tests/src/pass/movebroadcast.cpp index 3779cfaae9f532..caf48b76383d8b 100644 --- a/src/common/snippets/tests/src/pass/movebroadcast.cpp +++ b/src/common/snippets/tests/src/pass/movebroadcast.cpp @@ -28,7 +28,7 @@ TEST_F(TransformationTestsF, InsertBroadcastMove) { { auto data0 = std::make_shared(element::f32, Shape{2, 3}); auto data1 = std::make_shared(element::f32, Shape{1, 2, 1}); - auto move1 = std::make_shared(data1, Shape{1, 2, 3}); + auto move1 = std::make_shared(data1, ov::Dimension{3}); auto add = std::make_shared(data0, move1); model_ref = std::make_shared(NodeVector{add}, ParameterVector{data0, 
data1}); } diff --git a/src/common/transformations/CMakeLists.txt b/src/common/transformations/CMakeLists.txt index e7d365ca32492e..0eea2cdbfbdfbc 100644 --- a/src/common/transformations/CMakeLists.txt +++ b/src/common/transformations/CMakeLists.txt @@ -43,8 +43,7 @@ endif() add_library(${TARGET_NAME} INTERFACE) target_include_directories(${TARGET_NAME} INTERFACE - $ - $>) + $) target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) diff --git a/src/common/transformations/include/ov_ops/rotary_positional_embeddings.hpp b/src/common/transformations/include/ov_ops/rotary_positional_embeddings.hpp new file mode 100644 index 00000000000000..0ae6db9ddcc3ad --- /dev/null +++ b/src/common/transformations/include/ov_ops/rotary_positional_embeddings.hpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "transformations_visibility.hpp" + +namespace ov { +namespace op { +namespace internal { + +/// +/// \brief Rotary Positional Embeddings operation +/// Internal operation which may change in the future +/// \ingroup ov_ops_cpp_api +class TRANSFORMATIONS_API RPE : public ov::op::Op { +public: + OPENVINO_OP("RPE", "ie_internal_opset", op::Op); + + RPE() = default; + RPE(const Output& data, const Output& sin, const Output& cos, int64_t axis); + + void set_axis(int64_t axis); + int64_t get_axis() const; + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + +private: + int64_t m_axis{}; +}; + +} // namespace internal +} // namespace op +} // namespace ov diff --git a/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp b/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp new file mode 100644 index 00000000000000..9c756b48896f62 --- /dev/null +++ b/src/common/transformations/include/transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/pass/graph_rewrite.hpp" +#include "transformations_visibility.hpp" + +namespace ov { +namespace pass { +class TRANSFORMATIONS_API RPE_Fusion; +} // namespace pass +} // namespace ov + +/** + * @ingroup ie_transformation_common_api + * @brief Fuses special sub-graph into an internal Rotary Positional Embedding operation + */ +class ov::pass::RPE_Fusion : public ov::pass::MatcherPass { +public: + OPENVINO_RTTI("RPE_Fusion", "0"); + RPE_Fusion(); +}; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp index 91e8d430fd1fba..11c2bb8a3e40c5 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp @@ -38,7 +38,8 @@ class ov::pass::ConvertBitwiseXorToLogicalXor : public ov::pass::MatcherPass { }; /** * @ingroup ie_transformation_common_api - * @brief Converts Bitwise operators to Logical for boolean datatype for plugins that don't support opset13 Bitwise + * @brief Converts Bitwise operators to Logical for boolean 
datatype for plugins that don't support opset13 Bitwise and + * to allow for constant folding for bool. */ class ConvertBitwiseToLogical : public ov::pass::GraphRewrite { public: diff --git a/src/common/transformations/src/ov_ops/rotary_positional_embeddings.cpp b/src/common/transformations/src/ov_ops/rotary_positional_embeddings.cpp new file mode 100644 index 00000000000000..02f16f01ce5298 --- /dev/null +++ b/src/common/transformations/src/ov_ops/rotary_positional_embeddings.cpp @@ -0,0 +1,46 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ov_ops/rotary_positional_embeddings.hpp" + +#include "itt.hpp" + +namespace ov { +namespace op { +namespace internal { + +RPE::RPE(const Output& data, const Output& sin, const Output& cos, int64_t axis) + : Op({data, sin, cos}), + m_axis{axis} { + constructor_validate_and_infer_types(); +} + +void RPE::set_axis(int64_t axis) { + m_axis = axis; +} + +int64_t RPE::get_axis() const { + return m_axis; +} + +void RPE::validate_and_infer_types() { + INTERNAL_OP_SCOPE(internal_RoPE_validate_and_infer_types); + set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); +} + +bool RPE::visit_attributes(ov::AttributeVisitor& visitor) { + INTERNAL_OP_SCOPE(internal_RoPE_visit_attributes); + visitor.on_attribute("axis", m_axis); + return true; +} + +std::shared_ptr RPE::clone_with_new_inputs(const ov::OutputVector& new_args) const { + INTERNAL_OP_SCOPE(internal_RoPE_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_axis); +} + +} // namespace internal +} // namespace op +} // namespace ov \ No newline at end of file diff --git a/src/common/transformations/src/transformations/common_optimizations/clamp_fusion.cpp b/src/common/transformations/src/transformations/common_optimizations/clamp_fusion.cpp index 49f1a0e2cab241..2c12b61443a0d0 100644 --- a/src/common/transformations/src/transformations/common_optimizations/clamp_fusion.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/clamp_fusion.cpp @@ -48,6 +48,8 @@ ov::pass::ClampFusion::ClampFusion() { double min_value = min_const->cast_vector()[0]; double max_value = max_const->cast_vector()[0]; + if (min_value > max_value) + return false; auto clamp = register_new_node(data, min_value, max_value); diff --git a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp index 890bf914854cd1..d6c0b5f4cde515 100644 --- a/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/common_optimizations.cpp @@ -233,7 +233,8 @@ bool ov::pass::CommonOptimizations::run_on_model(const std::shared_ptrMultiply, // the full pattern to match is presented on the left hand side of the graph below. // On the right hand side is the graph after transformation. -// Currently transformation supports only i8 and u8 quantized data type. -// That implies 'levels' attribute to be 256, as well as (output_low, output_high) be (-128, 127) or (0, 255) (depends +// Currently transformation supports only i8, u8, i16, u16 quantized data type. 
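// --- Editor's note (illustrative sketch, not part of the patch) --------------
// The 16-bit extension described in this header comment keeps the same
// invariant as the 8-bit path: 'levels' must equal 2^bit_width and
// (output_low, output_high) must span exactly the signed/unsigned range of
// the target type. A self-contained sketch of those checks; "QType" and the
// helper name are hypothetical stand-ins, not the real ov::element API:
#include <cassert>
#include <cstddef>

enum class QType { i8, u8, i16, u16 };

bool matches_quantized_range(QType t, std::size_t levels, float lo, float hi) {
    switch (t) {
    case QType::i8:
        return levels == 256 && lo == -128.f && hi == 127.f;
    case QType::u8:
        return levels == 256 && lo == 0.f && hi == 255.f;
    case QType::i16:
        return levels == 65536 && lo == -32768.f && hi == 32767.f;
    case QType::u16:
        return levels == 65536 && lo == 0.f && hi == 65535.f;
    }
    return false;
}

int main() {
    assert(matches_quantized_range(QType::u16, 65536, 0.f, 65535.f));
    assert(!matches_quantized_range(QType::i8, 65536, -128.f, 127.f));  // wrong levels for i8
}
// -----------------------------------------------------------------------------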
+// That implies 'levels' attribute to be 256 or 65536, as well as (output_low, output_high) +// be (-128, 127) or (0, 255) or (-32768, 32767) or (0, 65535) (depends on type and depends // on sign of the quantized data type). Another limitation is that 'zero_point' and 'scale' have to be broadcastable to // the output of FakeQuantize. // @@ -69,9 +70,9 @@ ov::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize() { auto output_high_pattern = ov::pass::pattern::wrap_type(); auto fq_pattern = ov::pass::pattern::wrap_type( {data_pattern, input_low_pattern, input_high_pattern, output_low_pattern, output_high_pattern}); - auto convert1_pattern = - ov::pass::pattern::wrap_type({fq_pattern}, - pattern::type_matches_any({element::i8, element::u8})); + auto convert1_pattern = ov::pass::pattern::wrap_type( + {fq_pattern}, + pattern::type_matches_any({element::i8, element::u8, element::i16, element::u16})); auto convert2_pattern = ov::pass::pattern::wrap_type({convert1_pattern}, pattern::type_matches(element::f32)); auto zero_point_pattern = pass::pattern::any_input(); @@ -113,12 +114,14 @@ ov::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize() { if (convert2.get_target_inputs().size() != 1) return false; - // we support only i8 or u8 so 'levels' attribute must be 256 + // we support: + // i8 or u8: 'levels' attribute must be 256 + // i16 or u16: 'levels' attribute must be 65536 size_t levels = fq->get_levels(); - if (levels != 256) + if (levels != 256 && levels != 65536) return false; - // check if (out_low_val, out_high_val) is (-128, 127) or (0, 255) + // check if (out_low_val, out_high_val) is (-128, 127) or (0, 255) or (-32768, 32767) or (0, 65535) float out_low_val; if (!op::util::get_single_value(output_low, out_low_val)) return false; @@ -135,6 +138,14 @@ ov::pass::ConvertQuantizeDequantize::ConvertQuantizeDequantize() { if (out_low_val != 0 || out_high_val != 255) return false; break; + case element::Type_t::i16: + if (out_low_val != -32768 || out_high_val != 32767) + return false; + break; + case element::Type_t::u16: + if (out_low_val != 0 || out_high_val != 65535) + return false; + break; default: return false; } diff --git a/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp b/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp new file mode 100644 index 00000000000000..689664922486b7 --- /dev/null +++ b/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp @@ -0,0 +1,98 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp" + +#include "itt.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/variadic_split.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "ov_ops/rotary_positional_embeddings.hpp" +#include "transformations/utils/utils.hpp" +#include "validation_util.hpp" + +using ov::op::v0::Concat; +using ov::op::v1::Add; +using ov::op::v1::Multiply; +using ov::op::v1::VariadicSplit; + +ov::pass::RPE_Fusion::RPE_Fusion() { + MATCHER_SCOPE(RPE_Fusion); + + auto sin = pattern::any_input(); + auto cos = pattern::any_input(); + + // FIXME: should be a single node match + auto source_1 = pattern::any_input(); + auto source 
= pattern::any_input(); + // BEGIN: rotate_half + + // Variadic Split into two equal parts + auto axis = pattern::any_input(); + auto split_length = INT_CONSTANT_WITH_PREDICATE(value.size() == 2 && value[0] == value[1]); + auto vsplit = pattern::wrap_type({source, axis, split_length}); + vsplit->set_output_size(2); + + // Negate + auto minus_1 = FLOAT_CONSTANT_WITH_PREDICATE(value.size() == 1 && value[0] == -1); + auto neg = pattern::wrap_type({vsplit->output(1), minus_1}); + + // Concat two splitted parts in the opposite order, first of them is negated + auto concat = pattern::wrap_type({neg, vsplit->output(0)}); // make sure axis eq to vsplit eq -1 + + // END: rotate half + + auto mul_sin = pattern::wrap_type({concat, sin}); + auto mul_cos = pattern::wrap_type({source_1, cos}); + auto add = pattern::wrap_type({mul_cos, mul_sin}); + + ov::matcher_pass_callback matcher_pass_callback = [=](pattern::Matcher& m) { + auto value_map = m.get_pattern_value_map(); + + auto actual_source = value_map.at(vsplit).get_node_shared_ptr()->input_value(0); + auto potential_source = value_map.at(mul_cos).get_node_shared_ptr()->input_value(0); + auto cos_output = value_map.at(mul_cos).get_node_shared_ptr()->input_value(1); + + if (actual_source != potential_source && actual_source != cos_output) + return false; // flawed match + if (actual_source == potential_source && actual_source == cos_output) + return false; // flawed match + if (actual_source != potential_source && actual_source == cos_output) + cos_output = potential_source; + + auto input = value_map.at(source); + auto concat_node = ov::as_type_ptr(value_map.at(concat).get_node_shared_ptr()); + if (!concat_node) + return false; + auto split_axis_node = ov::util::get_constant_from_source(value_map.at(axis)); + if (!split_axis_node) + return false; + auto value = split_axis_node->cast_vector(); + if (value.size() != 1) + return false; + auto concat_axis = concat_node->get_concatenation_axis(); + auto split_axis = value[0]; + if (concat_axis != split_axis) { + if (input.get_partial_shape().rank().is_static()) { + auto rank = input.get_partial_shape().rank().get_length(); + concat_axis = ov::util::normalize(concat_axis, rank); + split_axis = ov::util::normalize(split_axis, rank); + } + if (concat_axis != split_axis) + return false; + } + auto rpe = + std::make_shared(input, value_map.at(sin), cos_output, concat_node->get_axis()); + + for (const auto& label : {vsplit, neg, concat, mul_sin, mul_cos, add}) + ov::copy_runtime_info(value_map.at(label).get_node_shared_ptr(), rpe); + return ov::replace_output_update_name(value_map.at(add), rpe->output(0)); + }; + auto m = std::make_shared(add, matcher_name); + register_matcher(m, matcher_pass_callback); +} diff --git a/src/common/transformations/src/transformations/convert_precision.cpp b/src/common/transformations/src/transformations/convert_precision.cpp index 8878cb182b9992..3df6802ec360e8 100644 --- a/src/common/transformations/src/transformations/convert_precision.cpp +++ b/src/common/transformations/src/transformations/convert_precision.cpp @@ -11,6 +11,7 @@ #include "openvino/opsets/opset1.hpp" #include "openvino/opsets/opset10.hpp" #include "openvino/opsets/opset11.hpp" +#include "openvino/opsets/opset13.hpp" #include "openvino/opsets/opset3.hpp" #include "openvino/opsets/opset4.hpp" #include "openvino/opsets/opset5.hpp" @@ -58,6 +59,7 @@ bool fuse_type_to_nms9(const std::shared_ptr& node, const precisions_m bool fuse_type_to_nms_rotated(const std::shared_ptr& node, const precisions_map& precisions); 
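// --- Editor's note (illustrative sketch, not part of the patch) --------------
// Scalar reference for the RPE_Fusion pattern introduced above: rotate_half
// splits the last dimension into two equal halves, negates the second half,
// and concatenates in swapped order; the fused op computes
// x * cos + rotate_half(x) * sin. Function names here are hypothetical:
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<float> rotate_half(const std::vector<float>& x) {
    const std::size_t half = x.size() / 2;
    std::vector<float> out(x.size());
    for (std::size_t i = 0; i < half; ++i) {
        out[i] = -x[half + i];  // negated second half comes first
        out[half + i] = x[i];   // first half comes second
    }
    return out;
}

std::vector<float> rope(const std::vector<float>& x,
                        const std::vector<float>& sin_t,
                        const std::vector<float>& cos_t) {
    const auto rot = rotate_half(x);
    std::vector<float> out(x.size());
    for (std::size_t i = 0; i < x.size(); ++i)
        out[i] = x[i] * cos_t[i] + rot[i] * sin_t[i];
    return out;
}

int main() {
    assert(rotate_half({1.f, 2.f, 3.f, 4.f}) == (std::vector<float>{-3.f, -4.f, 1.f, 2.f}));
    assert(rope({1.f, 0.f}, {0.f, 0.f}, {1.f, 1.f}) == (std::vector<float>{1.f, 0.f}));
}
// -----------------------------------------------------------------------------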
bool fuse_type_to_matrix_nms(const std::shared_ptr& node, const precisions_map& precisions); bool fuse_type_to_multiclass_nms(const std::shared_ptr& node, const precisions_map& precisions); +bool fuse_type_to_multinomial_v13(const std::shared_ptr& node, const precisions_map& precisions); bool fuse_type_to_generate_proposals(const std::shared_ptr& node, const precisions_map& precisions); bool fuse_type_to_topk(const std::shared_ptr& node, const precisions_map& precisions); bool fuse_type_to_maxpool(const std::shared_ptr& node, const precisions_map& precisions); @@ -438,7 +440,9 @@ bool ov::pass::ConvertPrecision::run_on_model(const std::shared_ptr& {opset4::Range::get_type_info_static(), fuse_type_to_range_v4}, {opset9::Eye::get_type_info_static(), fuse_type_to_eye_v9}, {opset10::Unique::get_type_info_static(), fuse_type_to_unique_v10}, - {opset8::RandomUniform::get_type_info_static(), fuse_type_to_random_uniform_v8}}; + {opset8::RandomUniform::get_type_info_static(), fuse_type_to_random_uniform_v8}, + {opset13::Multinomial::get_type_info_static(), fuse_type_to_multinomial_v13}, + }; for (const auto& it : m_additional_type_to_fuse_map) { type_to_fuse[it.first] = it.second; @@ -844,6 +848,17 @@ bool fuse_type_to_multiclass_nms(const std::shared_ptr& node, const pr }); } +bool fuse_type_to_multinomial_v13(const std::shared_ptr& node, const precisions_map& precisions) { + auto multinomial = ov::as_type_ptr(node); + if (!multinomial) { + return false; + } + + return update_type(0, node, precisions, [&](const element::Type& type) { + multinomial->set_convert_type(type); + }); +} + bool fuse_type_to_generate_proposals(const std::shared_ptr& node, const precisions_map& precisions) { auto generate_proposals = ov::as_type_ptr(node); if (!generate_proposals) { @@ -1267,6 +1282,10 @@ bool fuse_type_to_constant(const std::shared_ptr& node, new_const = change_constant_precision(constant); } else if (from == ov::element::boolean && to == ov::element::i32) { new_const = change_constant_precision(constant); + } else if (from == ov::element::i8 && to == ov::element::f32) { + new_const = change_constant_precision(constant); + } else if (from == ov::element::u8 && to == ov::element::f32) { + new_const = change_constant_precision(constant); } else if (from == ov::element::i8 && to == ov::element::i64) { new_const = change_constant_precision(constant); } else if (from == ov::element::i4 || from == ov::element::u4 || from == ov::element::u1) { diff --git a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp index 88f6a1c303f951..9a60dba64d62d0 100644 --- a/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp +++ b/src/common/transformations/tests/common_optimizations/dimension_tracking.cpp @@ -11,6 +11,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "common_test_utils/subgraph_builders/detection_output.hpp" #include "openvino/core/dimension_tracker.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset1.hpp" @@ -19,6 +20,7 @@ #include "transformations/common_optimizations/divide_fusion.hpp" #include "transformations/init_node_info.hpp" #include "transformations/utils/utils.hpp" + using namespace ov; using namespace testing; @@ -307,7 +309,7 @@ TEST(TransformationTests, AutoBatch_FindBatch_NegativeTracking) { } TEST(TransformationTests, AutoBatch_FindBatch_AutoBatch_LabelPropagation_DO_detachment) { - auto f = 
ngraph::builder::subgraph::makeDetectionOutput(); + auto f = ov::test::utils::make_detection_output(); auto& data = f->get_parameters()[0]; ov::pass::Manager m; diff --git a/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp b/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp new file mode 100644 index 00000000000000..48ee524c1fb182 --- /dev/null +++ b/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp" + +#include + +#include "common_test_utils/ov_test_utils.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/variadic_split.hpp" +#include "ov_ops/rotary_positional_embeddings.hpp" +#include "transformations/utils/utils.hpp" + +using namespace std; +using namespace ov; +using namespace ov::op; + +void name_node_and_output(const shared_ptr& op, const std::string& name) { + op->set_friendly_name(name); + op->output(0).set_names({name}); +} + +TEST_F(TransformationTestsF, FuseRPE) { + { + auto data = make_shared(element::f32, PartialShape::dynamic()); + name_node_and_output(data, "source"); + auto sin = make_shared(element::f32, PartialShape::dynamic()); + name_node_and_output(sin, "sin"); + auto cos = make_shared(element::f32, PartialShape::dynamic()); + name_node_and_output(cos, "cos"); + auto axis = v0::Constant::create(element::i64, {}, {-1}); + auto split_lengths = v0::Constant::create(element::i64, {2}, {10, 10}); + auto split = make_shared(data, axis, split_lengths); + + auto minus_one = v0::Constant::create(element::f32, {}, {-1}); + auto negate = make_shared(split->output(1), minus_one); + + auto concat = make_shared(OutputVector{negate, split->output(0)}, -1); + + auto mul_sin = make_shared(concat, sin); + auto mul_cos = make_shared(data, cos); + auto add = make_shared(mul_cos, mul_sin); + name_node_and_output(add, "rpe"); + + model = std::make_shared(NodeVector{add}, ParameterVector{data, sin, cos}); + + manager.register_pass(); + } + { + auto data = make_shared(element::f32, PartialShape::dynamic()); + name_node_and_output(data, "source"); + auto sin = make_shared(element::f32, PartialShape::dynamic()); + name_node_and_output(sin, "sin"); + auto cos = make_shared(element::f32, PartialShape::dynamic()); + name_node_and_output(cos, "cos"); + auto rpe = make_shared(data, sin, cos, -1); + name_node_and_output(rpe, "rpe"); + model_ref = std::make_shared(NodeVector{rpe}, ParameterVector{data, sin, cos}); + } + comparator.enable(FunctionsComparator::CmpValues::NAMES); +} + +TEST_F(TransformationTestsF, FuseRPESorcesAreMultiOutputed) { + /* Transformation matcher searches for a single source as a beginning of the pattern: + VariadicSplit ... + source ____/ + \ + Multiply ... 
+ This test is designed to check that in case we feed VariadicSplit and Multiply from different outputs of the same + node, the transformation won't happen since the source isn't the same + */ + { + auto data_ = make_shared(element::f32, PartialShape::dynamic()); + auto sin = make_shared(element::f32, PartialShape::dynamic()); + auto cos = make_shared(element::f32, PartialShape::dynamic()); + + auto data = make_shared(data_, v0::Constant::create(element::i64, {}, {-1}), 2); + + auto axis = v0::Constant::create(element::i64, {}, {-1}); + auto split_lengths = v0::Constant::create(element::i64, {2}, {10, 10}); + auto split = make_shared(data->output(0), axis, split_lengths); + + auto minus_one = v0::Constant::create(element::f32, {}, {-1}); + auto negate = make_shared(split->output(1), minus_one); + + auto concat = make_shared(OutputVector{negate, split->output(0)}, -1); + + auto mul_sin = make_shared(concat, sin); + auto mul_cos = make_shared(data->output(1), cos); + auto add = make_shared(mul_cos, mul_sin); + + model = std::make_shared(NodeVector{add}, ParameterVector{data_, sin, cos}); + + manager.register_pass(); + } +} \ No newline at end of file diff --git a/src/common/transformations/tests/utils/compress_quantize_weights.cpp b/src/common/transformations/tests/utils/compress_quantize_weights.cpp index 0b730d0b36be5a..f76b4c901321d6 100644 --- a/src/common/transformations/tests/utils/compress_quantize_weights.cpp +++ b/src/common/transformations/tests/utils/compress_quantize_weights.cpp @@ -146,7 +146,12 @@ INSTANTIATE_TEST_SUITE_P(TransformationTests, CompressQuantizeWeightsTests, ::testing::Combine(::testing::ValuesIn(params), ::testing::ValuesIn(data_precisions))); +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, DISABLED_CompressQuantizeWeightsWithDequantizationSubgraph) { +#else TEST_F(TransformationTestsF, CompressQuantizeWeightsWithDequantizationSubgraph) { +#endif { auto data = opset8::Constant::create(element::f32, Shape{2, 4, 1, 1}, {-1, 0, 1, 2, 3, 4, 5, 11}); auto input_low = opset8::Constant::create(element::f32, Shape{}, {1}); @@ -178,7 +183,12 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithDequantizationSubgraph) comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, DISABLED_CompressQuantizeWeightsWithDequantizationSubgraphFP16) { +#else TEST_F(TransformationTestsF, CompressQuantizeWeightsWithDequantizationSubgraphFP16) { +#endif { auto data = opset8::Constant::create(element::f16, Shape{2, 4, 1, 1}, {-1, 0, 1, 2, 3, 4, 5, 11}); auto convert_to_f32 = std::make_shared(data, element::f32); @@ -213,7 +223,12 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithDequantizationSubgraphFP comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, DISABLED_CompressQuantizeWeightsWithZeroPointEliminated) { +#else TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminated) { +#endif { auto data = opset8::Constant::create(element::f32, Shape{3, 1, 1, 1}, {-0.144816, 0.0858578, 0.110928}); auto input_low = opset8::Constant::create(element::f32, Shape{3, 1, 1, 1}, {-0.402659, -0.383148, -0.34054}); @@ -237,7 +252,12 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminated) { comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, 
DISABLED_CompressQuantizeWeightsWithZeroPointEliminatedZeroScale) { +#else TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedZeroScale) { +#endif { auto data = opset8::Constant::create(element::f32, Shape{3, 1, 1, 1}, {-0.144816, 0.0858578, 0.110928}); auto input_low = opset8::Constant::create(element::f32, Shape{3, 1, 1, 1}, {-0.402659, -0.383148, -0.34054}); @@ -261,7 +281,12 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedZeroS comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, DISABLED_CompressQuantizeWeightsWithZeroPointEliminatedFP16) { +#else TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedFP16) { +#endif { auto data = opset8::Constant::create(element::f16, Shape{3, 1, 1, 1}, {0.2, 1.2, 1.2}); auto input_low = @@ -290,7 +315,12 @@ TEST_F(TransformationTestsF, CompressQuantizeWeightsWithZeroPointEliminatedFP16) comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, DISABLED_NegativeCompressQuantizeWeights) { +#else TEST_F(TransformationTestsF, NegativeCompressQuantizeWeights) { +#endif { auto data = opset8::Constant::create(element::f32, Shape{2, 4, 1, 1}, {-1, 0, 1, 2, 3, 4, 5, 11}); auto input_low = opset8::Constant::create(element::f32, Shape{}, {1}); @@ -315,7 +345,12 @@ TEST_F(TransformationTestsF, NegativeCompressQuantizeWeights) { comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } +#ifdef OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(TransformationTestsF, DISABLED_NegativeCompressQuantizeWeightsNonConstantInput) { +#else TEST_F(TransformationTestsF, NegativeCompressQuantizeWeightsNonConstantInput) { +#endif auto data = std::make_shared(element::f32, Shape{2, 4, 1, 1}); auto input_low = opset8::Constant::create(element::f32, Shape{}, {1}); auto input_high = opset8::Constant::create(element::f32, Shape{}, {9}); diff --git a/src/common/transformations/tests/utils/primitives_priority_test.cpp b/src/common/transformations/tests/utils/primitives_priority_test.cpp index 64f6330a1da188..6baceddaaacc68 100644 --- a/src/common/transformations/tests/utils/primitives_priority_test.cpp +++ b/src/common/transformations/tests/utils/primitives_priority_test.cpp @@ -12,12 +12,9 @@ #include #include "common_test_utils/ov_test_utils.hpp" -#include "common_test_utils/test_common.hpp" -#include "ie_ngraph_utils.hpp" #include "openvino/core/model.hpp" #include "openvino/opsets/opset1.hpp" #include "transformations/rt_info/primitives_priority_attribute.hpp" -#include "transformations/utils/utils.hpp" using namespace ov; using namespace testing; diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt index d389c1862703bf..0d0d42795ccac2 100644 --- a/src/core/CMakeLists.txt +++ b/src/core/CMakeLists.txt @@ -154,6 +154,7 @@ target_include_directories(ngraph INTERFACE $ shape_infer(const MatMul* op, const std::vector& input_s // Output shape of two 1D tensors multiplication will be a 0D tensor (scalar). 
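// --- Editor's note (illustrative, not part of the patch) ---------------------
// Worked examples of the 1D-argument rules handled just below (temporary axes
// inserted for the multiplication are erased again from the output shape):
//   {K} x {K}    -> promoted to {1, K} x {K, 1} -> {1, 1} -> {}   (0D scalar)
//   {M, K} x {K} -> {M, K} x {K, 1}             -> {M, 1} -> {M}
//   {K} x {K, N} -> {1, K} x {K, N}             -> {1, N} -> {N}
// -----------------------------------------------------------------------------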
if (arg0_shape.rank().get_length() == 1) { // arg0 input temporary axis inserted at ROW_INDEX_DIM is removed - output_shape.erase(output_shape.begin() + output_shape.size() - 2); + output_shape.erase(output_shape.begin() + (output_shape.size() - 2)); } if (arg1_shape.rank().get_length() == 1) { // arg1 input temporary axis inserted at COL_INDEX_DIM is removed - output_shape.erase(output_shape.begin() + output_shape.size() - 1); + output_shape.erase(std::prev(output_shape.end())); } output_shapes.emplace_back(std::move(output_shape)); return output_shapes; diff --git a/src/core/src/op/equal.cpp b/src/core/src/op/equal.cpp index 3460d1c7c2eb7a..12d94685dcf6b1 100644 --- a/src/core/src/op/equal.cpp +++ b/src/core/src/op/equal.cpp @@ -32,13 +32,6 @@ Tensor and_tensor(const Tensor& lhs, const Tensor& rhs) { return outs.front(); } -Tensor or_tensor(const Tensor& lhs, const Tensor& rhs) { - const auto logical_or = v1::LogicalOr(); - auto outs = TensorVector{{element::boolean, Shape{}}}; - logical_or.evaluate(outs, {lhs, rhs}); - return outs.front(); -} - void all_equal(const TensorVector& tensors, TensorVector& outputs) { auto& output = outputs[0]; auto eq_result = TensorVector{{output.get_element_type(), output.get_shape()}}; @@ -54,12 +47,6 @@ void all_equal(const TensorVector& tensors, TensorVector& outputs) { output = and_tensor(output, eq_result[0]); } } - -Tensor within_interval(const Tensor& lower, const Tensor& upper, const Tensor& subject_to_check) { - const auto lower_check = less_equal_tensor(lower, subject_to_check); - const auto upper_check = less_equal_tensor(subject_to_check, upper); - return and_tensor(lower_check, upper_check); -} } // namespace struct Evaluate : public element::NoAction { @@ -129,11 +116,11 @@ bool Equal::evaluate_upper(TensorVector& output_values) const { const auto &lhs = get_input_tensor(0), &rhs = get_input_tensor(1); const auto &lhs_lower = lhs.get_lower_value(), &lhs_upper = lhs.get_upper_value(); const auto &rhs_lower = rhs.get_lower_value(), &rhs_upper = rhs.get_upper_value(); - // check for intersection: - // ll <= rl <= lu or ll <= ru <= lu - const auto rl_check = equal::within_interval(lhs_lower, lhs_upper, rhs_lower); - const auto ru_check = equal::within_interval(lhs_lower, lhs_upper, rhs_upper); - output_values[0] = equal::or_tensor(rl_check, ru_check); + + // if (lhs_lower <= rhs_upper && rhs_lower <= lhs_upper) bounds have got intersection + const auto lb_check = equal::less_equal_tensor(lhs_lower, rhs_upper); + const auto ub_check = equal::less_equal_tensor(rhs_lower, lhs_upper); + output_values[0] = equal::and_tensor(lb_check, ub_check); return true; } diff --git a/src/core/tests/frontend/CMakeLists.txt b/src/core/tests/frontend/CMakeLists.txt index dd096fed759a94..a03ed97cac9ca9 100644 --- a/src/core/tests/frontend/CMakeLists.txt +++ b/src/core/tests/frontend/CMakeLists.txt @@ -12,7 +12,7 @@ target_compile_definitions(${MOCK1_FE_NAME} PRIVATE "-DMOCK_VARIANT=\"1\"") target_include_directories(${MOCK1_FE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) -target_link_libraries(${MOCK1_FE_NAME} PRIVATE frontend_common) +target_link_libraries(${MOCK1_FE_NAME} PRIVATE openvino::frontend::common) add_dependencies(ov_core_unit_tests ${MOCK1_FE_NAME}) ov_add_clang_format_target(${MOCK1_FE_NAME}_clang FOR_TARGETS ${MOCK1_FE_NAME}) diff --git a/src/core/tests/type_prop/equal.cpp b/src/core/tests/type_prop/equal.cpp new file mode 100644 index 00000000000000..d57462dc40e47e --- /dev/null +++ b/src/core/tests/type_prop/equal.cpp @@ -0,0 +1,51 @@ +// Copyright (C) 
2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/equal.hpp" + +#include + +#include "common_test_utils/type_prop.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/shape_of.hpp" + +using ov::op::v0::Convert; +using ov::op::v0::Parameter; +using ov::op::v3::Broadcast; +using ov::op::v3::ShapeOf; + +class TypePropEqualV1Test : public TypePropOpTest { + // Common test for Equal operator are in type_prop/binary_elementwise.cpp +}; + +TEST_F(TypePropEqualV1Test, lhs_upper_bound_within_rhs_bounds) { + constexpr auto et = ov::element::i32; + + const auto lhs = std::make_shared(et, ov::PartialShape{{1, 1}}); + const auto rhs = std::make_shared(et, ov::PartialShape{{0, -1}}); + const auto lhs_shape_of = std::make_shared(lhs, et); + const auto rhs_shape_of = std::make_shared(rhs, et); + const auto op = make_op(lhs_shape_of, rhs_shape_of); + + const auto p = std::make_shared(et, ov::PartialShape{1}); + const auto bc = std::make_shared(p, std::make_shared(op, et)); + + EXPECT_EQ(bc->get_output_partial_shape(0), ov::PartialShape({{0, 1}})); +} + +TEST_F(TypePropEqualV1Test, rhs_upper_bound_within_lhs_bounds) { + constexpr auto et = ov::element::i32; + + const auto lhs = std::make_shared(et, ov::PartialShape{{0, -1}}); + const auto rhs = std::make_shared(et, ov::PartialShape{{1, 1}}); + const auto lhs_shape_of = std::make_shared(lhs, et); + const auto rhs_shape_of = std::make_shared(rhs, et); + const auto op = make_op(lhs_shape_of, rhs_shape_of); + + const auto p = std::make_shared(et, ov::PartialShape{1}); + const auto bc = std::make_shared(p, std::make_shared(op, et)); + + EXPECT_EQ(bc->get_output_partial_shape(0), ov::PartialShape({{0, 1}})); +} diff --git a/src/frontends/common/CMakeLists.txt b/src/frontends/common/CMakeLists.txt index 4fd41e6f4d3601..9c3d45f949c5bb 100644 --- a/src/frontends/common/CMakeLists.txt +++ b/src/frontends/common/CMakeLists.txt @@ -2,18 +2,33 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME "frontend_common") +set(TARGET_NAME "openvino_frontend_common") + +set(FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) +set(FRONTEND_DEV_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/dev_api) file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp) -set(FRONTEND_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include) - source_group("src" FILES ${LIBRARY_SRC}) source_group("include" FILES ${LIBRARY_HEADERS}) source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS}) +# create frontend common library + +add_library(${TARGET_NAME} INTERFACE) + +target_include_directories(${TARGET_NAME} INTERFACE + $ + $) + +target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime) + +add_library(openvino::frontend::common ALIAS ${TARGET_NAME}) + +ov_install_static_lib(${TARGET_NAME} ${OV_CPACK_COMP_CORE}) + # create library add_library(${TARGET_NAME}_obj OBJECT ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS}) @@ -23,7 +38,7 @@ target_include_directories(${TARGET_NAME}_obj $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src - $ + $ # for ov_frontends.hpp in static build ${CMAKE_CURRENT_BINARY_DIR}/src) @@ -60,22 +75,12 @@ ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}_obj ADDITIONAL_INCLUDE_DIRECTORIES $) -# INTERFACE library for BW compatibility - -add_library(${TARGET_NAME} INTERFACE) 
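// --- Editor's note (illustrative sketch, not part of the patch) --------------
// The Equal::evaluate_upper change above replaces the per-bound membership
// test with a classic interval-overlap check: two bounded values may be equal
// iff lhs_lower <= rhs_upper && rhs_lower <= lhs_upper, which is exactly what
// the two type_prop tests exercise. A minimal sketch with hypothetical names:
#include <cassert>
#include <cstdint>

bool may_be_equal(int64_t lhs_lo, int64_t lhs_hi, int64_t rhs_lo, int64_t rhs_hi) {
    // Upper bound of Equal: true iff [lhs_lo, lhs_hi] and [rhs_lo, rhs_hi] intersect.
    return lhs_lo <= rhs_hi && rhs_lo <= lhs_hi;
}

int main() {
    assert(may_be_equal(1, 1, 0, INT64_MAX));  // {1,1} vs {0,+inf}: may intersect
    assert(!may_be_equal(0, 1, 2, 3));         // disjoint: upper bound is false (never equal)
}
// -----------------------------------------------------------------------------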
-target_link_libraries(${TARGET_NAME} INTERFACE openvino::runtime)
-target_include_directories(${TARGET_NAME} INTERFACE $
-                                                    $>)
-
-add_library(ngraph::${TARGET_NAME} ALIAS ${TARGET_NAME})
-add_library(openvino::frontend::common ALIAS ${TARGET_NAME})
-add_library(${TARGET_NAME}::static ALIAS ${TARGET_NAME})
-
 # Installation rules header files
 install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/openvino
         DESTINATION ${FRONTEND_INSTALL_INCLUDE}
-        COMPONENT ${OV_CPACK_COMP_CORE_DEV})
+        COMPONENT ${OV_CPACK_COMP_CORE_DEV}
+        ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
 
 # Shutdown protobuf library
 if(Protobuf_IN_FRONTEND AND BUILD_SHARED_LIBS)
diff --git a/src/frontends/common/dev_api/openvino/frontend/common/random_normal_helper.hpp b/src/frontends/common/dev_api/openvino/frontend/common/random_normal_helper.hpp
new file mode 100644
index 00000000000000..e88cf62354a148
--- /dev/null
+++ b/src/frontends/common/dev_api/openvino/frontend/common/random_normal_helper.hpp
@@ -0,0 +1,42 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "ngraph/output_vector.hpp"
+#include "openvino/frontend/visibility.hpp"
+#include "openvino/pass/graph_rewrite.hpp"
+
+namespace ov {
+namespace frontend {
+
+/// \brief Creates a random normal tensor with the given shape and type.
+/// \details Uses the Box-Muller algorithm to generate random numbers from a Gaussian distribution
+/// \param sizes Shape of the output tensor
+/// \param target_type Type of the output tensor
+/// \param mean Mean of the distribution
+/// \param scale Standard deviation of the distribution
+/// \param seed Seed for the random number generator
+FRONTEND_API OutputVector make_random_normal(pass::NodeRegistry& registry,
+                                             const Output<Node>& sizes,
+                                             element::Type target_type,
+                                             const Output<Node>& mean,
+                                             const Output<Node>& scale,
+                                             float seed);
+
+/// \brief Creates a random normal tensor with the given shape and type.
+/// \details Uses the Box-Muller algorithm to generate random numbers from a Gaussian distribution
+/// \param sizes Shape of the output tensor
+/// \param target_type Type of the output tensor
+/// \param mean Mean of the distribution
+/// \param scale Standard deviation of the distribution
+/// \param seed Seed for the random number generator
+FRONTEND_API std::pair<OutputVector, pass::NodeRegistry> make_random_normal(const Output<Node>& sizes,
+                                                                            element::Type target_type,
+                                                                            const Output<Node>& mean,
+                                                                            const Output<Node>& scale,
+                                                                            float seed);
+
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/common/include/openvino/frontend/visibility.hpp b/src/frontends/common/include/openvino/frontend/visibility.hpp
index 4e07f3cedbbc17..ddc2d7eb6898e7 100644
--- a/src/frontends/common/include/openvino/frontend/visibility.hpp
+++ b/src/frontends/common/include/openvino/frontend/visibility.hpp
@@ -9,7 +9,7 @@
 // Increment each time when FrontEnd/InputModel/Place interface is changed
 #define OV_FRONTEND_API_VERSION 1
 
-#if defined(USE_STATIC_FRONTEND_COMMON) || defined(OPENVINO_STATIC_LIBRARY)
+#if defined(OPENVINO_STATIC_LIBRARY)
 #    define FRONTEND_API
 #    define FRONTEND_C_API
 #else
@@ -20,5 +20,5 @@
 #    else
 #        define FRONTEND_API OPENVINO_CORE_IMPORTS
 #        define FRONTEND_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS
-#    endif  // frontend_common_EXPORTS
-#endif  // USE_STATIC_FRONTEND_COMMON || OPENVINO_STATIC_LIBRARY
+#    endif  // openvino_frontend_common_EXPORTS
+#endif  // OPENVINO_STATIC_LIBRARY
diff --git a/src/frontends/common/src/random_normal_helper.cpp b/src/frontends/common/src/random_normal_helper.cpp
new file mode 100644
index 00000000000000..5e789a9f72f2f5
--- /dev/null
+++ b/src/frontends/common/src/random_normal_helper.cpp
@@ -0,0 +1,77 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/common/random_normal_helper.hpp"
+
+#include "ngraph/output_vector.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset12.hpp"
+#include "openvino/pass/graph_rewrite.hpp"
+#include "transformations/rt_info/disable_fp16_compression.hpp"
+#define _USE_MATH_DEFINES
+#include <math.h>
+
+namespace ov {
+namespace frontend {
+
+OutputVector make_random_normal(pass::NodeRegistry& registry,
+                                const Output<Node>& sizes,
+                                element::Type target_type,
+                                const Output<Node>& mean,
+                                const Output<Node>& scale,
+                                float seed) {
+    // We start by generating two random series from a uniform distribution
+    const uint64_t global_seed = 0;
+
+    // ONNX specifies the seed as a float, but OpenVINO uses uint64_t
+    const auto op_seed = static_cast<uint64_t>(seed * 1000);
+
+    // We need to use two op_seeds to make sure we get different results for two RandomUniform series
+    // But we also have to keep original logic and pass "0" (auto-generated seed) to RandomUniform
+    const uint64_t seed_1 = op_seed;
+    const uint64_t seed_2 = (op_seed == 0 ?
+
+    auto min_val = registry.make<opset12::Constant>(target_type, Shape{1}, std::numeric_limits<float>::min());
+    auto max_val = registry.make<opset12::Constant>(target_type, Shape{1}, 1);
+
+    auto uniform_1 = registry.make<opset12::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_1);
+    auto uniform_2 = registry.make<opset12::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_2);
+
+    // Compute Box–Muller transform
+    // random_normal = scale * sqrt(-2.0 * log(uniform_1)) * cos(2.0 * pi * uniform_2) + mean
+    auto pi = registry.make<opset12::Constant>(target_type, Shape{1}, M_PI);
+    auto minus_two = registry.make<opset12::Constant>(target_type, Shape{1}, -2.0);
+    auto two = registry.make<opset12::Constant>(target_type, Shape{1}, 2.0);
+
+    auto log = registry.make<opset12::Log>(uniform_1);
+    auto multiply_minus_two_log = registry.make<opset12::Multiply>(log, minus_two);
+    auto sqrt = registry.make<opset12::Sqrt>(multiply_minus_two_log);
+
+    auto multiply_2pi = registry.make<opset12::Multiply>(two, pi);
+    auto multiply_2pi_uniform_2 = registry.make<opset12::Multiply>(multiply_2pi, uniform_2);
+    auto cos = registry.make<opset12::Cos>(multiply_2pi_uniform_2);
+
+    auto sqrt_x_cos = registry.make<opset12::Multiply>(sqrt, cos);
+    auto product = registry.make<opset12::Multiply>(scale, sqrt_x_cos);
+    auto sum = registry.make<opset12::Add>(product, mean);
+
+    // if we don't disable down-casting then log(float32_min) gives -inf
+    disable_fp16_compression(uniform_1);
+    disable_fp16_compression(log);
+
+    return {sum};
+}
+
+std::pair<OutputVector, pass::NodeRegistry> make_random_normal(const Output<Node>& sizes,
+                                                               element::Type target_type,
+                                                               const Output<Node>& mean,
+                                                               const Output<Node>& scale,
+                                                               float seed) {
+    pass::NodeRegistry registry;
+    OutputVector res = make_random_normal(registry, sizes, target_type, mean, scale, seed);
+    return std::make_pair(res, registry);
+}
+
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/onnx/frontend/CMakeLists.txt b/src/frontends/onnx/frontend/CMakeLists.txt
index d4681b54b08f93..50782959202168 100644
--- a/src/frontends/onnx/frontend/CMakeLists.txt
+++ b/src/frontends/onnx/frontend/CMakeLists.txt
@@ -16,10 +16,9 @@ target_compile_definitions(${TARGET_NAME} PRIVATE ONNX_OPSET_VERSION=${ONNX_OPSE
 ov_ncc_naming_style(FOR_TARGET ${TARGET_NAME}
                     SOURCE_DIRECTORIES "${${TARGET_NAME}_INCLUDE_DIR}"
                     DEFINITIONS
-                        $
-                    ADDITIONAL_INCLUDE_DIRECTORIES
-                        $)
+                        $)
 
 install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/onnx_import
         DESTINATION ${FRONTEND_INSTALL_INCLUDE}/ngraph/frontend
-        COMPONENT ${OV_CPACK_COMP_CORE_DEV})
+        COMPONENT ${OV_CPACK_COMP_CORE_DEV}
+        ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
diff --git a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
index 86a19fd4dc8f6c..0647e1f6adb0cc 100644
--- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
+++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp
@@ -174,10 +174,16 @@ OutputVector dequantize_linear(const Node& node) {
     const auto& scale = inputs[1];
     const auto zero_point = op::detail::get_zero_point(inputs);
 
+    const auto& scale_shape = scale.get_partial_shape();
     // per-tensor quantization, axis attribute ignored
-    if (scale.get_partial_shape().rank().is_static() && scale.get_partial_shape().rank().get_length() == 0) {
-        if (!zero_point || (zero_point->get_output_partial_shape(0).rank().is_static() &&
-                            zero_point->get_output_partial_shape(0).rank().get_length() == 0)) {
+    if ((scale_shape.rank().is_static() && scale_shape.size() == 0) ||
+        (scale_shape.is_static() && shape_size(scale_shape.get_shape()) == 1)) {
+        if (!zero_point) {
+            return set_1::dequantize_linear(node);
+        }
+        const auto& zero_point_shape = zero_point->get_output_partial_shape(0);
+        if ((zero_point_shape.rank().is_static() && zero_point_shape.size() == 0) ||
+            (zero_point_shape.is_static() && shape_size(zero_point_shape.get_shape()) == 1)) {
             return set_1::dequantize_linear(node);
         }
     }
diff --git a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp
index 40ace174738bb3..9468a579e169f1 100644
--- a/src/frontends/onnx/frontend/src/op/quantize_linear.cpp
+++ b/src/frontends/onnx/frontend/src/op/quantize_linear.cpp
@@ -34,10 +34,12 @@ Output get_zero_point(const OutputVector& inputs) {
 void validate_zero_point_type(const Node& onnx_node, const Output<ngraph::Node>& y_zero_point) {
     const auto& y_zero_point_et = y_zero_point.get_element_type();
-    CHECK_VALID_NODE(onnx_node,
-                     y_zero_point_et.is_static() && (y_zero_point_et == element::u8 || y_zero_point_et == element::i8),
-                     "\"y_zero_point\" input data type must be static and of 8-bit "
-                     "integer type.");
+    CHECK_VALID_NODE(
+        onnx_node,
+        y_zero_point_et.is_static() && (y_zero_point_et == element::u8 || y_zero_point_et == element::i8 ||
+                                        y_zero_point_et == element::u16 || y_zero_point_et == element::i16),
+        "\"y_zero_point\" input data type for the QuantizeLinear operator must be static and one of the "
+        "supported integer types: u8, i8, u16 or i16.");
 }
 
 Output<ngraph::Node> validate_scale(const Node& onnx_node, const Output<ngraph::Node>& y_scale) {
@@ -65,12 +67,28 @@ std::tuple, std::shared_ptr> get_out
     std::shared_ptr<ngraph::Node> output_low;
     std::shared_ptr<ngraph::Node> output_high;
 
-    if (destination_type == element::i8) {
+    // These values could be used in a ConvertQuantizeDequantize transformation and
+    // should be aligned
+    switch (destination_type) {
+    case element::i8:
         output_low = std::make_shared<default_opset::Constant>(data_type, Shape{1}, -128);
         output_high = std::make_shared<default_opset::Constant>(data_type, Shape{1}, 127);
-    } else {
+        break;
+    case element::u8:
         output_low = std::make_shared<default_opset::Constant>(data_type, Shape{1}, 0);
         output_high = std::make_shared<default_opset::Constant>(data_type, Shape{1}, 255);
+        break;
+    case element::i16:
+        output_low = std::make_shared<default_opset::Constant>(data_type, Shape{1}, -32768);
+        output_high = std::make_shared<default_opset::Constant>(data_type, Shape{1}, 32767);
+        break;
+    case element::u16:
+        output_low = std::make_shared<default_opset::Constant>(data_type, Shape{1}, 0);
+        output_high = std::make_shared<default_opset::Constant>(data_type, Shape{1}, 65535);
+        break;
+    default:
+        OPENVINO_THROW("Unsupported element type for QuantizeLinear");
+        break;
     }
 
     return std::make_tuple(output_low, output_high);
diff --git a/src/frontends/onnx/frontend/src/op/random_normal.cpp b/src/frontends/onnx/frontend/src/op/random_normal.cpp
index 35978df4dbf2ad..5b8ccb80e8e380 100644
--- a/src/frontends/onnx/frontend/src/op/random_normal.cpp
+++ b/src/frontends/onnx/frontend/src/op/random_normal.cpp
@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "utils/random_normal.hpp"
-
 #include "exceptions.hpp"
 #include "ngraph/shape.hpp"
+#include "openvino/frontend/common/random_normal_helper.hpp"
+#include "openvino/op/constant.hpp"
 #include "utils/common.hpp"
 
 OPENVINO_SUPPRESS_DEPRECATED_START
@@ -23,11 +23,13 @@ OutputVector random_normal(const Node& node) {
 
     const auto mean = node.get_attribute_value<float>("mean", 0.0f);
     const auto scale = node.get_attribute_value<float>("scale", 1.0f);
-    const auto seed = node.get_attribute_value<float>("seed", 0);
+    auto scale_node = ov::op::v0::Constant::create(target_type, Shape{1}, {scale});
+    auto mean_node = ov::op::v0::Constant::create(target_type, Shape{1}, {mean});
 
+    const auto seed = node.get_attribute_value<float>("seed", 0);
     const auto shape = node.get_attribute_as_constant<std::vector<int64_t>>("shape");
-
-    return detail::make_random_normal(shape, target_type, mean, scale, seed);
+    auto res = ov::frontend::make_random_normal(shape, target_type, mean_node, scale_node, seed);
+    return res.first;
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp
index 8d77d8055db16b..0df669b4ada2aa 100644
--- a/src/frontends/onnx/frontend/src/op/random_normal_like.cpp
+++ b/src/frontends/onnx/frontend/src/op/random_normal_like.cpp
@@ -4,8 +4,8 @@
 
 #include "ngraph/shape.hpp"
 #include "op/random_uniform_like.hpp"
+#include "openvino/frontend/common/random_normal_helper.hpp"
 #include "utils/common.hpp"
-#include "utils/random_normal.hpp"
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 namespace ngraph {
@@ -25,11 +25,15 @@ OutputVector random_normal_like(const Node& node) {
     }
 
     const auto shape = std::make_shared<default_opset::ShapeOf>(input);
+    const auto seed = node.get_attribute_value<float>("seed", 0.0f);
+
     const auto mean = node.get_attribute_value<float>("mean", 0.0f);
     const auto scale = node.get_attribute_value<float>("scale", 1.0f);
-    const auto seed = node.get_attribute_value<float>("seed", 0.0f);
+    auto scale_node = ov::op::v0::Constant::create(target_type, Shape{1}, {scale});
+    auto mean_node = ov::op::v0::Constant::create(target_type, Shape{1}, {mean});
 
-    return detail::make_random_normal(shape, target_type, mean, scale, seed);
+    auto res = ov::frontend::make_random_normal(shape, target_type, mean_node, scale_node, seed);
+    return res.first;
 }
 
 }  // namespace set_1
diff --git a/src/frontends/onnx/frontend/src/ops_bridge.cpp b/src/frontends/onnx/frontend/src/ops_bridge.cpp
index 88f00c59cdbe1d..b26efd10bf0b84 100644
--- a/src/frontends/onnx/frontend/src/ops_bridge.cpp
+++ b/src/frontends/onnx/frontend/src/ops_bridge.cpp
@@ -576,6 +576,15 @@ OperatorsBridge::OperatorsBridge() {
     REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "SkipLayerNormalization", 1, skip_layer_normalization);
     REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "Trilu", 1, trilu);
 
+    register_operator_in_custom_domain("DequantizeLinear",
+                                       VersionRange::since(1),
+                                       op::set_13::dequantize_linear,
+                                       "com.microsoft");
+    register_operator_in_custom_domain("QuantizeLinear",
+                                       VersionRange::since(1),
+                                       op::set_13::quantize_linear,
+                                       "com.microsoft");
+
     REGISTER_OPERATOR_WITH_DOMAIN(PYTORCH_ATEN_DOMAIN, "adaptive_avg_pool2d", 1, adaptive_avg_pooling2d);
     REGISTER_OPERATOR_WITH_DOMAIN(MMDEPLOY_DOMAIN, "NMSRotated", 1, nms_rotated);
 }
diff --git a/src/frontends/onnx/frontend/src/utils/random_normal.cpp b/src/frontends/onnx/frontend/src/utils/random_normal.cpp
deleted file mode 100644
index 0905be6ddb1f8b..00000000000000
--- a/src/frontends/onnx/frontend/src/utils/random_normal.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "random_normal.hpp"
-
-#include "default_opset.hpp"
-#include "ngraph/opsets/opset8.hpp"
-
-namespace ngraph {
-namespace onnx_import {
-namespace detail {
-
-OutputVector make_random_normal(const Output<Node>& shape,
-                                element::Type target_type,
-                                float mean,
-                                float scale,
-                                float seed) {
-    // We start by generating two random series from a uniform distribution
-    const uint64_t global_seed = 0;
-
-    // ONNX specifies the seed as a float, but OpenVINO uses uint64_t
-    const auto op_seed = static_cast<uint64_t>(seed * 1000);
-
-    // We need to use two op_seeds to make sure we get different results for two RandomUniform series
-    // But we also have to keep original logic and pass "0" (auto-generated seed) to RandomUniform
-    const uint64_t seed_1 = op_seed;
-    const uint64_t seed_2 = (op_seed == 0 ? op_seed : op_seed + 10000);
-
-    const auto min_val = default_opset::Constant::create(target_type, Shape{1}, {0});
-    const auto max_val = default_opset::Constant::create(target_type, Shape{1}, {1});
-
-    const auto uniform_1 =
-        std::make_shared<ngraph::opset8::RandomUniform>(shape, min_val, max_val, target_type, global_seed, seed_1);
-    const auto uniform_2 =
-        std::make_shared<ngraph::opset8::RandomUniform>(shape, min_val, max_val, target_type, global_seed, seed_2);
-
-    // Compute Box–Muller transform
-    // random_normal = scale * ng.sqrt(-2.0 * ng.log(uniform_1)) * ng.cos(2.0 * np.pi * uniform_2) + mean
-    const auto pi = default_opset::Constant::create(target_type, Shape{1}, {3.141592653589793});
-    const auto minus_two = default_opset::Constant::create(target_type, Shape{1}, {-2.0});
-    const auto two = default_opset::Constant::create(target_type, Shape{1}, {2.0});
-
-    const auto log = std::make_shared<default_opset::Log>(uniform_1);
-    const auto multiply_minus_two_log = std::make_shared<default_opset::Multiply>(log, minus_two);
-    const auto sqrt = std::make_shared<default_opset::Sqrt>(multiply_minus_two_log);
-
-    const auto multiply_two_pi = std::make_shared<default_opset::Multiply>(uniform_2, pi);
-    const auto multiply_two_pi_uniform_2 = std::make_shared<default_opset::Multiply>(multiply_two_pi, uniform_2);
-    auto const cos = std::make_shared<default_opset::Cos>(multiply_two_pi_uniform_2);
-
-    auto const scale_const = default_opset::Constant::create(target_type, Shape{1}, {scale});
-    auto const mean_const = default_opset::Constant::create(target_type, Shape{1}, {mean});
-    auto const product =
-        std::make_shared<default_opset::Multiply>(scale_const, std::make_shared<default_opset::Multiply>(sqrt, cos));
-    auto const sum = std::make_shared<default_opset::Add>(product, mean_const);
-
-    return {sum};
-}
-
-}  // namespace detail
-}  // namespace onnx_import
-}  // namespace ngraph
diff --git a/src/frontends/onnx/frontend/src/utils/random_normal.hpp b/src/frontends/onnx/frontend/src/utils/random_normal.hpp
deleted file mode 100644
index f581b2a01b393a..00000000000000
--- a/src/frontends/onnx/frontend/src/utils/random_normal.hpp
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-#include "ngraph/op/reshape.hpp"
-#include "ngraph/output_vector.hpp"
-
-namespace ngraph {
-namespace onnx_import {
-namespace detail {
-
-/// \brief Creates a random normal tensor with the given shape and type.
-/// \details Uses the Box-Muller algorithm to generate random numbers from a Gaussian distribution
-/// \param shape Shape of the output tensor
-/// \param type Type of the output tensor
-/// \param mean Mean of the distribution
-/// \param scale Standard deviation of the distribution
-/// \param seed Seed for the random number generator
-OutputVector make_random_normal(const Output<Node>& shape,
-                                element::Type type,
-                                float mean,
-                                float scale,
-                                float seed);
-
-}  // namespace detail
-}  // namespace onnx_import
-}  // namespace ngraph
diff --git a/src/frontends/onnx/onnx_common/CMakeLists.txt b/src/frontends/onnx/onnx_common/CMakeLists.txt
index 7e27aab9f025a6..d8f2b6d98d11be 100644
--- a/src/frontends/onnx/onnx_common/CMakeLists.txt
+++ b/src/frontends/onnx/onnx_common/CMakeLists.txt
@@ -28,7 +28,7 @@ target_include_directories(${TARGET_NAME}
                            $
                            PRIVATE ${ONNX_COMMON_SRC_DIR})
 
-target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime)
+target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime openvino::util)
 
 ov_link_system_libraries(${TARGET_NAME} PUBLIC onnx_proto onnx)
diff --git a/src/frontends/onnx/onnx_common/include/onnx_common/parser.hpp b/src/frontends/onnx/onnx_common/include/onnx_common/parser.hpp
index e51e66aff45cf3..081579e74673cd 100644
--- a/src/frontends/onnx/onnx_common/include/onnx_common/parser.hpp
+++ b/src/frontends/onnx/onnx_common/include/onnx_common/parser.hpp
@@ -3,24 +3,23 @@
 //
 #pragma once
+#include <onnx/onnx_pb.h>
+
 #include <istream>
 #include <string>
 
-/// \ingroup ngraph_cpp_api
-namespace ONNX_NAMESPACE {
-class ModelProto;
-}
-
-namespace ngraph {
+namespace ov {
+namespace frontend {
 namespace onnx_common {
+using namespace ::ONNX_NAMESPACE;
 
 /// \brief Parses an ONNX model from a file located on a storage device.
 ///
 /// \param file_path Path to the file containing an ONNX model.
 ///
 /// \return The parsed in-memory representation of the ONNX model
-ONNX_NAMESPACE::ModelProto parse_from_file(const std::string& file_path);
+ModelProto parse_from_file(const std::string& file_path);
 
 #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
-ONNX_NAMESPACE::ModelProto parse_from_file(const std::wstring& file_path);
+ModelProto parse_from_file(const std::wstring& file_path);
 #endif
 
 /// \brief Parses an ONNX model from a stream (representing for example a file)
@@ -28,7 +27,13 @@ ONNX_NAMESPACE::ModelProto parse_from_file(const std::wstring& file_path);
 /// \param model_stream The stream containing an ONNX model.
 ///
 /// \return The parsed in-memory representation of the ONNX model
-ONNX_NAMESPACE::ModelProto parse_from_istream(std::istream& model_stream);
+ModelProto parse_from_istream(std::istream& model_stream);
 
 }  // namespace onnx_common
+}  // namespace frontend
+}  // namespace ov
+
+namespace ngraph {
+namespace onnx_common {
+using namespace ov::frontend::onnx_common;
+}
 }  // namespace ngraph
diff --git a/src/frontends/onnx/onnx_common/src/parser.cpp b/src/frontends/onnx/onnx_common/src/parser.cpp
index 9682c35088291a..dd64a13b430de7 100644
--- a/src/frontends/onnx/onnx_common/src/parser.cpp
+++ b/src/frontends/onnx/onnx_common/src/parser.cpp
@@ -8,11 +8,11 @@
 #include <fstream>
 #include <onnx/onnx_pb.h>
 
-#include <ngraph/file_util.hpp>
+#include "openvino/core/except.hpp"
+#include "openvino/util/file_util.hpp"
 
-#include "ngraph/except.hpp"
-
-namespace ngraph {
+namespace ov {
+namespace frontend {
 namespace onnx_common {
 ONNX_NAMESPACE::ModelProto parse_from_file(const std::string& file_path) {
     std::ifstream file_stream{file_path.c_str(), std::ios::in | std::ios::binary};
@@ -31,9 +31,7 @@ ONNX_NAMESPACE::ModelProto parse_from_file(const std::wstring& file_path) {
     std::ifstream file_stream{file_path.c_str(), std::ios::in | std::ios::binary};
 
     if (!file_stream.is_open()) {
-        NGRAPH_SUPPRESS_DEPRECATED_START
-        OPENVINO_THROW("Could not open the file: " + file_util::wstring_to_string(file_path));
-        NGRAPH_SUPPRESS_DEPRECATED_END
+        OPENVINO_THROW("Could not open the file: " + ov::util::wstring_to_string(file_path));
     };
 
     auto model_proto = parse_from_istream(file_stream);
@@ -60,4 +58,5 @@ ONNX_NAMESPACE::ModelProto parse_from_istream(std::istream& model_stream) {
     return model_proto;
 }
 }  // namespace onnx_common
-}  // namespace ngraph
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/onnx/tests/CMakeLists.txt b/src/frontends/onnx/tests/CMakeLists.txt
index 76e7893efef623..2b2d4b0bc95198 100644
--- a/src/frontends/onnx/tests/CMakeLists.txt
+++ b/src/frontends/onnx/tests/CMakeLists.txt
@@ -68,7 +68,6 @@ set(MULTI_TEST_SRC
     onnx_import_rnn.in.cpp
     onnx_import_signal.in.cpp
     onnx_import_quant.in.cpp
-    onnx_test_utils.in.cpp
     onnx_import_with_editor.in.cpp)
 set(SRC
     conversion.cpp
@@ -83,6 +82,7 @@ set(SRC
     onnx_importer_test.cpp
     onnx_tensor_names.cpp
     onnx_test_util.cpp
+    onnx_utils.cpp
     onnx_transformations.cpp
     op_extension.cpp
     telemetry.cpp
@@ -142,8 +142,13 @@ if(ONNX_TESTS_DEPENDENCIES)
     add_dependencies(ov_onnx_frontend_tests ${ONNX_TESTS_DEPENDENCIES})
 endif()
 
-target_link_libraries(ov_onnx_frontend_tests PRIVATE gtest_main_manifest openvino::runtime::dev
-        openvino_onnx_frontend openvino_onnx_common func_test_utils)
+target_link_libraries(ov_onnx_frontend_tests PRIVATE
+    gtest_main_manifest
+    frontend_shared_test_classes
+    openvino::runtime::dev
+    openvino_onnx_frontend
+    openvino_onnx_common
+    func_test_utils)
 
 # It's needed by onnx_import_library.cpp and onnx_import_exceptions.cpp tests to include onnx_pb.h.
 # Not linking statically to libprotobuf (linked into libonnx) avoids false-failing onnx_editor tests.
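Editor's note: the parser move above stays source-compatible through a namespace re-export, where the new implementation lives in ov::frontend::onnx_common and a using-directive keeps the legacy ngraph::onnx_common spelling compiling. A minimal self-contained sketch of the pattern follows; parse_version is a hypothetical stand-in for parse_from_file/parse_from_istream and is not part of the patch.

// compat_namespace_sketch.cpp -- illustrative only
namespace ov {
namespace frontend {
namespace onnx_common {
inline int parse_version() {
    return 21;  // arbitrary value for the demo
}
}  // namespace onnx_common
}  // namespace frontend
}  // namespace ov

namespace ngraph {
namespace onnx_common {
using namespace ov::frontend::onnx_common;  // re-export new symbols under the legacy namespace
}  // namespace onnx_common
}  // namespace ngraph

int main() {
    // The old spelling still works; qualified lookup follows the using-directive
    // into the ov::frontend::onnx_common implementation.
    return ngraph::onnx_common::parse_version() == ov::frontend::onnx_common::parse_version() ? 0 : 1;
}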
diff --git a/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt new file mode 100644 index 00000000000000..c7a5a2b9f80b72 --- /dev/null +++ b/src/frontends/onnx/tests/models/dequantize_linear_u16.prototxt @@ -0,0 +1,62 @@ +ir_version: 3 +producer_name: "ngraph ONNXImporter" +graph { + node { + input: "x" + input: "x_scale" + input: "x_zero_point" + output: "y" + name: "node1" + op_type: "DequantizeLinear" + } + name: "test" + input { + name: "x" + type { + tensor_type { + elem_type: 4 + shape { + dim { + dim_value: 4 + } + } + } + } + } + input { + name: "x_scale" + type { + tensor_type { + elem_type: 1 + shape { + } + } + } + } + input { + name: "x_zero_point" + type { + tensor_type { + elem_type: 4 + shape { + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 4 + } + } + } + } + } +} +opset_import { + version: 21 +} diff --git a/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt b/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt new file mode 100644 index 00000000000000..1595fd9b481199 --- /dev/null +++ b/src/frontends/onnx/tests/models/quantize_linear_u16.prototxt @@ -0,0 +1,73 @@ +ir_version: 3 +producer_name: "ngraph ONNXImporter" +graph { + node { + input: "X" + input: "y_scale" + input: "y_zero_point" + output: "Y" + name: "QuantizeLinear" + op_type: "QuantizeLinear" + } + name: "test_graph" + initializer { + data_type: 4 + name: "y_zero_point" + raw_data: "\000" + } + input { + name: "X" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } + input { + name: "y_scale" + type { + tensor_type { + elem_type: 1 + shape { + } + } + } + } + input { + name: "y_zero_point" + type { + tensor_type { + elem_type: 4 + shape { + } + } + } + } + output { + name: "Y" + type { + tensor_type { + elem_type: 4 + shape { + dim { + dim_value: 2 + } + dim { + dim_value: 2 + } + } + } + } + } +} +opset_import { + version: 21 +} diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 10c8346c3dd833..dc302f500c4124 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -5366,7 +5366,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal) { file_util::path_join(ov::test::utils::getExecutableDirectory(), SERIALIZED_ZOO, "onnx/random_normal.onnx")); auto test_case = ov::test::TestCase(function, s_device); - test_case.add_expected_output(Shape{2, 2}, {13.459274f, 41.75028f, -19.311913f, 131.79282f}); + test_case.add_expected_output(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f}); test_case.run(); } @@ -5377,7 +5377,7 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_random_normal_like) { auto test_case = ov::test::TestCase(function, s_device); test_case.add_input(Shape{2, 2}, {0, 0, 0, 0}); - test_case.add_expected_output(Shape{2, 2}, {13.459274f, 41.75028f, -19.311913f, 131.79282f}); + test_case.add_expected_output(Shape{2, 2}, {83.052017f, 55.496368f, 119.31188f, -3.6946249f}); test_case.run(); } diff --git a/src/frontends/onnx/tests/onnx_import_quant.in.cpp b/src/frontends/onnx/tests/onnx_import_quant.in.cpp index ea13274d1b8804..0be5d99d8ce15b 100644 --- a/src/frontends/onnx/tests/onnx_import_quant.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_quant.in.cpp @@ -59,6 +59,19 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_quantize_linear) { test_case.run(); } 
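// Editor's sketch (not part of the patch): the u16 expectations in the tests below can be
// reproduced from the ONNX reference formulas, assuming QuantizeLinear rounds half to even
// and DequantizeLinear computes y = (x - zero_point) * scale.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

static uint16_t quantize_u16(float x, float scale, uint16_t zero_point) {
    // std::nearbyint rounds half to even under the default rounding mode
    const float q = std::nearbyint(x / scale) + static_cast<float>(zero_point);
    return static_cast<uint16_t>(std::min(65535.0f, std::max(0.0f, q)));
}

static float dequantize_u16(uint16_t x, float scale, uint16_t zero_point) {
    return (static_cast<float>(x) - static_cast<float>(zero_point)) * scale;
}

int main() {
    // Matches the quantize_linear_u16 test: scale = 0.5, zero_point = 0
    assert(quantize_u16(32.25f, 0.5f, 0) == 64);  // 64.5 rounds to even -> 64
    assert(quantize_u16(48.34f, 0.5f, 0) == 97);  // 96.68 -> 97
    assert(quantize_u16(22883.0f, 0.5f, 0) == 45766);
    // Matches the dequantize_linear_u16 test: scale = 2.0, zero_point = 32768
    assert(dequantize_u16(0, 2.0f, 32768) == -65536.0f);
    assert(dequantize_u16(65535, 2.0f, 32768) == 65534.0f);
    return 0;
}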
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_quantize_linear_u16) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/quantize_linear_u16.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+    test_case.add_input<float>(std::vector<float>{32.25f, 48.34f, 250.f, 22883.f});
+    test_case.add_input<float>(std::vector<float>{0.5f});
+
+    test_case.add_expected_output<uint16_t>(std::vector<uint16_t>{64, 97, 500, 45766});
+    test_case.run();
+}
+
 OPENVINO_TEST(${BACKEND_NAME}, onnx_model_quantize_linear_zero_point) {
     auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                         SERIALIZED_ZOO,
@@ -220,6 +233,20 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_scalar_zero_scale_ui
     test_case.run();
 }
 
+OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_scalar_zero_scale_uint16) {
+    auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
+                                                                        SERIALIZED_ZOO,
+                                                                        "onnx/dequantize_linear_u16.onnx"));
+
+    auto test_case = ov::test::TestCase(function, s_device);
+    test_case.add_input<uint16_t>(std::vector<uint16_t>{0, 3, 32768, 65535});  // x
+    test_case.add_input<float>(std::vector<float>{2.0f});                      // scale
+    test_case.add_input<uint16_t>(std::vector<uint16_t>{32768});               // zero_point
+
+    test_case.add_expected_output<float>({4}, std::vector<float>{-65536.0f, -65530.0f, 0.0f, 65534.0f});
+    test_case.run();
+}
+
 OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_scalar_zero_scale_int8) {
     auto function = onnx_import::import_onnx_model(file_util::path_join(ov::test::utils::getExecutableDirectory(),
                                                                         SERIALIZED_ZOO,
diff --git a/src/frontends/onnx/tests/onnx_test_util.cpp b/src/frontends/onnx/tests/onnx_test_util.cpp
index 668bca583f8f98..2fb733a7f9e8a7 100644
--- a/src/frontends/onnx/tests/onnx_test_util.cpp
+++ b/src/frontends/onnx/tests/onnx_test_util.cpp
@@ -191,8 +191,10 @@ ComparisonResult compare_onnx_graphs(const ONNX_NAMESPACE::GraphProto& graph,
     return compare_nodes(graph, ref_graph, comp);
 }
 }  // namespace
-namespace ngraph {
-namespace test {
+namespace ov {
+namespace frontend {
+namespace onnx {
+namespace tests {
 
 bool default_name_comparator(std::string lhs, std::string rhs) {
     return lhs == rhs;
@@ -220,5 +222,7 @@ std::string change_opset_version(const std::string& model,
     return model_proto.SerializeAsString();
 }
 
-}  // namespace test
-}  // namespace ngraph
+}  // namespace tests
+}  // namespace onnx
+}  // namespace frontend
+}  // namespace ov
\ No newline at end of file
diff --git a/src/frontends/onnx/tests/onnx_test_util.hpp b/src/frontends/onnx/tests/onnx_test_util.hpp
index a61371c03b882a..c5db6f6a0c54fb 100644
--- a/src/frontends/onnx/tests/onnx_test_util.hpp
+++ b/src/frontends/onnx/tests/onnx_test_util.hpp
@@ -8,8 +8,10 @@
 #include <string>
 #include <vector>
 
-namespace ngraph {
-namespace test {
+namespace ov {
+namespace frontend {
+namespace onnx {
+namespace tests {
 struct ComparisonResult {
     ComparisonResult() = default;
     ComparisonResult(std::string error) : is_ok{false}, error_message{std::move(error)} {}
@@ -40,5 +42,13 @@ ComparisonResult compare_onnx_models(const std::string& model,
 std::string change_opset_version(const std::string& model,
                                  const std::vector<int64_t>& new_opset_version,
                                  const std::string& domain = "ai.onnx");
+}  // namespace tests
+}  // namespace onnx
+}  // namespace frontend
+}  // namespace ov
+
+namespace ngraph {
+namespace test {
+using namespace ov::frontend::onnx::tests;
 }  // namespace test
 }  // namespace ngraph
diff --git a/src/frontends/onnx/tests/onnx_test_utils.in.cpp
b/src/frontends/onnx/tests/onnx_test_utils.in.cpp deleted file mode 100644 index 6290310a7a4a8f..00000000000000 --- a/src/frontends/onnx/tests/onnx_test_utils.in.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include - -#include "common_test_utils/all_close.hpp" -#include "common_test_utils/file_utils.hpp" -#include "common_test_utils/test_case.hpp" -#include "common_test_utils/test_control.hpp" -#include "common_test_utils/test_tools.hpp" -#include "default_opset.hpp" -#include "editor.hpp" -#include "gtest/gtest.h" -#include "ngraph/file_util.hpp" -#include "ngraph/op/util/op_types.hpp" -#include "onnx_import/onnx.hpp" -#include "onnx_utils.hpp" - -using namespace ngraph; -OPENVINO_SUPPRESS_DEPRECATED_START - -static std::string s_manifest = ngraph::file_util::path_join(ov::test::utils::getExecutableDirectory(), "${MANIFEST}"); -static std::string s_device = backend_name_to_device("${BACKEND_NAME}"); - -// is there any benefit of running below tests on different backends? -// why are these here anyway? - -OPENVINO_TEST(${BACKEND_NAME}, add_abc_from_ir) { - const auto ir_xml = - file_util::path_join(ov::test::utils::getExecutableDirectory(), TEST_MODEL_ZOO, "core/models/ir/add_abc.xml"); - const auto function = function_from_ir(ir_xml); - - auto test_case = ov::test::TestCase(function, s_device); - test_case.add_input({1}); - test_case.add_input({2}); - test_case.add_input({3}); - test_case.add_expected_output(Shape{1}, {6}); - - test_case.run(); -} - -OPENVINO_TEST(${BACKEND_NAME}, add_abc_from_ir_with_bin_path) { - const auto ir_xml = - file_util::path_join(ov::test::utils::getExecutableDirectory(), TEST_MODEL_ZOO, "core/models/ir/add_abc.xml"); - const auto ir_bin = - file_util::path_join(ov::test::utils::getExecutableDirectory(), TEST_MODEL_ZOO, "core/models/ir/add_abc.bin"); - const auto function = function_from_ir(ir_xml, ir_bin); - - auto test_case = ov::test::TestCase(function, s_device); - test_case.add_input({1}); - test_case.add_input({2}); - test_case.add_input({3}); - test_case.add_expected_output(Shape{1}, {6}); - - test_case.run(); -} diff --git a/src/frontends/onnx/tests/onnx_utils.cpp b/src/frontends/onnx/tests/onnx_utils.cpp new file mode 100644 index 00000000000000..9795e1f4e09cc9 --- /dev/null +++ b/src/frontends/onnx/tests/onnx_utils.cpp @@ -0,0 +1,99 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "onnx_utils.hpp" + +#include +#include + +#include "utils.hpp" + +using namespace std; +using namespace ov; +using namespace ov::frontend; + +// For compatibility purposes, need to remove when will be unused +const std::string ONNX_FE = "onnx"; + +namespace ov { +namespace frontend { +namespace onnx { +namespace tests { + +const std::string ONNX_FE = ::ONNX_FE; + +shared_ptr convert_model(const string& model_path, const ov::frontend::ConversionExtensionBase::Ptr& conv_ext) { + auto fem = FrontEndManager(); + FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); + if (!front_end) { + throw "ONNX FrontEnd is not initialized"; + } + + if (conv_ext) { + front_end->add_extension(conv_ext); + } + + auto full_path = FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + model_path); + InputModel::Ptr input_model = front_end->load(full_path); + if (!input_model) { + throw "Input Model is not loaded"; + } + + shared_ptr model = front_end->convert(input_model); + if (!model) { + throw "Model is not converted"; + } + + 
return model; +} + +shared_ptr convert_model(ifstream& model_stream) { + auto fem = FrontEndManager(); + FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); + if (!front_end) { + throw "ONNX FrontEnd is not initialized"; + } + + InputModel::Ptr input_model = front_end->load(dynamic_cast(&model_stream)); + if (!input_model) { + throw "Input Model is not loaded"; + } + + shared_ptr model = front_end->convert(input_model); + if (!model) { + throw "Model is not converted"; + } + + return model; +} + +shared_ptr convert_partially(const string& model_path) { + auto fem = FrontEndManager(); + FrontEnd::Ptr front_end = fem.load_by_framework(ONNX_FE); + if (!front_end) { + throw "ONNX FrontEnd is not initialized"; + } + + auto full_path = FrontEndTestUtils::make_model_path(string(TEST_ONNX_MODELS_DIRNAME) + model_path); + InputModel::Ptr input_model = front_end->load(full_path); + if (!input_model) { + throw "Input Model is not loaded"; + } + + shared_ptr model = front_end->convert_partially(input_model); + if (!model) { + throw "Model is not converted"; + } + + return model; +} + +std::string onnx_backend_manifest(const std::string& manifest) { + return ov::util::path_join({ov::test::utils::getExecutableDirectory(), manifest}); +} + +} // namespace tests +} // namespace onnx +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/onnx/tests/onnx_utils.hpp b/src/frontends/onnx/tests/onnx_utils.hpp index 4aa68986fc097e..eea1e10475a764 100644 --- a/src/frontends/onnx/tests/onnx_utils.hpp +++ b/src/frontends/onnx/tests/onnx_utils.hpp @@ -4,13 +4,15 @@ #pragma once +#include + +#include +#include #include -#include "openvino/runtime/core.hpp" #include "common_test_utils/test_constants.hpp" -static const std::string ONNX_FE = "onnx"; - +// Resolves different backend names to an internal device enumeration inline std::string backend_name_to_device(const std::string& backend_name) { if (backend_name == "INTERPRETER") return ov::test::utils::DEVICE_TEMPLATE; @@ -18,10 +20,31 @@ inline std::string backend_name_to_device(const std::string& backend_name) { return ov::test::utils::DEVICE_CPU; if (backend_name == "IE_GPU") return ov::test::utils::DEVICE_GPU; - OPENVINO_THROW("Unsupported backend name"); + throw "Unsupported backend name"; } -inline std::shared_ptr function_from_ir(const std::string& xml_path, const std::string& bin_path = {}) { - ov::Core c; - return c.read_model(xml_path, bin_path); -} +namespace ov { +namespace frontend { +namespace onnx { +namespace tests { + +extern const std::string ONNX_FE; + +// A wrapper to create ONNX Frontend and configure the conversion pipeline +std::shared_ptr convert_model(const std::string& model_path, + const ov::frontend::ConversionExtensionBase::Ptr& conv_ext = nullptr); +// A wrapper to create ONNX Frontend and configure the conversion pipeline +std::shared_ptr convert_model(std::ifstream& model_stream); +// A wrapper to create ONNX Frontend and configure the conversion pipeline to get +// a model with possible Framework Nodes +std::shared_ptr convert_partially(const std::string& model_path); + +// Returns path to a manifest file +std::string onnx_backend_manifest(const std::string& manifest); +} // namespace tests +} // namespace onnx +} // namespace frontend +} // namespace ov + +// For compatibility purposes, need to remove when will be unused +extern const std::string ONNX_FE; diff --git a/src/frontends/paddle/src/op/tanh_shrink.cpp b/src/frontends/paddle/src/op/tanh_shrink.cpp new file mode 100644 index 
00000000000000..aa125405ca5107 --- /dev/null +++ b/src/frontends/paddle/src/op/tanh_shrink.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "default_opset.hpp" +#include "openvino/frontend/paddle/node_context.hpp" + +namespace ov { +namespace frontend { +namespace paddle { +namespace op { +NamedOutputs tanh_shrink(const NodeContext& node) { + const auto x = node.get_input("X"); + const auto tanh = std::make_shared(x); + return node.default_single_output_mapping({std::make_shared(x, tanh)}, {"Out"}); +} + +} // namespace op +} // namespace paddle +} // namespace frontend +} // namespace ov diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index 4ed557a4edd13e..e92c7442fa1b4f 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -114,6 +114,7 @@ OP_CONVERTER(strided_slice); OP_CONVERTER(sum); OP_CONVERTER(swish); OP_CONVERTER(tanh); +OP_CONVERTER(tanh_shrink); OP_CONVERTER(tensor_array_to_tensor); OP_CONVERTER(tile); OP_CONVERTER(top_k_v2); @@ -244,6 +245,7 @@ std::map get_supported_ops() { {"swish", op::swish}, {"sync_batch_norm", op::batch_norm}, {"tanh", op::tanh}, + {"tanh_shrink", op::tanh_shrink}, {"tensor_array_to_tensor", op::tensor_array_to_tensor}, {"tile", op::tile}, {"top_k_v2", op::top_k_v2}, diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp index d99862ceb69490..9a7a56fc364af2 100644 --- a/src/frontends/paddle/tests/op_fuzzy.cpp +++ b/src/frontends/paddle/tests/op_fuzzy.cpp @@ -526,6 +526,8 @@ static const std::vector models{ std::string("swish_default_params"), std::string("swish_beta"), std::string("tanh"), + std::string("tanh_shrink_1"), + std::string("tanh_shrink_2"), std::string("tile_repeat_times_tensor"), std::string("tile_list_float32"), std::string("tile_list_int32"), diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_tanh_shrink.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_tanh_shrink.py new file mode 100644 index 00000000000000..675f5280950456 --- /dev/null +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_tanh_shrink.py @@ -0,0 +1,51 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +# +# tanh_shrink paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + + +def tanh_shrink(name: str, x): + import paddle + + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.nn.functional.tanhshrink(node_x) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(feed={'x': x}, fetch_list=[out])
+
+        saveModel(
+            name,
+            exe,
+            feedkeys=['x'],
+            fetchlist=[out],
+            inputs=[x],
+            outputs=[outs[0]],
+            target_dir=sys.argv[1],
+        )
+
+    return outs[0]
+
+
+def main():
+    data = np.random.uniform(10, 20, [2, 3, 4]).astype(np.float32)
+    tanh_shrink("tanh_shrink_1", data)
+
+    data = np.random.uniform(-10, 20, [4, 3, 2]).astype(np.float32)
+    tanh_shrink("tanh_shrink_2", data)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/frontends/pytorch/src/CMakeLists.txt b/src/frontends/pytorch/src/CMakeLists.txt
index f51dee59b761db..814d820b5c17aa 100644
--- a/src/frontends/pytorch/src/CMakeLists.txt
+++ b/src/frontends/pytorch/src/CMakeLists.txt
@@ -6,4 +6,4 @@ ov_add_frontend(NAME pytorch
                 LINKABLE_FRONTEND
                 SHUTDOWN_PROTOBUF
                 FILEDESCRIPTION "FrontEnd to load and convert TorchScript models from PyTorch"
-                LINK_LIBRARIES openvino::util openvino::core::dev)
\ No newline at end of file
+                LINK_LIBRARIES openvino::util openvino::core::dev)
diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp
index 1f021dfba441f5..af427798a2c1c4 100644
--- a/src/frontends/pytorch/src/frontend.cpp
+++ b/src/frontends/pytorch/src/frontend.cpp
@@ -36,7 +36,7 @@
 #include "transforms/prim_list_construct_pad.hpp"
 #include "transforms/prim_list_tuple_construct_replacer.hpp"
 #include "transforms/prim_list_unpack_replacer.hpp"
-#include "transforms/prim_tuple_unpack_parameter_replacer.hpp"
+#include "transforms/prim_unpack_parameter_replacer.hpp"
 #include "transforms/quantized_node_remover.hpp"
 #include "transforms/reverseprop_resolver.hpp"
 #include "transforms/rfftn_complex_replacer.hpp"
@@ -195,12 +195,13 @@ void FrontEnd::normalize(const std::shared_ptr<Model>& model) const {
         manager.register_pass();
         manager.register_pass();
         manager.register_pass();
-        manager.register_pass();
+        manager.register_pass();
         manager.register_pass();
         manager.register_pass();
         manager.register_pass();
         manager.register_pass();
-        manager.register_pass<DictResolver>();
+        manager.register_pass<DictParameterResolver>();
+        manager.register_pass<DictResultResolver>();
         manager.register_pass();
         manager.register_pass();
         manager.register_pass();
diff --git a/src/frontends/pytorch/src/op/erfc.cpp b/src/frontends/pytorch/src/op/erfc.cpp
new file mode 100644
index 00000000000000..8e049097102c64
--- /dev/null
+++ b/src/frontends/pytorch/src/op/erfc.cpp
@@ -0,0 +1,45 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/erf.hpp"
+#include "openvino/op/subtract.hpp"
+#include "utils.hpp"
+
+using namespace std;
+using namespace ov::op;
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+OutputVector translate_erfc(const NodeContext& context) {
+    // aten::erfc(Tensor self) -> Tensor
+    // aten::erfc.out(Tensor self, Tensor(!a) out) -> Tensor(!a)
+    num_inputs_check(context, 1, 2);
+    auto x = context.get_input(0);
+
+    // create 'ones' to use to calculate complementary of Erf output
+    auto ones = context.mark_node(make_shared<v0::Constant>(element::f32, Shape{}, 1.0f))->output(0);
+
+    // align data types of input 'x' and ones
+    align_eltwise_input_types(context, x, ones);
+
+    // apply Erf to the input tensor 'x'
+    auto y = context.mark_node(make_shared<v0::Erf>(x));
+
+    y = context.mark_node(make_shared<v1::Subtract>(ones, y));
+
+    if (!context.input_is_none(1)) {
+        context.mutate_input(1, y);
+    }
+    return {y};
+};
+
+}  // namespace op
+} // 
namespace pytorch +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/pytorch/src/op/getitem.cpp b/src/frontends/pytorch/src/op/getitem.cpp index 2dc698015f9666..58d3639cc8aa92 100644 --- a/src/frontends/pytorch/src/op/getitem.cpp +++ b/src/frontends/pytorch/src/op/getitem.cpp @@ -19,6 +19,9 @@ using namespace ov::op; OutputVector translate_getitem(const NodeContext& context) { num_inputs_check(context, 2, 2); auto input = context.get_input(0); + const auto idx_type = context.get_input_type(1); + FRONT_END_OP_CONVERSION_CHECK(!idx_type.is(), + "String index in aten::__getitem__ means dict input, this is not supported."); if (ov::as_type_ptr(input.get_node_shared_ptr())) { FRONT_END_OP_CONVERSION_CHECK(!cast_fw_node(input.get_node_shared_ptr(), "aten::split"), "special case for aten::__getitem__"); diff --git a/src/frontends/pytorch/src/op/lstm.cpp b/src/frontends/pytorch/src/op/lstm.cpp new file mode 100644 index 00000000000000..0ea42e8bfa1799 --- /dev/null +++ b/src/frontends/pytorch/src/op/lstm.cpp @@ -0,0 +1,366 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/broadcast.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/gru_sequence.hpp" +#include "openvino/op/lstm_sequence.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/rnn_sequence.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/split.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/transpose.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +namespace { +enum RnnVariant { LSTM, GRU, RNN, RNN_RELU, RNN_TANH }; + +Output convert_data_format(ov::pass::NodeRegistry& rg, RnnVariant variant, const Output& node) { + Output res; + switch (variant) { + case RnnVariant::LSTM: + res = ov::op::util::convert_lstm_node_format(node, ov::op::util::LSTMWeightsFormat::IFCO); + break; + case RnnVariant::GRU: + res = ov::op::util::convert_lstm_peepholes_format(node, ov::op::util::LSTMPeepholesFormat::IFO); + break; + default: + res = node; + break; + } + const auto axis_const = rg.make(element::i32, ov::Shape{}, 0); + return rg.make(res, axis_const); +} + +Output format_bias(ov::pass::NodeRegistry& rg, + RnnVariant variant, + const Output& b_ih, + const Output& b_hh) { + Output res; + if (variant == RnnVariant::GRU) { + const auto one = v0::Constant::create(element::i32, Shape{}, {1}); + const auto bias_ih = convert_data_format(rg, variant, b_ih); + const auto bias_hh = convert_data_format(rg, variant, b_hh); + const auto split_bias_ih = rg.make(bias_ih, one, 3); + const auto split_bias_hh = rg.make(bias_hh, one, 3); + const auto wr_z_bias = rg.make(split_bias_ih->output(0), split_bias_hh->output(0)); + const auto wr_r_bias = rg.make(split_bias_ih->output(1), split_bias_hh->output(1)); + // The result has shape: [num_directions, 4 * hidden_size] + // and data layout: [ [Wb_z + Rb_z], [Wb_r + Rb_r], [Wb_h], [Rb_h], ] + res = + rg.make(OutputVector{wr_z_bias, wr_r_bias, split_bias_ih->output(2), split_bias_hh->output(2)}, + 1); + } else { + res = rg.make(b_ih, b_hh); + res = convert_data_format(rg, variant, res); + } + return res; +} 
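// Editor's sketch (illustrative, not part of the patch): with linear_before_reset = true,
// GRUSequence expects a bias of shape [num_directions, 4 * hidden_size] laid out as
// [Wb_z + Rb_z, Wb_r + Rb_r, Wb_h, Rb_h], which is what format_bias() above assembles from
// the two PyTorch bias tensors. The plain-array version below reproduces that arithmetic,
// assuming hidden_size = 2 and biases already reordered into [z, r, h] chunks.
#include <array>
#include <cassert>
#include <cstddef>

int main() {
    constexpr size_t H = 2;  // hypothetical hidden_size for the demo
    std::array<float, 3 * H> b_ih = {/*z*/ 1, 2, /*r*/ 3, 4, /*h*/ 5, 6};
    std::array<float, 3 * H> b_hh = {/*z*/ 10, 20, /*r*/ 30, 40, /*h*/ 50, 60};

    // z and r biases are pre-added; h biases stay separate because the hidden-state
    // linear transformation is applied before the reset gate (linear_before_reset).
    std::array<float, 4 * H> bias{};
    for (size_t i = 0; i < H; ++i) {
        bias[i] = b_ih[i] + b_hh[i];              // Wb_z + Rb_z
        bias[H + i] = b_ih[H + i] + b_hh[H + i];  // Wb_r + Rb_r
        bias[2 * H + i] = b_ih[2 * H + i];        // Wb_h
        bias[3 * H + i] = b_hh[2 * H + i];        // Rb_h
    }
    assert(bias[0] == 11 && bias[2] == 33 && bias[4] == 5 && bias[6] == 50);
    return 0;
}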
+ +OutputVector generic_rnn(ov::pass::NodeRegistry& rg, + RnnVariant variant, + const Output& input, + const std::deque>& initial_states, + const std::deque>& all_weights, + bool has_biases, + int64_t num_layers, + bool bidirectional, + bool batch_first, + const Output& batch_sizes = {}) { + std::string rnn_activation; + if (variant == RnnVariant::RNN_RELU) { + variant = RnnVariant::RNN; + rnn_activation = "relu"; + } else if (variant == RnnVariant::RNN_TANH) { + variant = RnnVariant::RNN; + rnn_activation = "tanh"; + } + const auto direction = + bidirectional ? RecurrentSequenceDirection::BIDIRECTIONAL : RecurrentSequenceDirection::FORWARD; + int64_t weights_per_layer = has_biases ? 4 : 2; + int64_t mult = bidirectional ? 2 : 1; + FRONT_END_OP_CONVERSION_CHECK(static_cast(all_weights.size()) == num_layers * weights_per_layer * mult, + "Unexpected length of list with weights for rnn operation."); + + const auto w_hh = all_weights[1]; + const auto w_hh_pshape = w_hh.get_partial_shape(); + FRONT_END_OP_CONVERSION_CHECK(w_hh_pshape.rank().is_static() && w_hh_pshape[1].is_static(), ""); + const auto hidden_size = w_hh_pshape[1].get_length(); + + const auto zero = v0::Constant::create(element::i32, Shape{}, {0}); + const auto zero_1d = v0::Constant::create(element::i32, Shape{1}, {0}); + const auto one = v0::Constant::create(element::i32, Shape{}, {1}); + const auto one_1d = v0::Constant::create(element::i32, Shape{1}, {1}); + const auto order_102 = v0::Constant::create(element::i32, Shape{3}, {1, 0, 2}); + + OutputVector h_outs; + OutputVector c_outs; + Output h0; + Output c0; + if (variant == RnnVariant::RNN || variant == RnnVariant::GRU) { + h0 = initial_states[0]; + } else if (variant == RnnVariant::LSTM) { + h0 = initial_states[0]; + c0 = initial_states[1]; + } else { + FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); + } + + Output prev_output = input; + if (!batch_first) + prev_output = rg.make(prev_output, order_102); + Output sequence_lens = batch_sizes; + + const auto h_states = rg.make(h0, zero, num_layers)->outputs(); + OutputVector c_states; + if (variant == RnnVariant::LSTM) { + c_states = rg.make(c0, zero, num_layers)->outputs(); + } + + Output bias_concat; + Shape::value_type num_directions = bidirectional ? 2 : 1; + if (!has_biases) { + Shape::value_type gates_count = variant == RnnVariant::RNN ? 
1 : 4; + Shape::value_type gates_hidden = gates_count * static_cast(hidden_size); + bias_concat = rg.make(element::i32, Shape{num_directions, gates_hidden}, 0); + bias_concat = rg.make(bias_concat, input); + } + + for (int64_t i = 0; i < num_layers; i++) { + Output weight_ih; + Output weight_hh; + + int64_t idx = i * weights_per_layer; + if (!bidirectional) { + weight_ih = convert_data_format(rg, variant, all_weights[idx]); + weight_hh = convert_data_format(rg, variant, all_weights[idx + 1]); + if (has_biases) { + const auto bias_ih = all_weights[idx + 2]; + const auto bias_hh = all_weights[idx + 3]; + bias_concat = format_bias(rg, variant, bias_ih, bias_hh); + } + } else { + Output weight_ih_f; + Output weight_hh_f; + Output weight_ih_b; + Output weight_hh_b; + if (has_biases) { + weight_ih_f = all_weights[2 * idx]; + weight_hh_f = all_weights[2 * idx + 1]; + const auto bias_ih_f = all_weights[2 * idx + 2]; + const auto bias_hh_f = all_weights[2 * idx + 3]; + weight_ih_b = all_weights[2 * idx + 4]; + weight_hh_b = all_weights[2 * idx + 5]; + const auto bias_ih_b = all_weights[2 * idx + 6]; + const auto bias_hh_b = all_weights[2 * idx + 7]; + const auto bias_f = format_bias(rg, variant, bias_ih_f, bias_hh_f); + const auto bias_b = format_bias(rg, variant, bias_ih_b, bias_hh_b); + bias_concat = rg.make(OutputVector{bias_f, bias_b}, 0); + } else { + weight_ih_f = all_weights[2 * idx]; + weight_hh_f = all_weights[2 * idx + 1]; + weight_ih_b = all_weights[2 * idx + 2]; + weight_hh_b = all_weights[2 * idx + 3]; + } + weight_ih_f = convert_data_format(rg, variant, weight_ih_f); + weight_hh_f = convert_data_format(rg, variant, weight_hh_f); + weight_ih_b = convert_data_format(rg, variant, weight_ih_b); + weight_hh_b = convert_data_format(rg, variant, weight_hh_b); + weight_ih = rg.make(OutputVector{weight_ih_f, weight_ih_b}, 0); + weight_hh = rg.make(OutputVector{weight_hh_f, weight_hh_b}, 0); + } + + const auto shape_of_x = rg.make(prev_output, element::i32); + const auto axes = v0::Constant::create(element::i32, Shape{1}, {0}); + const auto batch_size_node = rg.make(shape_of_x, zero_1d, axes); + if (!sequence_lens.get_node_shared_ptr()) { + const auto seq_length_node = rg.make(shape_of_x, one_1d, axes); + sequence_lens = rg.make(seq_length_node, batch_size_node); + } + + const auto h_state = rg.make(h_states[i], order_102); + std::shared_ptr rnn_node; + if (variant == RnnVariant::GRU) { + rnn_node = rg.make(prev_output, + h_state, + sequence_lens, + weight_ih, + weight_hh, + bias_concat, + hidden_size, + direction, + std::vector{"sigmoid", "tanh"}, + std::vector{}, + std::vector{}, + 0.f, + true); + } else if (variant == RnnVariant::LSTM) { + Output c_state = rg.make(c_states[i], order_102); + rnn_node = rg.make(prev_output, + h_state, + c_state, + sequence_lens, + weight_ih, + weight_hh, + bias_concat, + hidden_size, + direction); + } else if (variant == RnnVariant::RNN) { + rnn_node = rg.make(prev_output, + h_state, + sequence_lens, + weight_ih, + weight_hh, + bias_concat, + hidden_size, + direction, + std::vector{rnn_activation}); + } + prev_output = rnn_node->output(0); + + if (bidirectional) { + const auto order = v0::Constant::create(element::i32, Shape{4}, {0, 2, 1, 3}); + prev_output = rg.make(prev_output, order); + const auto new_shape = v0::Constant::create(element::i32, Shape{3}, {0, 0, -1}); + prev_output = rg.make(prev_output, new_shape, true); + } else { + prev_output = rg.make(prev_output, one); + } + + h_outs.push_back(rnn_node->output(1)); + if (variant == RnnVariant::LSTM) + 
c_outs.push_back(rnn_node->output(2)); + } + if (!batch_first) + prev_output = rg.make(prev_output, order_102); + Output h_res = rg.make(h_outs, 1); + h_res = rg.make(h_res, order_102); + if (variant == RnnVariant::RNN || variant == RnnVariant::GRU) { + return {prev_output, h_res}; + } else if (variant == RnnVariant::LSTM) { + Output c_res = rg.make(c_outs, 1); + c_res = rg.make(c_res, order_102); + return {prev_output, h_res, c_res}; + } + FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported rnn variant."); +} + +} // namespace + +OutputVector translate_lstm(const NodeContext& context) { + num_inputs_check(context, 9, 9); + ov::pass::NodeRegistry rg; + if (context.get_input_type(3).is()) { + // lstm packed + FRONT_END_OP_CONVERSION_CHECK(false, "Unsupported lstm variant."); + } else { + // aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, + // bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) + const auto data = context.get_input(0); + const auto hx = context.get_input(1); + const auto params = context.get_input(2); + const auto has_bias = context.const_input(3); + const auto num_layers = context.const_input(4); + // const auto dropout = context.const_input(5); - skip + const auto train = context.const_input(6); + FRONT_END_OP_CONVERSION_CHECK(!train, "LSTM in train mode is not supported."); + const auto bidirectional = context.const_input(7); + const auto batch_first = context.const_input(8); + + const auto initial_states = get_list_as_outputs(hx); + const auto all_weights = get_list_as_outputs(params); + const auto res = generic_rnn(rg, + RnnVariant::LSTM, + data, + initial_states, + all_weights, + has_bias, + num_layers, + bidirectional, + batch_first); + context.mark_nodes(rg.get()); + return res; + } +}; + +OutputVector translate_gru(const NodeContext& context) { + num_inputs_check(context, 9, 9); + ov::pass::NodeRegistry rg; + // aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, + // bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + const auto input = context.get_input(0); + const auto hidden = context.get_input(1); + const auto weight_v = context.get_input(2); + const auto has_biases = context.const_input(3); + const auto num_layers = context.const_input(4); + // const auto dropout = context.const_input(5); - skip + const auto train = context.const_input(6); + FRONT_END_OP_CONVERSION_CHECK(!train, "GRU in train mode is not supported."); + const auto bidirectional = context.const_input(7); + const auto batch_first = context.const_input(8); + + const auto weight = get_list_as_outputs(weight_v); + const auto res = + generic_rnn(rg, RnnVariant::GRU, input, {hidden}, weight, has_biases, num_layers, bidirectional, batch_first); + context.mark_nodes(rg.get()); + return res; +}; + +namespace { +std::map RNN_VARIANT_MAP = { + {"aten::rnn_tanh", RnnVariant::RNN_TANH}, + {"aten::rnn_relu", RnnVariant::RNN_RELU}, +}; +} + +OutputVector translate_rnn(const NodeContext& context) { + num_inputs_check(context, 9, 9); + ov::pass::NodeRegistry rg; + // aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, + // bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + const auto input = context.get_input(0); + const auto hidden = context.get_input(1); + const auto weight_v = context.get_input(2); + const auto has_biases = context.const_input(3); + const auto num_layers = 
context.const_input(4); + // const auto dropout = context.const_input(5); - skip + const auto train = context.const_input(6); + FRONT_END_OP_CONVERSION_CHECK(!train, "RNN in train mode is not supported."); + const auto bidirectional = context.const_input(7); + const auto batch_first = context.const_input(8); + + const auto weight = get_list_as_outputs(weight_v); + const auto variant_it = RNN_VARIANT_MAP.find(context.get_op_type()); + FRONT_END_OP_CONVERSION_CHECK(variant_it != RNN_VARIANT_MAP.end(), "Unsupported RNN variant."); + const auto res = generic_rnn(rg, + variant_it->second, + input, + {hidden}, + weight, + has_biases, + num_layers, + bidirectional, + batch_first); + context.mark_nodes(rg.get()); + return res; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/pow.cpp b/src/frontends/pytorch/src/op/pow.cpp index d3a39694bf3953..ebdfb8c57155e0 100644 --- a/src/frontends/pytorch/src/op/pow.cpp +++ b/src/frontends/pytorch/src/op/pow.cpp @@ -15,8 +15,17 @@ OutputVector translate_pow(const NodeContext& context) { num_inputs_check(context, 2, 2); auto lhs = context.get_input(0); auto rhs = context.get_input(1); - align_eltwise_input_types(context, lhs, rhs, true); - return {context.mark_node(std::make_shared(lhs, rhs))}; + auto inplace = context.get_op_type() == "aten::pow_"; + if (inplace) { + rhs = std::make_shared(rhs, lhs); + } else { + align_eltwise_input_types(context, lhs, rhs, true); + } + auto res = context.mark_node(std::make_shared(lhs, rhs)); + if (inplace) { + context.mutate_input(0, res); + } + return {res}; } } // namespace op diff --git a/src/frontends/pytorch/src/op/rand.cpp b/src/frontends/pytorch/src/op/rand.cpp index 677a3e86a52ac0..d04b3bbd2780b7 100644 --- a/src/frontends/pytorch/src/op/rand.cpp +++ b/src/frontends/pytorch/src/op/rand.cpp @@ -3,6 +3,7 @@ // #include +#include "openvino/frontend/common/random_normal_helper.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/add.hpp" #include "openvino/op/constant.hpp" @@ -15,6 +16,7 @@ #include "openvino/op/shape_of.hpp" #include "openvino/op/sqrt.hpp" #include "pt_framework_node.hpp" +#include "transformations/rt_info/disable_fp16_compression.hpp" #include "utils.hpp" namespace ov { @@ -32,40 +34,13 @@ OutputVector make_random_normal(const NodeContext& context, const Output& mean_const) { std::random_device rd; std::mt19937 gen(rd()); - std::uniform_int_distribution distrib(0, 9999); + std::uniform_real_distribution distrib(0.0f, 9999.0f); + float seed = distrib(gen); - const uint64_t global_seed = 0; - - const uint64_t seed_1 = distrib(gen); - const uint64_t seed_2 = distrib(gen); - - auto min_val = context.mark_node(v0::Constant::create(target_type, Shape{1}, {std::numeric_limits::min()})); - auto max_val = context.mark_node(v0::Constant::create(target_type, Shape{1}, {1})); - - auto uniform_1 = context.mark_node( - std::make_shared(sizes, min_val, max_val, target_type, global_seed, seed_1)); - auto uniform_2 = context.mark_node( - std::make_shared(sizes, min_val, max_val, target_type, global_seed, seed_2)); - - // Compute Box–Muller transform - // random_normal = scale * ng.sqrt(-2.0 * ng.log(uniform_1)) * ng.cos(2.0 * np.pi * uniform_2) + mean - auto pi = context.mark_node(v0::Constant::create(target_type, Shape{1}, {3.141592653589793})); - auto minus_two = context.mark_node(v0::Constant::create(target_type, Shape{1}, {-2.0})); - auto two = context.mark_node(v0::Constant::create(target_type, 
Shape{1}, {2.0})); - - auto log = context.mark_node(std::make_shared(uniform_1)); - auto multiply_minus_two_log = context.mark_node(std::make_shared(log, minus_two)); - auto sqrt = context.mark_node(std::make_shared(multiply_minus_two_log)); - - auto multiply_two_pi = context.mark_node(std::make_shared(uniform_2, pi)); - auto multiply_two_pi_uniform_2 = context.mark_node(std::make_shared(multiply_two_pi, uniform_2)); - auto cos = context.mark_node(std::make_shared(multiply_two_pi_uniform_2)); - - auto sqrt_x_cos = context.mark_node(std::make_shared(sqrt, cos)); - auto product = context.mark_node(std::make_shared(scale_const, sqrt_x_cos)); - auto sum = context.mark_node(std::make_shared(product, mean_const)); - - return {sum}; + pass::NodeRegistry registry; + auto res = ov::frontend::make_random_normal(registry, sizes, target_type, mean_const, scale_const, seed); + context.mark_nodes(registry.get()); + return res; } }; // namespace diff --git a/src/frontends/pytorch/src/op/scatter.cpp b/src/frontends/pytorch/src/op/scatter.cpp index 68abc51298265b..d60cfd91bf6c90 100644 --- a/src/frontends/pytorch/src/op/scatter.cpp +++ b/src/frontends/pytorch/src/op/scatter.cpp @@ -131,6 +131,23 @@ OutputVector translate_scatter_reduce(const NodeContext& context) { return {scatter_result}; }; +OutputVector translate_scatter_add(const NodeContext& context) { + // aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + num_inputs_check(context, 4, 4); + auto input = context.get_input(0); + auto dim = context.get_input(1); + auto index = context.mark_node(std::make_shared(context.get_input(2), element::i32)); + auto src = context.get_input(3); + auto src_input_dtype = prepare_source(context, src, index, input); + auto scatter_result = + context.mark_node(std::make_shared(input, + index, + src_input_dtype, + dim, + v12::ScatterElementsUpdate::Reduction::SUM)); + return {scatter_result}; +}; + } // namespace op } // namespace pytorch } // namespace frontend diff --git a/src/frontends/pytorch/src/op/square.cpp b/src/frontends/pytorch/src/op/square.cpp index 2310fda75aa574..a17080574f79ac 100644 --- a/src/frontends/pytorch/src/op/square.cpp +++ b/src/frontends/pytorch/src/op/square.cpp @@ -4,6 +4,7 @@ #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" #include "openvino/op/power.hpp" #include "utils.hpp" @@ -15,9 +16,11 @@ namespace op { using namespace ov::op; OutputVector translate_square(const NodeContext& context) { + // aten::square(Tensor self) -> Tensor num_inputs_check(context, 1, 1); auto input_0 = context.get_input(0); - auto const_2 = context.mark_node(v0::Constant::create(input_0.get_element_type(), Shape{1}, {2})); + auto const_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {2})); + const_2 = context.mark_node(std::make_shared(const_2, input_0)); return {context.mark_node(std::make_shared(input_0, const_2))}; }; diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 230b2b4d06c7c8..d42839a92fde35 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -68,6 +68,7 @@ OP_CONVERTER(translate_embedding_bag); OP_CONVERTER(translate_empty); OP_CONVERTER(translate_empty_like); OP_CONVERTER(translate_erf); +OP_CONVERTER(translate_erfc); OP_CONVERTER(translate_expand); OP_CONVERTER(translate_expand_as); OP_CONVERTER(translate_eye); @@ -89,6 +90,7 @@ OP_CONVERTER(translate_getitem); 
OP_CONVERTER(translate_glu); OP_CONVERTER(translate_grid_sampler); OP_CONVERTER(translate_group_norm); +OP_CONVERTER(translate_gru); OP_CONVERTER(translate_hardtanh); OP_CONVERTER(translate_if); OP_CONVERTER(translate_im2col); @@ -114,6 +116,7 @@ OP_CONVERTER(translate_log2); OP_CONVERTER(translate_log10); OP_CONVERTER(translate_logsumexp); OP_CONVERTER(translate_loop); +OP_CONVERTER(translate_lstm); OP_CONVERTER(translate_masked_fill); OP_CONVERTER(translate_masked_scatter); OP_CONVERTER(translate_max); @@ -167,6 +170,7 @@ OP_CONVERTER(translate_remainder); OP_CONVERTER(translate_repeat_interleave); OP_CONVERTER(translate_reshape); OP_CONVERTER(translate_reshape_as); +OP_CONVERTER(translate_rnn); OP_CONVERTER(translate_roi_align); OP_CONVERTER(translate_roll); OP_CONVERTER(translate_round); @@ -174,6 +178,7 @@ OP_CONVERTER(translate_rsqrt); OP_CONVERTER(translate_rsub); OP_CONVERTER(translate_scaled_dot_product_attention); OP_CONVERTER(translate_scatter); +OP_CONVERTER(translate_scatter_add); OP_CONVERTER(translate_scatter_reduce); OP_CONVERTER(translate_select); OP_CONVERTER(translate_set_item); @@ -345,6 +350,8 @@ const std::map get_supported_ops_ts() { {"aten::eq", op::translate_1to1_match_2_inputs_align_types}, {"aten::erf", op::translate_erf}, {"aten::erf_", op::inplace_op}, + {"aten::erfc", op::translate_erfc}, + {"aten::erfc_", op::inplace_op}, {"aten::exp", op::translate_1to1_match_1_inputs_with_fp32_type_alignment}, {"aten::exp_", op::inplace_op>}, {"aten::expand", op::translate_expand}, @@ -372,6 +379,7 @@ const std::map get_supported_ops_ts() { {"aten::glu", op::translate_glu}, {"aten::grid_sampler", op::translate_grid_sampler}, {"aten::group_norm", op::translate_group_norm}, + {"aten::gru", op::translate_gru}, {"aten::gt", op::translate_1to1_match_2_inputs_align_types}, {"aten::hardsigmoid", op::quantizable_op>}, {"aten::hardswish", op::quantizable_op>}, @@ -415,6 +423,7 @@ const std::map get_supported_ops_ts() { {"aten::log2_", op::inplace_op}, {"aten::log10", op::translate_log10}, {"aten::log10_", op::inplace_op}, + {"aten::lstm", op::translate_lstm}, {"aten::lt", op::translate_1to1_match_2_inputs_align_types}, {"aten::masked_fill", op::translate_masked_fill}, {"aten::masked_fill_", op::inplace_op}, @@ -462,6 +471,7 @@ const std::map get_supported_ops_ts() { {"aten::pixel_unshuffle", op::translate_pixel_unshuffle}, {"aten::prelu", op::translate_1to1_match_2_inputs}, {"aten::pow", op::translate_pow}, + {"aten::pow_", op::translate_pow}, {"aten::prod", op::translate_prod}, {"aten::quantize_per_channel", op::translate_quantize_per_channel}, {"aten::quantize_per_tensor", op::translate_quantize_per_tensor}, @@ -484,6 +494,8 @@ const std::map get_supported_ops_ts() { // for real dtypes, these operations return input tensor without changes and can be skipped {"aten::resolve_conj", op::skip_node}, {"aten::resolve_neg", op::skip_node}, + {"aten::rnn_relu", op::translate_rnn}, + {"aten::rnn_tanh", op::translate_rnn}, {"aten::roll", op::translate_roll}, {"aten::round", op::translate_round}, {"aten::rsqrt", op::translate_rsqrt}, @@ -492,6 +504,8 @@ const std::map get_supported_ops_ts() { {"aten::scaled_dot_product_attention", op::translate_scaled_dot_product_attention}, {"aten::scatter", op::translate_scatter}, {"aten::scatter_", op::inplace_op}, + {"aten::scatter_add", op::translate_scatter_add}, + {"aten::scatter_add_", op::inplace_op}, {"aten::scatter_reduce", op::translate_scatter_reduce}, {"aten::scatter_reduce_", op::inplace_op}, {"aten::select", op::quantizable_op}, diff --git 
a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp
index 39c2baef8ef79f..141243a5a6e3af 100644
--- a/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp
+++ b/src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp
@@ -129,9 +129,13 @@ AtenIndexPutReplacer::AtenIndexPutReplacer() {
         auto index_dtype = index.get_element_type();
         // Do we need to also check u8?
         if (index_dtype == element::boolean) {
+            values = rg.make<v1::ConvertLike>(values, input);
             // then apply masked scatter
             auto input_shape = rg.make<v3::ShapeOf>(input, element::i32);
-            auto expanded_mask = rg.make<v3::Broadcast>(index, input_shape, BroadcastType::BIDIRECTIONAL);
+            auto input_rank = rg.make<v3::ShapeOf>(input_shape, element::i32);
+            auto one_const = v0::Constant::create(element::i32, Shape{1}, {1});
+            auto expand_shape = rg.make<v3::Broadcast>(one_const, input_rank, BroadcastType::BIDIRECTIONAL);
+            auto expanded_mask = rg.make<v3::Broadcast>(index, expand_shape, BroadcastType::BIDIRECTIONAL);
             auto nonzero = rg.make<v3::NonZero>(expanded_mask, element::i32);
             auto input_order = v0::Constant::create(element::i32, Shape{2}, {1, 0});
             index = rg.make<v1::Transpose>(nonzero, input_order);
diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.cpp b/src/frontends/pytorch/src/transforms/dict_resolver.cpp
index 455a1fc2cbc80a..d51eb793813bf7 100644
--- a/src/frontends/pytorch/src/transforms/dict_resolver.cpp
+++ b/src/frontends/pytorch/src/transforms/dict_resolver.cpp
@@ -5,6 +5,7 @@
 #include "dict_resolver.hpp"
 
 #include "openvino/core/rt_info.hpp"
+#include "openvino/op/parameter.hpp"
 #include "openvino/op/result.hpp"
 #include "openvino/op/util/framework_node.hpp"
 #include "openvino/pass/pattern/op/wrap_type.hpp"
@@ -18,7 +19,56 @@ namespace pass {
 using namespace ov::pass;
 using namespace ov::op;
 
-bool DictResolver::run_on_model(const std::shared_ptr<Model>& model) {
+bool DictParameterResolver::run_on_model(const std::shared_ptr<Model>& model) {
+    bool changed = false;
+    const auto parameters = model->get_parameters();
+    ParameterVector new_params;
+
+    for (const auto& p : parameters) {
+        bool at_least_one_unused = false;
+        if (p->get_output_size() == 1) {
+            const auto targets = p->get_output_target_inputs(0);
+            for (const auto inp : targets) {
+                const auto getitem_node = cast_fw_node(inp.get_node()->shared_from_this(), "aten::__getitem__");
+                if (getitem_node) {
+                    const auto index_node = std::dynamic_pointer_cast<util::FrameworkNode>(
+                        getitem_node->get_input_node_shared_ptr(1));
+                    if (!index_node) {
+                        at_least_one_unused = true;
+                        continue;
+                    }
+                    const auto attrs = index_node->get_attrs();
+                    if (attrs.find("string_value") == attrs.end()) {
+                        // index node must contain string value
+                        at_least_one_unused = true;
+                        continue;
+                    }
+                    const auto name = attrs.at("string_value");
+                    auto new_param = std::make_shared<v0::Parameter>(getitem_node->get_output_element_type(0),
+                                                                     getitem_node->get_output_partial_shape(0));
+                    new_param->set_friendly_name(name);
+                    getitem_node->output(0).replace(new_param);
+                    new_params.push_back(new_param);
+                    changed = true;
+                } else {
+                    at_least_one_unused = true;
+                }
+            }
+        }
+        if (changed) {
+            model->remove_parameter(p);
+            if (at_least_one_unused || p->get_output_size() != 1) {
+                new_params.push_back(p);
+            }
+        }
+    }
+    if (changed) {
+        model->add_parameters(new_params);
+    }
+    return changed;
+};
+
+bool DictResultResolver::run_on_model(const std::shared_ptr<Model>& model) {
     bool changed = false;
     const auto results = model->get_results();
     for (const auto& res : results) {
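Editorial sketch of the DictParameterResolver rewrite above (the dict keys "x" and "y" are invented for
illustration; only the Parameter -> aten::__getitem__ pattern is from the source):

    before:  p = Parameter();  a = aten::__getitem__(p, "x");  b = aten::__getitem__(p, "y")
    after:   a = Parameter() named "x";  b = Parameter() named "y"
             (p is removed once every consumer has been matched; otherwise p is kept
              alongside the new Parameters)

The friendly name of each new Parameter is taken from the getitem's string index, so dict keys survive
conversion as Parameter friendly names.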
diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.hpp b/src/frontends/pytorch/src/transforms/dict_resolver.hpp
index 7cdec639cf27d0..0494c7dc32d0fb 100644
--- a/src/frontends/pytorch/src/transforms/dict_resolver.hpp
+++ b/src/frontends/pytorch/src/transforms/dict_resolver.hpp
@@ -12,9 +12,16 @@ namespace frontend {
 namespace pytorch {
 namespace pass {
 
-class DictResolver : public ov::pass::ModelPass {
+// This transformation replaces pattern Parameter(Dict)->aten::__getitem__
+class DictParameterResolver : public ov::pass::ModelPass {
 public:
-    OPENVINO_RTTI("ov::frontend::pytorch::pass::DictResolver");
+    OPENVINO_RTTI("ov::frontend::pytorch::pass::DictParameterResolver");
+    bool run_on_model(const std::shared_ptr<Model>& model) override;
+};
+// This transformation replaces pattern prim::DictConstruct->Result
+class DictResultResolver : public ov::pass::ModelPass {
+public:
+    OPENVINO_RTTI("ov::frontend::pytorch::pass::DictResultResolver");
     bool run_on_model(const std::shared_ptr<Model>& model) override;
 };
 
diff --git a/src/frontends/pytorch/src/transforms/prim_tuple_unpack_parameter_replacer.cpp b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.cpp
similarity index 88%
rename from src/frontends/pytorch/src/transforms/prim_tuple_unpack_parameter_replacer.cpp
rename to src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.cpp
index f18befab77927f..e8183815f849d1 100644
--- a/src/frontends/pytorch/src/transforms/prim_tuple_unpack_parameter_replacer.cpp
+++ b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.cpp
@@ -1,7 +1,7 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "prim_tuple_unpack_parameter_replacer.hpp"
+#include "prim_unpack_parameter_replacer.hpp"
 
 #include <deque>
 #include <sstream>
 
@@ -16,7 +16,7 @@ namespace frontend {
 namespace pytorch {
 namespace pass {
 
-bool DecomposeTupleParameters::run_on_model(const std::shared_ptr<Model>& model) {
+bool DecomposeUnpackParameters::run_on_model(const std::shared_ptr<Model>& model) {
     bool at_least_one_decomposed = false;
     const auto& orig_parameters = model->get_parameters();
     std::deque<std::shared_ptr<v0::Parameter>> parameters(orig_parameters.begin(), orig_parameters.end());
@@ -29,7 +29,7 @@ bool DecomposeTupleParameters::run_on_model(const std::shared_ptr<Model>& model)
         size_t num_outputs = 0;  // number of outputs in each unpack consumer should match
         bool all_unpacks = true;
 
-        // collects all outputs per each consumer operation for this tuple Parameter
+        // collects all outputs per each consumer operation for this tuple/list Parameter
        std::vector consumer_outputs;
 
         // The following vector track consumer nodes having prim::TupleUnpack type to form a detailed
@@ -38,8 +38,11 @@
         for (const auto& consumer : consumers) {
             auto node = consumer.get_node()->shared_from_this();
-            auto tuple_unpack = cast_fw_node(node, "prim::TupleUnpack");
-            if (!tuple_unpack) {
+            std::shared_ptr<ov::op::util::FrameworkNode> unpack = cast_fw_node(node, "prim::TupleUnpack");
+            if (!unpack) {
+                unpack = cast_fw_node(node, "prim::ListUnpack");
+            }
+            if (!unpack) {
                 all_unpacks = false;
                 continue;  // need to look at all consumers to form good diagnostics
             }
@@ -49,7 +52,7 @@
             } else if (num_outputs != node->get_output_size()) {
                 std::stringstream message;
                 message << "Unpack node " << node
-                        << " as one of the consumers of a tuple, which is introduced by parameter "
+                        << " as one of the consumers of a tuple/list, which is introduced by parameter "
outputs " << node->get_output_size() << " not matching number of outputs " << num_outputs << " for other consumer(s) found earlier."; add_exception_to_fw_node(node, message.str()); @@ -64,11 +67,11 @@ bool DecomposeTupleParameters::run_on_model(const std::shared_ptr& model) // we cannot replace other unpacks even if they exist, leaving Unpack-op(s) in the graph for this Parameter updated_parameters.push_back(parameter); - // In case if at least one Unpack exists there is an opportinity to attach diagnostics + // In case if at least one Unpack exists there is an opportunity to attach diagnostics for (const auto& consumer : consumer_unpacks) { std::stringstream message; - message << "Not prim::TupleUnpack operations exist except this one: " << consumer - << " found as one of the consumers of a tuple, which is introduced by parameter " + message << "Not unpack operations exist except this one: " << consumer + << " found as one of the consumers of a tuple/list, which is introduced by parameter " << parameter->output(0) << "."; add_exception_to_fw_node(consumer, message.str()); } diff --git a/src/frontends/pytorch/src/transforms/prim_tuple_unpack_parameter_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp similarity index 63% rename from src/frontends/pytorch/src/transforms/prim_tuple_unpack_parameter_replacer.hpp rename to src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp index 46007a5c12a775..1f93da74dfafc4 100644 --- a/src/frontends/pytorch/src/transforms/prim_tuple_unpack_parameter_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp @@ -12,12 +12,12 @@ namespace frontend { namespace pytorch { namespace pass { -// This transformation replaces all prim::TupleUnpack operations coming after Parameters with -// more Parameters -- one new parameter for each prim::TupleUnpack output. The original Parameter +// This transformation replaces all prim::TupleUnpack/prim::ListUnpack operations coming after Parameters +// with more Parameters -- one new parameter for each unpacked output. The original Parameter // is replaced with these new Parameters preserving the order relative to other Parameters in a model. -// Order of new parameters is the same as the order of prim::TupleUnpack outputs. -// If prim::TupleUnpack has a consumer that is also prim::TupleUnpack, the transformation applies -// the replacement recursively until all prim::TupleUnpacks that take a Parameter output are eliminated. +// Order of new parameters is the same as the order of unpacked outputs. +// If unpack operation has a consumer that is also unpack operation, the transformation applies +// the replacement recursively until all unpack operations that take a Parameter output are eliminated. // // For example, if a model has the following signature: a, (b, (c, d)), e, where a, b, c, d, and e are // tensors, and (x1, x2) means tuple consisting two elements x1 and x2, then the resulting model @@ -25,9 +25,9 @@ namespace pass { // Note, that there is no special 'tuple' type of an input, tuple structure is restored by // following prim::TupleUnpack operations in the graph only assuming that they can be applied on // tuples only and the most nested objects in those tuples are tensors. 
-class DecomposeTupleParameters : public ov::pass::ModelPass {
+class DecomposeUnpackParameters : public ov::pass::ModelPass {
 public:
-    OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeTupleParameters");
+    OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeUnpackParameters");
     bool run_on_model(const std::shared_ptr<Model>& model) override;
 };
 
diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp
index b52931d726e692..020e8cd3ecf4db 100644
--- a/src/frontends/tensorflow/src/frontend.cpp
+++ b/src/frontends/tensorflow/src/frontend.cpp
@@ -129,14 +129,14 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
     // avoid parsing of checkpoints here
     if (variants[0].is<std::string>()) {
         std::string model_path = variants[0].as<std::string>();
-        if (ov::util::ends_with(model_path, ".pb") && GraphIteratorProto::is_supported(model_path)) {
+        if (GraphIteratorProto::is_supported(model_path)) {
             // handle binary protobuf format
             // for automatic deduction of the frontend to convert the model
             // we have more strict rule that is to have `.pb` extension in the path
             return true;
         } else if (GraphIteratorSavedModel::is_supported(model_path)) {
             return true;
-        } else if (ov::util::ends_with(model_path, ".meta") && GraphIteratorMeta::is_supported(model_path)) {
+        } else if (GraphIteratorMeta::is_supported(model_path)) {
             return true;
         } else if (GraphIteratorProtoTxt::is_supported(model_path)) {
             // handle text protobuf format
@@ -161,15 +161,14 @@ bool FrontEnd::supported_impl(const std::vector<ov::Any>& variants) const {
 #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
     else if (variants[0].is<std::wstring>()) {
         std::wstring model_path = variants[0].as<std::wstring>();
-        if (ov::util::ends_with(model_path, std::wstring(L".pb")) && GraphIteratorProto::is_supported(model_path)) {
+        if (GraphIteratorProto::is_supported(model_path)) {
             // handle binary protobuf format with a path in Unicode
             // for automatic deduction of the frontend to convert the model
             // we have more strict rule that is to have `.pb` extension in the path
             return true;
         } else if (GraphIteratorSavedModel::is_supported(model_path)) {
             return true;
-        } else if (ov::util::ends_with(model_path, std::wstring(L".meta")) &&
-                   GraphIteratorMeta::is_supported(model_path)) {
+        } else if (GraphIteratorMeta::is_supported(model_path)) {
             return true;
         } else if (GraphIteratorProtoTxt::is_supported(model_path)) {
             // handle text protobuf format
diff --git a/src/frontends/tensorflow/src/op_table.cpp b/src/frontends/tensorflow/src/op_table.cpp
index 9311722fd66a66..df9dd2ac737197 100644
--- a/src/frontends/tensorflow/src/op_table.cpp
+++ b/src/frontends/tensorflow/src/op_table.cpp
@@ -232,6 +232,7 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"MaxPoolWithArgmax", CreatorFunction(translate_max_pool_with_argmax)},
         {"Merge", CreatorFunction(translate_merge_op)},
         {"MirrorPad", CreatorFunction(translate_mirror_pad_op)},
+        {"MulNoNan", CreatorFunction(translate_mul_no_nan_op)},
         {"MutableHashTable", CreatorFunction(translate_hash_table_op)},
         {"MutableHashTableV2", CreatorFunction(translate_hash_table_op)},
         {"NonMaxSuppression", CreatorFunction(translate_non_max_suppression_op)},
diff --git a/src/frontends/tensorflow/tests/compilation.cpp b/src/frontends/tensorflow/tests/compilation.cpp
index e34a159a45dd06..dc3ef5187b987e 100644
--- a/src/frontends/tensorflow/tests/compilation.cpp
+++ b/src/frontends/tensorflow/tests/compilation.cpp
@@ -14,6 +14,20 @@ using namespace ov::frontend::tensorflow::tests;
 
 class CompileModelsTests : public ::testing::Test {};
 
+#ifdef
OPENVINO_ARCH_ARM64 +// Ticket: 122666 +TEST_F(CompileModelsTests, DISABLED_NgramCompilation) { + ov::Core core; + auto model = convert_model("model_ngram/model_ngram.pbtxt"); + ov::CompiledModel compiled_model = core.compile_model(model, "CPU"); + const auto runtime_model = compiled_model.get_runtime_model(); + + // A convert node will be inserted for CPU plugin API 2.0 + EXPECT_EQ(runtime_model->get_ordered_ops().size(), 5); + EXPECT_EQ(runtime_model->get_parameters().size(), 2); + EXPECT_EQ(runtime_model->get_results().size(), 1); +} +#else TEST_F(CompileModelsTests, NgramCompilation) { ov::Core core; auto model = convert_model("model_ngram/model_ngram.pbtxt"); @@ -25,6 +39,7 @@ TEST_F(CompileModelsTests, NgramCompilation) { EXPECT_EQ(runtime_model->get_parameters().size(), 2); EXPECT_EQ(runtime_model->get_results().size(), 1); } +#endif #ifdef OPENVINO_ARCH_ARM64 // Ticket: CVS-122396 diff --git a/src/frontends/tensorflow/tests/convert_model.cpp b/src/frontends/tensorflow/tests/convert_model.cpp index f6ec18cf9cc12c..5419b2c4f77c6d 100644 --- a/src/frontends/tensorflow/tests/convert_model.cpp +++ b/src/frontends/tensorflow/tests/convert_model.cpp @@ -13,6 +13,8 @@ using TFConvertModelTest = FrontEndConvertModelTest; static const std::vector models{ std::string("2in_2out/2in_2out.pb"), + std::string("2in_2out/2in_2out.pb.frozen"), + std::string("2in_2out/2in_2out.pb.frozen_text"), std::string("forward_edge_model/forward_edge_model.pbtxt"), std::string("forward_edge_model2/forward_edge_model2.pbtxt"), std::string("concat_with_non_constant_axis/concat_with_non_constant_axis.pbtxt"), diff --git a/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_2in_2out.py b/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_2in_2out.py index 33264d2c6c749b..42f022c001c262 100644 --- a/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_2in_2out.py +++ b/src/frontends/tensorflow/tests/test_models/gen_scripts/generate_2in_2out.py @@ -1,9 +1,10 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import numpy as np import os import sys + +import numpy as np import tensorflow as tf tf.compat.v1.reset_default_graph() @@ -33,3 +34,5 @@ tf_net = sess.graph_def tf.io.write_graph(tf_net, os.path.join(sys.argv[1], "2in_2out"), '2in_2out.pb', False) +tf.io.write_graph(tf_net, os.path.join(sys.argv[1], "2in_2out"), '2in_2out.pb.frozen', False) +tf.io.write_graph(tf_net, os.path.join(sys.argv[1], "2in_2out"), '2in_2out.pb.frozen_text', True) diff --git a/src/frontends/tensorflow_common/include/common_op_table.hpp b/src/frontends/tensorflow_common/include/common_op_table.hpp index 300a31b5dbfa54..d11b91049b8ba6 100644 --- a/src/frontends/tensorflow_common/include/common_op_table.hpp +++ b/src/frontends/tensorflow_common/include/common_op_table.hpp @@ -101,6 +101,7 @@ OP_CONVERTER(translate_matrix_diag_op); OP_CONVERTER(translate_max_pool_op); OP_CONVERTER_NAMED(translate_max_pool_with_argmax); OP_CONVERTER(translate_mirror_pad_op); +OP_CONVERTER(translate_mul_no_nan_op); OP_CONVERTER_NAMED(translate_non_max_suppression_op); OP_CONVERTER(translate_parallel_dynamic_stitch_op); OP_CONVERTER(translate_placeholder_op); diff --git a/src/frontends/tensorflow_common/src/op/mul_no_nan.cpp b/src/frontends/tensorflow_common/src/op/mul_no_nan.cpp new file mode 100644 index 00000000000000..513507207d2635 --- /dev/null +++ b/src/frontends/tensorflow_common/src/op/mul_no_nan.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2023 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_op_table.hpp"
+#include "openvino/op/equal.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/select.hpp"
+#include "utils.hpp"
+
+using namespace std;
+using namespace ov::op;
+
+namespace ov {
+namespace frontend {
+namespace tensorflow {
+namespace op {
+OutputVector translate_mul_no_nan_op(const NodeContext& node) {
+    default_op_checks(node, 2, {"MulNoNan"});
+
+    // first = x, second = y
+    auto x = node.get_input(0);
+    auto y = node.get_input(1);
+
+    // prepare zero constant of the same type as the inputs
+    auto const_zero = create_same_type_const_scalar<int32_t>(x, 0);
+
+    // get mask where y equals 0
+    auto is_zero = make_shared<v1::Equal>(y, const_zero);
+
+    // replace all values in x at is_zero mask with zeros
+    auto x_zeros = make_shared<v1::Select>(is_zero, const_zero, x);
+
+    // multiply y with the updated x
+    auto mul_no_nan = make_shared<v1::Multiply>(x_zeros, y);
+
+    set_node_name(node.get_name(), mul_no_nan);
+    return mul_no_nan->outputs();
+}
+}  // namespace op
+}  // namespace tensorflow
+}  // namespace frontend
+}  // namespace ov
\ No newline at end of file
diff --git a/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp b/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp
index 8a911e6544f094..90826a6e164e2b 100644
--- a/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp
+++ b/src/frontends/tests/frontend/shared/src/op_fuzzy.cpp
@@ -107,7 +107,12 @@ void FrontEndFuzzyOpTest::runConvertedModel(const std::shared_ptr mod
     }
 }
 
+#ifdef OPENVINO_ARCH_ARM64
+// Ticket: 126830
+TEST_P(FrontEndFuzzyOpTest, DISABLED_testOpFuzzy) {
+#else
 TEST_P(FrontEndFuzzyOpTest, testOpFuzzy) {
+#endif
     // load
     ASSERT_NO_THROW(doLoadFromFile());
 
diff --git a/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt b/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt
index 959834bbe0d15c..cc541e17b9017a 100644
--- a/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt
+++ b/src/frontends/tests/frontend/shared/test_builtin_extensions/CMakeLists.txt
@@ -13,7 +13,7 @@ endif()
 file(GLOB LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
 file(GLOB LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
 
-set(DEPENDENCIES openvino::runtime::dev openvino::frontend::common)
+set(DEPENDENCIES openvino::runtime::dev)
 
 if (ENABLE_OV_ONNX_FRONTEND)
     list(APPEND DEPENDENCIES openvino::frontend::onnx)
diff --git a/src/inference/CMakeLists.txt b/src/inference/CMakeLists.txt
index 11ad85b3740d6a..768e7cf1185b7f 100644
--- a/src/inference/CMakeLists.txt
+++ b/src/inference/CMakeLists.txt
@@ -137,13 +137,11 @@ target_compile_definitions(${TARGET_NAME}_obj PRIVATE
     IMPLEMENT_INFERENCE_ENGINE_API
     $<$:PROXY_PLUGIN_ENABLED>
     $
-    $
     $)
 
 target_include_directories(${TARGET_NAME}_obj SYSTEM PRIVATE
     $
     $
-    $
     $<$:$>
     $<$:$>)
 
@@ -224,7 +222,8 @@ ov_install_static_lib(${TARGET_NAME}_plugin_api ${OV_CPACK_COMP_CORE})
 
 install(DIRECTORY "${PUBLIC_HEADERS_DIR}/"
         DESTINATION ${OV_CPACK_INCLUDEDIR}
-        COMPONENT ${OV_CPACK_COMP_CORE_DEV})
+        COMPONENT ${OV_CPACK_COMP_CORE_DEV}
+        ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL})
 
 if(ENABLE_TESTS)
     add_subdirectory(tests)
diff --git a/src/inference/dev_api/openvino/runtime/system_conf.hpp b/src/inference/dev_api/openvino/runtime/system_conf.hpp
index c2e0cc37fd942c..c4525265c2349a 100644
--- a/src/inference/dev_api/openvino/runtime/system_conf.hpp
+++ b/src/inference/dev_api/openvino/runtime/system_conf.hpp
@@ -260,6 +260,24 @@ OPENVINO_RUNTIME_API void set_cpu_used(const std::vector& cpu_ids, const in
  */
 OPENVINO_RUNTIME_API int get_socket_by_numa_node(int numa_node_id);
 
+/**
+ * @brief      Get the original socket id by a current socket id. The input socket id is the id recalculated
+ *             after filtering (like numactl), while the returned id is the original one before filtering
+ * @ingroup    ie_dev_api_system_conf
+ * @param[in]  socket_id  socket id
+ * @return     original socket id
+ */
+OPENVINO_RUNTIME_API int get_org_socket_id(int socket_id);
+
+/**
+ * @brief      Get the original numa node id by a current numa node id. The input numa node id is the id
+ *             recalculated after filtering (like numactl), while the returned id is the original one before filtering
+ * @ingroup    ie_dev_api_system_conf
+ * @param[in]  numa_node_id  numa node id
+ * @return     original numa node id
+ */
+OPENVINO_RUNTIME_API int get_org_numa_id(int numa_node_id);
+
 /**
  * @enum       ColumnOfCPUMappingTable
  * @brief      This enum contains the definition of each column in the CPU mapping table, which uses processor id as index.
diff --git a/src/inference/include/openvino/runtime/auto/properties.hpp b/src/inference/include/openvino/runtime/auto/properties.hpp
index 9cdfee865bd95d..090d9620550626 100644
--- a/src/inference/include/openvino/runtime/auto/properties.hpp
+++ b/src/inference/include/openvino/runtime/auto/properties.hpp
@@ -28,5 +28,51 @@ static constexpr Property enable_startup_fallback{"ENABLE_STARTUP_FALLBACK"
 * selected device
 */
static constexpr Property<bool> enable_runtime_fallback{"ENABLE_RUNTIME_FALLBACK"};
+
+/**
+ * @brief Enum to define the policy of scheduling inference requests to the target device in cumulative
+ * throughput mode on AUTO
+ * @ingroup ov_runtime_cpp_prop_api
+ */
+enum class SchedulePolicy {
+    ROUND_ROBIN = 0,            //!< Schedule infer requests using a round robin policy
+    DEVICE_PRIORITY = 1,        //!< Schedule infer requests based on the device priority
+    DEFAULT = DEVICE_PRIORITY,  //!< Default schedule policy is DEVICE_PRIORITY
+};
+
+/** @cond INTERNAL */
+inline std::ostream& operator<<(std::ostream& os, const SchedulePolicy& policy) {
+    switch (policy) {
+    case SchedulePolicy::ROUND_ROBIN:
+        return os << "ROUND_ROBIN";
+    case SchedulePolicy::DEVICE_PRIORITY:
+        return os << "DEVICE_PRIORITY";
+    default:
+        OPENVINO_THROW("Unsupported schedule policy value");
+    }
+}
+
+inline std::istream& operator>>(std::istream& is, SchedulePolicy& policy) {
+    std::string str;
+    is >> str;
+    if (str == "ROUND_ROBIN") {
+        policy = SchedulePolicy::ROUND_ROBIN;
+    } else if (str == "DEVICE_PRIORITY") {
+        policy = SchedulePolicy::DEVICE_PRIORITY;
+    } else if (str == "DEFAULT") {
+        policy = SchedulePolicy::DEFAULT;
+    } else {
+        OPENVINO_THROW("Unsupported schedule policy: ", str);
+    }
+    return is;
+}
+/** @endcond */
+
+/**
+ * @brief High-level OpenVINO model policy hint
+ * Defines which scheduling policy should be used in the AUTO CUMULATIVE_THROUGHPUT or MULTI case
+ * @ingroup ov_runtime_cpp_prop_api
+ */
+static constexpr Property<SchedulePolicy> schedule_policy{"SCHEDULE_POLICY"};
 }  // namespace intel_auto
 }  // namespace ov
\ No newline at end of file
diff --git a/src/inference/include/openvino/runtime/infer_request.hpp b/src/inference/include/openvino/runtime/infer_request.hpp
index 0ff2e7a8956216..b810caa7e0dfb7 100644
--- a/src/inference/include/openvino/runtime/infer_request.hpp
+++ b/src/inference/include/openvino/runtime/infer_request.hpp
@@ -315,6 +315,12 @@ class OPENVINO_RUNTIME_API InferRequest {
      */
     std::vector<VariableState> query_state();
 
+    /**
+     * @brief Resets all internal variable states for the relevant infer request to a value specified as
+     * default for the corresponding `ReadValue`
node + */ + void reset_state(); + /** * @brief Returns a compiled model that creates this inference request. * @return Compiled model object. diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 458563633ed0b3..6882c3a7035bf7 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -325,8 +325,9 @@ bool ov::CoreImpl::is_proxy_device(const ov::Plugin& plugin) const { } bool ov::CoreImpl::is_proxy_device(const std::string& dev_name) const { #ifdef PROXY_PLUGIN_ENABLED - return pluginRegistry.find(dev_name) != pluginRegistry.end() && - pluginRegistry.at(dev_name).pluginCreateFunc == ov::proxy::create_plugin; + std::string real_name = ov::parseDeviceNameIntoConfig(dev_name)._deviceName; + return pluginRegistry.find(real_name) != pluginRegistry.end() && + pluginRegistry.at(real_name).pluginCreateFunc == ov::proxy::create_plugin; #else return false; #endif @@ -837,7 +838,7 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::string& mod return compile_model_and_cache(model, plugin, parsed._config, {}, cacheContent); }); } else if (cacheManager) { - // this code path is enabled for AUTO / MULTI / BATCH devices which don't support + // this code path is enabled for AUTO / MULTI / BATCH / PROXY devices which don't support // import / export explicitly, but can redirect this functionality to actual HW plugin compiled_model = plugin.compile_model(model_path, parsed._config); } else { diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index 0a4719854ab88d..eb706a37192143 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -144,7 +144,7 @@ struct CPUStreamsExecutor::Impl { .set_max_threads_per_core(max_threads_per_core)}); } else if (stream_type == STREAM_WITH_NUMA_ID) { _taskArena.reset(new custom::task_arena{custom::task_arena::constraints{} - .set_numa_id(_numaNodeId) + .set_numa_id(get_org_numa_id(_numaNodeId)) .set_max_concurrency(concurrency) .set_max_threads_per_core(max_threads_per_core)}); } else if (stream_type == STREAM_WITH_CORE_TYPE) { diff --git a/src/inference/src/dev/threading/istreams_executor.cpp b/src/inference/src/dev/threading/istreams_executor.cpp index e77a9256c6fd1a..891de30aba695d 100644 --- a/src/inference/src/dev/threading/istreams_executor.cpp +++ b/src/inference/src/dev/threading/istreams_executor.cpp @@ -563,6 +563,10 @@ void IStreamsExecutor::Config::update_executor_config(int stream_nums, return; } + if (proc_type_table.size() > 1) { + core_type = ov::threading::IStreamsExecutor::Config::ANY; + } + // IStreamsExecutor::Config config = initial; const auto total_num_cores = proc_type_table[0][ALL_PROC]; const auto total_num_big_cores = proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][HYPER_THREADING_PROC]; diff --git a/src/inference/src/infer_request.cpp b/src/inference/src/infer_request.cpp index 1023479546d1a8..d9e9c388947f3c 100644 --- a/src/inference/src/infer_request.cpp +++ b/src/inference/src/infer_request.cpp @@ -291,6 +291,12 @@ std::vector InferRequest::query_state() { return variable_states; } +void InferRequest::reset_state(){OV_INFER_REQ_CALL_STATEMENT({ + for (auto&& state : _impl->query_state()) { + state->reset(); + } +})} + CompiledModel InferRequest::get_compiled_model() { OV_INFER_REQ_CALL_STATEMENT(return {std::const_pointer_cast(_impl->get_compiled_model()), _so}); } diff --git 
a/src/inference/src/os/cpu_map_info.hpp b/src/inference/src/os/cpu_map_info.hpp index 88606a846c3cb6..9121b706f7d620 100644 --- a/src/inference/src/os/cpu_map_info.hpp +++ b/src/inference/src/os/cpu_map_info.hpp @@ -27,6 +27,8 @@ class CPU { std::vector> _org_proc_type_table; std::vector> _proc_type_table; std::vector> _cpu_mapping_table; + std::map _socketid_mapping_table; + std::map _numaid_mapping_table; std::mutex _cpu_mutex; int _socket_idx = 0; }; @@ -155,14 +157,13 @@ void parse_processor_info_win(const char* base_ptr, * @param[out] _sockets total number for sockets in system * @param[out] _cores total number for physical CPU cores in system * @param[out] _proc_type_table summary table of number of processors per type - * @return */ -int parse_processor_info_macos(const std::vector>& system_info_table, - int& _processors, - int& _numa_nodes, - int& _sockets, - int& _cores, - std::vector>& _proc_type_table); +void parse_processor_info_macos(const std::vector>& system_info_table, + int& _processors, + int& _numa_nodes, + int& _sockets, + int& _cores, + std::vector>& _proc_type_table); #endif } // namespace ov diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 2cfb12a7826b33..99ded17f0f1597 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -169,10 +169,16 @@ CPU::CPU() { } } for (size_t i = 0; i < valid_cpu_mapping_table.size(); i++) { - valid_cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID] = - numa_node_map.at(valid_cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID]); - valid_cpu_mapping_table[i][CPU_MAP_SOCKET_ID] = - sockets_map.at(valid_cpu_mapping_table[i][CPU_MAP_SOCKET_ID]); + auto new_numa_id = numa_node_map.at(valid_cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID]); + auto new_socket_id = sockets_map.at(valid_cpu_mapping_table[i][CPU_MAP_SOCKET_ID]); + if (_numaid_mapping_table.find(new_numa_id) == _numaid_mapping_table.end()) { + _numaid_mapping_table.insert({new_numa_id, valid_cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID]}); + } + if (_socketid_mapping_table.find(new_socket_id) == _socketid_mapping_table.end()) { + _socketid_mapping_table.insert({new_socket_id, valid_cpu_mapping_table[i][CPU_MAP_SOCKET_ID]}); + } + valid_cpu_mapping_table[i][CPU_MAP_NUMA_NODE_ID] = new_numa_id; + valid_cpu_mapping_table[i][CPU_MAP_SOCKET_ID] = new_socket_id; } } @@ -265,12 +271,13 @@ CPU::CPU() { _cores); } } - _org_proc_type_table = _proc_type_table; std::vector>().swap(system_info_table); if (check_valid_cpu() < 0) { OPENVINO_THROW("CPU affinity check failed. 
No CPU is eligible to run inference."); }; + + _org_proc_type_table = _proc_type_table; } void parse_node_info_linux(const std::vector node_info_table, @@ -601,6 +608,16 @@ void parse_freq_info_linux(const std::vector> system_in std::vector line_value_0(PROC_TYPE_TABLE_SIZE, 0); + auto clean_up_output = [&]() { + _processors = 0; + _cores = 0; + _numa_nodes = 0; + _sockets = 0; + _cpu_mapping_table.clear(); + _proc_type_table.clear(); + return; + }; + for (int n = 0; n < _processors; n++) { if (-1 == _cpu_mapping_table[n][CPU_MAP_SOCKET_ID]) { std::string::size_type pos = 0; @@ -618,6 +635,10 @@ void parse_freq_info_linux(const std::vector> system_in core_1 = std::stoi(sub_str); sub_str = system_info_table[n][0].substr(endpos1 + 1); core_2 = std::stoi(sub_str); + if ((core_1 != n) && (core_2 != n)) { + clean_up_output(); + return; + } _cpu_mapping_table[core_1][CPU_MAP_PROCESSOR_ID] = core_1; _cpu_mapping_table[core_1][CPU_MAP_SOCKET_ID] = std::stoi(system_info_table[core_1][1]); diff --git a/src/inference/src/os/mac/mac_system_conf.cpp b/src/inference/src/os/mac/mac_system_conf.cpp index 04b9a77c7d61da..092cd0f33cddef 100644 --- a/src/inference/src/os/mac/mac_system_conf.cpp +++ b/src/inference/src/os/mac/mac_system_conf.cpp @@ -7,6 +7,7 @@ #include #include "dev/threading/parallel_custom_arena.hpp" +#include "openvino/core/except.hpp" #include "openvino/runtime/system_conf.hpp" #include "os/cpu_map_info.hpp" @@ -28,17 +29,16 @@ CPU::CPU() { } } - if (!parse_processor_info_macos(system_info_table, _processors, _numa_nodes, _sockets, _cores, _proc_type_table)) { - _org_proc_type_table = _proc_type_table; - } + parse_processor_info_macos(system_info_table, _processors, _numa_nodes, _sockets, _cores, _proc_type_table); + _org_proc_type_table = _proc_type_table; } -int parse_processor_info_macos(const std::vector>& system_info_table, - int& _processors, - int& _numa_nodes, - int& _sockets, - int& _cores, - std::vector>& _proc_type_table) { +void parse_processor_info_macos(const std::vector>& system_info_table, + int& _processors, + int& _numa_nodes, + int& _sockets, + int& _cores, + std::vector>& _proc_type_table) { _processors = 0; _numa_nodes = 0; _sockets = 0; @@ -51,7 +51,7 @@ int parse_processor_info_macos(const std::vector(it->second); } @@ -63,63 +63,50 @@ int parse_processor_info_macos(const std::vector(it->second); } + _proc_type_table.resize(1, std::vector(PROC_TYPE_TABLE_SIZE, 0)); + _numa_nodes = 1; _sockets = 1; + _proc_type_table[0][ALL_PROC] = _processors; + _proc_type_table[0][MAIN_CORE_PROC] = _cores; + _proc_type_table[0][HYPER_THREADING_PROC] = _processors - _cores; + it = std::find_if(system_info_table.begin(), system_info_table.end(), [&](const std::pair& item) { return item.first == "hw.optional.arm64"; }); - if (it == system_info_table.end()) { - _proc_type_table.resize(1, std::vector(PROC_TYPE_TABLE_SIZE, 0)); - _proc_type_table[0][ALL_PROC] = _processors; - _proc_type_table[0][MAIN_CORE_PROC] = _cores; - _proc_type_table[0][HYPER_THREADING_PROC] = _processors - _cores; - _proc_type_table[0][PROC_NUMA_NODE_ID] = 0; - _proc_type_table[0][PROC_SOCKET_ID] = 0; - } else { + if (it != system_info_table.end()) { it = std::find_if(system_info_table.begin(), system_info_table.end(), [&](const std::pair& item) { return item.first == "hw.perflevel0.physicalcpu"; }); - if (it == system_info_table.end()) { - _processors = 0; - _cores = 0; - _numa_nodes = 0; - _sockets = 0; - return -1; - } else { - _proc_type_table.resize(1, std::vector(PROC_TYPE_TABLE_SIZE, 0)); - 
_proc_type_table[0][ALL_PROC] = _processors; + if (it != system_info_table.end()) { _proc_type_table[0][MAIN_CORE_PROC] = it->second; - _proc_type_table[0][PROC_NUMA_NODE_ID] = 0; - _proc_type_table[0][PROC_SOCKET_ID] = 0; - } - it = std::find_if(system_info_table.begin(), - system_info_table.end(), - [&](const std::pair& item) { - return item.first == "hw.perflevel1.physicalcpu"; - }); + it = std::find_if(system_info_table.begin(), + system_info_table.end(), + [&](const std::pair& item) { + return item.first == "hw.perflevel1.physicalcpu"; + }); - if (it == system_info_table.end()) { - return 0; + if (it != system_info_table.end()) { + _proc_type_table[0][EFFICIENT_CORE_PROC] = it->second; + } } else { - _proc_type_table[0][EFFICIENT_CORE_PROC] = it->second; + _proc_type_table[0][EFFICIENT_CORE_PROC] = _cores / 2; + _proc_type_table[0][MAIN_CORE_PROC] = _cores - _proc_type_table[0][EFFICIENT_CORE_PROC]; } } - - return 0; } } // namespace ov diff --git a/src/inference/src/system_conf.cpp b/src/inference/src/system_conf.cpp index b4fded633ffdb0..7b048accbaf4b7 100644 --- a/src/inference/src/system_conf.cpp +++ b/src/inference/src/system_conf.cpp @@ -215,6 +215,14 @@ int get_socket_by_numa_node(int numa_node_id) { return -1; }; +int get_org_socket_id(int socket_id) { + return -1; +} + +int get_org_numa_id(int numa_node_id) { + return -1; +} + #elif defined(__APPLE__) // for Linux and Windows the getNumberOfCPUCores (that accounts only for physical cores) implementation is OS-specific // (see cpp files in corresponding folders), for __APPLE__ it is default : @@ -267,6 +275,24 @@ int get_socket_by_numa_node(int numa_node_id) { return -1; }; +int get_org_socket_id(int socket_id) { + CPU& cpu = cpu_info(); + auto iter = cpu._socketid_mapping_table.find(socket_id); + if (iter != cpu._socketid_mapping_table.end()) { + return iter->second; + } + return -1; +} + +int get_org_numa_id(int numa_node_id) { + CPU& cpu = cpu_info(); + auto iter = cpu._numaid_mapping_table.find(numa_node_id); + if (iter != cpu._numaid_mapping_table.end()) { + return iter->second; + } + return -1; +} + #else # ifndef _WIN32 @@ -417,6 +443,25 @@ int get_number_of_logical_cpu_cores(bool bigCoresOnly) { # endif return logical_cores; } + +int get_org_socket_id(int socket_id) { + CPU& cpu = cpu_info(); + auto iter = cpu._socketid_mapping_table.find(socket_id); + if (iter != cpu._socketid_mapping_table.end()) { + return iter->second; + } + return -1; +} + +int get_org_numa_id(int numa_node_id) { + CPU& cpu = cpu_info(); + auto iter = cpu._numaid_mapping_table.find(numa_node_id); + if (iter != cpu._numaid_mapping_table.end()) { + return iter->second; + } + return -1; +} + #endif #if ((OV_THREAD == OV_THREAD_TBB) || (OV_THREAD == OV_THREAD_TBB_AUTO)) diff --git a/src/inference/tests/functional/CMakeLists.txt b/src/inference/tests/functional/CMakeLists.txt index 4a42e9eb5bd5b4..187f48599746e5 100644 --- a/src/inference/tests/functional/CMakeLists.txt +++ b/src/inference/tests/functional/CMakeLists.txt @@ -36,6 +36,10 @@ if(ENABLE_AUTO_BATCH) list(APPEND COMPILE_DEFINITIONS ENABLE_AUTO_BATCH) endif() +if(ENABLE_PROXY) + list(APPEND COMPILE_DEFINITIONS PROXY_PLUGIN_ENABLED) +endif() + ov_add_test_target( NAME ${TARGET_NAME} ROOT ${CMAKE_CURRENT_SOURCE_DIR} @@ -48,6 +52,7 @@ ov_add_test_target( ${COMPILE_DEFINITIONS} INCLUDES $/src + $<$:$> ${CMAKE_CURRENT_SOURCE_DIR} ADD_CLANG_FORMAT LABELS diff --git a/src/inference/tests/functional/caching_test.cpp b/src/inference/tests/functional/caching_test.cpp index 
2668eafc44a34e..8991e2c7b4100e 100644 --- a/src/inference/tests/functional/caching_test.cpp +++ b/src/inference/tests/functional/caching_test.cpp @@ -14,6 +14,7 @@ #include #include "common_test_utils/file_utils.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "ie_plugin_config.hpp" #include "openvino/core/any.hpp" #include "openvino/core/except.hpp" @@ -22,6 +23,9 @@ #include "openvino/op/parameter.hpp" #include "openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" +#ifdef PROXY_PLUGIN_ENABLED +# include "openvino/proxy/properties.hpp" +#endif #include "openvino/runtime/common.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/core.hpp" @@ -29,7 +33,6 @@ #include "openvino/runtime/iplugin.hpp" #include "openvino/runtime/iremote_context.hpp" #include "openvino/runtime/properties.hpp" -#include "ov_models/subgraph_builders.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" @@ -217,7 +220,7 @@ class CachingTest : public ::testing::TestWithParam(modelName, weightsName); - manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu({1, 3, 227, 227}, ov::element::Type_t::f32)); + manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::Type_t::f32)); } void TearDown() override { @@ -2341,3 +2344,58 @@ INSTANTIATE_TEST_SUITE_P(CachingTest, ::testing::Combine(::testing::ValuesIn(loadVariants), ::testing::ValuesIn(cacheFolders)), getTestCaseName); #endif // defined(ENABLE_OV_IR_FRONTEND) + +class CacheTestWithProxyEnabled : public CachingTest { +protected: + void testLoadProxy(const std::function& func) { + ov::Core core; + injectPlugin(mockPlugin.get()); + core.register_plugin(ov::util::make_plugin_library_name(ov::test::utils::getExecutableDirectory(), + std::string("mock_engine") + OV_BUILD_POSTFIX), + deviceName, + {{ov::proxy::configuration::alias.name(), "mock"}, + {ov::proxy::configuration::internal_name.name(), "internal_mock"}}); + ON_CALL(*mockPlugin, get_default_context(_)).WillByDefault(Invoke([&](const ov::AnyMap&) { + return std::make_shared("internal_mock"); + })); + func(core); + core.unload_plugin(deviceName); + } +}; + +#ifdef PROXY_PLUGIN_ENABLED +TEST_P(CacheTestWithProxyEnabled, TestLoad) { + ON_CALL(*mockPlugin, get_property(ov::available_devices.name(), _)) + .WillByDefault(Invoke([&](const std::string&, const ov::AnyMap&) { + std::vector available_devices = {}; + available_devices.push_back("mock"); + return decltype(ov::available_devices)::value_type(available_devices); + })); + EXPECT_CALL(*mockPlugin, get_default_context(_)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::architecture.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::supported_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::internal::caching_properties.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::available_devices.name(), _)).Times(AnyNumber()); + EXPECT_CALL(*mockPlugin, get_property(ov::device::capabilities.name(), _)) + .Times(AnyNumber()) + .WillRepeatedly(Return(decltype(ov::device::capabilities)::value_type{})); + // proxy should direct the compile from file to hardware plugin + 
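// (editorial note: of the parameterized load variants, only load-by-model-name compiles from a file, so the hook below fires exactly once for it and never for the in-memory variants) +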
EXPECT_CALL(*mockPlugin, OnCompileModelFromFile()).Times(m_type == TestLoadType::EModelName ? 1 : 0); + + { + EXPECT_CALL(*mockPlugin, compile_model(_, _, _)).Times(m_remoteContext ? 1 : 0); + EXPECT_CALL(*mockPlugin, compile_model(A&>(), _)) + .Times(!m_remoteContext ? 1 : 0); + testLoadProxy([&](ov::Core& core) { + core.set_property(ov::cache_dir(m_cacheDir)); + m_testFunction(core); + }); + } +} + +INSTANTIATE_TEST_SUITE_P(CacheTestWithProxyEnabled, + CacheTestWithProxyEnabled, + ::testing::Combine(::testing::ValuesIn(loadVariants), ::testing::ValuesIn(cacheFolders)), + getTestCaseName); +#endif \ No newline at end of file diff --git a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp index 118c32218ed7b2..0609798e9669a4 100644 --- a/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp +++ b/src/inference/tests/unit/cpu_map_parser/freq_parser_linux.cpp @@ -967,6 +967,22 @@ LinuxCpuMapTestCase freq_1sockets_4cores = { {}, }; +LinuxCpuMapTestCase freq_1sockets_4cores_2 = { + 0, + 0, + 0, + 0, + {}, + {}, + { + {"0-3", "-1", "1848000"}, + {"0-3", "-1", "1848000"}, + {"0-3", "-1", "1848000"}, + {"0-3", "-1", "1848000"}, + }, + {}, +}; + TEST_P(LinuxCpuMapFreqParserTests, LinuxFreq) {} INSTANTIATE_TEST_SUITE_P(CPUMap, @@ -986,7 +1002,8 @@ INSTANTIATE_TEST_SUITE_P(CPUMap, freq_1sockets_12cores_hyperthreading, freq_1sockets_8cores_hyperthreading, freq_1sockets_8cores_hyperthreading_1, - freq_1sockets_4cores)); + freq_1sockets_4cores, + freq_1sockets_4cores_2)); #endif } // namespace \ No newline at end of file diff --git a/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp b/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp index 59fe8aef0ac543..9b550ee9a04f97 100644 --- a/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp +++ b/src/inference/tests/unit/cpu_map_parser/parser_macos.cpp @@ -52,7 +52,7 @@ class MacOSCpuMapParserTests : public ov::test::TestsCommon, } }; -MacOSCpuMapTestCase test_case_arm = { +MacOSCpuMapTestCase test_case_arm_1 = { 8, // param[expected out]: total 8 logcial processors on this simulated platform 1, // param[expected out]: total 1 numa nodes on this simulated platform 1, // param[expected out]: total 1 sockets on this simulated platform @@ -67,7 +67,32 @@ MacOSCpuMapTestCase test_case_arm = { }, // param[in]: The system information table of this simulated platform }; -MacOSCpuMapTestCase test_case_x86 = { +MacOSCpuMapTestCase test_case_arm_2 = { + 8, + 1, + 1, + 8, + {{8, 4, 4, 0, 0, 0}}, + { + {"hw.ncpu", 8}, + {"hw.physicalcpu", 8}, + {"hw.optional.arm64", 1}, + }, +}; + +MacOSCpuMapTestCase test_case_arm_3 = { + 8, + 1, + 1, + 8, + {{8, 4, 4, 0, 0, 0}}, + { + {"hw.ncpu", 8}, + {"hw.optional.arm64", 1}, + }, +}; + +MacOSCpuMapTestCase test_case_x86_1 = { 12, 1, 1, @@ -76,9 +101,21 @@ MacOSCpuMapTestCase test_case_x86 = { {{"hw.ncpu", 12}, {"hw.physicalcpu", 6}}, }; +MacOSCpuMapTestCase test_case_x86_2 = { + 12, + 1, + 1, + 12, + {{12, 12, 0, 0, 0, 0}}, + {{"hw.ncpu", 12}}, +}; + TEST_P(MacOSCpuMapParserTests, MacOS) {} -INSTANTIATE_TEST_SUITE_P(CPUMap, MacOSCpuMapParserTests, testing::Values(test_case_arm, test_case_x86)); +INSTANTIATE_TEST_SUITE_P( + CPUMap, + MacOSCpuMapParserTests, + testing::Values(test_case_arm_1, test_case_arm_2, test_case_arm_3, test_case_x86_1, test_case_x86_2)); #endif } // namespace diff --git a/src/plugins/auto/CMakeLists.txt b/src/plugins/auto/CMakeLists.txt index b3b60de0ab5d1b..5afb0173667db1 100644 --- 
a/src/plugins/auto/CMakeLists.txt +++ b/src/plugins/auto/CMakeLists.txt @@ -35,9 +35,8 @@ elseif(ENABLE_MULTI) VERSION_DEFINES_FOR src/plugin.cpp) endif() -if(ENABLE_TESTS) - add_subdirectory(tests) -endif() +find_package(Threads REQUIRED) +target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads) ov_set_threading_interface_for(${TARGET_NAME}) @@ -45,3 +44,7 @@ ov_set_threading_interface_for(${TARGET_NAME}) ov_add_api_validator_post_build_step(TARGET ${TARGET_NAME}) set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO}) + +if(ENABLE_TESTS) + add_subdirectory(tests) +endif() diff --git a/src/plugins/auto/src/auto_compiled_model.cpp b/src/plugins/auto/src/auto_compiled_model.cpp index a4c214bc721b30..2577b2b8fa26f3 100644 --- a/src/plugins/auto/src/auto_compiled_model.cpp +++ b/src/plugins/auto/src/auto_compiled_model.cpp @@ -257,18 +257,25 @@ ov::Any AutoCompiledModel::get_property(const std::string& name) const { OPENVINO_SUPPRESS_DEPRECATED_END } else if (name == ov::loaded_from_cache) { std::lock_guard lock(m_context->m_fallback_mutex); - if (m_scheduler->m_compile_context[FALLBACKDEVICE].m_is_already) { + std::string device_name; + try { + std::lock_guard lock(m_context->m_mutex); + if (m_scheduler->m_compile_context[FALLBACKDEVICE].m_is_already) { + device_name = m_scheduler->m_compile_context[FALLBACKDEVICE].m_device_info.device_name; return m_scheduler->m_compile_context[FALLBACKDEVICE].m_compiled_model->get_property(name).as(); } - if (m_scheduler->m_compile_context[ACTUALDEVICE].m_is_already) { - return m_scheduler->m_compile_context[ACTUALDEVICE]. - m_compiled_model->get_property(name).as(); - } else { - std::lock_guard lock(m_context->m_mutex); - OPENVINO_ASSERT(m_scheduler->m_compile_context[CPU].m_is_already == true && - m_scheduler->m_compile_context[CPU].m_compiled_model._ptr); - return m_scheduler->m_compile_context[CPU]. 
- m_compiled_model->get_property(name).as(); + if (m_scheduler->m_compile_context[ACTUALDEVICE].m_is_already) { + device_name = m_scheduler->m_compile_context[ACTUALDEVICE].m_device_info.device_name; + return m_scheduler->m_compile_context[ACTUALDEVICE].m_compiled_model->get_property(name).as(); + } else { + OPENVINO_ASSERT(m_scheduler->m_compile_context[CPU].m_is_already == true && + m_scheduler->m_compile_context[CPU].m_compiled_model._ptr); + device_name = m_scheduler->m_compile_context[CPU].m_device_info.device_name; + return m_scheduler->m_compile_context[CPU].m_compiled_model->get_property(name).as(); + } + } catch (const ov::Exception&) { + LOG_DEBUG_TAG("get_property loaded_from_cache from %s failed", device_name.c_str()); + return false; } } OPENVINO_THROW(get_log_tag(), ": not supported property ", name); diff --git a/src/plugins/auto/src/common.hpp b/src/plugins/auto/src/common.hpp index 8fac7e0e5e2444..317d6844c5af4c 100644 --- a/src/plugins/auto/src/common.hpp +++ b/src/plugins/auto/src/common.hpp @@ -219,6 +219,7 @@ class ScheduleContext : public std::enable_shared_from_this { std::string m_str_devices; unsigned int m_model_priority = 0; ov::Any m_performance_hint; + ov::Any m_schedule_policy = ov::intel_auto::SchedulePolicy::DEFAULT; std::mutex m_mutex; std::mutex m_fallback_mutex; SoCompiledModel m_hw_compiled_model; diff --git a/src/plugins/auto/src/cumulative_compiled_model.cpp b/src/plugins/auto/src/cumulative_compiled_model.cpp index 0f23b9fdc249f6..84407cc0c6c79e 100644 --- a/src/plugins/auto/src/cumulative_compiled_model.cpp +++ b/src/plugins/auto/src/cumulative_compiled_model.cpp @@ -47,7 +47,8 @@ ov::Any AutoCumuCompiledModel::get_property(const std::string& name) const { ov::optimal_number_of_infer_requests, ov::device::properties, ov::hint::model_priority, - ov::loaded_from_cache}; + ov::loaded_from_cache, + ov::intel_auto::schedule_policy}; return ro_properties; }; const auto& default_rw_properties = []() { @@ -72,6 +73,8 @@ ov::Any AutoCumuCompiledModel::get_property(const std::string& name) const { return decltype(ov::supported_properties)::value_type(supported_properties); } else if (name == ov::hint::performance_mode) { return m_context->m_performance_hint; + } else if (name == ov::intel_auto::schedule_policy) { + return m_context->m_schedule_policy; } else if (name == ov::device::priorities) { // device priority does not support change on-the-fly return decltype(ov::device::priorities)::value_type(m_context->m_str_devices); @@ -141,7 +144,14 @@ ov::Any AutoCumuCompiledModel::get_property(const std::string& name) const { std::lock_guard lock(m_context->m_fallback_mutex); for (size_t i = 0; i < m_scheduler->m_n_ctput_devicenums; i++) { if (m_scheduler->m_p_ctput_loadcontext[i].m_is_already) { - loaded_from_cache &= (m_scheduler->m_p_ctput_loadcontext[i].m_compiled_model->get_property(name).as()); + try { + loaded_from_cache &= + (m_scheduler->m_p_ctput_loadcontext[i].m_compiled_model->get_property(name).as()); + } catch (const ov::Exception&) { + LOG_DEBUG_TAG("get_property loaded_from_cache from %s failed", + m_scheduler->m_p_ctput_loadcontext[i].m_device_info.device_name.c_str()); + return false; + } } } return loaded_from_cache; diff --git a/src/plugins/auto/src/cumulative_schedule.cpp b/src/plugins/auto/src/cumulative_schedule.cpp index bf321e1aa58ebd..476c923bfedf58 100644 --- a/src/plugins/auto/src/cumulative_schedule.cpp +++ b/src/plugins/auto/src/cumulative_schedule.cpp @@ -10,6 +10,25 @@ // 
------------------------------CumuSchedule---------------------------- namespace ov { namespace auto_plugin { +std::string CumuSchedule::schedule_to_next_device(const std::vector& devices, + std::size_t current_device_index) { + std::string selected_device_name = ""; + { + std::lock_guard lock(m_context->m_mutex); + m_n_ctput_schedule_next_device = + m_n_ctput_schedule_next_device >= devices.size() ? 0 : m_n_ctput_schedule_next_device; + selected_device_name = devices[m_n_ctput_schedule_next_device].device_name; + } + auto schedule_policy = m_context->m_schedule_policy; + if (schedule_policy == ov::intel_auto::SchedulePolicy::ROUND_ROBIN) { + std::lock_guard lock(m_context->m_mutex); + m_n_ctput_schedule_next_device++; + } else if (schedule_policy == ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY) { + selected_device_name = devices[current_device_index].device_name; + } + return selected_device_name; +} + bool CumuSchedule::select_other_device(const std::string& cur_dev_name) { { std::lock_guard lock(m_context->m_fallback_mutex); @@ -209,7 +228,7 @@ bool CumuSchedule::schedule_to_worker_infer_request(ov::threading::Task pipeline std::unique_lock lock(m_context->m_fallback_mutex); if (!preferred_device.empty()) { devices = m_context->m_device_priorities; - if (!deviceChecker().check_if_device_in_list(preferred_device, devices)) { + if (!deviceChecker().check_if_device_in_list(preferred_device, devices)) { lock.unlock(); OPENVINO_THROW("The preferred device should be the selected device"); } @@ -217,14 +236,22 @@ bool CumuSchedule::schedule_to_worker_infer_request(ov::threading::Task pipeline devices = m_context->m_device_priorities; } lock.unlock(); - for (auto&& device : devices) { - if (!preferred_device.empty() && (device.device_name != preferred_device)) { + + std::size_t current_device_index = 0; + while (current_device_index < devices.size()) { + if (!preferred_device.empty() && (devices[current_device_index].device_name != preferred_device)) { + current_device_index++; continue; } - if (run_pipeline_task(pipeline_task, m_idle_worker_requests[device.device_name], preferred_device)) { + auto selected_device_name = + preferred_device.empty() ? 
schedule_to_next_device(devices, current_device_index) : preferred_device;
+        if (run_pipeline_task(pipeline_task, m_idle_worker_requests[selected_device_name], preferred_device)) {
             return true;
+        } else {
+            current_device_index++;
+        }
     }
+
     // no vacant requests this time, storing the task to the respective queue
     if (!preferred_device.empty()) {
         m_infer_pipeline_tasks_device_specific[preferred_device]->push(std::move(pipeline_task));
diff --git a/src/plugins/auto/src/cumulative_schedule.hpp b/src/plugins/auto/src/cumulative_schedule.hpp
index b8b5defd218cbb..fdbb7be965af0f 100644
--- a/src/plugins/auto/src/cumulative_schedule.hpp
+++ b/src/plugins/auto/src/cumulative_schedule.hpp
@@ -17,7 +17,9 @@ class CumuSchedule : public Schedule {
     virtual ~CumuSchedule();
     std::unique_ptr<AutoCompileContext[]> m_p_ctput_loadcontext = nullptr;
     size_t m_n_ctput_devicenums = 0;
-
+    size_t m_n_ctput_schedule_next_device = 0;
+    std::string schedule_to_next_device(const std::vector<DeviceInformation>& devices,
+                                        std::size_t current_device_index);
 private:
     void init() override;
     SoCompiledModel wait_first_compiled_model_ready() override;
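Editorial aside before the wiring below: a hedged usage sketch of the new schedule policy property from the
application side (the "AUTO:CPU,GPU" device string and the model path are illustrative, not from the source):

    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical model path
    // CUMULATIVE_THROUGHPUT plus ROUND_ROBIN hands successive infer requests to devices in turn:
    auto compiled = core.compile_model(model,
                                       "AUTO:CPU,GPU",
                                       ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
                                       ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::ROUND_ROBIN));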
diff --git a/src/plugins/auto/src/plugin.cpp b/src/plugins/auto/src/plugin.cpp
index 5d612a54216b4e..5b27a5f9f683ed 100644
--- a/src/plugins/auto/src/plugin.cpp
+++ b/src/plugins/auto/src/plugin.cpp
@@ -555,6 +555,7 @@ std::shared_ptr<ov::ICompiledModel> Plugin::compile_model_impl(const std::string
     auto_s_context->m_startup_fallback = load_config.get_property(ov::intel_auto::enable_startup_fallback);
     auto_s_context->m_runtime_fallback = load_config.get_property(ov::intel_auto::enable_runtime_fallback);
     auto_s_context->m_bind_buffer = load_config.get_property(ov::intel_auto::device_bind_buffer);
+    auto_s_context->m_schedule_policy = load_config.get_property(ov::intel_auto::schedule_policy);
     std::shared_ptr<ov::ICompiledModel> impl;
     std::shared_ptr<Schedule> scheduler = is_cumulative ? std::static_pointer_cast<Schedule>(std::make_shared<CumuSchedule>())
                                                         : std::static_pointer_cast<Schedule>(std::make_shared<AutoSchedule>());
diff --git a/src/plugins/auto/src/plugin_config.cpp b/src/plugins/auto/src/plugin_config.cpp
index 0f49680856d1a4..c8e32fc68c2f0c 100644
--- a/src/plugins/auto/src/plugin_config.cpp
+++ b/src/plugins/auto/src/plugin_config.cpp
@@ -21,6 +21,7 @@ void PluginConfig::set_default() {
         std::make_tuple(ov::hint::model_priority, ov::hint::Priority::MEDIUM),
         std::make_tuple(ov::log::level, ov::log::Level::NO),
         std::make_tuple(ov::intel_auto::device_bind_buffer, false),
+        std::make_tuple(ov::intel_auto::schedule_policy, ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY),
         std::make_tuple(ov::hint::performance_mode, ov::hint::PerformanceMode::LATENCY),
         std::make_tuple(ov::hint::execution_mode, ov::hint::ExecutionMode::PERFORMANCE),
         std::make_tuple(ov::hint::num_requests, 0, UnsignedTypeValidator()),
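On the application side the new behaviour is opt-in: DEVICE_PRIORITY stays the default registered above, and round-robin is requested per compilation. A minimal usage sketch (the model path is hypothetical):

```cpp
#include "openvino/openvino.hpp"
#include "openvino/runtime/auto/properties.hpp"

int main() {
    ov::Core core;
    // The schedule policy only matters for cumulative throughput, where AUTO
    // keeps several devices busy at once.
    auto compiled = core.compile_model(
        "model.xml",  // hypothetical model path
        "AUTO",
        ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT),
        ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::ROUND_ROBIN));
    compiled.create_infer_request().infer();
    return 0;
}
```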
diff --git a/src/plugins/auto/tests/functional/CMakeLists.txt b/src/plugins/auto/tests/functional/CMakeLists.txt
index b15afe68b96660..f905c1b61b327e 100644
--- a/src/plugins/auto/tests/functional/CMakeLists.txt
+++ b/src/plugins/auto/tests/functional/CMakeLists.txt
@@ -10,7 +10,7 @@ if(ENABLE_AUTO_BATCH)
 endif()
 
 if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-    ie_add_compiler_flags(/wd4305)
+    ov_add_compiler_flags(/wd4305)
 endif()
 
 ov_add_test_target(
diff --git a/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp b/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp
new file mode 100644
index 00000000000000..53004953d7f749
--- /dev/null
+++ b/src/plugins/auto/tests/functional/behavior/infer_schedule_test.cpp
@@ -0,0 +1,96 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "auto_func_test.hpp"
+
+namespace ov {
+namespace auto_plugin {
+namespace tests {
+using schedule_policy_param = std::tuple<ov::AnyMap, int>;
+
+class InferSchedulePolicyTest : public AutoFuncTests, public testing::WithParamInterface<schedule_policy_param> {
+public:
+    void SetUp() override {
+        AutoFuncTests::SetUp();
+        std::tie(property, niters) = this->GetParam();
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<schedule_policy_param>& obj) {
+        ov::AnyMap property;
+        int niters;
+        std::tie(property, niters) = obj.param;
+        std::ostringstream result;
+        result << "numberOfInfer=" << niters << "_";
+        if (!property.empty()) {
+            for (auto& iter : property) {
+                result << "priority=" << iter.first << "_" << iter.second.as<std::string>();
+            }
+        }
+        return result.str();
+    }
+
+public:
+    ov::AnyMap property;
+    int niters;
+};
+
+TEST_P(InferSchedulePolicyTest, can_run_async_requests_with_different_schedule_policy) {
+    ov::CompiledModel compiled_model;
+    property.emplace(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
+    ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch, "AUTO", property));
+    std::vector<ov::InferRequest> inferReqsQueue;
+    int count = niters;
+    while (count--) {
+        ov::InferRequest req;
+        ASSERT_NO_THROW(req = compiled_model.create_infer_request());
+        inferReqsQueue.push_back(req);
+    }
+    for (auto& req : inferReqsQueue) {
+        ASSERT_NO_THROW(req.start_async());
+    }
+    for (auto& req : inferReqsQueue) {
+        ASSERT_NO_THROW(req.wait());
+    }
+}
+
+TEST_P(InferSchedulePolicyTest, can_run_sync_requests_with_different_schedule_policy) {
+    ov::CompiledModel compiled_model;
+    property.emplace(ov::hint::performance_mode(ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
+    ASSERT_NO_THROW(compiled_model = core.compile_model(model_cannot_batch, "AUTO", property));
+    std::vector<ov::InferRequest> inferReqsQueue;
+    int count = niters;
+    while (count--) {
+        ov::InferRequest req;
+        ASSERT_NO_THROW(req = compiled_model.create_infer_request());
+        inferReqsQueue.push_back(req);
+    }
+    for (auto& req : inferReqsQueue) {
+        ASSERT_NO_THROW(req.infer());
+        ASSERT_NO_THROW(req.wait());
+    }
+}
+
+auto properties = std::vector<ov::AnyMap>{
+    {ov::device::priorities("MOCK_GPU"), ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::ROUND_ROBIN)},
+    {ov::device::priorities("MOCK_GPU"),
+     ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY)},
+    {ov::device::priorities("MOCK_CPU"), ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::ROUND_ROBIN)},
+    {ov::device::priorities("MOCK_CPU"),
+     ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY)},
+    {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
+     ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::ROUND_ROBIN)},
+    {ov::device::priorities("MOCK_GPU", "MOCK_CPU"),
+     ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY)},
+    {ov::device::priorities("MOCK_CPU", "MOCK_GPU"),
+     ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::ROUND_ROBIN)}};
+auto niters = std::vector<int>{10, 20, 30};
+
+INSTANTIATE_TEST_SUITE_P(AutoFuncTests,
+                         InferSchedulePolicyTest,
+                         ::testing::Combine(::testing::ValuesIn(properties), ::testing::ValuesIn(niters)),
+                         InferSchedulePolicyTest::getTestCaseName);
+}  // namespace tests
+}  // namespace auto_plugin
+}  // namespace ov
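The suite relies on the usual gtest cross product: seven property maps times three iteration counts yield 21 parameter sets per TEST_P. As a self-contained reminder of how `Combine`/`ValuesIn`/`Values` compose (toy fixture, unrelated to the plugin):

```cpp
#include <tuple>

#include <gtest/gtest.h>

class ComboTest : public ::testing::TestWithParam<std::tuple<int, int>> {};

// Combine() instantiates the full cross product: 2 x 3 = 6 cases here.
TEST_P(ComboTest, product_is_nonnegative) {
    int a, b;
    std::tie(a, b) = GetParam();
    EXPECT_GE(a * b, 0);
}

INSTANTIATE_TEST_SUITE_P(Example,
                         ComboTest,
                         ::testing::Combine(::testing::Values(1, 2),
                                            ::testing::Values(10, 20, 30)));
```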
diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
index 7401dfff81fe9e..e941a856190002 100644
--- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
@@ -24,13 +24,13 @@ std::shared_ptr<ov::Model> getFunction2() {
     auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector<float>{}, true);
     auto add = ngraph::builder::makeEltwise(split->output(0), in2add, ngraph::helpers::EltwiseTypes::ADD);
-    auto relu1 = std::make_shared<ngraph::opset1::Relu>(add);
+    auto relu1 = std::make_shared<ov::op::v0::Relu>(add);
 
     auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector<float>{}, true);
     auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY);
-    auto relu2 = std::make_shared<ngraph::opset1::Relu>(mult);
+    auto relu2 = std::make_shared<ov::op::v0::Relu>(mult);
 
-    auto concat = std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3);
+    auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3);
     concat->get_output_tensor(0).set_names({"concat"});
 
     return std::make_shared<ov::Model>(concat, params, "SplitAddConcat");
diff --git a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
index 39756244e9fdad..90cfa56407ca6f 100644
--- a/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
+++ b/src/plugins/auto/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp
@@ -80,13 +80,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_AutoCompileModelBehaviorTests,
                                             ::testing::ValuesIn(auto_compileModel_properties)),
                          OVSetPropComplieModleGetPropTests::getTestCaseName);
 
-const std::vector<ov::AnyMap> default_properties = {{ov::enable_profiling(false)},
-                                                    {ov::log::level("LOG_NONE")},
-                                                    {ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
-                                                    {ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)},
-                                                    {ov::intel_auto::device_bind_buffer(false)},
-                                                    {ov::intel_auto::enable_startup_fallback(true)},
-                                                    {ov::device::priorities("")}};
+const std::vector<ov::AnyMap> default_properties = {
+    {ov::enable_profiling(false)},
+    {ov::log::level("LOG_NONE")},
+    {ov::hint::model_priority(ov::hint::Priority::MEDIUM)},
+    {ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)},
+    {ov::intel_auto::device_bind_buffer(false)},
+    {ov::intel_auto::enable_startup_fallback(true)},
+    {ov::intel_auto::schedule_policy(ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY)},
+    {ov::device::priorities("")}};
 INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorTests,
                          OVPropertiesDefaultTests,
                          ::testing::Combine(::testing::Values(ov::test::utils::DEVICE_AUTO),
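Because the policy now appears among the reported defaults, it can also be inspected without compiling a model, assuming AUTO exposes it the same way as the other defaults checked above:

```cpp
#include <iostream>

#include "openvino/openvino.hpp"
#include "openvino/runtime/auto/properties.hpp"

int main() {
    ov::Core core;
    // Expected to print DEVICE_PRIORITY unless the application overrides it
    // (a sketch, relying on the streaming operator the tests also use).
    auto policy = core.get_property("AUTO", ov::intel_auto::schedule_policy);
    std::cout << policy << std::endl;
    return 0;
}
```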
diff --git a/src/plugins/auto/tests/unit/compile_model_property_test.cpp b/src/plugins/auto/tests/unit/compile_model_property_test.cpp
index 278c3dbbfe3363..d1ef9adcbdd573 100644
--- a/src/plugins/auto/tests/unit/compile_model_property_test.cpp
+++ b/src/plugins/auto/tests/unit/compile_model_property_test.cpp
@@ -3,6 +3,7 @@
 //
 
 #include "include/auto_unit_test.hpp"
+#include "openvino/runtime/properties.hpp"
 
 // define a matcher if all the elements of subMap are contained in the map.
 MATCHER_P(MapContains, subMap, "Check if all the elements of the subMap are contained in the map.") {
@@ -217,4 +218,122 @@ const std::vector<ConfigParams> testConfigsAutoLoadFailed = {
 INSTANTIATE_TEST_SUITE_P(smoke_AutoLoadExeNetworkFailedTest,
                          AutoLoadExeNetworkFailedTest,
                          ::testing::ValuesIn(testConfigsAutoLoadFailed),
-                         AutoLoadExeNetworkFailedTest::getTestCaseName);
\ No newline at end of file
+                         AutoLoadExeNetworkFailedTest::getTestCaseName);
+
+using PropertyTestParams = std::tuple<std::string,                  // virtual device name
+                                      std::string,                  // device priority list
+                                      std::map<std::string, bool>,  // if supported property
+                                      ov::AnyMap>;                  // optional property and its expected value
+
+class CompiledModelPropertyMockTest : public tests::AutoTest, public ::testing::TestWithParam<PropertyTestParams> {
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<PropertyTestParams> obj) {
+        std::string deviceName;
+        std::string devicePriorities;
+        std::map<std::string, bool> isSupportProperty;
+        ov::AnyMap properties;
+        std::tie(deviceName, devicePriorities, isSupportProperty, properties) = obj.param;
+        std::ostringstream result;
+        result << "_virtual_device_" << deviceName;
+        result << "_loadnetwork_to_device_" << devicePriorities;
+        for (auto& property : properties) {
+            result << "_property_" << property.first;
+            bool isSupport = isSupportProperty[property.first];
+            if (isSupport)
+                result << "_isSupport_Yes_";
+            else
+                result << "_isSupport_No_";
+            result << "_expectedValue_" << property.second.as<std::string>();
+        }
+        return result.str();
+    }
+
+    void SetUp() override {
+        std::string deviceName;
+        std::string devicePriorities;
+        ov::AnyMap properties;
+        std::map<std::string, bool> isSupportProperty;
+        std::tie(deviceName, devicePriorities, isSupportProperty, properties) = GetParam();
+        std::vector<std::string> availableDevs = {"CPU", "GPU"};
+        ON_CALL(*core, get_available_devices()).WillByDefault(Return(availableDevs));
+        ON_CALL(*core,
+                compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
+                              ::testing::Matcher<const std::string&>(StrEq(ov::test::utils::DEVICE_CPU)),
+                              _))
+            .WillByDefault(Return(mockExeNetwork));
+        ON_CALL(*core,
+                compile_model(::testing::Matcher<const std::shared_ptr<const ov::Model>&>(_),
+                              ::testing::Matcher<const std::string&>(StrNe(ov::test::utils::DEVICE_CPU)),
+                              _))
+            .WillByDefault(Return(mockExeNetworkActual));
+        std::vector<ov::PropertyName> supported_props = {};
+        for (auto& property : properties) {
+            bool isSupport = isSupportProperty[property.first];
+            if (isSupport) {
+                supported_props.push_back(property.first);
+                auto value = property.second.as<std::string>();
+                ON_CALL(*mockIExeNet.get(), get_property(StrEq(property.first)))
+                    .WillByDefault(RETURN_MOCK_VALUE(value));
+                ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(property.first)))
+                    .WillByDefault(RETURN_MOCK_VALUE(value));
+            } else {
+                ON_CALL(*mockIExeNet.get(), get_property(StrEq(property.first)))
+                    .WillByDefault(Throw(ov::Exception{"unsupported property"}));
+                ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(property.first)))
+                    .WillByDefault(Throw(ov::Exception{"unsupported property"}));
+            }
+        }
+        ON_CALL(*mockIExeNet.get(), get_property(StrEq(ov::supported_properties.name())))
+            .WillByDefault(Return(ov::Any(supported_props)));
+        ON_CALL(*mockIExeNetActual.get(), get_property(StrEq(ov::supported_properties.name())))
+            .WillByDefault(Return(ov::Any(supported_props)));
+    }
+};
+
+TEST_P(CompiledModelPropertyMockTest, compiledModelGetPropertyNoThrow) {
+    std::string deviceName;
+    std::string devicePriorities;
+    ov::AnyMap properties;
+    std::map<std::string, bool> isSupportProperty;
+    std::tie(deviceName, devicePriorities, isSupportProperty, properties) = GetParam();
+    if (deviceName.find("AUTO") != std::string::npos)
+        plugin->set_device_name("AUTO");
+    if (deviceName.find("MULTI") != std::string::npos)
+        plugin->set_device_name("MULTI");
+    std::shared_ptr<ov::ICompiledModel> autoExecNetwork;
+    ASSERT_NO_THROW(autoExecNetwork = plugin->compile_model(model, {ov::device::priorities(devicePriorities)}));
+    for (auto& property : properties) {
+        auto result = autoExecNetwork->get_property(property.first).as<std::string>();
+        EXPECT_EQ(result, property.second.as<std::string>());
+    }
+}
+const std::vector<PropertyTestParams> testCompiledModelProperty = {
+    PropertyTestParams{"AUTO",
+                       "CPU,GPU",
+                       {{ov::loaded_from_cache.name(), true}},
+                       {{ov::loaded_from_cache.name(), true}}},
+    PropertyTestParams{"AUTO",
+                       "CPU,GPU",
+                       {{ov::loaded_from_cache.name(), true}},
+                       {{ov::loaded_from_cache.name(), false}}},
+    PropertyTestParams{"AUTO",
+                       "CPU,GPU",
+                       {{ov::loaded_from_cache.name(), false}},
+                       {{ov::loaded_from_cache.name(), false}}},
+    PropertyTestParams{"MULTI",
+                       "CPU,GPU",
+                       {{ov::loaded_from_cache.name(), true}},
+                       {{ov::loaded_from_cache.name(), true}}},
+    PropertyTestParams{"MULTI",
+                       "CPU,GPU",
+                       {{ov::loaded_from_cache.name(), true}},
+                       {{ov::loaded_from_cache.name(), false}}},
+    PropertyTestParams{"MULTI",
+                       "CPU,GPU",
+                       {{ov::loaded_from_cache.name(), false}},
+                       {{ov::loaded_from_cache.name(), false}}}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_AutoCompiledModelPropertyMockTest,
+                         CompiledModelPropertyMockTest,
+                         ::testing::ValuesIn(testCompiledModelProperty),
+                         CompiledModelPropertyMockTest::getTestCaseName);
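The supported/unsupported branches above boil down to one gmock idiom: default the mocked getter to `Return(...)` for supported properties and to `Throw(...)` otherwise. A toy, self-contained version of the pattern (the `PropertySource` types are illustrative, not OpenVINO classes):

```cpp
#include <stdexcept>
#include <string>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

struct PropertySource {
    virtual ~PropertySource() = default;
    virtual std::string get_property(const std::string& name) const = 0;
};

struct MockPropertySource : PropertySource {
    MOCK_METHOD(std::string, get_property, (const std::string&), (const, override));
};

TEST(PropertyMockSketch, supported_and_unsupported) {
    ::testing::NiceMock<MockPropertySource> mock;
    // Supported property: return a canned value by default.
    ON_CALL(mock, get_property(::testing::StrEq("supported")))
        .WillByDefault(::testing::Return("value"));
    // Unsupported property: throw, as the real compiled model would.
    ON_CALL(mock, get_property(::testing::StrEq("unsupported")))
        .WillByDefault(::testing::Throw(std::runtime_error{"unsupported property"}));

    EXPECT_EQ(mock.get_property("supported"), "value");
    EXPECT_THROW(mock.get_property("unsupported"), std::runtime_error);
}
```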
diff --git a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp
new file mode 100644
index 00000000000000..70dc61ed9aff95
--- /dev/null
+++ b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp
@@ -0,0 +1,142 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <gtest/gtest.h>
+#include <gmock/gmock.h>
+
+#include <memory>
+
+#include "async_infer_request.hpp"
+#include "common.hpp"
+#include "cumulative_schedule.hpp"
+#include "openvino/runtime/auto/properties.hpp"
+#include "plugin.hpp"
+using ConfigParams = std::tuple<std::vector<ov::auto_plugin::DeviceInformation>,  // device candidate list
+                                ov::intel_auto::SchedulePolicy,                   // schedule policy
+                                std::map<std::string, int>,  // number of infer requests for each device
+                                std::vector<std::string>     // the expected device where each infer request comes from
+                                >;
+class MockCumuSchedule : public ov::auto_plugin::CumuSchedule, public ::testing::TestWithParam<ConfigParams> {
+protected:
+    std::vector<ov::auto_plugin::DeviceInformation> devicesInfo;
+    ov::intel_auto::SchedulePolicy schedulePolicy;
+    std::map<std::string, int> numOfInferRequests;
+    std::vector<std::string> expectedScheDevs;
+
+public:
+    static std::string getTestCaseName(testing::TestParamInfo<ConfigParams> obj) {
+        std::vector<ov::auto_plugin::DeviceInformation> devicesInfo;
+        ov::intel_auto::SchedulePolicy schedulePolicy;
+        std::map<std::string, int> numOfInferRequests;
+        std::vector<std::string> expectedScheDevs;
+        std::tie(devicesInfo, schedulePolicy, numOfInferRequests, expectedScheDevs) = obj.param;
+        std::ostringstream result;
+        std::string candidateDevList;
+        result << "candidateDeviceList_";
+        for (auto dev : devicesInfo)
+            result << dev.device_name << "_";
+        result << "schedulePolicy_" << schedulePolicy << "_";
+        result << "inferRequestNumberOnEachDevice_";
+        for (auto ninfer : numOfInferRequests)
+            result << ninfer.first << "_" << ninfer.second << "_";
+        result << "expectedDeviceSelection_";
+        for (auto dev : expectedScheDevs)
+            result << dev << "_";
+        return result.str();
+    }
+
+    void TearDown() override {
+        devicesInfo.clear();
+        numOfInferRequests.clear();
+        expectedScheDevs.clear();
+        m_context.reset();
+    }
+
+    void SetUp() override {
+        std::tie(devicesInfo, schedulePolicy, numOfInferRequests, expectedScheDevs) = GetParam();
+        m_context = std::make_shared<ov::auto_plugin::ScheduleContext>();
+        m_context->m_schedule_policy = schedulePolicy;
+    }
+};
+
+TEST_P(MockCumuSchedule, scheduleInferRequestBasedOnSchedulePolicy) {
+    std::size_t deviceIndexWithInferReq = 0;
+    int expectedDevIndex = 0;
+    while (true) {
+        std::string actualSelectedDev;
+        ASSERT_NO_THROW(actualSelectedDev = schedule_to_next_device(devicesInfo, deviceIndexWithInferReq));
+        if (numOfInferRequests[actualSelectedDev] > 0) {
+            EXPECT_EQ(actualSelectedDev, expectedScheDevs[expectedDevIndex++]);
+            // consume an available infer request on the selected device
+            numOfInferRequests[actualSelectedDev]--;
+        } else {
+            // schedule to the next priority device
+            deviceIndexWithInferReq++;
+            if (deviceIndexWithInferReq >= devicesInfo.size()) {
+                // no available infer request on any of the devices
+                break;
+            }
+        }
+    }
+}
+
+const std::vector<ov::auto_plugin::DeviceInformation> metaDevicesWithSingleDev = {
+    {"DEVICE_0", {}, -1, "01", "DEVICE_0_01", 0}};
+const std::vector<ov::auto_plugin::DeviceInformation> metaDevicesWithTwoDevs = {
+    {"DEVICE_0", {}, -1, "01", "DEVICE_0_01", 0},
+    {"DEVICE_1", {}, -1, "01", "DEVICE_1_01", 1}};
+const std::vector<ov::auto_plugin::DeviceInformation> metaDevices = {{"DEVICE_0", {}, -1, "01", "DEVICE_0_01", 0},
+                                                                     {"DEVICE_1", {}, -1, "01", "DEVICE_1_01", 1},
+                                                                     {"DEVICE_2", {}, -1, "01", "DEVICE_2_01", 2}};
+const std::vector<ConfigParams> configs = {
+    ConfigParams{
+        metaDevicesWithSingleDev,                     // param[in]: device candidate list for AUTO plugin
+        ov::intel_auto::SchedulePolicy::ROUND_ROBIN,  // param[in]: specified schedule policy
+        {{"DEVICE_0", 6}},  // param[in]: a map recording the count of infer requests on each hw device
+        {"DEVICE_0",
+         "DEVICE_0",
+         "DEVICE_0",
+         "DEVICE_0",
+         "DEVICE_0",
+         "DEVICE_0"}},  // param[output]: the expected device list where the next available infer request comes from
+    ConfigParams{metaDevicesWithSingleDev,
+                 ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY,
+                 {{"DEVICE_0", 6}},
+                 {"DEVICE_0", "DEVICE_0", "DEVICE_0", "DEVICE_0", "DEVICE_0", "DEVICE_0"}},
+    ConfigParams{metaDevicesWithTwoDevs,
+                 ov::intel_auto::SchedulePolicy::ROUND_ROBIN,
+                 {{"DEVICE_0", 3}, {"DEVICE_1", 2}},
+                 {"DEVICE_0", "DEVICE_1", "DEVICE_0", "DEVICE_1", "DEVICE_0"}},
+    ConfigParams{metaDevicesWithTwoDevs,
ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY, + {{"DEVICE_0", 3}, {"DEVICE_1", 2}}, + {"DEVICE_0", "DEVICE_0", "DEVICE_0", "DEVICE_1", "DEVICE_1"}}, + ConfigParams{metaDevicesWithTwoDevs, + ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY, + {{"DEVICE_0", 2}, {"DEVICE_1", 3}}, + {"DEVICE_0", "DEVICE_0", "DEVICE_1", "DEVICE_1", "DEVICE_1"}}, + ConfigParams{metaDevices, + ov::intel_auto::SchedulePolicy::ROUND_ROBIN, + {{"DEVICE_0", 3}, {"DEVICE_1", 2}, {"DEVICE_2", 1}}, + {"DEVICE_0", "DEVICE_1", "DEVICE_2", "DEVICE_0", "DEVICE_1", "DEVICE_0"}}, + ConfigParams{metaDevices, + ov::intel_auto::SchedulePolicy::ROUND_ROBIN, + {{"DEVICE_0", 1}, {"DEVICE_1", 2}, {"DEVICE_2", 3}}, + {"DEVICE_0", "DEVICE_1", "DEVICE_2", "DEVICE_1", "DEVICE_2", "DEVICE_2"}}, + ConfigParams{metaDevices, + ov::intel_auto::SchedulePolicy::ROUND_ROBIN, + {{"DEVICE_0", 1}, {"DEVICE_1", 3}, {"DEVICE_2", 2}}, + {"DEVICE_0", "DEVICE_1", "DEVICE_2", "DEVICE_1", "DEVICE_2", "DEVICE_1"}}, + ConfigParams{metaDevices, + ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY, + {{"DEVICE_0", 1}, {"DEVICE_1", 3}, {"DEVICE_2", 2}}, + {"DEVICE_0", "DEVICE_1", "DEVICE_1", "DEVICE_1", "DEVICE_2", "DEVICE_2"}}, + ConfigParams{metaDevices, + ov::intel_auto::SchedulePolicy::DEVICE_PRIORITY, + {{"DEVICE_0", 3}, {"DEVICE_1", 2}, {"DEVICE_2", 1}}, + {"DEVICE_0", "DEVICE_0", "DEVICE_0", "DEVICE_1", "DEVICE_1", "DEVICE_2"}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, + MockCumuSchedule, + ::testing::ValuesIn(configs), + MockCumuSchedule::getTestCaseName); \ No newline at end of file diff --git a/src/plugins/auto_batch/CMakeLists.txt b/src/plugins/auto_batch/CMakeLists.txt index 9ff585740abc98..a8419940ff4c6c 100644 --- a/src/plugins/auto_batch/CMakeLists.txt +++ b/src/plugins/auto_batch/CMakeLists.txt @@ -9,7 +9,6 @@ endif() set(TARGET_NAME "openvino_auto_batch_plugin") file(GLOB SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp) - file(GLOB HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp) ov_add_plugin(NAME ${TARGET_NAME} @@ -18,6 +17,9 @@ ov_add_plugin(NAME ${TARGET_NAME} SOURCES ${SOURCES} ${HEADERS} VERSION_DEFINES_FOR src/plugin.cpp ADD_CLANG_FORMAT) +find_package(Threads REQUIRED) +target_link_libraries(${TARGET_NAME} PRIVATE Threads::Threads) + ov_set_threading_interface_for(${TARGET_NAME}) # must be called after all target_link_libraries diff --git a/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp b/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp index 91635a2c4b7294..b054f4a9816a06 100644 --- a/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp +++ b/src/plugins/auto_batch/tests/functional/behavior/plugin/auto_batching_tests.hpp @@ -12,6 +12,9 @@ #include "common_test_utils/test_common.hpp" #include "functional_test_utils/blob_utils.hpp" #include "ov_models/subgraph_builders.hpp" +#include "common_test_utils/subgraph_builders/single_conv.hpp" +#include "common_test_utils/subgraph_builders/detection_output.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using namespace ::testing; using namespace InferenceEngine; @@ -29,7 +32,7 @@ class AutoBatching_Test : public BehaviorTestsUtils::IEPluginTestBase, std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam(); // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() - fn_ptrs = {ngraph::builder::subgraph::makeSingleConv(), ngraph::builder::subgraph::makeMultiSingleConv()}; + fn_ptrs 
= {ov::test::utils::make_single_conv(), ov::test::utils::make_multi_single_conv()}; }; public: @@ -144,7 +147,7 @@ class AutoBatching_Test_DetectionOutput : public AutoBatching_Test { std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam(); // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() - fn_ptrs = {ngraph::builder::subgraph::makeDetectionOutput(), ngraph::builder::subgraph::makeDetectionOutput()}; + fn_ptrs = {ov::test::utils::make_detection_output(), ov::test::utils::make_detection_output()}; }; static std::string getTestCaseName(const testing::TestParamInfo& obj) { diff --git a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp index e4d03c4e832a93..646d7403df47e7 100644 --- a/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/async_infer_request_test.cpp @@ -13,6 +13,7 @@ #include "openvino/runtime/threading/immediate_executor.hpp" #include "transformations/utils/utils.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -115,7 +116,7 @@ class AutoBatchAsyncInferRequestTest : public ::testing::TestWithParamGetParam(); m_terminate = false; std::vector inputShape = {1, 3, 24, 24}; - m_model = ngraph::builder::subgraph::makeMultiSingleConv(inputShape, m_element_type); + m_model = ov::test::utils::make_multi_single_conv(inputShape, m_element_type); prepare_input(m_model, m_batch_size); diff --git a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp index 30b06945c4dfd9..39094d161393ca 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_create_infer_request_test.cpp @@ -10,6 +10,7 @@ #include "openvino/core/dimension_tracker.hpp" #include "openvino/runtime/threading/immediate_executor.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -83,7 +84,7 @@ class CompileModelCreateInferRequestTest : public ::testing::TestWithParamGetParam(); - m_model = ngraph::builder::subgraph::makeMultiSingleConv(); + m_model = ov::test::utils::make_multi_single_conv(); m_core = std::shared_ptr>(new NiceMock()); m_auto_batch_plugin = diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp index 805e2f5e672df8..b3fc8497c9f052 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_property_test.cpp @@ -9,6 +9,7 @@ #include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -67,7 +68,7 @@ class CompileModelGetPropertyTest : public ::testing::TestWithParamGetParam(); - m_model = ngraph::builder::subgraph::makeMultiSingleConv(); + m_model = ov::test::utils::make_multi_single_conv(); m_core = std::shared_ptr>(new NiceMock()); 
m_plugin = std::shared_ptr>(new NiceMock()); diff --git a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp index bbe0b1fb18a9a1..f338e6dd3e610a 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_get_runtime_model_test.cpp @@ -9,6 +9,7 @@ #include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -48,7 +49,7 @@ class CompileModelGetRuntimeModelTest : public ::testing::Test { } void SetUp() override { - m_model = ngraph::builder::subgraph::makeMultiSingleConv(); + m_model = ov::test::utils::make_multi_single_conv(); m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); diff --git a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp index a801b619566e94..ee03043a162c93 100644 --- a/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp +++ b/src/plugins/auto_batch/tests/unit/compile_model_set_property_test.cpp @@ -9,6 +9,7 @@ #include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -70,7 +71,7 @@ class CompileModelSetPropertyTest : public ::testing::TestWithParamGetParam(); - m_model = ngraph::builder::subgraph::makeMultiSingleConv(); + m_model = ov::test::utils::make_multi_single_conv(); m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); diff --git a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp index 8f06a44cff06a2..ebc52426bfe504 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_compile_model_test.cpp @@ -9,6 +9,8 @@ #include "ov_models/subgraph_builders.hpp" #include "openvino/core/dimension_tracker.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -118,17 +120,17 @@ class PluginCompileModelTest : public ::testing::TestWithParamcompile_model(m_model, m_plugin_properities)); } TEST_P(PluginCompileModelTest, PluginCompileModelWithRemoteContextTestCase) { - m_model = ngraph::builder::subgraph::makeMultiSingleConv(); + m_model = ov::test::utils::make_multi_single_conv(); ASSERT_NO_THROW(m_plugin->compile_model(m_model, m_plugin_properities, m_remote_context)); } TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelTestCase) { - m_model = ngraph::builder::subgraph::makeConvPoolReluNonZero({1, 1, 32, 32}); + m_model = ov::test::utils::make_conv_pool_relu_non_zero({1, 1, 32, 32}); auto batch = ov::Dimension(5); ov::DimensionTracker::set_label(batch, 11); auto p_shape = ov::PartialShape{batch, 1, 32, 32}; @@ -137,7 +139,7 @@ TEST_P(PluginCompileModelTest, PluginCompileModelBatchedModelTestCase) { } TEST_P(PluginCompileModelTest, 
PluginCompileModelBatchedModelWithRemoteContextTestCase) { - m_model = ngraph::builder::subgraph::makeConvPoolReluNonZero({1, 1, 32, 32}); + m_model = ov::test::utils::make_conv_pool_relu_non_zero({1, 1, 32, 32}); auto batch = ov::Dimension(5); ov::DimensionTracker::set_label(batch, 11); auto p_shape = ov::PartialShape{batch, 1, 32, 32}; diff --git a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp index e15ee5121f0611..d36945693bd51c 100644 --- a/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp +++ b/src/plugins/auto_batch/tests/unit/plugin_query_model_test.cpp @@ -8,6 +8,7 @@ #include "mock_common.hpp" #include "ov_models/subgraph_builders.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -60,7 +61,7 @@ class QueryModelTest : public ::testing::TestWithParam { void SetUp() override { std::tie(m_properties, m_throw_exception) = this->GetParam(); - m_model = ngraph::builder::subgraph::makeMultiSingleConv(); + m_model = ov::test::utils::make_multi_single_conv(); m_core = std::shared_ptr>(new NiceMock()); m_plugin = std::shared_ptr>(new NiceMock()); diff --git a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp index d05cc53ceb05e6..6d2b0a32a2b5ac 100644 --- a/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp +++ b/src/plugins/auto_batch/tests/unit/sync_infer_request_test.cpp @@ -13,6 +13,7 @@ #include "openvino/runtime/threading/immediate_executor.hpp" #include "transformations/utils/utils.hpp" #include "unit_test_utils/mocks/openvino/runtime/mock_icore.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" using ::testing::_; using ::testing::AnyNumber; @@ -97,7 +98,7 @@ class AutoBatchRequestTest : public ::testing::TestWithParamGetParam(); std::vector inputShape = {1, 3, 24, 24}; - m_model = ngraph::builder::subgraph::makeMultiSingleConv(inputShape, m_element_type); + m_model = ov::test::utils::make_multi_single_conv(inputShape, m_element_type); m_core = std::shared_ptr>(new NiceMock()); m_auto_batch_plugin = diff --git a/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp b/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp index 5e8536f8acc31c..04238e7eb5fd58 100644 --- a/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp +++ b/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp @@ -68,7 +68,7 @@ dnnl::memory::data_type DnnlExtensionUtils::ElementTypeToDataType(const ov::elem case ov::element::undefined: return memory::data_type::undef; default: { - OPENVINO_THROW("The plugin does not support ", elementType.to_string(), " for use with oneDNN"); + OPENVINO_THROW("CPU plugin does not support ", elementType.to_string(), " for use with oneDNN."); } } } diff --git a/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp index a765ac7c60dd91..7596c8117ab19a 100644 --- a/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/x64/cpu_generator.cpp @@ -66,7 +66,8 @@ intel_cpu::CPUTargetMachine::CPUTargetMachine(dnnl::impl::cpu::x64::cpu_isa_t ho // data movement jitters[op::v0::Parameter::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); jitters[op::v0::Result::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); - 
jitters[snippets::op::Buffer::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); + jitters[snippets::op::IntermediateMemoryBuffer::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); + jitters[snippets::op::NewMemoryBuffer::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); jitters[snippets::op::VectorBuffer::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); jitters[snippets::op::RankNormalization::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(NopEmitter); // jitters[op::v1::Constant::get_type_info_static()] = CREATE_CPU_EMITTER(); // Not supported diff --git a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp index b87b265a03f562..c54b031db04645 100644 --- a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.cpp @@ -729,9 +729,6 @@ void StoreConvertEmitter::emit_isa(const std::vector &in, const std::vec void StoreConvertEmitter::emit_data() const { store_emitter->emit_data(); } -size_t BrgemmEmitter::getBrgIdx(size_t kIdx, size_t nIdx) { - return kIdx * BRGEMM_N_KERNEL_NUM + nIdx; -} size_t BrgemmEmitter::get_in_leading_dim(const VectorDims& shape, const std::vector& layout) { // Input shape is original, so we need to correctly read this data by order @@ -761,17 +758,11 @@ size_t BrgemmEmitter::get_out_leading_dim(const VectorDims& shape, const std::ve } BrgemmEmitter::BrgemmEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : jit_emitter(h, isa) { - m_brgCtxs.fill(brgemmCtx()); - std::generate(m_brgKernels.begin(), m_brgKernels.end(), [](){ return nullptr; }); in_out_type_ = emitter_in_out_map::gpr_to_gpr; const auto& brgemm_node = as_type_ptr(expr->get_node()); - if (brgemm_node->is_dynamic()) - OPENVINO_THROW("Snippets don't support code generation for dynamic Brgemm"); - const auto brgemm_copy = brgemm_node->is_with_data_repacking() ? 
brgemm_node->get_brgemm_copy() : nullptr; + OPENVINO_ASSERT(!brgemm_node->is_dynamic(), "Snippets don't support code generation for dynamic Brgemm"); std::vector leading_dimensions; - std::vector> io_layouts; - auto get_layout = [](const std::vector& layout, const snippets::VectorDims& io_shape) { if (!layout.empty()) return layout; std::vector default_layout(io_shape.size()); @@ -780,45 +771,30 @@ BrgemmEmitter::BrgemmEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPt }; auto init_in_scheduling_params = [&](const snippets::lowered::PortDescriptorPtr& input) { - io_layouts.push_back(get_layout(input->get_layout(), input->get_shape())); - leading_dimensions.push_back(get_in_leading_dim(input->get_shape(), io_layouts.back())); + const auto& layout = get_layout(input->get_layout(), input->get_shape()); + leading_dimensions.push_back(get_in_leading_dim(input->get_shape(), layout)); }; auto init_out_scheduling_params = [&](const snippets::lowered::PortDescriptorPtr& output) { - io_layouts.push_back(get_layout(output->get_layout(), output->get_shape())); - leading_dimensions.push_back(get_out_leading_dim(output->get_shape(), io_layouts.back())); + const auto& layout = get_layout(output->get_layout(), output->get_shape()); + leading_dimensions.push_back(get_out_leading_dim(output->get_shape(), layout)); }; - init_in_scheduling_params(expr->get_input_port_descriptor(0)); + + const auto& input_0_desc = expr->get_input_port_descriptor(0); + const auto& input_1_desc = expr->get_input_port_descriptor(1); + const auto& output_desc = expr->get_output_port_descriptor(0); + + init_in_scheduling_params(input_0_desc); if (brgemm_node->is_with_data_repacking()) { - io_layouts.push_back(std::vector{}); - leading_dimensions.push_back(0); + const auto& brgemm_copy = brgemm_node->get_brgemm_copy(); + const auto& allocated_shape = brgemm_copy->get_data_repacking_shape(input_1_desc->get_shape()); + leading_dimensions.push_back(*allocated_shape.rbegin()); } else { - init_in_scheduling_params(expr->get_input_port_descriptor(1)); + init_in_scheduling_params(input_1_desc); } - init_out_scheduling_params(expr->get_output_port_descriptor(0)); - - const auto& A_shape = expr->get_input_port_descriptor(0)->get_shape(); - const auto& A_layout = io_layouts[0]; - const auto& C_shape = expr->get_output_port_descriptor(0)->get_shape(); - const auto& C_layout = io_layouts[2]; - - // We need find original M,N,K having layouts and ordered shapes - // Layout: 0, 1, 2, 3 => New layout: 0, 2, 1, 3 - // Shape: 1, 3, 5, 9 => New Shape: 1, 5, 3, 9 - // To find original 2nd dimension, we should find index of position value `2` in new layout - // and get dimension from new shape by this index - auto get_ordered_idx = [](const std::vector& layout, size_t idx) { - return std::distance(layout.begin(), std::find(layout.begin(), layout.end(), idx)); - }; - - m_K = A_shape[get_ordered_idx(A_layout, A_layout.size() - 1)]; - m_M = brgemm_node->get_input_count(0); - m_N = C_shape[get_ordered_idx(C_layout, C_layout.size() - 1)]; + init_out_scheduling_params(output_desc); - if (brgemm_node->is_with_data_repacking()) - leading_dimensions[1] = rnd_up(m_N, brgemm_copy->get_n_block_size()); - auto brg0Prc = brgemm_node->get_input_element_type(0); - auto brg1Prc = brgemm_node->get_input_element_type(1); - m_brg0VnniFactor = 4 / brg0Prc.size(); + const auto& brg0Prc = brgemm_node->get_input_element_type(0); + const auto& brg1Prc = brgemm_node->get_input_element_type(1); bool brgWithAMX = brgemm_node->is_amx(); io_data_size = {brg0Prc.size(), 
brg1Prc.size()}; @@ -829,59 +805,28 @@ BrgemmEmitter::BrgemmEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPt m_with_comp = brgemm_node->is_with_compensations(); m_with_scratch = brgemm_node->is_with_scratchpad(); - m_N_blk = brgemm_node->get_n_block_size(); - m_K_blk = brgemm_node->get_k_block_size(); - m_N_tail = m_N % m_N_blk; - m_K_tail = m_K % m_K_blk; - - m_N_blk_loop = m_N >= 2 * m_N_blk; - m_K_blk_loop = m_K >= 3 * m_K_blk; - OPENVINO_ASSERT((!brgemm_node->is_with_data_repacking()) || (!m_N_blk_loop && !m_K_blk_loop), - "BrgemmEmitter doesn't support blocking by K, N dimensions when data repacking is needed!"); - - auto N = [&](size_t n) { - switch (n) { - case 0: return m_N_blk; - case 1: return m_N_tail; - default: OPENVINO_THROW("BrgemmEmitter detected unsupported N value"); - } - }; - auto K = [&](size_t k) { - switch (k) { - case 0: return m_K_blk; - case 1: return m_K >= 2 * m_K_blk ? m_K_blk : 0; - case 2: return m_K_tail; - default: OPENVINO_THROW("BrgemmEmitter detected unsupported K value"); - } - }; - - bool has_K_kernel = false; - for (size_t k = 0; k < BRGEMM_K_KERNEL_NUM; k++) { - bool has_N_kernel = false; - for (size_t n = 0; n < BRGEMM_N_KERNEL_NUM; n++) { - const size_t kernel_idx = getBrgIdx(k, n); - auto& brgemmCtx = m_brgCtxs[kernel_idx]; - - brgemmCtx.M = m_M; - brgemmCtx.N = N(n); - brgemmCtx.K = K(k); - brgemmCtx.LDA = leading_dimensions[0]; - brgemmCtx.LDB = leading_dimensions[1]; - brgemmCtx.LDC = leading_dimensions[2]; - brgemmCtx.dt_in0 = static_cast(DnnlExtensionUtils::ElementTypeToDataType(brg0Prc)); - brgemmCtx.dt_in1 = static_cast(DnnlExtensionUtils::ElementTypeToDataType(brg1Prc)); - brgemmCtx.beta = has_K_kernel ? 1 : 0; - - if (brgemmCtx.N == 0 || brgemmCtx.N > m_N || - brgemmCtx.K == 0 || brgemmCtx.K > m_K) - continue; - - initBrgemm(brgemmCtx, m_brgKernels[kernel_idx], brgWithAMX); - has_N_kernel = true; - } - if (has_N_kernel) - has_K_kernel = true; - } + const auto& output_subtensor = output_desc->get_subtensor(); + const auto& input_0_subtensor = input_0_desc->get_subtensor(); + const auto& input_1_subtensor = input_1_desc->get_subtensor(); + + OPENVINO_ASSERT(*(output_subtensor.rbegin() + 1) == *(input_0_subtensor.rbegin() + 1), + "Brgemm has different M dimension subtensors on input0 and output"); + OPENVINO_ASSERT(*output_subtensor.rbegin() == *input_1_subtensor.rbegin(), + "Brgemm has different N dimension subtensors on input1 and output"); + OPENVINO_ASSERT(*input_0_subtensor.rbegin() == *(input_1_subtensor.rbegin() + 1), + "Brgemm has different K dimension subtensors on input0 and input1"); + + m_brgCtx.M = *(output_subtensor.rbegin() + 1); + m_brgCtx.N = *output_subtensor.rbegin(); + m_brgCtx.K = *input_0_subtensor.rbegin(); + m_brgCtx.LDA = leading_dimensions[0]; + m_brgCtx.LDB = leading_dimensions[1]; + m_brgCtx.LDC = leading_dimensions[2]; + m_brgCtx.dt_in0 = static_cast(DnnlExtensionUtils::ElementTypeToDataType(brg0Prc)); + m_brgCtx.dt_in1 = static_cast(DnnlExtensionUtils::ElementTypeToDataType(brg1Prc)); + m_brgCtx.beta = brgemm_node->get_beta(); + + initBrgemm(m_brgCtx, m_brgKernel, brgWithAMX); m_load_offset_a = brgemm_node->get_offset_a(); m_load_offset_b = brgemm_node->get_offset_b(); @@ -911,28 +856,8 @@ std::set> BrgemmEmitter::get_supported_precisions(con } void BrgemmEmitter::validate_arguments(const std::vector &in, const std::vector &out) const { - std::set unique_ids{in[0], in[1], out[0]}; - size_t unique_ids_count = 3; - auto add_reg_to_unique_ids = [&](const size_t reg_number) { - 
unique_ids.insert(reg_number); - unique_ids_count++; - }; - - if (m_N_blk_loop || m_K_blk_loop) { - if (aux_gpr_idxs.size() < static_cast(m_N_blk_loop) + static_cast(m_K_blk_loop)) - OPENVINO_THROW("BRGEMM Emitter requires extra gpr which was not allocated"); - if (m_N_blk_loop) - add_reg_to_unique_ids(aux_gpr_idxs[0]); - if (m_K_blk_loop) - add_reg_to_unique_ids(aux_gpr_idxs[m_N_blk_loop]); - } - if (m_with_scratch) { - if (in.size() != 3) - OPENVINO_THROW("BRGEMM Emitter expects 3 inputs if there are compensations/wsp"); - add_reg_to_unique_ids(in[2]); - } - if (unique_ids.size() != unique_ids_count) { - OPENVINO_THROW("BRGEMM Emitter expects that all input/output registers are unique"); + if (m_with_scratch && in.size() != 3) { + IE_THROW() << "BRGEMM Emitter expects 3 inputs if there are compensations/wsp"; } } @@ -960,118 +885,24 @@ void BrgemmEmitter::initBrgemm(brgemmCtx& ctx, std::unique_ptr& brgKernel.reset(brgKernel_); } -size_t BrgemmEmitter::aux_gprs_count() const { - return m_N_blk_loop + m_K_blk_loop; -} - -void BrgemmEmitter::emit_N_blocking_loops(size_t k_kernel_id, - const Xbyak::Reg64& input_0, const Xbyak::Reg64& input_1, - const Xbyak::Reg64& input_2, const Xbyak::Reg64& output_0, - const Xbyak::Reg64& work_amount_N) const { - // Blocked N loop - size_t kernel_idx = getBrgIdx(k_kernel_id, 0); - if (m_brgKernels[kernel_idx]) { - const auto& brgemmCtx = m_brgCtxs[kernel_idx]; - Label N_loop_begin; - if (m_N_blk_loop) { - h->mov(work_amount_N, m_N); - h->L(N_loop_begin); - } - - emit_brgemm_kernel_call(m_brgKernels[kernel_idx].get(), brgemmCtx, input_0, input_1, input_2, output_0); - // We don't need to increment pointers if we cover full N dimension in one kernel call - if (m_N_blk_loop || m_N_tail != 0) { - h->add(output_0, brgemmCtx.N * io_data_size.back()); - h->add(input_1, brgemmCtx.N * io_data_size[1]); - if (m_with_scratch && m_with_comp) - h->add(input_2, brgemmCtx.N * io_data_size[2]); - } - - if (m_N_blk_loop) { - h->sub(work_amount_N, brgemmCtx.N); - h->cmp(work_amount_N, brgemmCtx.N); - h->jge(N_loop_begin); - } - } - // N loop tail - kernel_idx = getBrgIdx(k_kernel_id, 1); - if (m_brgKernels[kernel_idx]) - emit_brgemm_kernel_call(m_brgKernels[kernel_idx].get(), m_brgCtxs[kernel_idx], input_0, input_1, input_2, output_0); - - if (m_N_blk_loop || m_N_tail != 0) { - h->sub(input_1, (m_N - m_N_tail) * io_data_size[1]); - h->sub(output_0, (m_N - m_N_tail) * io_data_size.back()); - if (m_with_scratch && m_with_comp) - h->sub(input_2, (m_N - m_N_tail) * io_data_size[2]); - } -} - void BrgemmEmitter::emit_impl(const std::vector& in, const std::vector& out) const { validate_arguments(in, out); if (host_isa_ == cpu::x64::avx512_core) { Xbyak::Reg64 input_0(static_cast(in[0])); Xbyak::Reg64 input_1(static_cast(in[1])); - Xbyak::Reg64 input_2(static_cast(0)); // scratch. Default reg index is 0 if there isn't scratch + Xbyak::Reg64 input_2(static_cast(m_with_scratch ? in[2] : 0)); // scratch. Default reg index is 0 if there isn't scratch Xbyak::Reg64 output_0(static_cast(out[0])); - Xbyak::Reg64 work_amount_N(m_N_blk_loop ? static_cast(aux_gpr_idxs[0]) : 0); - Xbyak::Reg64 work_amount_K(m_K_blk_loop ? 
static_cast(aux_gpr_idxs[m_N_blk_loop]) : 0); - h->add(input_0, m_load_offset_a); - h->add(input_1, m_load_offset_b); - h->add(output_0, m_store_offset_c); - if (m_with_scratch) { - input_2 = Xbyak::Reg64(static_cast(in[2])); - h->add(input_2, m_load_offset_scratch); - } - - // fills kernel_idx with the first idx of non-empty K kernel or returns false - auto get_K_kernel_idx = [&](size_t k_kernel_id, size_t& kernel_idx) { - for (size_t n = 0; n < BRGEMM_N_KERNEL_NUM; n++) { - const auto idx = getBrgIdx(k_kernel_id, n); - if (m_brgKernels[idx]) { - kernel_idx = idx; - return true; - } - } - return false; - }; - // Blocked K loop - const auto k_tail_id = BRGEMM_K_KERNEL_NUM - 1; - size_t total_K_work_amount = m_K; - size_t kernel_idx = SIZE_MAX; - for (size_t k_blocked_id = 0; k_blocked_id < k_tail_id; k_blocked_id++) { - if (get_K_kernel_idx(k_blocked_id, kernel_idx)) { - const auto& brgemmCtx = m_brgCtxs[kernel_idx]; - Label K_loop_begin; - // Note: we never emit loop for the first blocked kernel, since it always executed only once. - // The purpose of the first blocked K kernel is to initializes output, because it has beta = 0 - if (k_blocked_id == 0) { - total_K_work_amount -= brgemmCtx.K; - } else if (m_K_blk_loop) { - h->mov(work_amount_K, total_K_work_amount); - h->L(K_loop_begin); - } - - emit_N_blocking_loops(k_blocked_id, input_0, input_1, input_2, output_0, work_amount_N); - h->add(input_0, brgemmCtx.K * io_data_size[0]); - h->add(input_1, (brgemmCtx.K * brgemmCtx.LDB) * io_data_size[1]); - if (m_K_blk_loop && k_blocked_id) { - h->sub(work_amount_K, brgemmCtx.K); - h->cmp(work_amount_K, brgemmCtx.K); - h->jge(K_loop_begin); - } - } - } - // K loop tail - if (get_K_kernel_idx(k_tail_id, kernel_idx)) { - emit_N_blocking_loops(k_tail_id, input_0, input_1, input_2, output_0, work_amount_N); - } - - h->sub(input_0, m_load_offset_a + (m_K - m_K_tail) * io_data_size[0]); - h->sub(input_1, m_load_offset_b + (m_K - m_K_tail) * m_brgCtxs[0].LDB * io_data_size[1]); - if (m_with_scratch) - h->sub(input_2, m_load_offset_scratch); - h->sub(output_0, m_store_offset_c); + emit_brgemm_kernel_call(m_brgKernel.get(), + m_brgCtx, + input_0, + input_1, + input_2, + output_0, + m_load_offset_a, + m_load_offset_b, + m_load_offset_scratch, + m_store_offset_c); } else { OPENVINO_THROW("BrgemmEmitter requires at least avx512_core instruction set"); } diff --git a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp index c2e3a07cfa108a..2ee34f34fef8d8 100644 --- a/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/x64/jit_snippets_emitters.hpp @@ -365,7 +365,6 @@ class BrgemmEmitter : public jit_emitter { size_t get_inputs_num() const override { return m_with_scratch ? 
3 : 2; } static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); - size_t aux_gprs_count() const override; static size_t get_in_leading_dim(const VectorDims& shape, const std::vector& layout); static size_t get_out_leading_dim(const VectorDims& shape, const std::vector& layout); @@ -387,30 +386,15 @@ class BrgemmEmitter : public jit_emitter { float beta; }; static void initBrgemm(brgemmCtx& ctx, std::unique_ptr& brgKernel, bool use_amx); - static size_t getBrgIdx(size_t kIdx, size_t nIdx); void emit_brgemm_kernel_call(const dnnl::impl::cpu::x64::brgemm_kernel_t* brg_kernel, const brgemmCtx& ctx, Xbyak::Reg64 addr_A, Xbyak::Reg64 addr_B, Xbyak::Reg64 scratch, Xbyak::Reg64 addr_C, size_t in0_kernel_offset = 0, size_t in1_kernel_offset = 0, size_t in2_kernel_offset = 0, size_t out0_kernel_offset = 0) const; static void kernel_execute(const dnnl::impl::cpu::x64::brgemm_kernel_t *brg_kernel, const void *A, const void *B, void *C, void *scratch, int with_comp); - void emit_N_blocking_loops(size_t k_kernel_id, - const Xbyak::Reg64& input_0, const Xbyak::Reg64& input_1, - const Xbyak::Reg64& input_2, const Xbyak::Reg64& output_0, - const Xbyak::Reg64& work_amount_N) const; - - // Note: K dimension is covered by TWO blocked kernels (with beta = 0 and 1) + 1 for tail - static constexpr size_t BRGEMM_K_KERNEL_NUM = 3; - static constexpr size_t BRGEMM_N_KERNEL_NUM = 2; - std::array m_brgCtxs; - std::array, BRGEMM_K_KERNEL_NUM * BRGEMM_N_KERNEL_NUM> m_brgKernels; - - size_t m_M; - size_t m_K, m_K_blk, m_K_tail; - size_t m_N, m_N_blk, m_N_tail; - size_t m_brg0VnniFactor; - bool m_N_blk_loop = false; - bool m_K_blk_loop = false; + + brgemmCtx m_brgCtx; + std::unique_ptr m_brgKernel = nullptr; bool m_with_scratch = false; bool m_with_comp = false; diff --git a/src/plugins/intel_cpu/src/extension.cpp b/src/plugins/intel_cpu/src/extension.cpp index 373c4b90e8cb93..faa3368f39c163 100644 --- a/src/plugins/intel_cpu/src/extension.cpp +++ b/src/plugins/intel_cpu/src/extension.cpp @@ -140,7 +140,6 @@ std::map Extension::getOpSets() { #define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); NGRAPH_OP(Brgemm, ov::snippets::op) - NGRAPH_OP(Buffer, ov::snippets::op) NGRAPH_OP(BroadcastLoad, ov::snippets::op) NGRAPH_OP(BroadcastMove, ov::snippets::op) NGRAPH_OP(ConvertSaturation, ov::snippets::op) @@ -149,10 +148,12 @@ std::map Extension::getOpSets() { NGRAPH_OP(HorizonMax, ov::snippets::op) NGRAPH_OP(HorizonSum, ov::snippets::op) NGRAPH_OP(Kernel, ov::snippets::op) + NGRAPH_OP(IntermediateMemoryBuffer, ov::snippets::op) NGRAPH_OP(Load, ov::snippets::op) NGRAPH_OP(LoadReshape, ov::snippets::op) NGRAPH_OP(LoopBegin, ov::snippets::op) NGRAPH_OP(LoopEnd, ov::snippets::op) + NGRAPH_OP(NewMemoryBuffer, ov::snippets::op) NGRAPH_OP(Nop, ov::snippets::op) NGRAPH_OP(PowerStatic, ov::snippets::op) NGRAPH_OP(Scalar, ov::snippets::op) diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index d19c6b9bbcd649..fd58805a5cf59e 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -47,7 +47,6 @@ #include "utils/ngraph_utils.hpp" #include "utils/node_dumper.h" #include "utils/verbose.h" -#include "memory_desc/cpu_memory_desc_utils.h" #include "openvino/runtime/memory_solver.hpp" @@ -898,61 +897,23 @@ void Graph::PushInputData(const std::string& name, const ov::SoPtr& inp if (!IsReady()) OPENVINO_THROW("Wrong state. 
Topology not ready."); auto input_itr = inputNodesMap.find(name); if (input_itr != inputNodesMap.end()) { - auto create_mem_desc = [&](const ov::SoPtr& tensor) -> CpuBlockedMemoryDesc { - auto element_type = tensor->get_element_type(); - auto shape = tensor->get_shape(); - if (shape.empty()) - shape = {tensor->get_size()}; - std::vector blk_order(shape.size()); - std::iota(blk_order.begin(), blk_order.end(), 0); - std::vector dim_offset(shape.size(), 0); - std::vector blk_strides; - auto byte_strides = element_type.bitwidth() >= 8 ? tensor->get_strides() : Strides{}; - if (byte_strides.empty()) { - blk_strides = ov::row_major_strides(shape); - } else { - // ROI tensor need figure out correct blk_strides - blk_strides.resize(byte_strides.size()); - std::transform(byte_strides.begin(), - byte_strides.end(), - blk_strides.begin(), - [&element_type](size_t byte_stride) { - OPENVINO_ASSERT(byte_stride % element_type.size() == 0, - "Limitation: Stride in bytes ", - byte_stride, - " should be divisible by size of element ", - element_type.size()); - return byte_stride / element_type.size(); - }); - } - InferenceEngine::TensorDesc tensorDesc( - InferenceEngine::details::convertPrecision(tensor->get_element_type()), - shape, - InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}); - return MemoryDescUtils::convertToCpuBlockedMemoryDesc(tensorDesc); - }; - auto node = input_itr->second; auto childEdge = node->getChildEdgeAt(0); - const auto& outDims = node->getOutputShapeAtPort(0); + auto edgeMemory = childEdge->getMemoryPtr(); const void* ext_data_ptr = input->data(); - void* inter_data_ptr = childEdge->getMemory().getData(); + void* inter_data_ptr = edgeMemory->getData(); if (ext_data_ptr != inter_data_ptr) { - auto ext_tensor_desc = create_mem_desc(input); - Memory ext_mem(getEngine(), ext_tensor_desc, ext_data_ptr, false); - childEdge->getMemory().load(ext_mem, false); - } + auto ext_tensor_desc = MemoryDescUtils::generateCpuBlockedMemoryDesc(input); + auto actualDesc = edgeMemory->getDescPtr(); - // todo: make sure 'name' exists in this map... 
- if (_normalizePreprocMap.find(name) != _normalizePreprocMap.end()) { - if (input->get_element_type() == ov::element::f32) { - _normalizePreprocMap[name].NormalizeImage(outDims, - reinterpret_cast(inter_data_ptr), - TensorDesc::getLayoutByDims(input->get_shape())); + if (!actualDesc->isCompatible(*ext_tensor_desc)) { + Memory ext_mem(getEngine(), ext_tensor_desc, ext_data_ptr, false); + edgeMemory->load(ext_mem, false); } else { - OPENVINO_THROW("Mean image of type ", input->get_element_type().get_type_name(), " is unsupported"); + size_t size_to_copy = ext_tensor_desc->getCurrentMemSize(); + cpu_parallel_memcpy(inter_data_ptr, ext_data_ptr, size_to_copy); } } } else { @@ -977,44 +938,32 @@ void Graph::PullOutputData(std::unordered_map>& OPENVINO_THROW("The CPU plugin graph doesn't contain output node with name: ", name.c_str()); } - InferenceEngine::TensorDesc expectedDesc( - InferenceEngine::details::convertPrecision(ext_blob->get_element_type()), - ext_blob->get_shape(), - InferenceEngine::TensorDesc::getLayoutByRank(ext_blob->get_shape().size())); - DEBUG_LOG(name, ", tensor data addr ", static_cast(output[name]->data())); + auto expected_desc_ptr = MemoryDescUtils::generateCpuBlockedMemoryDesc(ext_blob); + const auto actualDesc = intr_blob.getDescWithType(); - const auto actualDesc = MemoryDescUtils::convertToTensorDesc(intr_blob.getDesc()); + DEBUG_LOG(name, ", tensor data addr ", static_cast(output[name]->data())); // TODO [NM]: need to create universal reorder which will be detect cases when we really need to use it // WA: for cases when output shape after transformation will be 1x1x1x1 but model output is scalar bool isScalarOutput = false; - if (actualDesc.getLayout() == SCALAR) { - isScalarOutput = expectedDesc.getLayout() == SCALAR || - (!expectedDesc.getDims().empty() && - std::accumulate(expectedDesc.getDims().begin(), expectedDesc.getDims().end(), (size_t)1, std::multiplies()) == 1); - } else if (expectedDesc.getLayout() == SCALAR) { - isScalarOutput = actualDesc.getLayout() == SCALAR || - (!actualDesc.getDims().empty() && - std::accumulate(actualDesc.getDims().begin(), actualDesc.getDims().end(), (size_t)1, std::multiplies()) == 1); + if (ext_blob->get_shape().empty() && ext_blob->get_size() == 1) { + const auto& actualDims = expected_desc_ptr->getShape().getStaticDims(); + isScalarOutput = + !actualDims.empty() && + std::accumulate(actualDims.begin(), actualDims.end(), (size_t)1, std::multiplies()) == 1; } auto outDims = intr_blob.getStaticDims(); if (ext_blob->get_shape() != outDims && !isScalarOutput) { // WA: because input/output info initially contains non empty dims, order etc. 
// and setDims (called inside setShape) can't correct modify blocked desc for desc with blocked layout - if (expectedDesc.getLayout() == InferenceEngine::Layout::BLOCKED) { - expectedDesc = TensorDesc(expectedDesc.getPrecision(), expectedDesc.getLayout()); - } DEBUG_LOG(name, ", tensor data addr ", static_cast(output[name]->data()), " dims ", PartialShape(output[name]->get_shape()), " -> ", PartialShape(outDims), ", intr ptr ", intr_blob.getData(), " , parentedge's memory object ", parentEdge->getMemoryPtr().get()); ext_blob->set_shape(outDims); DEBUG_LOG(name, ", tensor data addr ", static_cast(output[name]->data()), " dims ", PartialShape(output[name]->get_shape()), ", intr ptr ", intr_blob.getData()); - expectedDesc = - InferenceEngine::TensorDesc(InferenceEngine::details::convertPrecision(ext_blob->get_element_type()), - ext_blob->get_shape(), - InferenceEngine::TensorDesc::getLayoutByRank(ext_blob->get_shape().size())); + expected_desc_ptr = MemoryDescUtils::generateCpuBlockedMemoryDesc(ext_blob); } // check for empty output blob @@ -1022,8 +971,8 @@ void Graph::PullOutputData(std::unordered_map>& continue; } - auto srcPrec = actualDesc.getPrecision(); - auto dstPrec = expectedDesc.getPrecision(); + auto srcPrec = actualDesc->getPrecision(); + auto dstPrec = expected_desc_ptr->getPrecision(); if (!getConfig().isLegacyApi && srcPrec == dstPrec && ext_blob->get_byte_size() != intr_blob.getSize()) OPENVINO_THROW("Output blob byte size is not equal network output byte size (", ext_blob->get_byte_size(), @@ -1038,24 +987,13 @@ void Graph::PullOutputData(std::unordered_map>& // That is the same memory. No need to copy if (ext_blob_ptr == intr_blob_ptr) continue; - if (actualDesc.getBlockingDesc() != expectedDesc.getBlockingDesc() && !isScalarOutput) { - // User can initialize output via SetOutput API using tensorDesc with ANY layout. - // For these cases we create planar memory descriptor. - auto outBlobDesc = - expectedDesc.getLayout() == InferenceEngine::Layout::ANY - ? 
diff --git a/src/plugins/intel_cpu/src/graph.h b/src/plugins/intel_cpu/src/graph.h
index 890b9de8bcf604..955cbe014fd7a4 100644
--- a/src/plugins/intel_cpu/src/graph.h
+++ b/src/plugins/intel_cpu/src/graph.h
@@ -11,7 +11,6 @@
 #include "edge.h"
 #include "graph_context.h"
 #include "node.h"
-#include "normalize_preprocess.h"
 #include "openvino/runtime/make_tensor.hpp"
 #include "openvino/runtime/profiling_info.hpp"

@@ -60,10 +59,6 @@ class Graph {
                     const GraphContext::CPtr ctx,
                     std::string name);

-    bool hasMeanImageFor(const std::string& name) {
-        return _normalizePreprocMap.find(name) != _normalizePreprocMap.end();
-    }
-
    void PushInputData(const std::string& name, const ov::SoPtr<ov::ITensor>& input);
    void PullOutputData(std::unordered_map<std::string, ov::SoPtr<ov::ITensor>>& output);

@@ -212,7 +207,6 @@ class Graph {
        outputNodesMap.clear();
        graphNodes.clear();
        graphEdges.clear();
-        _normalizePreprocMap.clear();
        syncNodesInds.clear();
    }
    Status status { Status::NotReady };
@@ -228,7 +222,6 @@ class Graph {
    std::vector<NodePtr> graphNodes;
    std::vector<EdgePtr> graphEdges;

-    std::map<std::string, NormalizePreprocess> _normalizePreprocMap;
    std::string _name;

    bool graphHasDynamicInput = false;
diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp
index 2ca2913b7ccdc6..b3620b37623cbc 100644
--- a/src/plugins/intel_cpu/src/infer_request.cpp
+++ b/src/plugins/intel_cpu/src/infer_request.cpp
@@ -332,37 +332,6 @@ void SyncInferRequest::throw_if_canceled() const {
    }
}

-static InferenceEngine::TensorDesc create_tensor_desc(const ov::SoPtr<ov::ITensor>& tensor) {
-    auto element_type = tensor->get_element_type();
-    auto shape = tensor->get_shape();
-    std::vector<size_t> blk_order(shape.size());
-    std::iota(blk_order.begin(), blk_order.end(), 0);
-    std::vector<size_t> dim_offset(shape.size(), 0);
-    std::vector<size_t> blk_strides;
-    auto byte_strides = element_type.bitwidth() >= 8 ?
tensor->get_strides() : Strides{};
-    if (byte_strides.empty()) {
-        blk_strides = ov::row_major_strides(shape);
-    } else {
-        blk_strides.resize(byte_strides.size());
-        std::transform(byte_strides.begin(),
-                       byte_strides.end(),
-                       blk_strides.begin(),
-                       [&element_type](size_t byte_stride) {
-                           OPENVINO_ASSERT(byte_stride % element_type.size() == 0,
-                                           "Limitation: Stride in bytes ",
-                                           byte_stride,
-                                           " should be divisible by size of element ",
-                                           element_type.size());
-                           return byte_stride / element_type.size();
-                       });
-    }
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    return InferenceEngine::TensorDesc{InferenceEngine::details::convertPrecision(element_type),
-                                       shape,
-                                       InferenceEngine::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}};
-    OPENVINO_SUPPRESS_DEPRECATED_END
-}
-
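The stride conversion this deleted helper performed now lives in MemoryDescUtils::generateCpuBlockedMemoryDesc (see the hunk further below): ov::Tensor reports strides in bytes, while the blocked descriptor stores them in elements. A self-contained sketch of the same divisibility rule, simplified to throw instead of using OPENVINO_ASSERT:

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

// Convert byte strides to element strides; every byte stride must divide
// evenly by the element size, exactly as the deleted lambda asserted.
std::vector<size_t> to_element_strides(const std::vector<size_t>& byte_strides,
                                       size_t element_size) {
    std::vector<size_t> out;
    out.reserve(byte_strides.size());
    for (size_t s : byte_strides) {
        if (s % element_size != 0)
            throw std::runtime_error("stride in bytes is not divisible by element size");
        out.push_back(s / element_size);
    }
    return out;
}

int main() {
    // f32 ROI view over a 4x6 parent tensor: byte strides {96, 4} -> element strides {24, 1}
    for (size_t s : to_element_strides({96, 4}, sizeof(float)))
        std::cout << s << " ";
    std::cout << "\n";
}
```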
 ov::SoPtr<ov::ITensor> SyncInferRequest::get_tensor(const ov::Output<const ov::Node>& in_port) const {
     auto port = get_internal_port(in_port);
     return ov::ISyncInferRequest::get_tensor(port);
 }
@@ -398,7 +367,7 @@ void SyncInferRequest::set_tensor(const ov::Output<const ov::Node>& in_port, con
         tensor = ov::make_tensor(in_tensor->get_element_type(), in_port.get_shape(), in_tensor->data());
     }
     auto name = get_port_name(in_port, m_is_legacy_api);
-    auto tensor_desc = create_tensor_desc(tensor);
+    auto mem_desc_ptr = MemoryDescUtils::generateCpuBlockedMemoryDesc(tensor);
     bool is_input = ov::op::util::is_parameter(port.get_node());
     if (is_input) {
         const auto netInPrc = port.get_element_type();
@@ -436,14 +405,11 @@ void SyncInferRequest::set_tensor(const ov::Output<const ov::Node>& in_port, con
            // we must define the desc for the dynamic case,
            // otherwise we get an incorrect shape-compatibility check inside isCompatible,
            // because the lower and upper bounds would be compared
-            OPENVINO_SUPPRESS_DEPRECATED_START
-            actualDesc = actualDesc->cloneWithNewDims(tensor_desc.getLayout() == InferenceEngine::Layout::SCALAR
-                                                          ? InferenceEngine::SizeVector{1}
-                                                          : tensor_desc.getDims());
-            OPENVINO_SUPPRESS_DEPRECATED_END
+            actualDesc = actualDesc->cloneWithNewDims(
+                ov::is_scalar(tensor->get_shape()) ? VectorDims{1} : VectorDims{tensor->get_shape()});
        }
-        if (actualDesc->isCompatible(MemoryDescUtils::convertToCpuBlockedMemoryDesc(tensor_desc)) &&
-            m_graph->_normalizePreprocMap.find(name) == m_graph->_normalizePreprocMap.end()) {
+
+        if (actualDesc->isCompatible(*mem_desc_ptr)) {
            m_external_ptr[name] = tensor;
        } else if (m_external_ptr.find(name) != m_external_ptr.end()) {
            m_external_ptr.erase(name);
@@ -481,7 +447,7 @@ void SyncInferRequest::set_tensor(const ov::Output<const ov::Node>& in_port, con
        }

        const auto& desc = m_graph->getOutputNodeByName(name)->getParentEdgesAtPort(0)[0]->getMemory().getDesc();
-        if (!isDynamic && tensor_desc == MemoryDescUtils::convertToTensorDesc(desc)) {
+        if (!isDynamic && mem_desc_ptr->isCompatible(desc)) {
            m_external_ptr[name] = tensor;
        } else if (m_external_ptr.find(name) != m_external_ptr.end()) {
            m_external_ptr.erase(name);
@@ -538,12 +504,12 @@ void SyncInferRequest::init_tensor(const std::string& name) {
            tensor = ov::make_tensor(port.get_element_type(), tensor_shape);
            ov::ISyncInferRequest::set_tensor(port, tensor);

-            auto desc = create_tensor_desc(tensor);
-            if (!isDynamic &&
-                desc == MemoryDescUtils::convertToTensorDesc(
-                    m_graph->getInputNodeByName(name)->getChildEdgesAtPort(0)[0]->getMemory().getDesc()) &&
-                m_graph->_normalizePreprocMap.find(name) == m_graph->_normalizePreprocMap.end()) {
-                m_external_ptr[name] = tensor;
+            if (!isDynamic) {
+                auto mem_desc_ptr = MemoryDescUtils::generateCpuBlockedMemoryDesc(tensor);
+                if (mem_desc_ptr->isCompatible(
+                        m_graph->getInputNodeByName(name)->getChildEdgesAtPort(0)[0]->getMemory().getDesc())) {
+                    m_external_ptr[name] = tensor;
+                }
            }
        }
    }
@@ -626,11 +592,11 @@ void SyncInferRequest::init_tensor(const std::string& name) {
                }
            }
            m_outputs[name] = tensor;
-            auto desc = create_tensor_desc(tensor);
-            if (!port_shape.is_dynamic() && !m_external_ptr.count(name) &&
-                desc == MemoryDescUtils::convertToTensorDesc(
-                    output->second->getParentEdgesAtPort(0)[0]->getMemory().getDesc())) {
-                m_external_ptr[name] = tensor;
+            if (!port_shape.is_dynamic() && !m_external_ptr.count(name)) {
+                auto desc = MemoryDescUtils::generateCpuBlockedMemoryDesc(tensor);
+                if (desc->isCompatible(output->second->getParentEdgesAtPort(0)[0]->getMemory().getDesc())) {
+                    m_external_ptr[name] = tensor;
+                }
            }
            // update tensors in case of multiple output ports with the same name
            for (const auto& out : get_outputs()) {
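All of the m_external_ptr hunks above implement one bookkeeping rule: a user tensor stays registered for zero-copy only while its descriptor is compatible with the graph's edge memory; otherwise any stale entry is dropped and the plugin falls back to a copy at infer time. A compilable sketch of that rule with stand-in types (MemDesc and TensorPtr are placeholders, not the plugin's classes):

```cpp
#include <map>
#include <memory>
#include <string>

struct MemDesc {
    bool isCompatible(const MemDesc&) const { return true; }  // stand-in check
};
using TensorPtr = std::shared_ptr<int>;  // stand-in for ov::SoPtr<ov::ITensor>

// Remember the tensor for zero-copy when descriptors match, else forget it.
void update_external_ptr(std::map<std::string, TensorPtr>& external,
                         const std::string& name,
                         const TensorPtr& tensor,
                         const MemDesc& user_desc,
                         const MemDesc& graph_desc) {
    if (user_desc.isCompatible(graph_desc))
        external[name] = tensor;
    else
        external.erase(name);
}

int main() {
    std::map<std::string, TensorPtr> external;
    update_external_ptr(external, "out0", std::make_shared<int>(0), MemDesc{}, MemDesc{});
    return external.count("out0") ? 0 : 1;  // registered: compatible descriptors
}
```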
layout to CpuBlockedMemoryDesc"); - - const auto& blkDesc = desc.getBlockingDesc(); - const auto& dims = desc.getDims(); - - auto strides = blkDesc.getStrides(); - // for empty tensor case InferenceEngine::TensorDesc fill strides with non zero values before first 0 dims - // i.e. dims[1, 0, 2, 3] -> strides [0, 6, 3, 1] - if (std::any_of(dims.begin(), dims.end(), [](size_t dim){ return dim == 0; })) { - std::fill(strides.begin(), strides.end(), 0); +BlockedMemoryDescPtr MemoryDescUtils::convertToBlockedMemoryDesc(const MemoryDescPtr &desc) { + if (desc->getType() & MemoryDescType::Blocked) { + return std::dynamic_pointer_cast(desc); + } else { + OPENVINO_THROW("Can not convert unsupported memory descriptor"); } - - return CpuBlockedMemoryDesc(InferenceEngine::details::convertPrecision(desc.getPrecision()), - Shape(dims), - blkDesc.getBlockDims(), - blkDesc.getOrder(), - blkDesc.getOffsetPadding(), - blkDesc.getOffsetPaddingToData(), - strides); } CpuBlockedMemoryDescPtr MemoryDescUtils::generateCpuBlockedMemoryDesc(const ov::SoPtr& tensor) { @@ -82,6 +70,8 @@ CpuBlockedMemoryDescPtr MemoryDescUtils::generateCpuBlockedMemoryDesc(const ov:: if (byte_strides.empty()) { blk_strides = ov::row_major_strides(shape); + } else if (tensor->get_size() == 0) { + blk_strides.resize(shape.size()); } else { // ROI tensor need figure out correct blk_strides blk_strides.resize(byte_strides.size()); @@ -108,6 +98,7 @@ CpuBlockedMemoryDescPtr MemoryDescUtils::generateCpuBlockedMemoryDesc(const ov:: blk_strides); } +OPENVINO_SUPPRESS_DEPRECATED_START DnnlBlockedMemoryDesc MemoryDescUtils::convertToDnnlBlockedMemoryDesc(const InferenceEngine::TensorDesc& desc) { if (desc.getLayout() == InferenceEngine::Layout::ANY) OPENVINO_THROW("Cannot convert InferenceEngine::TensorDesc with ANY layout to DnnlBlockedMemoryDesc"); @@ -131,15 +122,7 @@ DnnlBlockedMemoryDesc MemoryDescUtils::convertToDnnlBlockedMemoryDesc(const Infe strides); } -BlockedMemoryDescPtr MemoryDescUtils::convertToBlockedMemoryDesc(const MemoryDescPtr &desc) { - if (desc->getType() & MemoryDescType::Blocked) { - return std::dynamic_pointer_cast(desc); - } else { - OPENVINO_THROW("Can not convert unsupported memory descriptor"); - } -} - -InferenceEngine::Blob::Ptr MemoryDescUtils::interpretAsBlob(const IMemory &mem) { +InferenceEngine::Blob::Ptr MemoryDescUtils::interpretAsBlob(const IMemory& mem) { // TODO [DS]: Rewrite when IE is moved to the new TensorDescriptor auto& memDesc = mem.getDesc(); InferenceEngine::TensorDesc desc = convertToTensorDesc(memDesc); @@ -148,7 +131,7 @@ InferenceEngine::Blob::Ptr MemoryDescUtils::interpretAsBlob(const IMemory &mem) return make_blob_with_precision(desc, mem.getData()); } -InferenceEngine::TensorDesc MemoryDescUtils::interpretAsBlobDesc(const IMemory &mem) { +InferenceEngine::TensorDesc MemoryDescUtils::interpretAsBlobDesc(const IMemory& mem) { auto& memDesc = mem.getDesc(); InferenceEngine::TensorDesc desc = convertToTensorDesc(memDesc); @@ -174,6 +157,7 @@ InferenceEngine::TensorDesc MemoryDescUtils::convertToTensorDesc(const MemoryDes OPENVINO_THROW("Cannot convert MemoryDesc to InferenceEngine::TensorDesc"); } } +OPENVINO_SUPPRESS_DEPRECATED_END std::string MemoryDescUtils::dim2str(Dim dim) { return dim == Shape::UNDEFINED_DIM ? "?" 
: std::to_string(dim); diff --git a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h index 43c1d2a2d410ef..d930612ac0a173 100644 --- a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h +++ b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h @@ -7,7 +7,8 @@ #include #include "cpu_types.h" #include "cpu_shape.h" - +#include "openvino/runtime/itensor.hpp" +#include "openvino/runtime/so_ptr.hpp" #include #include #include @@ -42,11 +43,11 @@ class MemoryDescUtils { static DnnlBlockedMemoryDesc convertToDnnlBlockedMemoryDesc(const MemoryDesc& desc); /** - * @brief Converts InferenceEngine::TensorDesc to CpuBlockedMemoryDesc - * @param desc InferenceEngine::TensorDesc to be converted - * @return converted CpuBlockedMemoryDesc + * @brief Converts MemoryDesc to BlockedMemoryDesc + * @param desc MemoryDesc to be converted + * @return converted BlockedMemoryDesc */ - static CpuBlockedMemoryDesc convertToCpuBlockedMemoryDesc(const InferenceEngine::TensorDesc& desc); + static std::shared_ptr convertToBlockedMemoryDesc(const std::shared_ptr &desc); /** * @brief Builds CpuBlockedMemoryDesc for given ov::ITensor @@ -55,6 +56,7 @@ class MemoryDescUtils { */ static std::shared_ptr generateCpuBlockedMemoryDesc(const ov::SoPtr& tensor); + OPENVINO_SUPPRESS_DEPRECATED_START /** * @brief Converts InferenceEngine::TensorDesc to DnnlBlockedMemoryDesc * @param desc InferenceEngine::TensorDesc to be converted @@ -62,13 +64,6 @@ class MemoryDescUtils { */ static DnnlBlockedMemoryDesc convertToDnnlBlockedMemoryDesc(const InferenceEngine::TensorDesc& desc); - /** - * @brief Converts MemoryDesc to BlockedMemoryDesc - * @param desc MemoryDesc to be converted - * @return converted BlockedMemoryDesc - */ - static std::shared_ptr convertToBlockedMemoryDesc(const std::shared_ptr &desc); - /** * @brief Creates InferenceEngine::Blob from Memory with the memory reuse * @param desc Memory from which will be created InferenceEngine::Blob @@ -89,6 +84,7 @@ class MemoryDescUtils { * @return converted InferenceEngine::TensorDesc */ static InferenceEngine::TensorDesc convertToTensorDesc(const MemoryDesc& desc); + OPENVINO_SUPPRESS_DEPRECATED_END static constexpr Dim DEFAULT_DUMMY_VAL = 64; diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index f6dc2284200961..2c3749f2b08dda 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -23,7 +23,6 @@ #include "nodes/eltwise.h" #include "nodes/matmul.h" #include "nodes/fullyconnected.h" -#include "nodes/generic.h" #include "nodes/if.h" #include "nodes/input.h" #include "nodes/lrn.h" @@ -855,13 +854,11 @@ void Node::prepareMemory(const DnnlMemoryDescPtr& intDesc, size_t indx) { internalBlobs.size()); } - const auto &internalBlob = internalBlobs[indx]; + const auto& internalBlob = internalBlobs[indx]; - auto create = [&] () { - // TODO [DS]: internal blobs should be removed or rewritten using Memory object - auto newDesc = MemoryDescUtils::convertToDnnlBlockedMemoryDesc(internalBlob->getTensorDesc()); - - Memory memory{engine, newDesc, internalBlob->buffer()}; + auto create = [&]() { + auto newDesc = internalBlob->getDescPtr(); + Memory memory{engine, newDesc, internalBlob->getData()}; MemoryPtr _ptr = std::make_shared(engine, intDesc); node::Reorder::reorderData(memory, *_ptr, context->getParamsCache()); @@ -872,12 +869,13 @@ void Node::prepareMemory(const DnnlMemoryDescPtr& intDesc, size_t indx) { auto weightCache = 
context->getWeightsCache(); if (weightCache != nullptr && memory::format_kind::blocked == intDesc->getDnnlDesc().get_format_kind()) { const auto& format = intDesc->serializeFormat(); - const uint64_t data_hash = weightCache->GetHashFunc().hash( - internalBlob->buffer(), internalBlob->byteSize()); + const uint64_t data_hash = + weightCache->GetHashFunc().hash(static_cast(internalBlob->getData()), + internalBlob->getSize()); const std::string string_hash = name + "_" + std::to_string(indx) + "_" + format - + "_" + std::to_string(internalBlob->byteSize()) + + "_" + std::to_string(internalBlob->getSize()) + "_" + std::to_string(data_hash); ptr = *weightCache->findOrCreate(string_hash, create); @@ -1254,24 +1252,22 @@ bool Node::isFusedWith(Type fusedNodeType) const { return false; } -InferenceEngine::Layout Node::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) { +dnnl::memory::format_tag Node::getWeightsFormatTagByDims(const SizeVector& dims) const { switch (dims.size()) { - case 0: - return InferenceEngine::Layout::SCALAR; - case 1: - return InferenceEngine::Layout::C; - case 2: - return InferenceEngine::Layout::NC; - case 3: - return InferenceEngine::Layout::CHW; - case 4: - return InferenceEngine::Layout::OIHW; - case 5: - return isGrouped ? InferenceEngine::Layout::GOIHW : InferenceEngine::Layout::OIDHW; - case 6: - return isGrouped ? InferenceEngine::Layout::GOIDHW : InferenceEngine::Layout::BLOCKED; - default: - return InferenceEngine::Layout::BLOCKED; + case 1: + return dnnl::memory::format_tag::a; + case 2: + return dnnl::memory::format_tag::ab; + case 3: + return dnnl::memory::format_tag::abc; + case 4: + return dnnl::memory::format_tag::abcd; + case 5: + return dnnl::memory::format_tag::abcde; + case 6: + return dnnl::memory::format_tag::abcdef; + default: + OPENVINO_THROW("getWeightsFormatTagByDims doesn't support dims.size() = ", dims.size()); } } diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 0e56f0632aa9fb..0d998c39d889a1 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -526,7 +526,7 @@ class Node { return outputShapes[port]; } - const std::vector& getInternalBlobs() const { + const std::vector& getInternalBlobs() const { return internalBlobs; } @@ -606,7 +606,7 @@ class Node { }; mutable InPlaceType inplace = InPlaceType::Unknown; ConstantType constant = ConstantType::Unknown; - std::vector internalBlobs; + std::vector internalBlobs; std::vector internalBlobMemory; std::vector supportedPrimitiveDescriptors; std::unordered_map primArgs; @@ -633,7 +633,7 @@ class Node { virtual std::vector getAvailableFormatsForDims(const Shape& dims) const; - InferenceEngine::Layout getWeightsLayoutByDims(InferenceEngine::SizeVector dims, bool isGrouped); + dnnl::memory::format_tag getWeightsFormatTagByDims(const InferenceEngine::SizeVector& dims) const; /** * @brief Auxiliary function to get node input precisions diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp index 9c836c7a16c6aa..f235688f533c57 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp @@ -105,10 +105,6 @@ void AdaptivePooling::initSupportedPrimitiveDescriptors() { // we supports only fp32 currently precision = ov::element::f32; - InferenceEngine::LayerConfig config; - config.inConfs.resize(2); - config.outConfs.resize((algorithm == Algorithm::AdaptivePoolingAvg ? 
1 : 2)); - std::vector dataFormats{ LayoutType::ncsp }; const auto &inDims = getInputShapeAtPort(0).getDims(); if (inDims[1] != Shape::UNDEFINED_DIM && inDims[1] != 1) { diff --git a/src/plugins/intel_cpu/src/nodes/conv.cpp b/src/plugins/intel_cpu/src/nodes/conv.cpp index d605f5271da6e6..705c825b31b282 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/conv.cpp @@ -1193,35 +1193,6 @@ bool Convolution::isNspcAvailable() const { return true; } -InferenceEngine::Blob::Ptr Convolution::createInternalBlob(InferenceEngine::SizeVector dims, size_t edgeNum, bool isGrouped) { - const auto constNode = std::dynamic_pointer_cast(getParentEdgeAt(edgeNum)->getParent()); - if (!constNode) { - OPENVINO_THROW("Cannot cast ", edgeNum, " input to Input node for ", getName(), "."); - } - auto blb = constNode->getMemoryPtr(); - if (blb == nullptr) - OPENVINO_THROW("Cannot get const blob for node ", getName(), "."); - - auto const elementsCount = blb->getDescWithType()->getPaddedElementsCount(); - - InferenceEngine::TensorDesc desc(InferenceEngine::details::convertPrecision(ov::element::f32), dims, getWeightsLayoutByDims(dims, isGrouped)); - - Blob::Ptr internalBlob = InferenceEngine::make_shared_blob(desc); - internalBlob->allocate(); - - if (internalBlob->size() != elementsCount) { - OPENVINO_THROW("Created internal blob and const blob has different size for node: ", getName(), "."); - } - - cpu_convert(blb->getData(), - internalBlob->buffer(), - DnnlExtensionUtils::DataTypeToElementType(blb->getDataType()), - InferenceEngine::details::convertPrecision(internalBlob->getTensorDesc().getPrecision()), - elementsCount); - - return internalBlob; -} - void Convolution::prepareParams() { auto srcMemPtr = getParentEdgesAtPort(0)[0]->getMemoryPtr(); auto wghMemPtr = getParentEdgesAtPort(1)[0]->getMemoryPtr(); diff --git a/src/plugins/intel_cpu/src/nodes/conv.h b/src/plugins/intel_cpu/src/nodes/conv.h index 2279e2fc80ef67..5d7e89aa7fc009 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.h +++ b/src/plugins/intel_cpu/src/nodes/conv.h @@ -116,7 +116,6 @@ class Convolution : public Node { void SetPostOpsAndZeroPoints(std::vector &attrs); void filterSupportedDescriptors(); bool isNspcAvailable() const; - InferenceEngine::Blob::Ptr createInternalBlob(InferenceEngine::SizeVector dims, size_t edgeNum, bool isGrouped = false); void updatePadding(); MemoryDescPtr getSumMemDesc(const dnnl::primitive_desc &primitive_desc_it); diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index a3d141ff28be98..cc1731ca596ebf 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -233,7 +233,7 @@ Deconvolution::Deconvolution(const std::shared_ptr& op, attr = std::make_shared(); } -InferenceEngine::Blob::Ptr Deconvolution::createWeiBlobAsIO(InferenceEngine::SizeVector dims) { +MemoryPtr Deconvolution::createWeiBlobAsIO(const VectorDims& dims) { auto constNode = std::dynamic_pointer_cast(getParentEdgeAt(1)->getParent()); if (!constNode) OPENVINO_THROW("Cannot cast const input node for node ", getName(), "."); @@ -244,7 +244,7 @@ InferenceEngine::Blob::Ptr Deconvolution::createWeiBlobAsIO(InferenceEngine::Siz auto const blbSize = blb->getSize(); // WA: In int8 case, we are processing weights using internal blob. 
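Before the dims/order preparation that follows in this hunk, a concrete illustration helps: for a grouped 5-D weight shape [G, O, I, H, W], the code swaps the I/O dims (oneDNN wants the int8 deconvolution weights transposed) and records the permutation in an order vector. In the sketch below, the {0, 2, 1} / {1, 0} seeding of the order is reconstructed from context and may differ from the elided lines:

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    std::vector<size_t> dims{2, 8, 4, 3, 3};  // G, O, I, H, W
    const size_t withGroups = 1;

    // Swap I and O, as the hunk below does for the blocked descriptor.
    std::vector<size_t> dimsForBlockedDesc{dims};
    std::swap(dimsForBlockedDesc[withGroups + 0], dimsForBlockedDesc[withGroups + 1]);

    // Permutation mapping blocked dims back onto the plain dims
    // (assumed seeding; the PR elides these lines as unchanged context).
    std::vector<size_t> orderForBlockedDesc = withGroups ? std::vector<size_t>{0, 2, 1}
                                                         : std::vector<size_t>{1, 0};
    for (size_t i = 2 + withGroups; i < dimsForBlockedDesc.size(); i++)
        orderForBlockedDesc.push_back(i);

    // Prints: dims 2 4 8 3 3 | order 0 2 1 3 4
    std::cout << "dims ";
    for (size_t d : dimsForBlockedDesc) std::cout << d << " ";
    std::cout << "| order ";
    for (size_t o : orderForBlockedDesc) std::cout << o << " ";
    std::cout << "\n";
}
```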
- InferenceEngine::SizeVector dimsForBlockedDesc{dims}; + VectorDims dimsForBlockedDesc{dims}; std::swap(dimsForBlockedDesc[withGroups + 0], dimsForBlockedDesc[withGroups + 1]); VectorDims orderForBlockedDesc; @@ -256,18 +256,15 @@ InferenceEngine::Blob::Ptr Deconvolution::createWeiBlobAsIO(InferenceEngine::Siz for (size_t i = 2 + withGroups; i < dimsForBlockedDesc.size(); i++) orderForBlockedDesc.push_back(i); - BlockingDesc blkDesc(dimsForBlockedDesc, orderForBlockedDesc); - InferenceEngine::TensorDesc tensorDesc( - InferenceEngine::details::convertPrecision(DnnlExtensionUtils::DataTypeToElementType(blb->getDataType())), - dims, - blkDesc); - - Blob::Ptr internalBlob = InferenceEngine::make_shared_blob(tensorDesc); - internalBlob->allocate(); - char *data = internalBlob->buffer(); - if (data == nullptr) - OPENVINO_THROW("NotAllocated: Internal blob was not allocated for node ", getName(), "."); - size_t intBuffSize = internalBlob->byteSize(); + auto desc = CpuBlockedMemoryDesc(DnnlExtensionUtils::DataTypeToElementType(blb->getDataType()), + Shape(dims), + dimsForBlockedDesc, + orderForBlockedDesc); + MemoryPtr mem_ptr = std::make_shared(getEngine(), desc); + if (!mem_ptr->isAllocated()) + OPENVINO_THROW("NotAllocated: Internal tensor was not allocated for node ", getName(), "."); + char* data = static_cast(mem_ptr->getData()); + size_t intBuffSize = mem_ptr->getSize(); size_t offset = blbSize; if (intBuffSize < offset) { @@ -275,7 +272,7 @@ InferenceEngine::Blob::Ptr Deconvolution::createWeiBlobAsIO(InferenceEngine::Siz } cpu_memcpy_s(data, intBuffSize, blb->getData(), blbSize); - return internalBlob; + return mem_ptr; } bool Deconvolution::canBeExecutedInInt8() const { @@ -846,8 +843,7 @@ void Deconvolution::createPrimitive() { if (found) { prepareMemory({DnnlExtensionUtils::makeDescriptor(prim_desc.weights_desc(0))}); } else { - prepareMemory({std::make_shared( - MemoryDescUtils::convertToDnnlBlockedMemoryDesc(internalBlobs.front()->getTensorDesc()))}); + prepareMemory({internalBlobs.front()->getDescWithType()}); } } diff --git a/src/plugins/intel_cpu/src/nodes/deconv.h b/src/plugins/intel_cpu/src/nodes/deconv.h index 5477feadc4ffc6..8043705d7e4152 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.h +++ b/src/plugins/intel_cpu/src/nodes/deconv.h @@ -118,7 +118,7 @@ class Deconvolution : public Node { std::string errorPrefix; - InferenceEngine::Blob::Ptr createWeiBlobAsIO(InferenceEngine::SizeVector dims); + MemoryPtr createWeiBlobAsIO(const VectorDims& dims); }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/generic.cpp b/src/plugins/intel_cpu/src/nodes/generic.cpp index 36b8ba8fc609d5..689d6a1f60a4f8 100644 --- a/src/plugins/intel_cpu/src/nodes/generic.cpp +++ b/src/plugins/intel_cpu/src/nodes/generic.cpp @@ -195,6 +195,7 @@ void Generic::initDescriptor(const NodeConfig &config) { } for (auto &outConf : rightConfig.outConfs) { if (outConf.inPlace() < static_cast(getParentEdges().size()) && + outConf.inPlace() >= 0 && getParentEdgeAt(static_cast(outConf.inPlace()))->getParent()->getChildEdges().size() > 1) { outConf.inPlace(-1); } diff --git a/src/plugins/intel_cpu/src/nodes/generic.h b/src/plugins/intel_cpu/src/nodes/generic.h index dd78515b2922b3..43408d826cd8b1 100644 --- a/src/plugins/intel_cpu/src/nodes/generic.h +++ b/src/plugins/intel_cpu/src/nodes/generic.h @@ -18,7 +18,7 @@ namespace node { class Generic : public Node { public: - Generic(const std::shared_ptr& op, const GraphContext::CPtr context); + Generic(const std::shared_ptr& op, const 
GraphContext::CPtr context); ~Generic() = default; void getSupportedDescriptors() override; diff --git a/src/plugins/intel_cpu/src/nodes/multinomial.cpp b/src/plugins/intel_cpu/src/nodes/multinomial.cpp index 54f481d4f0e7e8..951044295be979 100644 --- a/src/plugins/intel_cpu/src/nodes/multinomial.cpp +++ b/src/plugins/intel_cpu/src/nodes/multinomial.cpp @@ -68,42 +68,6 @@ void Multinomial::initSupportedPrimitiveDescriptors() { ref_any); } -std::string Multinomial::getPrimitiveDescriptorType() const { - std::string str_type; - auto selectedPrimitiveDesc = getSelectedPrimitiveDescriptor(); - - impl_desc_type type = impl_desc_type::undef; - if (selectedPrimitiveDesc) { - type = selectedPrimitiveDesc->getImplementationType(); - } - - if (type == impl_desc_type::unknown) - str_type += "unknown_"; - if ((type & impl_desc_type::jit) == impl_desc_type::jit) - str_type += "jit_"; - if ((type & impl_desc_type::ref) == impl_desc_type::ref) - str_type += "ref_"; - if ((type & impl_desc_type::avx512) == impl_desc_type::avx512) - str_type += "avx512_"; - if ((type & impl_desc_type::avx2) == impl_desc_type::avx2) - str_type += "avx2_"; - if ((type & impl_desc_type::sse42) == impl_desc_type::sse42) - str_type += "sse42_"; - if ((type & impl_desc_type::any) == impl_desc_type::any) - str_type += "any_"; - - if (str_type.empty()) - str_type += "undef_"; - - if (selectedPrimitiveDesc) { - str_type += m_output_precision.get_type_name(); - } else { - str_type.pop_back(); - } - - return str_type; -} - bool Multinomial::needShapeInfer() const { return !(m_const_inputs[NUM_SAMPLES_PORT] && m_const_batch); } diff --git a/src/plugins/intel_cpu/src/nodes/multinomial.hpp b/src/plugins/intel_cpu/src/nodes/multinomial.hpp index f492814633c7e0..43493828bf4636 100644 --- a/src/plugins/intel_cpu/src/nodes/multinomial.hpp +++ b/src/plugins/intel_cpu/src/nodes/multinomial.hpp @@ -21,7 +21,6 @@ class Multinomial : public Node { void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - std::string getPrimitiveDescriptorType() const override; bool created() const override; diff --git a/src/plugins/intel_cpu/src/nodes/mvn.cpp b/src/plugins/intel_cpu/src/nodes/mvn.cpp index 818f386259e2cb..8db1c7b1ac941e 100644 --- a/src/plugins/intel_cpu/src/nodes/mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/mvn.cpp @@ -605,6 +605,7 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k // cover vector and tails on avx512, avx2 // cover on sse, 2 part vector, first part vector and second part tails, first part tails + Xbyak::Label label_exit; for (int i = 0; i < repeats; i++) { if (i > 0) { reset_with_offset(4); @@ -621,12 +622,6 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k L(label_tails); { - if (i > 0) { - // empty second half on sse - cmp(reg_rt_shape, 0); - jbe(label_end); - } - Xbyak::Label label_sse_full_size; if (isa == cpu::x64::sse41) { // on sse, first 4 could be done with vector manner @@ -635,17 +630,22 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k } worker_tails_unroll(); - jmp(label_end, T_NEAR); + // exit directly if tails is done, for all platforms + jmp(label_exit, T_NEAR); L(label_sse_full_size); { worker_vector_unroll(); save_result(); sub(reg_rt_shape, 4); + // exit directly if empty second half + cmp(reg_rt_shape, 0); + jbe(label_exit, T_NEAR); } } L(label_end); } + L(label_exit); } inline void worker_vector_unroll() { @@ -1098,6 +1098,7 @@ struct jit_uni_mvn_kernel_f32 : public 
jit_uni_mvn_kernel, public jit_generator // cover vector and tails on avx512, avx2 // cover on sse, 2 part vector, first part vector and second part tails, first part tails + Xbyak::Label label_exit; int repeats = (isa == cpu::x64::sse41) ? 2 : 1; for (int i = 0; i < repeats; i++) { if (i > 0) { @@ -1114,12 +1115,6 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator L(label_tails); { - if (i > 0) { - // empty second half on sse - cmp(reg_rt_shape, 0); - jbe(label_end); - } - Xbyak::Label label_sse_full_size; if (isa == cpu::x64::sse41) { // on sse, first 4 could be done with vector manner @@ -1128,16 +1123,21 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator } worker_tails_unroll(); - jmp(label_end, T_NEAR); + // exit directly if tails is done, for all platforms + jmp(label_exit, T_NEAR); L(label_sse_full_size); { worker_mvn_vector_unroll(reg_work_amount); sub(reg_rt_shape, 4); + // exit directly if empty second half + cmp(reg_rt_shape, 0); + jbe(label_exit, T_NEAR); } } L(label_end); } + L(label_exit); } // nspc norm per channel with unroll diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index 44fba765ceffc7..d7e4d204e64354 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -746,24 +746,23 @@ void RNN::fillSequenceDesc() { template void RNN::fillWeights(const int *gate_map, const size_t wIdx, const size_t rIdx) { - const auto& weightPrec = DnnlExtensionUtils::DataTypeToElementType(inDataTypes[wIdx]); - const auto& targetWeightPrec = DnnlExtensionUtils::DataTypeToElementType(weightsByinputDataType.at(inDataTypes[xIdx])); + const auto& weightPrec = DnnlExtensionUtils::DataTypeToElementType(inDataTypes[wIdx]); + const auto& targetWeightDataType = weightsByinputDataType.at(inDataTypes[xIdx]); + const auto& targetWeightPrec = DnnlExtensionUtils::DataTypeToElementType(targetWeightDataType); // create weight blobs (data and state part) - const VectorDims dims_w = { L, D, DC, G, SC }; - TensorDesc w_data_desc(InferenceEngine::details::convertPrecision(targetWeightPrec), dims_w, getWeightsLayoutByDims(dims_w, false)); + const VectorDims dims_w = {L, D, DC, G, SC}; + auto w_data_desc = DnnlBlockedMemoryDesc(Shape(dims_w), targetWeightDataType, getWeightsFormatTagByDims(dims_w)); + MemoryPtr w_data_mem = std::make_shared(getEngine(), w_data_desc); + auto w_ptr = static_cast(w_data_mem->getData()); - Blob::Ptr w_data_mem = make_shared_blob(w_data_desc); - w_data_mem->allocate(); - auto w_ptr = static_cast(w_data_mem->buffer()); if (w_ptr == nullptr) OPENVINO_THROW("NotAllocated: Internal blob was not allocated for node ", getName(), "."); - const VectorDims dims_s = { L, D, SC, G, SC }; - TensorDesc w_state_desc(InferenceEngine::details::convertPrecision(targetWeightPrec), dims_s, getWeightsLayoutByDims(dims_s, false)); - Blob::Ptr w_state_mem = make_shared_blob(w_state_desc); - w_state_mem->allocate(); - auto r_ptr = static_cast(w_state_mem->buffer()); + const VectorDims dims_s = {L, D, SC, G, SC}; + auto w_state_desc = DnnlBlockedMemoryDesc(Shape(dims_s), targetWeightDataType, getWeightsFormatTagByDims(dims_s)); + MemoryPtr w_state_mem = std::make_shared(getEngine(), w_state_desc); + auto r_ptr = static_cast(w_state_mem->getData()); if (r_ptr == nullptr) OPENVINO_THROW("NotAllocated: Internal blob was not allocated for node ", getName(), "."); @@ -803,7 +802,6 @@ void RNN::fillWeights(const int *gate_map, const size_t wIdx, const size_t 
rIdx) } } } - internalBlobs.push_back(w_data_mem); internalBlobs.push_back(w_state_mem); } @@ -817,10 +815,11 @@ void RNN::fillBiases(const int *gate_map) { } VectorDims dims_b = { L, D, Gb, SC }; - TensorDesc w_bias_data_desc(InferenceEngine::details::convertPrecision(Prec), dims_b, getWeightsLayoutByDims(dims_b, false)); - Blob::Ptr w_bias_data_mem = make_shared_blob(w_bias_data_desc); - w_bias_data_mem->allocate(); - auto b_ptr = static_cast(w_bias_data_mem->buffer()); + + auto _data_type = DnnlExtensionUtils::ElementTypeToDataType(Prec); + auto w_bias_data_desc = DnnlBlockedMemoryDesc(Shape(dims_b), _data_type, getWeightsFormatTagByDims(dims_b)); + MemoryPtr w_bias_data_mem = std::make_shared(getEngine(), w_bias_data_desc); + auto b_ptr = static_cast(w_bias_data_mem->getData()); if (b_ptr == nullptr) OPENVINO_THROW("NotAllocated: Internal blob was not allocated for node ", getName(), "."); diff --git a/src/plugins/intel_cpu/src/normalize_preprocess.cpp b/src/plugins/intel_cpu/src/normalize_preprocess.cpp deleted file mode 100644 index 4d1bf74484f693..00000000000000 --- a/src/plugins/intel_cpu/src/normalize_preprocess.cpp +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "normalize_preprocess.h" -#include "ie_parallel.hpp" -#include "nodes/common/cpu_memcpy.h" -#include "utils/general_utils.h" -#include "ie_ngraph_utils.hpp" - -using namespace InferenceEngine; - -namespace ov { -namespace intel_cpu { - -NormalizePreprocess::NormalizePreprocess() : meanBuffer(nullptr) { -} - -void NormalizePreprocess::Load(const Shape& inputShape, InputInfo::Ptr inputInfo) { - PreProcessInfo &pp = inputInfo->getPreProcess(); - size_t inChannels = pp.getNumberOfChannels(); - if (inChannels == 0) { - meanBuffer = nullptr; - return; - } - - if (!dimsEqualStrong(inChannels, inputShape.getDims()[1])) { - OPENVINO_THROW("channels mismatch between mean and input"); - } - - switch (pp.getMeanVariant()) { - case MEAN_VALUE: { - // mean and standard deviation image common value per channel (1x1xC) - meanValues.resize(inChannels); - stdScales.resize(inChannels); - - for (unsigned channel = 0; channel < inChannels; channel++) { - if (pp[channel]->stdScale == 0) { - OPENVINO_THROW("Preprocessing error: stdScale cannot be equal zero"); - } - meanValues[channel] = pp[channel]->meanValue; - stdScales[channel] = pp[channel]->stdScale; - } - } - break; - case MEAN_IMAGE: { - // since oneDNN expects all channels in the same buffer - we copy it here as it comes from different channels... 
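For context on what this file deletion removes: aside from the mean-image buffer handling, NormalizePreprocess reduces to per-channel mean subtraction and std scaling over the input buffer. A minimal sketch of that removed behavior, simplified to the NCHW float case (stand-alone code, not the class itself):

```cpp
#include <cstddef>
#include <vector>

// Legacy preprocessing being removed by this PR, reduced to its core:
// v = (v - mean[c]) / scale[c] for every element of channel c.
void normalize_nchw(float* data, size_t N, size_t C, size_t spatial,
                    const std::vector<float>& mean, const std::vector<float>& scale) {
    for (size_t n = 0; n < N; ++n)
        for (size_t c = 0; c < C; ++c)
            for (size_t i = 0; i < spatial; ++i) {
                float& v = data[(n * C + c) * spatial + i];
                v = (v - mean[c]) / scale[c];
            }
}
```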
- auto meanWidth = pp[0]->meanData->getTensorDesc().getDims()[pp[0]->meanData->getTensorDesc().getDims().size() - 1]; - auto meanHeight = pp[0]->meanData->getTensorDesc().getDims()[pp[0]->meanData->getTensorDesc().getDims().size() - 2]; - - TensorDesc desc(InferenceEngine::details::convertPrecision(ov::element::f32), {inChannels, meanHeight, meanWidth}, InferenceEngine::Layout::CHW); - - meanBuffer = make_shared_blob(desc); - - meanBuffer->allocate(); - - for (unsigned channel = 0; channel < inChannels; channel++) { - Blob::Ptr meanBlob = pp[channel]->meanData; - if (!meanBlob || InferenceEngine::details::convertPrecision(meanBlob->getTensorDesc().getPrecision()) != ov::element::f32) - OPENVINO_THROW("mean image not provided or not in Float 32"); - if (meanBlob->size() != meanHeight*meanWidth) { - OPENVINO_THROW("mean image size does not match expected network input, expecting ", - meanWidth, - " x ", - meanHeight); - } - // todo: cast to TBlob and make sure it is floats - cpu_memcpy_s(meanBuffer->data() + channel*meanBlob->size(), meanBuffer->byteSize() - channel*meanBlob->byteSize(), - meanBlob->buffer(), meanBlob->byteSize()); - } - } - break; - - case NONE: { - // there is no mean image. So disable mean image step - meanBuffer = nullptr; - } - break; - - default: { - OPENVINO_THROW("Unsupported mean variant: ", pp.getMeanVariant()); - } - } -} - -void NormalizePreprocess::NormalizeImage(const Shape &inputShape, float *input, InferenceEngine::Layout layout) { - OPENVINO_ASSERT(input != nullptr); - - const auto inputDims = inputShape.getStaticDims(); - if (inputDims.size() != 4) { - OPENVINO_THROW("Expecting input as 4 dimension blob with format NxCxHxW."); - } - - if (layout != NCHW && layout != NHWC) { - OPENVINO_THROW("Expecting input layout NCHW or NHWC."); - } - - int MB = inputDims[0]; - int srcSize = inputShape.getElementsCount() / MB; - - if (meanBuffer && meanBuffer->size()) { - const float * meanBufferValues = meanBuffer->readOnly(); - - parallel_for2d(MB, srcSize, [&](int mb, int i) { - input[srcSize * mb + i] -= meanBufferValues[i]; - }); - } else if (!meanValues.empty() && !stdScales.empty()) { - int C = inputDims[1]; - srcSize /= inputDims[1]; - - if (layout == NCHW) { - parallel_for3d(MB, C, srcSize, [&](int mb, int c, int i) { - input[mb * C * srcSize + c * srcSize + i] -= meanValues[c]; - input[mb * C * srcSize + c * srcSize + i] /= stdScales[c]; - }); - } else if (layout == NHWC) { - parallel_for2d(MB, srcSize, [&](int mb, int i) { - for (int c = 0; c < C; c++) { - input[mb * srcSize * C + i * C + c] -= meanValues[c]; - input[mb * srcSize * C + i * C + c] /= stdScales[c]; - } - }); - } - } else { - OPENVINO_THROW("Preprocessing error: meanValues and stdScales arrays are inconsistent."); - } -} - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/normalize_preprocess.h b/src/plugins/intel_cpu/src/normalize_preprocess.h deleted file mode 100644 index a6d03cfcad5297..00000000000000 --- a/src/plugins/intel_cpu/src/normalize_preprocess.h +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "ie_input_info.hpp" - -#include "cpu_shape.h" -#include "ie_parallel.hpp" -#include -#include - -namespace ov { -namespace intel_cpu { - -class NormalizePreprocess { -public: - NormalizePreprocess(); - -public: - void Load(const Shape& inputShape, InferenceEngine::InputInfo::Ptr inputInfo); - void NormalizeImage(const Shape &inputShape, float *input, 
InferenceEngine::Layout layout); - - template::value>::type* = nullptr> - void NormalizeImage(const Shape &inputShape, T *input, InferenceEngine::Layout layout) { - OPENVINO_ASSERT(input != nullptr); - - const auto inputDims = inputShape.getStaticDims(); - if (inputDims.size() != 4) { - OPENVINO_THROW("Expecting input as 4 dimension blob with format NxCxHxW."); - } - - if (layout != InferenceEngine::NCHW && layout != InferenceEngine::NHWC) { - OPENVINO_THROW("Expecting input layout NCHW or NHWC."); - } - - int MB = inputDims[0]; - int srcSize = inputShape.getElementsCount() / MB; - - if (meanBuffer && meanBuffer->size()) { - const float * meanBufferValues = meanBuffer->readOnly(); - - InferenceEngine::parallel_for2d(MB, srcSize, [&](int mb, int i) { - int buf = input[srcSize * mb + i]; - buf -= meanBufferValues[i]; - if (buf < (std::numeric_limits::min)()) buf = (std::numeric_limits::min)(); - if (buf > (std::numeric_limits::max)()) buf = (std::numeric_limits::max)(); - input[srcSize * mb + i] = buf; - }); - } else if (!meanValues.empty() && !stdScales.empty()) { - int C = inputDims[1]; - srcSize /= inputDims[1]; - - for (int c = 0; c < C; c++) { - if (stdScales[c] != 1) - OPENVINO_THROW("Preprocessing error: fractional normalization is not supported for integer data. "); - } - - if (layout == InferenceEngine::NCHW) { - InferenceEngine::parallel_for3d(MB, C, srcSize, [&](int mb, int c, int i) { - int buf = input[srcSize * mb * C + c * srcSize + i]; - buf -= meanValues[c]; - if (buf < (std::numeric_limits::min)()) buf = (std::numeric_limits::min)(); - if (buf > (std::numeric_limits::max)()) buf = (std::numeric_limits::max)(); - input[srcSize * mb * C + c * srcSize + i] = buf; - }); - } else if (layout == InferenceEngine::NHWC) { - InferenceEngine::parallel_for2d(MB, srcSize, [&](int mb, int i) { - for (int c = 0; c < C; c++) { - int buf = input[mb * srcSize * C + i * C + c]; - buf -= meanValues[c]; - if (buf < (std::numeric_limits::min)()) buf = (std::numeric_limits::min)(); - if (buf > (std::numeric_limits::max)()) buf = (std::numeric_limits::max)(); - input[mb * srcSize * C + i * C + c] = buf; - } - }); - } - } else { - OPENVINO_THROW("Preprocessing error: meanValues and stdScales arrays are inconsistent."); - } - } - -private: - std::vector meanValues; - - std::vector stdScales; - - InferenceEngine::TBlob::Ptr meanBuffer; -}; - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 03bd79e28c85dd..394db23ef5894d 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -59,9 +59,10 @@ static std::string getDeviceFullName() { #else __cpuid(regs[0], regs[0], regs[1], regs[2], regs[3]); #endif - char *ch = reinterpret_cast(®s[0]); + char* ch = reinterpret_cast(®s[0]); for (size_t j = 0; j < sizeof(regs); j++) - brand_string += ch[j]; + if (ch[j] != '\0') + brand_string += ch[j]; } #else # error "Unkown CPU architecture. 
Please, add support to openvino/core/visibility.hpp" @@ -136,7 +137,7 @@ std::mutex Engine::SchedulerGuard::mutex; std::weak_ptr Engine::SchedulerGuard::ptr; Engine::SchedulerGuard::SchedulerGuard() { -#if IE_THREAD == IE_THREAD_SEQ +#if OV_THREAD == OV_THREAD_SEQ // To save state for ACL cores in single-thread mode arm_compute::Scheduler::set(arm_compute::Scheduler::Type::ST); #else diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp index 643b5d74fc963b..193d2ce808f002 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp @@ -57,32 +57,31 @@ void BrgemmCopyB::custom_constructor_validate_and_infer_types(std::vectorget_shape()); - const auto& element_type = get_input_element_type(0); const auto& planar_pshape = snippets::utils::get_planar_pshape(shape, port->get_layout()); set_output_type(0, element_type, planar_pshape); if (is_with_compensations()) { set_output_type(1, ov::element::f32, planar_pshape); } - validate(planar_pshape, element_type); } -void BrgemmCopyB::validate(const ov::PartialShape& planar_pshape, const ov::element::Type& element_type) { +void BrgemmCopyB::validate_element_type(const ov::element::Type& element_type) { OPENVINO_ASSERT(one_of(element_type, element::bf16, element::i8), "BrgemmCopyB doesn't support element type" + element_type.get_type_name()); } diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp index 9274ad026e5f01..f803e5d55fcb8d 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp @@ -67,7 +67,7 @@ class BrgemmCopyB : public snippets::op::MemoryAccess { private: void custom_constructor_validate_and_infer_types(std::vector layout_input = {}); - void validate(const ov::PartialShape& planar_pshape, const ov::element::Type& element_type); + void validate_element_type(const ov::element::Type& element_type); void compute_block_size_values(const size_t blk_size_k, const size_t blk_size_n); Type m_type = Type::OnlyRepacking; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp index 20f7fccafe311a..3dd23a04acdd55 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp @@ -16,8 +16,8 @@ namespace intel_cpu { BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Type type, const size_t offset_a, const size_t offset_b, const size_t offset_c, std::vector layout_a, std::vector layout_b, std::vector layout_c, - const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n) - : Brgemm(), m_type(type) { + const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n, const float beta) + : Brgemm(), m_type(type), m_beta(beta) { // We call default ctor of Brgemm class to avoid incorrect shape infer in constructor_validate_and_type_infer() call set_arguments({A, B}); set_output_size(1); @@ -32,8 +32,8 @@ BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Type ty BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Output& scratch, const Type type, const 
size_t offset_a, const size_t offset_b, const size_t offset_scratch, const size_t offset_c, std::vector layout_a, std::vector layout_b, std::vector layout_c, - const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n) - : Brgemm(), m_type(type) { + const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n, const float beta) + : Brgemm(), m_type(type), m_beta(beta) { set_arguments({A, B, scratch}); set_output_size(1); ctor_initialize(std::set{0, 1, 2}, std::set{0}); @@ -48,8 +48,8 @@ BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Output< BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Type type, const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c, std::vector layout_a, std::vector layout_b, std::vector layout_c, - const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n) - : Brgemm(), m_type(type) { + const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n, const float beta) + : Brgemm(), m_type(type), m_beta(beta) { set_arguments({A, B}); set_output_size(1); m_input_ports = {{0, desc_a}, {1, desc_b}}; @@ -61,8 +61,8 @@ BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Type ty BrgemmCPU::BrgemmCPU(const Output& A, const Output& B, const Output& scratch, const Type type, const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_scratch, const PortDescriptor& desc_c, std::vector layout_a, std::vector layout_b, std::vector layout_c, - const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n) - : Brgemm(), m_type(type) { + const size_t blk_size_m, const size_t blk_size_k, const size_t blk_size_n, const float beta) + : Brgemm(), m_type(type), m_beta(beta) { set_arguments({A, B, scratch}); set_output_size(1); m_input_ports = {{0, desc_a}, {1, desc_b}, {2, desc_scratch}}; @@ -134,20 +134,22 @@ void BrgemmCPU::validate_inputs() const { std::shared_ptr BrgemmCPU::clone_with_new_inputs(const OutputVector& new_args) const { INTERNAL_OP_SCOPE(BrgemmCPU_clone_with_new_inputs); check_new_args_count(this, new_args); + std::shared_ptr brgemm; if (!is_with_scratchpad()) { return std::make_shared(new_args.at(0), new_args.at(1), m_type, get_input_port_descriptor(0), get_input_port_descriptor(1), get_output_port_descriptor(0), snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(0))->get_layout(), snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(1))->get_layout(), snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(output(0))->get_layout(), - m_M_blk, m_K_blk, m_N_blk); + m_M_blk, m_K_blk, m_N_blk, m_beta); + } else { + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_type, + get_input_port_descriptor(0), get_input_port_descriptor(1), get_input_port_descriptor(2), get_output_port_descriptor(0), + snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(0))->get_layout(), + snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(1))->get_layout(), + snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(output(0))->get_layout(), + m_M_blk, m_K_blk, m_N_blk, m_beta); } - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_type, - get_input_port_descriptor(0), get_input_port_descriptor(1), get_input_port_descriptor(2), get_output_port_descriptor(0), - snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(0))->get_layout(), - 
snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(1))->get_layout(), - snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(output(0))->get_layout(), - m_M_blk, m_K_blk, m_N_blk); } std::shared_ptr BrgemmCPU::get_brgemm_copy() const { @@ -156,7 +158,7 @@ std::shared_ptr BrgemmCPU::get_brgemm_copy() const { if (const auto brgemm_copy_b = ov::as_type_ptr(b_input_node)) { return brgemm_copy_b; } - if (ov::is_type(b_input_node)) { + if (ov::is_type(b_input_node)) { if (const auto brgemm_copy_b = ov::as_type_ptr(b_input_node->get_input_node_shared_ptr(0))) { return brgemm_copy_b; } @@ -169,5 +171,14 @@ size_t BrgemmCPU::get_offset_scratch() const { return get_input_offset(2); } +bool BrgemmCPU::visit_attributes(AttributeVisitor& visitor) { + Brgemm::visit_attributes(visitor); + visitor.on_attribute("blk_M", m_M_blk); + visitor.on_attribute("blk_K", m_K_blk); + visitor.on_attribute("blk_N", m_N_blk); + visitor.on_attribute("beta", m_beta); + return true; +} + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp index bf07b7a8546eac..1ea2418f995463 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp @@ -32,19 +32,19 @@ class BrgemmCPU : public snippets::op::Brgemm { BrgemmCPU(const Output& A, const Output& B, const Type type, const size_t offset_a = 0, const size_t offset_b = 0, const size_t offset_c = 0, std::vector layout_a = {}, std::vector layout_b = {}, std::vector layout_c = {}, - const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0); + const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0, const float beta = 0.f); BrgemmCPU(const Output& A, const Output& B, const Output& scratch, const Type type, const size_t offset_a = 0, const size_t offset_b = 0, const size_t offset_scratch = 0, const size_t offset_c = 0, std::vector layout_a = {}, std::vector layout_b = {}, std::vector layout_c = {}, - const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0); + const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0, const float beta = 0.f); BrgemmCPU(const Output& A, const Output& B, const Type type, const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c, std::vector layout_a = {}, std::vector layout_b = {}, std::vector layout_c = {}, - const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0); + const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0, const float beta = 0.f); BrgemmCPU(const Output& A, const Output& B, const Output& scratch, const Type type, const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_scratch, const PortDescriptor& desc_c, std::vector layout_a = {}, std::vector layout_b = {}, std::vector layout_c = {}, - const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0); + const size_t blk_size_m = 0, const size_t blk_size_k = 0, const size_t blk_size_n = 0, const float beta = 0.f); BrgemmCPU() = default; void validate_and_infer_types() override; @@ -54,10 +54,12 @@ class BrgemmCPU : public snippets::op::Brgemm { size_t get_m_block_size() const { return m_M_blk; } size_t get_k_block_size() const { return m_K_blk; } 
size_t get_n_block_size() const { return m_N_blk; } + float get_beta() const { return m_beta; } void set_m_block_size(size_t block_size) { m_M_blk = block_size; } void set_k_block_size(size_t block_size) { m_K_blk = block_size; } void set_n_block_size(size_t block_size) { m_N_blk = block_size; } + void set_beta(float beta) { m_beta = beta; } bool is_with_compensations() const { return m_type == Type::WithCompensations; } bool is_with_data_repacking() const { return m_type != Type::Floating; } @@ -67,6 +69,8 @@ class BrgemmCPU : public snippets::op::Brgemm { size_t get_offset_scratch() const; std::shared_ptr get_brgemm_copy() const; + bool visit_attributes(AttributeVisitor& visitor) override; + constexpr static size_t SCRATCH_BYTE_SIZE = 32 * 1024; private: @@ -79,6 +83,7 @@ class BrgemmCPU : public snippets::op::Brgemm { size_t m_M_blk = 0; size_t m_K_blk = 0; size_t m_N_blk = 0; + float m_beta = 0.f; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp index 9a08eda8b781eb..ff19f05d591d2d 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp @@ -92,7 +92,7 @@ pass::BrgemmToBrgemmCPU::BrgemmToBrgemmCPU() { set_full_port_desc(brgemm_repacking->output(0)); if (with_amx) { - const auto scratch = std::make_shared(ov::Shape{BrgemmCPU::SCRATCH_BYTE_SIZE}); + const auto scratch = std::make_shared(ov::Shape{BrgemmCPU::SCRATCH_BYTE_SIZE}); brgemm_cpu = std::make_shared(brgemm->input_value(0), brgemm_repacking->output(0), scratch, BrgemmCPU::Type::AMX, offset_a, offset_b, 0, offset_c, brgemm_in0_desc->get_layout(), std::vector{}, brgemm_out_desc->get_layout()); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.cpp index 5fffd007b93676..fc9aeeac10ee92 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.cpp @@ -7,8 +7,10 @@ #include "openvino/pass/pattern/matcher.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "snippets/itt.hpp" +#include "snippets/utils.hpp" #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/loop_manager.hpp" +#include "snippets/lowered/pass/insert_tail_loop.hpp" #include "snippets/snippets_isa.hpp" #include "transformations/snippets/x64/op/brgemm_cpu.hpp" @@ -16,19 +18,17 @@ namespace ov { namespace intel_cpu { namespace pass { -using LoopManager = snippets::lowered::LinearIR::LoopManager; -using LoopInfoPtr = LoopManager::LoopInfoPtr; -using LoopPort = LoopManager::LoopPort; +using LinearIR = snippets::lowered::LinearIR; +using LoopPort = LinearIR::LoopManager::LoopPort; +using ExpressionPtr = ov::snippets::lowered::ExpressionPtr; BrgemmBlocking::BrgemmBlocking() : Pass() {} void BrgemmBlocking::move_new_memory_buffer(snippets::lowered::LinearIR& linear_ir, const snippets::lowered::LinearIR::constExprIt& brgemm_it) { const auto& brgemm_expr = brgemm_it->get(); const auto wsp_expr = brgemm_expr->get_input_port_connector(2)->get_source().get_expr(); - const auto wsp_buffer = ov::as_type_ptr(wsp_expr->get_node()); - OPENVINO_ASSERT(wsp_buffer && wsp_buffer->is_new_memory(), "Incorrect 
Scratchpad buffer for Brgemm AMX"); - // [115164] Should be fully supported by explicit loops of blocking by K, N - OPENVINO_ASSERT(brgemm_expr->get_loop_ids().empty() && wsp_expr->get_loop_ids().empty(), "Incorrect blocking loop marking for Brgemm AMX"); + const auto wsp_buffer = ov::as_type_ptr(wsp_expr->get_node()); + OPENVINO_ASSERT(wsp_buffer, "Incorrect Scratchpad buffer for Brgemm AMX"); // If scratchpad with temp memory is not explicitly before Brgemm, need to move to there. if (wsp_expr != *std::prev(brgemm_it)) { const auto wsp_it = linear_ir.find(wsp_expr); @@ -36,24 +36,22 @@ void BrgemmBlocking::move_new_memory_buffer(snippets::lowered::LinearIR& linear_ } } -bool BrgemmBlocking::run(snippets::lowered::LinearIR& linear_ir) { +bool BrgemmBlocking::run(LinearIR& linear_ir) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::BrgemmBlocking") if (linear_ir.empty()) return false; - const auto& loop_manager = linear_ir.get_loop_manager(); - const size_t dim_idx = 1; + auto blocking_loop_exists = [&](const ExpressionPtr& brgemm_expr, const std::shared_ptr& brgemm) { + auto check_port = [&](const LoopPort& p) { + return p.expr_port->get_expr() == brgemm_expr && ov::snippets::utils::one_of(p.dim_idx, 0ul, 1ul); + }; - auto blocking_loop_exists = [&](const ov::snippets::lowered::ExpressionPtr& expr, - const std::shared_ptr& brgemm) { - const auto& loop_ids = expr->get_loop_ids(); + const auto& loop_ids = brgemm_expr->get_loop_ids(); for (const auto& id : loop_ids) { const auto loop = loop_manager->get_loop_info(id); - if (loop->dim_idx == dim_idx) { - OPENVINO_ASSERT(brgemm->get_input_count(0) == loop->increment, - "Brgemm ", brgemm, " has input count (", brgemm->get_input_count(0), - ") which doesn't match the increment(", loop->increment, ") of loop by M"); + if (std::any_of(loop->get_entry_points().begin(), loop->get_entry_points().end(), check_port) || + std::any_of(loop->get_exit_points().begin(), loop->get_exit_points().end(), check_port)) { return true; } } @@ -62,34 +60,141 @@ bool BrgemmBlocking::run(snippets::lowered::LinearIR& linear_ir) { bool modified = false; for (auto expr_it = linear_ir.begin(); expr_it != linear_ir.end(); expr_it++) { - const auto& expr = *expr_it; - const auto brgemm = ov::as_type_ptr(expr->get_node()); - if (!brgemm || blocking_loop_exists(expr, brgemm)) + const auto& brgemm_expr = *expr_it; + const auto brgemm = ov::as_type_ptr(brgemm_expr->get_node()); + if (!brgemm || blocking_loop_exists(brgemm_expr, brgemm)) continue; - const auto& input_shape_0 = expr->get_input_port_descriptor(0)->get_shape(); - const auto& input_layout_0 = expr->get_input_port_descriptor(0)->get_layout(); - const auto& dim = *(input_layout_0.rbegin() + dim_idx); - const auto& m = input_shape_0[dim]; - - const auto block_size = brgemm->get_m_block_size(); - brgemm->set_input_count(block_size); - - const auto work_amount = m; - const auto increment = block_size; - - auto loop_begin_it = expr_it, loop_end_it = std::next(expr_it); - std::vector entries{LoopPort(expr->get_input_port(0), true), LoopPort(expr->get_input_port(1), false)}; - // Scratchpad for AMX scenario is needed only as temporary buffer for each M block - it means that the Buffer should be in this loop. - // Other scratchpads (that after BrgemmCopyB) should be the loop outside. 
-        if (brgemm->is_with_compensations()) {
-            entries.emplace_back(expr->get_input_port(2), false);
-        } else if (brgemm->is_amx()) {
-            move_new_memory_buffer(linear_ir, expr_it);
-            loop_begin_it = std::prev(expr_it);
-        }
-        std::vector exits{LoopPort(expr->get_output_port(0), true)};
-        loop_manager->mark_loop(loop_begin_it, loop_end_it, work_amount, increment, dim_idx, entries, exits);
+        const auto& in_0_desc = brgemm_expr->get_input_port_descriptor(0);
+        const auto& in_1_desc = brgemm_expr->get_input_port_descriptor(1);
+        const auto& out_desc = brgemm_expr->get_output_port_descriptor(0);
+
+        const auto& in_0_planar_dims = ov::snippets::utils::get_planar_vdims(in_0_desc->get_shape(), in_0_desc->get_layout());
+        const auto& in_1_planar_dims = ov::snippets::utils::get_planar_vdims(in_1_desc->get_shape(), in_1_desc->get_layout());
+        const auto& out_preordered_dims = ov::snippets::utils::get_preordered_vdims(out_desc->get_shape(), out_desc->get_layout());
+
+        auto in_0_subtensor = in_0_desc->get_subtensor();
+        auto in_1_subtensor = in_1_desc->get_subtensor();
+        auto out_subtensor = out_desc->get_subtensor();
+
+        auto apply_m_blocking = [&]() {
+            const auto& m = *(out_preordered_dims.rbegin() + 1);
+            const auto block_size_m = brgemm->get_m_block_size();
+            if (block_size_m >= m) {
+                *(in_0_subtensor.rbegin() + 1) = m;
+                *(out_subtensor.rbegin() + 1) = m;
+            } else {
+                *(in_0_subtensor.rbegin() + 1) = block_size_m;
+                *(out_subtensor.rbegin() + 1) = block_size_m;
+
+                auto loop_begin_it = expr_it, loop_end_it = std::next(expr_it);
+                std::vector entries{LoopPort(brgemm_expr->get_input_port(0), true),
+                                    LoopPort(brgemm_expr->get_input_port(1), false)};
+                if (brgemm->is_with_compensations()) {
+                    entries.emplace_back(brgemm_expr->get_input_port(2), false);
+                } else if (brgemm->is_amx()) {
+                    move_new_memory_buffer(linear_ir, expr_it);
+                    loop_begin_it = std::prev(expr_it);
+                }
+                std::vector exits{LoopPort(brgemm_expr->get_output_port(0), true)};
+                loop_manager->mark_loop(loop_begin_it, loop_end_it, m, block_size_m, 1, entries, exits);
+            }
+        };
+
+        auto apply_n_blocking = [&]() {
+            const auto& n = *out_preordered_dims.rbegin();
+            const auto block_size_n = brgemm->get_n_block_size();
+            if (block_size_n >= n) {
+                *in_1_subtensor.rbegin() = n;
+                *out_subtensor.rbegin() = n;
+            } else {
+                *in_1_subtensor.rbegin() = block_size_n;
+                *out_subtensor.rbegin() = block_size_n;
+
+                auto loop_begin_it = expr_it, loop_end_it = std::next(expr_it);
+                std::vector entries{LoopPort(brgemm_expr->get_input_port(0), false),
+                                    LoopPort(brgemm_expr->get_input_port(1), true)};
+                if (brgemm->is_with_compensations()) {
+                    entries.emplace_back(brgemm_expr->get_input_port(2), true);
+                } else if (brgemm->is_amx()) {
+                    move_new_memory_buffer(linear_ir, expr_it);
+                    loop_begin_it = std::prev(expr_it);
+                }
+                std::vector exits{LoopPort(brgemm_expr->get_output_port(0), true)};
+                loop_manager->mark_loop(loop_begin_it, loop_end_it, n, block_size_n, 0, entries, exits);
+            }
+        };
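In apply_m_blocking and apply_n_blocking above, a loop is marked only when the block size is smaller than the dimension; otherwise the subtensor simply covers the whole dimension and no loop is needed. The boolean LoopPort flags record which ports advance together with the loop: input 0 and the output for the M loop, input 1 and the output for the N loop. A standalone sketch of the tiling these two loops produce (hypothetical block sizes; M = 100 and N = 700 echo one of the matmul test shapes):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Illustrative sketch (not plugin code): how M/N blocking tiles the output.
// The innermost kernel sees an (cur_m x cur_n) subtensor per iteration.
int main() {
    const std::size_t M = 100, N = 700, m_blk = 32, n_blk = 64;
    for (std::size_t m = 0; m < M; m += m_blk) {          // outer loop over rows (dim_idx = 1)
        const std::size_t cur_m = std::min(m_blk, M - m); // tail block when M % m_blk != 0
        for (std::size_t n = 0; n < N; n += n_blk) {      // inner loop over columns (dim_idx = 0)
            const std::size_t cur_n = std::min(n_blk, N - n);
            std::printf("kernel on C[%zu:%zu, %zu:%zu]\n", m, m + cur_m, n, n + cur_n);
        }
    }
}
```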
+        auto apply_k_blocking = [&]() {
+            const auto& k = *in_0_planar_dims.rbegin();
+            OPENVINO_ASSERT(k == *(in_1_planar_dims.rbegin() + 1), "Brgemm input descriptors have different K dimension value.");
+            const auto block_size_k = brgemm->get_k_block_size();
+            if (block_size_k >= k) {
+                *in_0_subtensor.rbegin() = k;
+                *(in_1_subtensor.rbegin() + 1) = k;
+            } else {
+                *in_0_subtensor.rbegin() = block_size_k;
+                *(in_1_subtensor.rbegin() + 1) = block_size_k;
+
+                auto loop_begin_it = expr_it, loop_end_it = std::next(expr_it);
+                std::vector entries{LoopPort(brgemm_expr->get_input_port(0), true, 0),
+                                    LoopPort(brgemm_expr->get_input_port(1), true, 1)};
+                if (brgemm->is_with_compensations()) {
+                    entries.emplace_back(brgemm_expr->get_input_port(2), false, 1);
+                } else if (brgemm->is_amx()) {
+                    move_new_memory_buffer(linear_ir, expr_it);
+                    loop_begin_it = std::prev(expr_it);
+                }
+                std::vector exits{LoopPort(brgemm_expr->get_output_port(0), false)};
+                auto loop_id = loop_manager->mark_loop(loop_begin_it, loop_end_it, k, block_size_k, entries, exits);
+                const auto loop_info = loop_manager->get_loop_info(loop_id);
+
+                auto first_iter_handler = [](LinearIR& linear_ir, LinearIR::constExprIt loop_end_it) {
+                    const auto loop_end = ov::as_type_ptr(loop_end_it->get()->get_node());
+                    OPENVINO_ASSERT(loop_end, "First loop iteration handler must be called on LoopEnd expression");
+                    const auto loop_id = loop_end->get_id();
+                    const auto& loop_manager = linear_ir.get_loop_manager();
+                    const auto& loop_info = loop_manager->get_loop_info(loop_id);
+                    const auto work_amount = loop_info->get_work_amount();
+                    const auto increment = loop_info->get_increment();
+                    if (work_amount <= increment)
+                        return false;
+
+                    auto new_loop_range = snippets::lowered::pass::InsertTailLoop::copy_loop(linear_ir, loop_id);
+                    const auto first_iter_loop_end = ov::as_type_ptr(std::prev(new_loop_range.end())->get()->get_node());
+                    auto first_iter_loop_info = loop_manager->get_loop_info(first_iter_loop_end->get_id());
+                    first_iter_loop_end->set_work_amount(increment);
+                    first_iter_loop_info->set_work_amount(increment);
+                    first_iter_loop_end->set_finalization_offsets(std::vector(loop_end->get_finalization_offsets().size(), 0));
+
+                    const auto loop_begin_it = linear_ir.find(linear_ir.get_expr_by_node(loop_end->get_loop_begin()));
+                    linear_ir.insert(loop_begin_it, new_loop_range.begin(), new_loop_range.end());
+
+                    const auto new_work_amount = work_amount - increment;
+                    loop_info->set_work_amount(new_work_amount);
+                    loop_end->set_work_amount(new_work_amount);
+
+                    // Update the original body's Brgemms with the new beta parameter
+                    for (auto expr_it = loop_begin_it; expr_it != loop_end_it; ++expr_it) {
+                        const auto& expr_node = expr_it->get()->get_node();
+                        if (const auto brgemm = ov::as_type_ptr(expr_node)) {
+                            brgemm->set_beta(1.f);
+                        }
+                    }
+                    return true;
+                };
+                loop_info->set_first_iter_handler(first_iter_handler);
+            }
+        };
+
+        apply_k_blocking();
+        apply_n_blocking();
+        apply_m_blocking();
+
+        brgemm_expr->get_input_port_descriptor(0)->set_subtensor(in_0_subtensor);
+        brgemm_expr->get_input_port_descriptor(1)->set_subtensor(in_1_subtensor);
+        brgemm_expr->get_output_port_descriptor(0)->set_subtensor(out_subtensor);
+        modified = true;
     }
     return modified;
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.hpp
index 585cd2ad0ae23e..81ae47aa3c6948 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.hpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_blocking.hpp
@@ -12,7 +12,7 @@ namespace pass {
 /**
  * @interface BrgemmBlocking
- * @brief Covers BrgemmCPU with blocking loop by M
+ * @brief Covers BrgemmCPU with blocking loops
  * @ingroup snippets
 */
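The first_iter_handler in apply_k_blocking above peels exactly one increment off the front of the K loop: InsertTailLoop::copy_loop clones the body, the clone's work amount is set to a single increment with zeroed finalization offsets, the original loop's work amount shrinks by that increment, and the Brgemms left in the original body switch to beta = 1. A standalone trace of that arithmetic (illustrative sketch, not the IR pass; K = 2500 matches the matmul shape added to the snippets tests below):

```cpp
#include <cstddef>
#include <cstdio>

// Sketch of first-iteration peeling: a loop with work amount W and increment I
// becomes a peeled copy that runs one increment with beta = 0, followed by the
// original loop with work amount W - I whose Brgemms accumulate with beta = 1.
int main() {
    const std::size_t K = 2500, k_blk = 1024;
    std::size_t work_amount = K;
    const std::size_t increment = k_blk;
    if (work_amount > increment) {
        std::printf("peeled copy:   k = [0, %zu), beta = 0\n", increment);
        work_amount -= increment;  // loop_info->set_work_amount(W - I)
        for (std::size_t k = increment; k < K; k += k_blk) {
            const std::size_t cur = (K - k < k_blk) ? K - k : k_blk;  // remainder block
            std::printf("original body: k = [%zu, %zu), beta = 1\n", k, k + cur);
        }
    }
}
```

For K = 2500 this yields blocks [0, 1024), [1024, 2048) and a 452-wide remainder, all accumulating onto the same output tile after the peeled first block.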
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp
index fff9182883df2d..319b17d3e6cb07 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp
@@ -6,6 +6,7 @@
 #include "fuse_load_store_and_convert.hpp"
 #include "snippets/snippets_isa.hpp"
+#include "snippets/lowered/loop_manager.hpp"
 #include "transformations/snippets/x64/op/load_convert.hpp"
 #include "transformations/snippets/x64/op/store_convert.hpp"
@@ -32,13 +33,13 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_load_convert(snippets::lowe
         return false;

     std::shared_ptr load_convert = nullptr;
-    if (const auto convert_saturation = ov::as_type_ptr(convert)) {
+    if (ov::is_type(convert)) {
         load_convert = std::make_shared(load->input_value(0),
-                                        convert_saturation->get_destination_type(),
+                                        convert->get_destination_type(),
                                         load->get_count(), load->get_offset());
-    } else if (const auto convert_truncation = ov::as_type_ptr(convert)) {
+    } else if (ov::is_type(convert)) {
         load_convert = std::make_shared(load->input_value(0),
-                                        convert_truncation->get_destination_type(),
+                                        convert->get_destination_type(),
                                         load->get_count(), load->get_offset());
     } else {
         OPENVINO_THROW("Type of Convert op is undefined. Supports only fusing Load and ConvertTruncation or ConvertSaturation ops");
     }
@@ -51,6 +52,13 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_load_convert(snippets::lowe
     const auto convert_expr_it = convert_it;
     const auto insertion_pos = std::next(convert_it);
     convert_it = linear_ir.insert(insertion_pos, load_convert_expr);
+
+    const auto& load_loop_ids = load_expr->get_loop_ids();
+    load_convert_expr->set_loop_ids(load_loop_ids);
+    const auto& loop_manager = linear_ir.get_loop_manager();
+    loop_manager->update_loops_port(load_loop_ids, load_expr->get_input_port(0), {load_convert_expr->get_input_port(0)}, true);
+    loop_manager->update_loops_port(load_loop_ids, convert_expr->get_output_port(0), {load_convert_expr->get_output_port(0)}, false);
+
     linear_ir.erase(std::find(linear_ir.cbegin(), convert_expr_it, load_expr));
     linear_ir.erase(convert_expr_it);
     linear_ir.replace_input(convert_consumers, load_convert_expr->get_output_port_connector(0));
@@ -60,7 +68,7 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low
     const auto& convert_expr = *convert_it;
-    const auto& convert = convert_expr->get_node();
+    const auto& convert = ov::as_type_ptr(convert_expr->get_node());
     const auto& input_connector = convert_expr->get_input_port_connector(0);
     const auto& output_connector = convert_expr->get_output_port_connector(0);
     if (convert->get_input_element_type(0) != ov::element::f32 && convert->get_input_element_type(0) != ov::element::i32)
@@ -77,13 +85,13 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low
         return false;

     std::shared_ptr store_convert = nullptr;
-    if (const auto convert_saturation = ov::as_type_ptr(convert)) {
+    if (ov::is_type(convert)) {
         store_convert = std::make_shared(convert->input_value(0),
-                                         convert_saturation->get_destination_type(),
+                                         convert->get_destination_type(),
                                          store->get_count(), store->get_offset());
-    } else if (const auto convert_truncation = ov::as_type_ptr(convert)) {
+    } else if (ov::is_type(convert)) {
         store_convert = std::make_shared(convert->input_value(0),
-                                         convert_truncation->get_destination_type(),
+                                         convert->get_destination_type(),
                                          store->get_count(), store->get_offset());
     } else {
         OPENVINO_THROW("Type of Convert op is undefined. Supports only fusing Store and ConvertTruncation or ConvertSaturation ops");
     }
@@ -96,6 +104,13 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert(snippets::low
     const auto convert_expr_it = convert_it;
     const auto insertion_pos = std::next(convert_it);
     convert_it = linear_ir.insert(insertion_pos, store_convert_expr);
+
+    const auto& convert_loop_ids = convert_expr->get_loop_ids();
+    store_convert_expr->set_loop_ids(convert_loop_ids);
+    const auto& loop_manager = linear_ir.get_loop_manager();
+    loop_manager->update_loops_port(convert_loop_ids, convert_expr->get_input_port(0), {store_convert_expr->get_input_port(0)}, true);
+    loop_manager->update_loops_port(convert_loop_ids, store_expr->get_output_port(0), {store_convert_expr->get_output_port(0)}, false);
+
     linear_ir.erase(std::find(convert_expr_it, linear_ir.cend(), store_expr));
     linear_ir.erase(convert_expr_it);
     linear_ir.replace_input(store_consumers, store_convert_expr->get_output_port_connector(0));
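Both fusions above now do the same loop bookkeeping: the fused LoadConvert or StoreConvert expression inherits the loop ids of the expressions it replaces, and update_loops_port rewires the enclosing loops' entry and exit points so they no longer reference the erased expressions. A toy model of that bookkeeping (structures and names are illustrative, not the snippets API):

```cpp
#include <cstddef>
#include <vector>

// Toy model of the port bookkeeping performed after fusing Load+Convert.
struct Expr;
struct Port { Expr* expr; std::size_t index; };
struct Loop { std::vector<Port> entries, exits; };
struct Expr { std::vector<std::size_t> loop_ids; };

void replace_port(std::vector<Port>& ports, const Expr* old_expr, Expr* new_expr) {
    for (auto& p : ports)
        if (p.expr == old_expr)
            p.expr = new_expr;  // the fused op takes over the old op's loop port
}

void fuse_bookkeeping(std::vector<Loop>& loops, Expr* load, Expr* convert, Expr* fused) {
    fused->loop_ids = load->loop_ids;  // the fused op lives in the same loops
    for (std::size_t id : load->loop_ids) {
        replace_port(loops[id].entries, load, fused);   // input side: was Load's input
        replace_port(loops[id].exits, convert, fused);  // output side: was Convert's output
    }
}
```

Without this step the loop emitter would still point at the erased Load and Convert expressions, which is exactly what the added update_loops_port calls prevent.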
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp
index 91bec8aee60d4a..0f14f9a7dc5d8a 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.cpp
@@ -16,7 +16,7 @@ bool ov::intel_cpu::pass::SetBrgemmCopyBBuffersShape::run(snippets::lowered::Lin
     auto get_buffer_from_output = [](const snippets::lowered::ExpressionPtr& expr, const size_t out_idx) {
         const auto& consumers = expr->get_output_port_connector(out_idx)->get_consumers();
         OPENVINO_ASSERT(consumers.size() == 1, "BrgemmCopyB must have only 1 consumer");
-        const auto buffer = ov::as_type_ptr(consumers.begin()->get_expr()->get_node());
+        const auto buffer = ov::as_type_ptr(consumers.begin()->get_expr()->get_node());
         OPENVINO_ASSERT(buffer, "BrgemmCopyB consumer must be Buffer");
         return buffer;
     };
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp
index fcac51286e00a6..c7eec92700a16a 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/set_brgemm_copy_b_buffers_shape.hpp
@@ -13,6 +13,8 @@ namespace pass {
 /**
  * @interface SetBrgemmCopyBBuffersShape
  * @brief Sets the allocation shape for the Buffers after BrgemmCopyB node using BrgemmCopyB parameters
+ *        This pass may be deprecated when a more generic memory management approach is introduced.
+ *        Ticket: 113744
  * @ingroup snippets
  */
 class SetBrgemmCopyBBuffersShape: public snippets::lowered::pass::Pass {
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp
index 939998c08bd79e..bd87737ed2c96e 100644
--- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp
+++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/set_brgemm_cpu_blocking_params.cpp
@@ -19,7 +19,6 @@
 #include "cpu_shape.h"
 #include "utils/general_utils.h"
-
 namespace ov {
 namespace intel_cpu {
 pass::SetBrgemmCPUBlockingParams::SetBrgemmCPUBlockingParams() {
@@ -35,42 +34,43 @@ pass::SetBrgemmCPUBlockingParams::SetBrgemmCPUBlockingParams() {
         return false;
     }

-    const auto dimsMatMulIn0 = snippets::utils::get_planar_pshape(brgemm->input(0)).get_shape();
-    const auto dimsMatMulIn1 = snippets::utils::get_planar_pshape(brgemm->input(1)).get_shape();
-    const auto K = *dimsMatMulIn0.rbegin();
-    const auto N = *dimsMatMulIn1.rbegin();
-
     const auto& input_1_precision = brgemm->get_input_element_type(1);
-
     // Ticket: 113745
     // TODO: extend block size selection heuristics
-    const size_t brgemm_block_size_m = 32;
-    const size_t brgemm_block_size_k = [&]() {
+    auto get_block_size_m = [&](const size_t M) {
+        return 32;
+    };
+    auto get_block_size_k = [&](const size_t K) {
         if (input_1_precision != ov::element::f32)
             return K;
         return K > 1024 ? 1024 : K > 512 ? 512 : K;
-    }();
-    const size_t brgemm_block_size_n = input_1_precision != ov::element::f32 ? N : 64;
-
-    brgemm->set_m_block_size(brgemm_block_size_m);
-    brgemm->set_k_block_size(brgemm_block_size_k);
-    brgemm->set_n_block_size(brgemm_block_size_n);
-
+    };
+    auto get_block_size_n = [&](const size_t N) {
+        return input_1_precision != ov::element::f32 ? N : 64;
+    };
+
+    const auto brgemm_in0_dims = snippets::utils::get_planar_pshape(brgemm->input(0)).get_shape();
+    const auto M = *(brgemm_in0_dims.rbegin() + 1);
+    const auto K = *brgemm_in0_dims.rbegin();
+    const auto brgemm_in1_dims = snippets::utils::get_planar_pshape(brgemm->input(1)).get_shape();
+    const auto N = *brgemm_in1_dims.rbegin();

     if (brgemm->is_with_data_repacking()) {
         const auto brgemm_copy_b = brgemm->get_brgemm_copy();
-
         const bool isAMXSupported = dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_amx);
         const auto precision = brgemm_copy_b->get_src_element_type();
         const auto brgemmVNNIFactor = brgemm_copy_b->get_brgemm_vnni_factor();
         const bool use_amx = isAMXSupported && precision != ov::element::f32 && (K % brgemmVNNIFactor == 0) && (N % brgemmVNNIFactor == 0);

-        const size_t copy_b_block_size_k = use_amx ? brgemm_block_size_k : K;
+        const size_t copy_b_block_size_k = use_amx ? get_block_size_k(K) : K;
         const size_t copy_b_block_size_n = 64;

         brgemm_copy_b->set_k_block_size(copy_b_block_size_k);
         brgemm_copy_b->set_n_block_size(copy_b_block_size_n);
     }

+    brgemm->set_m_block_size(get_block_size_m(M));
+    brgemm->set_k_block_size(get_block_size_k(K));
+    brgemm->set_n_block_size(get_block_size_n(N));
     return false;
 };
@@ -78,4 +78,4 @@ pass::SetBrgemmCPUBlockingParams::SetBrgemmCPUBlockingParams() {
     register_matcher(m, callback);
 }
 } // namespace intel_cpu
-} // namespace ov
+} // namespace ov
\ No newline at end of file
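For reference, the heuristics above extracted into a standalone sketch (assuming f32 weights, i.e. input_1_precision == f32; the sample values are chosen for illustration):

```cpp
#include <cstddef>
#include <cstdio>

// The block-size heuristics from the callback above, f32 case.
std::size_t get_block_size_m(std::size_t) { return 32; }
std::size_t get_block_size_k(std::size_t K) { return K > 1024 ? 1024 : K > 512 ? 512 : K; }
std::size_t get_block_size_n(std::size_t) { return 64; }

int main() {
    // e.g. the new 100x2500x100 matmul test case: M = 100, K = 2500, N = 100
    std::printf("M block: %zu\n", get_block_size_m(100));   // 32
    std::printf("K block: %zu\n", get_block_size_k(2500));  // 1024: K loop is peeled, then accumulates
    std::printf("K block: %zu\n", get_block_size_k(700));   // 512
    std::printf("K block: %zu\n", get_block_size_k(384));   // 384: no K blocking loop needed
    std::printf("N block: %zu\n", get_block_size_n(100));   // 64
}
```

For non-f32 weights, K and N blocking are effectively disabled (the block size equals the full dimension), so only the M loop is marked.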
diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
index 9f2afa638877ef..cf961d7978c5d7 100644
--- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
+++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp
@@ -30,6 +30,7 @@
 #include "transformations/common_optimizations/common_optimizations.hpp"
 #include "transformations/common_optimizations/wrap_interpolate_into_transposes.hpp"
 #include "transformations/common_optimizations/matmul_const_transposes_extraction.hpp"
+#include "transformations/common_optimizations/fuse_rotary_positional_embeddings.hpp"
 #include "transformations/control_flow/unroll_tensor_iterator.hpp"
 #include "transformations/fp16_compression/mark_decompression_convert_constant_folding.hpp"
 #include "transformations/op_conversions/convert_batch_to_space.hpp"
@@ -326,6 +327,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::AUGRUCellFusion);
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::CommonOptimizations);
+    CPU_REGISTER_PASS_COMMON(manager, ov::pass::RPE_Fusion);
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::WrapInterpolateIntoTransposes);
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::TransposeSinking);
     CPU_REGISTER_PASS_COMMON(manager, ov::pass::ConvertSequenceToTensorIterator);
@@ -473,6 +475,8 @@ void Transformations::PreLpt(const std::vector& defaultPrecis
     CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertTopK11ToTopK3);
     CPU_DISABLE_PASS_COMMON(manager, ov::pass::HSwishDecomposition);
     CPU_DISABLE_PASS_COMMON(manager, ov::pass::MatMulConstTransposesExtraction);
+    // CVS-126827: should be disabled until CPU supports this internal op
+    CPU_DISABLE_PASS_COMMON(manager, ov::pass::RPE_Fusion);
     CPU_DISABLE_PASS_X64(manager, ov::pass::HSigmoidDecomposition);
     CPU_DISABLE_PASS_X64(manager, ov::pass::ReduceL1Decomposition);
@@ -484,11 +488,6 @@ void Transformations::PreLpt(const std::vector& defaultPrecis
     CPU_ENABLE_PASS_COMMON(manager, ov::pass::ConvertDetectionOutput1ToDetectionOutput8);
     CPU_ENABLE_PASS_COMMON(manager, ov::pass::ConvertROIAlign3To9);

-    CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseAndToLogicalAnd);
-    CPU_ENABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseNotToLogicalNot);
-    CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseOrToLogicalOr);
-    CPU_DISABLE_PASS_COMMON(manager, ov::pass::ConvertBitwiseXorToLogicalXor);
-
     if (useLpt) {
         CPU_LPT_SCOPE(LowPrecisionTransformations_Part3);
         CPU_SET_CALLBACK_COMMON(manager,
diff --git a/src/plugins/intel_cpu/src/utils/cpu_utils.hpp b/src/plugins/intel_cpu/src/utils/cpu_utils.hpp
index 1c607d6c805c90..c2f7e867956382 100644
--- a/src/plugins/intel_cpu/src/utils/cpu_utils.hpp
+++ b/src/plugins/intel_cpu/src/utils/cpu_utils.hpp
@@ -86,11 +86,6 @@ inline bool isPerTensorOrPerChannelBroadcastable(const VectorDims &firstInputDims
     return true;
 }

-inline bool isEmptyTensorDesc(const
InferenceEngine::TensorDesc &td) { - const auto dims = td.getDims(); - return std::any_of(dims.begin(), dims.end(), [](size_t dim) { return dim == 0; } ); -} - /** * @brief Return precision to which given precision must be converted to be supported in plug-in * @param precision diff --git a/src/plugins/intel_cpu/src/utils/node_dumper.cpp b/src/plugins/intel_cpu/src/utils/node_dumper.cpp index ed4793ab2e88da..24d7a6d403081e 100644 --- a/src/plugins/intel_cpu/src/utils/node_dumper.cpp +++ b/src/plugins/intel_cpu/src/utils/node_dumper.cpp @@ -107,12 +107,10 @@ static void dumpInternalBlobs(const NodePtr& node, const DebugCapsConfig& config std::string file_name = NameFromType(node->getType()) + "_" + nodeName + "_blb" + std::to_string(i) + ".ieb"; auto dump_file = config.blobDumpDir + "/#" + std::to_string(node->getExecIndex()) + "_" + file_name; - TensorDesc desc = blb->getTensorDesc(); - if (InferenceEngine::details::convertPrecision(desc.getPrecision()) == ov::element::u1) + if (blb->getDesc().getPrecision() == ov::element::u1) continue; - MemoryPtr memory = std::make_shared(node->getEngine(), MemoryDescUtils::convertToDnnlBlockedMemoryDesc(desc), blb->buffer()); - BlobDumper dumper(memory); + BlobDumper dumper(blb); dump(dumper, dump_file, config); } } diff --git a/src/plugins/intel_cpu/tests/functional/extension/extension.cpp b/src/plugins/intel_cpu/tests/functional/extension/extension.cpp index b2f9c1c59ac0c1..1b9c0158e51bd3 100644 --- a/src/plugins/intel_cpu/tests/functional/extension/extension.cpp +++ b/src/plugins/intel_cpu/tests/functional/extension/extension.cpp @@ -205,7 +205,7 @@ static std::string get_extension_path() { } -TEST(Extension, XmlModelWithExtensionFromDSO) { +TEST(Extension, smoke_XmlModelWithExtensionFromDSO) { std::string model = R"V0G0N( @@ -260,6 +260,7 @@ TEST(Extension, XmlModelWithExtensionFromDSO) { std::vector input_values{1, 2, 3, 4, 5, 6, 7, 8}; std::vector expected{12, 13, 14, 15, 16, 17, 18, 19}; InferenceEngine::Core ie; + ie.SetConfig({ { ov::hint::inference_precision.name(), ov::element::f32.get_type_name() } }, "CPU"); ie.AddExtension(std::make_shared(get_extension_path())); InferenceEngine::Blob::CPtr weights; auto network = ie.ReadNetwork(model, weights); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index eb35d1ded6d1db..227e0dd40874cf 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -29,9 +29,9 @@ std::shared_ptr getFunction1() { auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 4, 1, 1}, std::vector{}, true); auto add = ngraph::builder::makeEltwise(params[0], in2add, ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add->output(0)); + auto relu1 = std::make_shared(add->output(0)); relu1->get_output_tensor(0).set_names({"relu1"}); - auto relu2 = std::make_shared(add->output(0)); + auto relu2 = std::make_shared(add->output(0)); relu2->get_output_tensor(0).set_names({"relu2"}); ngraph::NodeVector results{relu1, relu2}; @@ -51,13 +51,13 @@ std::shared_ptr getFunction2() { auto in2add = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); auto add = ngraph::builder::makeEltwise(split->output(0), in2add, 
ngraph::helpers::EltwiseTypes::ADD); - auto relu1 = std::make_shared(add); + auto relu1 = std::make_shared(add); auto in2mult = ngraph::builder::makeConstant(ngPrc, {1, 2, 1, 1}, std::vector{}, true); auto mult = ngraph::builder::makeEltwise(split->output(1), in2mult, ngraph::helpers::EltwiseTypes::MULTIPLY); - auto relu2 = std::make_shared(mult); + auto relu2 = std::make_shared(mult); - auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); + auto concat = std::make_shared(ngraph::OutputVector{relu1->output(0), relu2->output(0)}, 3); concat->get_output_tensor(0).set_names({"concat"}); return std::make_shared(concat, params, "SplitAddConcat"); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp index dfcac55e191f13..7bc6f7b10d3512 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/plugin/caching_tests.cpp @@ -40,37 +40,37 @@ namespace { }; static std::shared_ptr simple_function_non_max_suppression_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); - auto max_output_boxes_per_class = opset1::Constant::create(element::i32, Shape{1}, {10}); - auto iou_threshold = opset1::Constant::create(element::f32, Shape{1}, {0.75}); - auto score_threshold = opset1::Constant::create(element::f32, Shape{1}, {0.7}); + auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); + auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); + auto max_output_boxes_per_class = ov::op::v0::Constant::create(element::i32, Shape{1}, {10}); + auto iou_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.75}); + auto score_threshold = ov::op::v0::Constant::create(element::f32, Shape{1}, {0.7}); auto nms = std::make_shared(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, 0, true, element::i32); - auto res = std::make_shared(nms); + auto res = std::make_shared(nms); auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } static std::shared_ptr simple_function_matrix_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); + auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); + auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); ov::op::v8::MatrixNms::Attributes attr; // convert_precision does not support internal op 'NmsStaticShapeIE' attr.output_type = element::i32; auto nms = std::make_shared>(boxes, scores, attr); - auto res = std::make_shared(nms); + auto res = std::make_shared(nms); auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } static std::shared_ptr simple_function_multiclass_nms_internal(ngraph::element::Type, size_t) { - auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); - auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); - op::util::MulticlassNmsBase::Attributes attr; + auto boxes = std::make_shared(element::f32, Shape{1, 1000, 4}); + auto scores = std::make_shared(element::f32, Shape{1, 1, 1000}); + ov::op::util::MulticlassNmsBase::Attributes attr; 
attr.output_type = element::i32; auto nms = std::make_shared(boxes, scores, attr); - auto res = std::make_shared(nms); + auto res = std::make_shared(nms); auto func = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); return func; } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp index fee91b5c5a38d3..20c996333e661b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/core_config.cpp @@ -34,8 +34,9 @@ void core_configuration(ov::test::SubgraphBaseTest* test) { test->configuration.insert({ov::hint::inference_precision.name(), ov::element::f32.to_string()}); } #endif - // todo: issue: 123320 - test->convert_precisions = {{ ov::element::bf16, ov::element::f32 }, { ov::element::f16, ov::element::f32 }}; + // todo: issue: 123320 + test->convert_precisions.insert({ov::element::bf16, ov::element::f32}); + test->convert_precisions.insert({ov::element::f16, ov::element::f32}); } } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index 7f3e8cbea3a67d..3de84defe4b609 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -18,7 +18,7 @@ inline InferenceEngine::CNNNetwork getTargetNetwork() { auto input = std::make_shared(type, shape); auto mem_i = std::make_shared(type, shape, 0); auto mem_r = std::make_shared(mem_i, "id"); - auto mul = std::make_shared(mem_r, input); + auto mul = std::make_shared(mem_r, input); auto mem_w = std::make_shared(mul, "id"); auto sigm = std::make_shared(mul); mem_r->set_friendly_name("Memory"); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp index 852d4811963389..43068e06ead893 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp @@ -108,7 +108,7 @@ const std::vector testValues = { {}, { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} }, {}, - std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), + std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)), {}, { { ov::element::f16 }, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp index 53ecd904f12b3b..df4bccfc05931e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp @@ -9,7 +9,6 @@ using namespace LayerTestsDefinitions; using namespace 
InferenceEngine::details; -using namespace ngraph::opset1; namespace { const std::vector precisions = { @@ -17,9 +16,9 @@ const std::vector precisions = { // ngraph::element::f16 }; -const std::vector modes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector modes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; const std::vector inputShapesBS2 = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp index da2df80eb7dda0..31c13e56b02baa 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/binary_convolution.cpp @@ -36,7 +36,7 @@ const auto binConv2DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(padsEnd), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(padValues)); const auto binConv2DParams_ValidPadding = ::testing::Combine( @@ -46,7 +46,7 @@ const auto binConv2DParams_ValidPadding = ::testing::Combine( ::testing::Values(std::vector({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::VALID), + ::testing::Values(ov::op::PadType::VALID), ::testing::ValuesIn(padValues)); INSTANTIATE_TEST_SUITE_P( diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/comparison.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/comparison.cpp index 0b68b33e2074a9..22a4d9a9740d89 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/comparison.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/comparison.cpp @@ -37,18 +37,18 @@ std::vector model_type = { ov::element::boolean, }; -std::vector comparisonOpTypes = { - ngraph::helpers::ComparisonTypes::EQUAL, - ngraph::helpers::ComparisonTypes::NOT_EQUAL, - ngraph::helpers::ComparisonTypes::GREATER, - ngraph::helpers::ComparisonTypes::GREATER_EQUAL, - ngraph::helpers::ComparisonTypes::LESS, - ngraph::helpers::ComparisonTypes::LESS_EQUAL, +std::vector comparisonOpTypes = { + ov::test::utils::ComparisonTypes::EQUAL, + ov::test::utils::ComparisonTypes::NOT_EQUAL, + ov::test::utils::ComparisonTypes::GREATER, + ov::test::utils::ComparisonTypes::GREATER_EQUAL, + ov::test::utils::ComparisonTypes::LESS, + ov::test::utils::ComparisonTypes::LESS_EQUAL, }; -std::vector secondInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, +std::vector secondInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, }; std::map additional_config = {}; @@ -81,15 +81,15 @@ std::vector> input_shapes_is_ops_static = { {{2, 17, 3, 4}, {1}} }; -std::vector comparisonOpTypesIs = { - ngraph::helpers::ComparisonTypes::IS_FINITE, - ngraph::helpers::ComparisonTypes::IS_NAN +std::vector comparisonOpTypesIs = { + ov::test::utils::ComparisonTypes::IS_FINITE, + ov::test::utils::ComparisonTypes::IS_NAN }; const auto ComparisonTestParamsIs = ::testing::Combine( 
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_is_ops_static)), ::testing::ValuesIn(comparisonOpTypesIs), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config)); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp index 3bfe2fb06b814d..3a885a08f8d459 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp @@ -64,13 +64,13 @@ const auto conv2DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT)); + ::testing::Values(ov::op::PadType::EXPLICIT)); const auto conv2DParams_AutoPadValid = ::testing::Combine( ::testing::ValuesIn(kernels), ::testing::ValuesIn(strides), ::testing::Values(std::vector({0, 0})), ::testing::Values(std::vector({0, 0})), ::testing::ValuesIn(dilations), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::VALID)); + ::testing::Values(ov::op::PadType::VALID)); INSTANTIATE_TEST_SUITE_P( smoke_Convolution2D_ExplicitPadding, ConvolutionLayerTest, @@ -103,7 +103,7 @@ const auto conv2DParams_WeightLayout = ::testing::Combine(::testing::Values(kern ::testing::Values(padEnds), ::testing::Values(dilations), ::testing::Values(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT)); + ::testing::Values(ov::op::PadType::EXPLICIT)); INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D_SpecificWeightLayout, ConvolutionLayerTest, ::testing::Combine(conv2DParams_WeightLayout, @@ -124,13 +124,13 @@ const auto conv3DParams_ExplicitPadding = ::testing::Combine( ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(paddings3d), ::testing::ValuesIn(paddings3d), ::testing::ValuesIn(dilations3d), ::testing::ValuesIn(numOutChannels3D), - ::testing::Values(ngraph::op::PadType::EXPLICIT)); + ::testing::Values(ov::op::PadType::EXPLICIT)); const auto conv3DParams_AutoPadValid = ::testing::Combine( ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::Values(std::vector({0, 0, 0})), ::testing::Values(std::vector({0, 0, 0})), ::testing::ValuesIn(dilations3d), ::testing::ValuesIn(numOutChannels3D), - ::testing::Values(ngraph::op::PadType::VALID)); + ::testing::Values(ov::op::PadType::VALID)); INSTANTIATE_TEST_SUITE_P( smoke_Convolution3D_ExplicitPadding, ConvolutionLayerTest, diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_convolution.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_convolution.cpp index 8192ab595e5fc7..89b793f8bf7551 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/deformable_convolution.cpp @@ -166,7 +166,7 @@ const auto deformableConv2DParams_SingleTestCase = ::testing::Combine( 
::testing::ValuesIn(groups), ::testing::ValuesIn(single_deform_groups), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(with_bilinear_interpolation_pad)); INSTANTIATE_TEST_SUITE_P( @@ -215,7 +215,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(std::vector {2}), // gr. ::testing::ValuesIn(std::vector {2}), // def. gr. ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(with_bilinear_interpolation_pad)), ::testing::Values(false), ::testing::ValuesIn(netPrecisions), @@ -234,7 +234,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(std::vector {2}), // gr. ::testing::ValuesIn(std::vector {2}), // def. gr. ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(with_bilinear_interpolation_pad)), ::testing::Values(true), ::testing::ValuesIn(netPrecisions), @@ -266,7 +266,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(std::vector {4}), // gr. ::testing::ValuesIn(std::vector {1}), // def. gr. ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(with_bilinear_interpolation_pad)), ::testing::Values(false), ::testing::ValuesIn(netPrecisions), @@ -285,7 +285,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(std::vector {4}), // gr. ::testing::ValuesIn(std::vector {1}), // def. gr. ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(with_bilinear_interpolation_pad)), ::testing::Values(true), ::testing::ValuesIn(netPrecisions), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp index 5e84ede53312b5..95b9f4587518b0 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/depth_to_space.cpp @@ -15,9 +15,9 @@ const std::vector model_types = { ov::element::i16, }; -const std::vector modes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector modes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; const std::vector> input_shapes_bs2_static = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp index f8a80efe1f6223..ff7e75adf0f264 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -40,12 +40,12 @@ std::vector> in_shapes_static_check_collapse = { }; std::vector> in_shapes_dynamic = { - {{{ngraph::Dimension(1, 10), 200}, {{2, 200}, {1, 200}}}, - {{ngraph::Dimension(1, 10), 200}, {{2, 200}, {5, 200}}}}, + {{{ov::Dimension(1, 10), 200}, {{2, 200}, {1, 200}}}, + {{ov::Dimension(1, 10), 200}, {{2, 200}, {5, 200}}}}, }; std::vector> 
in_shapes_dynamic_large_upper_bound = { - {{{ngraph::Dimension(1, 1000000000000), 200}, {{2, 200}, {5, 200}}}}, + {{{ov::Dimension(1, 1000000000000), 200}, {{2, 200}, {5, 200}}}}, }; std::vector model_types = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 36a35d7bffd819..3745268569552d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -3,7 +3,6 @@ // #include -#include #include "single_op_tests/gru_sequence.hpp" #include "common_test_utils/test_constants.hpp" #include "common_test_utils/test_enums.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/invalid_cases/proposal.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/invalid_cases/proposal.cpp index dbb6dac26440de..86b57b92a44905 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/invalid_cases/proposal.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/invalid_cases/proposal.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/invalid_cases/proposal.hpp" +#include "shared_test_classes/single_op/invalid_cases/proposal.hpp" #include diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp index 74bec47ba16896..ce45353ab90e50 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/is_inf.cpp @@ -28,7 +28,7 @@ std::vector> input_shapes_static = { }; std::vector> input_shapes_dynamic = { - {{{ngraph::Dimension(1, 10), 200}, {{2, 200}, {1, 200}}}} + {{{ov::Dimension(1, 10), 200}, {{2, 200}, {1, 200}}}} }; std::vector model_types = { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp index 9dff6869e3272f..90fadce0861ae9 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp @@ -18,14 +18,14 @@ const std::vector> inStaticShapeParams = { const std::vector> inDynamicShapeParams = { // num_batches, num_boxes, 4 - {{{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), 4}, + {{{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 4}, {{1, 10, 4}, {2, 100, 4}}}, // num_batches, num_classes, num_boxes - {{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 3, 10}, {2, 5, 100}}}}, - {{{ngraph::Dimension(1, 10), ngraph::Dimension(1, 100), 4}, + {{{ov::Dimension(1, 10), ov::Dimension(1, 100), 4}, {{1, 10, 4}, {2, 100, 4}}}, - {{{ngraph::Dimension(1, 10), ngraph::Dimension(1, 100), ngraph::Dimension(1, 100)}}, + {{{ov::Dimension(1, 10), ov::Dimension(1, 100), ov::Dimension(1, 100)}}, {{1, 3, 10}, {2, 5, 
100}}}} }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp index 9daa95881ec134..8da9c1249f823e 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/mvn.cpp @@ -12,7 +12,7 @@ using ov::test::Mvn1LayerTest; using ov::test::Mvn6LayerTest; const std::vector empty_across_channels = {{}}; -const std::vector empty_reduction_axes = {{}}; +const std::vector empty_reduction_axes = {{}}; const std::vector> input_shapes_static = { {{8}}, @@ -69,7 +69,7 @@ const std::vector> input_shapes_reduction_axes_static = { const auto MvnReductionAxes = ::testing::Combine( ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_reduction_axes_static)), ::testing::Values(ov::element::f32), - ::testing::ValuesIn(std::vector{{1, 2, 3}, {2, 3}}), + ::testing::ValuesIn(std::vector{{1, 2, 3}, {2, 3}}), ::testing::ValuesIn(empty_across_channels), ::testing::ValuesIn(normalize_variance), ::testing::ValuesIn(epsilon), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp index 5838096b20eed0..ff058cecf6fc35 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp @@ -22,10 +22,10 @@ const std::vector inputStaticShape2D = { }; const std::vector inputDynamicShape2D = { - {{ngraph::Dimension::dynamic(), 10}, {{1, 10}, {2, 10}, {10, 10}}}, - {{ngraph::Dimension(1, 10), 10}, {{1, 10}, {2, 10}, {10, 10}}}, - {{10, ngraph::Dimension::dynamic()}, {{10, 1}, {10, 5}, {10, 10}}}, - {{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}, {{1, 10}, {2, 10}, {10, 10}}} + {{ov::Dimension::dynamic(), 10}, {{1, 10}, {2, 10}, {10, 10}}}, + {{ov::Dimension(1, 10), 10}, {{1, 10}, {2, 10}, {10, 10}}}, + {{10, ov::Dimension::dynamic()}, {{10, 1}, {10, 5}, {10, 10}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 10}, {2, 10}, {10, 10}}} }; const std::vector axis2D = { @@ -73,8 +73,8 @@ const std::vector inputStaticShape4D = { }; const std::vector inputDynamicShape4D = { - {{ngraph::Dimension::dynamic(), 100, ngraph::Dimension(1, 10), 1}, {{1, 100, 1, 1}, {100, 100, 5, 1}}}, - {{ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}, + {{ov::Dimension::dynamic(), 100, ov::Dimension(1, 10), 1}, {{1, 100, 1, 1}, {100, 100, 5, 1}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 100, 1, 1}, {50, 100, 4, 1}, {2, 100, 10, 1}}}, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 6348e1afccfd40..289ba62a3044e2 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -41,7 +41,7 @@ std::vector disabledTestPatterns() { // TODO: 53578. 
fork DW bf16 convolution does not support 3d cases yet R"(.*_DW_GroupConv.*_inFmts=(ndhwc|nCdhw16c).*ENFORCE_BF16=YES.*)", // TODO: 56143. Enable nspc convolutions for bf16 precision - R"(.*ConvolutionLayerCPUTest.*_inFmts=(ndhwc|nhwc).*ENFORCE_BF16=YES.*)", + R"(.*ConvolutionLayerCPUTest.*_inFmts=(ndhwc|nhwc).*INFERENCE_PRECISION_HINT=bf16.*)", // TODO: 56827. Sporadic test failures R"(.*smoke_Conv.+_FP32.ConvolutionLayerCPUTest\.CompareWithRefs.*TS=\(\(.\.67.+\).*inFmts=n.+c.*_primitive=jit_avx2.*)", // incorrect jit_uni_planar_convolution with dilation = {1, 2, 1} and output channel 1 @@ -260,6 +260,13 @@ std::vector disabledTestPatterns() { // Issue: 123321 retVector.emplace_back( R"(.*smoke_RNNSequenceCommonZeroClip/RNNSequenceTest.Inference.*hidden_size=1.*relu.*direction=reverse.*)"); + // Ticket: 122769 + retVector.emplace_back(R"(.*smoke_nonzero/NonZeroLayerTest.Inference/IS.*)"); + retVector.emplace_back(R"(.*smoke_NormalizeL2_.*)"); + retVector.emplace_back(R"(.*Extension.XmlModelWithExtensionFromDSO.*)"); + retVector.emplace_back(R"(.*Extension.OnnxModelWithExtensionFromDSO.*)"); + retVector.emplace_back(R"(.*ONNXQuantizedModels/QuantizedModelsTests.MaxPool.*)"); + retVector.emplace_back(R"(.*ONNXQuantizedModels/QuantizedModelsTests.Convolution.*)"); } // invalid test: checks u8 precision for runtime graph, while it should be f32 retVector.emplace_back(R"(smoke_NegativeQuantizedMatMulMultiplyFusion.*)"); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp index 78c1c8dccf0f84..77c78e31ca6b00 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/matmul.cpp @@ -20,6 +20,7 @@ std::vector> input_shapes{ {{1, 1, 32, 23}, {1, 1, 23, 68}}, {{1, 16, 384, 64}, {1, 16, 64, 384}}, {{1, 1, 100, 700}, {1, 1, 700, 100}}, + {{1, 1, 100, 2500}, {1, 1, 2500, 100}}, }; static inline std::vector> quantized_precisions() { diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp index 0a4d6d2e6e23b4..fe5bc52f776990 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp @@ -34,7 +34,7 @@ const auto quantConvBackpropData2DParams = ::testing::Combine( ::testing::ValuesIn(padEnds2D), ::testing::ValuesIn(dilations2D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp index 575d8031216c6e..d7df67e2eb9d06 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/adaptive_pooling.cpp @@ -2,49 +2,42 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/ov_tensor_utils.hpp" +#include "ov_models/utils/ov_helpers.hpp" #include 
"shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { -namespace { - std::vector pooledSpatialShape; - std::string mode; - std::vector inputShape; -} // namespace +namespace ov { +namespace test { -using AdaPoolSpecificParams = std::tuple< - std::vector, // pooled vector - std::vector>; // feature map shape +using AdaPoolSpecificParams = std::tuple, // pooled vector + std::vector>; // feature map shape -using AdaPoolLayerTestParams = std::tuple< - AdaPoolSpecificParams, - std::string, // mode - bool, // second Input is Constant - ElementType, // Net precision - TargetDevice>; // Device name +using AdaPoolLayerTestParams = std::tuple; // Device name -using AdaPoolLayerCPUTestParamsSet = std::tuple< - CPULayerTestsDefinitions::AdaPoolLayerTestParams, - CPUSpecificParams>; +using AdaPoolLayerCPUTestParamsSet = std::tuple; class AdaPoolLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - CPULayerTestsDefinitions::AdaPoolLayerTestParams basicParamsSet; + AdaPoolLayerTestParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; std::string td; ElementType netPr; bool isStatic; AdaPoolSpecificParams adaPar; + std::vector pooledSpatialShape; + std::vector inputShape; + std::string mode; std::tie(adaPar, mode, isStatic, netPr, td) = basicParamsSet; std::tie(pooledSpatialShape, inputShape) = adaPar; std::ostringstream result; @@ -66,22 +59,24 @@ class AdaPoolLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - CPULayerTestsDefinitions::AdaPoolSpecificParams adaPoolParams; + AdaPoolSpecificParams adaPoolParams; ElementType netPrecision; bool isStatic; + std::vector inputShape; std::tie(adaPoolParams, mode, isStatic, netPrecision, targetDevice) = basicParamsSet; std::tie(pooledVector, inputShape) = adaPoolParams; init_input_shapes(inputShape); if (!isStatic) { - for (auto &target : targetStaticShapes) { + for (auto& target : targetStaticShapes) { target.push_back({pooledVector.size()}); } } @@ -105,34 +100,36 @@ class AdaPoolLayerCPUTest : public testing::WithParamInterface createFunction(bool secondInputConst) { - ov::ParameterVector params{std::make_shared(ngraph::element::f32, inputDynamicShapes[0])}; + std::shared_ptr createFunction(bool secondInputConst) { + ov::ParameterVector params{std::make_shared(ov::element::f32, inputDynamicShapes[0])}; params.front()->set_friendly_name("ParamsInput"); std::shared_ptr secondInput; if (secondInputConst) { - secondInput = ngraph::op::Constant::create(ngraph::element::i32, ngraph::Shape{pooledVector.size()}, pooledVector); + secondInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{pooledVector.size()}, pooledVector); } else { - auto pooledParam = std::make_shared(ngraph::element::i32, ngraph::Shape{pooledVector.size()}); + auto pooledParam = + std::make_shared(ov::element::i32, ov::Shape{pooledVector.size()}); pooledParam->set_friendly_name("ParamSecondInput"); params.push_back(pooledParam); secondInput = pooledParam; } - auto adapoolMax = std::make_shared(params[0], secondInput, ngraph::element::i32); + auto 
adapoolMax = std::make_shared(params[0], secondInput, ov::element::i32); adapoolMax->get_rt_info() = getCPUInfo(); - auto adapoolAvg = std::make_shared(params[0], secondInput); + auto adapoolAvg = std::make_shared(params[0], secondInput); adapoolAvg->get_rt_info() = getCPUInfo(); - auto function = (mode == "max" ? std::make_shared(adapoolMax->outputs(), params, "AdaPoolMax") : - std::make_shared(adapoolAvg->outputs(), params, "AdaPoolAvg")); + auto function = (mode == "max" ? std::make_shared(adapoolMax->outputs(), params, "AdaPoolMax") + : std::make_shared(adapoolAvg->outputs(), params, "AdaPoolAvg")); return function; } void validate() override { auto actualOutputs = get_plugin_outputs(); if (function->get_parameters().size() == 2) { - auto pos = std::find_if(inputs.begin(), inputs.end(), - [](const std::pair, ov::Tensor> ¶ms) { + auto pos = std::find_if(inputs.begin(), + inputs.end(), + [](const std::pair, ov::Tensor>& params) { return params.first->get_friendly_name() == "ParamSecondInput"; }); OPENVINO_ASSERT(pos != inputs.end()); @@ -140,10 +137,10 @@ class AdaPoolLayerCPUTest : public testing::WithParamInterface(); + auto* dataPtr = tensor.data(); for (size_t i = 0; i < pooledVector.size(); i++) { dataPtr[i] = pooledVector[i]; } } else { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2560, 0, 256); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 2560; + in_data.resolution = 256; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -170,6 +171,7 @@ class AdaPoolLayerCPUTest : public testing::WithParamInterface pooledVector; + std::string mode; }; TEST_P(AdaPoolLayerCPUTest, CompareWithRefs) { @@ -238,329 +240,275 @@ std::vector filterCPUInfoForDevice(std::string dims = "3D", s return resCPUParams; } -const std::vector netPrecisions = { - ElementType::f32, - ElementType::bf16 -}; +const std::vector netPrecisions = {ElementType::f32, ElementType::bf16}; -const std::vector> pooled3DVector = { - { 1 }, - { 3 }, - { 5 } -}; -const std::vector> pooled4DVector = { - { 1, 1 }, - { 3, 5 }, - { 5, 5 } -}; +const std::vector> pooled3DVector = {{1}, {3}, {5}}; +const std::vector> pooled4DVector = {{1, 1}, {3, 5}, {5, 5}}; const std::vector> pooled5DVector = { - { 1, 1, 1 }, - { 3, 5, 1 }, - { 3, 5, 3 }, + {1, 1, 1}, + {3, 5, 1}, + {3, 5, 3}, }; std::vector> staticInput3DShapeVector = {{{1, 17, 3}, {3, 7, 5}}}; const std::vector> input3DShapeVector = { - { - {{{-1, 17, -1}, {{1, 17, 3}, {3, 17, 5}, {3, 17, 5}}}}, - {{{{1, 10}, 20, {1, 10}}, {{1, 20, 5}, {2, 20, 4}, {3, 20, 6}}}} - } -}; + {{{{-1, 17, -1}, {{1, 17, 3}, {3, 17, 5}, {3, 17, 5}}}}, + {{{{1, 10}, 20, {1, 10}}, {{1, 20, 5}, {2, 20, 4}, {3, 20, 6}}}}}}; std::vector> staticInput4DShapeVector = {{{1, 3, 1, 1}, {3, 17, 5, 2}}}; const std::vector> input4DShapeVector = { - { - {{{-1, 3, -1, -1}, {{1, 3, 1, 1}, {3, 3, 5, 2}, {3, 3, 5, 2}}}}, - {{{{1, 10}, 3, {1, 10}, {1, 10}}, {{2, 3, 10, 6}, {3, 3, 6, 5}, {3, 3, 6, 5}}}} - } -}; + {{{{-1, 3, -1, -1}, {{1, 3, 1, 1}, {3, 3, 5, 2}, {3, 3, 5, 2}}}}, + {{{{1, 10}, 3, {1, 10}, {1, 10}}, {{2, 3, 10, 6}, {3, 3, 6, 5}, {3, 3, 6, 5}}}}}}; -std::vector> staticInput5DShapeVector = {{{ 1, 17, 2, 5, 2}, {3, 17, 4, 5, 4}}}; +std::vector> staticInput5DShapeVector = {{{1, 17, 2, 5, 2}, {3, 17, 4, 5, 4}}}; const std::vector> input5DShapeVector = { - { - {{{-1, 17, -1, -1, -1}, 
{{1, 17, 2, 5, 2}, {3, 17, 4, 5, 4}, {3, 17, 4, 5, 4}}}}, - {{{{1, 10}, 3, {1, 10}, {1, 10}, {1, 10}}, {{3, 3, 2, 5, 2}, {1, 3, 4, 5, 4}, {1, 3, 4, 5, 4}}}} - } -}; + {{{{-1, 17, -1, -1, -1}, {{1, 17, 2, 5, 2}, {3, 17, 4, 5, 4}, {3, 17, 4, 5, 4}}}}, + {{{{1, 10}, 3, {1, 10}, {1, 10}, {1, 10}}, {{3, 3, 2, 5, 2}, {1, 3, 4, 5, 4}, {1, 3, 4, 5, 4}}}}}}; -const auto adaPool3DParams = ::testing::Combine( - ::testing::ValuesIn(pooled3DVector), // output spatial shape - ::testing::ValuesIn(input3DShapeVector) // feature map shape +const auto adaPool3DParams = ::testing::Combine(::testing::ValuesIn(pooled3DVector), // output spatial shape + ::testing::ValuesIn(input3DShapeVector) // feature map shape ); -const auto adaPool4DParams = ::testing::Combine( - ::testing::ValuesIn(pooled4DVector), // output spatial shape - ::testing::ValuesIn(input4DShapeVector) // feature map shape +const auto adaPool4DParams = ::testing::Combine(::testing::ValuesIn(pooled4DVector), // output spatial shape + ::testing::ValuesIn(input4DShapeVector) // feature map shape ); -const auto adaPool5DParams = ::testing::Combine( - ::testing::ValuesIn(pooled5DVector), // output spatial shape - ::testing::ValuesIn(input5DShapeVector) // feature map shape +const auto adaPool5DParams = ::testing::Combine(::testing::ValuesIn(pooled5DVector), // output spatial shape + ::testing::ValuesIn(input5DShapeVector) // feature map shape ); const auto staticAdaPool3DParams = ::testing::Combine( - ::testing::ValuesIn(pooled3DVector), // output spatial shape - ::testing::ValuesIn(static_shapes_to_test_representation(staticInput3DShapeVector)) // feature map shape + ::testing::ValuesIn(pooled3DVector), // output spatial shape + ::testing::ValuesIn(static_shapes_to_test_representation(staticInput3DShapeVector)) // feature map shape ); const auto staticAdaPool4DParams = ::testing::Combine( - ::testing::ValuesIn(pooled4DVector), // output spatial shape - ::testing::ValuesIn(static_shapes_to_test_representation(staticInput4DShapeVector)) // feature map shape + ::testing::ValuesIn(pooled4DVector), // output spatial shape + ::testing::ValuesIn(static_shapes_to_test_representation(staticInput4DShapeVector)) // feature map shape ); const auto staticAdaPool5DParams = ::testing::Combine( - ::testing::ValuesIn(pooled5DVector), // output spatial shape - ::testing::ValuesIn(static_shapes_to_test_representation(staticInput5DShapeVector)) // feature map shape + ::testing::ValuesIn(pooled5DVector), // output spatial shape + ::testing::ValuesIn(static_shapes_to_test_representation(staticInput5DShapeVector)) // feature map shape ); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolAvg3DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - adaPool3DParams, - ::testing::Values("avg"), - ::testing::Values(false), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("3D", "avg"))), +INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolAvg3DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(adaPool3DParams, + ::testing::Values("avg"), + ::testing::Values(false), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("3D", "avg"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolAvg4DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - adaPool4DParams, - ::testing::Values("avg"), - ::testing::Values(false), - 
::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("4D", "avg"))), +INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolAvg4DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(adaPool4DParams, + ::testing::Values("avg"), + ::testing::Values(false), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("4D", "avg"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolAvg5DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - adaPool5DParams, - ::testing::Values("avg"), - ::testing::Values(false), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("5D", "avg"))), +INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolAvg5DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(adaPool5DParams, + ::testing::Values("avg"), + ::testing::Values(false), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("5D", "avg"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolMax3DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - adaPool3DParams, - ::testing::Values("max"), - ::testing::Values(false), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("3D", "max"))), +INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolMax3DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(adaPool3DParams, + ::testing::Values("max"), + ::testing::Values(false), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("3D", "max"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolMax4DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - adaPool4DParams, - ::testing::Values("max"), - ::testing::Values(false), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("4D", "max"))), +INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolMax4DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(adaPool4DParams, + ::testing::Values("max"), + ::testing::Values(false), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("4D", "max"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolMax5DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - adaPool5DParams, - ::testing::Values("max"), - ::testing::Values(false), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("5D", "max"))), +INSTANTIATE_TEST_SUITE_P(smoke_AdaPoolMax5DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(adaPool5DParams, + ::testing::Values("max"), + ::testing::Values(false), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("5D", "max"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolAvg3DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - 
::testing::Combine( - staticAdaPool3DParams, - ::testing::Values("avg"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("3D", "avg"))), +INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolAvg3DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(staticAdaPool3DParams, + ::testing::Values("avg"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("3D", "avg"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolAvg4DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - staticAdaPool4DParams, - ::testing::Values("avg"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("4D", "avg"))), +INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolAvg4DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(staticAdaPool4DParams, + ::testing::Values("avg"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("4D", "avg"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolAvg5DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - staticAdaPool5DParams, - ::testing::Values("avg"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("5D", "avg"))), +INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolAvg5DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(staticAdaPool5DParams, + ::testing::Values("avg"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("5D", "avg"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolMax3DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - staticAdaPool3DParams, - ::testing::Values("max"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("3D", "max"))), +INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolMax3DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(staticAdaPool3DParams, + ::testing::Values("max"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("3D", "max"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolMax4DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - staticAdaPool4DParams, - ::testing::Values("max"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("4D", "max"))), +INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolMax4DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(staticAdaPool4DParams, + ::testing::Values("max"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + 
::testing::ValuesIn(filterCPUInfoForDevice("4D", "max"))), AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolMax5DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - staticAdaPool5DParams, - ::testing::Values("max"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice("5D", "max"))), +INSTANTIATE_TEST_SUITE_P(smoke_StaticAdaPoolMax5DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(staticAdaPool5DParams, + ::testing::Values("max"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice("5D", "max"))), AdaPoolLayerCPUTest::getTestCaseName); - -// in 1-channel cases {..., 1, 1, 1} shape cannot be correctly resolved on oneDnn level, so it was removed from instances +// in 1-channel cases {..., 1, 1, 1} shape cannot be correctly resolved on oneDnn level, so it was removed from +// instances const std::vector> input3DShape1Channel = { - { - {{{-1, -1, -1}, {{1, 1, 2}, {1, 1, 2}, {1, 1, 2}}}}, - {{{{1, 10}, {1, 10}, {1, 10}}, {{1, 1, 2}, {2, 1, 2}, {2, 1, 2}}}} - } -}; + {{{{-1, -1, -1}, {{1, 1, 2}, {1, 1, 2}, {1, 1, 2}}}}, + {{{{1, 10}, {1, 10}, {1, 10}}, {{1, 1, 2}, {2, 1, 2}, {2, 1, 2}}}}}}; const std::vector> input4DShape1Channel = { - { - {{{-1, -1, -1, -1}, {{1, 1, 1, 2}, {2, 1, 2, 1}, {2, 1, 2, 1}}}}, - {{{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{1, 1, 1, 2}, {1, 1, 1, 2}, {2, 1, 2, 1}}}} - } -}; + {{{{-1, -1, -1, -1}, {{1, 1, 1, 2}, {2, 1, 2, 1}, {2, 1, 2, 1}}}}, + {{{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{1, 1, 1, 2}, {1, 1, 1, 2}, {2, 1, 2, 1}}}}}}; const std::vector> input5DShape1Channel = { - { - {{{-1, -1, -1, -1, -1}, {{1, 1, 1, 1, 2}, {1, 1, 1, 1, 2}, {2, 1, 1, 2, 1}}}}, - {{{{1, 10}, {1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{1, 1, 1, 1, 2}, {1, 1, 1, 1, 2}, {2, 1, 1, 2, 1}}}} - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_AdaPool_1ch_Avg3DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(std::vector> { - {1}, {2}}), - ::testing::ValuesIn(input3DShape1Channel)), - ::testing::Values("avg"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{ncw, x}, {ncw}, {}, {}})), - AdaPoolLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AdaPool_1ch_Avg4DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(std::vector> { - {1, 1}, - {2, 2} - }), - ::testing::ValuesIn(input4DShape1Channel)), - ::testing::Values("avg"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{nchw, x}, {nchw}, {}, {}})), - AdaPoolLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_AdaPool_1ch_Avg5DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(std::vector> { - {1, 1, 1}, {2, 2, 2}}), - ::testing::ValuesIn(input5DShape1Channel)), - ::testing::Values("avg"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{ncdhw, x}, {ncdhw}, {}, {}})), - AdaPoolLayerCPUTest::getTestCaseName); - + {{{{-1, 
-1, -1, -1, -1}, {{1, 1, 1, 1, 2}, {1, 1, 1, 1, 2}, {2, 1, 1, 2, 1}}}}, + {{{{1, 10}, {1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{1, 1, 1, 1, 2}, {1, 1, 1, 1, 2}, {2, 1, 1, 2, 1}}}}}}; + +INSTANTIATE_TEST_SUITE_P( + smoke_AdaPool_1ch_Avg3DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Combine(::testing::ValuesIn(std::vector>{{1}, + {2}}), + ::testing::ValuesIn(input3DShape1Channel)), + ::testing::Values("avg"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{ncw, x}, {ncw}, {}, {}})), + AdaPoolLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_AdaPool_1ch_Avg4DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Combine(::testing::ValuesIn(std::vector>{{1, 1}, + {2, 2}}), + ::testing::ValuesIn(input4DShape1Channel)), + ::testing::Values("avg"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{nchw, x}, {nchw}, {}, {}})), + AdaPoolLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_AdaPool_1ch_Avg5DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine( + ::testing::Combine(::testing::Combine(::testing::ValuesIn(std::vector>{{1, 1, 1}, {2, 2, 2}}), + ::testing::ValuesIn(input5DShape1Channel)), + ::testing::Values("avg"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{ncdhw, x}, {ncdhw}, {}, {}})), + AdaPoolLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_AdaPool_1ch_Max3DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Combine(::testing::ValuesIn(std::vector>{{1}, + {2}}), + ::testing::ValuesIn(input3DShape1Channel)), + ::testing::Values("max"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{ncw, x}, {ncw}, {}, {}})), + AdaPoolLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_AdaPool_1ch_Max4DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Combine(::testing::ValuesIn(std::vector>{{1, 1}, + {2, 2}}), + ::testing::ValuesIn(input4DShape1Channel)), + ::testing::Values("max"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{nchw, x}, {nchw}, {}, {}})), + AdaPoolLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_AdaPool_1ch_Max5DLayoutTest, + AdaPoolLayerCPUTest, + ::testing::Combine( + ::testing::Combine(::testing::Combine(::testing::ValuesIn(std::vector>{{1, 1, 1}, {2, 2, 2}}), + ::testing::ValuesIn(input5DShape1Channel)), + ::testing::Values("max"), + ::testing::Values(true), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{ncdhw, x}, {ncdhw}, {}, {}})), + AdaPoolLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AdaPool_1ch_Max3DLayoutTest, AdaPoolLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(std::vector> { - {1}, {2}}), - ::testing::ValuesIn(input3DShape1Channel)), - ::testing::Values("max"), - ::testing::Values(true), - ::testing::ValuesIn(netPrecisions), - 
::testing::Values(ov::test::utils::DEVICE_CPU)),
-                                 ::testing::Values(CPUSpecificParams{{ncw, x}, {ncw}, {}, {}})),
-                         AdaPoolLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_AdaPool_1ch_Max4DLayoutTest, AdaPoolLayerCPUTest,
-                         ::testing::Combine(
-                                 ::testing::Combine(
-                                         ::testing::Combine(
-                                                 ::testing::ValuesIn(std::vector<std::vector<int64_t>> {
-                                                     {1, 1}, {2, 2}}),
-                                                 ::testing::ValuesIn(input4DShape1Channel)),
-                                         ::testing::Values("max"),
-                                         ::testing::Values(true),
-                                         ::testing::ValuesIn(netPrecisions),
-                                         ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                                 ::testing::Values(CPUSpecificParams{{nchw, x}, {nchw}, {}, {}})),
-                         AdaPoolLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_AdaPool_1ch_Max5DLayoutTest, AdaPoolLayerCPUTest,
-                         ::testing::Combine(
-                                 ::testing::Combine(
-                                         ::testing::Combine(
-                                                 ::testing::ValuesIn(std::vector<std::vector<int64_t>> {
-                                                     {1, 1, 1},
-                                                     {2, 2, 2}
-                                                 }),
-                                                 ::testing::ValuesIn(input5DShape1Channel)),
-                                         ::testing::Values("max"),
-                                         ::testing::Values(true),
-                                         ::testing::ValuesIn(netPrecisions),
-                                         ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                                 ::testing::Values(CPUSpecificParams{{ncdhw, x}, {ncdhw}, {}, {}})),
-                         AdaPoolLayerCPUTest::getTestCaseName);
-
-}  // namespace
-}  // namespace CPULayerTestsDefinitions
+}  // namespace
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp
index bc407ef7e19303..2c5f131cbbe448 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_cell.cpp
@@ -2,39 +2,47 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "common_test_utils/node_builders/augru_cell.hpp"
+
+#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "test_utils/cpu_test_utils.hpp"

 using namespace CPUTestUtils;
-using namespace ov::test;

-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {

-using AUGRUCellCpuSpecificParams = typename std::tuple<
-        std::vector<InputShape>,            // Shapes
-        bool,                               // Using decompose to sub-ops transformation
-        std::vector<std::string>,           // Activations
-        float,                              // Clip
-        bool,                               // Linear before reset
-        ElementType,                        // Network precision
-        CPUSpecificParams,                  // CPU specific params
-        std::map<std::string, std::string>  // Additional config
->;
+using AUGRUCellCpuSpecificParams = typename std::tuple<std::vector<InputShape>,  // Shapes
+                                                       bool,  // Using decompose to sub-ops transformation
+                                                       std::vector<std::string>,  // Activations
+                                                       float,                     // Clip
+                                                       bool,                      // Linear before reset
+                                                       ElementType,               // Network precision
+                                                       CPUSpecificParams,         // CPU specific params
+                                                       ov::AnyMap                 // Additional config
+                                                       >;

 class AUGRUCellCPUTest : public testing::WithParamInterface<AUGRUCellCpuSpecificParams>,
-                         virtual public ov::test::SubgraphBaseTest, public CPUTestsBase {
+                         virtual public ov::test::SubgraphBaseTest,
+                         public CPUTestsBase {
 public:
-    static std::string getTestCaseName(const testing::TestParamInfo<AUGRUCellCpuSpecificParams> &obj) {
+    static std::string getTestCaseName(const testing::TestParamInfo<AUGRUCellCpuSpecificParams>& obj) {
         std::vector<InputShape> inputShapes;
         bool decompose, linearBeforeReset;
         std::vector<std::string> activations;
         float clip = 0.f;
         ElementType netPrecision;
         CPUSpecificParams cpuParams;
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;

-        std::tie(inputShapes, decompose, activations, clip, linearBeforeReset, netPrecision, cpuParams, additionalConfig) = obj.param;
+        std::tie(inputShapes,
+                 decompose,
+                 activations,
+                 clip,
+                 linearBeforeReset,
+                 netPrecision,
+                 cpuParams,
+                 additionalConfig) = obj.param;
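+        // Every field unpacked above is streamed into the test name that follows,
+        // so each instantiated GTest case maps back to one parameter combination.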
        std::ostringstream result;
         result << "IS=(";
@@ -50,7 +58,7 @@ class AUGRUCellCPUTest : public testing::WithParamInterface<AUGRUCellCpuSpecificParams>,
-std::vector<std::map<std::string, std::string>> additionalConfig
-    = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}},
-       {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}};
+std::vector<ov::AnyMap> additionalConfig = {{ov::hint::inference_precision(ov::element::f32)},
+                                            {ov::hint::inference_precision(ov::element::bf16)}};

 CPUSpecificParams cpuParams{{nc, nc}, {nc}, {"ref_any"}, "ref_any"};

@@ -127,79 +144,80 @@ std::vector<std::vector<std::string>> activations = {{"sigmoid", "tanh"}};
 std::vector<float> clip = {0.f};
 // dev_api::augrucell does not support lbr so far.
 std::vector<bool> linearBeforeReset = {false};
-std::vector<ElementType> netPrecisions = { ElementType::f32 };
-
-const std::vector<std::vector<InputShape>> staticShapes = {
-    { { {}, { {1, 1} } },    // Static shapes
-      { {}, { {1, 1} } },
-      { {}, { {1, 1} } } },
-    { { {}, { {1, 1} } },    // Static shapes
-      { {}, { {1, 10} } },
-      { {}, { {1, 1} } } },
-    { { {}, { {1, 30} } },   // Static shapes
-      { {}, { {1, 10} } },
-      { {}, { {1, 1} } } },
-    { { {}, { {1, 30} } },   // Static shapes
-      { {}, { {1, 1} } },
-      { {}, { {1, 1} } } },
-    { { {}, { {3, 1} } },    // Static shapes
-      { {}, { {3, 1} } },
-      { {}, { {3, 1} } } },
-    { { {}, { {5, 1} } },    // Static shapes
-      { {}, { {5, 1} } },
-      { {}, { {5, 1} } } },
-    { { {}, { {5, 30} } },   // Static shapes
-      { {}, { {5, 10} } },
-      { {}, { {5, 1} } } }
-};
-
-INSTANTIATE_TEST_SUITE_P(smoke_static, AUGRUCellCPUTest,
-                ::testing::Combine(::testing::ValuesIn(staticShapes),
-                                   ::testing::ValuesIn(shouldDecompose),
-                                   ::testing::ValuesIn(activations),
-                                   ::testing::ValuesIn(clip),
-                                   ::testing::ValuesIn(linearBeforeReset),
-                                   ::testing::ValuesIn(netPrecisions),
-                                   ::testing::Values(cpuParams),
-                                   ::testing::ValuesIn(additionalConfig)),
-                AUGRUCellCPUTest::getTestCaseName);
+std::vector<ElementType> netPrecisions = {ElementType::f32};
+
+const std::vector<std::vector<InputShape>> staticShapes = {{{{}, {{1, 1}}},  // Static shapes
+                                                            {{}, {{1, 1}}},
+                                                            {{}, {{1, 1}}}},
+                                                           {{{}, {{1, 1}}},  // Static shapes
+                                                            {{}, {{1, 10}}},
+                                                            {{}, {{1, 1}}}},
+                                                           {{{}, {{1, 30}}},  // Static shapes
+                                                            {{}, {{1, 10}}},
+                                                            {{}, {{1, 1}}}},
+                                                           {{{}, {{1, 30}}},  // Static shapes
+                                                            {{}, {{1, 1}}},
+                                                            {{}, {{1, 1}}}},
+                                                           {{{}, {{3, 1}}},  // Static shapes
+                                                            {{}, {{3, 1}}},
+                                                            {{}, {{3, 1}}}},
+                                                           {{{}, {{5, 1}}},  // Static shapes
+                                                            {{}, {{5, 1}}},
+                                                            {{}, {{5, 1}}}},
+                                                           {{{}, {{5, 30}}},  // Static shapes
+                                                            {{}, {{5, 10}}},
+                                                            {{}, {{5, 1}}}}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_static,
+                         AUGRUCellCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(staticShapes),
+                                            ::testing::ValuesIn(shouldDecompose),
+                                            ::testing::ValuesIn(activations),
+                                            ::testing::ValuesIn(clip),
+                                            ::testing::ValuesIn(linearBeforeReset),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(cpuParams),
+                                            ::testing::ValuesIn(additionalConfig)),
+                         AUGRUCellCPUTest::getTestCaseName);

 const std::vector<std::vector<InputShape>> dynamicShapes = {
-    { { {-1}, 1 },                        // Dynamic shape 0
-      { {1, 1}, {3, 1}, {5, 1} } },       // Target shapes
-      { { {-1}, 1 },                      // Dynamic shape 1
-      { {1, 1}, {3, 1}, {5, 1} } },       // Target shapes
-      { { {-1}, 1 },                      // Dynamic shape 2
-      { {1, 1}, {3, 1}, {5, 1} } } },     // Target shapes
-    { { { {1, 10}, 30 },                  // Dynamic shape 0
-      { {2, 30}, {5, 30}, {8, 30} } },    // Target shapes
-      { { {1, 10}, 10 },                  // Dynamic shape 1
-      { {2, 10}, {5, 10}, {8, 10} } },    // Target shapes
-      { { {1, 10}, 1 },                   // Dynamic shape 2
-      { {2, 1}, {5, 1}, {8, 1} } } },     // Target shapes
-    { { { {1, 10}, {25, 35} },            // Dynamic shape 0
-      { {2, 30}, {5, 30}, {8, 30} } },    // Target
shapes - { { {1, 10}, -1 }, // Dynamic shape 1 - { {2, 10}, {5, 10}, {8, 10} } }, // Target shapes - { { {1, 10}, 1 }, // Dynamic shape 2 - { {2, 1}, {5, 1}, {8, 1} } } }, // Target shapes - { { { {1, 10}, {25, 35} }, // Dynamic shape 0 - { {2, 30}, {5, 30}, {8, 30}, {2, 30}, {5, 30}, {8, 30} } }, // Target shapes - { { {1, 10}, -1 }, // Dynamic shape 1 - { {2, 10}, {5, 10}, {8, 10}, {2, 10}, {5, 10}, {8, 10} } }, // Target shapes - { { {1, 10}, 1 }, // Dynamic shape 2 - { {2, 1}, {5, 1}, {8, 1}, {2, 1}, {5, 1}, {8, 1} } } } // Target shapes + {{{{-1}, 1}, // Dynamic shape 0 + {{1, 1}, {3, 1}, {5, 1}}}, // Target shapes + {{{-1}, 1}, // Dynamic shape 1 + {{1, 1}, {3, 1}, {5, 1}}}, // Target shapes + {{{-1}, 1}, // Dynamic shape 2 + {{1, 1}, {3, 1}, {5, 1}}}}, // Target shapes + {{{{1, 10}, 30}, // Dynamic shape 0 + {{2, 30}, {5, 30}, {8, 30}}}, // Target shapes + {{{1, 10}, 10}, // Dynamic shape 1 + {{2, 10}, {5, 10}, {8, 10}}}, // Target shapes + {{{1, 10}, 1}, // Dynamic shape 2 + {{2, 1}, {5, 1}, {8, 1}}}}, // Target shapes + {{{{1, 10}, {25, 35}}, // Dynamic shape 0 + {{2, 30}, {5, 30}, {8, 30}}}, // Target shapes + {{{1, 10}, -1}, // Dynamic shape 1 + {{2, 10}, {5, 10}, {8, 10}}}, // Target shapes + {{{1, 10}, 1}, // Dynamic shape 2 + {{2, 1}, {5, 1}, {8, 1}}}}, // Target shapes + {{{{1, 10}, {25, 35}}, // Dynamic shape 0 + {{2, 30}, {5, 30}, {8, 30}, {2, 30}, {5, 30}, {8, 30}}}, // Target shapes + {{{1, 10}, -1}, // Dynamic shape 1 + {{2, 10}, {5, 10}, {8, 10}, {2, 10}, {5, 10}, {8, 10}}}, // Target shapes + {{{1, 10}, 1}, // Dynamic shape 2 + {{2, 1}, {5, 1}, {8, 1}, {2, 1}, {5, 1}, {8, 1}}}} // Target shapes }; -INSTANTIATE_TEST_SUITE_P(smoke_dynamic, AUGRUCellCPUTest, - ::testing::Combine(::testing::ValuesIn(dynamicShapes), - ::testing::ValuesIn(shouldDecompose), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::ValuesIn(additionalConfig)), - AUGRUCellCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_dynamic, + AUGRUCellCPUTest, + ::testing::Combine(::testing::ValuesIn(dynamicShapes), + ::testing::ValuesIn(shouldDecompose), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::ValuesIn(additionalConfig)), + AUGRUCellCPUTest::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp index 0765b65907d7d2..3940d3bd77b6fb 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/augru_sequence.cpp @@ -2,44 +2,53 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/node_builders/augru_cell.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { - -using AUGRUSequenceCpuSpecificParams = typename 
std::tuple<
-    std::vector<InputShape>,                   // Shapes
-    ngraph::helpers::SequenceTestsMode,        // Pure Sequence or TensorIterator
-    std::vector<std::string>,                  // Activations
-    float,                                     // Clip
-    bool,                                      // Linear_before_reset
-    ov::op::RecurrentSequenceDirection,        // Direction
-    ElementType,                               // Network precision
-    CPUSpecificParams,                         // CPU specific params
-    std::map<std::string, std::string>         // Additional config
->;
+
+namespace ov {
+namespace test {
+
+using AUGRUSequenceCpuSpecificParams =
+    typename std::tuple<std::vector<InputShape>,             // Shapes
+                        ov::test::utils::SequenceTestsMode,  // Pure Sequence or TensorIterator
+                        std::vector<std::string>,            // Activations
+                        float,                               // Clip
+                        bool,                                // Linear_before_reset
+                        ov::op::RecurrentSequenceDirection,  // Direction
+                        ElementType,                         // Network precision
+                        CPUSpecificParams,                   // CPU specific params
+                        ov::AnyMap                           // Additional config
+                        >;

 class AUGRUSequenceCPUTest : public testing::WithParamInterface<AUGRUSequenceCpuSpecificParams>,
-                             virtual public ov::test::SubgraphBaseTest, public CPUTestsBase {
+                             virtual public ov::test::SubgraphBaseTest,
+                             public CPUTestsBase {
 public:
-    static std::string getTestCaseName(const testing::TestParamInfo<AUGRUSequenceCpuSpecificParams> &obj) {
+    static std::string getTestCaseName(const testing::TestParamInfo<AUGRUSequenceCpuSpecificParams>& obj) {
         std::vector<InputShape> inputShapes;
-        ngraph::helpers::SequenceTestsMode seqMode;
+        ov::test::utils::SequenceTestsMode seqMode;
         std::vector<std::string> activations;
         float clip;
         bool linearBeforeRest;
         ov::op::RecurrentSequenceDirection direction;
         ElementType netPrecision;
         CPUSpecificParams cpuParams;
-        std::map<std::string, std::string> additionalConfig;
-
-        std::tie(inputShapes, seqMode, activations, clip, linearBeforeRest, direction, netPrecision, cpuParams, additionalConfig) = obj.param;
+        ov::AnyMap additionalConfig;
+
+        std::tie(inputShapes,
+                 seqMode,
+                 activations,
+                 clip,
+                 linearBeforeRest,
+                 direction,
+                 netPrecision,
+                 cpuParams,
+                 additionalConfig) = obj.param;

         std::ostringstream result;
         result << "IS=(";
@@ -55,7 +64,7 @@ class AUGRUSequenceCPUTest : public testing::WithParamInterface<AUGRUSequenceCpuSpecificParams>,
             params.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, shape));
         }

-        const size_t batchSize = inputDynamicShapes[0][0].is_static() ? inputDynamicShapes[0][0].get_length() :
-                                 inputDynamicShapes[1][0].is_static() ? inputDynamicShapes[1][0].get_length() :
-                                 inputDynamicShapes.size() > 2 && inputDynamicShapes[2][0].is_static() ? inputDynamicShapes[2][0].get_length() :
-                                 1lu;
-        /**
-         * There are 2 options to paramter "in" when "make_sequence" is true.
-         * 0    1                  2          3
-         * X    init_hidden_state  attention  seq_length
-         * or,
-         * 0    1                  2
-         * X    init_hidden_state  attention
-         *
-         */
+        const size_t batchSize = inputDynamicShapes[0][0].is_static()   ? inputDynamicShapes[0][0].get_length()
+                                 : inputDynamicShapes[1][0].is_static() ? inputDynamicShapes[1][0].get_length()
+                                 : inputDynamicShapes.size() > 2 && inputDynamicShapes[2][0].is_static()
+                                     ? inputDynamicShapes[2][0].get_length()
+                                     : 1lu;
+        /**
+         * There are 2 options for parameter "in" when "make_sequence" is true.
+         * 0    1                  2          3
+         * X    init_hidden_state  attention  seq_length
+         * or,
+         * 0    1                  2
+         * X    init_hidden_state  attention
+         *
+         */
         if (inputDynamicShapes.size() > 3) {
             if (!inputDynamicShapes[3].is_dynamic() &&
-                seqMode != ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM &&
-                seqMode != ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM) {
+                seqMode != ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM &&
+                seqMode != ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM) {
                 params.pop_back();
             } else {
                 params[3]->set_element_type(ElementType::i64);
@@ -134,18 +153,15 @@ class AUGRUSequenceCPUTest : public testing::WithParamInterface<AUGRUSequenceCpuSpecificParams>,
-        std::vector<ov::Shape> WRB = {{numDirections, 3 * hiddenSize, inputSize}, {numDirections, 3 * hiddenSize, hiddenSize},
-                                      {numDirections, (linearBeforeReset ? 4 : 3) * hiddenSize}, {batchSize}};
-        auto augruSequenceOp = ov::test::utils::make_augru(paramsOuts,
-                                                           WRB,
-                                                           hiddenSize,
-                                                           true,
-                                                           direction,
-                                                           seqMode);
+        std::vector<ov::Shape> WRB = {{numDirections, 3 * hiddenSize, inputSize},
+                                      {numDirections, 3 * hiddenSize, hiddenSize},
+                                      {numDirections, (linearBeforeReset ? 4 : 3) * hiddenSize},
+                                      {batchSize}};
+        auto augruSequenceOp = ov::test::utils::make_augru(paramsOuts, WRB, hiddenSize, true, direction, seqMode);

         function = makeNgraphFunction(netPrecision, params, augruSequenceOp, "augruSequenceOp");

-        if (seqMode != ngraph::helpers::SequenceTestsMode::PURE_SEQ) {
+        if (seqMode != ov::test::utils::SequenceTestsMode::PURE_SEQ) {
             // TODO: ConvertAUGRUSequenceToTensorIterator
             throw std::runtime_error("ConvertAUGRUSequenceToTensorIterator not implemented yet.");
         } else {
@@ -178,14 +194,13 @@ TEST_P(AUGRUSequenceCPUTest, CompareWithRefs) {
 namespace {
 /* CPU PARAMS */
-std::vector<std::map<std::string, std::string>> additionalConfig
-    = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}},
-       {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}};
+std::vector<ov::AnyMap> additionalConfig = {{ov::hint::inference_precision(ov::element::f32)},
+                                            {ov::hint::inference_precision(ov::element::bf16)}};

 CPUSpecificParams cpuParams{{ntc, tnc}, {ntc, tnc}, {"ref_any"}, "ref_any"};
-CPUSpecificParams cpuParamsBatchSizeOne{{tnc, tnc}, {tnc, tnc}, {"ref_any"}, "ref_any"};;
+CPUSpecificParams cpuParamsBatchSizeOne{{tnc, tnc}, {tnc, tnc}, {"ref_any"}, "ref_any"};

-std::vector<ngraph::helpers::SequenceTestsMode> mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ};
+std::vector<ov::test::utils::SequenceTestsMode> mode{ov::test::utils::SequenceTestsMode::PURE_SEQ};
 // output values increase rapidly without clip, so use only seq_lengths = 2
 std::vector<std::vector<std::string>> activations = {{"sigmoid", "tanh"}};
 // dev_api::augrucell does not support lbr so far.
 std::vector<bool> linearBeforeReset = {false};
@@ -194,190 +209,198 @@ std::vector<float> clip{0.f};
 // dev_api::augrusequence only supports forward so far.
 std::vector<ov::op::RecurrentSequenceDirection> direction = {ov::op::RecurrentSequenceDirection::FORWARD};

-std::vector<ElementType> netPrecisions = { ElementType::f32 };
-
-const std::vector<std::vector<InputShape>> staticShapes = {
-    { { {}, { {10, 2, 10} } },   // #0. Static shapes
-      { {}, { {10, 1, 1} } },
-      { {}, { {10, 2, 1} } },
-      { {}, { {10} } } },
-    { { {}, { {10, 2, 10} } },   // #1. Static shapes
-      { {}, { {10, 1, 10} } },
-      { {}, { {10, 2, 1} } },
-      { {}, { {10} } } },
-    { { {}, { {1, 2, 10} } },    // #2. Static shapes
-      { {}, { {1, 1, 1} } },
-      { {}, { {1, 2, 1} } },
-      { {}, { {1} } } },
-    { { {}, { {1, 2, 10} } },    // #3. Static shapes
-      { {}, { {1, 1, 10} } },
-      { {}, { {1, 2, 1} } },
-      { {}, { {1} } } },
-    { { {}, { {10, 2, 10} } },   // #4.
Static shapes - { {}, { {10, 1, 1} } }, - { {}, { {10, 2, 1} } } }, - { { {}, { {10, 2, 10} } }, // #5. Static shapes - { {}, { {10, 1, 10} } }, - { {}, { {10, 2, 1} } } } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_static, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[0], staticShapes[1]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(std::map{})), - AUGRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[3]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), - AUGRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_static_bf16, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[4], staticShapes[5]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(additionalConfig[1])), - AUGRUSequenceCPUTest::getTestCaseName); +std::vector netPrecisions = {ElementType::f32}; + +const std::vector> staticShapes = {{{{}, {{10, 2, 10}}}, // #0. Static shapes + {{}, {{10, 1, 1}}}, + {{}, {{10, 2, 1}}}, + {{}, {{10}}}}, + {{{}, {{10, 2, 10}}}, // #1. Static shapes + {{}, {{10, 1, 10}}}, + {{}, {{10, 2, 1}}}, + {{}, {{10}}}}, + {{{}, {{1, 2, 10}}}, // #2. Static shapes + {{}, {{1, 1, 1}}}, + {{}, {{1, 2, 1}}}, + {{}, {{1}}}}, + {{{}, {{1, 2, 10}}}, // #3. Static shapes + {{}, {{1, 1, 10}}}, + {{}, {{1, 2, 1}}}, + {{}, {{1}}}}, + {{{}, {{10, 2, 10}}}, // #4. Static shapes + {{}, {{10, 1, 1}}}, + {{}, {{10, 2, 1}}}}, + {{{}, {{10, 2, 10}}}, // #5. 
Static shapes + {{}, {{10, 1, 10}}}, + {{}, {{10, 2, 1}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_static, + AUGRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[0], + staticShapes[1]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::Values(ov::AnyMap{})), + AUGRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, + AUGRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[3]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParamsBatchSizeOne), + ::testing::Values(ov::AnyMap{})), + AUGRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_static_bf16, + AUGRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[4], + staticShapes[5]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::Values(additionalConfig[1])), + AUGRUSequenceCPUTest::getTestCaseName); const std::vector> dynamicShapes = { - { { {-1, {1, 5}, 10}, // #0. Dynamic shape 0 - { {10, 2, 10}, {8, 3, 10}, {5, 4, 10} } }, // Target shapes - { {{0, 15}, 1, 1}, // Dynamic shape 1 - { {10, 1, 1}, {8, 1, 1}, {5, 1, 1} } }, // Target shapes - { {-1, {1, 5}, 1}, // Dynamic shape 2 - { {10, 2, 1}, {8, 3, 1}, {5, 4, 1} } }, // Target shapes - { {{0, 12}}, // Dynamic shape 3 - { {10}, {8}, {5} } } }, // Target shapes - { { {{0, 11}, -1, 10}, // #1. Dynamic shape 0 - { {10, 2, 10}, {3, 4, 10}, {5, 5, 10} } }, // Target shapes - { {-1, 1, 10}, // Dynamic shape 1 - { {10, 1, 10}, {3, 1, 10}, {5, 1, 10} } }, // Target shapes - { {{0, 11}, -1, 1}, // Dynamic shape 3 - { {10, 2, 1}, {3, 4, 1}, {5, 5, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {10}, {3}, {5} } } }, // Target shapes - { { {{0, 11}, -1, {7, 11}}, // #2. Dynamic shape 0 - { {10, 2, 10}, {3, 4, 10}, {5, 5, 10} } }, // Target shapes - { {-1, 1, {8, 12}}, // Dynamic shape 1 - { {10, 1, 10}, {3, 1, 10}, {5, 1, 10} } }, // Target shapes - { {{0, 11}, -1, 1}, // Dynamic shape 3 - { {10, 2, 1}, {3, 4, 1}, {5, 5, 1} } } , // Target shapes - { {-1}, // Dynamic shape 2 - { {10}, {3}, {5} } } }, // Target shapes - { { {-1, {0, 7}, 10}, // #3. Dynamic shape 0 - { {1, 2, 10}, {1, 3, 10}, {1, 6, 10} } }, // Target shapes - { {-1, 1, 1}, // Dynamic shape 1 - { {1, 1, 1}, {1, 1, 1}, {1, 1, 1} } }, // Target shapes - { {-1, {0, 7}, 1}, // Dynamic shape 3 - { {1, 2, 1}, {1, 3, 1}, {1, 6, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {1}, {1}, {1} } } }, // Target shapes - { { {1, -1, 10}, // #4. Dynamic shape 0 - { {1, 2, 10}, {1, 4, 10}, {1, 8, 10} } }, // Target shapes - { {1, 1, 10}, // Dynamic shape 1 - { {1, 1, 10}, {1, 1, 10}, {1, 1, 10} } }, // Target shapes - { {1, -1, 1}, // Dynamic shape 0 - { {1, 2, 1}, {1, 4, 1}, {1, 8, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {1}, {1}, {1} } } }, // Target shapes - { { {-1, -1, -1}, // #5. 
Dynamic shape 0 - { {1, 2, 10}, {1, 4, 10}, {1, 8, 10} } }, // Target shapes - { {-1, -1, -1}, // Dynamic shape 1 - { {1, 1, 10}, {1, 1, 10}, {1, 1, 10} } }, // Target shapes - { {-1, -1, -1}, // Dynamic shape 0 - { {1, 2, 1}, {1, 4, 1}, {1, 8, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {1}, {1}, {1} } } }, // Target shapes - { { {2, {1, 5}, 10}, // #6. Dynamic shape 0 - { {2, 2, 10}, {2, 3, 10}, {2, 4, 10} } }, // Target shapes - { {2, 1, 1}, // Dynamic shape 1 - { {2, 1, 1}, {2, 1, 1}, {2, 1, 1} } }, // Target shapes - { {2, {1, 5}, 1}, // Dynamic shape 2 - { {2, 2, 1}, {2, 3, 1}, {2, 4, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {2}, {2}, {2} } } }, // Target shapes - { { {5, -1, 10}, // #7. Dynamic shape 0 - { {5, 2, 10}, {5, 4, 10}, {5, 5, 10} } }, // Target shapes - { {5, 1, 10}, // Dynamic shape 1 - { {5, 1, 10}, {5, 1, 10}, {5, 1, 10} } }, // Target shapes - { {5, -1, 1}, // Dynamic shape 0 - { {5, 2, 1}, {5, 4, 1}, {5, 5, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {5}, {5}, {5} } } }, // Target shapes - { { {{0, 11}, -1, {7, 11}}, // #8. Dynamic shape 0 - { {10, 2, 10}, {3, 4, 10}, {5, 5, 10}, {10, 2, 10}, {5, 5, 10} } }, // Target shapes - { {-1, 1, {8, 12}}, // Dynamic shape 1 - { {10, 1, 10}, {3, 1, 10}, {5, 1, 10}, {10, 1, 10}, {5, 1, 10} } }, // Target shapes - { {{0, 11}, -1, 1}, // Dynamic shape 3 - { {10, 2, 1}, {3, 4, 1}, {5, 5, 1}, {10, 2, 1}, {5, 5, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {10}, {3}, {5}, {10}, {5} } } } // Target shapes + {{{-1, {1, 5}, 10}, // #0. Dynamic shape 0 + {{10, 2, 10}, {8, 3, 10}, {5, 4, 10}}}, // Target shapes + {{{0, 15}, 1, 1}, // Dynamic shape 1 + {{10, 1, 1}, {8, 1, 1}, {5, 1, 1}}}, // Target shapes + {{-1, {1, 5}, 1}, // Dynamic shape 2 + {{10, 2, 1}, {8, 3, 1}, {5, 4, 1}}}, // Target shapes + {{{0, 12}}, // Dynamic shape 3 + {{10}, {8}, {5}}}}, // Target shapes + {{{{0, 11}, -1, 10}, // #1. Dynamic shape 0 + {{10, 2, 10}, {3, 4, 10}, {5, 5, 10}}}, // Target shapes + {{-1, 1, 10}, // Dynamic shape 1 + {{10, 1, 10}, {3, 1, 10}, {5, 1, 10}}}, // Target shapes + {{{0, 11}, -1, 1}, // Dynamic shape 3 + {{10, 2, 1}, {3, 4, 1}, {5, 5, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{10}, {3}, {5}}}}, // Target shapes + {{{{0, 11}, -1, {7, 11}}, // #2. Dynamic shape 0 + {{10, 2, 10}, {3, 4, 10}, {5, 5, 10}}}, // Target shapes + {{-1, 1, {8, 12}}, // Dynamic shape 1 + {{10, 1, 10}, {3, 1, 10}, {5, 1, 10}}}, // Target shapes + {{{0, 11}, -1, 1}, // Dynamic shape 3 + {{10, 2, 1}, {3, 4, 1}, {5, 5, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{10}, {3}, {5}}}}, // Target shapes + {{{-1, {0, 7}, 10}, // #3. Dynamic shape 0 + {{1, 2, 10}, {1, 3, 10}, {1, 6, 10}}}, // Target shapes + {{-1, 1, 1}, // Dynamic shape 1 + {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}}, // Target shapes + {{-1, {0, 7}, 1}, // Dynamic shape 3 + {{1, 2, 1}, {1, 3, 1}, {1, 6, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{1}, {1}, {1}}}}, // Target shapes + {{{1, -1, 10}, // #4. Dynamic shape 0 + {{1, 2, 10}, {1, 4, 10}, {1, 8, 10}}}, // Target shapes + {{1, 1, 10}, // Dynamic shape 1 + {{1, 1, 10}, {1, 1, 10}, {1, 1, 10}}}, // Target shapes + {{1, -1, 1}, // Dynamic shape 0 + {{1, 2, 1}, {1, 4, 1}, {1, 8, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{1}, {1}, {1}}}}, // Target shapes + {{{-1, -1, -1}, // #5. 
Dynamic shape 0 + {{1, 2, 10}, {1, 4, 10}, {1, 8, 10}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{1, 1, 10}, {1, 1, 10}, {1, 1, 10}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 0 + {{1, 2, 1}, {1, 4, 1}, {1, 8, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{1}, {1}, {1}}}}, // Target shapes + {{{2, {1, 5}, 10}, // #6. Dynamic shape 0 + {{2, 2, 10}, {2, 3, 10}, {2, 4, 10}}}, // Target shapes + {{2, 1, 1}, // Dynamic shape 1 + {{2, 1, 1}, {2, 1, 1}, {2, 1, 1}}}, // Target shapes + {{2, {1, 5}, 1}, // Dynamic shape 2 + {{2, 2, 1}, {2, 3, 1}, {2, 4, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{2}, {2}, {2}}}}, // Target shapes + {{{5, -1, 10}, // #7. Dynamic shape 0 + {{5, 2, 10}, {5, 4, 10}, {5, 5, 10}}}, // Target shapes + {{5, 1, 10}, // Dynamic shape 1 + {{5, 1, 10}, {5, 1, 10}, {5, 1, 10}}}, // Target shapes + {{5, -1, 1}, // Dynamic shape 0 + {{5, 2, 1}, {5, 4, 1}, {5, 5, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{5}, {5}, {5}}}}, // Target shapes + {{{{0, 11}, -1, {7, 11}}, // #8. Dynamic shape 0 + {{10, 2, 10}, {3, 4, 10}, {5, 5, 10}, {10, 2, 10}, {5, 5, 10}}}, // Target shapes + {{-1, 1, {8, 12}}, // Dynamic shape 1 + {{10, 1, 10}, {3, 1, 10}, {5, 1, 10}, {10, 1, 10}, {5, 1, 10}}}, // Target shapes + {{{0, 11}, -1, 1}, // Dynamic shape 3 + {{10, 2, 1}, {3, 4, 1}, {5, 5, 1}, {10, 2, 1}, {5, 5, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{10}, {3}, {5}, {10}, {5}}}} // Target shapes }; -INSTANTIATE_TEST_SUITE_P(smoke_dynamic, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[0], dynamicShapes[1], dynamicShapes[2]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(std::map{})), - AUGRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[4]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), - AUGRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_dynamic, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[5], dynamicShapes[8]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(std::map{})), - AUGRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, AUGRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[6], dynamicShapes[7]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(additionalConfig[1])), - AUGRUSequenceCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_dynamic, + AUGRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn({dynamicShapes[0], 
dynamicShapes[1], dynamicShapes[2]}),
+                                            ::testing::ValuesIn(mode),
+                                            ::testing::ValuesIn(activations),
+                                            ::testing::ValuesIn(clip),
+                                            ::testing::ValuesIn(linearBeforeReset),
+                                            ::testing::ValuesIn(direction),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(cpuParams),
+                                            ::testing::Values(ov::AnyMap{})),
+                         AUGRUSequenceCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne,
+                         AUGRUSequenceCPUTest,
+                         ::testing::Combine(::testing::ValuesIn({dynamicShapes[4]}),
+                                            ::testing::ValuesIn(mode),
+                                            ::testing::ValuesIn(activations),
+                                            ::testing::ValuesIn(clip),
+                                            ::testing::ValuesIn(linearBeforeReset),
+                                            ::testing::ValuesIn(direction),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(cpuParamsBatchSizeOne),
+                                            ::testing::Values(ov::AnyMap{})),
+                         AUGRUSequenceCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_dynamic,
+                         AUGRUSequenceCPUTest,
+                         ::testing::Combine(::testing::ValuesIn({dynamicShapes[5], dynamicShapes[8]}),
+                                            ::testing::ValuesIn(mode),
+                                            ::testing::ValuesIn(activations),
+                                            ::testing::ValuesIn(clip),
+                                            ::testing::ValuesIn(linearBeforeReset),
+                                            ::testing::ValuesIn(direction),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(cpuParams),
+                                            ::testing::Values(ov::AnyMap{})),
+                         AUGRUSequenceCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16,
+                         AUGRUSequenceCPUTest,
+                         ::testing::Combine(::testing::ValuesIn({dynamicShapes[6], dynamicShapes[7]}),
+                                            ::testing::ValuesIn(mode),
+                                            ::testing::ValuesIn(activations),
+                                            ::testing::ValuesIn(clip),
+                                            ::testing::ValuesIn(linearBeforeReset),
+                                            ::testing::ValuesIn(direction),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(cpuParams),
+                                            ::testing::Values(additionalConfig[1])),
+                         AUGRUSequenceCPUTest::getTestCaseName);
+}  // namespace
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp
index 99367ef14e8ba9..789d45d589f1a8 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/batch_to_space.cpp
@@ -3,32 +3,32 @@
 //
 #include
-#include "shared_test_classes/base/ov_subgraph.hpp"
+
 #include "ov_models/builders.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "test_utils/cpu_test_utils.hpp"

-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ov::test;

-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace {
-    std::vector<int64_t> blockShape, cropsBegin, cropsEnd;
+std::vector<int64_t> blockShape, cropsBegin, cropsEnd;
 }  // namespace

-using BatchToSpaceLayerTestCPUParams = std::tuple<
-        std::vector<InputShape>,            // Input shapes
-        std::vector<int64_t>,               // block shape
-        std::vector<int64_t>,               // crops begin
-        std::vector<int64_t>,               // crops end
-        ov::element::Type,                  // Network precision
-        CPUSpecificParams>;
+using BatchToSpaceLayerTestCPUParams = std::tuple<std::vector<InputShape>,  // Input shapes
+                                                  std::vector<int64_t>,     // block shape
+                                                  std::vector<int64_t>,     // crops begin
+                                                  std::vector<int64_t>,     // crops end
+                                                  ov::element::Type,        // Network precision
+                                                  CPUSpecificParams>;

 class BatchToSpaceCPULayerTest : public testing::WithParamInterface<BatchToSpaceLayerTestCPUParams>,
-                                 virtual public SubgraphBaseTest, public CPUTestsBase {
+                                 virtual public SubgraphBaseTest,
+                                 public CPUTestsBase {
 public:
-    static std::string getTestCaseName(const testing::TestParamInfo<BatchToSpaceLayerTestCPUParams> &obj) {
+    static std::string getTestCaseName(const testing::TestParamInfo<BatchToSpaceLayerTestCPUParams>& obj) {
         std::vector<InputShape> inputShapes;
         ov::element::Type model_type;
         CPUSpecificParams cpuParams;
@@ -36,7 +36,7 @@ class BatchToSpaceCPULayerTest : public testing::WithParamInterface<BatchToSpaceLayerTestCPUParams>,
             const auto& param_type = parameter->get_output_element_type(0);
             const auto& static_shape = targetInputStaticShapes[i];
             switch (i) {
-                case 0: {
-                    tensor = ov::test::utils::create_and_fill_tensor(param_type, static_shape, 2560, 0, 256);
-                    break;
-                }
-                case 1: {
-                    ASSERT_EQ(ov::shape_size(static_shape), blockShape.size());
-                    tensor = ov::Tensor(param_type, static_shape, blockShape.data());
-                    break;
-                }
-                case 2:
-                case 3: {
-                    ASSERT_EQ(ov::shape_size(static_shape), cropsEnd.size());
-                    tensor = ov::Tensor(param_type, static_shape, cropsEnd.data());
-                    break;
-                }
-                default: {
-                    throw std::runtime_error("Incorrect parameter number!");
-                }
+            case 0: {
+                ov::test::utils::InputGenerateData in_data;
+                in_data.start_from = 0;
+                in_data.range = 2560;
+                in_data.resolution = 256;
+                tensor = ov::test::utils::create_and_fill_tensor(param_type, static_shape, in_data);
+                break;
+            }
+            case 1: {
+                ASSERT_EQ(ov::shape_size(static_shape), blockShape.size());
+                tensor = ov::Tensor(param_type, static_shape, blockShape.data());
+                break;
+            }
+            case 2:
+            case 3: {
+                ASSERT_EQ(ov::shape_size(static_shape), cropsEnd.size());
+                tensor = ov::Tensor(param_type, static_shape, cropsEnd.data());
+                break;
+            }
+            default: {
+                throw std::runtime_error("Incorrect parameter number!");
+            }
             }
             inputs.insert({parameter, tensor});
         }
@@ -92,7 +96,7 @@ class BatchToSpaceCPULayerTest : public testing::WithParamInterface<BatchToSpaceLayerTestCPUParams>,
-        std::vector<InputShape> inputShapes;
+        std::vector<InputShape> inputShapes;
         ov::element::Type model_type;
         CPUSpecificParams cpuParams;
         std::tie(inputShapes, blockShape, cropsBegin, cropsEnd, model_type, cpuParams) = this->GetParam();
@@ -120,8 +124,8 @@ class BatchToSpaceCPULayerTest : public testing::WithParamInterface<BatchToSpaceLayerTestCPUParams>,
         auto in3 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::i64, inputDynamicShapes[3]);
         auto btsNode = std::make_shared<ov::op::v1::BatchToSpace>(in0, in1, in2, in3);
         btsNode->get_rt_info() = getCPUInfo();
-        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(btsNode)};
-        function = std::make_shared<ngraph::Function>(results, ov::ParameterVector{in0, in1, in2, in3}, "BatchToSpace");
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(btsNode)};
+        function = std::make_shared<ov::Model>(results, ov::ParameterVector{in0, in1, in2, in3}, "BatchToSpace");
     }
 };

@@ -132,310 +136,273 @@ TEST_P(BatchToSpaceCPULayerTest, CompareWithRefs) {
 namespace {

-const std::vector<ov::element::Type> model_types = {
-        ov::element::Type_t::u8,
-        ov::element::Type_t::i8,
-        ov::element::Type_t::i32,
-        ov::element::Type_t::f32,
-        ov::element::Type_t::bf16
-};
+const std::vector<ov::element::Type> model_types = {ov::element::Type_t::u8,
+                                                    ov::element::Type_t::i8,
+                                                    ov::element::Type_t::i32,
+                                                    ov::element::Type_t::f32,
+                                                    ov::element::Type_t::bf16};

-const std::vector<std::vector<int64_t>> blockShape4D1 = {{1, 1, 1, 2}, {1, 2, 2, 1}};
-const std::vector<std::vector<int64_t>> cropsBegin4D1 = {{0, 0, 0, 0}, {0, 0, 0, 1}, {0, 0, 2, 0}};
-const std::vector<std::vector<int64_t>> cropsEnd4D1 = {{0, 0, 0, 0}, {0, 0, 1, 0}, {0, 0, 1, 1}};
+const std::vector<std::vector<int64_t>> blockShape4D1 = {{1, 1, 1, 2}, {1, 2, 2, 1}};
+const std::vector<std::vector<int64_t>> cropsBegin4D1 = {{0, 0, 0, 0}, {0, 0, 0, 1}, {0, 0, 2, 0}};
+const std::vector<std::vector<int64_t>> cropsEnd4D1 = {{0, 0, 0, 0}, {0, 0, 1, 0}, {0, 0, 1, 1}};

-std::vector<std::vector<ov::Shape>> staticInputShapes4D1 = {
-    {{8, 16, 10, 10}, {4}, {4}, {4}}
-};
+std::vector<std::vector<ov::Shape>> staticInputShapes4D1 = {{{8, 16, 10, 10}, {4}, {4}, {4}}};

 std::vector<std::vector<InputShape>> dynamicInputShapes4D1 = {
-    {
-        {{-1, -1, -1, -1}, {{8, 8, 6, 7}, {4, 10, 5, 5}, {12, 9, 7, 5}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    },
-    {
-        {{{4, 12}, {8, 16}, 6, -1}, {{8, 8, 6, 7}, {4, 10, 6, 5}, {12, 9, 6, 5}}},
-        {{4}, {{4}, {4},
{4}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}} - } -}; + {{{-1, -1, -1, -1}, {{8, 8, 6, 7}, {4, 10, 5, 5}, {12, 9, 7, 5}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}, + {{{{4, 12}, {8, 16}, 6, -1}, {{8, 8, 6, 7}, {4, 10, 6, 5}, {12, 9, 6, 5}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}}; std::vector> dynamicInputShapes4D1Blocked = { - { - {{-1, 16, -1, -1}, {{4, 16, 5, 8}, {8, 16, 7, 6}, {12, 16, 4, 5}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}} - } -}; + {{{-1, 16, -1, -1}, {{4, 16, 5, 8}, {8, 16, 7, 6}, {12, 16, 4, 5}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}}; -const std::vector> blockShape4D2 = {{1, 2, 3, 4}, {1, 3, 4, 2}}; -const std::vector> cropsBegin4D2 = {{0, 0, 0, 1}, {0, 0, 1, 2}}; -const std::vector> cropsEnd4D2 = {{0, 0, 1, 0}, {0, 0, 3, 1}}; +const std::vector> blockShape4D2 = {{1, 2, 3, 4}, {1, 3, 4, 2}}; +const std::vector> cropsBegin4D2 = {{0, 0, 0, 1}, {0, 0, 1, 2}}; +const std::vector> cropsEnd4D2 = {{0, 0, 1, 0}, {0, 0, 3, 1}}; -std::vector> staticInputShapes4D2 = { - {{24, 16, 7, 8}, {4}, {4}, {4}} -}; +std::vector> staticInputShapes4D2 = {{{24, 16, 7, 8}, {4}, {4}, {4}}}; std::vector> dynamicInputShapes4D2 = { - { - {{-1, -1, -1, -1}, {{48, 4, 7, 8}, {24, 8, 6, 7}, {24, 16, 5, 5}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}} - }, - { - {{24, {4, 10}, -1, -1}, {{24, 8, 6, 7}, {24, 6, 7, 5}, {24, 4, 5, 5}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}} - } -}; + {{{-1, -1, -1, -1}, {{48, 4, 7, 8}, {24, 8, 6, 7}, {24, 16, 5, 5}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}, + {{{24, {4, 10}, -1, -1}, {{24, 8, 6, 7}, {24, 6, 7, 5}, {24, 4, 5, 5}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}}; std::vector> dynamicInputShapes4D2Blocked = { - { - {{-1, 16, -1, -1}, {{24, 16, 5, 5}, {24, 16, 6, 7}, {48, 16, 4, 4}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}}, - {{4}, {{4}, {4}, {4}}} - } -}; - -const std::vector cpuParamsWithBlock_4D = { - CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), - CPUSpecificParams({nChw8c}, {nChw8c}, {}, {}), - CPUSpecificParams({nhwc}, {nhwc}, {}, {}), - CPUSpecificParams({nchw}, {nchw}, {}, {}) -}; - -const std::vector cpuParams_4D = { - CPUSpecificParams({nhwc}, {nhwc}, {}, {}), - CPUSpecificParams({nchw}, {nchw}, {}, {}) -}; - -const auto staticBatchToSpaceParamsSet4D1 = ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D1)), - ::testing::ValuesIn(blockShape4D1), - ::testing::ValuesIn(cropsBegin4D1), - ::testing::ValuesIn(cropsEnd4D1), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_4D)); - -const auto dynamicBatchToSpaceParamsSet4D1 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D1), - ::testing::ValuesIn(blockShape4D1), - ::testing::ValuesIn(cropsBegin4D1), - ::testing::ValuesIn(cropsEnd4D1), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParams_4D)); - -const auto dynamicBatchToSpaceParamsWithBlockedSet4D1 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D1Blocked), - ::testing::ValuesIn(blockShape4D1), - ::testing::ValuesIn(cropsBegin4D1), - ::testing::ValuesIn(cropsEnd4D1), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_4D)); - -const auto staticBatchToSpaceParamsSet4D2 = ::testing::Combine( - 
::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D2)), - ::testing::ValuesIn(blockShape4D2), - ::testing::ValuesIn(cropsBegin4D2), - ::testing::ValuesIn(cropsEnd4D2), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_4D)); - -const auto dynamicBatchToSpaceParamsSet4D2 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D2), - ::testing::ValuesIn(blockShape4D2), - ::testing::ValuesIn(cropsBegin4D2), - ::testing::ValuesIn(cropsEnd4D2), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParams_4D)); - -const auto dynamicBatchToSpaceParamsWithBlockedSet4D2 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D2Blocked), - ::testing::ValuesIn(blockShape4D2), - ::testing::ValuesIn(cropsBegin4D2), - ::testing::ValuesIn(cropsEnd4D2), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_4D)); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase1_4D, BatchToSpaceCPULayerTest, - staticBatchToSpaceParamsSet4D1, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase1_4D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsSet4D1, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked1_4D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsWithBlockedSet4D1, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase2_4D, BatchToSpaceCPULayerTest, - staticBatchToSpaceParamsSet4D2, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase2_4D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsSet4D2, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked2_4D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsWithBlockedSet4D2, BatchToSpaceCPULayerTest::getTestCaseName); - -const std::vector> blockShape5D1 = {{1, 1, 2, 2, 1}, {1, 2, 1, 2, 2}}; -const std::vector> cropsBegin5D1 = {{0, 0, 0, 0, 0}, {0, 0, 0, 3, 3}}; -const std::vector> cropsEnd5D1 = {{0, 0, 0, 0, 0}, {0, 0, 1, 0, 1}}; - -std::vector> staticInputShapes5D1 = { - {{8, 16, 4, 10, 10}, {5}, {5}, {5}} -}; - + {{{-1, 16, -1, -1}, {{24, 16, 5, 5}, {24, 16, 6, 7}, {48, 16, 4, 4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}}; + +const std::vector cpuParamsWithBlock_4D = {CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nChw8c}, {nChw8c}, {}, {}), + CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {})}; + +const std::vector cpuParams_4D = {CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {})}; + +const auto staticBatchToSpaceParamsSet4D1 = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D1)), + ::testing::ValuesIn(blockShape4D1), + ::testing::ValuesIn(cropsBegin4D1), + ::testing::ValuesIn(cropsEnd4D1), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_4D)); + +const auto dynamicBatchToSpaceParamsSet4D1 = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D1), + ::testing::ValuesIn(blockShape4D1), + ::testing::ValuesIn(cropsBegin4D1), + ::testing::ValuesIn(cropsEnd4D1), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParams_4D)); + +const auto dynamicBatchToSpaceParamsWithBlockedSet4D1 = + 
::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D1Blocked), + ::testing::ValuesIn(blockShape4D1), + ::testing::ValuesIn(cropsBegin4D1), + ::testing::ValuesIn(cropsEnd4D1), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_4D)); + +const auto staticBatchToSpaceParamsSet4D2 = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D2)), + ::testing::ValuesIn(blockShape4D2), + ::testing::ValuesIn(cropsBegin4D2), + ::testing::ValuesIn(cropsEnd4D2), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_4D)); + +const auto dynamicBatchToSpaceParamsSet4D2 = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D2), + ::testing::ValuesIn(blockShape4D2), + ::testing::ValuesIn(cropsBegin4D2), + ::testing::ValuesIn(cropsEnd4D2), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParams_4D)); + +const auto dynamicBatchToSpaceParamsWithBlockedSet4D2 = + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D2Blocked), + ::testing::ValuesIn(blockShape4D2), + ::testing::ValuesIn(cropsBegin4D2), + ::testing::ValuesIn(cropsEnd4D2), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_4D)); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase1_4D, + BatchToSpaceCPULayerTest, + staticBatchToSpaceParamsSet4D1, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase1_4D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsSet4D1, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked1_4D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsWithBlockedSet4D1, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase2_4D, + BatchToSpaceCPULayerTest, + staticBatchToSpaceParamsSet4D2, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase2_4D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsSet4D2, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked2_4D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsWithBlockedSet4D2, + BatchToSpaceCPULayerTest::getTestCaseName); + +const std::vector> blockShape5D1 = {{1, 1, 2, 2, 1}, {1, 2, 1, 2, 2}}; +const std::vector> cropsBegin5D1 = {{0, 0, 0, 0, 0}, {0, 0, 0, 3, 3}}; +const std::vector> cropsEnd5D1 = {{0, 0, 0, 0, 0}, {0, 0, 1, 0, 1}}; + +std::vector> staticInputShapes5D1 = {{{8, 16, 4, 10, 10}, {5}, {5}, {5}}}; std::vector> dynamicInputShapes5D1 = { - { - {{-1, -1, -1, -1, -1}, {{8, 16, 4, 10, 10}, {16, 10, 5, 11, 9}, {24, 6, 6, 8, 8}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - }, - { - {{{8, 16}, {8, 16}, {2, 7}, -1, -1}, {{8, 16, 2, 6, 8}, {8, 10, 4, 7, 5}, {16, 8, 7, 5, 10}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - } -}; + {{{-1, -1, -1, -1, -1}, {{8, 16, 4, 10, 10}, {16, 10, 5, 11, 9}, {24, 6, 6, 8, 8}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}, + {{{{8, 16}, {8, 16}, {2, 7}, -1, -1}, {{8, 16, 2, 6, 8}, {8, 10, 4, 7, 5}, {16, 8, 7, 5, 10}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}}; std::vector> dynamicInputShapes5D1Blocked = { - { - {{-1, 16, -1, -1, -1}, {{24, 16, 3, 6, 7}, {48, 16, 4, 5, 5}, {24, 16, 5, 8, 5}}}, - {{5}, 
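Each *ParamsSet built here is a gtest parameter generator; ::testing::Combine takes the Cartesian product of its argument ranges, so the blockShape/crops/precision/layout axes multiply into hundreds of cases per instantiation. A toy sketch of the mechanism (fixture name hypothetical):

```cpp
// 2 values x 2 values = 4 generated parameter tuples.
const auto toyParams = ::testing::Combine(::testing::ValuesIn(std::vector<int>{1, 2}),
                                          ::testing::ValuesIn(std::vector<std::string>{"nchw", "nhwc"}));
// INSTANTIATE_TEST_SUITE_P(smoke_Toy, ToyFixture, toyParams, ToyFixture::getTestCaseName);
```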
{{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - } -}; + {{{-1, 16, -1, -1, -1}, {{24, 16, 3, 6, 7}, {48, 16, 4, 5, 5}, {24, 16, 5, 8, 5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}}; -const std::vector> blockShape5D2 = {{1, 2, 4, 3, 1}, {1, 1, 2, 4, 3}}; -const std::vector> cropsBegin5D2 = {{0, 0, 1, 2, 0}, {0, 0, 1, 0, 1}}; -const std::vector> cropsEnd5D2 = {{0, 0, 1, 0, 1}, {0, 0, 1, 1, 1}}; +const std::vector> blockShape5D2 = {{1, 2, 4, 3, 1}, {1, 1, 2, 4, 3}}; +const std::vector> cropsBegin5D2 = {{0, 0, 1, 2, 0}, {0, 0, 1, 0, 1}}; +const std::vector> cropsEnd5D2 = {{0, 0, 1, 0, 1}, {0, 0, 1, 1, 1}}; -std::vector> staticInputShapes5D2 = { - {{48, 16, 3, 3, 3}, {5}, {5}, {5}} -}; +std::vector> staticInputShapes5D2 = {{{48, 16, 3, 3, 3}, {5}, {5}, {5}}}; std::vector> dynamicInputShapes5D2 = { - { - {{-1, -1, -1, -1, -1}, {{48, 4, 3, 3, 3}, {24, 16, 5, 3, 5}, {24, 8, 7, 5, 5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - }, - { - {{24, {8, 16}, {3, 5}, -1, -1}, {{24, 16, 3, 4, 3}, {24, 12, 5, 3, 5}, {24, 8, 4, 5, 5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - }, - { - // special case - {{{1, 24}, {1, 16}, {1, 10}, {1, 10}, {1, 10}}, - { - {24, 16, 5, 3, 5}, - {24, 16, 5, 3, 5}, - {24, 16, 7, 5, 5} - }}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - } -}; + {{{-1, -1, -1, -1, -1}, {{48, 4, 3, 3, 3}, {24, 16, 5, 3, 5}, {24, 8, 7, 5, 5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}, + {{{24, {8, 16}, {3, 5}, -1, -1}, {{24, 16, 3, 4, 3}, {24, 12, 5, 3, 5}, {24, 8, 4, 5, 5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}, + {// special case + {{{1, 24}, {1, 16}, {1, 10}, {1, 10}, {1, 10}}, {{24, 16, 5, 3, 5}, {24, 16, 5, 3, 5}, {24, 16, 7, 5, 5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}}; std::vector> dynamicInputShapes5D2Blocked = { - { - {{-1, 16, -1, -1, -1}, {{24, 16, 4, 5, 5}, {48, 16, 3, 4, 3}, {24, 16, 5, 3, 5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - } -}; - -const std::vector cpuParamsWithBlock_5D = { - CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), - CPUSpecificParams({nCdhw8c}, {nCdhw8c}, {}, {}), - CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), - CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) -}; - -const std::vector cpuParams_5D = { - CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), - CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) -}; - -const auto staticBatchToSpaceParamsSet5D1 = ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes5D1)), - ::testing::ValuesIn(blockShape5D1), - ::testing::ValuesIn(cropsBegin5D1), - ::testing::ValuesIn(cropsEnd5D1), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_5D)); - -const auto dynamicBatchToSpaceParamsSet5D1 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D1), - ::testing::ValuesIn(blockShape5D1), - ::testing::ValuesIn(cropsBegin5D1), - ::testing::ValuesIn(cropsEnd5D1), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParams_5D)); - -const auto dynamicBatchToSpaceParamsWithBlockedSet5D1 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D1Blocked), - ::testing::ValuesIn(blockShape5D1), - ::testing::ValuesIn(cropsBegin5D1), - ::testing::ValuesIn(cropsEnd5D1), - ::testing::ValuesIn(model_types), - 
::testing::ValuesIn(cpuParamsWithBlock_5D)); - -const auto staticBatchToSpaceParamsSet5D2 = ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes5D2)), - ::testing::ValuesIn(blockShape5D2), - ::testing::ValuesIn(cropsBegin5D2), - ::testing::ValuesIn(cropsEnd5D2), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_5D)); - -const auto dynamicBatchToSpaceParamsSet5D2 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D2), - ::testing::ValuesIn(blockShape5D2), - ::testing::ValuesIn(cropsBegin5D2), - ::testing::ValuesIn(cropsEnd5D2), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParams_5D)); - -const auto dynamicBatchToSpaceParamsWithBlockedSet5D2 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D2Blocked), - ::testing::ValuesIn(blockShape5D2), - ::testing::ValuesIn(cropsBegin5D2), - ::testing::ValuesIn(cropsEnd5D2), - ::testing::ValuesIn(model_types), - ::testing::ValuesIn(cpuParamsWithBlock_5D)); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase1_5D, BatchToSpaceCPULayerTest, - staticBatchToSpaceParamsSet5D1, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase1_5D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsSet5D1, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked1_5D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsWithBlockedSet5D1, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase2_5D, BatchToSpaceCPULayerTest, - staticBatchToSpaceParamsSet5D2, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase2_5D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsSet5D2, BatchToSpaceCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked2_5D, BatchToSpaceCPULayerTest, - dynamicBatchToSpaceParamsWithBlockedSet5D2, BatchToSpaceCPULayerTest::getTestCaseName); + {{{-1, 16, -1, -1, -1}, {{24, 16, 4, 5, 5}, {48, 16, 3, 4, 3}, {24, 16, 5, 3, 5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}}; + +const std::vector cpuParamsWithBlock_5D = {CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), + CPUSpecificParams({nCdhw8c}, {nCdhw8c}, {}, {}), + CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {})}; + +const std::vector cpuParams_5D = {CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {})}; + +const auto staticBatchToSpaceParamsSet5D1 = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes5D1)), + ::testing::ValuesIn(blockShape5D1), + ::testing::ValuesIn(cropsBegin5D1), + ::testing::ValuesIn(cropsEnd5D1), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_5D)); + +const auto dynamicBatchToSpaceParamsSet5D1 = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D1), + ::testing::ValuesIn(blockShape5D1), + ::testing::ValuesIn(cropsBegin5D1), + ::testing::ValuesIn(cropsEnd5D1), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParams_5D)); + +const auto dynamicBatchToSpaceParamsWithBlockedSet5D1 = + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D1Blocked), + ::testing::ValuesIn(blockShape5D1), + ::testing::ValuesIn(cropsBegin5D1), + 
::testing::ValuesIn(cropsEnd5D1), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_5D)); + +const auto staticBatchToSpaceParamsSet5D2 = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes5D2)), + ::testing::ValuesIn(blockShape5D2), + ::testing::ValuesIn(cropsBegin5D2), + ::testing::ValuesIn(cropsEnd5D2), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_5D)); + +const auto dynamicBatchToSpaceParamsSet5D2 = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D2), + ::testing::ValuesIn(blockShape5D2), + ::testing::ValuesIn(cropsBegin5D2), + ::testing::ValuesIn(cropsEnd5D2), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParams_5D)); + +const auto dynamicBatchToSpaceParamsWithBlockedSet5D2 = + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D2Blocked), + ::testing::ValuesIn(blockShape5D2), + ::testing::ValuesIn(cropsBegin5D2), + ::testing::ValuesIn(cropsEnd5D2), + ::testing::ValuesIn(model_types), + ::testing::ValuesIn(cpuParamsWithBlock_5D)); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase1_5D, + BatchToSpaceCPULayerTest, + staticBatchToSpaceParamsSet5D1, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase1_5D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsSet5D1, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked1_5D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsWithBlockedSet5D1, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticBatchToSpaceCPULayerTestCase2_5D, + BatchToSpaceCPULayerTest, + staticBatchToSpaceParamsSet5D2, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCase2_5D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsSet5D2, + BatchToSpaceCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicBatchToSpaceCPULayerTestCaseWithBlocked2_5D, + BatchToSpaceCPULayerTest, + dynamicBatchToSpaceParamsWithBlockedSet5D2, + BatchToSpaceCPULayerTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp index cd5f2bae07f85f..308375934103c7 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/broadcast.cpp @@ -2,30 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" +#include + #include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using BroadcastLayerTestParamsSet = typename std::tuple< - std::vector, // Shapes - std::vector, // Target shapes - std::vector, // Axes mapping - ov::op::BroadcastType, // Broadcast mode - ov::element::Type_t, // Network precision - std::vector, // Const inputs - std::string>; // Device name +using BroadcastLayerTestParamsSet = typename std::tuple, // Shapes + std::vector, // Target shapes + std::vector, // Axes mapping + ov::op::BroadcastType, // Broadcast mode + ov::element::Type_t, // Network precision + 
std::vector, // Const inputs + std::string>; // Device name -using BroadcastLayerCPUTestParamsSet = typename std::tuple< - BroadcastLayerTestParamsSet, - CPUSpecificParams>; +using BroadcastLayerCPUTestParamsSet = typename std::tuple; class BroadcastLayerCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { BroadcastLayerTestParamsSet basicParamsSet; @@ -38,7 +38,8 @@ class BroadcastLayerCPUTest : public testing::WithParamInterface isConstInputs; std::string deviceName; - std::tie(inputShapes, targetShapes, axesMapping, mode, netPrecision, isConstInputs, deviceName) = basicParamsSet; + std::tie(inputShapes, targetShapes, axesMapping, mode, netPrecision, isConstInputs, deviceName) = + basicParamsSet; std::ostringstream result; result << "IS=("; @@ -51,11 +52,12 @@ class BroadcastLayerCPUTest : public testing::WithParamInterface(targetShape.size()) }); + inputDynamicShapes.push_back({static_cast(targetShape.size())}); } if (!isAxesMapConst) { - inputDynamicShapes.push_back({ static_cast(axesMapping.size()) }); + inputDynamicShapes.push_back({static_cast(axesMapping.size())}); } } const size_t targetStaticShapeSize = inputShapes.front().second.size(); @@ -96,22 +99,25 @@ class BroadcastLayerCPUTest : public testing::WithParamInterface(netPrecision, targetStaticShapes.front().front())); + functionParams.push_back( + std::make_shared(netPrecision, targetStaticShapes.front().front())); } else { functionParams.push_back(std::make_shared(netPrecision, inputDynamicShapes.front())); if (!isTargetShapeConst) { - functionParams.push_back(std::make_shared(ov::element::i64, inputDynamicShapes[1])); + functionParams.push_back( + std::make_shared(ov::element::i64, inputDynamicShapes[1])); functionParams.back()->set_friendly_name("targetShape"); } if (!isAxesMapConst) { - functionParams.push_back(std::make_shared(ov::element::i64, inputDynamicShapes.back())); + functionParams.push_back( + std::make_shared(ov::element::i64, inputDynamicShapes.back())); functionParams.back()->set_friendly_name("axesMapping"); } } @@ -131,27 +137,21 @@ class BroadcastLayerCPUTest : public testing::WithParamInterface 2 ? 
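The angle-bracketed template arguments of BroadcastLayerTestParamsSet above were lost in extraction; judging from the trailing field comments and how getTestCaseName unpacks the tuple, it presumably reads:

```cpp
using BroadcastLayerTestParamsSet = typename std::tuple<std::vector<InputShape>,  // Shapes
                                                        std::vector<int64_t>,     // Target shapes
                                                        std::vector<int64_t>,     // Axes mapping
                                                        ov::op::BroadcastType,    // Broadcast mode
                                                        ov::element::Type_t,      // Network precision
                                                        std::vector<bool>,        // Const inputs
                                                        std::string>;             // Device name
```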
functionParams[2] : functionParams[1]; } - broadcastOp = std::make_shared(functionParams[0], - targetShapeOp, - axesMappingOp, - mode); + broadcastOp = + std::make_shared(functionParams[0], targetShapeOp, axesMappingOp, mode); } else if (mode == ov::op::BroadcastType::NUMPY) { if (isTargetShapeConst) { auto targetShapeConst = ov::op::v0::Constant::create(ov::element::i64, {targetShapeRank}, targetShape); - broadcastOp = std::make_shared(functionParams[0], - targetShapeConst, - mode); + broadcastOp = std::make_shared(functionParams[0], targetShapeConst, mode); } else { - broadcastOp = std::make_shared(functionParams[0], - functionParams[1], - mode); + broadcastOp = std::make_shared(functionParams[0], functionParams[1], mode); } } function = makeNgraphFunction(netPrecision, functionParams, broadcastOp, "Broadcast"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0lu; i < funcInputs.size(); i++) { @@ -171,10 +171,14 @@ class BroadcastLayerCPUTest : public testing::WithParamInterface inputPrecisions = { - ov::element::f32, - ov::element::bf16, - ov::element::i32, - ov::element::i8 -}; +const std::vector inputPrecisions = {ov::element::f32, + ov::element::bf16, + ov::element::i32, + ov::element::i8}; /* ============= */ /* INSTANCES */ // 4D -const std::vector CPUParams4D = { - cpuParams_nChw16c, - cpuParams_nChw8c, - cpuParams_nhwc -}; - -const std::vector> staticInputShapes4D = { - { - {{}, - { // Static shapes - {1, 16, 1, 1} - } - } - }, - { - {{}, - { // Static shapes - {50, 50} - } - } - } -}; - -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(staticInputShapes4D[0]), - ::testing::ValuesIn(std::vector>{{1, 16, 3, 3}, {1, 16, 1, 3}}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(std::vector{true, true}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(CPUParams4D)), - BroadcastLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4DE, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(staticInputShapes4D[1]), - ::testing::Values(std::vector{1, 50, 50, 16}), - ::testing::Values(std::vector{1, 2}), - ::testing::Values(ov::op::BroadcastType::EXPLICIT), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(std::vector{true, true}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); - -const std::vector> staticInputShapesScalar = { - { - {{}, - { // Static shapes - {1} - } - } - } -}; - -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4DScalar, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(staticInputShapesScalar), - ::testing::Values(std::vector{1, 16, 3, 3}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(std::vector{true, true}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); +const std::vector CPUParams4D = {cpuParams_nChw16c, cpuParams_nChw8c, cpuParams_nhwc}; + +const std::vector> 
staticInputShapes4D = {{{{}, + {// Static shapes + {1, 16, 1, 1}}}}, + {{{}, + {// Static shapes + {50, 50}}}}}; + +INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Values(staticInputShapes4D[0]), + ::testing::ValuesIn(std::vector>{ + {1, 16, 3, 3}, + {1, 16, 1, 3}}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(std::vector{true, true}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(CPUParams4D)), + BroadcastLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_StaticShape4DE, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Values(staticInputShapes4D[1]), + ::testing::Values(std::vector{1, 50, 50, 16}), + ::testing::Values(std::vector{1, 2}), + ::testing::Values(ov::op::BroadcastType::EXPLICIT), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(std::vector{true, true}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); + +const std::vector> staticInputShapesScalar = {{{{}, + {// Static shapes + {1}}}}}; + +INSTANTIATE_TEST_CASE_P(smoke_StaticShape4DScalar, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapesScalar), + ::testing::Values(std::vector{1, 16, 3, 3}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(std::vector{true, true}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); const std::vector> dynamicInputShapes4D = { { - { // Origin dynamic shapes - {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, - { // Dynamic shapes instances - {1, 16, 1, 1}, - {8, 1, 1, 7}, - {1, 1, 1, 7} - } - }, + {// Origin dynamic shapes + {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, + {// Dynamic shapes instances + {1, 16, 1, 1}, + {8, 1, 1, 7}, + {1, 1, 1, 7}}}, }, - { - { // Origin dynamic shapes - {-1, -1, -1, -1}, - { // Dynamic shapes instances - {{1, 16, 1, 1}}, - {{8, 1, 1, 1}} - } - } - } -}; - -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4D, BroadcastLayerCPUTest, - ::testing::Combine(::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D), - ::testing::ValuesIn(std::vector>{{8, 16, 1, 7}, {8, 16, 10, 7}}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(std::vector>{{true, true}, {false, true}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); - -const std::vector> dynamicInputShapesScalar = { - { - { // Origin dynamic shapes - {-1}, - { // Dynamic shapes instances - {1}, - {7} - } - } - } -}; - -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4DScalar, BroadcastLayerCPUTest, - ::testing::Combine(::testing::Combine( - ::testing::ValuesIn(dynamicInputShapesScalar), - ::testing::Values(std::vector{8, 16, 1, 7}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(std::vector>{{true, true}, {false, true}}), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); + {{// Origin dynamic shapes + {-1, -1, -1, -1}, + {// Dynamic shapes instances + {{1, 16, 1, 1}}, + {{8, 1, 1, 1}}}}}}; + +INSTANTIATE_TEST_CASE_P( + smoke_DynamicShape4D, + BroadcastLayerCPUTest, + ::testing::Combine( + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D), + ::testing::ValuesIn(std::vector>{{8, 16, 1, 7}, {8, 16, 10, 7}}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(std::vector>{{true, true}, {false, true}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); + +const std::vector> dynamicInputShapesScalar = {{{// Origin dynamic shapes + {-1}, + {// Dynamic shapes instances + {1}, + {7}}}}}; + +INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4DScalar, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapesScalar), + ::testing::Values(std::vector{8, 16, 1, 7}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(std::vector>{ + {true, true}, + {false, true}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); // 5D -const std::vector> staticInputShapes5D = { - { - {{}, - { // Static shapes - {1, 16, 1, 1, 1} - } - } - } -}; +const std::vector> staticInputShapes5D = {{{{}, + {// Static shapes + {1, 16, 1, 1, 1}}}}}; const std::vector> dynamicInputShapes5D = { - { - { // Origin dynamic shapes - {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, - { // Dynamic shapes instances - {1, 16, 1, 1, 1}, - {8, 1, 1, 7, 1}, - {8, 1, 1, 1, 1} - } - } - }, - { - { // Origin dynamic shapes - {-1, -1, -1, -1, -1}, - { // Dynamic shapes instances - {1, 16, 1, 1, 1}, - {8, 16, 1, 7, 1} - } - } - } -}; -std::vector> targetShapes5D { - {8, 16, 1, 7, 1}, - {8, 16, 10, 7, 4} -}; + {{// Origin dynamic shapes + {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, + {// Dynamic shapes instances + {1, 16, 1, 1, 1}, + {8, 1, 1, 7, 1}, + {8, 1, 1, 1, 1}}}}, + {{// Origin dynamic shapes + {-1, -1, -1, -1, -1}, + {// Dynamic shapes instances + {1, 16, 1, 1, 1}, + {8, 16, 1, 7, 1}}}}}; +std::vector> targetShapes5D{{8, 16, 1, 7, 1}, {8, 16, 10, 7, 4}}; const std::vector CPUParams5D = { - cpuParams_nCdhw16c, - cpuParams_nCdhw8c, - cpuParams_ndhwc, + cpuParams_nCdhw16c, + cpuParams_nCdhw8c, + cpuParams_ndhwc, }; -INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(staticInputShapes5D), - ::testing::ValuesIn(std::vector>{{1, 16, 1, 1, 3}, {1, 16, 3, 1, 3}}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(std::vector{true, true}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(CPUParams5D)), - BroadcastLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_StaticShape5DScalar, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - 
::testing::ValuesIn(staticInputShapesScalar), - ::testing::Values(std::vector{1, 16, 3, 1, 3}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(std::vector{true, true}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D), - ::testing::ValuesIn(targetShapes5D), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(std::vector>{{true, true}, {false, true}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5DScalar, BroadcastLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapesScalar), - ::testing::Values(std::vector{8, 16, 1, 1, 7}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(std::vector>{{true, true}, {false, true}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapes5D), + ::testing::ValuesIn(std::vector>{ + {1, 16, 1, 1, 3}, + {1, 16, 3, 1, 3}}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(std::vector{true, true}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(CPUParams5D)), + BroadcastLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_StaticShape5DScalar, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapesScalar), + ::testing::Values(std::vector{1, 16, 3, 1, 3}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(std::vector{true, true}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D), + ::testing::ValuesIn(targetShapes5D), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(std::vector>{ + {true, true}, + {false, true}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5DScalar, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapesScalar), + ::testing::Values(std::vector{8, 16, 1, 1, 7}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(std::vector>{ + {true, 
true}, + {false, true}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); // 1D -const std::vector> dynamicShapes1D = { - { - { // Origin dynamic shapes - {-1}, - { // Dynamic shapes instances - {1}, - {1} - } - } - } -}; - -INSTANTIATE_TEST_CASE_P(smoke_DynamicShapes1D, BroadcastLayerCPUTest, - ::testing::Combine(::testing::Combine( - ::testing::ValuesIn(dynamicShapes1D), - ::testing::Values(std::vector{0}), - ::testing::Values(std::vector{}), - ::testing::Values(ov::op::BroadcastType::NUMPY), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(std::vector>{{false, true}}), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - BroadcastLayerCPUTest::getTestCaseName); +const std::vector> dynamicShapes1D = {{{// Origin dynamic shapes + {-1}, + {// Dynamic shapes instances + {1}, + {1}}}}}; + +INSTANTIATE_TEST_CASE_P(smoke_DynamicShapes1D, + BroadcastLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicShapes1D), + ::testing::Values(std::vector{0}), + ::testing::Values(std::vector{}), + ::testing::Values(ov::op::BroadcastType::NUMPY), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(std::vector>{ + {false, true}}), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + BroadcastLayerCPUTest::getTestCaseName); /* ========= */ -} // namespace +} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp index 6c345226e4f69f..9f9a99d6fef5a9 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/bucketize.cpp @@ -3,16 +3,14 @@ // #include -#include "ov_models/builders.hpp" + #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::opset3; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using BucketizeCPUParamsTuple = std::tuple& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); auto data_size = shape_size(targetInputStaticShapes[0]); - ov::Tensor tensorData = ov::test::utils::create_and_fill_tensor(funcInputs[0].get_element_type(), - targetInputStaticShapes[0], - data_size * 5, - 0, - 10, - 7235346); - + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = data_size * 5; + in_data.resolution = 10; + in_data.seed = 7235346; + ov::Tensor tensorData = ov::test::utils::create_and_fill_tensor(funcInputs[0].get_element_type(), targetInputStaticShapes[0], in_data); ov::Tensor tensorBucket = ov::test::utils::create_and_fill_tensor_unique_sequence(funcInputs[1].get_element_type(), targetInputStaticShapes[1], @@ -91,14 +88,14 @@ class BucketizeLayerCPUTest : public testing::WithParamInterfaceGetParam(); init_input_shapes({dataShape, bucketsShape}); - auto data = std::make_shared(inDataPrc, inputDynamicShapes[0]); + auto data = std::make_shared(inDataPrc, inputDynamicShapes[0]); data->set_friendly_name("a_data"); - auto 
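In the Bucketize hunk above, the data tensor is seeded (seed = 7235346) for reproducibility, while the boundaries tensor comes from create_and_fill_tensor_unique_sequence because Bucketize requires a strictly increasing boundary vector. A small worked example of the op's contract, under the usual right-bound semantics:

```cpp
// with_right_bound = true: bucket i covers the interval (boundaries[i-1], boundaries[i]]
float data[] = {0.3f, 7.9f, 7.9f, 2.4f};
float boundaries[] = {1.0f, 3.0f, 5.0f, 8.0f};  // must be unique and sorted
// expected bucket indices: {0, 3, 3, 1}
```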
buckets = std::make_shared(inBucketsPrc, inputDynamicShapes[1]); + auto buckets = std::make_shared(inBucketsPrc, inputDynamicShapes[1]); buckets->set_friendly_name("b_buckets"); - auto bucketize = std::make_shared(data, buckets, netPrc, with_right_bound); - function = std::make_shared(std::make_shared(bucketize), - ngraph::ParameterVector{data, buckets}, - "Bucketize"); + auto bucketize = std::make_shared(data, buckets, netPrc, with_right_bound); + function = std::make_shared(std::make_shared(bucketize), + ov::ParameterVector{data, buckets}, + "Bucketize"); } }; @@ -109,11 +106,11 @@ TEST_P(BucketizeLayerCPUTest, CompareWithRefs) { namespace { const std::vector dataShapesDynamic = { - {{ngraph::Dimension(1, 10), ngraph::Dimension::dynamic(), ngraph::Dimension::dynamic()}, + {{ov::Dimension(1, 10), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 20, 20}, {3, 16, 16}, {10, 16, 16}}}, - {{ngraph::Dimension(1, 10), 3, 50, 50}, {{1, 3, 50, 50}, {2, 3, 50, 50}, {10, 3, 50, 50}}}}; + {{ov::Dimension(1, 10), 3, 50, 50}, {{1, 3, 50, 50}, {2, 3, 50, 50}, {10, 3, 50, 50}}}}; -const std::vector bucketsShapesDynamic = {{{ngraph::Dimension::dynamic()}, {{5}, {20}, {100}}}}; +const std::vector bucketsShapesDynamic = {{{ov::Dimension::dynamic()}, {{5}, {20}, {100}}}}; const std::vector inPrc = {ov::element::f32, ov::element::i64, ov::element::i32}; const std::vector outPrc = {ov::element::i64, ov::element::i32}; @@ -142,4 +139,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsBucketize_left_Dynamic, BucketizeLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp index f061a6482ded74..5eafb558b057e4 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.cpp @@ -5,24 +5,24 @@ #include "activation.hpp" #include "gtest/gtest.h" #include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/node_builders/activation.hpp" +#include "shared_test_classes/single_op/activation.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +using namespace ov::test::utils; +namespace ov { +namespace test { std::string ActivationLayerCPUTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector inputShapes; std::vector activationShapes; - std::pair> activationTypeAndConstValue; - InferenceEngine::Precision netPrecision, inPrecision, outPrecision; + std::pair> activationTypeAndConstValue; + ov::element::Type netPrecision, inPrecision, outPrecision; CPUTestUtils::CPUSpecificParams cpuParams; std::tie(inputShapes, activationShapes, activationTypeAndConstValue, netPrecision, inPrecision, outPrecision, cpuParams) = obj.param; std::ostringstream result; - result << LayerTestsDefinitions::activationNames[activationTypeAndConstValue.first] << "_"; + result << activationNames[activationTypeAndConstValue.first] << "_"; if (inputShapes.front().first.size() != 0) { result << "IS=("; for (const auto &shape : inputShapes) { @@ -39,30 +39,30 @@ std::string ActivationLayerCPUTest::getTestCaseName(const testing::TestParamInfo } result << "AS=" << ov::test::utils::vec2str(activationShapes) << "_"; result << "ConstantsValue=" << 
ov::test::utils::vec2str(activationTypeAndConstValue.second) << "_"; - result << "netPRC=" << netPrecision.name() << "_"; - result << "inPRC=" << inPrecision.name() << "_"; - result << "outPRC=" << outPrecision.name() << "_"; + result << "netPRC=" << netPrecision.to_string() << "_"; + result << "inPRC=" << inPrecision.to_string() << "_"; + result << "outPRC=" << outPrecision.to_string() << "_"; result << CPUTestUtils::CPUTestsBase::getTestCaseName(cpuParams); return result.str(); } -void ActivationLayerCPUTest::generate_inputs(const std::vector& targetInputStaticShapes) { +void ActivationLayerCPUTest::generate_inputs(const std::vector& targetInputStaticShapes) { int32_t startFrom = 0; uint32_t range = 0; int32_t resolution = 0; - if (activationType == ActivationTypes::Exp && netPrecision == Precision::BF16) { + if (activationType == utils::ActivationTypes::Exp && netPrecision == ov::element::bf16) { startFrom = 0; range = 2; resolution = 32768; - } else if (activationType == ActivationTypes::Acosh) { + } else if (activationType == utils::ActivationTypes::Acosh) { startFrom = 2; range = 2; resolution = 128; - } else if (activationType == ActivationTypes::Acos || - activationType == ActivationTypes::Asin || - activationType == ActivationTypes::Atanh) { + } else if (activationType == utils::ActivationTypes::Acos || + activationType == utils::ActivationTypes::Asin || + activationType == utils::ActivationTypes::Atanh) { // range [-1. 1] is required startFrom = -1; range = 2; @@ -78,8 +78,11 @@ void ActivationLayerCPUTest::generate_inputs(const std::vector& t const auto& funcInput = funcInputs[i]; ov::Tensor tensor; if (funcInput.get_element_type().is_real()) { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], - range, startFrom, resolution); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = startFrom; + in_data.range = range; + in_data.resolution = resolution; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); } else { tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); } @@ -92,39 +95,38 @@ void ActivationLayerCPUTest::SetUp() { std::vector inputShapes; std::vector activationShapes; - std::pair> activationTypeAndConstValue; - InferenceEngine::Precision inPrecision, outPrecision; + std::pair> activationTypeAndConstValue; + ov::element::Type inPrecision, outPrecision; CPUTestUtils::CPUSpecificParams cpuParams; std::tie(inputShapes, activationShapes, activationTypeAndConstValue, netPrecision, inPrecision, outPrecision, cpuParams) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; activationType = activationTypeAndConstValue.first; auto constantsValue = activationTypeAndConstValue.second; - inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrecision); - outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrecision); - selectedType = getPrimitiveType() + "_" + netPrecision.name(); + inType = inPrecision; + outType = outPrecision; + selectedType = getPrimitiveType() + "_" + netPrecision.to_string(); #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) # if defined(OPENVINO_ARCH_ARM) - if (activationType == ngraph::helpers::ActivationTypes::GeluErf) // @todo tmp fallback to ref, gelu erf is disabled for 32bit ARM - selectedType = std::string("ref_") + netPrecision.name(); + if (activationType == utils::ActivationTypes::GeluErf) // @todo 
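An aside on the generate_inputs branch ladder a few lines up: it pins the sampled interval to each activation's mathematical domain, since a reference computed on out-of-domain points would be NaN. The three special cases, restated:

```cpp
// activation        start_from  range  resolution   sampled interval
// Exp (bf16 net)         0        2      32768      [0, 2)   - fine grid keeps bf16 exp error bounded
// Acosh                  2        2        128      [2, 4)   - acosh requires x >= 1
// Acos/Asin/Atanh       -1        2        128      [-1, 1)  - inverse-trig domain
```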
tmp fallback to ref, gelu erf is disabled for 32bit ARM + selectedType = std::string("ref_") + netPrecision.to_string(); # endif - if (activationType == ngraph::helpers::ActivationTypes::GeluTanh || // @todo not supported by ACL, can be decomposed with ngraph transformation - activationType == ngraph::helpers::ActivationTypes::SoftSign || // @todo not supported by ACL, can be decomposed with ngraph transformation + if (activationType == utils::ActivationTypes::GeluTanh || // @todo not supported by ACL, can be decomposed with transformation + activationType == utils::ActivationTypes::SoftSign || // @todo not supported by ACL, can be decomposed with transformation inputShapes.front().first.rank().get_length() > 5) // @todo tmp fallback to ref, remove after 6D+ ranks are properly supported - selectedType = std::string("ref_") + netPrecision.name(); + selectedType = std::string("ref_") + netPrecision.to_string(); #else - if (activationType == ngraph::helpers::ActivationTypes::Log) // @todo tmp fallback to ref, remove after Log is supported in emitters - selectedType = std::string("ref_") + netPrecision.name(); + if (activationType == utils::ActivationTypes::Log) // @todo tmp fallback to ref, remove after Log is supported in emitters + selectedType = std::string("ref_") + netPrecision.to_string(); #endif init_input_shapes(inputShapes); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto params = std::make_shared(ngPrc, inputDynamicShapes.front()); - auto activation = ngraph::builder::makeActivation(params, ngPrc, activationType, activationShapes, constantsValue); + auto params = std::make_shared(netPrecision, inputDynamicShapes.front()); + auto activation = utils::make_activation(params, netPrecision, activationType, activationShapes, constantsValue); activation->get_rt_info() = getCPUInfo(); - function = std::make_shared(ngraph::NodeVector{activation}, ov::ParameterVector{params}, "Activation"); + function = std::make_shared(ov::NodeVector{activation}, ov::ParameterVector{params}, "Activation"); } TEST_P(ActivationLayerCPUTest, CompareWithRefs) { @@ -138,8 +140,8 @@ const std::vector activationShapes() { return {}; } -const std::map>>& activationTypes() { - static const std::map>> activationTypes { +const std::map>>& activationTypes() { + static const std::map>> activationTypes { {Sqrt, {{}}}, {Sigmoid, {{}}}, {Tanh, {{}}}, @@ -160,8 +162,8 @@ const std::map>>& activationType return activationTypes; } -const std::vector& netPrc() { - static const std::vector netPrc{Precision::FP32}; +const std::vector& netPrc() { + static const std::vector netPrc{ov::element::f32}; return netPrc; } @@ -223,8 +225,8 @@ const std::vector>& basic5D() { return basic5D; } -const std::map>>& activationTypesDynamicMath() { - static const std::map>> activationTypesDynamicMath { +const std::map>>& activationTypesDynamicMath() { + static const std::map>> activationTypesDynamicMath { {Log, {{}}}, {Sign, {{}}}, {Acos, {{}}}, @@ -245,9 +247,9 @@ const std::map>>& activationType return activationTypesDynamicMath; } -const std::vector& netPrecisions() { - static const std::vector netPrecisions { - InferenceEngine::Precision::FP32 +const std::vector& netPrecisions() { + static const std::vector netPrecisions { + ov::element::f32 }; return netPrecisions; @@ -269,5 +271,6 @@ const std::vector>& dynamicMathBasic() { return dynamicMathBasic; } -} // namespace Activation -} // namespace CPULayerTestsDefinitions +} // namespace Activation +} // namespace test +} // namespace ov diff --git 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.hpp index b7881fae053691..26ac7d2c7f93cf 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/activation.hpp @@ -7,19 +7,20 @@ #include "shared_test_classes/single_layer/activation.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/cpu_test_utils.hpp" #include "gtest/gtest.h" -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using ActivationLayerCPUTestParamSet = - std::tuple, // Input shapes - std::vector, // Activation shapes - std::pair>, // Activation type and constant value - InferenceEngine::Precision, // Net precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision + std::tuple, // Input shapes + std::vector, // Activation shapes + std::pair>, // Activation type and constant value + ov::element::Type, // Net precision + ov::element::Type, // Input precision + ov::element::Type, // Output precision CPUTestUtils::CPUSpecificParams>; class ActivationLayerCPUTest : public testing::WithParamInterface, @@ -27,23 +28,23 @@ class ActivationLayerCPUTest : public testing::WithParamInterface &obj); - void generate_inputs(const std::vector& targetInputStaticShapes) override; + void generate_inputs(const std::vector& targetInputStaticShapes) override; protected: void SetUp() override; private: - InferenceEngine::Precision netPrecision = InferenceEngine::Precision::UNSPECIFIED; - ngraph::helpers::ActivationTypes activationType = ngraph::helpers::None; + ov::element::Type netPrecision = ov::element::undefined; + utils::ActivationTypes activationType = utils::ActivationTypes::None; }; namespace Activation { const std::vector activationShapes(); -const std::map>>& activationTypes(); +const std::map>>& activationTypes(); -const std::vector& netPrc(); +const std::vector& netPrc(); /* ============= Activation (1D) ============= */ const std::vector& cpuParams3D(); @@ -60,13 +61,14 @@ const std::vector& cpuParams5D(); const std::vector>& basic5D(); -const std::map>>& activationTypesDynamicMath(); +const std::map>>& activationTypesDynamicMath(); -const std::vector& netPrecisions(); +const std::vector& netPrecisions(); const std::vector& cpuParamsDynamicMath(); const std::vector>& dynamicMathBasic(); -} // namespace Activation -} // namespace CPULayerTestsDefinitions +} // namespace Activation +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.cpp index 3b118f7037c689..2e0e0d603fd00e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.cpp @@ -7,16 +7,14 @@ #include "gtest/gtest.h" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string ConvertCPULayerTest::getTestCaseName(testing::TestParamInfo obj) { InputShape inputShape; - InferenceEngine::Precision inPrc, outPrc; + ov::element::Type inPrc, outPrc; 
CPUSpecificParams cpuParams; std::tie(inputShape, inPrc, outPrc, cpuParams) = obj.param; @@ -27,27 +25,26 @@ std::string ConvertCPULayerTest::getTestCaseName(testing::TestParamInfo primitive has to be changed // TODO: remove the WA after I32 is supported in snippets (ticket: 99803) #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) - if (inPrc == InferenceEngine::Precision::I32 || outPrc == InferenceEngine::Precision::I32) + if (inPrc == ov::element::i32 || outPrc == ov::element::i32) return false; #endif - // ACL does not support specific in-out precision pairs + // ACL does not support specific in-out precision pairs #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) - if ((inPrc == InferenceEngine::Precision::I8 && outPrc == InferenceEngine::Precision::U8) || - (inPrc == InferenceEngine::Precision::U8 && outPrc == InferenceEngine::Precision::I8) || - (inPrc == InferenceEngine::Precision::FP32 && (outPrc == InferenceEngine::Precision::U8 || - outPrc == InferenceEngine::Precision::I8))) - return false; + if ((inPrc == ov::element::i8 && outPrc == ov::element::u8) || + (inPrc == ov::element::u8 && outPrc == ov::element::i8) || + (inPrc == ov::element::f32 && (outPrc == ov::element::u8 || outPrc == ov::element::i8))) + return false; #endif return true; } @@ -66,30 +63,26 @@ void ConvertCPULayerTest::SetUp() { if (!isInOutPrecisionSupported(inPrc, outPrc)) primitive = "ref"; - auto exec_type_precision = inPrc != InferenceEngine::Precision::U8 - ? inPrc - : InferenceEngine::Precision(InferenceEngine::Precision::I8); - selectedType = makeSelectedTypeStr(primitive, InferenceEngine::details::convertPrecision(exec_type_precision)); + auto exec_type_precision = inPrc != ov::element::u8 ? inPrc : ov::element::Type(ov::element::i8); + selectedType = makeSelectedTypeStr(primitive, exec_type_precision); for (size_t i = 0; i < shapes.second.size(); i++) { - targetStaticShapes.push_back(std::vector{shapes.second[i]}); + targetStaticShapes.push_back(std::vector{shapes.second[i]}); } inputDynamicShapes.push_back(shapes.first); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc); - auto targetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(ngPrc, shape)); + params.push_back(std::make_shared(inPrc, shape)); } - auto conversion = std::make_shared(params.front(), targetPrc); + auto conversion = std::make_shared(params.front(), outPrc); - function = makeNgraphFunction(ngPrc, params, conversion, "ConversionCPU"); + function = makeNgraphFunction(inPrc, params, conversion, "ConversionCPU"); } -void ConvertCPULayerTest::generate_inputs(const std::vector& targetInputStaticShapes) { - if (outPrc != Precision::BOOL) { +void ConvertCPULayerTest::generate_inputs(const std::vector& targetInputStaticShapes) { + if (outPrc != ov::element::boolean) { SubgraphBaseTest::generate_inputs(targetInputStaticShapes); return; } @@ -97,7 +90,7 @@ void ConvertCPULayerTest::generate_inputs(const std::vector& targ // In the scenario where input precision is floating point and output precision is boolean, // for CPU plugin, the output precision boolean will be converted to u8 during common transformation, // the elements in the output tensor will retain the format of u8 with the range [0, 255]. - // But the output precision in ngraph reference is literal boolean, the elements are either 0 or 1. 
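The comment pair being rewritten in this hunk explains why inputs are pinned to [-1, 1): the plugin emits boolean results through a u8 tensor, and restricting inputs this way keeps plugin and reference outputs directly comparable. The normalization, restated compactly:

```cpp
// create_and_fill_tensor with start_from = 0, range = 2 * size yields values in [0, 2 * size).
// Rescale onto [-1, 1):
for (size_t i = 0; i < size; ++i)
    rawBlobDataPtr[i] = rawBlobDataPtr[i] / size - 1;  // 0 -> -1, size -> 0, 2*size - eps -> ~1
```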
+    // But the output precision in reference model is literal boolean, the elements are either 0 or 1.
     // Here input floating points values are set to be in the range of [-1, 1], so no extra precision
     // converting between actual output and expected output will be needed from the side of single layer tests.
     inputs.clear();
@@ -105,20 +98,24 @@ void ConvertCPULayerTest::generate_inputs(const std::vector<ngraph::Shape>& targ
     auto shape = targetInputStaticShapes.front();
     size_t size = shape_size(shape);
-    ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInputs[0].get_element_type(), shape, 2 * size);
-    if (inPrc == Precision::FP32) {
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 2 * size;
+    ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInputs[0].get_element_type(), shape, in_data);
+
+    if (inPrc == ov::element::f32) {
         auto* rawBlobDataPtr = static_cast<float*>(tensor.data());
         for (size_t i = 0; i < size; ++i) {
             rawBlobDataPtr[i] = rawBlobDataPtr[i] / size - 1;
         }
-    } else if (inPrc == Precision::BF16) {
-        auto* rawBlobDataPtr = static_cast<ngraph::bfloat16*>(tensor.data());
+    } else if (inPrc == ov::element::bf16) {
+        auto* rawBlobDataPtr = static_cast<ov::bfloat16*>(tensor.data());
         for (size_t i = 0; i < size; ++i) {
             rawBlobDataPtr[i] = rawBlobDataPtr[i] / size - 1;
         }
     } else {
-        FAIL() << "Generating inputs with precision" << inPrc << " isn't supported, if output precision is boolean.";
+        FAIL() << "Generating inputs with precision " << inPrc.to_string() << " isn't supported, if output precision is boolean.";
     }
 
     inputs.insert({funcInputs[0].get_node_shared_ptr(), tensor});
@@ -199,16 +196,17 @@ const std::vector<InputShape>& inShapes_7D_dynamic() {
     return inShapes_7D_dynamic;
 }
 
-const std::vector<Precision>& precisions() {
-    static const std::vector<Precision> precisions = {
-        Precision::U8,
-        Precision::I8,
-        Precision::I32,
-        Precision::FP32,
-        Precision::BF16
+const std::vector<ov::element::Type>& precisions() {
+    static const std::vector<ov::element::Type> precisions = {
+        ov::element::u8,
+        ov::element::i8,
+        ov::element::i32,
+        ov::element::f32,
+        ov::element::bf16
     };
     return precisions;
 }
 }  // namespace Conversion
-}  // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp
index 4cac9c96a2501c..f0fbfbd29a2beb 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/conversion.hpp
@@ -5,41 +5,39 @@
 #pragma once
 
 #include "shared_test_classes/single_layer/activation.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include <common_test_utils/ov_tensor_utils.hpp>
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 #include "gtest/gtest.h"
 
-using namespace InferenceEngine;
-using namespace ngraph;
 using namespace CPUTestUtils;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
-using convertLayerTestParamsSet = std::tuple<InputShape, InferenceEngine::Precision, InferenceEngine::Precision, CPUSpecificParams>;
+namespace ov {
+namespace test {
+using convertLayerTestParamsSet = std::tuple<InputShape, ov::element::Type, ov::element::Type, CPUSpecificParams>;
 
 class ConvertCPULayerTest : public testing::WithParamInterface<convertLayerTestParamsSet>,
                             virtual public SubgraphBaseTest,
                             public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<convertLayerTestParamsSet> obj);
-    static bool isInOutPrecisionSupported(InferenceEngine::Precision inPrc, InferenceEngine::Precision outPrc);
+    static bool
isInOutPrecisionSupported(ov::element::Type inPrc, ov::element::Type outPrc); protected: void SetUp() override; - void generate_inputs(const std::vector& targetInputStaticShapes) override; + void generate_inputs(const std::vector& targetInputStaticShapes) override; private: - InferenceEngine::Precision inPrc, outPrc; + ov::element::Type inPrc, outPrc; }; namespace Conversion { - const std::vector& inShapes_4D_static(); - const std::vector& inShapes_4D_dynamic(); - const std::vector& inShapes_7D_static(); - const std::vector& inShapes_7D_dynamic(); - const std::vector& precisions(); -} // namespace Conversion -} // namespace CPULayerTestsDefinitions \ No newline at end of file +const std::vector& inShapes_4D_static(); +const std::vector& inShapes_4D_dynamic(); +const std::vector& inShapes_7D_static(); +const std::vector& inShapes_7D_dynamic(); +const std::vector& precisions(); +} // namespace Conversion +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp new file mode 100644 index 00000000000000..60205e1a0591a7 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.cpp @@ -0,0 +1,761 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "convolution.hpp" + +#include "gtest/gtest.h" +#include "test_utils/cpu_test_utils.hpp" +#include "utils/general_utils.h" + +using namespace CPUTestUtils; +using namespace ov::intel_cpu; + +namespace ov { +namespace test { +namespace Convolution { + +std::string ConvolutionLayerCPUTest::getTestCaseName(const testing::TestParamInfo& obj) { + convLayerTestParamsSet basicParamsSet; + CPUSpecificParams cpuParams; + fusingSpecificParams fusingParams; + ov::AnyMap additionalConfig; + std::tie(basicParamsSet, cpuParams, fusingParams, additionalConfig) = obj.param; + + convSpecificParams convParams; + ElementType netType; + ElementType inType, outType; + InputShape inputShape; + std::string targetDevice; + std::tie(convParams, netType, inType, outType, inputShape, targetDevice) = basicParamsSet; + ov::op::PadType padType; + ov::Shape kernel, stride, dilation; + std::vector padBegin, padEnd; + size_t convOutChannels; + std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; + + std::ostringstream result; + result << "IS="; + result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; + result << "TS=("; + for (const auto& shape : inputShape.second) { + result << ov::test::utils::vec2str(shape) << "_"; + } + result << ")_"; + result << "K" << ov::test::utils::vec2str(kernel) << "_"; + result << "S" << ov::test::utils::vec2str(stride) << "_"; + result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; + result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; + result << "D=" << ov::test::utils::vec2str(dilation) << "_"; + result << "O=" << convOutChannels << "_"; + result << "AP=" << padType << "_"; + result << "netPRC=" << netType << "_"; + result << "inPRC=" << inType << "_"; + result << "outPRC=" << outType << "_"; + result << "trgDev=" << targetDevice; + + result << CPUTestsBase::getTestCaseName(cpuParams); + result << CpuTestWithFusing::getTestCaseName(fusingParams); + + if (!additionalConfig.empty()) { + result << "_PluginConf"; + for (auto& item : additionalConfig) { + result << "_" << item.first << "=" << item.second.as(); + } + 
} + + return result.str(); +} + +void ConvolutionLayerCPUTest::checkBiasFusing(ov::CompiledModel& execNet) const { + if (!execNet) + return; + + auto execGraph = execNet.get_runtime_model(); + ASSERT_NE(nullptr, execGraph); + + bool foundConv = false; + for (const auto& node : execGraph->get_ops()) { + const auto& rtInfo = node->get_rt_info(); + auto getExecValue = [&rtInfo](const std::string& paramName) -> std::string { + auto it = rtInfo.find(paramName); + OPENVINO_ASSERT(rtInfo.end() != it); + return it->second.as(); + }; + + if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Convolution") { + foundConv = true; + ASSERT_EQ(3, node->inputs().size()); + break; + } + } + + ASSERT_TRUE(foundConv) << "Can't find Convolution node"; +} + +std::shared_ptr ConvolutionLayerCPUTest::modifyGraph(const ov::element::Type& ngPrc, + ov::ParameterVector& params, + const std::shared_ptr& lastNode) { + auto retNode = CpuTestWithFusing::modifyGraph(ngPrc, params, lastNode); + std::shared_ptr opToShapeInfer = nullptr; + for (auto& targetShapes : targetStaticShapes) { + for (size_t i = targetShapes.size(); i < params.size(); ++i) { + const auto& shape = params[i]->get_output_partial_shape(0); + if (shape.is_static()) { + targetShapes.push_back(shape.get_shape()); + } else { + // It is assumed that in such tests we have a second parameter only if sum fusion is tested. + // Considering this fact, we need to set the appropriate static shape for the second term of the sum + // operation, and it has to match the convolution output shape. So the most suitable solution here is to + // perform shape inference on the convolution node.
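+ // (Constant inputs, e.g. the weights, are passed to the clone as is; every non-Constant input + // is replaced with a fresh Parameter so the clone can be re-validated with the required static shape.)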
== "jit_gemm_BF16") + rel_threshold = 0.05f; + } else { + selectedType = makeSelectedTypeStr(selectedType, netType); + } + + ov::op::PadType padType; + ov::Shape stride; + std::vector padBegin, padEnd; + size_t convOutChannels; + std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; + + ov::ParameterVector inputParams; + for (auto&& shape : inputDynamicShapes) + inputParams.push_back(std::make_shared(ov::element::f32, shape)); + auto convolutionNode = ov::test::utils::make_convolution(inputParams[0], netType, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels); + + function = makeNgraphFunction(netType, inputParams, convolutionNode, "Convolution"); +} + +TEST_P(ConvolutionLayerCPUTest, CompareWithRefs) { + // Skip tests for sse41 convolution where ic or oc cannot be exactly divided by the block size, + // since tails processing for sse41 nspc layout is not supported yet (see 52736). + if (!inFmts.empty() && (inFmts.front() == nwc || inFmts.front() == nhwc || inFmts.front() == ndhwc) && selectedType.find("jit_sse") != std::string::npos) { + auto inpChannels = function->get_parameters().front()->get_partial_shape()[1].get_length(); + auto outChannels = function->get_output_partial_shape(0)[1].get_length(); + if ((inpChannels % 8) || (outChannels % 8)) { + GTEST_SKIP() << "Disabled test due to the sse41 convolution kernel does not support tails for nspc layout." << std::endl; + } + } + + if (!priority.empty()) { + // Skip all the brgconv avx2 tests for now. Current brgconv_avx2 is disabled due to perf regression[CVS-105756]. + // This convolution test code has already covered brgconv avx2 primitive. + // @todo: Remove this once brgconv_avx2 is enabled for convolution node. + if (priority[0].find("brgconv_avx2") != std::string::npos) + GTEST_SKIP() << "Disabled test due to the brgconv_avx2 is not enabled." << std::endl; + // Skip tests for brgconv convolution where kernel size = 1x1 + if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { + bool is_1x1 = true; + for (const auto &i : kernel) { + if (i != 1) { + is_1x1 = false; + break; + } + } + if (is_1x1) { + GTEST_SKIP() << "Disabled test due to the brgconv does not support 1x1 convolution kernel." << std::endl; + } + } + + // Skip tests for brgconv_amx convolution where dilation is not 1 + if (priority[0].find("amx") != std::string::npos) { + bool dilation_is_1x1 = true; + for (const auto &i : dilation) { + if (i != 1) { + dilation_is_1x1 = false; + break; + } + } + if (!dilation_is_1x1) { + GTEST_SKIP() << "Disabled test due to the brgconv amx does not support non 1 dilation convolution kernel." 
+ if ((inpChannels % 8) || (outChannels % 8)) { + GTEST_SKIP() << "Disabled test because the sse41 convolution kernel does not support tails for the nspc layout." << std::endl; + } + } + + if (!priority.empty()) { + // Skip all the brgconv avx2 tests for now. Current brgconv_avx2 is disabled due to a perf regression [CVS-105756]. + // This convolution test code has already covered the brgconv avx2 primitive. + // @todo: Remove this once brgconv_avx2 is enabled for the convolution node. + if (priority[0].find("brgconv_avx2") != std::string::npos) + GTEST_SKIP() << "Disabled test because brgconv_avx2 is not enabled." << std::endl; + // Skip tests for brgconv convolution where kernel size = 1x1 + if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { + bool is_1x1 = true; + for (const auto &i : kernel) { + if (i != 1) { + is_1x1 = false; + break; + } + } + if (is_1x1) { + GTEST_SKIP() << "Disabled test because brgconv does not support a 1x1 convolution kernel." << std::endl; + } + } + + // Skip tests for brgconv_amx convolution where dilation is not 1 + if (priority[0].find("amx") != std::string::npos) { + bool dilation_is_1x1 = true; + for (const auto &i : dilation) { + if (i != 1) { + dilation_is_1x1 = false; + break; + } + } + if (!dilation_is_1x1) { + GTEST_SKIP() << "Disabled test because brgconv amx does not support convolution kernels with dilation other than 1." << std::endl; + } + } + } + +// FIXME: ACL output shape check fails if kernel, stride and padding are all equal to 1 +// CpuGemm::validate checks that the 2nd and 3rd dimensions of the input and output shapes are equal and fails (ticket 114201) +#if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) + if (std::all_of(kernel.begin(), kernel.end(), [](size_t i){return i == 1;}) && + std::all_of(stride.begin(), stride.end(), [](size_t i){return i == 1;}) && + std::all_of(padBegin.begin(), padBegin.end(), [](ptrdiff_t i){return i == 1;})) { + GTEST_SKIP() << "Disabled test because the ACL output shape check fails" << std::endl; + } +#endif + run(); + + if (isBias) { + checkBiasFusing(compiledModel); + } + CheckPluginRelatedResults(compiledModel, "Convolution"); +} + +const ov::Shape& numOutChannels() { + static const ov::Shape numOutChannels = { 64, 63 }; + return numOutChannels; +} + +const ov::Shape& numOutChannels_Gemm() { + static const ov::Shape numOutChannels_Gemm = { 6 }; + return numOutChannels_Gemm; +} + +const std::vector& kernels1d() { + static const std::vector kernels1d = { {3}, {1} }; + return kernels1d; +} + +const std::vector& strides1d() { + static const std::vector strides1d = { {1}, {2} }; + return strides1d; +} + +const std::vector>& padBegins1d() { + static const std::vector> padBegins1d = { {0}, {1} }; + return padBegins1d; +} + +const std::vector>& padEnds1d() { + static const std::vector> padEnds1d = { {0} }; + return padEnds1d; +} + +const std::vector& dilations1d() { + static const std::vector dilations1d = { {1}, {2} }; + return dilations1d; +} + +const std::vector& kernels2d() { + static const std::vector kernels2d = { {3, 3}, {1, 1} }; + return kernels2d; +} + +const std::vector& strides2d() { + static const std::vector strides2d = { {1, 1}, {2, 2} }; + return strides2d; +} + +const std::vector>& padBegins2d() { + static const std::vector> padBegins2d = { {0, 0}, {1, 1} }; + return padBegins2d; +} + +const std::vector>& padEnds2d() { + static const std::vector> padEnds2d = { {0, 0} }; + return padEnds2d; +} + +const std::vector& dilations2d() { + static const std::vector dilations2d = { {1, 1} }; + return dilations2d; +} + +const std::vector& inShapesGemm2D() { + static const std::vector inShapesGemm2D = { + {{}, {{ 2, 12, 7, 7 }}}, + { + //dynamic shape + { {1, 200}, 12, -1, {1, 200} }, + { //target static shapes + { 2, 12, 7, 7 }, + { 1, 12, 5, 5 } + } + } + }; + return inShapesGemm2D; +} + +const std::vector& inShapesGemm2D_cache() { + static const std::vector inShapesGemm2D_cache = { + {{}, {{ 2, 12, 7, 7 }}}, + { + //dynamic shape + { {1, 200}, 12, -1, {1, 200} }, + { //target static shapes + { 1, 12, 5, 5 }, + { 1, 12, 7, 7 }, + { 1, 12, 5, 5 } + } + } + }; + return inShapesGemm2D_cache; +} + +const std::vector& CPUParams_2D() { + static const std::vector CPUParams_2D = { + conv_sse42_2D, + conv_avx2_2D, + conv_avx512_2D, + conv_sse42_2D_nspc, + conv_avx2_2D_nspc, + conv_avx2_2D_nspc_brgconv, + conv_avx512_2D_nspc, + conv_avx512_2D_nspc_brgconv + }; + return CPUParams_2D; +} + +const std::vector& CPUParams_GEMM_1D() { + static const std::vector CPUParams_GEMM_1D = { + conv_gemm_1D, + conv_gemm_1D_nspc + }; + return CPUParams_GEMM_1D; +} + +const std::vector& CPUParams_GEMM_2D() { + static const std::vector CPUParams_GEMM_2D = { + conv_gemm_2D, + conv_gemm_2D_nspc, + conv_gemm_acl_2D_nspc + }; + return CPUParams_GEMM_2D; +} + +const std::vector& inputShapes1d() { + static const std::vector inputShapes1d = { + {{}, {{ 2, 64, 7 }}}, + {{}, {{ 1, 67, 7 }}}, + { + //dynamic
shape + { -1, 64, {1, 200} }, + { //target static shapes + { 2, 64, 7 }, + { 1, 64, 9 } + } + }, + { + //dynamic shape + { -1, 67, {1, 200} }, + { //target static shapes + { 2, 67, 7 }, + { 1, 67, 9 } + } + }, + { + //dynamic shape + { {1, 200}, 64, -1 }, + { //target static shapes + { 2, 64, 7 }, + { 1, 64, 5 } + } + } + }; + return inputShapes1d; +} + +const std::vector& inputShapes2d() { + static const std::vector inputShapes2d = { + {{}, {{ 1, 64, 7, 7 }}}, + {{}, {{ 1, 67, 7, 7 }}}, + { + //dynamic shape + { -1, 64, -1, {1, 200} }, + { //target static shapes + { 2, 64, 7, 7 }, + { 1, 64, 9, 9} + } + }, + { + //dynamic shape + { -1, 67, -1, {1, 200} }, + { //target static shapes + { 2, 67, 7, 7 }, + { 1, 67, 9, 9} + } + } + }; + return inputShapes2d; +} + +const std::vector& inputShapesPlain2Blocked2d() { + static const std::vector inputShapesPlain2Blocked2d = { + {{}, {{ 1, 1, 7, 7 }}}, + {{}, {{ 1, 2, 7, 7 }}}, + {{}, {{ 1, 3, 7, 7 }}}, + { + //dynamic shape + { -1, 1, -1, {1, 200} }, + { //target static shapes + { 2, 1, 7, 7 }, + { 1, 1, 9, 9} + } + }, + { + //dynamic shape + { -1, 3, -1, {1, 200} }, + { //target static shapes + { 2, 3, 7, 7 }, + { 1, 3, 9, 9} + } + } + }; + return inputShapesPlain2Blocked2d; +} + +const std::vector& inputShapes2d_dynBatch() { + static const std::vector inputShapes2d_dynBatch = { + { + //dynamic shape + { {1, 10}, 64, 7, 7 }, + { //target static shapes + { 2, 64, 7, 7 }, + { 1, 64, 7, 7 } + } + }, + }; + return inputShapes2d_dynBatch; +} + +const std::vector& CPUParams_1x1_1D() { + static const std::vector CPUParams_1x1_1D = { + conv_sse42_1D_1x1, + conv_avx2_1D_1x1, + conv_avx512_1D_1x1, + conv_sse42_1D_1x1_nspc, + conv_avx2_1D_1x1_nspc, + conv_avx2_1D_1x1_nspc_brgconv, + conv_avx512_1D_1x1_nspc, + conv_avx512_1D_1x1_nspc_brgconv + }; + return CPUParams_1x1_1D; +} + +const std::vector& kernels3d() { + static const std::vector kernels3d = { {3, 3, 3}, {1, 1, 1} }; + return kernels3d; +} + +const std::vector& strides3d() { + static const std::vector strides3d = { {1, 1, 1}, {2, 2, 2} }; + return strides3d; +} + +const std::vector>& padBegins3d() { + static const std::vector> padBegins3d = { {0, 0, 0}, {1, 1, 1} }; + return padBegins3d; +} + +const std::vector>& padEnds3d() { + static const std::vector> padEnds3d = { {0, 0, 0} }; + return padEnds3d; +} + +const std::vector& dilations3d() { + static const std::vector dilations3d = { {1, 1, 1} }; + return dilations3d; +} + +const std::vector & inputShapes3d() { + static const std::vector inputShapes3d = { + {{}, {{ 1, 64, 7, 7, 7 }}}, + {{}, {{ 1, 67, 7, 7, 7 }}}, + { + //dynamic shapes + { -1, 64, -1, {1, 200}, -1 }, + { //target static shapes + { 1, 64, 7, 7, 7 }, + { 1, 64, 9, 9, 9} + } + }, + { + //dynamic shapes + { -1, 67, -1, {1, 200}, -1 }, + { //target static shapes + { 1, 67, 7, 7, 7 }, + { 1, 67, 9, 9, 9} + } + } + }; + return inputShapes3d; +} + +const std::vector & inShapesGemm3D() { + static const std::vector inShapesGemm3D = { + {{}, {{ 2, 12, 7, 7, 7 }}}, + { + //dynamic shape + { {1, 200}, 12, -1, {1, 200}, -1 }, + { //target static shapes + { 2, 12, 7, 7, 7 }, + { 1, 12, 5, 5, 5 } + } + } + }; + return inShapesGemm3D; +} + +const std::vector& CPUParams_GEMM_3D() { + static const std::vector CPUParams_GEMM_3D = { + conv_gemm_3D, + conv_gemm_3D_nspc, + conv_gemm_acl_3D, + conv_gemm_acl_3D_nspc + }; + return CPUParams_GEMM_3D; +} + +const std::vector& CPUParams_1x1_2D() { + static const std::vector CPUParams_1x1_2D = { + conv_sse42_2D_1x1, + conv_avx2_2D_1x1, + conv_avx512_2D_1x1, + 
conv_sse42_2D_1x1_nspc, + conv_avx2_2D_1x1_nspc, + conv_avx2_2D_1x1_nspc_brgconv, + conv_avx512_2D_1x1_nspc, + conv_avx512_2D_1x1_nspc_brgconv + }; + return CPUParams_1x1_2D; +} + +const std::vector& inputShapes2d_cache() { + static const std::vector inputShapes2d_cache = { + {{}, {{ 1, 64, 7, 7 }}}, + {{}, {{ 1, 67, 7, 7 }}}, + { + //dynamic shape + { -1, 64, -1, {1, 200} }, + { //target static shapes + { 1, 64, 7, 7 }, + { 1, 64, 9, 9 }, + { 1, 64, 7, 7 } + } + }, + { + //dynamic shape + { -1, 67, -1, {1, 200} }, + { //target static shapes + { 1, 67, 7, 7 }, + { 1, 67, 9, 9} + } + } + }; + return inputShapes2d_cache; +} + +const std::vector& fusingParamsSetWithEmpty() { + static const std::vector fusingParamsSetWithEmpty = { + emptyFusingSpec, + // eltwise + fusingRelu, + fusingPRelu1DScaleShift, + // depthwise + fusingReluScaleShift, + // fake quantize + fusingFakeQuantizePerTensorRelu, + fusingFakeQuantizePerChannelRelu, + // sum + fusingSumEluFQ, + fusingSum, + // bias + fusingAddPerChannel + }; + return fusingParamsSetWithEmpty; +} + +const std::vector& inShapesGemm1D() { + static const std::vector inShapesGemm1D = { + {{}, {{ 2, 12, 7 }}}, + { + //dynamic shape + { {1, 200}, 12, {1, 200} }, + { //target static shapes + { 2, 12, 7 }, + { 1, 12, 5 } + } + } + }; + return inShapesGemm1D; +} + +const convParams_ExplicitPaddingType& convParams_ExplicitPadding_GEMM_2D() { + static const auto convParams_ExplicitPadding_GEMM_2D = + ::testing::Combine(::testing::ValuesIn(kernels2d()), + ::testing::ValuesIn(strides2d()), + ::testing::ValuesIn(padBegins2d()), + ::testing::ValuesIn(padEnds2d()), + ::testing::ValuesIn(dilations2d()), + ::testing::ValuesIn(numOutChannels_Gemm()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_GEMM_2D; +} + +const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_GEMM_2D_dilated() { + static const auto convParams_ExplicitPadding_GEMM_2D_dilated = + ::testing::Combine(::testing::ValuesIn(kernels2d()), + ::testing::ValuesIn(strides2d()), + ::testing::ValuesIn(padBegins2d()), + ::testing::ValuesIn(padEnds2d()), + ::testing::Values(ov::Shape{2, 2}), + ::testing::ValuesIn(numOutChannels_Gemm()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_GEMM_2D_dilated; +} + +const convParams_ExplicitPaddingType& convParams_ExplicitPadding_GEMM_1D() { + static const auto convParams_ExplicitPadding_GEMM_1D = + ::testing::Combine(::testing::ValuesIn(kernels1d()), + ::testing::ValuesIn(strides1d()), + ::testing::ValuesIn(padBegins1d()), + ::testing::ValuesIn(padEnds1d()), + ::testing::ValuesIn(dilations1d()), + ::testing::ValuesIn(numOutChannels_Gemm()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_GEMM_1D; +} + +const convParams_ExplicitPaddingType& convParams_ExplicitPadding_2D() { + static const auto convParams_ExplicitPadding_2D = ::testing::Combine(::testing::ValuesIn(kernels2d()), + ::testing::ValuesIn(strides2d()), + ::testing::ValuesIn(padBegins2d()), + ::testing::ValuesIn(padEnds2d()), + ::testing::ValuesIn(dilations2d()), + ::testing::ValuesIn(numOutChannels()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_2D; +} + +const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_2D_dilated() { + static const auto convParams_ExplicitPadding_2D_dilated = + ::testing::Combine(::testing::ValuesIn(kernels2d()), + ::testing::ValuesIn(strides2d()), + ::testing::ValuesIn(padBegins2d()), + 
::testing::ValuesIn(padEnds2d()), + ::testing::Values(ov::Shape{2, 2}), + ::testing::ValuesIn(numOutChannels()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_2D_dilated; +} + +const convParams_ExplicitPaddingType& convParams_ExplicitPadding_GEMM_3D() { + static const auto convParams_ExplicitPadding_GEMM_3D = + ::testing::Combine(::testing::ValuesIn(kernels3d()), + ::testing::ValuesIn(strides3d()), + ::testing::ValuesIn(padBegins3d()), + ::testing::ValuesIn(padEnds3d()), + ::testing::ValuesIn(dilations3d()), + ::testing::ValuesIn(numOutChannels_Gemm()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_GEMM_3D; +} + +const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_GEMM_3D_dilated() { + static const auto convParams_ExplicitPadding_GEMM_3D_dilated = + ::testing::Combine(::testing::ValuesIn(kernels3d()), + ::testing::ValuesIn(strides3d()), + ::testing::ValuesIn(padBegins3d()), + ::testing::ValuesIn(padEnds3d()), + ::testing::Values(ov::Shape{2, 2, 2}), + ::testing::ValuesIn(numOutChannels_Gemm()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_GEMM_3D_dilated; +} + +const convParams_ExplicitPaddingType& convParams_ExplicitPadding_3D() { + static const auto convParams_ExplicitPadding_3D = ::testing::Combine(::testing::ValuesIn(kernels3d()), + ::testing::ValuesIn(strides3d()), + ::testing::ValuesIn(padBegins3d()), + ::testing::ValuesIn(padEnds3d()), + ::testing::ValuesIn(dilations3d()), + ::testing::ValuesIn(numOutChannels()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_3D; +} + +const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_3D_dilated() { + static const auto convParams_ExplicitPadding_3D_dilated = + ::testing::Combine(::testing::ValuesIn(kernels3d()), + ::testing::ValuesIn(strides3d()), + ::testing::ValuesIn(padBegins3d()), + ::testing::ValuesIn(padEnds3d()), + ::testing::Values(ov::Shape{2, 2, 2}), + ::testing::ValuesIn(numOutChannels()), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_3D_dilated; +} + +const convParams_ExplicitPadding_1x1_Type& convParams_ExplicitPadding_1x1_1D() { + static const auto convParams_ExplicitPadding_1x1_1D = + ::testing::Combine(::testing::Values(ov::Shape({1})), + ::testing::Values(ov::Shape({1})), + ::testing::Values(std::vector({0})), + ::testing::Values(std::vector({0})), + ::testing::Values(ov::Shape({1})), + ::testing::Values(63), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_1x1_1D; +} + +const convParams_ExplicitPadding_1x1_Type& convParams_ExplicitPadding_1x1_2D() { + static const auto convParams_ExplicitPadding_1x1_2D = + ::testing::Combine(::testing::Values(ov::Shape({1, 1})), + ::testing::Values(ov::Shape({1, 1})), + ::testing::Values(std::vector({0, 0})), + ::testing::Values(std::vector({0, 0})), + ::testing::Values(ov::Shape({1, 1})), + ::testing::Values(63), + ::testing::Values(ov::op::PadType::EXPLICIT)); + return convParams_ExplicitPadding_1x1_2D; +} + +} // namespace Convolution +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.hpp new file mode 100644 index 00000000000000..0228f18942744f --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/convolution.hpp @@ -0,0 +1,136 @@ +// 
Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/core/visibility.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/convolution.hpp" +#include "test_utils/convolution_params.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/fusing_test_utils.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { +namespace Convolution { + +typedef std::tuple< + convSpecificParams, + ElementType, // Net precision + ElementType, // Input precision + ElementType, // Output precision + InputShape, // Input shape + LayerTestsUtils::TargetDevice // Device name +> convLayerTestParamsSet; + +typedef std::tuple< + convLayerTestParamsSet, + CPUSpecificParams, + fusingSpecificParams, + ov::AnyMap > convLayerCPUTestParamsSet; + +class ConvolutionLayerCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, public CpuTestWithFusing { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj); +protected: + bool isBias = false; + ov::Shape kernel, dilation; + ov::Shape stride; + std::vector padBegin, padEnd; + + void checkBiasFusing(ov::CompiledModel &execNet) const; + std::shared_ptr modifyGraph(const ov::element::Type &ngPrc, + ov::ParameterVector ¶ms, + const std::shared_ptr &lastNode) override; + void SetUp() override; +}; + + using SizeVector = std::vector; + const std::vector& kernels1d(); + const std::vector& strides1d(); + const std::vector>& padBegins1d(); + const std::vector>& padEnds1d(); + const std::vector& dilations1d(); + + const std::vector& kernels2d(); + const std::vector& strides2d(); + const std::vector>& padBegins2d(); + const std::vector>& padEnds2d(); + const std::vector& dilations2d(); + + const std::vector& kernels3d(); + const std::vector& strides3d(); + const std::vector>& padBegins3d(); + const std::vector>& padEnds3d(); + const std::vector& dilations3d(); + + const std::vector& CPUParams_1x1_1D(); + const std::vector& CPUParams_1x1_2D(); + const std::vector& CPUParams_2D(); + const std::vector& CPUParams_GEMM_1D(); + const std::vector& CPUParams_GEMM_2D(); + const std::vector& CPUParams_GEMM_3D(); + + const std::vector& inputShapes1d(); + const std::vector& inputShapes2d(); + const std::vector& inputShapes3d(); + const std::vector& inputShapes2d_cache(); + const std::vector& inputShapesPlain2Blocked2d(); + const std::vector& inputShapes2d_dynBatch(); + const std::vector& inShapesGemm1D(); + + const std::vector& inShapesGemm2D(); + const std::vector& inShapesGemm2D_cache(); + const std::vector& inShapesGemm3D(); + + const ov::Shape& numOutChannels(); + const ov::Shape& numOutChannels_Gemm(); + + const std::vector& fusingParamsSetWithEmpty(); + + using convParams_ExplicitPaddingType = decltype(::testing::Combine( + ::testing::ValuesIn(kernels2d()), + ::testing::ValuesIn(strides2d()), + ::testing::ValuesIn(padBegins2d()), + ::testing::ValuesIn(padEnds2d()), + ::testing::ValuesIn(dilations2d()), + ::testing::ValuesIn(numOutChannels_Gemm()), + ::testing::Values(ov::op::PadType::EXPLICIT))); + using convParams_ExplicitPaddingDilatedType = decltype(::testing::Combine( + ::testing::ValuesIn(kernels2d()), + ::testing::ValuesIn(strides2d()), + ::testing::ValuesIn(padBegins2d()), + ::testing::ValuesIn(padEnds2d()), + ::testing::Values(ov::Shape{2, 2}), + ::testing::ValuesIn(numOutChannels_Gemm()), + 
::testing::Values(ov::op::PadType::EXPLICIT))); + using convParams_ExplicitPadding_1x1_Type = decltype(::testing::Combine( + ::testing::Values(ov::Shape({1})), + ::testing::Values(ov::Shape({1})), + ::testing::Values(std::vector({0})), + ::testing::Values(std::vector({0})), + ::testing::Values(ov::Shape({1})), + ::testing::Values(63), + ::testing::Values(ov::op::PadType::EXPLICIT))); + const convParams_ExplicitPaddingType& convParams_ExplicitPadding_GEMM_1D(); + const convParams_ExplicitPaddingType& convParams_ExplicitPadding_GEMM_2D(); + const convParams_ExplicitPaddingType& convParams_ExplicitPadding_GEMM_3D(); + const convParams_ExplicitPaddingType& convParams_ExplicitPadding_2D(); + const convParams_ExplicitPaddingType& convParams_ExplicitPadding_3D(); + + const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_2D_dilated(); + const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_3D_dilated(); + const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_GEMM_2D_dilated(); + const convParams_ExplicitPaddingDilatedType& convParams_ExplicitPadding_GEMM_3D_dilated(); + + const convParams_ExplicitPadding_1x1_Type& convParams_ExplicitPadding_1x1_1D(); + const convParams_ExplicitPadding_1x1_Type& convParams_ExplicitPadding_1x1_2D(); + } // namespace Convolution + } // namespace test + } // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.cpp index 667a1b08f6af18..f57d10b171a9fe 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.cpp @@ -3,18 +3,18 @@ // #include "eltwise.hpp" + +#include "common_test_utils/node_builders/eltwise.hpp" #include "gtest/gtest.h" +#include "internal_properties.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/properties.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string EltwiseLayerCPUTest::getTestCaseName(testing::TestParamInfo obj) { subgraph::EltwiseTestParams basicParamsSet; @@ -33,7 +33,7 @@ std::string EltwiseLayerCPUTest::getTestCaseName(testing::TestParamInfo& targetInputStaticShapes) { +void EltwiseLayerCPUTest::generate_inputs(const std::vector& targetInputStaticShapes) { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -105,7 +109,7 @@ void EltwiseLayerCPUTest::SetUp() { std::tie(basicParamsSet, cpuParams, fusingParams, enforceSnippets) = this->GetParam(); std::vector shapes; ElementType netType; - ngraph::helpers::InputLayerType secondaryInputType; + utils::InputLayerType secondaryInputType; ov::test::utils::OpType opType; ov::AnyMap additionalConfig; std::tie(shapes, eltwiseType, secondaryInputType, opType, netType, inType, outType, targetDevice, additionalConfig) = basicParamsSet; @@ -127,7 +131,7 @@ void EltwiseLayerCPUTest::SetUp() { shapes.resize(2); switch (opType) { case ov::test::utils::OpType::SCALAR: { - std::vector identityShapes(shapes[0].second.size(), {1}); + std::vector identityShapes(shapes[0].second.size(), {1}); shapes[1] = {{}, identityShapes}; break; } @@ 
-145,31 +149,29 @@ void EltwiseLayerCPUTest::SetUp() { updateSelectedType(getPrimitiveType(), netType, configuration); // selectedType = makeSelectedTypeStr(getPrimitiveType(), netType); #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) - if (eltwiseType == POWER) { + if (eltwiseType == utils::POWER) { selectedType = std::regex_replace(selectedType, std::regex("acl"), "ref"); } #endif - if (enforceSnippets) { - configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE, - InferenceEngine::PluginConfigInternalParams::IGNORE_CALLBACK}); - } else { - configuration.insert({InferenceEngine::PluginConfigInternalParams::KEY_SNIPPETS_MODE, - InferenceEngine::PluginConfigInternalParams::DISABLE}); - } + if (enforceSnippets) { + configuration.insert(ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK)); + } else { + configuration.insert(ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::DISABLE)); + } ov::ParameterVector parameters{std::make_shared(netType, inputDynamicShapes.front())}; - std::shared_ptr secondaryInput; - if (eltwiseType != ngraph::helpers::EltwiseTypes::BITWISE_NOT) { + std::shared_ptr secondaryInput; + if (eltwiseType != utils::EltwiseTypes::BITWISE_NOT) { switch (secondaryInputType) { - case ngraph::helpers::InputLayerType::PARAMETER: { + case utils::InputLayerType::PARAMETER: { auto param = std::make_shared(netType, inputDynamicShapes.back()); secondaryInput = param; parameters.push_back(param); break; } - case ngraph::helpers::InputLayerType::CONSTANT: { + case utils::InputLayerType::CONSTANT: { auto pShape = inputDynamicShapes.back(); - ngraph::Shape shape; + ov::Shape shape; if (pShape.is_static()) { shape = pShape.get_shape(); } else { @@ -185,23 +187,23 @@ void EltwiseLayerCPUTest::SetUp() { auto data_tensor = generate_eltwise_input(netType, shape); if ((netType == ElementType::i8) || (netType == ElementType::u8)) { auto data_ptr = reinterpret_cast(data_tensor.data()); - std::vector data(data_ptr, data_ptr + ngraph::shape_size(shape)); + std::vector data(data_ptr, data_ptr + ov::shape_size(shape)); secondaryInput = ngraph::builder::makeConstant(netType, shape, data); } else if ((netType == ElementType::i16) || (netType == ElementType::u16)) { auto data_ptr = reinterpret_cast(data_tensor.data()); - std::vector data(data_ptr, data_ptr + ngraph::shape_size(shape)); + std::vector data(data_ptr, data_ptr + ov::shape_size(shape)); secondaryInput = ngraph::builder::makeConstant(netType, shape, data); } else if ((netType == ElementType::i32) || (netType == ElementType::u32)) { auto data_ptr = reinterpret_cast(data_tensor.data()); - std::vector data(data_ptr, data_ptr + ngraph::shape_size(shape)); + std::vector data(data_ptr, data_ptr + ov::shape_size(shape)); secondaryInput = ngraph::builder::makeConstant(netType, shape, data); } else if (netType == ElementType::f16) { auto data_ptr = reinterpret_cast(data_tensor.data()); - std::vector data(data_ptr, data_ptr + ngraph::shape_size(shape)); + std::vector data(data_ptr, data_ptr + ov::shape_size(shape)); secondaryInput = ngraph::builder::makeConstant(netType, shape, data); } else { auto data_ptr = reinterpret_cast(data_tensor.data()); - std::vector data(data_ptr, data_ptr + ngraph::shape_size(shape)); + std::vector data(data_ptr, data_ptr + ov::shape_size(shape)); secondaryInput = ngraph::builder::makeConstant(netType, shape, data); } break; @@ -211,7 +213,7 @@ void EltwiseLayerCPUTest::SetUp() { } } } - auto eltwise = ngraph::builder::makeEltwise(parameters[0], 
secondaryInput, eltwiseType); + auto eltwise = utils::makeEltwise(parameters[0], secondaryInput, eltwiseType); function = makeNgraphFunction(netType, parameters, eltwise, "Eltwise"); } @@ -242,36 +244,36 @@ const std::vector& opTypes() { return opTypes; } -const std::vector& eltwiseOpTypesBinInp() { - static const std::vector eltwiseOpTypesBinInp = { - ngraph::helpers::EltwiseTypes::ADD, - ngraph::helpers::EltwiseTypes::MULTIPLY, +const std::vector& eltwiseOpTypesBinInp() { + static const std::vector eltwiseOpTypesBinInp = { + utils::EltwiseTypes::ADD, + utils::EltwiseTypes::MULTIPLY, #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) - ngraph::helpers::EltwiseTypes::SUBTRACT, // TODO: Fix CVS-105430 - ngraph::helpers::EltwiseTypes::DIVIDE, // TODO: Fix CVS-105430 - ngraph::helpers::EltwiseTypes::FLOOR_MOD, // TODO: Fix CVS-111875 + utils::EltwiseTypes::SUBTRACT, // TODO: Fix CVS-105430 + utils::EltwiseTypes::DIVIDE, // TODO: Fix CVS-105430 + utils::EltwiseTypes::FLOOR_MOD, // TODO: Fix CVS-111875 #endif - ngraph::helpers::EltwiseTypes::SQUARED_DIFF, + utils::EltwiseTypes::SQUARED_DIFF, }; return eltwiseOpTypesBinInp; } -const std::vector& eltwiseOpTypesDiffInp() { - static const std::vector eltwiseOpTypesDiffInp = { // Different number of input nodes depending on optimizations - ngraph::helpers::EltwiseTypes::POWER, - // ngraph::helpers::EltwiseTypes::MOD // Does not execute because of transformations +const std::vector& eltwiseOpTypesDiffInp() { + static const std::vector eltwiseOpTypesDiffInp = { // Different number of input nodes depending on optimizations + utils::EltwiseTypes::POWER, + // utils::EltwiseTypes::MOD // Does not execute because of transformations }; return eltwiseOpTypesDiffInp; } -const std::vector& eltwiseOpTypesBinDyn() { - static const std::vector eltwiseOpTypesBinDyn = { - ngraph::helpers::EltwiseTypes::ADD, - ngraph::helpers::EltwiseTypes::MULTIPLY, +const std::vector& eltwiseOpTypesBinDyn() { + static const std::vector eltwiseOpTypesBinDyn = { + utils::EltwiseTypes::ADD, + utils::EltwiseTypes::MULTIPLY, #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) // TODO: Fix CVS-105430 - ngraph::helpers::EltwiseTypes::SUBTRACT, + utils::EltwiseTypes::SUBTRACT, #endif - ngraph::helpers::EltwiseTypes::SQUARED_DIFF, + utils::EltwiseTypes::SQUARED_DIFF, }; return eltwiseOpTypesBinDyn; } @@ -312,29 +314,29 @@ const std::vector>& inShapes_5D() { return inShapes_5D; } -const std::vector& eltwiseOpTypesI32() { - static const std::vector eltwiseOpTypesI32 = { - ngraph::helpers::EltwiseTypes::ADD, - ngraph::helpers::EltwiseTypes::MULTIPLY, +const std::vector& eltwiseOpTypesI32() { + static const std::vector eltwiseOpTypesI32 = { + utils::EltwiseTypes::ADD, + utils::EltwiseTypes::MULTIPLY, #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) // TODO: Fix CVS-105430 - ngraph::helpers::EltwiseTypes::SUBTRACT, - ngraph::helpers::EltwiseTypes::DIVIDE, + utils::EltwiseTypes::SUBTRACT, + utils::EltwiseTypes::DIVIDE, #endif - ngraph::helpers::EltwiseTypes::SQUARED_DIFF, + utils::EltwiseTypes::SQUARED_DIFF, }; return eltwiseOpTypesI32; } -const std::vector& secondaryInputTypes() { - static const std::vector secondaryInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, +const std::vector& secondaryInputTypes() { + static const std::vector secondaryInputTypes = { + utils::InputLayerType::CONSTANT, + utils::InputLayerType::PARAMETER, }; return secondaryInputTypes; } -const std::vector>& inShapes_4D_1D() { 
- static const std::vector> inShapes_4D_1D = { +const std::vector>& inShapes_4D_1D() { + static const std::vector> inShapes_4D_1D = { {{2, 17, 5, 4}, {4}}, {{1, 3, 3, 3}, {3}}, }; @@ -356,8 +358,8 @@ const std::vector& cpuParams_4D_1D_Parameter_mode() { return cpuParams_4D_1D_Parameter_mode; } -const std::vector>& inShapes_5D_1D() { - static const std::vector> inShapes_5D_1D = { +const std::vector>& inShapes_5D_1D() { + static const std::vector> inShapes_5D_1D = { {{2, 17, 5, 4, 10}, {10}}, {{1, 3, 3, 3, 3}, {3}}, }; @@ -484,5 +486,6 @@ const std::vector& enforceSnippets() { return enforceSnippets; } -} // namespace Eltwise -} // namespace CPULayerTestsDefinitions +} // namespace Eltwise +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.hpp index 3b17539e6117c5..3271a37a78f6ac 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/eltwise.hpp @@ -7,15 +7,15 @@ #include "shared_test_classes/single_layer/eltwise.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/fusing_test_utils.hpp" #include "test_utils/cpu_test_utils.hpp" #include "gtest/gtest.h" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple< subgraph::EltwiseTestParams, @@ -29,12 +29,12 @@ class EltwiseLayerCPUTest : public testing::WithParamInterface obj); protected: - ov::Tensor generate_eltwise_input(const ov::element::Type& type, const ngraph::Shape& shape); - void generate_inputs(const std::vector& targetInputStaticShapes) override; + ov::Tensor generate_eltwise_input(const ov::element::Type& type, const ov::Shape& shape); + void generate_inputs(const std::vector& targetInputStaticShapes) override; void SetUp() override; private: - ngraph::helpers::EltwiseTypes eltwiseType; + utils::EltwiseTypes eltwiseType; }; namespace Eltwise { @@ -43,32 +43,33 @@ const std::vector& additional_config(); const std::vector& netType(); const std::vector& opTypes(); -const std::vector& eltwiseOpTypesBinInp(); -const std::vector& secondaryInputTypes(); +const std::vector& eltwiseOpTypesBinInp(); +const std::vector& secondaryInputTypes(); -const std::vector& eltwiseOpTypesBinInp(); -const std::vector& eltwiseOpTypesDiffInp(); -const std::vector& eltwiseOpTypesBinDyn(); +const std::vector& eltwiseOpTypesBinInp(); +const std::vector& eltwiseOpTypesDiffInp(); +const std::vector& eltwiseOpTypesBinDyn(); const std::vector& cpuParams_4D(); const std::vector>& inShapes_4D(); const std::vector>& inShapes_4D_dyn_const(); const std::vector& inShapes_4D_dyn_param(); -const std::vector>& inShapes_4D_1D(); +const std::vector>& inShapes_4D_1D(); const std::vector & cpuParams_4D_1D_Constant_mode(); const std::vector& cpuParams_4D_1D_Parameter_mode(); const std::vector& cpuParams_5D(); const std::vector>& inShapes_5D(); -const std::vector>& inShapes_5D_1D(); +const std::vector>& inShapes_5D_1D(); const std::vector& inShapes_5D_dyn_const(); const std::vector& inShapes_5D_dyn_param(); const std::vector& cpuParams_5D_1D_constant(); const std::vector& cpuParams_5D_1D_parameter(); -const std::vector& eltwiseOpTypesI32(); +const std::vector& eltwiseOpTypesI32(); const std::vector& enforceSnippets(); -} // namespace Eltwise -} // 
namespace CPULayerTestsDefinitions +} // namespace Eltwise +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.cpp index e09ac9a9606e26..614e85a3afae4e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.cpp @@ -7,14 +7,12 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/properties.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string MatMulLayerCPUTest::getTestCaseName(const testing::TestParamInfo& obj) { MatMulLayerTestParamsSet basicParamsSet; @@ -27,9 +25,9 @@ std::string MatMulLayerCPUTest::getTestCaseName(const testing::TestParamInfo additionalConfig; + ov::AnyMap additionalConfig; std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet; @@ -59,7 +57,7 @@ std::string MatMulLayerCPUTest::getTestCaseName(const testing::TestParamInfo() << ":"; } result << ")"; result << CpuTestWithFusing::getTestCaseName(fusingParams); @@ -85,8 +83,8 @@ void MatMulLayerCPUTest::SetUp() { ShapeRelatedParams shapeRelatedParams; ElementType netType; - helpers::InputLayerType secondaryInputType; - std::map additionalConfig; + utils::InputLayerType secondaryInputType; + ov::AnyMap additionalConfig; std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet; @@ -119,7 +117,8 @@ void MatMulLayerCPUTest::SetUp() { configuration.insert(additionalConfig.begin(), additionalConfig.end()); - if (additionalConfig[PluginConfigParams::KEY_ENFORCE_BF16] == PluginConfigParams::YES) + auto it = additionalConfig.find(ov::hint::inference_precision.name()); + if (it != additionalConfig.end() && it->second.as() == ov::element::bf16) inType = outType = netType = ElementType::bf16; else inType = outType = netType; @@ -130,7 +129,7 @@ void MatMulLayerCPUTest::SetUp() { ov::ParameterVector params{std::make_shared(netType, inShapeA)}; std::shared_ptr matrixB; - if (secondaryInputType == helpers::InputLayerType::PARAMETER) { + if (secondaryInputType == utils::InputLayerType::PARAMETER) { auto param = std::make_shared(netType, inShapeB); matrixB = param; params.push_back(param); @@ -139,7 +138,13 @@ void MatMulLayerCPUTest::SetUp() { auto tensor = ov::test::utils::create_and_fill_tensor(netType, inShapeB.to_shape()); matrixB = std::make_shared(tensor); } - auto paramOuts = helpers::convert2OutputVector(helpers::castOps2Nodes(params)); + + ov::OutputVector paramOuts; + for (auto&& node : params) { + for (auto&& param : node->outputs()) + paramOuts.push_back(param); + } + auto matMul = std::make_shared(paramOuts[0], matrixB, transpA, transpB); function = makeNgraphFunction(netType, params, matMul, cpuNodeType); checkFusingPosition = false; @@ -161,8 +166,8 @@ TEST_P(MatMulLayerCPUTest, CompareWithRefs) { } namespace MatMul { -const std::map& emptyAdditionalConfig() { - static const std::map emptyAdditionalConfig; +const ov::AnyMap& emptyAdditionalConfig() { + static const ov::AnyMap emptyAdditionalConfig; return emptyAdditionalConfig; 
} @@ -181,14 +186,13 @@ const std::vector& netPRCs() { return netPRCs; } -const std::vector>& additionalConfig() { - static std::vector> additionalConfig { - #ifndef OV_CPU_WITH_MLAS +const std::vector& additionalConfig() { + static std::vector additionalConfig{ +#ifndef OV_CPU_WITH_MLAS // FP32 precision is covered by MLAS - std::map{/* empty config */}, - #endif - {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}} - }; + ov::AnyMap{/* empty config */}, +#endif + {ov::hint::inference_precision(ov::element::bf16)}}; return additionalConfig; } @@ -313,5 +317,6 @@ const std::vector& IS3D_smoke() { return IS3D_smoke; } -} // namespace MatMul -} // namespace CPULayerTestsDefinitions +} // namespace MatMul +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.hpp index b965fab5ee5b46..c7a0c09fee3d1d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/matmul.hpp @@ -6,16 +6,12 @@ #include "shared_test_classes/single_layer/mat_mul.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { enum class MatMulNodeType { MatMul, @@ -27,15 +23,15 @@ struct ShapeRelatedParams { std::pair transpose; }; -typedef std::tuple< - ShapeRelatedParams, - ElementType, // Network precision - ElementType, // Input precision - ElementType, // Output precision - ngraph::helpers::InputLayerType, // Secondary input type - TargetDevice, // Device name - std::map // Additional network configuration -> MatMulLayerTestParamsSet; +typedef std::tuple + MatMulLayerTestParamsSet; using MatMulLayerCPUTestParamSet = std::tuple& netPRCs(); const std::vector& matmulFusingParams(); - const std::vector>& additionalConfig(); - const std::map& emptyAdditionalConfig(); + const std::vector& additionalConfig(); + const ov::AnyMap& emptyAdditionalConfig(); const std::vector& filterSpecificParams(); const std::vector& IS2D_nightly(); const std::vector& IS2D_smoke(); const std::vector& IS3D_smoke(); -} // namespace MatMul -} // namespace CPULayerTestsDefinitions +} // namespace MatMul +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.cpp index 53aa6ca9604fc3..e12a5478b85cef 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/mvn.cpp @@ -6,24 +6,22 @@ #include "gtest/gtest.h" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string MvnLayerCPUTest::getTestCaseName(testing::TestParamInfo obj) { basicCpuMvnParams basicParamsSet; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; ElementType inputPrecision, outputPrecision; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(basicParamsSet, cpuParams, fusingParams, 
inputPrecision, outputPrecision, additionalConfig) = obj.param; InputShape inputShapes; ElementType netPrecision; - ngraph::AxisSet axes; + ov::AxisSet axes; bool acrossChanels, normalizeVariance; double eps; std::tie(inputShapes, netPrecision, axes, acrossChanels, normalizeVariance, eps) = basicParamsSet; @@ -50,7 +48,7 @@ std::string MvnLayerCPUTest::getTestCaseName(testing::TestParamInfo +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/fusing_test_utils.hpp" #include "test_utils/cpu_test_utils.hpp" #include "gtest/gtest.h" - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using basicCpuMvnParams = std::tuple< InputShape, // Input shapes ElementType, // Input precision - ngraph::AxisSet, // Reduction axes + ov::AxisSet, // Reduction axes bool, // Across channels bool, // Normalize variance double>; // Epsilon @@ -31,9 +28,9 @@ using MvnLayerCPUTestParamSet = std::tuple< basicCpuMvnParams, CPUSpecificParams, fusingSpecificParams, - ElementType, // CNNNetwork input precision - ElementType, // CNNNetwork output precision - std::map>; + ElementType, // model input precision + ElementType, // model output precision + ov::AnyMap>; class MvnLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { @@ -58,10 +55,11 @@ namespace MVN { const std::vector& inputShapesStatic_4D(); const std::vector& inputShapesStatic_5D(); - const std::vector& emptyReductionAxes(); + const std::vector& emptyReductionAxes(); const std::vector& acrossChannels(); const std::vector& epsilon(); - const std::vector>& additionalConfig(); -} // namespace MVN -} // namespace CPULayerTestsDefinitions \ No newline at end of file + const std::vector& additionalConfig(); +} // namespace MVN +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp index 2d0f42fe85c318..c7c2ae3e938d83 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.cpp @@ -6,15 +6,13 @@ #include "pooling.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfo& obj) { - LayerTestsDefinitions::poolSpecificParams basicParamsSet; + ov::test::poolSpecificParams basicParamsSet; InputShape inputShapes; ElementType inPrc; bool isInt8; @@ -22,11 +20,11 @@ std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfo kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; @@ -39,10 +37,10 @@ std::string PoolingLayerCPUTest::getTestCaseName(const testing::TestParamInfoGetParam(); - ngraph::helpers::PoolingTypes poolType; + utils::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType 
roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; @@ -97,7 +95,7 @@ void PoolingLayerCPUTest::SetUp() { params.push_back(std::make_shared(inPrc, shape)); } - std::shared_ptr poolInput = params[0]; + std::shared_ptr poolInput = params[0]; if (isInt8) { ov::Shape newShape(poolInput->get_output_partial_shape(0).size(), 1); poolInput = ngraph::builder::makeFakeQuantize(poolInput, inPrc, 256, newShape); @@ -115,7 +113,7 @@ void PoolingLayerCPUTest::SetUp() { std::string MaxPoolingV8LayerCPUTest::getTestCaseName( const testing::TestParamInfo& obj) { - LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet; + maxPoolV8SpecificParams basicParamsSet; InputShape inputShapes; ElementType inPrc; CPUSpecificParams cpuParams; @@ -123,9 +121,9 @@ std::string MaxPoolingV8LayerCPUTest::getTestCaseName( std::vector kernel, stride, dilation; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; - ngraph::element::Type indexElementType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; + ov::element::Type indexElementType; int64_t axis; std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = basicParamsSet; @@ -154,7 +152,7 @@ std::string MaxPoolingV8LayerCPUTest::getTestCaseName( void MaxPoolingV8LayerCPUTest::SetUp() { targetDevice = ov::test::utils::DEVICE_CPU; - LayerTestsDefinitions::maxPoolV8SpecificParams basicParamsSet; + maxPoolV8SpecificParams basicParamsSet; InputShape inputShapes; ElementType inPrc; CPUSpecificParams cpuParams; @@ -162,9 +160,9 @@ void MaxPoolingV8LayerCPUTest::SetUp() { std::vector kernel, stride, dilation; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; - ngraph::element::Type indexElementType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; + ov::element::Type indexElementType; int64_t axis; std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = basicParamsSet; @@ -191,8 +189,8 @@ void MaxPoolingV8LayerCPUTest::SetUp() { indexElementType, axis); pooling->get_rt_info() = getCPUInfo(); - ngraph::ResultVector results{std::make_shared(pooling->output(0))}; - function = std::make_shared(results, params, "MaxPooling"); + ov::ResultVector results{std::make_shared(pooling->output(0))}; + function = std::make_shared(results, params, "MaxPooling"); } TEST_P(PoolingLayerCPUTest, CompareWithRefs) { @@ -208,34 +206,34 @@ TEST_P(MaxPoolingV8LayerCPUTest, CompareWithRefs) { namespace Pooling { // The combination of parameters: NCHW + CEIL gives an accuracy problem in ACL AvgPool -const ngraph::op::RoundingType expectedAvgRoundingType() { +const ov::op::RoundingType expectedAvgRoundingType() { #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) - return ngraph::op::RoundingType::FLOOR; + return ov::op::RoundingType::FLOOR; #else - return ngraph::op::RoundingType::CEIL; + return ov::op::RoundingType::CEIL; #endif } -const std::vector& paramsMax3D() { - static const std::vector paramsMax3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, 
ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector<poolSpecificParams>& paramsMax3D() { + static const std::vector<poolSpecificParams> paramsMax3D = { + poolSpecificParams{ utils::PoolingTypes::MAX, {2}, {2}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {4}, {2}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {2}, {1}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; return paramsMax3D; } -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg3D() { - static const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, +const std::vector<poolSpecificParams>& paramsAvg3D() { + static const std::vector<poolSpecificParams> paramsAvg3D = { + poolSpecificParams{ utils::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_UPPER, false }, + poolSpecificParams{ utils::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {4}, {4}, {2}, {2}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, }; return paramsAvg3D; } @@ -245,25 +243,25 @@ const std::vector<ElementType>& inpOutPrecision() { return inpOutPrecision; } -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax4D() { - static const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector<poolSpecificParams>& paramsMax4D() { + static const std::vector<poolSpecificParams> paramsMax4D = { + poolSpecificParams{ utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; return paramsMax4D; } -const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV84D() { - static const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D = { - 
LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER }, +const std::vector<maxPoolV8SpecificParams>& paramsMaxV84D() { + static const std::vector<maxPoolV8SpecificParams> paramsMaxV84D = { + maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {1, 1}, {0, 0}, {0, 0}, + ov::element::Type_t::i32, 0, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER }, }; return paramsMaxV84D; } @@ -371,71 +369,71 @@ const std::vector<InputShape>& inputShapes5D() { return inputShapes5D; } -const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV85D() { - static const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D = { - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER }, +const std::vector<maxPoolV8SpecificParams>& paramsMaxV85D() { + static const std::vector<maxPoolV8SpecificParams> paramsMaxV85D = { + maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ov::element::Type_t::i32, 0, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER }, }; return paramsMaxV85D; } -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D() { - static const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, +const std::vector<poolSpecificParams>& paramsAvg4D() { + static const std::vector<poolSpecificParams> paramsAvg4D = { + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_LOWER, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_UPPER, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_LOWER, false }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_UPPER, false }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, }; return paramsAvg4D; } -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg5D() { - static const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, true }, - 
LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, - expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, true }, +const std::vector<poolSpecificParams>& paramsAvg5D() { + static const std::vector<poolSpecificParams> paramsAvg5D = { + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_LOWER, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_UPPER, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_LOWER, false }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ov::op::PadType::SAME_UPPER, false }, + poolSpecificParams{ utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, + poolSpecificParams{ utils::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, + expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, true }, }; return paramsAvg5D; } -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax5D() { - static const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsMax5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector<poolSpecificParams>& paramsMax5D() { + static const std::vector<poolSpecificParams> paramsMax5D = { + poolSpecificParams{ utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {2, 2, 
1}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + poolSpecificParams{ utils::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; return paramsMax5D; } -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D_Large() { - static const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_Large = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true }, +const std::vector<poolSpecificParams>& paramsAvg4D_Large() { + static const std::vector<poolSpecificParams> paramsAvg4D_Large = { + poolSpecificParams{ utils::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, + ov::op::RoundingType::FLOOR, ov::op::PadType::VALID, true }, }; return paramsAvg4D_Large; } @@ -458,4 +456,5 @@ const std::vector<InputShape>& inputShapes4D_Large() { } // namespace Pooling -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp index ecf12a0360de1f..505019bb763b60 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/pooling.hpp @@ -9,20 +9,21 @@ #include "test_utils/fusing_test_utils.hpp" #include "shared_test_classes/single_layer/pooling.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/pooling.hpp" -using namespace ov::test; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using poolLayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::poolSpecificParams, InputShape, ElementType, bool, CPUSpecificParams, fusingSpecificParams>; +using poolLayerCpuTestParamsSet = std::tuple<poolSpecificParams, InputShape, ElementType, bool, CPUSpecificParams, fusingSpecificParams>; -using maxPoolV8LayerCpuTestParamsSet = std::tuple<LayerTestsDefinitions::maxPoolV8SpecificParams, InputShape, ElementType, CPUSpecificParams>; +using maxPoolV8LayerCpuTestParamsSet = std::tuple<maxPoolV8SpecificParams, InputShape, ElementType, CPUSpecificParams>; @@ -47,23 +48,24 @@ class MaxPoolingV8LayerCPUTest : public testing::WithParamInterface<maxPoolV8LayerCpuTestParamsSet> const std::vector<ElementType>& inpOutPrecision(); -const ngraph::op::RoundingType expectedAvgRoundingType(); +const ov::op::RoundingType expectedAvgRoundingType(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax3D(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg3D(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax4D(); +const std::vector<poolSpecificParams>& paramsMax3D(); +const std::vector<poolSpecificParams>& paramsAvg3D(); +const std::vector<poolSpecificParams>& paramsMax4D(); -const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV84D(); -const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams>& paramsMaxV85D(); +const std::vector<maxPoolV8SpecificParams>& paramsMaxV84D(); +const std::vector<maxPoolV8SpecificParams>& paramsMaxV85D(); const std::vector<InputShape>& inputShapes3D(); const std::vector<InputShape>& inputShapes4D(); const std::vector<InputShape>& inputShapes4D_Large(); const std::vector<InputShape>& inputShapes5D(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg4D_Large(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsAvg5D(); -const std::vector<LayerTestsDefinitions::poolSpecificParams>& paramsMax5D(); +const std::vector<poolSpecificParams>& paramsAvg4D(); +const std::vector<poolSpecificParams>& paramsAvg4D_Large(); +const std::vector<poolSpecificParams>& paramsAvg5D(); +const std::vector<poolSpecificParams>& paramsMax5D(); +} // namespace Pooling +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp index 282ebef47ba9bb..bc34f3234cffb3 100644 --- 
a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.cpp @@ -6,9 +6,9 @@ #include "ov_models/builders.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string RandomUniformLayerTestCPU::getTestCaseName(const testing::TestParamInfo<RandomUniformLayerTestCPUParamSet>& obj) { const auto& out_shape = std::get<0>(obj.param); @@ -71,13 +71,13 @@ void RandomUniformLayerTestCPU::SetUp() { } else if (output_prc == ElementType::f64) { updateSelectedType(getPrimitiveType(), ElementType::f32, configuration); } else if (output_prc == ElementType::f16) { - if (InferenceEngine::with_cpu_x86_avx512_core_fp16()) { + if (ov::with_cpu_x86_avx512_core_fp16()) { updateSelectedType(getPrimitiveType(), ElementType::f16, configuration); } else { updateSelectedType(getPrimitiveType(), ElementType::f32, configuration); } } else if (output_prc == ElementType::bf16) { - if (InferenceEngine::with_cpu_x86_bfloat16()) { + if (ov::with_cpu_x86_bfloat16()) { updateSelectedType(getPrimitiveType(), ElementType::bf16, configuration); } else { updateSelectedType("ref_any", ElementType::bf16, configuration); @@ -124,10 +124,10 @@ void RandomUniformLayerTestCPU::SetUp() { function = std::make_shared<ov::Model>(results, in_params, "RandomUniformLayerTestCPU"); // todo: issue: 123320 - if (!InferenceEngine::with_cpu_x86_avx512_core()) { + if (!ov::with_cpu_x86_avx512_core()) { convert_precisions.insert({ ov::element::bf16, ov::element::f32 }); } - if (!InferenceEngine::with_cpu_x86_avx512_core_fp16()) { + if (!ov::with_cpu_x86_avx512_core_fp16()) { convert_precisions.insert({ ov::element::f16, ov::element::f32 }); } } @@ -257,4 +257,5 @@ TEST_P(RandomUniformLayerTestCPU, CompareWithRefs) { CheckPluginRelatedResults(compiledModel, "RandomUniform"); } -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp index a7d5dcecc42187..2d32426cb0a782 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/random_uniform.hpp @@ -7,7 +7,8 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple< ov::Shape, // Output shapes @@ -48,4 +49,5 @@ class RandomUniformLayerTestCPU : public testing::WithParamInterface obj) { basicReduceParams basicParams; @@ -25,7 +24,7 @@ std::string ReduceCPULayerTest::getTestCaseName(testing::TestParamInfo<ReduceLayerCPUTestParamSet> obj) { std::vector<int> axes; ov::test::utils::OpType opType; bool keepDims; - ngraph::helpers::ReductionType reductionType; + utils::ReductionType reductionType; ElementType netPrecision, inPrc, outPrc; std::vector<std::vector<ov::test::InputShape>> inputShapes; @@ -110,10 +109,10 @@ void ReduceCPULayerTest::SetUp() { default: FAIL() << "Reduce op doesn't support operation type: " << opType; } - auto reductionAxesNode = std::dynamic_pointer_cast<ngraph::Node>( - std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + auto reductionAxesNode = std::dynamic_pointer_cast<ov::Node>( + std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape(shapeAxes), axes)); - const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType); + 
const auto reduce = utils::make_reduce(params[0], reductionAxesNode, keepDims, reductionType); // hybrid layouts if (inFmts.size() != 0 && outFmts.size() == 0) { @@ -140,29 +139,29 @@ void ReduceCPULayerTest::SetUp() { function = makeNgraphFunction(netPrecision, params, reduce, "Reduce"); } -void ReduceCPULayerTest::generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) { +void ReduceCPULayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; ov::Tensor tensor; if (reductionType == ngraph::helpers::ReductionType::Prod) { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), - targetInputStaticShapes[i], - 10, - 5); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 5; + in_data.range = 10; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); if (netPrecision == ElementType::f32) { auto* rawBlobDataPtr = static_cast<float*>(tensor.data()); for (size_t i = 0; i < tensor.get_size(); ++i) { rawBlobDataPtr[i] /= 10.f; } } else if (netPrecision == ElementType::f16) { - auto *rawBlobDataPtr = static_cast<ngraph::float16*>(tensor.data()); + auto *rawBlobDataPtr = static_cast<ov::float16*>(tensor.data()); for (size_t i = 0; i < tensor.get_size(); ++i) { rawBlobDataPtr[i] /= 10.f; } } else if (netPrecision == ElementType::bf16) { - auto* rawBlobDataPtr = static_cast<ngraph::bfloat16*>(tensor.data()); + auto* rawBlobDataPtr = static_cast<ov::bfloat16*>(tensor.data()); for (size_t i = 0; i < tensor.get_size(); ++i) { rawBlobDataPtr[i] /= 10.f; } @@ -225,15 +224,15 @@ const std::vector<ov::test::utils::OpType>& opTypes() { return opTypes; } -const std::vector<ngraph::helpers::ReductionType>& reductionTypes() { - static const std::vector<ngraph::helpers::ReductionType> reductionTypes = { - ngraph::helpers::ReductionType::Mean, - ngraph::helpers::ReductionType::Max, - ngraph::helpers::ReductionType::Sum, - ngraph::helpers::ReductionType::Min, - ngraph::helpers::ReductionType::Prod, - ngraph::helpers::ReductionType::L1, - ngraph::helpers::ReductionType::L2, +const std::vector<utils::ReductionType>& reductionTypes() { + static const std::vector<utils::ReductionType> reductionTypes = { + utils::ReductionType::Mean, + utils::ReductionType::Max, + utils::ReductionType::Sum, + utils::ReductionType::Min, + utils::ReductionType::Prod, + utils::ReductionType::L1, + utils::ReductionType::L2, }; return reductionTypes; } @@ -262,15 +261,16 @@ const std::vector<std::map<std::string, ov::element::Type>> additionalConfigFP32 return additionalConfig; } -const std::vector<ngraph::helpers::ReductionType>& reductionTypesInt32() { - static const std::vector<ngraph::helpers::ReductionType> reductionTypesInt32 = { - ngraph::helpers::ReductionType::Sum, - ngraph::helpers::ReductionType::Min, - ngraph::helpers::ReductionType::Max, - ngraph::helpers::ReductionType::L1, +const std::vector<utils::ReductionType>& reductionTypesInt32() { + static const std::vector<utils::ReductionType> reductionTypesInt32 = { + utils::ReductionType::Sum, + utils::ReductionType::Min, + utils::ReductionType::Max, + utils::ReductionType::L1, }; return reductionTypesInt32; } } // namespace Reduce -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp index 31022372a8234e..6d9913d22f7969 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/reduce.hpp @@ -5,26 +5,26 @@ #pragma once #include 
"shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" -#include <common_test_utils/ov_tensor_utils.hpp> +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/fusing_test_utils.hpp" using namespace CPUTestUtils; using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - std::vector<int>, // Axis to reduce order - ov::test::utils::OpType, // Scalar or vector type axis - bool, // Keep dims - ngraph::helpers::ReductionType, // Reduce operation type - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - std::vector<std::vector<ov::test::InputShape>> // Input shapes -> basicReduceParams; +typedef std::tuple<std::vector<int>, // Axis to reduce order + ov::test::utils::OpType, // Scalar or vector type axis + bool, // Keep dims + utils::ReductionType, // Reduce operation type + ElementType, // Net precision + ElementType, // Input precision + ElementType, // Output precision + std::vector<std::vector<ov::test::InputShape>> // Input shapes + > + basicReduceParams; typedef std::tuple< basicReduceParams, @@ -38,10 +38,10 @@ class ReduceCPULayerTest : public testing::WithParamInterface<ReduceLayerCPUTestParamSet> static std::string getTestCaseName(testing::TestParamInfo<ReduceLayerCPUTestParamSet> obj); protected: void SetUp() override; - void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override; + void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override; private: - ngraph::helpers::ReductionType reductionType; + utils::ReductionType reductionType; ElementType netPrecision; }; @@ -51,11 +51,12 @@ const std::vector<bool>& keepDims(); const std::vector<std::vector<int>>& axes(); const std::vector<std::vector<int>>& axesND(); const std::vector<ov::test::utils::OpType>& opTypes(); -const std::vector<ngraph::helpers::ReductionType>& reductionTypes(); +const std::vector<utils::ReductionType>& reductionTypes(); const std::vector<ElementType>& inpOutPrc(); const std::vector<std::map<std::string, ov::element::Type>> additionalConfig(); const std::vector<std::map<std::string, ov::element::Type>> additionalConfigFP32(); -const std::vector<ngraph::helpers::ReductionType>& reductionTypesInt32(); +const std::vector<utils::ReductionType>& reductionTypesInt32(); -} // namespace Reduce -} // namespace CPULayerTestsDefinitions +} // namespace Reduce +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.cpp index 644f94bc2bf326..46513a36016620 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.cpp @@ -2,18 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include <openvino/opsets/opset13.hpp> -#include <transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp> #include "scaled_attn.hpp" + #include "gtest/gtest.h" +#include "openvino/opsets/opset13.hpp" #include "test_utils/cpu_test_utils.hpp" +#include "transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string ScaledAttnLayerCPUTest::getTestCaseName(const testing::TestParamInfo<ScaledAttnCPUTestParams>& obj) { CPUSpecificParams cpuParams; @@ -132,4 +131,5 @@ TEST_P(ScaledAttnLayerCPUTest, CompareWithRefs) { namespace ScaledAttn { } // namespace ScaledAttn -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.hpp index 0a11d159b50b62..8ef8834388d19f 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.hpp +++ 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/scaled_attn.hpp @@ -2,16 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple<ElementType, std::vector<InputShape>, // shape @@ -36,4 +33,5 @@ class ScaledAttnLayerCPUTest : public testing::WithParamInterface& obj) { CPUSpecificParams cpuParams; @@ -56,7 +54,7 @@ void SoftMaxLayerCPUTest::SetUp() { for (auto&& shape : inputDynamicShapes) params.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape)); - const auto softMax = std::make_shared<ngraph::opset1::Softmax>(params.at(0), config.axis); + const auto softMax = std::make_shared<ov::op::v1::Softmax>(params.at(0), config.axis); function = makeNgraphFunction(inType, params, softMax, "SoftMax"); } @@ -69,4 +67,5 @@ TEST_P(SoftMaxLayerCPUTest, CompareWithRefs) { namespace SoftMax { } // namespace SoftMax -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp index 06e03d79a493a1..f31bc35b97b712 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/softmax.hpp @@ -2,16 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include - #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { struct SoftMaxConfig { ov::test::InputShape inputShape; @@ -38,4 +35,5 @@ namespace SoftMax { } // namespace SoftMax -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.cpp index 029e97a2eae21d..daff97d5077515 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.cpp @@ -7,19 +7,17 @@ #include "gtest/gtest.h" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { std::string TransposeLayerCPUTest::getTestCaseName(testing::TestParamInfo<TransposeLayerCPUTestParamSet> obj) { - Precision netPrecision; + ov::element::Type netPrecision; InputShape inputShapes; std::vector<size_t> inputOrder; std::string targetDevice; CPUSpecificParams cpuParams; - std::map<std::string, std::string> additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, inputOrder, netPrecision, targetDevice, additionalConfig, cpuParams) = obj.param; std::ostringstream result; @@ -30,12 +28,12 @@ std::string TransposeLayerCPUTest::getTestCaseName(testing::TestParamInfo(results, ov::ParameterVector{params}, "TransposeLayerCPUTest"); + functionRefs = ov::clone_model(*function); } TEST_P(TransposeLayerCPUTest, CompareWithRefs) { @@ -77,8 +75,8 @@ TEST_P(TransposeLayerCPUTest, CompareWithRefs) { } namespace Transpose { -const std::vector<Precision>& netPrecisionsPerChannels() { - static const std::vector<Precision> 
netPrecisionsPerChannels = {Precision::I8, Precision::FP32}; +const std::vector<ov::element::Type>& netPrecisionsPerChannels() { + static const std::vector<ov::element::Type> netPrecisionsPerChannels = {ov::element::i8, ov::element::f32}; return netPrecisionsPerChannels; } @@ -123,4 +121,5 @@ const std::vector<std::vector<size_t>>& inputOrder4D() { return inputOrder4D; } } // namespace Transpose -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp index 5b8300106c83df..f11f84f89a7093 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/classes/transpose.hpp @@ -5,24 +5,22 @@ #pragma once #include "shared_test_classes/single_layer/transpose.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "gtest/gtest.h" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { -typedef std::tuple< - InputShape, // Input shapes - std::vector<size_t>, // Input order - InferenceEngine::Precision, // Net precision - std::string, // Target device name - std::map<std::string, std::string>, // Additional network configuration - CPUSpecificParams> TransposeLayerCPUTestParamSet; +namespace ov { +namespace test { +typedef std::tuple<InputShape, // Input shapes + std::vector<size_t>, // Input order + ov::element::Type, // Net precision + std::string, // Target device name + ov::AnyMap, // Additional network configuration + CPUSpecificParams> + TransposeLayerCPUTestParamSet; class TransposeLayerCPUTest : public testing::WithParamInterface<TransposeLayerCPUTestParamSet>, public ov::test::SubgraphBaseTest, public CPUTestsBase { @@ -33,10 +31,11 @@ class TransposeLayerCPUTest : public testing::WithParamInterface<TransposeLayerCPUTestParamSet>, - const std::vector<Precision>& netPrecisionsPerChannels(); + const std::vector<ov::element::Type>& netPrecisionsPerChannels(); const std::vector<InputShape>& dynamicInputShapes4DC16(); const std::vector<InputShape>& dynamicInputShapes4DC32(); const std::vector<InputShape>& dynamicInputShapes4D(); const std::vector<std::vector<size_t>>& inputOrder4D(); -} // namespace Transpose -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace Transpose +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp index e805d802abeb7d..0a71bfe87ead9a 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/concat.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace ov::test; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - size_t, // Concat axis - std::vector<InputShape>, // Input shapes - ElementType, // Network precision - CPUSpecificParams -> concatCPUTestParams; +typedef std::tuple<size_t, // Concat axis + std::vector<InputShape>, // Input shapes + ElementType, // Network precision + CPUSpecificParams> + concatCPUTestParams; class ConcatLayerCPUTest : public testing::WithParamInterface<concatCPUTestParams>, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string 
getTestCaseName(testing::TestParamInfo<concatCPUTestParams> obj) { int axis; @@ -49,7 +49,7 @@ class ConcatLayerCPUTest : public testing::WithParamInterface<concatCPUTestParams>, - void compare(const std::vector<ov::Tensor> &expected, const std::vector<ov::Tensor> &actual) override { + void compare(const std::vector<ov::Tensor>& expected, const std::vector<ov::Tensor>& actual) override { if (actual.front().get_size() == 0) { ASSERT_EQ(0, expected.front().get_size()); for (const auto& shape : targetStaticShapes[inferNum]) { @@ -74,7 +74,7 @@ class ConcatLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - selectedType += std::string("_") + InferenceEngine::details::convertPrecision(netPrecision).name(); + selectedType += std::string("_") + ov::element::Type(netPrecision).get_type_name(); init_input_shapes(inputShape); @@ -85,7 +85,7 @@ class ConcatLayerCPUTest : public testing::WithParamInterface<concatCPUTestParams>, - auto concat = std::make_shared<ngraph::opset1::Concat>(paramsOuts, axis); + auto concat = std::make_shared<ov::op::v0::Concat>(paramsOuts, axis); function = makeNgraphFunction(netPrecision, params, concat, "ConcatCPU"); } @@ -119,615 +119,699 @@ const auto blocked16_4D_ref = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, "ref"}; const auto blocked16_5D_ref = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, "ref"}; // List of precisions natively supported by onednn. -const std::vector<ElementType> netPrecisions = { - ElementType::i8, - ElementType::i32, - ElementType::f32, - ElementType::bf16 -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block8_static, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1, -2, 3), - ::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5}, {2, 16, 3, 5}})), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_4D_ref, planarChannels_4D, blocked8_4D_ref)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16_static, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1, 2, -1), - ::testing::Values(static_shapes_to_test_representation({{3, 32, 3, 5}, {3, 32, 3, 5}})), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked16_4D_ref)), - ConcatLayerCPUTest::getTestCaseName); +const std::vector<ElementType> netPrecisions = {ElementType::i8, ElementType::i32, ElementType::f32, ElementType::bf16}; + +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block8_static, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1, -2, 3), + ::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5}, + {2, 16, 3, 5}})), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_4D_ref, planarChannels_4D, blocked8_4D_ref)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16_static, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1, 2, -1), + ::testing::Values(static_shapes_to_test_representation({{3, 32, 3, 5}, + {3, 32, 3, 5}})), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked16_4D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector<std::vector<InputShape>> inputShapes4D_Block_axis1 = { - { - // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...} - {{-1, 32, -1, -1}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}}, // input 0 - {{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}}, // input 1 - {{-1, 64, -1, -1}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}} // input 2 - }, - { - {{{1, 5}, 32, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}}, - {{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}}, - {{{1, 3}, 64, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}} - }, - { 
- {{{1, 10}, 32, 2, 3}, {{2, 32, 2, 3}, {1, 32, 2, 3}}}, - {{{1, 10}, 16, 2, 3}, {{2, 16, 2, 3}, {1, 16, 2, 3}}}, - {{{1, 10}, 64, 2, 3}, {{2, 64, 2, 3}, {1, 64, 2, 3}}} - }, + { + // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...} + {{-1, 32, -1, -1}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}}, // input 0 + {{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}}, // input 1 + {{-1, 64, -1, -1}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}} // input 2 + }, + {{{{1, 5}, 32, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 32, 10, 2}, {3, 32, 1, 8}}}, + {{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 10, 2}, {3, 16, 1, 8}}}, + {{{1, 3}, 64, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 64, 10, 2}, {3, 64, 1, 8}}}}, + {{{{1, 10}, 32, 2, 3}, {{2, 32, 2, 3}, {1, 32, 2, 3}}}, + {{{1, 10}, 16, 2, 3}, {{2, 16, 2, 3}, {1, 16, 2, 3}}}, + {{{1, 10}, 64, 2, 3}, {{2, 64, 2, 3}, {1, 64, 2, 3}}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1, -3), - ::testing::ValuesIn(inputShapes4D_Block_axis1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_4D_ref, blocked16_4D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1, -3), + ::testing::ValuesIn(inputShapes4D_Block_axis1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_4D_ref, blocked16_4D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes4D_axis1 = { - { - {{-1, -1, -1, -1}, {{2, 32, 0, 7}, {2, 32, 5, 7}, {2, 32, 5, 7}, {1, 18, 10, 2}, {2, 32, 5, 7}, {3, 8, 1, 8}, {2, 0, 5, 7}}}, - {{-1, -1, -1, -1}, {{2, 16, 0, 7}, {2, 16, 5, 7}, {2, 16, 5, 7}, {1, 5, 10, 2}, {2, 0, 5, 7}, {3, 3, 1, 8}, {2, 16, 5, 7}}}, - {{-1, -1, -1, -1}, {{2, 64, 0, 7}, {2, 64, 5, 7}, {2, 0, 5, 7}, {1, 45, 10, 2}, {2, 64, 5, 7}, {3, 1, 1, 8}, {2, 64, 5, 7}}} - }, - { - {{{1, 3}, {8, 32}, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}}, - {{{1, 3}, {3, 16}, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}}, - {{{1, 3}, {1, 64}, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 45, 10, 2}, {3, 1, 1, 8}}} - }, - { - {{{1, 18, 10, 2}}, {{1, 18, 10, 2}, {1, 18, 10, 2}}}, - {{-1, -1, -1, -1}, {{1, 3, 10, 2}, {1, 5, 10, 2}}}, - {{{1, 5, 10, 2}}, {{1, 5, 10, 2}, {1, 5, 10, 2}}} - }, - { - {{{-1, 8, -1, -1}}, {{2, 8, 5, 7}, {1, 8, 10, 2}}}, - {{{-1, 3, -1, -1}}, {{2, 3, 5, 7}, {1, 3, 10, 2}}}, - {{{-1, -1, -1, -1}}, {{2, 16, 5, 7}, {1, 7, 10, 2}}} - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(inputShapes4D_axis1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_4D_ref, planarChannels_4D)), - ConcatLayerCPUTest::getTestCaseName); + {{{-1, -1, -1, -1}, + {{2, 32, 0, 7}, {2, 32, 5, 7}, {2, 32, 5, 7}, {1, 18, 10, 2}, {2, 32, 5, 7}, {3, 8, 1, 8}, {2, 0, 5, 7}}}, + {{-1, -1, -1, -1}, + {{2, 16, 0, 7}, {2, 16, 5, 7}, {2, 16, 5, 7}, {1, 5, 10, 2}, {2, 0, 5, 7}, {3, 3, 1, 8}, {2, 16, 5, 7}}}, + {{-1, -1, -1, -1}, + {{2, 64, 0, 7}, {2, 64, 5, 7}, {2, 0, 5, 7}, {1, 45, 10, 2}, {2, 64, 5, 7}, {3, 1, 1, 8}, {2, 64, 5, 7}}}}, + {{{{1, 3}, {8, 32}, {1, 10}, {2, 8}}, {{2, 32, 5, 7}, {1, 18, 10, 2}, {3, 8, 1, 8}}}, + {{{1, 3}, {3, 16}, {1, 10}, {2, 8}}, {{2, 16, 5, 7}, {1, 5, 10, 2}, {3, 3, 1, 8}}}, + {{{1, 3}, {1, 64}, {1, 10}, {2, 8}}, {{2, 64, 5, 7}, {1, 45, 
10, 2}, {3, 1, 1, 8}}}}, + {{{{1, 18, 10, 2}}, {{1, 18, 10, 2}, {1, 18, 10, 2}}}, + {{-1, -1, -1, -1}, {{1, 3, 10, 2}, {1, 5, 10, 2}}}, + {{{1, 5, 10, 2}}, {{1, 5, 10, 2}, {1, 5, 10, 2}}}}, + {{{{-1, 8, -1, -1}}, {{2, 8, 5, 7}, {1, 8, 10, 2}}}, + {{{-1, 3, -1, -1}}, {{2, 3, 5, 7}, {1, 3, 10, 2}}}, + {{{-1, -1, -1, -1}}, {{2, 16, 5, 7}, {1, 7, 10, 2}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(inputShapes4D_axis1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_4D_ref, planarChannels_4D)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes4D_Block_axis2 = { - { - {{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, - {{-1, 16, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, - {{-1, 16, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, - }, - { - {{{1, 3}, 16, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, - {{{1, 3}, 16, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, - {{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, - }, - { - {{{1, 5}, 16, 5, 7}, {{2, 16, 5, 7}, {1, 16, 5, 7}}}, - {{{1, 5}, 16, 1, 7}, {{2, 16, 1, 7}, {1, 16, 1, 7}}}, - {{{1, 5}, 16, 10, 7}, {{2, 16, 10, 7}, {1, 16, 10, 7}}}, - }, + { + {{-1, 16, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, + {{-1, 16, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, + {{-1, 16, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, + }, + { + {{{1, 3}, 16, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, + {{{1, 3}, 16, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, + {{{1, 3}, 16, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, + }, + { + {{{1, 5}, 16, 5, 7}, {{2, 16, 5, 7}, {1, 16, 5, 7}}}, + {{{1, 5}, 16, 1, 7}, {{2, 16, 1, 7}, {1, 16, 1, 7}}}, + {{{1, 5}, 16, 10, 7}, {{2, 16, 10, 7}, {1, 16, 10, 7}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_2, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(2), - ::testing::ValuesIn(inputShapes4D_Block_axis2), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_4D_ref, blocked16_4D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_2, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(2), + ::testing::ValuesIn(inputShapes4D_Block_axis2), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_4D_ref, blocked16_4D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes4D_axis2 = { - { - {{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, - {{-1, -1, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, - {{-1, -1, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, - }, - { - {{{1, 3}, {1, 16}, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, - {{{1, 3}, {1, 16}, {1, 11}, {2, 8}}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, - {{{1, 3}, {1, 16}, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, - }, + { + {{-1, -1, -1, -1}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, + {{-1, -1, -1, -1}, {{2, 16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, + {{-1, -1, -1, -1}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, + }, + { + {{{1, 3}, {1, 16}, {2, 16}, {2, 8}}, {{2, 16, 5, 7}, {1, 16, 16, 2}, {3, 16, 2, 8}}}, + {{{1, 3}, {1, 16}, {1, 11}, {2, 8}}, {{2, 
16, 1, 7}, {1, 16, 3, 2}, {3, 16, 11, 8}}}, + {{{1, 3}, {1, 16}, {1, 10}, {2, 8}}, {{2, 16, 10, 7}, {1, 16, 5, 2}, {3, 16, 1, 8}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_2, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(2, -2), - ::testing::ValuesIn(inputShapes4D_axis2), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_4D_ref, planarChannels_4D)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_2, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(2, -2), + ::testing::ValuesIn(inputShapes4D_axis2), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_4D_ref, planarChannels_4D)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes4D_Block_axis3 = { - { - {{-1, 32, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}, }}, - {{-1, 32, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, - {{-1, 32, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, - }, - { - {{{1, 3}, 32, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}}, - {{{1, 3}, 32, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, - {{{1, 3}, 32, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, - }, + { + {{-1, 32, -1, -1}, + { + {2, 32, 4, 5}, + {1, 32, 1, 16}, + {3, 32, 7, 2}, + }}, + {{-1, 32, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, + {{-1, 32, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, + }, + { + {{{1, 3}, 32, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}}, + {{{1, 3}, 32, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, + {{{1, 3}, 32, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_3, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::ValuesIn(inputShapes4D_Block_axis3), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_4D_ref, blocked16_4D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block_dynamic_axis_3, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::ValuesIn(inputShapes4D_Block_axis3), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_4D_ref, blocked16_4D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes4D_axis3 = { - { - {{-1, -1, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}}, - {{-1, -1, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, - {{-1, -1, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, - }, - { - {{{1, 3}, {1, 32}, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}}, - {{{1, 3}, {1, 32}, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, - {{{1, 3}, {1, 32}, {1, 7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, - }, - { - {{{1, 3}, 32, 4, 5}, {{1, 32, 4, 5}, {2, 32, 4, 5}}}, - {{{1, 3}, 32, 4, 1}, {{1, 32, 4, 1}, {2, 32, 4, 1}}}, - {{{1, 3}, 32, 4, 10}, {{1, 32, 4, 10}, {2, 32, 4, 10}}}, - }, + { + {{-1, -1, -1, -1}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}}, + {{-1, -1, -1, -1}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, + {{-1, -1, -1, -1}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, + }, + { + {{{1, 3}, {1, 32}, {1, 7}, {2, 16}}, {{2, 32, 4, 5}, {1, 32, 1, 16}, {3, 32, 7, 2}}}, + {{{1, 3}, {1, 32}, {1, 7}, {1, 11}}, {{2, 32, 4, 1}, {1, 32, 1, 3}, {3, 32, 7, 11}}}, + {{{1, 3}, {1, 32}, {1, 
7}, {1, 10}}, {{2, 32, 4, 10}, {1, 32, 1, 5}, {3, 32, 7, 1}}}, + }, + { + {{{1, 3}, 32, 4, 5}, {{1, 32, 4, 5}, {2, 32, 4, 5}}}, + {{{1, 3}, 32, 4, 1}, {{1, 32, 4, 1}, {2, 32, 4, 1}}}, + {{{1, 3}, 32, 4, 10}, {{1, 32, 4, 10}, {2, 32, 4, 10}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_3, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(3, -1), - ::testing::ValuesIn(inputShapes4D_axis3), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_4D_ref, planarChannels_4D)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block8_static, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(2, 3, -2), - ::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5, 7}, {2, 16, 3, 5, 7}})), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D_ref, planarChannels_5D, blocked8_5D_ref)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16_static, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(2, 3, 4), - ::testing::Values(static_shapes_to_test_representation({{2, 32, 3, 5, 7}, {2, 32, 3, 5, 7}})), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked16_5D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_dynamic_axis_3, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(3, -1), + ::testing::ValuesIn(inputShapes4D_axis3), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_4D_ref, planarChannels_4D)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block8_static, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(2, 3, -2), + ::testing::Values(static_shapes_to_test_representation({{2, 16, 3, 5, 7}, + {2, 16, 3, 5, 7}})), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D_ref, planarChannels_5D, blocked8_5D_ref)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16_static, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(2, 3, 4), + ::testing::Values(static_shapes_to_test_representation({{2, 32, 3, 5, 7}, + {2, 32, 3, 5, 7}})), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked16_5D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_Block_axis1 = { - { - {{-1, 32, -1, -1, -1}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}}, - {{-1, 16, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}}, - {{-1, 64, -1, -1, -1}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}}, - }, - { - {{{1, 3}, 32, {1, 10}, {2, 8}, {6, 10}}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}}, - {{{1, 3}, 16, {1, 10}, {2, 8}, {6, 10}}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}}, - {{{1, 3}, 64, {1, 10}, {2, 8}, {6, 10}}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}}, - }, + { + {{-1, 32, -1, -1, -1}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}}, + {{-1, 16, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}}, + {{-1, 64, -1, -1, -1}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}}, + }, + { + {{{1, 3}, 32, {1, 10}, {2, 8}, {6, 10}}, {{2, 32, 5, 7, 6}, {1, 32, 10, 2, 8}, {3, 32, 1, 8, 10}}}, + {{{1, 3}, 16, {1, 10}, {2, 8}, {6, 10}}, {{2, 16, 5, 7, 6}, {1, 16, 10, 2, 8}, {3, 16, 1, 8, 10}}}, + {{{1, 3}, 64, {1, 10}, {2, 8}, {6, 10}}, {{2, 64, 5, 7, 6}, {1, 64, 10, 2, 8}, {3, 64, 1, 8, 10}}}, + }, }; 
-INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(inputShapes5D_Block_axis1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(inputShapes5D_Block_axis1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_axis1 = { - { - {{-1, -1, -1, -1, -1}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}}, - {{-1, -1, -1, -1, -1}, {{2, 16, 5, 7, 6}, {1, 20, 10, 2, 8}, {3, 5, 1, 8, 10}, }}, - {{-1, -1, -1, -1, -1}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}}, - }, - { - {{{1, 3}, {3, 5}, {1, 10}, {2, 8}, {6, 10}}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}}, - {{{1, 3}, {5, 20}, {1, 10}, {2, 8}, {4, 10}}, {{2, 16, 5, 7, 6}, {1, 20, 10, 2, 8}, {3, 5, 1, 8, 10}, }}, - {{{1, 3}, {1, 17}, {1, 10}, {2, 8}, {6, 10}}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}}, - }, + { + {{-1, -1, -1, -1, -1}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}}, + {{-1, -1, -1, -1, -1}, + { + {2, 16, 5, 7, 6}, + {1, 20, 10, 2, 8}, + {3, 5, 1, 8, 10}, + }}, + {{-1, -1, -1, -1, -1}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}}, + }, + { + {{{1, 3}, {3, 5}, {1, 10}, {2, 8}, {6, 10}}, {{2, 5, 5, 7, 6}, {1, 3, 10, 2, 8}, {3, 4, 1, 8, 10}}}, + {{{1, 3}, {5, 20}, {1, 10}, {2, 8}, {4, 10}}, + { + {2, 16, 5, 7, 6}, + {1, 20, 10, 2, 8}, + {3, 5, 1, 8, 10}, + }}, + {{{1, 3}, {1, 17}, {1, 10}, {2, 8}, {6, 10}}, {{2, 1, 5, 7, 6}, {1, 17, 10, 2, 8}, {3, 5, 1, 8, 10}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(inputShapes5D_axis1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D_ref, planarChannels_5D)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(inputShapes5D_axis1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D_ref, planarChannels_5D)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_Block_axis2 = { - { - {{-1, 16, -1, -1, -1}, {{2, 16, 5, 8, 7}, {1, 16, 16, 1, 2}, {3, 16, 2, 5, 8}, }}, - {{-1, 16, -1, -1, -1}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}}, - {{-1, 16, -1, -1, -1}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}}, - }, - { - {{{1, 3}, 16, {2, 16}, {1, 8}, {2, 8}}, {{2, 16, 5, 8, 7}, {1, 16, 16, 1, 2}, {3, 16, 2, 5, 8}, }}, - {{{1, 5}, 16, {1, 11}, {1, 8}, {1, 8}}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}}, - {{{1, 6}, 16, {1, 10}, {1, 8}, {2, 10}}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}}, - }, + { + {{-1, 16, -1, -1, -1}, + { + {2, 16, 5, 8, 7}, + {1, 16, 16, 1, 2}, + {3, 16, 2, 5, 8}, + }}, + {{-1, 16, -1, -1, -1}, {{2, 16, 1, 8, 7}, {1, 16, 3, 1, 2}, {3, 16, 11, 5, 8}}}, + {{-1, 16, -1, -1, -1}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}}, + }, + { + {{{1, 3}, 16, {2, 16}, {1, 8}, {2, 8}}, + { + {2, 16, 5, 8, 7}, + {1, 16, 16, 1, 2}, + {3, 16, 2, 5, 8}, + }}, + {{{1, 5}, 16, {1, 11}, {1, 8}, {1, 8}}, {{2, 16, 1, 8, 7}, {1, 16, 
3, 1, 2}, {3, 16, 11, 5, 8}}}, + {{{1, 6}, 16, {1, 10}, {1, 8}, {2, 10}}, {{2, 16, 10, 8, 7}, {1, 16, 5, 1, 2}, {3, 16, 1, 5, 8}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_2, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(-3), - ::testing::ValuesIn(inputShapes5D_Block_axis2), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_2, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(-3), + ::testing::ValuesIn(inputShapes5D_Block_axis2), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_axis2 = { - { - {{-1, -1, -1, -1, -1}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}}, - {{-1, -1, -1, -1, -1}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}}, - {{-1, -1, -1, -1, -1}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}}, - }, - { - {{{1, 3}, {4, 20}, {1, 16}, {1, 8}, {2, 8}}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}}, - {{{1, 3}, {4, 20}, {1, 11}, {1, 10}, {1, 15}}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}}, - {{{1, 3}, {1, 20}, {1, 15}, {1, 10}, {2, 8}}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}}, - }, + { + {{-1, -1, -1, -1, -1}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}}, + {{-1, -1, -1, -1, -1}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}}, + {{-1, -1, -1, -1, -1}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}}, + }, + { + {{{1, 3}, {4, 20}, {1, 16}, {1, 8}, {2, 8}}, {{2, 4, 5, 8, 7}, {1, 20, 16, 1, 2}, {3, 8, 2, 5, 8}}}, + {{{1, 3}, {4, 20}, {1, 11}, {1, 10}, {1, 15}}, {{2, 4, 1, 8, 7}, {1, 20, 3, 1, 2}, {3, 8, 11, 5, 8}}}, + {{{1, 3}, {1, 20}, {1, 15}, {1, 10}, {2, 8}}, {{2, 4, 10, 8, 7}, {1, 20, 5, 1, 2}, {3, 8, 1, 5, 8}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_2, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(2), - ::testing::ValuesIn(inputShapes5D_axis2), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D_ref, planarChannels_5D)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_2, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(2), + ::testing::ValuesIn(inputShapes5D_axis2), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D_ref, planarChannels_5D)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_Block_axis3 = { - { - {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}}}, - {{-1, 32, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}}, - {{-1, 32, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}}, - }, - { - {{{1, 3}, 32, {1, 7}, {2, 16}, {3, 7}}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}, }}, - {{{1, 5}, 32, {1, 7}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}}, - {{{1, 6}, 32, {1, 15}, {1, 10}, {1, 20}}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}}, - }, + { + {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 32, 1, 16, 3}, {3, 32, 7, 2, 4}}}, + {{-1, 32, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}}, + {{-1, 32, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}}, + }, + { + {{{1, 3}, 32, {1, 7}, {2, 16}, {3, 7}}, + { + {2, 32, 4, 5, 7}, + {1, 32, 1, 16, 3}, + {3, 
32, 7, 2, 4}, + }}, + {{{1, 5}, 32, {1, 7}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 32, 1, 3, 3}, {3, 32, 7, 11, 4}}}, + {{{1, 6}, 32, {1, 15}, {1, 10}, {1, 20}}, {{2, 32, 4, 10, 7}, {1, 32, 1, 5, 3}, {3, 32, 7, 1, 4}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_3, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::ValuesIn(inputShapes5D_Block_axis3), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_3, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::ValuesIn(inputShapes5D_Block_axis3), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_axis3 = { - { - {{-1, -1, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}}}, - {{-1, -1, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}}, - {{-1, -1, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}}, - }, - { - {{{1, 7}, {7, 32}, {1, 7}, {1, 16}, {3, 14}}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}, }}, - {{{1, 7}, {7, 32}, {1, 10}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}}, - {{{1, 7}, {1, 32}, {1, 10}, {1, 10}, {1, 10}}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}}, - }, + { + {{-1, -1, -1, -1, -1}, {{2, 32, 4, 5, 7}, {1, 11, 1, 16, 3}, {3, 7, 7, 2, 4}}}, + {{-1, -1, -1, -1, -1}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}}, + {{-1, -1, -1, -1, -1}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}}, + }, + { + {{{1, 7}, {7, 32}, {1, 7}, {1, 16}, {3, 14}}, + { + {2, 32, 4, 5, 7}, + {1, 11, 1, 16, 3}, + {3, 7, 7, 2, 4}, + }}, + {{{1, 7}, {7, 32}, {1, 10}, {1, 11}, {3, 7}}, {{2, 32, 4, 1, 7}, {1, 11, 1, 3, 3}, {3, 7, 7, 11, 4}}}, + {{{1, 7}, {1, 32}, {1, 10}, {1, 10}, {1, 10}}, {{2, 32, 4, 10, 7}, {1, 11, 1, 5, 3}, {3, 7, 7, 1, 4}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_3, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::ValuesIn(inputShapes5D_axis3), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D_ref, planarChannels_5D)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_3, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::ValuesIn(inputShapes5D_axis3), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D_ref, planarChannels_5D)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_Block_axis4 = { - { - {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 5}, {1, 32, 1, 1, 16}, {3, 32, 7, 9, 2}, }}, - {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}}, - {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}}, - }, - { - {{{1, 15}, 32, {1, 10}, {1, 10}, {1, 16}}, {{2, 32, 4, 5, 5}, {1, 32, 1, 1, 16}, {3, 32, 7, 9, 2}, }}, - {{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}}, - {{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}}, - }, + { + {{-1, 32, -1, -1, -1}, + { + {2, 32, 4, 5, 5}, + {1, 32, 1, 1, 16}, + {3, 32, 7, 9, 2}, + }}, + {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}}, + {{-1, 32, -1, -1, -1}, {{2, 32, 4, 5, 10}, 
{1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}}, + }, + { + {{{1, 15}, 32, {1, 10}, {1, 10}, {1, 16}}, + { + {2, 32, 4, 5, 5}, + {1, 32, 1, 1, 16}, + {3, 32, 7, 9, 2}, + }}, + {{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 1}, {1, 32, 1, 1, 3}, {3, 32, 7, 9, 11}}}, + {{{1, 15}, 32, {1, 10}, {1, 10}, {1, 11}}, {{2, 32, 4, 5, 10}, {1, 32, 1, 1, 5}, {3, 32, 7, 9, 1}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_4, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(4), - ::testing::ValuesIn(inputShapes5D_Block_axis4), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block_dynamic_axis_4, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(4), + ::testing::ValuesIn(inputShapes5D_Block_axis4), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked8_5D_ref, blocked16_5D_ref)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes5D_axis4 = { - { - {{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}}, - {{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}}, - {{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}}, - }, - { - {{{1, 3}, {1, 14}, {1, 7}, {1, 10}, {2, 16}}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}}, - {{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 11}}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}}, - {{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 10}}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}}, - }, + { + {{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}}, + {{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}}, + {{-1, -1, -1, -1, -1}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}}, + }, + { + {{{1, 3}, {1, 14}, {1, 7}, {1, 10}, {2, 16}}, {{2, 1, 4, 5, 5}, {1, 4, 1, 1, 16}, {3, 14, 7, 9, 2}}}, + {{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 11}}, {{2, 1, 4, 5, 1}, {1, 4, 1, 1, 3}, {3, 14, 7, 9, 11}}}, + {{{1, 3}, {1, 14}, {1, 7}, {1, 9}, {1, 10}}, {{2, 1, 4, 5, 10}, {1, 4, 1, 1, 5}, {3, 14, 7, 9, 1}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_4, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(4), - ::testing::ValuesIn(inputShapes5D_axis4), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D_ref, planarChannels_5D)), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_dynamic_axis_4, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(4), + ::testing::ValuesIn(inputShapes5D_axis4), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D_ref, planarChannels_5D)), + ConcatLayerCPUTest::getTestCaseName); const std::vector> inputShapes_byBatch_static = { - static_shapes_to_test_representation({{5, 2, 2, 2}, {2, 2, 2, 2}}), - static_shapes_to_test_representation({{1, 3, 5}, {3, 3, 5}}), - static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}}) -}; + static_shapes_to_test_representation({{5, 2, 2, 2}, {2, 2, 2, 2}}), + static_shapes_to_test_representation({{1, 3, 5}, {3, 3, 5}}), + static_shapes_to_test_representation({{4, 3, 2}, {1, 3, 2}})}; const std::vector> inputShapes_byBatch_dynamic = { - // 5D - { - {{-1, -1, -1, -1, -1}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }}, - {{-1, -1, -1, -1, -1}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}}, - {{-1, -1, -1, -1, -1}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 
20, 7, 9, 1}}}, - }, - { - {{{3, 10}, {7, 32}, {1, 9}, {1, 10}, {1, 5}}, {{10, 32, 4, 5, 5}, {4, 7, 1, 1, 3}, {3, 20, 7, 9, 1}, }}, - {{{3, 7}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}}, - {{{1, 6}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}}, - }, - // 4D - { - {{-1, -1, -1, -1}, {{10, 32, 4, 5}, {4, 7, 1, 1}, {3, 20, 7, 9}, }}, - {{-1, -1, -1, -1}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}}, - {{-1, -1, -1, -1}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}}, - }, - { - {{{1, 10}, {1, 32}, {1, 7}, {1, 9}}, {{10, 32, 4, 5}, {4, 7, 1, 1}, {3, 20, 7, 9}, }}, - {{{3, 7}, {7, 32}, {1, 7}, {1, 9}}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}}, - {{{1, 6}, {7, 32}, {1, 7}, {1, 9}}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}}, - }, - { - {{{1, 10}, 32, 4, 5}, {{10, 32, 4, 5}, {4, 32, 4, 5}}}, - {{{1, 10}, 32, 4, 5}, {{5, 32, 4, 5}, {7, 32, 4, 5}}}, - {{{1, 10}, 32, 4, 5}, {{1, 32, 4, 5}, {1, 32, 4, 5}}}, - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch_static, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0), - ::testing::ValuesIn(inputShapes_byBatch_static), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch_dynamic, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0), - ::testing::ValuesIn(inputShapes_byBatch_dynamic), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - ConcatLayerCPUTest::getTestCaseName); + // 5D + { + {{-1, -1, -1, -1, -1}, + { + {10, 32, 4, 5, 5}, + {4, 7, 1, 1, 3}, + {3, 20, 7, 9, 1}, + }}, + {{-1, -1, -1, -1, -1}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}}, + {{-1, -1, -1, -1, -1}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}}, + }, + { + {{{3, 10}, {7, 32}, {1, 9}, {1, 10}, {1, 5}}, + { + {10, 32, 4, 5, 5}, + {4, 7, 1, 1, 3}, + {3, 20, 7, 9, 1}, + }}, + {{{3, 7}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{5, 32, 4, 5, 5}, {7, 7, 1, 1, 3}, {3, 20, 7, 9, 1}}}, + {{{1, 6}, {7, 32}, {1, 7}, {1, 9}, {1, 5}}, {{1, 32, 4, 5, 5}, {1, 7, 1, 1, 3}, {6, 20, 7, 9, 1}}}, + }, + // 4D + { + {{-1, -1, -1, -1}, + { + {10, 32, 4, 5}, + {4, 7, 1, 1}, + {3, 20, 7, 9}, + }}, + {{-1, -1, -1, -1}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}}, + {{-1, -1, -1, -1}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}}, + }, + { + {{{1, 10}, {1, 32}, {1, 7}, {1, 9}}, + { + {10, 32, 4, 5}, + {4, 7, 1, 1}, + {3, 20, 7, 9}, + }}, + {{{3, 7}, {7, 32}, {1, 7}, {1, 9}}, {{5, 32, 4, 5}, {7, 7, 1, 1}, {3, 20, 7, 9}}}, + {{{1, 6}, {7, 32}, {1, 7}, {1, 9}}, {{1, 32, 4, 5}, {1, 7, 1, 1}, {6, 20, 7, 9}}}, + }, + { + {{{1, 10}, 32, 4, 5}, {{10, 32, 4, 5}, {4, 32, 4, 5}}}, + {{{1, 10}, 32, 4, 5}, {{5, 32, 4, 5}, {7, 32, 4, 5}}}, + {{{1, 10}, 32, 4, 5}, {{1, 32, 4, 5}, {1, 32, 4, 5}}}, + }}; + +INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch_static, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0), + ::testing::ValuesIn(inputShapes_byBatch_static), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat_byBatch_dynamic, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0), + ::testing::ValuesIn(inputShapes_byBatch_dynamic), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + 
ConcatLayerCPUTest::getTestCaseName); const std::vector<std::vector<InputShape>> inputShapes3D_axis1 = { - static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}), - { - {{-1, -1, -1}, {{2, 5, 12}, {1, 16, 1}, {5, 2, 6}, }}, - {{-1, -1, -1}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}}, - {{-1, -1, -1}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}}, - }, - { - {{{1, 5}, {2, 16}, {1, 12}}, {{2, 5, 12}, {1, 16, 1}, {5, 2, 6}, }}, - {{{1, 5}, {1, 11}, {1, 21}}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}}, - {{{1, 5}, {1, 10}, {1, 12}}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}}, - }, + static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}), + { + {{-1, -1, -1}, + { + {2, 5, 12}, + {1, 16, 1}, + {5, 2, 6}, + }}, + {{-1, -1, -1}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}}, + {{-1, -1, -1}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}}, + }, + { + {{{1, 5}, {2, 16}, {1, 12}}, + { + {2, 5, 12}, + {1, 16, 1}, + {5, 2, 6}, + }}, + {{{1, 5}, {1, 11}, {1, 21}}, {{2, 1, 12}, {1, 3, 1}, {5, 11, 6}}}, + {{{1, 5}, {1, 10}, {1, 12}}, {{2, 10, 12}, {1, 5, 1}, {5, 1, 6}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(inputShapes3D_axis1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(inputShapes3D_axis1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + ConcatLayerCPUTest::getTestCaseName); const std::vector<std::vector<InputShape>> inputShapes3D_axis2 = { - static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}), - { - {{-1, -1, -1}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}}}, - {{-1, -1, -1}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}}, - {{-1, -1, -1}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}}, - }, - { - {{{1, 4}, {1, 4}, {2, 16}}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}, }}, - {{{1, 4}, {1, 4}, {1, 11}}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}}, - {{{1, 4}, {1, 4}, {1, 10}}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}}, - }, + static_shapes_to_test_representation({{2, 4, 5}, {2, 4, 5}}), + { + {{-1, -1, -1}, {{4, 4, 5}, {3, 2, 16}, {1, 1, 2}}}, + {{-1, -1, -1}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}}, + {{-1, -1, -1}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}}, + }, + { + {{{1, 4}, {1, 4}, {2, 16}}, + { + {4, 4, 5}, + {3, 2, 16}, + {1, 1, 2}, + }}, + {{{1, 4}, {1, 4}, {1, 11}}, {{4, 4, 1}, {3, 2, 3}, {1, 1, 11}}}, + {{{1, 4}, {1, 4}, {1, 10}}, {{4, 4, 10}, {3, 2, 5}, {1, 1, 1}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis2, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(2), - ::testing::ValuesIn(inputShapes3D_axis2), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat_3D_axis2, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(2), + ::testing::ValuesIn(inputShapes3D_axis2), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + ConcatLayerCPUTest::getTestCaseName); const std::vector<std::vector<InputShape>> inputShapes2D_axis1 = { - static_shapes_to_test_representation({{3, 2}, {3, 10}}), - { - {{-1, -1}, {{19, 5}, {1, 16}, {8, 2}, }}, - {{-1, -1}, {{19, 1}, {1, 3}, {8, 11}}}, - {{-1, -1}, {{19, 10}, {1, 5}, {8, 1}}}, - }, - { - {{{1, 19}, {2, 16}}, {{19, 5}, {1, 16}, {8, 2}, }}, - {{{1, 19}, {1, 11}}, {{19, 1}, {1, 3}, {8, 11}}}, - {{{1, 19}, {1, 10}}, {{19, 10}, {1, 5}, {8,
1}}}, - }, + static_shapes_to_test_representation({{3, 2}, {3, 10}}), + { + {{-1, -1}, + { + {19, 5}, + {1, 16}, + {8, 2}, + }}, + {{-1, -1}, {{19, 1}, {1, 3}, {8, 11}}}, + {{-1, -1}, {{19, 10}, {1, 5}, {8, 1}}}, + }, + { + {{{1, 19}, {2, 16}}, + { + {19, 5}, + {1, 16}, + {8, 2}, + }}, + {{{1, 19}, {1, 11}}, {{19, 1}, {1, 3}, {8, 11}}}, + {{{1, 19}, {1, 10}}, {{19, 10}, {1, 5}, {8, 1}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat_2D_axis1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(inputShapes2D_axis1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat_2D_axis1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(inputShapes2D_axis1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + ConcatLayerCPUTest::getTestCaseName); const std::vector<std::vector<InputShape>> inputShapes1D_static = { - static_shapes_to_test_representation({ov::Shape{5}, ov::Shape{5}}), - static_shapes_to_test_representation({ov::Shape{2}, ov::Shape{2}}), - static_shapes_to_test_representation({ov::Shape{1}, ov::Shape{1}}), - static_shapes_to_test_representation({ov::Shape{3}, ov::Shape{3}}) -}; + static_shapes_to_test_representation({ov::Shape{5}, ov::Shape{5}}), + static_shapes_to_test_representation({ov::Shape{2}, ov::Shape{2}}), + static_shapes_to_test_representation({ov::Shape{1}, ov::Shape{1}}), + static_shapes_to_test_representation({ov::Shape{3}, ov::Shape{3}})}; const std::vector<std::vector<InputShape>> inputShapes1D_dynamic = { - { - {{-1}, {{19}, {8}, {5}}}, - {{-1}, {{19}, {8}, {5}}}, - {{-1}, {{19}, {8}, {5}}}, - }, - { - {{{1, 20}}, {{19}, {8}, {5}}}, - {{{1, 20}}, {{19}, {8}, {5}}}, - {{{1, 20}}, {{19}, {8}, {5}}}, - }, + { + {{-1}, {{19}, {8}, {5}}}, + {{-1}, {{19}, {8}, {5}}}, + {{-1}, {{19}, {8}, {5}}}, + }, + { + {{{1, 20}}, {{19}, {8}, {5}}}, + {{{1, 20}}, {{19}, {8}, {5}}}, + {{{1, 20}}, {{19}, {8}, {5}}}, + }, }; -INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D_static, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0), - ::testing::ValuesIn(inputShapes1D_static), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D_dynamic, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0), - ::testing::ValuesIn(inputShapes1D_dynamic), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - ConcatLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D_static, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0), + ::testing::ValuesIn(inputShapes1D_static), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat_1D_dynamic, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0), + ::testing::ValuesIn(inputShapes1D_dynamic), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + ConcatLayerCPUTest::getTestCaseName); // ============================================== inPlace cases ============================================ -INSTANTIATE_TEST_SUITE_P(concat_Concat4D_CPU_Block8inPlace, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0, 1), - ::testing::Values(std::vector<InputShape>{ - {{}, {{1, 16, 5,
7}}}, - {{}, {{1, 16, 5, 7}}}, - {{}, {{1, 16, 5, 7}}}, - }, - std::vector<InputShape>{ - {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, - {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, - {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, - }), - ::testing::Values(ElementType::f32), - ::testing::Values(planar_4D, blocked8_4D)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16inPlace_0, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0), - ::testing::Values(std::vector<InputShape>{ - {{}, {{1, 32, 5, 7}}}, - {{}, {{1, 32, 5, 7}}}, - {{}, {{1, 32, 5, 7}}}, - }, - std::vector<InputShape>{ - {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, - {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, - {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, - }), - ::testing::Values(ElementType::f32), - ::testing::Values(blocked16_4D)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16inPlace_1, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::Values(std::vector<InputShape>{ - {{}, {{1, 32, 5, 7}}}, - {{}, {{1, 16, 5, 7}}}, - {{}, {{1, 32, 5, 7}}}, - }, - std::vector<InputShape>{ - {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, - {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, - {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, - }), - ::testing::Values(ElementType::f32), - ::testing::Values(blocked16_4D)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(concat_Concat5D_CPU_Block8inPlace, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0, 1), - ::testing::Values(std::vector<InputShape>{ - {{}, {{1, 16, 3, 5, 7}}}, - {{}, {{1, 16, 3, 5, 7}}}, - {{}, {{1, 16, 3, 5, 7}}}, - }, - std::vector<InputShape>{ - {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, - {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, - {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, - }), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(planar_5D, blocked8_5D)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat5D_CPU_Block16inPlace, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0, 1), - ::testing::Values(std::vector<InputShape>{ - {{}, {{1, 32, 3, 5, 7}}}, - {{}, {{1, 32, 3, 5, 7}}}, - {{}, {{1, 32, 3, 5, 7}}}, - }, - std::vector<InputShape>{ - {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, - {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, - {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, - }), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(blocked16_5D)), - ConcatLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Concat_inPlace, ConcatLayerCPUTest, - ::testing::Combine( - ::testing::Values(0, 1, 2, -1), - ::testing::ValuesIn(std::vector<std::vector<InputShape>>{ - static_shapes_to_test_representation({{1, 1, 1, 10}, {1, 1, 1, 10}}), - static_shapes_to_test_representation({{1, 1, 5}, {1, 1, 5}})}), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - ConcatLayerCPUTest::getTestCaseName); - -} // namespace - -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(concat_Concat4D_CPU_Block8inPlace, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0, 1), + ::testing::Values( + std::vector<InputShape>{ + {{}, {{1, 16, 5,
7}}}, + {{}, {{1, 16, 5, 7}}}, + {{}, {{1, 16, 5, 7}}}, + }, + std::vector<InputShape>{ + {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, + {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, + {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, + }), + ::testing::Values(ElementType::f32), + ::testing::Values(planar_4D, blocked8_4D)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16inPlace_0, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0), + ::testing::Values( + std::vector<InputShape>{ + {{}, {{1, 32, 5, 7}}}, + {{}, {{1, 32, 5, 7}}}, + {{}, {{1, 32, 5, 7}}}, + }, + std::vector<InputShape>{ + {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, + {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, + {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, + }), + ::testing::Values(ElementType::f32), + ::testing::Values(blocked16_4D)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat4D_CPU_Block16inPlace_1, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(1), + ::testing::Values( + std::vector<InputShape>{ + {{}, {{1, 32, 5, 7}}}, + {{}, {{1, 16, 5, 7}}}, + {{}, {{1, 32, 5, 7}}}, + }, + std::vector<InputShape>{ + {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, + {{1, 16, -1, -1}, {{1, 16, 5, 7}, {1, 16, 16, 2}, {1, 16, 2, 8}}}, + {{1, 32, -1, -1}, {{1, 32, 5, 7}, {1, 32, 16, 2}, {1, 32, 2, 8}}}, + }), + ::testing::Values(ElementType::f32), + ::testing::Values(blocked16_4D)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + concat_Concat5D_CPU_Block8inPlace, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0, 1), + ::testing::Values( + std::vector<InputShape>{ + {{}, {{1, 16, 3, 5, 7}}}, + {{}, {{1, 16, 3, 5, 7}}}, + {{}, {{1, 16, 3, 5, 7}}}, + }, + std::vector<InputShape>{ + {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, + {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, + {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, + }), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(planar_5D, blocked8_5D)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Concat5D_CPU_Block16inPlace, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0, 1), + ::testing::Values( + std::vector<InputShape>{ + {{}, {{1, 32, 3, 5, 7}}}, + {{}, {{1, 32, 3, 5, 7}}}, + {{}, {{1, 32, 3, 5, 7}}}, + }, + std::vector<InputShape>{ + {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, + {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, + {{1, 32, -1, -1, -1}, {{1, 32, 5, 7, 3}, {1, 32, 16, 2, 3}, {1, 32, 2, 8, 3}}}, + }), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(blocked16_5D)), + ConcatLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Concat_inPlace, + ConcatLayerCPUTest, + ::testing::Combine(::testing::Values(0, 1, 2, -1), + ::testing::ValuesIn(std::vector<std::vector<InputShape>>{ + static_shapes_to_test_representation({{1, 1, 1, 10}, {1, 1, 1, 10}}), + static_shapes_to_test_representation({{1, 1, 5}, {1, 1, 5}})}), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + ConcatLayerCPUTest::getTestCaseName); + +} // namespace + +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp index 92e2e20e00a6b0..e4c9658218ccdd 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convert_to_plugin_specific_node.cpp @@ -2,35 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/node_builders/eltwise.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using ConvertToPluginSpecificNodeParams = std::tuple<SizeVector, SizeVector, Precision, helpers::EltwiseTypes, size_t, size_t>; // expected number of constant node +using ConvertToPluginSpecificNodeParams = std::tuple<ov::Shape, ov::Shape, ov::element::Type, ov::test::utils::EltwiseTypes, size_t, size_t>; // expected number of constant node class ConvertToPluginSpecificNode : public testing::WithParamInterface<ConvertToPluginSpecificNodeParams>, - public LayerTestsUtils::LayerTestsCommon { + public SubgraphBaseStaticTest { public: static std::string getTestCaseName(testing::TestParamInfo<ConvertToPluginSpecificNodeParams> obj) { - SizeVector nonConstShape, constShape; - Precision prc; - helpers::EltwiseTypes nodeType; + ov::Shape nonConstShape, constShape; + ov::element::Type prc; + ov::test::utils::EltwiseTypes nodeType; size_t port, constNodeNum; std::tie(nonConstShape, constShape, prc, nodeType, port, constNodeNum) = obj.param; std::ostringstream result; - result << "IS_NON_CONST=" << ov::test::utils::vec2str(nonConstShape) << "_"; - result << "IS_CONST=" << ov::test::utils::vec2str(constShape) << "_"; + result << "IS_NON_CONST=" << nonConstShape << "_"; + result << "IS_CONST=" << constShape << "_"; result << "PRC=" << prc << "_"; result << "NODE=" << nodeType << "_"; result << "PORT=" << port << "_"; @@ -45,39 +46,36 @@ class ConvertToPluginSpecificNode : public testing::WithParamInterface<ConvertToPluginSpecificNodeParams> void SetUp() override { - SizeVector nonConstShape, constShape; - Precision prc; - helpers::EltwiseTypes nodeType; + ov::Shape nonConstShape, constShape; + ov::element::Type prc; + ov::test::utils::EltwiseTypes nodeType; size_t port; std::tie(nonConstShape, constShape, prc, nodeType, port, constNodeNum) = this->GetParam(); OPENVINO_ASSERT(shape_size(constShape) == 1); - const auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); - const auto param = std::make_shared<ngraph::opset1::Parameter>(ngPrc, ngraph::Shape(nonConstShape)); - const auto constNode = builder::makeConstant(ngPrc, ngraph::Shape(constShape), std::vector<float>{}, true); + const auto param = std::make_shared<ov::op::v0::Parameter>(prc, ov::Shape(nonConstShape)); + const auto constNode = ngraph::builder::makeConstant(prc, constShape, std::vector<float>{}, true); OutputVector inputs(2); inputs[port] = constNode; inputs[1 - port] = param; - auto powerStatic = ngraph::builder::makeEltwise(inputs[0], inputs[1], nodeType); + auto powerStatic = ov::test::utils::makeEltwise(inputs[0], inputs[1], nodeType); - function = std::make_shared<ngraph::Function>(powerStatic, ParameterVector{param}, "ConvertToPluginSpecificNode"); + function = std::make_shared<ov::Model>(powerStatic, ParameterVector{param}, "ConvertToPluginSpecificNode"); } }; TEST_P(ConvertToPluginSpecificNode, CompareWithRefs) { - Run(); - CheckNumberOfNodesWithType(executableNetwork, "Const", constNodeNum); + run(); + CheckNumberOfNodesWithType(compiledModel, "Const", constNodeNum); } namespace { -const std::vector<std::vector<size_t>> nonConstIS = { - {3, 4, 5, 6} -}; +const std::vector<ov::Shape> nonConstIS = {{3, 4, 5, 6}}; -const std::vector<std::vector<size_t>> constIS = { +const std::vector<ov::Shape> constIS = { {}, {1}, {1, 1}, @@ -85,34 +83,37 @@ const std::vector<std::vector<size_t>> constIS = { {1, 1, 1, 1}, }; -std::vector<ngraph::helpers::EltwiseTypes> nodeTypes = { - ngraph::helpers::EltwiseTypes::ADD, - ngraph::helpers::EltwiseTypes::SUBTRACT, - ngraph::helpers::EltwiseTypes::MULTIPLY -}; +std::vector<ov::test::utils::EltwiseTypes> nodeTypes = 
{ov::test::utils::EltwiseTypes::ADD, + ov::test::utils::EltwiseTypes::SUBTRACT, + ov::test::utils::EltwiseTypes::MULTIPLY}; -const std::vector<size_t> port = { - 0, 1 -}; +const std::vector<size_t> port = {0, 1}; const auto testParamsEltwise = ::testing::Combine(::testing::ValuesIn(nonConstIS), ::testing::ValuesIn(constIS), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(nodeTypes), ::testing::ValuesIn(port), ::testing::Values(0)); -INSTANTIATE_TEST_SUITE_P(smoke_CheckEltwise, ConvertToPluginSpecificNode, testParamsEltwise, ConvertToPluginSpecificNode::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CheckEltwise, + ConvertToPluginSpecificNode, + testParamsEltwise, + ConvertToPluginSpecificNode::getTestCaseName); const auto testParamsPower = ::testing::Combine(::testing::ValuesIn(nonConstIS), ::testing::ValuesIn(constIS), - ::testing::Values(Precision::FP32), - ::testing::Values(ngraph::helpers::EltwiseTypes::POWER), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::EltwiseTypes::POWER), ::testing::Values(1), ::testing::Values(0)); -INSTANTIATE_TEST_SUITE_P(smoke_CheckPower, ConvertToPluginSpecificNode, testParamsPower, ConvertToPluginSpecificNode::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CheckPower, + ConvertToPluginSpecificNode, + testParamsPower, + ConvertToPluginSpecificNode::getTestCaseName); -} // namespace +} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp deleted file mode 100755 index aed06f2840d61e..00000000000000 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution.cpp +++ /dev/null @@ -1,1714 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "test_utils/cpu_test_utils.hpp" -#include "test_utils/filter_cpu_info.hpp" -#include "test_utils/convolution_params.hpp" -#include "test_utils/fusing_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "common_test_utils/node_builders/convolution.hpp" -#include "openvino/core/visibility.hpp" -#include <shared_test_classes/single_layer/convolution.hpp> -#include "utils/general_utils.h" - -using namespace InferenceEngine; -using namespace CPUTestUtils; -using namespace ov::test; -using namespace ov::intel_cpu; - -namespace CPULayerTestsDefinitions { -using LayerTestsDefinitions::convSpecificParams; - -typedef std::tuple< - convSpecificParams, - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shape - LayerTestsUtils::TargetDevice // Device name -> convLayerTestParamsSet; - -typedef std::tuple< - convLayerTestParamsSet, - CPUSpecificParams, - fusingSpecificParams, - std::map<std::string, std::string> > convLayerCPUTestParamsSet; - -class ConvolutionLayerCPUTest : public testing::WithParamInterface<convLayerCPUTestParamsSet>, - virtual public SubgraphBaseTest, public CpuTestWithFusing { -public: - static std::string getTestCaseName(const testing::TestParamInfo<convLayerCPUTestParamsSet>& obj) { - convLayerTestParamsSet basicParamsSet; - CPUSpecificParams cpuParams; - fusingSpecificParams fusingParams; - std::map<std::string, std::string> additionalConfig; - std::tie(basicParamsSet, cpuParams, fusingParams, additionalConfig) = obj.param; - - convSpecificParams convParams; - ElementType netType; - ElementType inType, outType; - InputShape inputShape; - std::string targetDevice; - std::tie(convParams,
netType, inType, outType, inputShape, targetDevice) = basicParamsSet; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; - std::vector<ptrdiff_t> padBegin, padEnd; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; - - std::ostringstream result; - result << "IS="; - result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; - result << "TS=("; - for (const auto& shape : inputShape.second) { - result << ov::test::utils::vec2str(shape) << "_"; - } - result << ")_"; - result << "K" << ov::test::utils::vec2str(kernel) << "_"; - result << "S" << ov::test::utils::vec2str(stride) << "_"; - result << "PB" << ov::test::utils::vec2str(padBegin) << "_"; - result << "PE" << ov::test::utils::vec2str(padEnd) << "_"; - result << "D=" << ov::test::utils::vec2str(dilation) << "_"; - result << "O=" << convOutChannels << "_"; - result << "AP=" << padType << "_"; - result << "netPRC=" << netType << "_"; - result << "inPRC=" << inType << "_"; - result << "outPRC=" << outType << "_"; - result << "trgDev=" << targetDevice; - - result << CPUTestsBase::getTestCaseName(cpuParams); - result << CpuTestWithFusing::getTestCaseName(fusingParams); - - if (!additionalConfig.empty()) { - result << "_PluginConf"; - for (auto& item : additionalConfig) { - result << "_" << item.first << "=" << item.second; - } - } - - return result.str(); - } -protected: - bool isBias = false; - InferenceEngine::SizeVector kernel, dilation; - - void checkBiasFusing(ov::CompiledModel &execNet) const { - if (!execNet) return; - - auto execGraph = execNet.get_runtime_model(); - ASSERT_NE(nullptr, execGraph); - - bool foundConv = false; - for (const auto &node : execGraph->get_ops()) { - const auto & rtInfo = node->get_rt_info(); - auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string { - auto it = rtInfo.find(paramName); - OPENVINO_ASSERT(rtInfo.end() != it); - return it->second.as<std::string>(); - }; - - if (getExecValue(ExecGraphInfoSerialization::LAYER_TYPE) == "Convolution") { - foundConv = true; - ASSERT_EQ(3, node->inputs().size()); - break; - } - } - - ASSERT_TRUE(foundConv) << "Can't find Convolution node"; - } - - std::shared_ptr<ngraph::Node> modifyGraph(const ngraph::element::Type &ngPrc, - ngraph::ParameterVector &params, - const std::shared_ptr<ngraph::Node> &lastNode) override { - auto retNode = CpuTestWithFusing::modifyGraph(ngPrc, params, lastNode); - std::shared_ptr<ngraph::Node> opToShapeInfer = nullptr; - for (auto& targetShapes : targetStaticShapes) { - for (size_t i = targetShapes.size(); i < params.size(); ++i) { - const auto &shape = params[i]->get_output_partial_shape(0); - if (shape.is_static()) { - targetShapes.push_back(shape.get_shape()); - } else { - // It is assumed that in such tests we have second parameter only if sum fusion is tested. - // Considering this fact, we need to set the appropriate static shape for the second term of the sum operation, and - // it has to match the convolution output shape. So the most suitable solution here is to perform shape inference on the
So the most suitable solution here is to perform shape inference on the - // convolution node - if (!opToShapeInfer) { - ngraph::OutputVector inputsForShapeInfer; - for (size_t j = 0; j < lastNode->get_input_size(); j++) { - if (ngraph::is_type(lastNode->get_input_node_ptr(j))) { - inputsForShapeInfer.push_back(lastNode->get_input_node_shared_ptr(j)); - } else { - inputsForShapeInfer.push_back(std::make_shared(lastNode->get_input_element_type(j), - lastNode->get_input_partial_shape(j))); - } - } - opToShapeInfer = lastNode->clone_with_new_inputs(inputsForShapeInfer); - } - - std::vector secondParameterShapes; - if (auto parameter = dynamic_cast(opToShapeInfer->get_input_node_ptr(0))) { - parameter->set_partial_shape(targetShapes.front()); - parameter->validate_and_infer_types(); - } - opToShapeInfer->validate_and_infer_types(); - targetShapes.push_back(opToShapeInfer->get_output_shape(0)); - } - } - } - return retNode; - } - - void SetUp() override { - rel_threshold = 1e-4f; - - convLayerTestParamsSet basicParamsSet; - CPUSpecificParams cpuParams; - fusingSpecificParams fusingParams; - std::map additionalConfig; - std::tie(basicParamsSet, cpuParams, fusingParams, additionalConfig) = this->GetParam(); - - configuration.insert(additionalConfig.begin(), additionalConfig.end()); - - std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - std::tie(postOpMgrPtr, fusedOps) = fusingParams; - - if (postOpMgrPtr) - isBias = (postOpMgrPtr->getFusedOpsNames() == "Add(PerChannel)" && selectedType != "jit_avx512_winograd"); - - convSpecificParams convParams; - InputShape inputShape; - auto netType = ElementType::undefined; - std::tie(convParams, netType, inType, outType, inputShape, targetDevice) = basicParamsSet; - - init_input_shapes({inputShape}); - - if (configuration.count(PluginConfigParams::KEY_ENFORCE_BF16) && - PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16].as()) { - selectedType += "_BF16"; - rel_threshold = 1e-2f; - if (selectedType == "jit_gemm_BF16") - rel_threshold = 0.05f; - } else { - selectedType = makeSelectedTypeStr(selectedType, netType); - } - - ngraph::op::PadType padType; - InferenceEngine::SizeVector stride; - std::vector padBegin, padEnd; - size_t convOutChannels; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; - - ov::ParameterVector inputParams; - for (auto&& shape : inputDynamicShapes) - inputParams.push_back(std::make_shared(ov::element::f32, shape)); - - auto convolutionNode = ov::test::utils::make_convolution(inputParams[0], netType, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels); - - function = makeNgraphFunction(netType, inputParams, convolutionNode, "Convolution"); - } -}; - -TEST_P(ConvolutionLayerCPUTest, CompareWithRefs) { - // Skip tests for sse41 convolution where ic or oc cannot be exactly divided by the block size, - // since tails processing for sse41 nspc layout is not supported yet (see 52736). - if (!inFmts.empty() && (inFmts.front() == nwc || inFmts.front() == nhwc || inFmts.front() == ndhwc) && selectedType.find("jit_sse") != std::string::npos) { - auto inpChannels = function->get_parameters().front()->get_partial_shape()[1].get_length(); - auto outChannels = function->get_output_partial_shape(0)[1].get_length(); - if ((inpChannels % 8) || (outChannels % 8)) { - GTEST_SKIP() << "Disabled test due to the sse41 convolution kernel does not support tails for nspc layout." 
<< std::endl; - } - } - - if (!priority.empty()) { - // Skip all the brgconv avx2 tests for now. Current brgconv_avx2 is disabled due to perf regression[CVS-105756]. - // This convolution test code has already covered brgconv avx2 primitive. - // @todo: Remove this once brgconv_avx2 is enabled for convolution node. - if (priority[0].find("brgconv_avx2") != std::string::npos) - GTEST_SKIP() << "Disabled test due to the brgconv_avx2 is not enabled." << std::endl; - // Skip tests for brgconv convolution where kernel size = 1x1 - if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { - bool is_1x1 = true; - for (const auto &i : kernel) { - if (i != 1) { - is_1x1 = false; - break; - } - } - if (is_1x1) { - GTEST_SKIP() << "Disabled test due to the brgconv does not support 1x1 convolution kernel." << std::endl; - } - } - - // Skip tests for brgconv_amx convolution where dilation is not 1 - if (priority[0].find("amx") != std::string::npos) { - bool dilation_is_1x1 = true; - for (const auto &i : dilation) { - if (i != 1) { - dilation_is_1x1 = false; - break; - } - } - if (!dilation_is_1x1) { - GTEST_SKIP() << "Disabled test due to the brgconv amx does not support non 1 dilation convolution kernel." << std::endl; - } - } - } - - run(); - - if (isBias) { - checkBiasFusing(compiledModel); - } - CheckPluginRelatedResults(compiledModel, "Convolution"); -} - -namespace { - -std::vector filterCPUInfoForDevice_BF16(std::vector allParams) { - std::vector specificParams; - bool with_bf16 = with_cpu_x86_bfloat16(); - std::copy_if(allParams.begin(), allParams.end(), std::back_inserter(specificParams), [with_bf16](const CPUSpecificParams& item) { - const auto &selected = std::get<3>(item); - // when no bf16 hardware brgconv will not work - if (!with_bf16 && selected.find("brgconv") != std::string::npos) { - return false; - } - return true; - }); - - return filterCPUInfoForDevice(specificParams); -} - -/* COMMON PARAMS */ -const std::vector fusingParamsSet{ - emptyFusingSpec, - // eltwise - fusingRelu, - fusingPRelu1DScaleShift, - // depthwise - fusingReluScaleShift, - // fake quantize - fusingFakeQuantizePerTensorRelu, - fusingFakeQuantizePerChannelRelu, - // sum - fusingSumEluFQ, - fusingSum, - // bias - fusingAddPerChannel -}; - -const std::vector fusingParamsSetBF16{ - emptyFusingSpec, - // eltwise - fusingRelu, - // depthwise - fusingPRelu1DScaleShift, - // sum - fusingSum, - // bias - fusingAddPerChannel -}; - -/* ============= Convolution params (GEMM layout) ============= */ -const SizeVector numOutChannels_Gemm = { 6 }; - -/* ============= Convolution params (blocked and nspc layout) ============= */ -const SizeVector numOutChannels = { 64, 63 }; - -/* ============= Convolution params (1D) ============= */ -const std::vector kernels1d = { {3}, {1} }; -const std::vector strides1d = { {1}, {2} }; -const std::vector> padBegins1d = { {0}, {1} }; -const std::vector> padEnds1d = { {0} }; -const std::vector dilations1d = { {1}, {2} }; -std::vector inputShapes1d = { - {{}, {{ 2, 64, 7 }}}, - {{}, {{ 1, 67, 7 }}}, - { - //dynamic shape - { -1, 64, {1, 200} }, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 9 } - } - }, - { - //dynamic shape - { -1, 67, {1, 200} }, - { //target static shapes - { 2, 67, 7 }, - { 1, 67, 9 } - } - }, - { - //dynamic shape - { {1, 200}, 64, -1 }, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 5 } - } - } -}; -std::vector inputShapesPlain2Blocked1d = { - {{}, {{1, 1, 7}}}, - {{}, {{1, 2, 7}}}, - {{}, {{1, 3, 7}}}, - { - //dynamic shapes - {-1, 
1, {1, 200}}, - { //target static shapes - {2, 1, 7}, - {1, 1, 9} - } - }, - { - //dynamic shapes - {-1, 3, {1, 200}}, - { //target static shapes - {2, 3, 7}, - {1, 3, 9} - } - } -}; - -/* ============= Convolution params (2D) ============= */ -const std::vector kernels2d = { {3, 3}, {1, 1} }; -const std::vector strides2d = { {1, 1}, {2, 2} }; -const std::vector> padBegins2d = { {0, 0}, {1, 1} }; -const std::vector> padEnds2d = { {0, 0} }; -const std::vector dilations2d = { {1, 1} }; - -std::vector inputShapes2d = { - {{}, {{ 1, 64, 7, 7 }}}, - {{}, {{ 1, 67, 7, 7 }}}, - { - //dynamic shape - { -1, 64, -1, {1, 200} }, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9} - } - }, - { - //dynamic shape - { -1, 67, -1, {1, 200} }, - { //target static shapes - { 2, 67, 7, 7 }, - { 1, 67, 9, 9} - } - } -}; - -std::vector inputShapesPlain2Blocked2d = { - {{}, {{ 1, 1, 7, 7 }}}, - {{}, {{ 1, 2, 7, 7 }}}, - {{}, {{ 1, 3, 7, 7 }}}, - { - //dynamic shape - { -1, 1, -1, {1, 200} }, - { //target static shapes - { 2, 1, 7, 7 }, - { 1, 1, 9, 9} - } - }, - { - //dynamic shape - { -1, 3, -1, {1, 200} }, - { //target static shapes - { 2, 3, 7, 7 }, - { 1, 3, 9, 9} - } - } -}; - -/* ============= Convolution params (3D) ============= */ -const std::vector kernels3d = { {3, 3, 3}, {1, 1, 1} }; -const std::vector strides3d = { {1, 1, 1}, {2, 2, 2} }; -const std::vector> padBegins3d = { {0, 0, 0}, {1, 1, 1} }; -const std::vector> padEnds3d = { {0, 0, 0} }; -const std::vector dilations3d = { {1, 1, 1} }; -std::vector inputShapes3d = { - {{}, {{ 1, 64, 7, 7, 7 }}}, - {{}, {{ 1, 67, 7, 7, 7 }}}, - { - //dynamic shapes - { -1, 64, -1, {1, 200}, -1 }, - { //target static shapes - { 1, 64, 7, 7, 7 }, - { 1, 64, 9, 9, 9} - } - }, - { - //dynamic shapes - { -1, 67, -1, {1, 200}, -1 }, - { //target static shapes - { 1, 67, 7, 7, 7 }, - { 1, 67, 9, 9, 9} - } - } -}; -std::vector inputShapesPlain2Blocked3d = { - {{}, {{ 1, 1, 7, 7, 7 }}}, - {{}, {{ 1, 2, 7, 7, 7 }}}, - {{}, {{ 1, 3, 7, 7, 7 }}}, - { - //dynamic shapes - { -1, 1, -1, {1, 200}, -1 }, - { //target static shapes - { 2, 1, 7, 7, 7 }, - { 1, 1, 9, 9, 9 } - } - }, - { - //dynamic shapes - { -1, 3, -1, {1, 200}, -1 }, - { //target static shapes - { 2, 3, 7, 7, 7 }, - { 1, 3, 9, 9, 9 } - } - } -}; -/* ============= */ - -/* INSTANCES */ -/* ============= Convolution (Gemm 1D) ============= */ -const auto convParams_ExplicitPadding_GEMM_1D = ::testing::Combine( - ::testing::ValuesIn(kernels1d), - ::testing::ValuesIn(strides1d), - ::testing::ValuesIn(padBegins1d), - ::testing::ValuesIn(padEnds1d), - ::testing::ValuesIn(dilations1d), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_GEMM_1D = { - conv_gemm_1D, - conv_gemm_1D_nspc -}; - -std::vector inShapesGemm1D = { - {{}, {{ 2, 12, 7 }}}, - { - //dynamic shape - { {1, 200}, 12, {1, 200} }, - { //target static shapes - { 2, 12, 7 }, - { 1, 12, 5 } - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_1D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - 
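The shape lists above follow the shared InputShape convention used throughout these CPU functional tests: each entry pairs the (possibly dynamic) partial shape a model input is declared with against the concrete shapes fed on consecutive inference requests. A minimal sketch of that convention follows, assuming InputShape is the std::pair<ov::PartialShape, std::vector<ov::Shape>> alias from the shared test classes; targets_match_bounds is a hypothetical helper, not part of the test suite:

    #include <utility>
    #include <vector>
    #include "openvino/core/partial_shape.hpp"
    #include "openvino/core/shape.hpp"

    // first  - the dynamic (bounded or fully undefined) shape the model is compiled with
    // second - the concrete shapes submitted in consecutive inference requests
    using InputShape = std::pair<ov::PartialShape, std::vector<ov::Shape>>;

    // Hypothetical sanity check: every target static shape must satisfy the
    // declared bounds, which is what init_input_shapes() relies on.
    inline bool targets_match_bounds(const InputShape& s) {
        for (const auto& target : s.second) {
            if (!s.first.compatible(ov::PartialShape(target)))
                return false;  // e.g. a {1, 200} interval dimension rejects 250
        }
        return true;
    }

Under this convention, the entry { {-1, 64, {1, 200}}, {{2, 64, 7}, {1, 64, 9}} } from inputShapes1d above declares a 3D input with a fixed channel count of 64, an unbounded batch, and a bounded spatial dimension, then exercises two concrete shapes against the same compiled model.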
-// Verify that even if primitive is missed in custom priority list there is still a fallback to the default priority list -const auto conv_gemm_1D_improperPriorityList = CPUSpecificParams{{ncw}, {ncw}, {"unknown"}, "jit_gemm"}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32_ImproperPriorityList, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_1D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_1D})), // todo: [AV] what about conv_gemm_1D_nspc? - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_1D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Convolution (Gemm 2D) ============= */ -const auto convParams_ExplicitPadding_GEMM_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const auto convParams_ExplicitPadding_GEMM_2D_dilated = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::Values(SizeVector{2, 2}), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_GEMM_2D = { - conv_gemm_2D, - conv_gemm_2D_nspc -}; - -std::vector inShapesGemm2D = { - {{}, {{ 2, 12, 7, 7 }}}, - { - //dynamic shape - { {1, 200}, 12, -1, {1, 200} }, - { //target static shapes - { 2, 12, 7, 7 }, - { 1, 12, 5, 5 } - } - } -}; - -std::vector inShapesGemm2D_cache = { - {{}, {{ 2, 12, 7, 7 }}}, - { - //dynamic shape - { {1, 200}, 12, -1, {1, 200} }, - { //target static shapes - { 1, 12, 5, 5 }, - { 1, 12, 7, 7 }, - { 1, 12, 5, 5 } - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - 
::testing::ValuesIn(inShapesGemm2D_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_2D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Convolution (GEMM 3D) ============= */ -const auto convParams_ExplicitPadding_GEMM_3D = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::ValuesIn(numOutChannels_Gemm), - 
::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const auto convParams_ExplicitPadding_GEMM_3D_dilated = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::Values(SizeVector{2, 2, 2}), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_GEMM_3D = { - conv_gemm_3D, - conv_gemm_3D_nspc -}; - -std::vector inShapesGemm3D = { - {{}, {{ 2, 12, 7, 7, 7 }}}, - { - //dynamic shape - { {1, 200}, 12, -1, {1, 200}, -1 }, - { //target static shapes - { 2, 12, 7, 7, 7 }, - { 1, 12, 5, 5, 5 } - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - 
ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_GEMM_3D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Convolution (1D) ============= */ -const auto convParams_ExplicitPadding_1D = ::testing::Combine( - ::testing::ValuesIn(kernels1d), - ::testing::ValuesIn(strides1d), - ::testing::ValuesIn(padBegins1d), - ::testing::ValuesIn(padEnds1d), - ::testing::ValuesIn(dilations1d), - ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_1D = { - conv_sse42_1D, - conv_avx2_1D, - conv_avx512_1D, - conv_sse42_1D_nspc, - conv_avx2_1D_nspc, - conv_avx2_1D_nspc_brgconv, - conv_avx512_1D_nspc, - conv_avx512_1D_nspc_brgconv -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D, - conv_avx512_1D_nspc_brgconv, conv_avx512_1D_nspc_brgconv_amx})), // todo: [AV] what about conv_avx512_1D_nspc? 
- ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -const std::vector CPUParams_1D_plain_to_blocked = { - conv_sse42_plain_to_blocked_1D, - conv_avx2_plain_to_blocked_1D, - conv_avx512_plain_to_blocked_1D, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D_plain_to_blocked)), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_1D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Convolution (2D) ============= */ -const auto convParams_ExplicitPadding_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const auto convParams_ExplicitPadding_2D_dilated = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::Values(SizeVector{2, 2}), - ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_2D = { - conv_sse42_2D, - conv_avx2_2D, - conv_avx512_2D, - conv_sse42_2D_nspc, - conv_avx2_2D_nspc, - conv_avx2_2D_nspc_brgconv, - conv_avx512_2D_nspc, - conv_avx512_2D_nspc_brgconv -}; - -std::vector inputShapes2d_cache = { - {{}, {{ 1, 64, 7, 7 }}}, - {{}, {{ 1, 67, 7, 7 }}}, - { - //dynamic shape - { -1, 64, -1, {1, 200} }, - { //target static shapes - { 1, 64, 7, 7 }, - { 1, 64, 9, 9 }, - { 1, 64, 7, 7 } - } - }, - { - //dynamic shape - { -1, 67, -1, {1, 200} }, - { //target static shapes - { 1, 67, 7, 7 }, - { 1, 67, 9, 9} - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes2d_dynBatch = { - { - //dynamic shape - { {1, 10}, 64, 7, 7 }, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 7, 7 } - } - }, -}; - -const std::vector fusingParamsSet_dynBatch{ - emptyFusingSpec, - fusingReluScaleShift, - fusingSum, - fusingAddPerChannel -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_dynBatch, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d_dynBatch), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::ValuesIn(fusingParamsSet_dynBatch), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, conv_avx512_2D_nspc, - conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, conv_avx512_2D_nspc, - conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})), - 
::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -const std::vector CPUParams_2D_plain_to_blocked = { - conv_sse42_plain_to_blocked_2D, - conv_avx2_plain_to_blocked_2D, - conv_avx512_plain_to_blocked_2D, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D_plain_to_blocked)), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_2D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D_plain_to_blocked)), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_2D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Reorder + Convolution ============= */ -const auto convParams_Reorder_2D = ::testing::Combine( - ::testing::Values(SizeVector{1, 1}), - ::testing::Values(SizeVector{2, 2}), - ::testing::Values(std::vector{0, 0}), - ::testing::Values(std::vector{0, 0}), - 
::testing::Values(SizeVector{1, 1}), - ::testing::Values(64), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -std::vector inputShapes_Reorder_2D = { - { - // dynamic shape - { -1, 32, -1, -1 }, - // target static shapes - { - { 1, 32, 39, 40 }, - { 2, 32, 20, 20 }, - { 1, 32, 39, 40 }, - { 2, 32, 20, 20 } - } - } -}; - -const std::vector fusingParamsSet_reorder{ - emptyFusingSpec, - fusingReluScaleShift, - fusingAddPerChannel -}; - -INSTANTIATE_TEST_SUITE_P(smoke_reorder_Conv_2D, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Reorder_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_Reorder_2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1})), - ::testing::ValuesIn(fusingParamsSet_reorder), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Convolution (3D) ============= */ -const auto convParams_ExplicitPadding_3D = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const auto convParams_ExplicitPadding_3D_dilated = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::Values(SizeVector{2, 2, 2}), - ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_3D = { - //conv_sse42_3D, // not supported jit_sse42 for 3d - conv_avx2_3D, - conv_avx512_3D, - conv_avx2_3D_nspc, - conv_avx2_3D_nspc_brgconv, - conv_avx512_3D_nspc, - conv_avx512_3D_nspc_brgconv -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32_fusingScaleShiftAndFakeQuantizePerChannel, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), - ::testing::Values(fusingScaleShiftAndFakeQuantizePerChannel), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - 
::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, conv_avx512_3D_nspc, - conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, conv_avx512_3D_nspc, - conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -const std::vector CPUParams_3D_plain_to_blocked = { - conv_avx2_plain_to_blocked_3D, - conv_avx512_plain_to_blocked_3D, -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_PlainToBlocked_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D_plain_to_blocked)), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_PlainToBlocked_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_3D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_3D_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D_plain_to_blocked)), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_3D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_plain_to_blocked_3D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Kernel_1x1 (1D) ============= */ - -const auto convParams_ExplicitPadding_1x1_1D = ::testing::Combine( - ::testing::Values(SizeVector({1})), - ::testing::Values(SizeVector({1})), - ::testing::Values(std::vector({0})), - ::testing::Values(std::vector({0})), - ::testing::Values(SizeVector({1})), - ::testing::Values(63), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_1x1_1D = { - conv_sse42_1D_1x1, - conv_avx2_1D_1x1, - conv_avx512_1D_1x1, - conv_sse42_1D_1x1_nspc, - conv_avx2_1D_1x1_nspc, - conv_avx2_1D_1x1_nspc_brgconv, - conv_avx512_1D_1x1_nspc, - conv_avx512_1D_1x1_nspc_brgconv -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1x1_1D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D_1x1, conv_avx512_2D_1x1_nspc, - conv_avx512_1D_1x1_nspc_brgconv, conv_avx512_1D_1x1_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - 
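// A minimal, self-contained sketch (illustrative, not part of this patch) of the
// INSTANTIATE_TEST_SUITE_P / ::testing::Combine pattern used by every suite in this
// file: each element of the cartesian product of the value generators becomes one
// test instance, which is why a single instantiation covers many kernel/precision/
// shape/CPU-param combinations. The names CombineDemo and smoke_Demo are invented
// for the sketch; only the gtest API calls themselves are taken as given.
#include <gtest/gtest.h>
#include <tuple>

class CombineDemo : public ::testing::TestWithParam<std::tuple<int, char>> {};

TEST_P(CombineDemo, EachComboRunsOnce) {
    int kernel;
    char layout;
    std::tie(kernel, layout) = GetParam();
    EXPECT_GT(kernel, 0);  // trivial check; the real suites compare CPU results against references
}

// 2 kernel sizes x 2 layouts -> 4 generated test cases, analogous to how the
// convolution suites above multiply kernels, precisions, shapes, and CPU params.
INSTANTIATE_TEST_SUITE_P(smoke_Demo,
                         CombineDemo,
                         ::testing::Combine(::testing::Values(1, 3), ::testing::Values('a', 'b')));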
-INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1x1_1D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Kernel_1x1 (2D) ============= */ - -const auto convParams_ExplicitPadding_1x1_2D = ::testing::Combine( - ::testing::Values(SizeVector({1, 1})), - ::testing::Values(SizeVector({1, 1})), - ::testing::Values(std::vector({0, 0})), - ::testing::Values(std::vector({0, 0})), - ::testing::Values(SizeVector({1, 1})), - ::testing::Values(63), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_1x1_2D = { - conv_sse42_2D_1x1, - conv_avx2_2D_1x1, - conv_avx512_2D_1x1, - conv_sse42_2D_1x1_nspc, - conv_avx2_2D_1x1_nspc, - conv_avx2_2D_1x1_nspc_brgconv, - conv_avx512_2D_1x1_nspc, - conv_avx512_2D_1x1_nspc_brgconv -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1x1_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D_1x1, conv_avx512_2D_1x1_nspc, - conv_avx512_2D_1x1_nspc_brgconv, conv_avx512_2D_1x1_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1x1_2D)), - ::testing::Values(fusingSum), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Jit Planar ============= */ - -/* ============= Convolution planar params (2D) ============= */ -const std::vector CPUParams_Jit_Planar_2D = { - // sse42 is not supported - conv_avx2_planar_2D, - conv_avx512_planar_2D, -}; - -const auto convParams_Planar_ExplicitPadding_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::Values(SizeVector{1, 1}), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::Values(1), - 
::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const auto convParams_Planar_ExplicitPadding_2D_dilated = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::Values(SizeVector{1, 1}), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::Values(SizeVector{2, 2}), - ::testing::Values(1), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_Jit_Planar_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Jit_Planar_2D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_Jit_Planar_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Jit_Planar_2D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -/* ============= Convolution planar params (3D) ============= */ -const std::vector CPUParams_Jit_Planar_3D = { - // sse42 is not supported - conv_avx2_planar_3D, - conv_avx512_planar_3D, -}; - -const auto convParams_Planar_ExplicitPadding_3D = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::Values(SizeVector{1, 1, 1}), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::Values(1), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const auto convParams_Planar_ExplicitPadding_3D_dilated = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::Values(SizeVector{1, 1, 1}), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::Values(SizeVector{2, 2, 2}), - ::testing::Values(1), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_Jit_Planar_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Jit_Planar_3D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(cpuEmptyPluginConfig)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_Jit_Planar_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Jit_Planar_3D)), - 
::testing::Values(emptyFusingSpec, fusingRelu),
-            ::testing::Values(cpuEmptyPluginConfig)),
-    ConvolutionLayerCPUTest::getTestCaseName);
-
-/* ============= Convolution auto padding tests ============= */
-
-const auto convParams_AutoPadding_2D = ::testing::Combine(
-        ::testing::Values(kernels2d.front()),
-        ::testing::ValuesIn(strides2d),
-        ::testing::ValuesIn(padBegins2d),
-        ::testing::ValuesIn(padEnds2d),
-        ::testing::ValuesIn(dilations2d),
-        ::testing::ValuesIn(numOutChannels),
-        ::testing::Values(ngraph::op::PadType::SAME_UPPER, ngraph::op::PadType::SAME_LOWER)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_AutoPad_FP32, ConvolutionLayerCPUTest,
-                         ::testing::Combine(
-                                 ::testing::Combine(
-                                         convParams_AutoPadding_2D,
-                                         ::testing::Values(ElementType::f32),
-                                         ::testing::Values(ElementType::undefined),
-                                         ::testing::Values(ElementType::undefined),
-                                         ::testing::ValuesIn(inputShapes2d),
-                                         ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                                 ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)),
-                                 ::testing::Values(emptyFusingSpec),
-                                 ::testing::Values(cpuEmptyPluginConfig)),
-                         ConvolutionLayerCPUTest::getTestCaseName);
-
-/* ============= */
-
-} // namespace
-
-/* ============= Large Filter Test ============= */
-namespace {
-
-const size_t outChannels = 80;
-
-const SizeVector kernel = { 251 };
-const SizeVector stride = { 10 };
-const std::vector<ptrdiff_t> padBegins = { 0 };
-const std::vector<ptrdiff_t> padEnds = { 0 };
-const SizeVector dilations = { 1 };
-
-const auto convParams_1D = ::testing::Combine(
-        ::testing::Values(kernel),
-        ::testing::Values(stride),
-        ::testing::Values(padBegins),
-        ::testing::Values(padEnds),
-        ::testing::Values(dilations),
-        ::testing::Values(outChannels),
-        ::testing::Values(ngraph::op::PadType::EXPLICIT)
-);
-
-std::vector<InputShape> inShapes = {
-    {{}, {{ 1, 1, 600 }}},
-    {
-        //dynamic shape
-        { -1, 1, -1 },
-        { //target static shapes
-            { 1, 1, 600 },
-            { 10, 1, 700 },
-            { 1, 1, 600 }
-        }
-    }
-};
-
-INSTANTIATE_TEST_SUITE_P(smoke_Conv_Large_Filter, ConvolutionLayerCPUTest,
-                         ::testing::Combine(
-                                 ::testing::Combine(
-                                         convParams_1D,
-                                         ::testing::Values(ElementType::f32),
-                                         ::testing::Values(ElementType::f32),
-                                         ::testing::Values(ElementType::undefined),
-                                         ::testing::ValuesIn(inShapes),
-                                         ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                                 ::testing::Values(CPUSpecificParams{{}, {}, {}, CPUTestsBase::any_type}),
-                                 ::testing::Values(emptyFusingSpec),
-                                 ::testing::Values(cpuEmptyPluginConfig)),
-                         ConvolutionLayerCPUTest::getTestCaseName);
-
-} // namespace
-} // namespace CPULayerTestsDefinitions
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp
index 265ab3da1bdd64..b1737e65469191 100755
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/convolution_backprop_data.cpp
@@ -2,39 +2,34 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
-#include
+#include "shared_test_classes/single_op/convolution_backprop_data.hpp"
+#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "cpu_shape.h"
-#include "ov_models/builders.hpp"
 #include "openvino/core/preprocess/pre_post_process.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 #include "test_utils/convolution_params.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 #include "test_utils/filter_cpu_info.hpp"
 #include "test_utils/fusing_test_utils.hpp"
-#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
 
 using namespace CPUTestUtils;
-using namespace ov::test;
+namespace ov {
+namespace test {
 
-namespace CPULayerTestsDefinitions {
+using DeconvSpecParams = ov::test::convBackpropDataSpecificParams;
 
-using DeconvSpecParams = LayerTestsDefinitions::convBackpropDataSpecificParams;
+using DeconvInputData = std::tuple<InputShape,                           // data shape
+                                   ov::test::utils::InputLayerType,      // 'output_shape' input type
+                                   std::vector<std::vector<int32_t>>>;   // values for 'output_shape'
 
-using DeconvInputData = std::tuple<InputShape,                           // data shape
-                                   ngraph::helpers::InputLayerType,      // 'output_shape' input type
-                                   std::vector<std::vector<int32_t>>>;   // values for 'output_shape'
-
-using DeconvLayerCPUTestParamsSet = std::tuple<DeconvSpecParams,
-                                               DeconvInputData,
-                                               ElementType,
-                                               fusingSpecificParams,
-                                               CPUSpecificParams,
-                                               std::map<std::string, std::string>>;
+using DeconvLayerCPUTestParamsSet =
+    std::tuple<DeconvSpecParams, DeconvInputData, ElementType, fusingSpecificParams, CPUSpecificParams, ov::AnyMap>;
 
 class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLayerCPUTestParamsSet>,
-                                  virtual public SubgraphBaseTest, public CpuTestWithFusing {
+                                  virtual public SubgraphBaseTest,
+                                  public CpuTestWithFusing {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<DeconvLayerCPUTestParamsSet> obj) {
         DeconvSpecParams basicParamsSet;
@@ -42,17 +37,17 @@ class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLaye
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
         std::tie(basicParamsSet, inputData, prec, fusingParams, cpuParams, additionalConfig) = obj.param;
 
-        ngraph::op::PadType padType;
-        InferenceEngine::SizeVector kernel, stride, dilation;
+        ov::op::PadType padType;
+        std::vector<size_t> kernel, stride, dilation;
         std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
         size_t convOutChannels;
         std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet;
 
         InputShape inputShape;
-        ngraph::helpers::InputLayerType outShapeType;
+        ov::test::utils::InputLayerType outShapeType;
         std::vector<std::vector<int32_t>> outShapeData;
         std::tie(inputShape, outShapeType, outShapeData) = inputData;
 
@@ -88,20 +83,20 @@ class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLaye
 kernel, stride;
 
     void SetUp() override {
         rel_threshold = 1e-4f;
@@ -187,11 +209,11 @@ class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLaye
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
         std::tie(basicParamsSet, inputData, prec, fusingParams, cpuParams, additionalConfig) = this->GetParam();
 
         InputShape inputShape;
-        ngraph::helpers::InputLayerType outShapeType;
+        ov::test::utils::InputLayerType outShapeType;
         std::tie(inputShape, outShapeType, outShapeData) = inputData;
 
         configuration.insert(additionalConfig.begin(), additionalConfig.end());
@@ -200,7 +222,8 @@ class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLaye
         std::vector<InputShape> paramsShapes;
         paramsShapes.push_back(inputShape);
 
-        if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
+        if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) {
             const auto outShapeDims = ov::Shape{outShapeData.front().size()};
-            paramsShapes.push_back(InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
+            paramsShapes.push_back(
+                InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
         }
 
         init_input_shapes(paramsShapes);
@@ -223,8 +247,8 @@ class DeconvolutionLayerCPUTest : public testing::WithParamInterface<DeconvLaye
 dilation;
     std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
     size_t convOutChannels;
    std::vector<std::vector<int32_t>> outShapeData;
@@ -239,7 +263,9 @@ TEST_P(DeconvolutionLayerCPUTest, CompareWithRefs) {
     if (stride.size() > 2)
         isSupportedParams &= stride[stride.size() - 3] <= kernel[kernel.size() - 3];
     if (!isSupportedParams) {
-        GTEST_SKIP() << "Fusing with strides more than kernel size was disabled, because oneDNN deconvolution doesn't support it" << std::endl;
+        GTEST_SKIP() << "Fusing with strides more than kernel size was disabled, because oneDNN deconvolution "
+                        "doesn't support it"
+                     << std::endl;
     }
 }
 
@@ -250,228 +276,187 @@
 namespace {
 
 /* COMMON PARAMS */
-const std::vector<fusingSpecificParams> fusingParamsSet{
-        emptyFusingSpec,
-        fusingScaleShift
-};
+const std::vector<fusingSpecificParams> fusingParamsSet{emptyFusingSpec, fusingScaleShift};
 
-const std::map<std::string, std::string> cpuEmptyPluginConfig;
-const std::map<std::string, std::string> cpuBF16PluginConfig = { { InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16,
-                                                                   InferenceEngine::PluginConfigParams::YES } };
-const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = { {} };
+const ov::AnyMap cpuEmptyPluginConfig;
+const ov::AnyMap cpuBF16PluginConfig = {
+    {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}};
+const std::vector<std::vector<ptrdiff_t>> emptyOutputPadding = {{}};
 
 /* ============= Deconvolution params (planar layout) ============= */
-const InferenceEngine::SizeVector numOutChannels_Planar = { 6 };
+const std::vector<size_t> numOutChannels_Planar = {6};
 
 /* ============= Deconvolution params (blocked layout) ============= */
-const InferenceEngine::SizeVector numOutChannels_Blocked = { 64 };
+const std::vector<size_t> numOutChannels_Blocked = {64};
 
 /* ============= Deconvolution params (2D) ============= */
-const std::vector<SizeVector> kernels2d = { {3, 3}, {1, 1} };
-const std::vector<SizeVector> strides2d = { {1, 1}, {2, 2} };
-const std::vector<std::vector<ptrdiff_t>> padBegins2d = { {0, 0} };
-const std::vector<std::vector<ptrdiff_t>> padEnds2d = { {0, 0} };
-const std::vector<SizeVector> dilations2d = { {1, 1} };
+const std::vector<std::vector<size_t>> kernels2d = {{3, 3}, {1, 1}};
+const std::vector<std::vector<size_t>> strides2d = {{1, 1}, {2, 2}};
+const std::vector<std::vector<ptrdiff_t>> padBegins2d = {{0, 0}};
+const std::vector<std::vector<ptrdiff_t>> padEnds2d = {{0, 0}};
+const std::vector<std::vector<size_t>> dilations2d = {{1, 1}};
 
-
-const std::vector<SizeVector> deconvAmxKernels2d = { {3, 3}, {2, 2}};
-const std::vector<SizeVector> deconvAmxStrides2d = { {2, 2}};
+const std::vector<std::vector<size_t>> deconvAmxKernels2d = {{3, 3}, {2, 2}};
+const std::vector<std::vector<size_t>> deconvAmxStrides2d = {{2, 2}};
 
 /* ============= Deconvolution params (3D) ============= */
-const std::vector<SizeVector> kernels3d = { {3, 3, 3}, {1, 1, 1} };
-const std::vector<SizeVector> strides3d = { {1, 1, 1}, {2, 2, 2} };
-const std::vector<std::vector<ptrdiff_t>> padBegins3d = { {0, 0, 0} };
-const std::vector<std::vector<ptrdiff_t>> padEnds3d = { {0, 0, 0} };
-const std::vector<SizeVector> dilations3d = { {1, 1, 1} };
+const std::vector<std::vector<size_t>> kernels3d = {{3, 3, 3}, {1, 1, 1}};
+const std::vector<std::vector<size_t>> strides3d = {{1, 1, 1}, {2, 2, 2}};
+const std::vector<std::vector<ptrdiff_t>> padBegins3d = {{0, 0, 0}};
+const std::vector<std::vector<ptrdiff_t>> padEnds3d = {{0, 0, 0}};
+const std::vector<std::vector<size_t>> dilations3d = {{1, 1, 1}};
 
-const std::vector<SizeVector> deconvAmxKernels3d = { {3, 3, 3}, {2, 2, 2} };
-const std::vector<SizeVector> deconvAmxStrides3d = { {2, 2, 2} };
+const std::vector<std::vector<size_t>> deconvAmxKernels3d = {{3, 3, 3}, {2, 2, 2}};
+const std::vector<std::vector<size_t>> deconvAmxStrides3d = {{2, 2, 2}};
 
 /* ============= */
 
 /* INSTANCES */
 /* ============= Deconvolution (Planar 2D) ============= */
-const auto convParams_ExplicitPadding_Planar_2D = ::testing::Combine(
-        ::testing::ValuesIn(kernels2d),
-        ::testing::ValuesIn(strides2d),
-        ::testing::ValuesIn(padBegins2d),
-        ::testing::ValuesIn(padEnds2d),
-        ::testing::ValuesIn(dilations2d),
-        ::testing::ValuesIn(numOutChannels_Planar),
-        ::testing::Values(ngraph::op::PadType::EXPLICIT),
-        ::testing::ValuesIn(emptyOutputPadding)
-);
+const auto convParams_ExplicitPadding_Planar_2D = ::testing::Combine(::testing::ValuesIn(kernels2d),
+                                                                     ::testing::ValuesIn(strides2d),
+                                                                     ::testing::ValuesIn(padBegins2d),
+                                                                     ::testing::ValuesIn(padEnds2d),
+                                                                     ::testing::ValuesIn(dilations2d),
+                                                                     ::testing::ValuesIn(numOutChannels_Planar),
+
::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); const std::vector Planar_2D_inputs_smoke = { - DeconvInputData{ - InputShape{{}, {{ 2, 12, 7, 7 }}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 12, -1, -1}, {{ 1, 12, 7, 7}, { 2, 12, 5, 7}, { 1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::PARAMETER, - {{15, 15}, {9, 10}, {15, 15}} - } -}; + DeconvInputData{InputShape{{}, {{2, 12, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}}, + DeconvInputData{InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{15, 15}, {9, 10}, {15, 15}}}}; const std::vector Planar_2D_inputs_nightly = { - DeconvInputData{ - InputShape{{-1, 12, -1, -1}, {{ 2, 12, 7, 7}, { 2, 12, 5, 7}, { 1, 12, 9, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 12, 7, 7}, {{ 1, 12, 7, 7}, { 2, 12, 7, 7}, { 1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{15, 15}} - }, - DeconvInputData{ - InputShape{{{1, 10}, 12, 7, 7}, {{ 1, 12, 7, 7}, { 2, 12, 7, 7}, { 3, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{15, 15}} - }, + DeconvInputData{InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}}, + ov::test::utils::InputLayerType::CONSTANT, + {}}, + DeconvInputData{InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{15, 15}}}, + DeconvInputData{InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{15, 15}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Planar_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_2D, - ::testing::ValuesIn(Planar_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Planar_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_2D, - ::testing::ValuesIn(Planar_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Planar_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_2D, - ::testing::ValuesIn(Planar_2D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Planar_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_2D, - ::testing::ValuesIn(Planar_2D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Planar_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Planar_2D, + 
::testing::ValuesIn(Planar_2D_inputs_smoke),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::ValuesIn(fusingParamsSet),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})),
+                                            ::testing::Values(cpuEmptyPluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Planar_BF16,
+                         DeconvolutionLayerCPUTest,
+                         ::testing::Combine(convParams_ExplicitPadding_Planar_2D,
+                                            ::testing::ValuesIn(Planar_2D_inputs_smoke),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::ValuesIn(fusingParamsSet),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})),
+                                            ::testing::Values(cpuBF16PluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Planar_FP32,
+                         DeconvolutionLayerCPUTest,
+                         ::testing::Combine(convParams_ExplicitPadding_Planar_2D,
+                                            ::testing::ValuesIn(Planar_2D_inputs_nightly),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::ValuesIn(fusingParamsSet),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})),
+                                            ::testing::Values(cpuEmptyPluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Planar_BF16,
+                         DeconvolutionLayerCPUTest,
+                         ::testing::Combine(convParams_ExplicitPadding_Planar_2D,
+                                            ::testing::ValuesIn(Planar_2D_inputs_nightly),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::ValuesIn(fusingParamsSet),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})),
+                                            ::testing::Values(cpuBF16PluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
 
 /* ============= Deconvolution (Planar 3D) ============= */
 const std::vector<DeconvInputData> Planar_3D_inputs_smoke = {
-    DeconvInputData{
-        InputShape{{}, {{ 2, 12, 7, 7, 7 }}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {}
-    },
-    DeconvInputData{
-        InputShape{{-1, 12, -1, -1, -1}, {{ 2, 12, 7, 7, 7}, { 2, 12, 5, 7, 7}, { 1, 12, 9, 4, 9}}},
-        ngraph::helpers::InputLayerType::PARAMETER,
-        {{15, 15, 15}, {9, 10, 10}, {9, 9, 9}}
-    }
-};
+    DeconvInputData{InputShape{{}, {{2, 12, 7, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 12, -1, -1, -1}, {{2, 12, 7, 7, 7}, {2, 12, 5, 7, 7}, {1, 12, 9, 4, 9}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{15, 15, 15}, {9, 10, 10}, {9, 9, 9}}}};
 
 const std::vector<DeconvInputData> Planar_3D_inputs_nightly = {
     DeconvInputData{
        // -1 will make the deconv use 64 to infer the output shape; for 3D the output shape is too big
        // for the gemm bwd kernel to buffer the intermediate results
-        InputShape{{-1, 12, {5, 9}, {4, 7}, {7, 9}}, {{ 2, 12, 7, 7, 7}, { 2, 12, 5, 7, 7}, { 1, 12, 9, 4, 9}}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {}
-    },
+        InputShape{{-1, 12, {5, 9}, {4, 7}, {7, 9}}, {{2, 12, 7, 7, 7}, {2, 12, 5, 7, 7}, {1, 12, 9, 4, 9}}},
+        ov::test::utils::InputLayerType::CONSTANT,
+        {}},
     DeconvInputData{
-        InputShape{{-1, 12, -1, -1, -1}, {{ 2, 12, 7, 7, 7}, { 2, 12, 5, 7, 7}, { 1, 12, 9, 4, 9}, { 2, 12, 7, 7, 7}}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {{10, 16, 16}}
-    },
-    DeconvInputData{
-        InputShape{{{1, 10}, 12, 7, 7, 7}, {{ 2, 12, 7, 7, 7}, { 1, 12, 7, 7, 7}, { 3, 12, 7, 7, 7}}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {{15, 15, 15}}
-    }
-};
-
-const auto convParams_ExplicitPadding_Planar_3D = ::testing::Combine(
-        ::testing::ValuesIn(kernels3d),
-        ::testing::ValuesIn(strides3d),
-        ::testing::ValuesIn(padBegins3d),
-        ::testing::ValuesIn(padEnds3d),
-        ::testing::ValuesIn(dilations3d),
-        ::testing::ValuesIn(numOutChannels_Planar),
-        ::testing::Values(ngraph::op::PadType::EXPLICIT),
-
::testing::ValuesIn(emptyOutputPadding) -); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Planar_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_3D, - ::testing::ValuesIn(Planar_3D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Planar_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_3D, - ::testing::ValuesIn(Planar_3D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Planar_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_3D, - ::testing::ValuesIn(Planar_3D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Planar_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Planar_3D, - ::testing::ValuesIn(Planar_3D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); + InputShape{{-1, 12, -1, -1, -1}, {{2, 12, 7, 7, 7}, {2, 12, 5, 7, 7}, {1, 12, 9, 4, 9}, {2, 12, 7, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{10, 16, 16}}}, + DeconvInputData{InputShape{{{1, 10}, 12, 7, 7, 7}, {{2, 12, 7, 7, 7}, {1, 12, 7, 7, 7}, {3, 12, 7, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{15, 15, 15}}}}; + +const auto convParams_ExplicitPadding_Planar_3D = ::testing::Combine(::testing::ValuesIn(kernels3d), + ::testing::ValuesIn(strides3d), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::ValuesIn(numOutChannels_Planar), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Planar_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Planar_3D, + ::testing::ValuesIn(Planar_3D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Planar_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Planar_3D, + ::testing::ValuesIn(Planar_3D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Planar_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Planar_3D, + ::testing::ValuesIn(Planar_3D_inputs_nightly), + 
::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Planar_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Planar_3D, + ::testing::ValuesIn(Planar_3D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); /* ============= Deconvolution (Blocked 2D) ============= */ const std::vector Blocked_2D_inputs_smoke = { - DeconvInputData{ - InputShape{{}, {{ 2, 67, 7, 7 }}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 67, -1, -1}, {{ 2, 67, 7, 7}, { 2, 67, 5, 7}, { 1, 67, 9, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - {{15, 15}, {9, 10}, {9, 9}} - } -}; - + DeconvInputData{InputShape{{}, {{2, 67, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}}, + DeconvInputData{InputShape{{-1, 67, -1, -1}, {{2, 67, 7, 7}, {2, 67, 5, 7}, {1, 67, 9, 4}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{15, 15}, {9, 10}, {9, 9}}}}; const auto convParams_ExplicitPadding_Blocked_2D_nightly = ::testing::Combine( ::testing::ValuesIn(kernels2d), @@ -482,397 +467,352 @@ const auto convParams_ExplicitPadding_Blocked_2D_nightly = ::testing::Combine( ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); const std::vector Blocked_2D_inputs_nightly = { - DeconvInputData{ - InputShape{{-1, 67, -1, -1}, {{ 2, 67, 7, 7}, { 2, 67, 5, 7}, { 1, 67, 9, 4}, { 2, 67, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 67, -1, -1}, {{ 2, 67, 7, 7}, { 2, 67, 5, 7}, { 1, 67, 9, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{15, 15}} - }, - DeconvInputData{ - InputShape{{ {1, 10}, 67, 7, 7}, {{ 2, 67, 7, 7}, { 3, 67, 7, 7}, { 1, 67, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{15, 15}} - } -}; - -const auto convParams_ExplicitPadding_Blocked_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); - -const auto convParams_ExplicitPadding_AMX_2D = ::testing::Combine( - ::testing::ValuesIn(deconvAmxKernels2d), - ::testing::ValuesIn(deconvAmxStrides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Blocked_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_2D, - ::testing::ValuesIn(Blocked_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, 
conv_avx2_2D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Blocked_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_2D, - ::testing::ValuesIn(Blocked_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_NSPC_BF16_AMX_NO_FUSING, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_AMX_2D, - ::testing::ValuesIn(Blocked_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn({emptyFusingSpec}), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_nspc_amx})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_NSPC_INT8_AMX, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_AMX_2D, - ::testing::ValuesIn(Blocked_2D_inputs_smoke), - ::testing::Values(ElementType::i8), - ::testing::ValuesIn({emptyFusingSpec, fusingClampRoundAddRelu}), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_nspc_amx})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Blocked_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_2D_nightly, - ::testing::ValuesIn(Blocked_2D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Blocked_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_2D_nightly, - ::testing::ValuesIn(Blocked_2D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); + DeconvInputData{InputShape{{-1, 67, -1, -1}, {{2, 67, 7, 7}, {2, 67, 5, 7}, {1, 67, 9, 4}, {2, 67, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {}}, + DeconvInputData{InputShape{{-1, 67, -1, -1}, {{2, 67, 7, 7}, {2, 67, 5, 7}, {1, 67, 9, 4}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{15, 15}}}, + DeconvInputData{InputShape{{{1, 10}, 67, 7, 7}, {{2, 67, 7, 7}, {3, 67, 7, 7}, {1, 67, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{15, 15}}}}; + +const auto convParams_ExplicitPadding_Blocked_2D = ::testing::Combine(::testing::ValuesIn(kernels2d), + ::testing::ValuesIn(strides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +const auto convParams_ExplicitPadding_AMX_2D = ::testing::Combine(::testing::ValuesIn(deconvAmxKernels2d), + ::testing::ValuesIn(deconvAmxStrides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_Blocked), + 
::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Blocked_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_2D, + ::testing::ValuesIn(Blocked_2D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Blocked_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_2D, + ::testing::ValuesIn(Blocked_2D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_NSPC_BF16_AMX_NO_FUSING, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_AMX_2D, + ::testing::ValuesIn(Blocked_2D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn({emptyFusingSpec}), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_nspc_amx})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_NSPC_INT8_AMX, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_AMX_2D, + ::testing::ValuesIn(Blocked_2D_inputs_smoke), + ::testing::Values(ElementType::i8), + ::testing::ValuesIn({emptyFusingSpec, fusingClampRoundAddRelu}), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_nspc_amx})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Blocked_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_2D_nightly, + ::testing::ValuesIn(Blocked_2D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_Deconv_2D_Blocked_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_2D_nightly, + ::testing::ValuesIn(Blocked_2D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); /* ============= Deconvolution (Blocked 3D) ============= */ const std::vector Blocked_3D_inputs_smoke = { - DeconvInputData{ - InputShape{{}, {{ 2, 35, 7, 7, 7 }}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 35, -1, -1, -1}, {{ 1, 35, 5, 5, 5}, { 2, 35, 5, 7, 5}}}, - ngraph::helpers::InputLayerType::PARAMETER, - {{7, 7, 7}, {7, 9, 7}} - } -}; - -const auto convParams_ExplicitPadding_Blocked_3D_nightly = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn({strides3d[0]}), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::Values(32), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); + 
DeconvInputData{InputShape{{}, {{2, 35, 7, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}}, + DeconvInputData{InputShape{{-1, 35, -1, -1, -1}, {{1, 35, 5, 5, 5}, {2, 35, 5, 7, 5}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{7, 7, 7}, {7, 9, 7}}}}; + +const auto convParams_ExplicitPadding_Blocked_3D_nightly = + ::testing::Combine(::testing::ValuesIn(kernels3d), + ::testing::ValuesIn({strides3d[0]}), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::Values(32), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); const std::vector Blocked_3D_inputs_nightly = { - DeconvInputData{ - InputShape{{-1, 35, -1, -1, -1}, {{ 1, 35, 5, 5, 5}, { 2, 35, 5, 7, 5}, { 1, 35, 5, 5, 5}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 35, -1, -1, -1}, {{ 1, 35, 5, 5, 5}, { 2, 35, 5, 7, 5}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{7, 7, 7}} - }, - DeconvInputData{ - InputShape{{{1, 10}, 35, 5, 5, 5}, {{ 1, 35, 5, 5, 5}, { 2, 35, 5, 5, 5}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{7, 7, 7}} - } -}; - -const auto convParams_ExplicitPadding_Blocked_3D = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::Values(32), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); - -const auto convParams_ExplicitPadding_AMX_3D = ::testing::Combine( - ::testing::ValuesIn(deconvAmxKernels3d), - ::testing::ValuesIn(deconvAmxStrides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::Values(32), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Blocked_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_3D, - ::testing::ValuesIn(Blocked_3D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Blocked_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_3D, - ::testing::ValuesIn(Blocked_3D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_NSPC_BF16_AMX_NO_FUSING, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_AMX_3D, - ::testing::ValuesIn(Blocked_3D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn({emptyFusingSpec}), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D_nspc_amx})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_NSPC_INT8_AMX, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_AMX_3D, - ::testing::ValuesIn(Blocked_3D_inputs_smoke), - ::testing::Values(ElementType::i8), - ::testing::ValuesIn({emptyFusingSpec, fusingClampRoundAddRelu}), - 
::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D_nspc_amx})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Blocked_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_3D_nightly, - ::testing::ValuesIn(Blocked_3D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Blocked_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_Blocked_3D_nightly, - ::testing::ValuesIn(Blocked_3D_inputs_nightly), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); + DeconvInputData{InputShape{{-1, 35, -1, -1, -1}, {{1, 35, 5, 5, 5}, {2, 35, 5, 7, 5}, {1, 35, 5, 5, 5}}}, + ov::test::utils::InputLayerType::CONSTANT, + {}}, + DeconvInputData{InputShape{{-1, 35, -1, -1, -1}, {{1, 35, 5, 5, 5}, {2, 35, 5, 7, 5}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{7, 7, 7}}}, + DeconvInputData{InputShape{{{1, 10}, 35, 5, 5, 5}, {{1, 35, 5, 5, 5}, {2, 35, 5, 5, 5}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{7, 7, 7}}}}; + +const auto convParams_ExplicitPadding_Blocked_3D = ::testing::Combine(::testing::ValuesIn(kernels3d), + ::testing::ValuesIn(strides3d), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::Values(32), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +const auto convParams_ExplicitPadding_AMX_3D = ::testing::Combine(::testing::ValuesIn(deconvAmxKernels3d), + ::testing::ValuesIn(deconvAmxStrides3d), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::Values(32), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Blocked_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_3D, + ::testing::ValuesIn(Blocked_3D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_Blocked_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_3D, + ::testing::ValuesIn(Blocked_3D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_NSPC_BF16_AMX_NO_FUSING, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_AMX_3D, + ::testing::ValuesIn(Blocked_3D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn({emptyFusingSpec}), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D_nspc_amx})), + ::testing::Values(cpuBF16PluginConfig)), + 
DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_NSPC_INT8_AMX, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_AMX_3D, + ::testing::ValuesIn(Blocked_3D_inputs_smoke), + ::testing::Values(ElementType::i8), + ::testing::ValuesIn({emptyFusingSpec, fusingClampRoundAddRelu}), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D_nspc_amx})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Blocked_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_3D_nightly, + ::testing::ValuesIn(Blocked_3D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_Deconv_3D_Blocked_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_Blocked_3D_nightly, + ::testing::ValuesIn(Blocked_3D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), + ::testing::Values(cpuBF16PluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); /* ============= Kernel_1x1 (2D) ============= */ -const auto convParams_ExplicitPadding_1x1_2D = ::testing::Combine( - ::testing::Values(InferenceEngine::SizeVector({1, 1})), - ::testing::Values(InferenceEngine::SizeVector({1, 1})), - ::testing::Values(std::vector({0, 0})), - ::testing::Values(std::vector({0, 0})), - ::testing::Values(InferenceEngine::SizeVector({1, 1})), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding) -); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_1x1_FP32, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D, - ::testing::ValuesIn(Blocked_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1, conv_avx2_2D_1x1})), - ::testing::Values(cpuEmptyPluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_1x1_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D, - ::testing::ValuesIn(Blocked_2D_inputs_smoke), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(fusingParamsSet), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1, conv_avx2_2D_1x1})), - ::testing::Values(cpuBF16PluginConfig)), - DeconvolutionLayerCPUTest::getTestCaseName); +const auto convParams_ExplicitPadding_1x1_2D = ::testing::Combine(::testing::Values(std::vector({1, 1})), + ::testing::Values(std::vector({1, 1})), + ::testing::Values(std::vector({0, 0})), + ::testing::Values(std::vector({0, 0})), + ::testing::Values(std::vector({1, 1})), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_1x1_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(convParams_ExplicitPadding_1x1_2D, + ::testing::ValuesIn(Blocked_2D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1, + 
conv_avx2_2D_1x1})),
+                                            ::testing::Values(cpuEmptyPluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_1x1_BF16,
+                         DeconvolutionLayerCPUTest,
+                         ::testing::Combine(convParams_ExplicitPadding_1x1_2D,
+                                            ::testing::ValuesIn(Blocked_2D_inputs_smoke),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::ValuesIn(fusingParamsSet),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_1x1,
+                                                                                        conv_avx2_2D_1x1})),
+                                            ::testing::Values(cpuBF16PluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
 
 /* ============= Reorder + Deconvolution ============= */
-INSTANTIATE_TEST_SUITE_P(smoke_reorder_Deconv_2D, DeconvolutionLayerCPUTest,
-    ::testing::Combine(
-        ::testing::Combine(::testing::ValuesIn(kernels2d),
-                           ::testing::Values(InferenceEngine::SizeVector{1, 1}),
-                           ::testing::ValuesIn(padBegins2d),
-                           ::testing::ValuesIn(padEnds2d),
-                           ::testing::ValuesIn(dilations2d),
-                           ::testing::ValuesIn(numOutChannels_Blocked),
-                           ::testing::Values(ngraph::op::PadType::EXPLICIT),
-                           ::testing::ValuesIn(emptyOutputPadding)),
-        ::testing::Values(DeconvInputData{InputShape{{-1, 67, -1, -1}, {{ 1, 67, 7, 7}, { 1, 67, 9, 4}, { 1, 67, 5, 7}, { 1, 67, 7, 7}, { 1, 67, 9, 4}}},
-                          ngraph::helpers::InputLayerType::PARAMETER,
-                          {{15, 15}, {9, 9}, {9, 10}, {15, 15}, {9, 9}}}),
-        ::testing::Values(ElementType::f32),
-        ::testing::Values(emptyFusingSpec),
-        ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})),
-        ::testing::Values(cpuEmptyPluginConfig)),
+INSTANTIATE_TEST_SUITE_P(
+    smoke_reorder_Deconv_2D,
+    DeconvolutionLayerCPUTest,
+    ::testing::Combine(::testing::Combine(::testing::ValuesIn(kernels2d),
+                                          ::testing::Values(std::vector<size_t>{1, 1}),
+                                          ::testing::ValuesIn(padBegins2d),
+                                          ::testing::ValuesIn(padEnds2d),
+                                          ::testing::ValuesIn(dilations2d),
+                                          ::testing::ValuesIn(numOutChannels_Blocked),
+                                          ::testing::Values(ov::op::PadType::EXPLICIT),
+                                          ::testing::ValuesIn(emptyOutputPadding)),
+                       ::testing::Values(DeconvInputData{
+                           InputShape{{-1, 67, -1, -1},
+                                      {{1, 67, 7, 7}, {1, 67, 9, 4}, {1, 67, 5, 7}, {1, 67, 7, 7}, {1, 67, 9, 4}}},
+                           ov::test::utils::InputLayerType::PARAMETER,
+                           {{15, 15}, {9, 9}, {9, 10}, {15, 15}, {9, 9}}}),
+                       ::testing::Values(ElementType::f32),
+                       ::testing::Values(emptyFusingSpec),
+                       ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})),
+                       ::testing::Values(cpuEmptyPluginConfig)),
                          DeconvolutionLayerCPUTest::getTestCaseName);
 
 /* ============= Deconvolution auto padding tests ============= */
 const std::vector<DeconvInputData> inputs_2D_AutoPadding = {
-    DeconvInputData{
-        InputShape{{}, {{ 2, 67, 7, 7 }}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {}
-    },
-    DeconvInputData{
-        InputShape{{-1, 67, -1, -1}, {{ 1, 67, 9, 4}, { 2, 67, 5, 7}, { 1, 67, 9, 4}}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {}
-    },
-    DeconvInputData{
-        InputShape{{-1, 67, -1, -1}, {{ 2, 67, 7, 7}, { 2, 67, 5, 7}, { 1, 67, 9, 4}}},
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {{15, 15}}
-    },
-    DeconvInputData{
-        InputShape{{-1, 67, -1, -1}, {{ 1, 67, 9, 4}, { 2, 67, 5, 7}, { 1, 67, 9, 4}}},
-        ngraph::helpers::InputLayerType::PARAMETER,
-        {{9, 9}, {9, 10}, {9, 9}}
-    }
-};
-
-const auto deconvParams_AutoPadding_2D = ::testing::Combine(
-    ::testing::ValuesIn(kernels2d),
-    ::testing::ValuesIn(strides2d),
-    ::testing::ValuesIn(padBegins2d),
-    ::testing::ValuesIn(padEnds2d),
-    ::testing::ValuesIn(dilations2d),
-    ::testing::ValuesIn(numOutChannels_Blocked),
-    ::testing::Values(ngraph::op::PadType::SAME_UPPER, ngraph::op::PadType::SAME_LOWER),
-    ::testing::ValuesIn(emptyOutputPadding)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_AutoPadding_FP32, DeconvolutionLayerCPUTest,
-    ::testing::Combine(
-        deconvParams_AutoPadding_2D,
-        ::testing::ValuesIn(inputs_2D_AutoPadding),
-        ::testing::Values(ElementType::f32),
-        ::testing::Values(emptyFusingSpec),
-        ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D, conv_avx512_2D})),
-        ::testing::Values(cpuEmptyPluginConfig)),
-    DeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{InputShape{{}, {{2, 67, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 67, -1, -1}, {{1, 67, 9, 4}, {2, 67, 5, 7}, {1, 67, 9, 4}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {}},
+    DeconvInputData{InputShape{{-1, 67, -1, -1}, {{2, 67, 7, 7}, {2, 67, 5, 7}, {1, 67, 9, 4}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15}}},
+    DeconvInputData{InputShape{{-1, 67, -1, -1}, {{1, 67, 9, 4}, {2, 67, 5, 7}, {1, 67, 9, 4}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{9, 9}, {9, 10}, {9, 9}}}};
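+// With SAME_UPPER / SAME_LOWER auto padding the explicit pads_begin/pads_end values
+// above are ignored and derived automatically. For ConvolutionBackpropData the
+// standard spatial relation (a generic formula, not something this patch changes) is
+//   out = stride * (in - 1) + (kernel - 1) * dilation + 1 - pads_begin - pads_end + output_padding
+// so when an output shape is requested explicitly (the outShapeData entries above,
+// e.g. {15, 15}), the auto-pad mode just distributes the padding needed to hit it
+// towards the upper or lower end of each spatial axis.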
+
+const auto deconvParams_AutoPadding_2D =
+    ::testing::Combine(::testing::ValuesIn(kernels2d),
+                       ::testing::ValuesIn(strides2d),
+                       ::testing::ValuesIn(padBegins2d),
+                       ::testing::ValuesIn(padEnds2d),
+                       ::testing::ValuesIn(dilations2d),
+                       ::testing::ValuesIn(numOutChannels_Blocked),
+                       ::testing::Values(ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER),
+                       ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_AutoPadding_FP32,
+                         DeconvolutionLayerCPUTest,
+                         ::testing::Combine(deconvParams_AutoPadding_2D,
+                                            ::testing::ValuesIn(inputs_2D_AutoPadding),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::Values(emptyFusingSpec),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D, conv_avx512_2D})),
+                                            ::testing::Values(cpuEmptyPluginConfig)),
+                         DeconvolutionLayerCPUTest::getTestCaseName);
 
 const std::vector<DeconvInputData> inputs_3D_AutoPadding = {
-    DeconvInputData{
-        InputShape{{-1, 2, 4, {32, 64}, {32, 64}}, {{1, 2, 4, 32, 32}, {1, 2, 4, 40, 40}}},
-        ngraph::helpers::InputLayerType::PARAMETER,
-        {{8, 64, 64}, {8, 80, 80}}
-    },
-    DeconvInputData{
-        InputShape{
-            {1, 64, 5, {1, std::numeric_limits<ov::Dimension::value_type>::max()}, {1, std::numeric_limits<ov::Dimension::value_type>::max()}},
-            {{1, 64, 5, 8, 8}}
-        },
-        ngraph::helpers::InputLayerType::CONSTANT,
-        {{10, 16, 16}}
-    },
-};
-
-const auto deconvParams_AutoPadding_3D = ::testing::Combine(
-    ::testing::Values(kernels3d[0]),
-    ::testing::Values(strides3d[1]),
-    ::testing::ValuesIn(padBegins3d),
-    ::testing::ValuesIn(padEnds3d),
-    ::testing::ValuesIn(dilations3d),
-    ::testing::Values(1),
-    ::testing::Values(ngraph::op::PadType::SAME_UPPER, ngraph::op::PadType::SAME_LOWER),
-    ::testing::ValuesIn(emptyOutputPadding)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_AutoPadding_FP32, DeconvolutionLayerCPUTest,
-    ::testing::Combine(
-        deconvParams_AutoPadding_3D,
-        ::testing::ValuesIn(inputs_3D_AutoPadding),
-        ::testing::Values(ElementType::f32),
-        ::testing::Values(emptyFusingSpec),
-        ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D, conv_avx512_3D})),
-        ::testing::Values(cpuEmptyPluginConfig)),
-    DeconvolutionLayerCPUTest::getTestCaseName);
-
-const auto deconvParams_AutoPadding_2D_AMX = ::testing::Combine(
-    ::testing::ValuesIn(deconvAmxKernels2d),
-    ::testing::ValuesIn(deconvAmxStrides2d),
-    ::testing::ValuesIn(padBegins2d),
-    ::testing::ValuesIn(padEnds2d),
-    ::testing::ValuesIn(dilations2d),
-    ::testing::Values(256),
-    ::testing::Values(ngraph::op::PadType::SAME_UPPER, ngraph::op::PadType::SAME_LOWER),
-
::testing::ValuesIn(emptyOutputPadding) -); - -const DeconvInputData inputs_2D_AutoPadding_AMX = { - InputShape{{-1, 512, -1, -1}, {{ 1, 512, 32, 51}, { 1, 512, 68, 101}}}, - ngraph::helpers::InputLayerType::PARAMETER, - {{64, 101}, {135, 202}} + DeconvInputData{InputShape{{-1, 2, 4, {32, 64}, {32, 64}}, {{1, 2, 4, 32, 32}, {1, 2, 4, 40, 40}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{8, 64, 64}, {8, 80, 80}}}, + DeconvInputData{InputShape{{1, + 64, + 5, + {1, std::numeric_limits::max()}, + {1, std::numeric_limits::max()}}, + {{1, 64, 5, 8, 8}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{10, 16, 16}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_AutoPadding_AMX_BF16, DeconvolutionLayerCPUTest, - ::testing::Combine( - deconvParams_AutoPadding_2D_AMX, - ::testing::Values(inputs_2D_AutoPadding_AMX), - ::testing::Values(ElementType::f32), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_nspc_brgconv_amx})), - ::testing::Values(cpuBF16PluginConfig)), +const auto deconvParams_AutoPadding_3D = + ::testing::Combine(::testing::Values(kernels3d[0]), + ::testing::Values(strides3d[1]), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::Values(1), + ::testing::Values(ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_Deconv_3D_AutoPadding_FP32, + DeconvolutionLayerCPUTest, + ::testing::Combine(deconvParams_AutoPadding_3D, + ::testing::ValuesIn(inputs_3D_AutoPadding), + ::testing::Values(ElementType::f32), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D, conv_avx512_3D})), + ::testing::Values(cpuEmptyPluginConfig)), + DeconvolutionLayerCPUTest::getTestCaseName); + +const auto deconvParams_AutoPadding_2D_AMX = + ::testing::Combine(::testing::ValuesIn(deconvAmxKernels2d), + ::testing::ValuesIn(deconvAmxStrides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::Values(256), + ::testing::Values(ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER), + ::testing::ValuesIn(emptyOutputPadding)); + +const DeconvInputData inputs_2D_AutoPadding_AMX = {InputShape{{-1, 512, -1, -1}, {{1, 512, 32, 51}, {1, 512, 68, 101}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{64, 101}, {135, 202}}}; + +INSTANTIATE_TEST_SUITE_P( + smoke_Deconv_2D_AutoPadding_AMX_BF16, + DeconvolutionLayerCPUTest, + ::testing::Combine(deconvParams_AutoPadding_2D_AMX, + ::testing::Values(inputs_2D_AutoPadding_AMX), + ::testing::Values(ElementType::f32), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D_nspc_brgconv_amx})), + ::testing::Values(cpuBF16PluginConfig)), DeconvolutionLayerCPUTest::getTestCaseName); -} // namespace +} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp index c75d192c1e98e6..bbb4c9b05ac6b0 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp @@ -92,14 +92,13 @@ class CTCGreedyDecoderLayerCPUTest : public testing::WithParamInterface using namespace CPUTestUtils; -using namespace ov::test; 
-
-namespace CPULayerTestsDefinitions {
-
-typedef std::tuple<
-    std::vector<InputShape>,            // Input shapes
-    std::tuple<int, int>,               // Axis and Batch dim
-    ElementType,                        // Network precision
-    bool,                               // Is const Axis
-    CPUSpecificParams,                  // CPU specific params
-    std::map<std::string, std::string>  // Additional config
-> GatherLayerTestCPUParams;
+namespace ov {
+namespace test {
+
+typedef std::tuple<std::vector<InputShape>,  // Input shapes
+                   std::tuple<int, int>,     // Axis and Batch dim
+                   ElementType,              // Network precision
+                   bool,                     // Is const Axis
+                   CPUSpecificParams,        // CPU specific params
+                   ov::AnyMap                // Additional config
+                   >
+    GatherLayerTestCPUParams;
 
 class GatherLayerTestCPU : public testing::WithParamInterface<GatherLayerTestCPUParams>,
-                           virtual public ov::test::SubgraphBaseTest, public CPUTestsBase {
+                           virtual public ov::test::SubgraphBaseTest,
+                           public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<GatherLayerTestCPUParams> obj) {
         std::vector<InputShape> inputShapes;
@@ -30,20 +29,22 @@ class GatherLayerTestCPU : public testing::WithParamInterface
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
         std::tie(inputShapes, axisAndBatchDims, netPrecision, isAxisConstant, cpuParams, additionalConfig) = obj.param;
 
         std::ostringstream result;
         result << "IS=(";
         for (size_t i = 0lu; i < inputShapes.size(); i++) {
-            result << ov::test::utils::partialShape2str({inputShapes[i].first}) << (i < inputShapes.size() - 1lu ? "_" : "");
+            result << ov::test::utils::partialShape2str({inputShapes[i].first})
+                   << (i < inputShapes.size() - 1lu ? "_" : "");
         }
         result << ")_TS=";
         for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) {
             result << "{";
             for (size_t j = 0lu; j < inputShapes.size(); j++) {
-                result << ov::test::utils::vec2str(inputShapes[j].second[i]) << (j < inputShapes.size() - 1lu ? "_" : "");
+                result << ov::test::utils::vec2str(inputShapes[j].second[i])
+                       << (j < inputShapes.size() - 1lu ? "_" : "");
             }
             result << "}_";
         }
@@ -55,9 +56,9 @@ class GatherLayerTestCPU : public testing::WithParamInterface
set_friendly_name("indices");
         if (!isAxisConstant) {
@@ -107,8 +107,11 @@ class GatherLayerTestCPU : public testing::WithParamInterface
         std::shared_ptr<ov::Node> gatherNode;
         if (isAxisConstant) {
-            gatherNode = std::make_shared<ov::op::v8::Gather>(params[0], params[1],
-                ov::op::v0::Constant::create(intInputsPrecision, ov::Shape({1}), { axis }), batchDims);
+            gatherNode = std::make_shared<ov::op::v8::Gather>(
+                params[0],
+                params[1],
+                ov::op::v0::Constant::create(intInputsPrecision, ov::Shape({1}), {axis}),
+                batchDims);
         } else {
             gatherNode = std::make_shared<ov::op::v8::Gather>(params[0], params[1], params[2], batchDims);
         }
@@ -126,17 +129,21 @@ class GatherLayerTestCPU : public testing::WithParamInterface
get_friendly_name() == "data") {
                 const auto dataTypeSize = funcInput.get_element_type().size();
-                const uint32_t range = dataTypeSize == 4 ? 0x7FFFFFFF : dataTypeSize == 2 ? 0xFFFF : 0xFF;
-                tensor = ov::test::utils::create_and_fill_tensor(
-                    funcInput.get_element_type(), targetInputStaticShapes[0], range, 0, 1);
+                in_data.start_from = 0;
+                in_data.range = dataTypeSize == 4 ? 0x7FFFFFFF : dataTypeSize == 2 ? 0xFFFF : 0xFF;
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[0], in_data);
             } else if (funcInput.get_node()->get_friendly_name() == "indices") {
-                tensor = ov::test::utils::create_and_fill_tensor(
-                    funcInput.get_element_type(), targetInputStaticShapes[1], axisDim * 2, -axisDim, 1);
+                in_data.start_from = -axisDim;
+                in_data.range = axisDim * 2;
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[1], in_data);
             } else if (funcInput.get_node()->get_friendly_name() == "axis") {
-                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), {1}, 1, axis, 1);
+                in_data.start_from = axis;
+                in_data.range = 1;
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), {1}, in_data);
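+                // Compared with the old overload, which took the fill parameters
+                // positionally (range, start_from, resolution), the in_data object
+                // (presumably an ov::test::utils::InputGenerateData declared earlier
+                // in this hunk) carries them as named fields, roughly:
+                //   ov::test::utils::InputGenerateData in_data;  // assumed helper type
+                //   in_data.start_from = -axisDim;
+                //   in_data.range = axisDim * 2;
+                //   tensor = ov::test::utils::create_and_fill_tensor(type, shape, in_data);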
             }
             inputs.insert({funcInput.get_node_shared_ptr(), tensor});
         }
@@ -145,16 +152,17 @@ class GatherLayerTestCPU : public testing::WithParamInterface
-typedef std::tuple<
-    InputShape,             // Input shapes
-    std::vector<int64_t>,   // Indices
-    int,                    // Axis
-    ElementType,            // Network precision
-    CPUSpecificParams       // CPU specific params
-> GatherInPlaceLayerTestCPUParams;
+typedef std::tuple<InputShape,            // Input shapes
+                   std::vector<int64_t>,  // Indices
+                   int,                   // Axis
+                   ElementType,           // Network precision
+                   CPUSpecificParams      // CPU specific params
+                   >
+    GatherInPlaceLayerTestCPUParams;
 
 class GatherInPlaceLayerTestCPU : public testing::WithParamInterface<GatherInPlaceLayerTestCPUParams>,
-                                  virtual public ov::test::SubgraphBaseTest, public CPUTestsBase {
+                                  virtual public ov::test::SubgraphBaseTest,
+                                  public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<GatherInPlaceLayerTestCPUParams> obj) {
         InputShape inputShapes;
@@ -172,7 +180,8 @@ class GatherInPlaceLayerTestCPU : public testing::WithParamInterface
GetParam();
         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;
 
         targetDevice = ov::test::utils::DEVICE_CPU;
-        init_input_shapes({ inputShapes });
+        init_input_shapes({inputShapes});
         selectedType = makeSelectedTypeStr(selectedType, netPrecision);
 
-        ngraph::ParameterVector params {
-            std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])
-        };
+        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
         params[0]->set_friendly_name("data");
-        std::shared_ptr<ov::Node> gatherNode = std::make_shared<ov::op::v8::Gather>(params[0],
+        std::shared_ptr<ov::Node> gatherNode = std::make_shared<ov::op::v8::Gather>(
+            params[0],
             ov::op::v0::Constant::create(intInputsPrecision, ov::Shape({indices.size()}), indices),
-            ov::op::v0::Constant::create(intInputsPrecision, ov::Shape({1}), { axis }), batchDims);
+            ov::op::v0::Constant::create(intInputsPrecision, ov::Shape({1}), {axis}),
+            batchDims);
 
         function = makeNgraphFunction(netPrecision, params, gatherNode, "GatherCPU");
     }
@@ -223,24 +232,19 @@ TEST_P(GatherInPlaceLayerTestCPU, CompareWithRefs) {
 }
 
 namespace {
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f32,
-    ElementType::bf16,
-    ElementType::i8
-};
+const std::vector<ElementType> netPrecisions = {ElementType::f32, ElementType::bf16, ElementType::i8};
 
-std::vector<std::map<std::string, std::string>> additionalConfig
-    = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}},
-       {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}};
+std::vector<ov::AnyMap> additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}},
+                                            {{ov::hint::inference_precision(ov::element::bf16)}}};
 
 std::vector<bool> isAxisConst{true, false};
 const CPUSpecificParams cpuParamsRef{{}, {}, {"ref_any"}, "ref_any"};
 
 std::vector<CPUSpecificParams> getCPUInfo() {
     std::vector<CPUSpecificParams> resCPUParams;
-    if (InferenceEngine::with_cpu_x86_avx512f()) {
+    if (ov::with_cpu_x86_avx512f()) {
resCPUParams.push_back(CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{}, {}, {"jit_avx2"}, "jit_avx2"}); } else { resCPUParams.push_back(CPUSpecificParams{{}, {}, {"ref"}, "ref"}); @@ -250,191 +254,159 @@ std::vector getCPUInfo() { ///// 1D ///// const std::vector> staticInputShapes1D = { - { { {}, { {1} } }, { {}, { {1} } } }, - { { {}, { {2} } }, { {}, { {2} } } }, - { { {}, { {3} } }, { {}, { {3} } } }, - { { {}, { {4} } }, { {}, { {4} } } }, - { { {}, { {5} } }, { {}, { {5} } } }, - { { {}, { {6} } }, { {}, { {6} } } }, - { { {}, { {7} } }, { {}, { {7} } } }, - { { {}, { {8} } }, { {}, { {8} } } }, - { { {}, { {9} } }, { {}, { {9} } } }, - { { {}, { {11} } }, { {}, { {11} } } }, - { { {}, { {13} } }, { {}, { {13} } } }, - { { {}, { {15} } }, { {}, { {15} } } }, - { { {}, { {16} } }, { {}, { {16} } } }, - { { {}, { {17} } }, { {}, { {17} } } }, - { { {}, { {19} } }, { {}, { {19} } } }, - { { {}, { {23} } }, { {}, { {23} } } }, - { { {}, { {24} } }, { {}, { {24} } } }, - { { {}, { {32} } }, { {}, { {32} } } }, - { { {}, { {33} } }, { {}, { {33} } } }, - { { {}, { {37} } }, { {}, { {37} } } }, - { { {}, { {41} } }, { {}, { {41} } } }, - { { {}, { {48} } }, { {}, { {48} } } }, - { { {}, { {51} } }, { {}, { {51} } } }, - { { {}, { {63} } }, { {}, { {63} } } }, - { { {}, { {64} } }, { {}, { {64} } } }, - { { {}, { {65} } }, { {}, { {65} } } } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_static_1D, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(staticInputShapes1D), - ::testing::Values(std::tuple{0, 0}), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); + {{{}, {{1}}}, {{}, {{1}}}}, {{{}, {{2}}}, {{}, {{2}}}}, {{{}, {{3}}}, {{}, {{3}}}}, + {{{}, {{4}}}, {{}, {{4}}}}, {{{}, {{5}}}, {{}, {{5}}}}, {{{}, {{6}}}, {{}, {{6}}}}, + {{{}, {{7}}}, {{}, {{7}}}}, {{{}, {{8}}}, {{}, {{8}}}}, {{{}, {{9}}}, {{}, {{9}}}}, + {{{}, {{11}}}, {{}, {{11}}}}, {{{}, {{13}}}, {{}, {{13}}}}, {{{}, {{15}}}, {{}, {{15}}}}, + {{{}, {{16}}}, {{}, {{16}}}}, {{{}, {{17}}}, {{}, {{17}}}}, {{{}, {{19}}}, {{}, {{19}}}}, + {{{}, {{23}}}, {{}, {{23}}}}, {{{}, {{24}}}, {{}, {{24}}}}, {{{}, {{32}}}, {{}, {{32}}}}, + {{{}, {{33}}}, {{}, {{33}}}}, {{{}, {{37}}}, {{}, {{37}}}}, {{{}, {{41}}}, {{}, {{41}}}}, + {{{}, {{48}}}, {{}, {{48}}}}, {{{}, {{51}}}, {{}, {{51}}}}, {{{}, {{63}}}, {{}, {{63}}}}, + {{{}, {{64}}}, {{}, {{64}}}}, {{{}, {{65}}}, {{}, {{65}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_static_1D, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(staticInputShapes1D), + ::testing::Values(std::tuple{0, 0}), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); const std::vector> dynamicInputShapes1D = { - { { { ov::Dimension{1, 70} }, // Dynamic shape 0 - { {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {11}, {13}, {15}, {16}, {17}, {19}, {23}, {24}, {32}, {55}, {63}, {64}, {65} } }, // Target shapes - { { -1 }, // Dynamic shape 1 - { {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {11}, {13}, {15}, {16}, {17}, {19}, {23}, {24}, {32}, {55}, {63}, {64}, {65} } } } // Target shapes + {{{ov::Dimension{1, 70}}, // Dynamic shape 0 + {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {11}, {13}, + {15}, {16}, {17}, 
{19}, {23}, {24}, {32}, {55}, {63}, {64}, {65}}}, // Target shapes + {{-1}, // Dynamic shape 1 + {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}, {11}, {13}, + {15}, {16}, {17}, {19}, {23}, {24}, {32}, {55}, {63}, {64}, {65}}}} // Target shapes }; -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_1D, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes1D), - ::testing::Values(std::tuple{0, 0}), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true, false), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_1D, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes1D), + ::testing::Values(std::tuple{0, 0}), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true, false), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); ///// 4D JIT ///// std::vector> get4DShapesJitStat(int maxBatchDims) { std::vector> result = {}; - if (InferenceEngine::with_cpu_x86_avx2()) { + if (ov::with_cpu_x86_avx2()) { if (maxBatchDims == 2) { - result = { - { { {}, { {18, 2, 2, 1} } }, // Static shapes - { {}, { {18, 2, 8} } } - }, - { { {}, { {17, 2, 2, 2} } }, // Static shapes - { {}, { {17, 2, 7} } } - }, - { { {}, { {16, 2, 2, 3} } }, // Static shapes - { {}, { {16, 2, 6} } } - }, - { { {}, { {15, 2, 2, 4} } }, // Static shapes - { {}, { {15, 2, 5} } } - }, - { { {}, { {14, 2, 2, 5} } }, // Static shapes - { {}, { {14, 2, 4} } } - }, - { { {}, { {13, 2, 2, 6} } }, // Static shapes - { {}, { {13, 2, 3} } } - }, - { { {}, { {12, 2, 2, 7} } }, // Static shapes - { {}, { {12, 2, 2} } } - }, - { { {}, { {11, 2, 2, 8} } }, // Static shapes - { {}, { {11, 2, 1} } } - } - }; + result = {{{{}, {{18, 2, 2, 1}}}, // Static shapes + {{}, {{18, 2, 8}}}}, + {{{}, {{17, 2, 2, 2}}}, // Static shapes + {{}, {{17, 2, 7}}}}, + {{{}, {{16, 2, 2, 3}}}, // Static shapes + {{}, {{16, 2, 6}}}}, + {{{}, {{15, 2, 2, 4}}}, // Static shapes + {{}, {{15, 2, 5}}}}, + {{{}, {{14, 2, 2, 5}}}, // Static shapes + {{}, {{14, 2, 4}}}}, + {{{}, {{13, 2, 2, 6}}}, // Static shapes + {{}, {{13, 2, 3}}}}, + {{{}, {{12, 2, 2, 7}}}, // Static shapes + {{}, {{12, 2, 2}}}}, + {{{}, {{11, 2, 2, 8}}}, // Static shapes + {{}, {{11, 2, 1}}}}}; } else if (maxBatchDims == 3) { - result = { - { { {}, { {18, 2, 8, 1} } }, // Static shapes - { {}, { {18, 2, 8} } } - }, - { { {}, { {17, 2, 7, 2} } }, // Static shapes - { {}, { {17, 2, 7} } } - }, - { { {}, { {16, 2, 6, 3} } }, // Static shapes - { {}, { {16, 2, 6} } } - }, - { { {}, { {15, 2, 5, 4} } }, // Static shapes - { {}, { {15, 2, 5} } } - }, - { { {}, { {14, 2, 4, 5} } }, // Static shapes - { {}, { {14, 2, 4} } } - }, - { { {}, { {13, 2, 3, 6} } }, // Static shapes - { {}, { {13, 2, 3} } } - }, - { { {}, { {12, 2, 2, 7} } }, // Static shapes - { {}, { {12, 2, 2} } } - }, - { { {}, { {11, 2, 1, 8} } }, // Static shapes - { {}, { {11, 2, 1} } } - } - }; + result = {{{{}, {{18, 2, 8, 1}}}, // Static shapes + {{}, {{18, 2, 8}}}}, + {{{}, {{17, 2, 7, 2}}}, // Static shapes + {{}, {{17, 2, 7}}}}, + {{{}, {{16, 2, 6, 3}}}, // Static shapes + {{}, {{16, 2, 6}}}}, + {{{}, {{15, 2, 5, 4}}}, // Static shapes + {{}, {{15, 2, 5}}}}, + {{{}, {{14, 2, 4, 5}}}, // Static shapes + {{}, {{14, 2, 4}}}}, + {{{}, {{13, 2, 3, 6}}}, // Static shapes + {{}, {{13, 2, 3}}}}, + {{{}, {{12, 2, 2, 7}}}, // Static shapes + {{}, {{12, 2, 2}}}}, + {{{}, {{11, 2, 1, 8}}}, // Static shapes + {{}, {{11, 2, 
1}}}}}; } else { throw std::invalid_argument("Invalid test case. Not valid batch dims."); } - } // AVX2 - if (InferenceEngine::with_cpu_x86_avx512f()) { + } // AVX2 + if (ov::with_cpu_x86_avx512f()) { std::vector> tmp; if (maxBatchDims == 2) { - tmp = { - { { {}, { {19, 4, 2, 9} } }, // Static shapes - { {}, { {19, 4, 16} } } - }, - { { {}, { {20, 4, 2, 10} } }, // Static shapes - { {}, { {20, 4, 15} } }, - }, - { { {}, { {21, 4, 2, 11} } }, // Static shapes - { {}, { {21, 4, 14} } } - }, - { { {}, { {22, 4, 2, 12} } }, // Static shapes - { {}, { {22, 4, 13} } }, - }, - { { {}, { {23, 4, 2, 13} } }, // Static shapes - { {}, { {23, 4, 12} } }, - }, - { { {}, { {24, 4, 2, 14} } }, // Static shapes - { {}, { {24, 4, 11} } }, - }, - { { {}, { {25, 4, 2, 15} } }, // Static shapes - { {}, { {25, 4, 10} } }, - }, - { { {}, { {26, 4, 2, 16} } }, // Static shapes - { {}, { {26, 4, 9} } }, - } - }; + tmp = {{{{}, {{19, 4, 2, 9}}}, // Static shapes + {{}, {{19, 4, 16}}}}, + { + {{}, {{20, 4, 2, 10}}}, // Static shapes + {{}, {{20, 4, 15}}}, + }, + {{{}, {{21, 4, 2, 11}}}, // Static shapes + {{}, {{21, 4, 14}}}}, + { + {{}, {{22, 4, 2, 12}}}, // Static shapes + {{}, {{22, 4, 13}}}, + }, + { + {{}, {{23, 4, 2, 13}}}, // Static shapes + {{}, {{23, 4, 12}}}, + }, + { + {{}, {{24, 4, 2, 14}}}, // Static shapes + {{}, {{24, 4, 11}}}, + }, + { + {{}, {{25, 4, 2, 15}}}, // Static shapes + {{}, {{25, 4, 10}}}, + }, + { + {{}, {{26, 4, 2, 16}}}, // Static shapes + {{}, {{26, 4, 9}}}, + }}; } else if (maxBatchDims == 3) { - tmp = { - { { {}, { {19, 4, 16, 9} } }, // Static shapes - { {}, { {19, 4, 16} } } - }, - { { {}, { {20, 4, 15, 10} } }, // Static shapes - { {}, { {20, 4, 15} } }, - }, - { { {}, { {21, 4, 14, 11} } }, // Static shapes - { {}, { {21, 4, 14} } } - }, - { { {}, { {22, 4, 13, 12} } }, // Static shapes - { {}, { {22, 4, 13} } }, - }, - { { {}, { {23, 4, 12, 13} } }, // Static shapes - { {}, { {23, 4, 12} } }, - }, - { { {}, { {24, 4, 11, 14} } }, // Static shapes - { {}, { {24, 4, 11} } }, - }, - { { {}, { {25, 4, 10, 15} } }, // Static shapes - { {}, { {25, 4, 10} } }, - }, - { { {}, { {26, 4, 9, 16} } }, // Static shapes - { {}, { {26, 4, 9} } }, - } - }; + tmp = {{{{}, {{19, 4, 16, 9}}}, // Static shapes + {{}, {{19, 4, 16}}}}, + { + {{}, {{20, 4, 15, 10}}}, // Static shapes + {{}, {{20, 4, 15}}}, + }, + {{{}, {{21, 4, 14, 11}}}, // Static shapes + {{}, {{21, 4, 14}}}}, + { + {{}, {{22, 4, 13, 12}}}, // Static shapes + {{}, {{22, 4, 13}}}, + }, + { + {{}, {{23, 4, 12, 13}}}, // Static shapes + {{}, {{23, 4, 12}}}, + }, + { + {{}, {{24, 4, 11, 14}}}, // Static shapes + {{}, {{24, 4, 11}}}, + }, + { + {{}, {{25, 4, 10, 15}}}, // Static shapes + {{}, {{25, 4, 10}}}, + }, + { + {{}, {{26, 4, 9, 16}}}, // Static shapes + {{}, {{26, 4, 9}}}, + }}; } else { throw std::invalid_argument("Invalid test case. Not valid batch dims."); } result.insert(result.end(), tmp.begin(), tmp.end()); - } // AVX5 + } // AVX5 return result; } std::vector> get4DAxisBatchJitStat(ov::element::Type type, int maxBatchDims) { std::vector> result = {}; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { if (type.size() == 4 || type.size() == 2 || type.size() == 1) { if (maxBatchDims == 2) return std::vector>{{3, 0}, {3, 1}, {3, 2}, {2, 0}, {2, 1}, {2, 2}}; @@ -443,7 +415,7 @@ std::vector> get4DAxisBatchJitStat(ov::element::Type type, else throw std::invalid_argument("Invalid test case. 
Not valid batch dims."); } - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { if (type.size() == 4) { if (maxBatchDims == 2) return std::vector>{{3, 0}, {3, 1}, {3, 2}, {2, 0}, {2, 1}, {2, 2}}; @@ -463,152 +435,151 @@ std::vector> get4DAxisBatchJitStat(ov::element::Type type, return {}; } -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit32, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitStat(2)), - ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::f32, 2)), - ::testing::Values(ElementType::f32), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::ValuesIn(additionalConfig)), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit16, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitStat(2)), - ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::bf16, 2)), - ::testing::Values(ElementType::bf16), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit8, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitStat(2)), - ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::i8, 2)), - ::testing::Values(ElementType::i8), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit32, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitStat(2)), + ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::f32, 2)), + ::testing::Values(ElementType::f32), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::ValuesIn(additionalConfig)), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit16, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitStat(2)), + ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::bf16, 2)), + ::testing::Values(ElementType::bf16), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit8, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitStat(2)), + ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::i8, 2)), + ::testing::Values(ElementType::i8), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); // batchDims == indicesRank -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit32_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitStat(3)), - ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::f32, 3)), - ::testing::Values(ElementType::f32), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::ValuesIn(additionalConfig)), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit16_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitStat(3)), - ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::bf16, 3)), - ::testing::Values(ElementType::bf16), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit8_Bmax, 
GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitStat(3)), - ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::i8, 3)), - ::testing::Values(ElementType::i8), - ::testing::Values(true), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit32_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitStat(3)), + ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::f32, 3)), + ::testing::Values(ElementType::f32), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::ValuesIn(additionalConfig)), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit16_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitStat(3)), + ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::bf16, 3)), + ::testing::Values(ElementType::bf16), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_jit8_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitStat(3)), + ::testing::ValuesIn(get4DAxisBatchJitStat(ElementType::i8, 3)), + ::testing::Values(ElementType::i8), + ::testing::Values(true), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); std::vector> get4DShapesJitDyn(int maxBatchDims) { std::vector> result = {}; - if (InferenceEngine::with_cpu_x86_avx2()) { + if (ov::with_cpu_x86_avx2()) { if (maxBatchDims == 2) { result = { - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 2, 1}, {10, 2, 2, 2}, {8, 2, 2, 3}, {10, 2, 2, 4}} }, // Target shapes - { { ov::Dimension(4, 16), -1, -1 }, // Dynamic shape 1 - { {8, 2, 8}, {10, 2, 7}, {8, 2, 6}, {10, 2, 5} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 2, 5}, {10, 2, 2, 6}, {8, 2, 2, 7}, {10, 2, 2, 8}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 4}, {10, 2, 3}, {8, 2, 2}, {10, 2, 1} } } }, // Target shapes - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {10, 2, 2, 1}, {10, 2, 2, 2}, {10, 2, 2, 3}, {10, 2, 2, 4}} }, // Target shapes - { { 10, 2, 5 }, // Dynamic shape 1 - { {10, 2, 5}, {10, 2, 5}, {10, 2, 5}, {10, 2, 5} } } }, // Target shapes - { { { 8, 2, 2, 5 }, // Dynamic shape 0 - { {8, 2, 2, 5}, {8, 2, 2, 5}, {8, 2, 2, 5}, {8, 2, 2, 5}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 4}, {8, 2, 3}, {8, 2, 2}, {8, 2, 1} } } } // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 2, 1}, {10, 2, 2, 2}, {8, 2, 2, 3}, {10, 2, 2, 4}}}, // Target shapes + {{ov::Dimension(4, 16), -1, -1}, // Dynamic shape 1 + {{8, 2, 8}, {10, 2, 7}, {8, 2, 6}, {10, 2, 5}}}}, // Target shapes + {{{-1, -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 2, 5}, {10, 2, 2, 6}, {8, 2, 2, 7}, {10, 2, 2, 8}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 4}, {10, 2, 3}, {8, 2, 2}, {10, 2, 1}}}}, // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{10, 2, 2, 1}, {10, 2, 2, 2}, {10, 2, 2, 3}, {10, 2, 2, 4}}}, // Target shapes + {{10, 2, 5}, // Dynamic shape 1 + {{10, 2, 5}, {10, 2, 5}, {10, 2, 5}, {10, 2, 5}}}}, // Target shapes + {{{8, 2, 2, 5}, // Dynamic shape 0 + {{8, 2, 2, 5}, {8, 2, 2, 5}, {8, 2, 2, 5}, {8, 2, 2, 5}}}, // 
Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 4}, {8, 2, 3}, {8, 2, 2}, {8, 2, 1}}}} // Target shapes }; } else if (maxBatchDims == 3) { result = { - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 8, 1}, {10, 2, 8, 2}, {8, 2, 8, 3}, {10, 2, 5, 4}} }, // Target shapes - { { ov::Dimension(4, 16), -1, -1 }, // Dynamic shape 1 - { {8, 2, 8}, {10, 2, 8}, {8, 2, 8}, {10, 2, 5} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 4, 5}, {10, 2, 3, 6}, {8, 2, 2, 7}, {10, 2, 1, 8}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 4}, {10, 2, 3}, {8, 2, 2}, {10, 2, 1} } } }, // Target shapes - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {10, 2, 5, 1}, {10, 2, 5, 2}, {10, 2, 5, 3}, {10, 2, 5, 4}} }, // Target shapes - { { 10, 2, 5 }, // Dynamic shape 1 - { {10, 2, 5}, {10, 2, 5}, {10, 2, 5}, {10, 2, 5} } } }, // Target shapes - { { { 8, 2, 3, 5 }, // Dynamic shape 0 - { {8, 2, 3, 5}, {8, 2, 3, 5}, {8, 2, 3, 5}, {8, 2, 3, 5}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 3}, {8, 2, 3}, {8, 2, 3}, {8, 2, 3} } } } // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 8, 1}, {10, 2, 8, 2}, {8, 2, 8, 3}, {10, 2, 5, 4}}}, // Target shapes + {{ov::Dimension(4, 16), -1, -1}, // Dynamic shape 1 + {{8, 2, 8}, {10, 2, 8}, {8, 2, 8}, {10, 2, 5}}}}, // Target shapes + {{{-1, -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 4, 5}, {10, 2, 3, 6}, {8, 2, 2, 7}, {10, 2, 1, 8}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 4}, {10, 2, 3}, {8, 2, 2}, {10, 2, 1}}}}, // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{10, 2, 5, 1}, {10, 2, 5, 2}, {10, 2, 5, 3}, {10, 2, 5, 4}}}, // Target shapes + {{10, 2, 5}, // Dynamic shape 1 + {{10, 2, 5}, {10, 2, 5}, {10, 2, 5}, {10, 2, 5}}}}, // Target shapes + {{{8, 2, 3, 5}, // Dynamic shape 0 + {{8, 2, 3, 5}, {8, 2, 3, 5}, {8, 2, 3, 5}, {8, 2, 3, 5}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 3}, {8, 2, 3}, {8, 2, 3}, {8, 2, 3}}}} // Target shapes }; } else { throw std::invalid_argument("Invalid test case. 
Not valid batch dims."); } } - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { std::vector> tmp; if (maxBatchDims == 2) { tmp = { - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 2, 9}, {10, 2, 2, 10}, {8, 2, 2, 11}, {10, 2, 2, 12}} }, // Target shapes - { { ov::Dimension(4, 16), -1, -1 }, // Dynamic shape 1 - { {8, 2, 16}, {10, 2, 15}, {8, 2, 14}, {10, 2, 13} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 2, 13}, {10, 2, 2, 14}, {8, 2, 2, 15}, {10, 2, 2, 16}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 12}, {10, 2, 11}, {8, 2, 10}, {10, 2, 9} } } }, // Target shapes - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {10, 2, 2, 9}, {10, 2, 2, 10}, {10, 2, 2, 11}, {10, 2, 2, 12}} }, // Target shapes - { { 10, 2, 16 }, // Dynamic shape 1 - { {10, 2, 16}, {10, 2, 16}, {10, 2, 16}, {10, 2, 16} } } }, // Target shapes - { { { 8, 2, 2, 15 }, // Dynamic shape 0 - { {8, 2, 2, 15}, {8, 2, 2, 15}, {8, 2, 2, 15}, {8, 2, 2, 15}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 12}, {8, 2, 11}, {8, 2, 10}, {8, 2, 9} } } } // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 2, 9}, {10, 2, 2, 10}, {8, 2, 2, 11}, {10, 2, 2, 12}}}, // Target shapes + {{ov::Dimension(4, 16), -1, -1}, // Dynamic shape 1 + {{8, 2, 16}, {10, 2, 15}, {8, 2, 14}, {10, 2, 13}}}}, // Target shapes + {{{-1, -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 2, 13}, {10, 2, 2, 14}, {8, 2, 2, 15}, {10, 2, 2, 16}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 12}, {10, 2, 11}, {8, 2, 10}, {10, 2, 9}}}}, // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{10, 2, 2, 9}, {10, 2, 2, 10}, {10, 2, 2, 11}, {10, 2, 2, 12}}}, // Target shapes + {{10, 2, 16}, // Dynamic shape 1 + {{10, 2, 16}, {10, 2, 16}, {10, 2, 16}, {10, 2, 16}}}}, // Target shapes + {{{8, 2, 2, 15}, // Dynamic shape 0 + {{8, 2, 2, 15}, {8, 2, 2, 15}, {8, 2, 2, 15}, {8, 2, 2, 15}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 12}, {8, 2, 11}, {8, 2, 10}, {8, 2, 9}}}} // Target shapes }; } else if (maxBatchDims == 3) { tmp = { - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 16, 9}, {10, 2, 15, 10}, {8, 2, 14, 11}, {10, 2, 13, 12}} }, // Target shapes - { { ov::Dimension(4, 16), -1, -1 }, // Dynamic shape 1 - { {8, 2, 16}, {10, 2, 15}, {8, 2, 14}, {10, 2, 13} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {8, 2, 12, 13}, {10, 2, 11, 14}, {8, 2, 10, 15}, {10, 2, 9, 16}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 12}, {10, 2, 11}, {8, 2, 10}, {10, 2, 9} } } }, // Target shapes - { { { ov::Dimension(5, 15), -1, -1, -1 }, // Dynamic shape 0 - { {10, 2, 16, 9}, {10, 2, 16, 10}, {10, 2, 16, 11}, {10, 2, 16, 12}} }, // Target shapes - { { 10, 2, 16 }, // Dynamic shape 1 - { {10, 2, 16}, {10, 2, 16}, {10, 2, 16}, {10, 2, 16} } } }, // Target shapes - { { { 8, 2, 11, 15 }, // Dynamic shape 0 - { {8, 2, 11, 15}, {8, 2, 11, 15}, {8, 2, 11, 15}, {8, 2, 11, 15}} }, // Target shapes - { { -1, -1, -1 }, // Dynamic shape 1 - { {8, 2, 11}, {8, 2, 11}, {8, 2, 11}, {8, 2, 11} } } } // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{8, 2, 16, 9}, {10, 2, 15, 10}, {8, 2, 14, 11}, {10, 2, 13, 12}}}, // Target shapes + {{ov::Dimension(4, 16), -1, -1}, // Dynamic shape 1 + {{8, 2, 16}, {10, 2, 15}, {8, 2, 14}, {10, 2, 13}}}}, // Target shapes + {{{-1, 
-1, -1, -1}, // Dynamic shape 0 + {{8, 2, 12, 13}, {10, 2, 11, 14}, {8, 2, 10, 15}, {10, 2, 9, 16}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 12}, {10, 2, 11}, {8, 2, 10}, {10, 2, 9}}}}, // Target shapes + {{{ov::Dimension(5, 15), -1, -1, -1}, // Dynamic shape 0 + {{10, 2, 16, 9}, {10, 2, 16, 10}, {10, 2, 16, 11}, {10, 2, 16, 12}}}, // Target shapes + {{10, 2, 16}, // Dynamic shape 1 + {{10, 2, 16}, {10, 2, 16}, {10, 2, 16}, {10, 2, 16}}}}, // Target shapes + {{{8, 2, 11, 15}, // Dynamic shape 0 + {{8, 2, 11, 15}, {8, 2, 11, 15}, {8, 2, 11, 15}, {8, 2, 11, 15}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{8, 2, 11}, {8, 2, 11}, {8, 2, 11}, {8, 2, 11}}}} // Target shapes }; } else { throw std::invalid_argument("Invalid test case. Not valid batch dims."); @@ -621,7 +592,7 @@ std::vector> get4DShapesJitDyn(int maxBatchDim std::vector> get4DAxisBatchJitDyn(ov::element::Type type, int maxBatchDims) { std::vector> result = {}; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { if (type.size() == 4 || type.size() == 2 || type.size() == 1) { if (maxBatchDims == 2) return std::vector>{{3, 0}, {3, 1}, {3, 2}}; @@ -630,7 +601,7 @@ std::vector> get4DAxisBatchJitDyn(ov::element::Type type, i else throw std::invalid_argument("Invalid test case. Not valid batch dims."); } - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { if (type.size() == 4 || type.size() == 2 || type.size() == 1) { if (maxBatchDims == 2) return std::vector>{{3, 0}, {3, 1}, {3, 2}}; @@ -643,184 +614,167 @@ std::vector> get4DAxisBatchJitDyn(ov::element::Type type, i return {}; } -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit32, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitDyn(2)), - ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::f32, 2)), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(isAxisConst), - ::testing::ValuesIn(getCPUInfo()), - ::testing::ValuesIn(additionalConfig)), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit16, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitDyn(2)), - ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::bf16, 2)), - ::testing::Values(ElementType::bf16), - ::testing::ValuesIn(isAxisConst), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit8, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitDyn(2)), - ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::i8, 2)), - ::testing::Values(ElementType::i8), - ::testing::ValuesIn(isAxisConst), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit32, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitDyn(2)), + ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::f32, 2)), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(isAxisConst), + ::testing::ValuesIn(getCPUInfo()), + ::testing::ValuesIn(additionalConfig)), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit16, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitDyn(2)), + ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::bf16, 2)), + ::testing::Values(ElementType::bf16), + ::testing::ValuesIn(isAxisConst), + 
::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit8, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitDyn(2)), + ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::i8, 2)), + ::testing::Values(ElementType::i8), + ::testing::ValuesIn(isAxisConst), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); // batchDims == indicesRank -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit32_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitDyn(3)), - ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::f32, 3)), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(isAxisConst), - ::testing::ValuesIn(getCPUInfo()), - ::testing::ValuesIn(additionalConfig)), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit16_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitDyn(3)), - ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::bf16, 3)), - ::testing::Values(ElementType::bf16), - ::testing::ValuesIn(isAxisConst), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit8_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesJitDyn(3)), - ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::i8, 3)), - ::testing::Values(ElementType::i8), - ::testing::ValuesIn(isAxisConst), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit32_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitDyn(3)), + ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::f32, 3)), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(isAxisConst), + ::testing::ValuesIn(getCPUInfo()), + ::testing::ValuesIn(additionalConfig)), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit16_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitDyn(3)), + ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::bf16, 3)), + ::testing::Values(ElementType::bf16), + ::testing::ValuesIn(isAxisConst), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_4D_jit8_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesJitDyn(3)), + ::testing::ValuesIn(get4DAxisBatchJitDyn(ElementType::i8, 3)), + ::testing::Values(ElementType::i8), + ::testing::ValuesIn(isAxisConst), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); ///// 4D REFERENCE ///// std::vector> get4DShapesRefStat(bool maxBatchDims) { std::vector> result = {}; - if (InferenceEngine::with_cpu_x86_avx2()) { + if (ov::with_cpu_x86_avx2()) { if (!maxBatchDims) { - result = { - { { {}, { {10, 2, 9, 9} } }, // Static shapes - { {}, { {10, 2, 8} } } - }, - { { {}, { {11, 2, 9, 2} } }, // Static shapes - { {}, { {11, 2, 7} } } - }, - { { {}, { {12, 2, 9, 3} } }, // Static shapes - { {}, { {12, 2, 6} } } - }, - { { {}, { {13, 2, 9, 4} } }, // Static shapes - { {}, { {13, 2, 5} } } - }, - { { {}, { {14, 2, 9, 5} } }, // Static shapes - { {}, { {14, 
2, 4} } } - }, - { { {}, { {15, 2, 9, 6} } }, // Static shapes - { {}, { {15, 2, 3} } } - }, - { { {}, { {16, 2, 9, 7} } }, // Static shapes - { {}, { {16, 2, 2} } } - }, - { { {}, { {17, 2, 9, 8} } }, // Static shapes - { {}, { {17, 2, 1} } } - } - }; + result = {{{{}, {{10, 2, 9, 9}}}, // Static shapes + {{}, {{10, 2, 8}}}}, + {{{}, {{11, 2, 9, 2}}}, // Static shapes + {{}, {{11, 2, 7}}}}, + {{{}, {{12, 2, 9, 3}}}, // Static shapes + {{}, {{12, 2, 6}}}}, + {{{}, {{13, 2, 9, 4}}}, // Static shapes + {{}, {{13, 2, 5}}}}, + {{{}, {{14, 2, 9, 5}}}, // Static shapes + {{}, {{14, 2, 4}}}}, + {{{}, {{15, 2, 9, 6}}}, // Static shapes + {{}, {{15, 2, 3}}}}, + {{{}, {{16, 2, 9, 7}}}, // Static shapes + {{}, {{16, 2, 2}}}}, + {{{}, {{17, 2, 9, 8}}}, // Static shapes + {{}, {{17, 2, 1}}}}}; } else { - result = { - { { {}, { {10, 8, 2, 39} } }, // Static shapes - { {}, { {10, 8} } } - }, - { { {}, { {11, 7, 2, 42} } }, // Static shapes - { {}, { {11, 7} } } - }, - { { {}, { {12, 6, 2, 43} } }, // Static shapes - { {}, { {12, 6} } } - }, - { { {}, { {13, 5, 2, 44} } }, // Static shapes - { {}, { {13, 5} } } - }, - { { {}, { {14, 4, 2, 45} } }, // Static shapes - { {}, { {14, 4} } } - }, - { { {}, { {15, 3, 2, 46} } }, // Static shapes - { {}, { {15, 3} } } - }, - { { {}, { {16, 2, 2, 47} } }, // Static shapes - { {}, { {16, 2} } } - }, - { { {}, { {17, 1, 2, 38} } }, // Static shapes - { {}, { {17, 1} } } - } - }; + result = {{{{}, {{10, 8, 2, 39}}}, // Static shapes + {{}, {{10, 8}}}}, + {{{}, {{11, 7, 2, 42}}}, // Static shapes + {{}, {{11, 7}}}}, + {{{}, {{12, 6, 2, 43}}}, // Static shapes + {{}, {{12, 6}}}}, + {{{}, {{13, 5, 2, 44}}}, // Static shapes + {{}, {{13, 5}}}}, + {{{}, {{14, 4, 2, 45}}}, // Static shapes + {{}, {{14, 4}}}}, + {{{}, {{15, 3, 2, 46}}}, // Static shapes + {{}, {{15, 3}}}}, + {{{}, {{16, 2, 2, 47}}}, // Static shapes + {{}, {{16, 2}}}}, + {{{}, {{17, 1, 2, 38}}}, // Static shapes + {{}, {{17, 1}}}}}; } } - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { std::vector> tmp; if (!maxBatchDims) { - tmp = { - { { {}, { {25, 4, 4, 17} } }, // Static shapes - { {}, { {25, 4, 16} } } - }, - { { {}, { {24, 4, 4, 18} } }, // Static shapes - { {}, { {24, 4, 15} } }, - }, - { { {}, { {23, 4, 4, 19} } }, // Static shapes - { {}, { {23, 4, 14} } } - }, - { { {}, { {22, 4, 4, 20} } }, // Static shapes - { {}, { {22, 4, 13} } }, - }, - { { {}, { {21, 4, 4, 21} } }, // Static shapes - { {}, { {21, 4, 12} } }, - }, - { { {}, { {20, 4, 4, 22} } }, // Static shapes - { {}, { {20, 4, 11} } }, - }, - { { {}, { {19, 4, 4, 23} } }, // Static shapes - { {}, { {19, 4, 10} } }, - }, - { { {}, { {18, 4, 4, 24} } }, // Static shapes - { {}, { {18, 4, 9} } }, - } - }; + tmp = {{{{}, {{25, 4, 4, 17}}}, // Static shapes + {{}, {{25, 4, 16}}}}, + { + {{}, {{24, 4, 4, 18}}}, // Static shapes + {{}, {{24, 4, 15}}}, + }, + {{{}, {{23, 4, 4, 19}}}, // Static shapes + {{}, {{23, 4, 14}}}}, + { + {{}, {{22, 4, 4, 20}}}, // Static shapes + {{}, {{22, 4, 13}}}, + }, + { + {{}, {{21, 4, 4, 21}}}, // Static shapes + {{}, {{21, 4, 12}}}, + }, + { + {{}, {{20, 4, 4, 22}}}, // Static shapes + {{}, {{20, 4, 11}}}, + }, + { + {{}, {{19, 4, 4, 23}}}, // Static shapes + {{}, {{19, 4, 10}}}, + }, + { + {{}, {{18, 4, 4, 24}}}, // Static shapes + {{}, {{18, 4, 9}}}, + }}; } else { - tmp = { - { { {}, { {25, 16, 4, 65} } }, // Static shapes - { {}, { {25, 16} } } - }, - { { {}, { {24, 15, 4, 66} } }, // Static shapes - { {}, { {24, 15} } }, - }, - { { {}, { {23, 14, 4, 67} } }, // Static shapes 
- { {}, { {23, 14} } } - }, - { { {}, { {22, 13, 4, 68} } }, // Static shapes - { {}, { {22, 13} } }, - }, - { { {}, { {21, 12, 4, 69} } }, // Static shapes - { {}, { {21, 12} } }, - }, - { { {}, { {20, 11, 4, 70} } }, // Static shapes - { {}, { {20, 11} } }, - }, - { { {}, { {19, 10, 4, 71} } }, // Static shapes - { {}, { {19, 10} } }, - }, - { { {}, { {18, 9, 4, 72} } }, // Static shapes - { {}, { {18, 9} } }, - } - }; + tmp = {{{{}, {{25, 16, 4, 65}}}, // Static shapes + {{}, {{25, 16}}}}, + { + {{}, {{24, 15, 4, 66}}}, // Static shapes + {{}, {{24, 15}}}, + }, + {{{}, {{23, 14, 4, 67}}}, // Static shapes + {{}, {{23, 14}}}}, + { + {{}, {{22, 13, 4, 68}}}, // Static shapes + {{}, {{22, 13}}}, + }, + { + {{}, {{21, 12, 4, 69}}}, // Static shapes + {{}, {{21, 12}}}, + }, + { + {{}, {{20, 11, 4, 70}}}, // Static shapes + {{}, {{20, 11}}}, + }, + { + {{}, {{19, 10, 4, 71}}}, // Static shapes + {{}, {{19, 10}}}, + }, + { + {{}, {{18, 9, 4, 72}}}, // Static shapes + {{}, {{18, 9}}}, + }}; } result.insert(result.end(), tmp.begin(), tmp.end()); } @@ -830,7 +784,7 @@ std::vector> get4DShapesRefStat(bool maxBatchD std::vector> get4DAxisBatchRefStat(ov::element::Type type, bool maxBatchDims) { std::vector> result = {}; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { if (type.size() == 4) { if (!maxBatchDims) return std::vector>{{1, 0}, {1, 1}, {0, 0}}; @@ -842,7 +796,7 @@ std::vector> get4DAxisBatchRefStat(ov::element::Type type, else return std::vector>{{2, 2}}; } - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { if (type.size() == 4) { if (!maxBatchDims) return std::vector>{{1, 0}, {1, 1}, {0, 0}}; @@ -858,105 +812,106 @@ std::vector> get4DAxisBatchRefStat(ov::element::Type type, return {}; } -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref32, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesRefStat(false)), - ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::f32, false)), - ::testing::Values(ElementType::f32), - ::testing::Values(true), - ::testing::Values(cpuParamsRef), - ::testing::ValuesIn(additionalConfig)), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref16, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesRefStat(false)), - ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::bf16, false)), - ::testing::Values(ElementType::bf16), - ::testing::Values(true), - ::testing::Values(cpuParamsRef), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref8, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesRefStat(false)), - ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::i8, false)), - ::testing::Values(ElementType::i8), - ::testing::Values(true), - ::testing::Values(cpuParamsRef), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref32, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesRefStat(false)), + ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::f32, false)), + ::testing::Values(ElementType::f32), + ::testing::Values(true), + ::testing::Values(cpuParamsRef), + ::testing::ValuesIn(additionalConfig)), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref16, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesRefStat(false)), + 
::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::bf16, false)), + ::testing::Values(ElementType::bf16), + ::testing::Values(true), + ::testing::Values(cpuParamsRef), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref8, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesRefStat(false)), + ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::i8, false)), + ::testing::Values(ElementType::i8), + ::testing::Values(true), + ::testing::Values(cpuParamsRef), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); // batchDims == indicesRank -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref32_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesRefStat(true)), - ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::f32, true)), - ::testing::Values(ElementType::f32), - ::testing::Values(true), - ::testing::Values(cpuParamsRef), - ::testing::ValuesIn(additionalConfig)), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref16_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesRefStat(true)), - ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::bf16, true)), - ::testing::Values(ElementType::bf16), - ::testing::Values(true), - ::testing::Values(cpuParamsRef), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref8_Bmax, GatherLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(get4DShapesRefStat(true)), - ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::i8, true)), - ::testing::Values(ElementType::i8), - ::testing::Values(true), - ::testing::Values(cpuParamsRef), - ::testing::Values(additionalConfig[0])), - GatherLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref32_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesRefStat(true)), + ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::f32, true)), + ::testing::Values(ElementType::f32), + ::testing::Values(true), + ::testing::Values(cpuParamsRef), + ::testing::ValuesIn(additionalConfig)), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref16_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesRefStat(true)), + ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::bf16, true)), + ::testing::Values(ElementType::bf16), + ::testing::Values(true), + ::testing::Values(cpuParamsRef), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_4D_ref8_Bmax, + GatherLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(get4DShapesRefStat(true)), + ::testing::ValuesIn(get4DAxisBatchRefStat(ElementType::i8, true)), + ::testing::Values(ElementType::i8), + ::testing::Values(true), + ::testing::Values(cpuParamsRef), + ::testing::Values(additionalConfig[0])), + GatherLayerTestCPU::getTestCaseName); // InPlace const std::vector shapesInPlace4D_0 = { - { {}, { {5, 4, 4, 19} } }, - { {5, 4, -1, -1}, { {5, 4, 4, 19}, {5, 4, 4, 25}, {5, 4, 2, 19} } }, + {{}, {{5, 4, 4, 19}}}, + {{5, 4, -1, -1}, {{5, 4, 4, 19}, {5, 4, 4, 25}, {5, 4, 2, 19}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_inplace_4D_0, GatherInPlaceLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(shapesInPlace4D_0), - ::testing::Values(std::vector{ 2 }), - ::testing::Values(0), - 
::testing::Values(ElementType::f32), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - GatherInPlaceLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_inplace_4D_0, + GatherInPlaceLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(shapesInPlace4D_0), + ::testing::Values(std::vector{2}), + ::testing::Values(0), + ::testing::Values(ElementType::f32), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + GatherInPlaceLayerTestCPU::getTestCaseName); const std::vector shapesInPlace4D_1 = { - { {}, { {1, 9, 4, 19} } }, - { {1, 9, -1, -1}, { {1, 9, 4, 19}, {1, 9, 4, 25}, {1, 9, 2, 19} } }, + {{}, {{1, 9, 4, 19}}}, + {{1, 9, -1, -1}, {{1, 9, 4, 19}, {1, 9, 4, 25}, {1, 9, 2, 19}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_inplace_4D_1, GatherInPlaceLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(shapesInPlace4D_1), - ::testing::Values(std::vector{ -4 }, std::vector{ 5 }), - ::testing::Values(1), - ::testing::Values(ElementType::f32), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - GatherInPlaceLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_4D_out_of_range, GatherInPlaceLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(shapesInPlace4D_1), - ::testing::Values(std::vector{ 10 }, std::vector{ -15 }), - ::testing::Values(1), - ::testing::Values(ElementType::f32), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref_any"})), - GatherInPlaceLayerTestCPU::getTestCaseName); - -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_inplace_4D_1, + GatherInPlaceLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(shapesInPlace4D_1), + ::testing::Values(std::vector{-4}, std::vector{5}), + ::testing::Values(1), + ::testing::Values(ElementType::f32), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + GatherInPlaceLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_4D_out_of_range, + GatherInPlaceLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(shapesInPlace4D_1), + ::testing::Values(std::vector{10}, std::vector{-15}), + ::testing::Values(1), + ::testing::Values(ElementType::f32), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref_any"})), + GatherInPlaceLayerTestCPU::getTestCaseName); + +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp index 1edc0146f2328f..e1cc152ffbcad3 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_elements.cpp @@ -2,34 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include #include "test_utils/cpu_test_utils.hpp" -using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -using namespace InferenceEngine; -using namespace ngraph::helpers; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { +using GatherElementsParams = std::tuple, // Dynamic shape + Target static shapes + int, // Axis + ElementType, // Data precision + ElementType, // Indices precision + TargetDevice // Device name + >; -using GatherElementsParams = std::tuple< - std::vector, // Dynamic shape + Target static shapes - int, // Axis - ElementType, // Data precision - ElementType, // Indices 
precision - TargetDevice // Device name ->; - -using GatherElementsCPUTestParamSet = std::tuple< - GatherElementsParams, - CPUSpecificParams ->; +using GatherElementsCPUTestParamSet = std::tuple; class GatherElementsCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseNameCommon(const testing::TestParamInfo& obj) { std::vector shapes; @@ -57,7 +49,7 @@ class GatherElementsCPUTest : public testing::WithParamInterface &obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { GatherElementsParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; @@ -70,14 +62,17 @@ class GatherElementsCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; ov::Tensor tensor; - - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 15, 0, 32768); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 15; + in_data.resolution = 32768; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -98,13 +93,12 @@ class GatherElementsCPUTest : public testing::WithParamInterface(dPrecision, inputDynamicShapes[0]), - std::make_shared(iPrecision, inputDynamicShapes[1]), + ov::ParameterVector params = { + std::make_shared(dPrecision, inputDynamicShapes[0]), + std::make_shared(iPrecision, inputDynamicShapes[1]), }; - auto gather = std::make_shared( - params[0], params[1], axis); + auto gather = std::make_shared(params[0], params[1], axis); function = makeNgraphFunction(dPrecision, params, gather, "GatherElements"); } }; @@ -114,27 +108,24 @@ TEST_P(GatherElementsCPUTest, CompareWithRefs) { } namespace { -std::vector cpuParams_4D = { - CPUSpecificParams({nchw}, {nchw}, {}, {}) -}; +std::vector cpuParams_4D = {CPUSpecificParams({nchw}, {nchw}, {}, {})}; const std::vector> inDynamicShapeParams = { - {{{-1, -1, -1, -1}, {{2, 3, 5, 7}, {3, 4, 6, 8}}}, - {{-1, -1, -1, -1}, {{2, 3, 9, 7}, {3, 4, 4, 8}}}}, + {{{-1, -1, -1, -1}, {{2, 3, 5, 7}, {3, 4, 6, 8}}}, {{-1, -1, -1, -1}, {{2, 3, 9, 7}, {3, 4, 4, 8}}}}, {{{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{3, 4, 6, 8}, {2, 3, 5, 7}}}, - {{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{3, 4, 4, 8}, {2, 3, 9, 7}}}} -}; - -INSTANTIATE_TEST_SUITE_P(smoke_set1, GatherElementsCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inDynamicShapeParams), // shape - ::testing::ValuesIn(std::vector({2, -2})), // Axis - ::testing::ValuesIn(std::vector({ElementType::bf16, ElementType::f32})), - ::testing::Values(ElementType::i32), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))), - GatherElementsCPUTest::getTestCaseName); - -} // namespace -} // namespace CPULayerTestsDefinitions + {{{1, 10}, {1, 10}, {1, 10}, {1, 10}}, {{3, 4, 4, 8}, {2, 3, 9, 7}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_set1, + GatherElementsCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inDynamicShapeParams), // shape + 
::testing::ValuesIn(std::vector<int>({2, -2})),  // Axis
+                                                               ::testing::ValuesIn(std::vector<ElementType>(
+                                                                   {ElementType::bf16, ElementType::f32})),
+                                                               ::testing::Values(ElementType::i32),
+                                                               ::testing::Values(ov::test::utils::DEVICE_CPU)),
+                                            ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D))),
+                         GatherElementsCPUTest::getTestCaseName);
+
+}  // namespace
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp
index 01ba342722e2e6..ff74784ee773c0 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_nd.cpp
@@ -2,23 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include 
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
 
-using namespace InferenceEngine;
-using namespace ov;
-using namespace test;
+namespace ov {
+namespace test {
 
-namespace CPULayerTestsDefinitions {
-
-using GatherNDLayerCPUTestParamSet = std::tuple<
-        InputShape,                          // Input shapes
-        std::pair<Shape, std::vector<int>>,  // Indexes shape and values
-        ElementType,                         // Input element type
-        ElementType,                         // Indices element type
-        int                                  // Batch dims
->;
+using GatherNDLayerCPUTestParamSet = std::tuple<InputShape,                          // Input shapes
+                                                std::pair<Shape, std::vector<int>>,  // Indexes shape and values
+                                                ElementType,                         // Input element type
+                                                ElementType,                         // Indices element type
+                                                int                                  // Batch dims
+                                                >;
 
 class GatherNDLayerCPUTest : public testing::WithParamInterface<GatherNDLayerCPUTestParamSet>,
                              virtual public SubgraphBaseTest {
@@ -42,7 +36,7 @@ class GatherNDLayerCPUTest : public testing::WithParamInterface
             params.push_back(std::make_shared<ov::op::v0::Parameter>(dataElementType, shape));
         }
-        auto indexes_node = ngraph::opset3::Constant::create(idxElementType, indexes.first, indexes.second);
-        auto gather_nd = std::make_shared<ngraph::opset5::GatherND>(params[0], indexes_node, batchDims);
-        ngraph::ResultVector results{std::make_shared(gather_nd)};
-        function = std::make_shared<ngraph::Function>(results, params, "gatherND");
+        auto indexes_node = ov::op::v0::Constant::create(idxElementType, indexes.first, indexes.second);
+        auto gather_nd = std::make_shared<ov::op::v5::GatherND>(params[0], indexes_node, batchDims);
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gather_nd)};
+        function = std::make_shared<ov::Model>(results, params, "gatherND");
     }
 };
 
@@ -88,10 +82,10 @@ class GatherND8LayerCPUTest : public testing::WithParamInterface
             params.push_back(std::make_shared<ov::op::v0::Parameter>(dataElementType, shape));
         }
-        auto indexes_node = ngraph::opset3::Constant::create(idxElementType, indexes.first, indexes.second);
-        auto gather_nd = std::make_shared<ngraph::opset8::GatherND>(params[0], indexes_node, batchDims);
-        ngraph::ResultVector results{std::make_shared(gather_nd)};
-        function = std::make_shared<ngraph::Function>(results, params, "gatherND");
+        auto indexes_node = ov::op::v0::Constant::create(idxElementType, indexes.first, indexes.second);
+        auto gather_nd = std::make_shared<ov::op::v8::GatherND>(params[0], indexes_node, batchDims);
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gather_nd)};
+        function = std::make_shared<ov::Model>(results, params, "gatherND");
     }
 };
 
@@ -105,91 +99,100 @@ TEST_P(GatherND8LayerCPUTest, CompareWithRefs) {
 
 namespace {
 
-const std::vector<ElementType> inputPrecisions = {
-        ElementType::f32,
-        ElementType::bf16,
-        ElementType::i8
-};
+const std::vector<ElementType> inputPrecisions = {ElementType::f32, ElementType::bf16, ElementType::i8};
 
-const std::vector<ElementType> indexesPrecisions = {
-        ElementType::i32
-};
+const std::vector<ElementType> indexesPrecisions = {ElementType::i32};
 
 const std::vector<InputShape> inputShapesDynamicBD_0 = {
-    {{-1, -1, -1},                                      // dynamic
-     {{5, 10, 5}, {4, 12, 4}, {4, 12, 4}, {5, 5, 5}}},  // target
+    {{-1, -1, -1},  // dynamic
+     {{5, 10, 5}, {4, 
12, 4}, {5, 5, 5}}}, // target - {{-1, 5, -1, -1}, // dynamic - {{8, 5, 5, 5}, {5, 5, 8, 4}, {4, 5, 4, 5}}}, // target + {{-1, 5, -1, -1}, // dynamic + {{8, 5, 5, 5}, {5, 5, 8, 4}, {4, 5, 4, 5}}}, // target - {{{4, 10}, {5, 10}, {5, 10}, {5, 10}, {5, 10}}, // dynamic - {{4, 5, 5, 5, 5}, {4, 5, 5, 8, 5}, {10, 8, 5, 5, 5}}}, // target + {{{4, 10}, {5, 10}, {5, 10}, {5, 10}, {5, 10}}, // dynamic + {{4, 5, 5, 5, 5}, {4, 5, 5, 8, 5}, {10, 8, 5, 5, 5}}}, // target }; const std::vector>> indexesShapesBD_0 = { - std::pair>{{2, 2}, {3, 3, 2, 1}}, - std::pair>{{1, 2, 3}, {0, 1, 1, 1, 0, 2}}, - std::pair>{{2, 1, 1, 2}, {0, 2, 1, 1}}, + std::pair>{{2, 2}, {3, 3, 2, 1}}, + std::pair>{{1, 2, 3}, {0, 1, 1, 1, 0, 2}}, + std::pair>{{2, 1, 1, 2}, {0, 2, 1, 1}}, }; -const auto subset_BD0 = ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamicBD_0), - ::testing::ValuesIn(indexesShapesBD_0), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(indexesPrecisions), - ::testing::Values(0)); - -INSTANTIATE_TEST_SUITE_P(smoke_GatherND5DynamicBD_0, GatherNDLayerCPUTest, subset_BD0, GatherNDLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GatherND8DynamicBD_0, GatherND8LayerCPUTest, subset_BD0, GatherNDLayerCPUTest::getTestCaseName); +const auto subset_BD0 = ::testing::Combine(::testing::ValuesIn(inputShapesDynamicBD_0), + ::testing::ValuesIn(indexesShapesBD_0), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(indexesPrecisions), + ::testing::Values(0)); + +INSTANTIATE_TEST_SUITE_P(smoke_GatherND5DynamicBD_0, + GatherNDLayerCPUTest, + subset_BD0, + GatherNDLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GatherND8DynamicBD_0, + GatherND8LayerCPUTest, + subset_BD0, + GatherNDLayerCPUTest::getTestCaseName); const std::vector inputShapesDynamicBD_1 = { - {{3, -1, -1}, // dynamic - {{3, 10, 5}, {3, 10, 5}, {3, 12, 8}, {3, 8, 8}}}, // target + {{3, -1, -1}, // dynamic + {{3, 10, 5}, {3, 10, 5}, {3, 12, 8}, {3, 8, 8}}}, // target - {{3, {5, 10}, {5, 10}, {5, 10}, {5, 10}}, // dynamic - {{3, 5, 5, 5, 5}, {3, 8, 10, 10, 10}, {3, 8, 6, 8, 7}}}, // target + {{3, {5, 10}, {5, 10}, {5, 10}, {5, 10}}, // dynamic + {{3, 5, 5, 5, 5}, {3, 8, 10, 10, 10}, {3, 8, 6, 8, 7}}}, // target }; const std::vector>> indexesShapesBD_1 = { - std::pair>{{3, 2}, {0, 1, 2, 1, 0, 0}}, - std::pair>{{3, 2, 2}, {0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 0, 2}}, - std::pair>{{3, 1, 1, 2}, {0, 2, 1, 1, 0, 2}}, + std::pair>{{3, 2}, {0, 1, 2, 1, 0, 0}}, + std::pair>{{3, 2, 2}, {0, 1, 1, 1, 0, 2, 0, 1, 1, 1, 0, 2}}, + std::pair>{{3, 1, 1, 2}, {0, 2, 1, 1, 0, 2}}, }; -const auto subset_BD1 = ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamicBD_1), - ::testing::ValuesIn(indexesShapesBD_1), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(indexesPrecisions), - ::testing::Values(0)); - -INSTANTIATE_TEST_SUITE_P(smoke_GatherND5DynamicBD_1, GatherNDLayerCPUTest, subset_BD1, GatherNDLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GatherND8DynamicBD_1, GatherND8LayerCPUTest, subset_BD1, GatherNDLayerCPUTest::getTestCaseName); +const auto subset_BD1 = ::testing::Combine(::testing::ValuesIn(inputShapesDynamicBD_1), + ::testing::ValuesIn(indexesShapesBD_1), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(indexesPrecisions), + ::testing::Values(0)); + +INSTANTIATE_TEST_SUITE_P(smoke_GatherND5DynamicBD_1, + GatherNDLayerCPUTest, + subset_BD1, + GatherNDLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GatherND8DynamicBD_1, + GatherND8LayerCPUTest, + 
subset_BD1, + GatherNDLayerCPUTest::getTestCaseName); const std::vector inputShapesDynamicBD_2 = { - {{2, 2, -1, -1, -1}, // dynamic - {{2, 2, 5, 6, 5}, {2, 2, 2, 3, 3}, {2, 2, 2, 3, 3}, {2, 2, 7, 2, 3}}}, // target + {{2, 2, -1, -1, -1}, // dynamic + {{2, 2, 5, 6, 5}, {2, 2, 2, 3, 3}, {2, 2, 2, 3, 3}, {2, 2, 7, 2, 3}}}, // target - {{2, 2, {5, 10}, {5, 10}, {5, 10}}, // dynamic - {{2, 2, 5, 5, 5}, {2, 2, 10, 10, 5}, {2, 2, 7, 8, 7}}}, // target + {{2, 2, {5, 10}, {5, 10}, {5, 10}}, // dynamic + {{2, 2, 5, 5, 5}, {2, 2, 10, 10, 5}, {2, 2, 7, 8, 7}}}, // target }; const std::vector>> indexesShapesBD_2 = { - std::pair>{{2, 2, 3}, {0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0}}, - std::pair>{{2, 2, 2, 3}, {0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, - 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0}}, + std::pair>{{2, 2, 3}, {0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0}}, + std::pair>{{2, 2, 2, 3}, + {0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0}}, }; -const auto subset_BD2 = ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamicBD_2), - ::testing::ValuesIn(indexesShapesBD_2), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(indexesPrecisions), - ::testing::Values(0)); - -INSTANTIATE_TEST_SUITE_P(smoke_GatherND5DynamicBD_2, GatherNDLayerCPUTest, subset_BD2, GatherNDLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GatherND8DynamicBD_2, GatherND8LayerCPUTest, subset_BD2, GatherNDLayerCPUTest::getTestCaseName); - +const auto subset_BD2 = ::testing::Combine(::testing::ValuesIn(inputShapesDynamicBD_2), + ::testing::ValuesIn(indexesShapesBD_2), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(indexesPrecisions), + ::testing::Values(0)); + +INSTANTIATE_TEST_SUITE_P(smoke_GatherND5DynamicBD_2, + GatherNDLayerCPUTest, + subset_BD2, + GatherNDLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GatherND8DynamicBD_2, + GatherND8LayerCPUTest, + subset_BD2, + GatherNDLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp index fbb67606ef2297..055434c6477a5e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gather_tree.cpp @@ -2,39 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -namespace CPULayerTestsDefinitions { - -using namespace ov::test; using namespace CPUTestUtils; -using GatherTreeCPUTestParams = typename std::tuple< - InputShape, // Input tensors shape - ngraph::helpers::InputLayerType, // Secondary input type - ov::element::Type, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::string>; // Device name +namespace ov { +namespace test { + +using GatherTreeCPUTestParams = typename std::tuple; // Device name class GatherTreeLayerCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public 
ov::test::SubgraphBaseTest, + public CPUTestsBase { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape inputShape; ov::element::Type netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; - ngraph::helpers::InputLayerType secondaryInputType; + ov::element::Type inPrc, outPrc; + ov::test::utils::InputLayerType secondaryInputType; std::string targetName; - std::tie(inputShape, secondaryInputType, netPrecision, inPrc, outPrc, inLayout, outLayout, targetName) = obj.param; + std::tie(inputShape, secondaryInputType, netPrecision, inPrc, outPrc, targetName) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; @@ -44,10 +40,8 @@ class GatherTreeLayerCPUTest : public testing::WithParamInterface(paramsIn.front(), inp2, inp3, inp4); + auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); - ngraph::ResultVector results{std::make_shared(operationResult)}; - function = std::make_shared(results, paramsIn, "GatherTree"); + ov::ResultVector results{std::make_shared(operationResult)}; + function = std::make_shared(results, paramsIn, "GatherTree"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto maxBeamIndex = targetInputStaticShapes.front().at(2) - 1; const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { - auto tensor = - ov::test::utils::create_and_fill_tensor(funcInputs[i].get_element_type(), - targetInputStaticShapes[i], - maxBeamIndex, - (i == 2 || i == 3) ? maxBeamIndex / 2 : 0); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = (i == 2 || i == 3) ? 
maxBeamIndex / 2 : 0; + in_data.range = maxBeamIndex; + auto tensor = ov::test::utils::create_and_fill_tensor(funcInputs[i].get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInputs[i].get_node_shared_ptr(), tensor}); } } @@ -136,62 +133,56 @@ TEST_P(GatherTreeLayerCPUTest, CompareWithRefs) { namespace { -const std::vector netPrecisions = { - ov::element::f32, - ov::element::i32 -}; - -const std::vector inputStaticShapes = {{{}, {{5, 1, 10}}}, {{}, {{1, 1, 10}}}, - {{}, {{20, 1, 10}}}, {{}, {{20, 20, 10}}}}; - -const std::vector inputDynamicShapesParameter = - {{{-1, 1, -1}, {{7, 1, 10}, {8, 1, 20}}}, {{-1, 1, {5, 10}}, {{2, 1, 7}, {5, 1, 8}}}, - {{-1, {1, 5}, 10}, {{20, 1, 10}, {17, 2, 10}}}, {{-1, -1, -1}, {{20, 20, 15}, {30, 30, 10}}}}; - -const std::vector inputDynamicShapesConstant = - {{{-1, 1, -1}, {{7, 1, 10}}}, {{-1, 1, {5, 10}}, {{2, 1, 7}}}, - {{-1, {1, 5}, 10}, {{20, 1, 10}}}, {{-1, -1, -1}, {{20, 20, 15}}}}; - -const std::vector secondaryInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER -}; - -INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUStatic, GatherTreeLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputStaticShapes), - ::testing::ValuesIn(secondaryInputTypes), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - GatherTreeLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUDynamicParameter, GatherTreeLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputDynamicShapesParameter), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - GatherTreeLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUDynamicConstant, GatherTreeLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputDynamicShapesConstant), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Precision::UNSPECIFIED), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - GatherTreeLayerCPUTest::getTestCaseName); - -} // namespace -} // namespace CPULayerTestsDefinitions +const std::vector netPrecisions = {ov::element::f32, ov::element::i32}; + +const std::vector inputStaticShapes = {{{}, {{5, 1, 10}}}, + {{}, {{1, 1, 10}}}, + {{}, {{20, 1, 10}}}, + {{}, {{20, 20, 10}}}}; + +const std::vector inputDynamicShapesParameter = {{{-1, 1, -1}, {{7, 1, 10}, {8, 1, 20}}}, + {{-1, 1, {5, 10}}, {{2, 1, 7}, {5, 1, 8}}}, + {{-1, {1, 5}, 10}, {{20, 1, 10}, {17, 2, 10}}}, + {{-1, -1, -1}, {{20, 20, 15}, {30, 30, 10}}}}; + +const std::vector inputDynamicShapesConstant = {{{-1, 1, -1}, {{7, 1, 10}}}, + {{-1, 1, {5, 10}}, {{2, 1, 7}}}, + {{-1, {1, 5}, 10}, {{20, 1, 10}}}, + {{-1, -1, -1}, {{20, 20, 15}}}}; + +const std::vector secondaryInputTypes = 
{ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER}; + +INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUStatic, + GatherTreeLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputStaticShapes), + ::testing::ValuesIn(secondaryInputTypes), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherTreeLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUDynamicParameter, + GatherTreeLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputDynamicShapesParameter), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherTreeLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUDynamicConstant, + GatherTreeLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputDynamicShapesConstant), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GatherTreeLayerCPUTest::getTestCaseName); + +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp index 278c92ed3681a1..3ef7ef621e0f3d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grid_sample.cpp @@ -2,60 +2,74 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" -#include using namespace CPUTestUtils; -using namespace ov::test; -using ov::op::v9::GridSample; - -namespace CPULayerTestsDefinitions { - -typedef std::tuple< - std::vector, // Input shapes - GridSample::InterpolationMode, // Interpolation mode - GridSample::PaddingMode, // Padding mode - bool, // Align corners - ElementType, // Data precision - ElementType, // Grid precision - CPUSpecificParams, // CPU specific params - std::map // Additional config -> GridSampleLayerTestCPUParams; + +namespace ov { +namespace test { + +typedef std::tuple, // Input shapes + ov::op::v9::GridSample::InterpolationMode, // Interpolation mode + ov::op::v9::GridSample::PaddingMode, // Padding mode + bool, // Align corners + ElementType, // Data precision + ElementType, // Grid precision + CPUSpecificParams, // CPU specific params + ov::AnyMap // Additional config + > + GridSampleLayerTestCPUParams; class GridSampleLayerTestCPU : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector inputShapes; - GridSample::InterpolationMode interpolateMode; - GridSample::PaddingMode paddingMode; + ov::op::v9::GridSample::InterpolationMode interpolateMode; + ov::op::v9::GridSample::PaddingMode paddingMode; bool alignCorners; ElementType dataPrecision, gridPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + 
ov::AnyMap additionalConfig; - std::tie(inputShapes, interpolateMode, paddingMode, alignCorners, dataPrecision, gridPrecision, cpuParams, additionalConfig) = obj.param; + std::tie(inputShapes, + interpolateMode, + paddingMode, + alignCorners, + dataPrecision, + gridPrecision, + cpuParams, + additionalConfig) = obj.param; std::ostringstream result; result << "IS=("; for (size_t i = 0lu; i < inputShapes.size(); i++) { - result << ov::test::utils::partialShape2str({inputShapes[i].first}) << (i < inputShapes.size() - 1lu ? "_" : ""); + result << ov::test::utils::partialShape2str({inputShapes[i].first}) + << (i < inputShapes.size() - 1lu ? "_" : ""); } result << ")_TS="; for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) { result << "{"; for (size_t j = 0lu; j < inputShapes.size(); j++) { - result << ov::test::utils::vec2str(inputShapes[j].second[i]) << (j < inputShapes.size() - 1lu ? "_" : ""); + result << ov::test::utils::vec2str(inputShapes[j].second[i]) + << (j < inputShapes.size() - 1lu ? "_" : ""); } result << "}_"; } - result << "interpMode=" << (interpolateMode == GridSample::InterpolationMode::BILINEAR ? "BILINEAR" : - interpolateMode == GridSample::InterpolationMode::BICUBIC ? "BICUBIC" : "NEAREST") << "_"; - result << "padMode=" << (paddingMode == GridSample::PaddingMode::ZEROS ? "ZEROS" : - paddingMode == GridSample::PaddingMode::BORDER ? "BORDER" : "REFLECTION") << "_"; + result << "interpMode=" + << (interpolateMode == ov::op::v9::GridSample::InterpolationMode::BILINEAR ? "BILINEAR" + : interpolateMode == ov::op::v9::GridSample::InterpolationMode::BICUBIC ? "BICUBIC" + : "NEAREST") + << "_"; + result << "padMode=" + << (paddingMode == ov::op::v9::GridSample::PaddingMode::ZEROS ? "ZEROS" + : paddingMode == ov::op::v9::GridSample::PaddingMode::BORDER ? "BORDER" + : "REFLECTION") + << "_"; result << "alignCorners=" << (alignCorners ? "True" : "False") << "_"; result << "dataPrc=" << dataPrecision << "_"; result << "gridPrc=" << gridPrecision; @@ -63,9 +77,9 @@ class GridSampleLayerTestCPU : public testing::WithParamInterfaceset_friendly_name("grid"); - GridSample::Attributes attributes = {alignCorners, interpolateMode, paddingMode}; - auto gridSampleNode = std::make_shared(params[0], params[1], attributes); + ov::op::v9::GridSample::Attributes attributes = {alignCorners, interpolateMode, paddingMode}; + auto gridSampleNode = std::make_shared(params[0], params[1], attributes); function = makeNgraphFunction(dataPrecision, params, gridSampleNode, "GridSampleCPU"); } @@ -116,16 +137,19 @@ class GridSampleLayerTestCPU : public testing::WithParamInterfaceget_friendly_name() == "data") { int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1u, std::multiplies()); - tensor = utils::create_and_fill_tensor( - funcInput.get_element_type(), targetInputStaticShapes[0], range, -range / 2, 1); + in_data.start_from = -range / 2; + in_data.range = range; + tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[0], in_data); } else if (funcInput.get_node()->get_friendly_name() == "grid") { int32_t range = std::max(targetInputStaticShapes[0][2], targetInputStaticShapes[0][3]) + 2; - int32_t resolution = range / 2; - tensor = utils::create_and_fill_tensor( - funcInput.get_element_type(), targetInputStaticShapes[1], range, -1, resolution == 0 ? 
1 : resolution); + in_data.start_from = -1; + in_data.range = range; + in_data.resolution = range / 2; + tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[1], in_data); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -139,31 +163,28 @@ TEST_P(GridSampleLayerTestCPU, CompareWithRefs) { namespace { -std::vector> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; -std::vector interpolateMode { - GridSample::InterpolationMode::BILINEAR, - GridSample::InterpolationMode::BICUBIC, - GridSample::InterpolationMode::NEAREST }; +std::vector interpolateMode{ov::op::v9::GridSample::InterpolationMode::BILINEAR, + ov::op::v9::GridSample::InterpolationMode::BICUBIC, + ov::op::v9::GridSample::InterpolationMode::NEAREST}; -std::vector paddingMode { - GridSample::PaddingMode::ZEROS, - GridSample::PaddingMode::BORDER, - GridSample::PaddingMode::REFLECTION }; +std::vector paddingMode{ov::op::v9::GridSample::PaddingMode::ZEROS, + ov::op::v9::GridSample::PaddingMode::BORDER, + ov::op::v9::GridSample::PaddingMode::REFLECTION}; -std::vector alignCorners { true, false }; +std::vector alignCorners{true, false}; std::vector getCPUInfo() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{}, {}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_avx()) { + } else if (ov::with_cpu_x86_avx()) { resCPUParams.push_back(CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}); } return resCPUParams; @@ -171,198 +192,180 @@ std::vector getCPUInfo() { std::vector> getStaticShapes() { // SSE42 - std::vector> result = { - { { {}, { {1, 5, 1, 1} } }, // Static shapes - { {}, { {1, 1, 1, 2} } } - }, - { { {}, { {2, 4, 7, 1} } }, // Static shapes - { {}, { {2, 1, 2, 2} } } - }, - { { {}, { {3, 3, 3, 3} } }, // Static shapes - { {}, { {3, 3, 1, 2} } } - }, - { { {}, { {4, 2, 5, 4} } }, // Static shapes - { {}, { {4, 2, 2, 2} } } - }, - { { {}, { {5, 1, 5, 5} } }, // Static shapes - { {}, { {5, 1, 5, 2} } } - }, - { { {}, { {4, 2, 4, 6} } }, // Static shapes - { {}, { {4, 2, 3, 2} } } - }, - { { {}, { {3, 3, 5, 7} } }, // Static shapes - { {}, { {3, 7, 1, 2} } } - }, - { { {}, { {2, 4, 7, 7} } }, // Static shapes - { {}, { {2, 2, 4, 2} } } - }, - { { {}, { {2, 5, 8, 8} } }, // Static shapes - { {}, { {2, 3, 3, 2} } } - }, - { { {}, { {2, 6, 9, 8} } }, // Static shapes - { {}, { {2, 2, 5, 2} } } - } - }; + std::vector> result = {{{{}, {{1, 5, 1, 1}}}, // Static shapes + {{}, {{1, 1, 1, 2}}}}, + {{{}, {{2, 4, 7, 1}}}, // Static shapes + {{}, {{2, 1, 2, 2}}}}, + {{{}, {{3, 3, 3, 3}}}, // Static shapes + {{}, {{3, 3, 1, 2}}}}, + {{{}, {{4, 2, 5, 4}}}, // Static shapes + {{}, {{4, 2, 2, 2}}}}, + {{{}, {{5, 1, 5, 5}}}, // Static shapes + {{}, {{5, 1, 5, 2}}}}, + {{{}, {{4, 2, 4, 6}}}, // Static shapes + {{}, 
{{4, 2, 3, 2}}}}, + {{{}, {{3, 3, 5, 7}}}, // Static shapes + {{}, {{3, 7, 1, 2}}}}, + {{{}, {{2, 4, 7, 7}}}, // Static shapes + {{}, {{2, 2, 4, 2}}}}, + {{{}, {{2, 5, 8, 8}}}, // Static shapes + {{}, {{2, 3, 3, 2}}}}, + {{{}, {{2, 6, 9, 8}}}, // Static shapes + {{}, {{2, 2, 5, 2}}}}}; // AVX2, AVX - if (InferenceEngine::with_cpu_x86_avx2() || InferenceEngine::with_cpu_x86_avx()) { - std::vector> tmp = { - { { {}, { {1, 7, 5, 3} } }, // Static shapes - { {}, { {1, 1, 11, 2} } } - }, - { { {}, { {2, 6, 7, 2} } }, // Static shapes - { {}, { {2, 6, 2, 2} } } - }, - { { {}, { {3, 5, 6, 3} } }, // Static shapes - { {}, { {3, 1, 13, 2} } } - }, - { { {}, { {4, 4, 5, 6} } }, // Static shapes - { {}, { {4, 2, 7, 2} } } - }, - { { {}, { {5, 3, 4, 5} } }, // Static shapes - { {}, { {5, 3, 5, 2} } } - }, - { { {}, { {4, 2, 7, 6} } }, // Static shapes - { {}, { {4, 4, 4, 2} } } - }, - { { {}, { {3, 3, 9, 7} } }, // Static shapes - { {}, { {3, 1, 17, 2} } } - }, - { { {}, { {2, 4, 9, 8} } }, // Static shapes - { {}, { {2, 19, 1, 2} } } - } - }; + if (ov::with_cpu_x86_avx2() || ov::with_cpu_x86_avx()) { + std::vector> tmp = {{{{}, {{1, 7, 5, 3}}}, // Static shapes + {{}, {{1, 1, 11, 2}}}}, + {{{}, {{2, 6, 7, 2}}}, // Static shapes + {{}, {{2, 6, 2, 2}}}}, + {{{}, {{3, 5, 6, 3}}}, // Static shapes + {{}, {{3, 1, 13, 2}}}}, + {{{}, {{4, 4, 5, 6}}}, // Static shapes + {{}, {{4, 2, 7, 2}}}}, + {{{}, {{5, 3, 4, 5}}}, // Static shapes + {{}, {{5, 3, 5, 2}}}}, + {{{}, {{4, 2, 7, 6}}}, // Static shapes + {{}, {{4, 4, 4, 2}}}}, + {{{}, {{3, 3, 9, 7}}}, // Static shapes + {{}, {{3, 1, 17, 2}}}}, + {{{}, {{2, 4, 9, 8}}}, // Static shapes + {{}, {{2, 19, 1, 2}}}}}; result.insert(result.end(), tmp.begin(), tmp.end()); } // AVX512 - if (InferenceEngine::with_cpu_x86_avx512f()) { - std::vector> tmp = { - { { {}, { {1, 7, 2, 9} } }, // Static shapes - { {}, { {1, 4, 5, 2} } } - }, - { { {}, { {2, 6, 3, 10} } }, // Static shapes - { {}, { {2, 3, 7, 2} } }, - }, - { { {}, { {3, 5, 2, 11} } }, // Static shapes - { {}, { {3, 4, 6, 2} } } - }, - { { {}, { {4, 4, 4, 12} } }, // Static shapes - { {}, { {4, 5, 5, 2} } }, - }, - { { {}, { {5, 3, 2, 13} } }, // Static shapes - { {}, { {5, 1, 31, 2} } }, - }, - { { {}, { {4, 3, 5, 14} } }, // Static shapes - { {}, { {4, 4, 8, 2} } }, - }, - { { {}, { {3, 2, 2, 15} } }, // Static shapes - { {}, { {3, 33, 1, 2} } }, - }, - { { {}, { {2, 1, 6, 16} } }, // Static shapes - { {}, { {2, 8, 8, 2} } }, - }, - { { {}, { {2, 3, 7, 17} } }, // Static shapes - { {}, { {2, 9, 9, 2} } }, - } - }; + if (ov::with_cpu_x86_avx512f()) { + std::vector> tmp = {{{{}, {{1, 7, 2, 9}}}, // Static shapes + {{}, {{1, 4, 5, 2}}}}, + { + {{}, {{2, 6, 3, 10}}}, // Static shapes + {{}, {{2, 3, 7, 2}}}, + }, + {{{}, {{3, 5, 2, 11}}}, // Static shapes + {{}, {{3, 4, 6, 2}}}}, + { + {{}, {{4, 4, 4, 12}}}, // Static shapes + {{}, {{4, 5, 5, 2}}}, + }, + { + {{}, {{5, 3, 2, 13}}}, // Static shapes + {{}, {{5, 1, 31, 2}}}, + }, + { + {{}, {{4, 3, 5, 14}}}, // Static shapes + {{}, {{4, 4, 8, 2}}}, + }, + { + {{}, {{3, 2, 2, 15}}}, // Static shapes + {{}, {{3, 33, 1, 2}}}, + }, + { + {{}, {{2, 1, 6, 16}}}, // Static shapes + {{}, {{2, 8, 8, 2}}}, + }, + { + {{}, {{2, 3, 7, 17}}}, // Static shapes + {{}, {{2, 9, 9, 2}}}, + }}; result.insert(result.end(), tmp.begin(), tmp.end()); } return result; } -INSTANTIATE_TEST_SUITE_P(smoke_static, GridSampleLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(getStaticShapes()), - ::testing::ValuesIn(interpolateMode), - ::testing::ValuesIn(paddingMode), - 
::testing::ValuesIn(alignCorners), - ::testing::ValuesIn({ElementType::f32, ElementType::i32}), - ::testing::ValuesIn({ElementType::f32}), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GridSampleLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_static_1, GridSampleLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(getStaticShapes()), - ::testing::ValuesIn(interpolateMode), - ::testing::ValuesIn(paddingMode), - ::testing::ValuesIn(alignCorners), - ::testing::ValuesIn({ElementType::bf16, ElementType::i8}), - ::testing::ValuesIn({ElementType::f32, ElementType::bf16}), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GridSampleLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_static_2, GridSampleLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(getStaticShapes()), - ::testing::ValuesIn(interpolateMode), - ::testing::ValuesIn(paddingMode), - ::testing::ValuesIn(alignCorners), - ::testing::ValuesIn({ElementType::f32}), - ::testing::ValuesIn({ElementType::bf16}), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GridSampleLayerTestCPU::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_static, + GridSampleLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(getStaticShapes()), + ::testing::ValuesIn(interpolateMode), + ::testing::ValuesIn(paddingMode), + ::testing::ValuesIn(alignCorners), + ::testing::ValuesIn({ElementType::f32, ElementType::i32}), + ::testing::ValuesIn({ElementType::f32}), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GridSampleLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_static_1, + GridSampleLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(getStaticShapes()), + ::testing::ValuesIn(interpolateMode), + ::testing::ValuesIn(paddingMode), + ::testing::ValuesIn(alignCorners), + ::testing::ValuesIn({ElementType::bf16, ElementType::i8}), + ::testing::ValuesIn({ElementType::f32, ElementType::bf16}), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GridSampleLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_static_2, + GridSampleLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(getStaticShapes()), + ::testing::ValuesIn(interpolateMode), + ::testing::ValuesIn(paddingMode), + ::testing::ValuesIn(alignCorners), + ::testing::ValuesIn({ElementType::f32}), + ::testing::ValuesIn({ElementType::bf16}), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GridSampleLayerTestCPU::getTestCaseName); const std::vector> dynamicInSapes = { - { { { ov::Dimension(1, 15), -1, -1, -1 }, // Dynamic shape 0 - { {1, 1, 1, 1}, {6, 3, 1, 2}, {4, 5, 3, 1}, {2, 7, 2, 2} } }, // Target shapes - { { ov::Dimension(1, 16), -1, -1, -1 }, // Dynamic shape 1 - { {1, 1, 1, 2}, {6, 2, 2, 2}, {4, 1, 3, 2}, {2, 1, 2, 2} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {1, 2, 1, 5}, {3, 4, 2, 3}, {5, 6, 7, 1}, {7, 8, 2, 4} } }, // Target shapes - { { -1, -1, -1, 2 }, // Dynamic shape 1 - { {1, 2, 4, 2}, {3, 1, 7, 2}, {5, 2, 3, 2}, {7, 1, 5, 2} } } }, // Target shapes - { { { ov::Dimension(2, 15), -1, -1, -1 }, // Dynamic shape 0 - { {8, 3, 3, 3}, {6, 5, 2, 5}, {4, 7, 1, 11}, {2, 9, 3, 4} } }, // Target shapes - { { -1, 3, 7, 2 }, // Dynamic shape 1 - { {8, 3, 7, 2}, {6, 3, 7, 2}, {4, 3, 7, 2}, {2, 3, 7, 2} } } }, // Target shapes - { { { 3, 4, 4, 5 }, // Dynamic shape 0 - { {3, 4, 4, 5}, {3, 4, 4, 5}, {3, 
4, 4, 5}, {3, 4, 4, 5} } }, // Target shapes - { { -1, -1, -1, 2 }, // Dynamic shape 1 - { {3, 3, 4, 2}, {3, 1, 11, 2}, {3, 2, 5, 2}, {3, 3, 3, 2} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {1, 2, 1, 13}, {3, 4, 7, 2}, {5, 6, 3, 5}, {7, 8, 4, 4} } }, // Target shapes - { { -1, -1, -1, -1 }, // Dynamic shape 1 - { {1, 4, 4, 2}, {3, 3, 5, 2}, {5, 2, 7, 2}, {7, 1, 13, 2} } } }, // Target shapes - { { { -1, -1, -1, -1 }, // Dynamic shape 0 - { {2, 11, 1, 17}, {4, 9, 6, 3}, {6, 7, 7, 3}, {8, 3, 2, 11} } }, // Target shapes - { { -1, -1, -1, 2 }, // Dynamic shape 1 - { {2, 5, 4, 2}, {4, 1, 19, 2}, {6, 6, 3, 2}, {8, 1, 17, 2} } } }, // Target shapes - { { { 3, -1, -1, -1 }, // Dynamic shape 0 - { {3, 2, 1, 23}, {3, 4, 3, 8}, {3, 6, 5, 5}, {3, 8, 31, 1} } }, // Target shapes - { { -1, -1, -1, 2 }, // Dynamic shape 1 - { {3, 31, 1, 2}, {3, 6, 4, 2}, {3, 23, 1, 2}, {3, 11, 2, 2} } } }, // Target shapes - { { { -1, 3, -1, -1 }, // Dynamic shape 0 - { {8, 3, 8, 4}, {6, 3, 33, 1}, {4, 3, 8, 6}, {2, 3, 8, 8} } }, // Target shapes - { { -1, -1, -1, 2 }, // Dynamic shape 1 - { {8, 8, 8, 2}, {6, 8, 7, 2}, {4, 1, 33, 2}, {2, 4, 8, 2} } } } // Target shapes + {{{ov::Dimension(1, 15), -1, -1, -1}, // Dynamic shape 0 + {{1, 1, 1, 1}, {6, 3, 1, 2}, {4, 5, 3, 1}, {2, 7, 2, 2}}}, // Target shapes + {{ov::Dimension(1, 16), -1, -1, -1}, // Dynamic shape 1 + {{1, 1, 1, 2}, {6, 2, 2, 2}, {4, 1, 3, 2}, {2, 1, 2, 2}}}}, // Target shapes + {{{-1, -1, -1, -1}, // Dynamic shape 0 + {{1, 2, 1, 5}, {3, 4, 2, 3}, {5, 6, 7, 1}, {7, 8, 2, 4}}}, // Target shapes + {{-1, -1, -1, 2}, // Dynamic shape 1 + {{1, 2, 4, 2}, {3, 1, 7, 2}, {5, 2, 3, 2}, {7, 1, 5, 2}}}}, // Target shapes + {{{ov::Dimension(2, 15), -1, -1, -1}, // Dynamic shape 0 + {{8, 3, 3, 3}, {6, 5, 2, 5}, {4, 7, 1, 11}, {2, 9, 3, 4}}}, // Target shapes + {{-1, 3, 7, 2}, // Dynamic shape 1 + {{8, 3, 7, 2}, {6, 3, 7, 2}, {4, 3, 7, 2}, {2, 3, 7, 2}}}}, // Target shapes + {{{3, 4, 4, 5}, // Dynamic shape 0 + {{3, 4, 4, 5}, {3, 4, 4, 5}, {3, 4, 4, 5}, {3, 4, 4, 5}}}, // Target shapes + {{-1, -1, -1, 2}, // Dynamic shape 1 + {{3, 3, 4, 2}, {3, 1, 11, 2}, {3, 2, 5, 2}, {3, 3, 3, 2}}}}, // Target shapes + {{{-1, -1, -1, -1}, // Dynamic shape 0 + {{1, 2, 1, 13}, {3, 4, 7, 2}, {5, 6, 3, 5}, {7, 8, 4, 4}}}, // Target shapes + {{-1, -1, -1, -1}, // Dynamic shape 1 + {{1, 4, 4, 2}, {3, 3, 5, 2}, {5, 2, 7, 2}, {7, 1, 13, 2}}}}, // Target shapes + {{{-1, -1, -1, -1}, // Dynamic shape 0 + {{2, 11, 1, 17}, {4, 9, 6, 3}, {6, 7, 7, 3}, {8, 3, 2, 11}}}, // Target shapes + {{-1, -1, -1, 2}, // Dynamic shape 1 + {{2, 5, 4, 2}, {4, 1, 19, 2}, {6, 6, 3, 2}, {8, 1, 17, 2}}}}, // Target shapes + {{{3, -1, -1, -1}, // Dynamic shape 0 + {{3, 2, 1, 23}, {3, 4, 3, 8}, {3, 6, 5, 5}, {3, 8, 31, 1}}}, // Target shapes + {{-1, -1, -1, 2}, // Dynamic shape 1 + {{3, 31, 1, 2}, {3, 6, 4, 2}, {3, 23, 1, 2}, {3, 11, 2, 2}}}}, // Target shapes + {{{-1, 3, -1, -1}, // Dynamic shape 0 + {{8, 3, 8, 4}, {6, 3, 33, 1}, {4, 3, 8, 6}, {2, 3, 8, 8}}}, // Target shapes + {{-1, -1, -1, 2}, // Dynamic shape 1 + {{8, 8, 8, 2}, {6, 8, 7, 2}, {4, 1, 33, 2}, {2, 4, 8, 2}}}} // Target shapes }; -INSTANTIATE_TEST_SUITE_P(smoke_dynamic, GridSampleLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(dynamicInSapes), - ::testing::ValuesIn(interpolateMode), - ::testing::ValuesIn(paddingMode), - ::testing::ValuesIn(alignCorners), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - 
GridSampleLayerTestCPU::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_dynamic, GridSampleLayerTestCPU, - ::testing::Combine( - ::testing::ValuesIn(dynamicInSapes), - ::testing::ValuesIn(interpolateMode), - ::testing::ValuesIn(paddingMode), - ::testing::ValuesIn(alignCorners), - ::testing::ValuesIn({ElementType::bf16, ElementType::i32}), - ::testing::ValuesIn({ElementType::bf16}), - ::testing::ValuesIn(getCPUInfo()), - ::testing::Values(additionalConfig[0])), - GridSampleLayerTestCPU::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_dynamic, + GridSampleLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(dynamicInSapes), + ::testing::ValuesIn(interpolateMode), + ::testing::ValuesIn(paddingMode), + ::testing::ValuesIn(alignCorners), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GridSampleLayerTestCPU::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_dynamic, + GridSampleLayerTestCPU, + ::testing::Combine(::testing::ValuesIn(dynamicInSapes), + ::testing::ValuesIn(interpolateMode), + ::testing::ValuesIn(paddingMode), + ::testing::ValuesIn(alignCorners), + ::testing::ValuesIn({ElementType::bf16, ElementType::i32}), + ::testing::ValuesIn({ElementType::bf16}), + ::testing::ValuesIn(getCPUInfo()), + ::testing::Values(additionalConfig[0])), + GridSampleLayerTestCPU::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp index 5047d3a615f602..75c93393e69b2e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/grn.cpp @@ -2,41 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; +namespace ov { +namespace test { -namespace CPULayerTestsDefinitions { - -using GRNCPUTestParams = typename std::tuple< - ov::element::Type, // Network precision - InferenceEngine::Precision, // Input precision - InferenceEngine::Precision, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - InputShape, // Input shape - float, // Bias - std::string>; // Device name +using GRNCPUTestParams = typename std::tuple; // Device name class GRNLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { ov::element::Type netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; + ov::element::Type inPrc, outPrc; InputShape inputShape; float bias; std::string targetDevice; - std::tie(netPrecision, inPrc, outPrc, - inLayout, outLayout, - inputShape, - bias, - targetDevice) = obj.param; + std::tie(netPrecision, inPrc, outPrc, inputShape, bias, targetDevice) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; @@ -45,11 +37,9 @@ class GRNLayerCPUTest : public testing::WithParamInterface, result << 
ov::test::utils::vec2str(item) << "_";
         }
         result << "netPRC=" << netPrecision.get_type_name() << "_";
-        result << "inPRC=" << inPrc.name() << "_";
-        result << "outPRC=" << outPrc.name() << "_";
-        result << "inL=" << inLayout << "_";
-        result << "outL=" << outLayout << "_";
-        result << "bias=" << bias << "_";
+        result << "inPRC=" << inPrc.get_type_name() << "_";
+        result << "outPRC=" << outPrc.get_type_name() << "_";
+        result << "bias=" << bias << "_";
         result << "trgDev=" << targetDevice;
 
         return result.str();
@@ -58,12 +48,11 @@ class GRNLayerCPUTest : public testing::WithParamInterface<GRNCPUTestParams>,
 protected:
     void SetUp() override {
         ov::element::Type netPrecision;
-        InferenceEngine::Precision inPrc, outPrc;
-        InferenceEngine::Layout inLayout, outLayout;
+        ov::element::Type inPrc, outPrc;
         InputShape inputShape;
         float bias;
-        std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, bias, targetDevice) = GetParam();
+        std::tie(netPrecision, inPrc, outPrc, inputShape, bias, targetDevice) = GetParam();
 
         init_input_shapes({inputShape});
 
@@ -71,9 +60,9 @@ class GRNLayerCPUTest : public testing::WithParamInterface<GRNCPUTestParams>,
         for (auto&& shape : inputDynamicShapes)
            paramsIn.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, shape));
 
-        const auto grn = std::make_shared<ngraph::opset1::GRN>(paramsIn[0], bias);
-        const ngraph::ResultVector results{std::make_shared(grn)};
-        function = std::make_shared<ngraph::Function>(results, paramsIn, "Grn");
+        const auto grn = std::make_shared<ov::op::v0::GRN>(paramsIn[0], bias);
+        const ov::ResultVector results{std::make_shared<ov::op::v0::Result>(grn)};
+        function = std::make_shared<ov::Model>(results, paramsIn, "Grn");
     }
 };
 
@@ -83,43 +72,36 @@ TEST_P(GRNLayerCPUTest, CompareWithRefs) {
 
 namespace {
 
-const std::vector<ov::element::Type> netPrecisions = {
-    ov::element::bf16,
-    ov::element::f16,
-    ov::element::f32
-};
+const std::vector<ov::element::Type> netPrecisions = {ov::element::bf16, ov::element::f16, ov::element::f32};
 
 const std::vector<float> biases = {1e-6f, 0.33f, 1.1f, 2.25f, 100.25f};
 
 const std::vector<InputShape> dataInputStaticShapes = {{{}, {{16, 24}}}, {{}, {{3, 16, 24}}}, {{}, {{1, 3, 30, 30}}}};
 
-const std::vector<InputShape> dataInputDynamicShapes =
-    {{{-1, -1}, {{5, 17}, {10, 3}}}, {{3, {10, 12}, -1}, {{3, 12, 25}, {3, 10, 10}}},
-     {{2, -1, -1, {5, 10}}, {{2, 17, 20, 7}, {2, 10, 12, 5}}}};
-
-INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUStatic, GRNLayerCPUTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::ValuesIn(dataInputStaticShapes),
-                            ::testing::ValuesIn(biases),
-                            ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                        GRNLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUDynamic, GRNLayerCPUTest,
-                        ::testing::Combine(
-                            ::testing::ValuesIn(netPrecisions),
-                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Precision::UNSPECIFIED),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::Values(InferenceEngine::Layout::ANY),
-                            ::testing::ValuesIn(dataInputDynamicShapes),
-                            ::testing::ValuesIn(biases),
-                            ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                        GRNLayerCPUTest::getTestCaseName);
-
-}  // namespace
-}  // namespace CPULayerTestsDefinitions
+const std::vector<InputShape> dataInputDynamicShapes = {{{-1, -1}, {{5, 17}, {10, 3}}},
+                                                        {{3, {10, 12}, -1}, {{3, 12, 25}, {3, 10, 10}}},
+                                                        {{2, -1, -1, {5, 10}}, {{2, 17, 20, 7}, {2, 10, 12, 5}}}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUStatic,
+                         GRNLayerCPUTest,
+                         
::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::ValuesIn(dataInputStaticShapes), + ::testing::ValuesIn(biases), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GRNLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUDynamic, + GRNLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::undefined), + ::testing::ValuesIn(dataInputDynamicShapes), + ::testing::ValuesIn(biases), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + GRNLayerCPUTest::getTestCaseName); + +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp index c3a140384d571b..fba83b23446dbc 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/group_convolution.cpp @@ -2,45 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "shared_test_classes/single_op/group_convolution.hpp" + +#include "common_test_utils/node_builders/group_convolution.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include -#include "test_utils/cpu_test_utils.hpp" #include "test_utils/convolution_params.hpp" -#include "test_utils/fusing_test_utils.hpp" +#include "test_utils/cpu_test_utils.hpp" #include "test_utils/filter_cpu_info.hpp" -#include "common_test_utils/node_builders/group_convolution.hpp" +#include "test_utils/fusing_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; +namespace ov { +namespace test { -namespace CPULayerTestsDefinitions { +using groupConvSpecificParams = ov::test::groupConvSpecificParams; -using groupConvSpecificParams = LayerTestsDefinitions::groupConvSpecificParams; -using Config = std::map; +typedef std::tuple + groupConvLayerTestsParamsSet; -typedef std::tuple< - groupConvSpecificParams, - ElementType, - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shapes - LayerTestsUtils::TargetDevice> groupConvLayerTestParamsSet; - -typedef std::tuple< - groupConvLayerTestParamsSet, - CPUSpecificParams, - fusingSpecificParams, - Config > groupConvLayerCPUTestParamsSet; +typedef std::tuple + groupConvLayerCPUTestParamsSet; class GroupConvolutionLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CpuTestWithFusing { + virtual public SubgraphBaseTest, + public CpuTestWithFusing { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - groupConvLayerTestParamsSet basicParamsSet; + groupConvLayerTestsParamsSet basicParamsSet; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; - Config additionalConfig; + ov::AnyMap additionalConfig; std::tie(basicParamsSet, cpuParams, fusingParams, additionalConfig) = obj.param; groupConvSpecificParams groupConvParams; @@ -49,15 +45,15 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels, numGroups; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams; std::ostringstream result; result << "IS="; - result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; + 
result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; result << "TS=("; for (const auto& shape : inputShape.second) { result << ov::test::utils::vec2str(shape) << "_"; @@ -82,7 +78,7 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface modifyGraph(const ngraph::element::Type &ngPrc, - ngraph::ParameterVector ¶ms, - const std::shared_ptr &lastNode) override { + std::shared_ptr modifyGraph(const ov::element::Type& ngPrc, + ov::ParameterVector& params, + const std::shared_ptr& lastNode) override { auto retNode = CpuTestWithFusing::modifyGraph(ngPrc, params, lastNode); - std::shared_ptr opToShapeInfer = nullptr; + std::shared_ptr opToShapeInfer = nullptr; for (auto& targetShapes : targetStaticShapes) { for (size_t i = targetShapes.size(); i < params.size(); ++i) { - const auto &shape = params[i]->get_output_partial_shape(0); + const auto& shape = params[i]->get_output_partial_shape(0); if (shape.is_static()) { targetShapes.push_back(shape.get_shape()); } else { // It is assumed that in such tests we have second parameter only if sum fusion is tested. - // Considering this fact, we need to set the appropriate static shape for the second term of the sum operation, and - // it has to match the convolution output shape. So the most suitable solution here is to perform shape inference on the - // convolution node + // Considering this fact, we need to set the appropriate static shape for the second term of the sum + // operation, and it has to match the convolution output shape. So the most suitable solution here + // is to perform shape inference on the convolution node if (!opToShapeInfer) { - ngraph::OutputVector inputsForShapeInfer; + ov::OutputVector inputsForShapeInfer; for (size_t j = 0; j < lastNode->get_input_size(); j++) { - if (ngraph::is_type(lastNode->get_input_node_ptr(j))) { + if (ov::is_type(lastNode->get_input_node_ptr(j))) { inputsForShapeInfer.push_back(lastNode->get_input_node_shared_ptr(j)); } else { - inputsForShapeInfer.push_back(std::make_shared(lastNode->get_input_element_type(j), - lastNode->get_input_partial_shape(j))); + inputsForShapeInfer.push_back( + std::make_shared(lastNode->get_input_element_type(j), + lastNode->get_input_partial_shape(j))); } } opToShapeInfer = lastNode->clone_with_new_inputs(inputsForShapeInfer); @@ -159,10 +156,10 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterfaceGetParam(); configuration.insert(additionalConfig.begin(), additionalConfig.end()); @@ -171,25 +168,25 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterfacegetFusedOpsNames() == "Add(PerChannel)"; + isBias = postOpMgrPtr->getFusedOpsNames() == "Add(PerChannel)"; groupConvSpecificParams groupConvParams; InputShape inputShape; - auto netType = ElementType::undefined; + auto netType = ElementType::undefined; std::tie(groupConvParams, netType, inType, outType, inputShape, targetDevice) = basicParamsSet; init_input_shapes({inputShape}); - if (configuration.count(PluginConfigParams::KEY_ENFORCE_BF16) && - PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16].as()) { - selectedType += "_BF16"; + if (configuration.count(ov::hint::inference_precision.name()) && + ov::element::bf16 == configuration[ov::hint::inference_precision.name()].as()) { + selectedType += "_bf16"; rel_threshold = 1e-2f; } else { selectedType = makeSelectedTypeStr(selectedType, netType); } - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector 
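// The shape-inference trick described in the comment above, in isolation: give
// a clone of the convolution static-shaped inputs and ask the clone for its
// output shape. A minimal sketch, assuming the inputs carry static shapes at
// this point; `conv` stands in for the fused node:
ov::OutputVector staticInputs;
for (size_t i = 0; i < conv->get_input_size(); ++i)
    staticInputs.push_back(std::make_shared<ov::op::v0::Parameter>(conv->get_input_element_type(i),
                                                                   conv->get_input_partial_shape(i)));
auto probe = conv->clone_with_new_inputs(staticInputs);
probe->validate_and_infer_types();                 // shape inference runs on the clone
const ov::Shape sumTermShape = probe->get_output_shape(0);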
kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels, numGroups; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams; @@ -198,8 +195,16 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface(netType, shape)); - auto groupConv = ov::test::utils::make_group_convolution(params[0], netType, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, numGroups); + auto groupConv = ov::test::utils::make_group_convolution(params[0], + netType, + kernel, + stride, + padBegin, + padEnd, + dilation, + padType, + convOutChannels, + numGroups); function = makeNgraphFunction(netType, params, groupConv, "groupConvolution"); } }; @@ -213,9 +218,9 @@ TEST_P(ExpectFallbackGroupConvolutionLayerCPUTest, CompareWithRefs) { } ASSERT_TRUE(!selectedType.empty()) << "Node type is not defined."; auto function = compiledModel.get_runtime_model(); - for (const auto &node : function->get_ops()) { - const auto & rtInfo = node->get_rt_info(); - auto getExecValue = [&rtInfo](const std::string & paramName) -> std::string { + for (const auto& node : function->get_ops()) { + const auto& rtInfo = node->get_rt_info(); + auto getExecValue = [&rtInfo](const std::string& paramName) -> std::string { auto it = rtInfo.find(paramName); OPENVINO_ASSERT(rtInfo.end() != it); return it->second.as(); @@ -238,7 +243,8 @@ TEST_P(GroupConvolutionLayerCPUTest, CompareWithRefs) { namespace { /* GROUP CONV TEST UTILS */ -std::vector filterParamsSetForDevice(std::vector paramsSet) { +std::vector filterParamsSetForDevice( + std::vector paramsSet) { std::vector resParamsSet; const int cpuParamsIndex = 1; const int selectedTypeIndex = 3; @@ -248,20 +254,20 @@ std::vector filterParamsSetForDevice(std::vector auto cpuParams = std::get(param); auto selectedTypeStr = std::get(cpuParams); - if (selectedTypeStr.find("jit") != std::string::npos && !with_cpu_x86_sse42()) + if (selectedTypeStr.find("jit") != std::string::npos && !ov::with_cpu_x86_sse42()) continue; - if (selectedTypeStr.find("sse42") != std::string::npos && !with_cpu_x86_sse42()) + if (selectedTypeStr.find("sse42") != std::string::npos && !ov::with_cpu_x86_sse42()) continue; - if (selectedTypeStr.find("avx2") != std::string::npos && !with_cpu_x86_avx2()) + if (selectedTypeStr.find("avx2") != std::string::npos && !ov::with_cpu_x86_avx2()) continue; - if (selectedTypeStr.find("avx512") != std::string::npos && !with_cpu_x86_avx512f()) + if (selectedTypeStr.find("avx512") != std::string::npos && !ov::with_cpu_x86_avx512f()) continue; - if (selectedTypeStr.find("amx") != std::string::npos && !with_cpu_x86_avx512_core_amx()) + if (selectedTypeStr.find("amx") != std::string::npos && !ov::with_cpu_x86_avx512_core_amx()) continue; auto additionalConfig = std::get(param); - if (additionalConfig.count(PluginConfigParams::KEY_ENFORCE_BF16) && - PluginConfigParams::YES == additionalConfig[PluginConfigParams::KEY_ENFORCE_BF16] && - !with_cpu_x86_bfloat16()) { + if (additionalConfig.count(ov::hint::inference_precision.name()) && + ov::element::bf16 == additionalConfig[ov::hint::inference_precision.name()].as() && + !ov::with_cpu_x86_bfloat16()) { continue; } resParamsSet.push_back(param); @@ -272,7 +278,7 @@ std::vector filterParamsSetForDevice(std::vector std::vector filterCPUInfoForDeviceSupportBF16(std::vector CPUParams) { std::vector resParamsSet; - if (with_cpu_x86_bfloat16()) { + if (ov::with_cpu_x86_bfloat16()) { return filterCPUInfoForDevice(CPUParams); } return resParamsSet; @@ 
-280,1343 +286,1133 @@ std::vector filterCPUInfoForDeviceSupportBF16(std::vector fusingParamsSet { - emptyFusingSpec, - // eltwise - fusingRelu, - fusingPRelu1D, - // depthwise - fusingReluScaleShift, - // fake quantize - fusingFakeQuantizePerTensorRelu, - fusingFakeQuantizePerChannelRelu, - // sum - fusingSumEluFQ, - fusingSum -}; - -const std::vector fusingParamsSetBF16{ - emptyFusingSpec, - // eltwise - fusingRelu, - // depthwise - fusingReluScaleShift, - // sum - fusingSum -}; - +const std::vector fusingParamsSet{emptyFusingSpec, + // eltwise + fusingRelu, + fusingPRelu1D, + // depthwise + fusingReluScaleShift, + // fake quantize + fusingFakeQuantizePerTensorRelu, + fusingFakeQuantizePerChannelRelu, + // sum + fusingSumEluFQ, + fusingSum}; + +const std::vector fusingParamsSetBF16{emptyFusingSpec, + // eltwise + fusingRelu, + // depthwise + fusingReluScaleShift, + // sum + fusingSum}; /* ============= GroupConvolution params (planar layout) ============= */ -const SizeVector numOutChannels_Gemm = {6}; -const SizeVector numGroups_Gemm = {2, 3}; +const std::vector numOutChannels_Gemm = {6}; +const std::vector numGroups_Gemm = {2, 3}; /* ============= GroupConvolution params (blocked layout) ============= */ -const SizeVector numOutChannels_Blocked = {64}; -const SizeVector numGroups_Blocked = {2, 4}; +const std::vector numOutChannels_Blocked = {64}; +const std::vector numGroups_Blocked = {2, 4}; /* ============= GroupConvolution params (DW) ============= */ -const SizeVector numOutChannels_DW = {32}; -const SizeVector numGroups_DW = {32}; +const std::vector numOutChannels_DW = {32}; +const std::vector numGroups_DW = {32}; /* ============= GroupConvolution params (1D) ============= */ -const std::vector kernels1d = { {3}, {1} }; -const std::vector strides1d = { {1}, {2} }; -const std::vector> padBegins1d = { {0}, {1} }; -const std::vector> padEnds1d = { {0} }; -const std::vector dilations1d = { {1}, {2} }; +const std::vector> kernels1d = {{3}, {1}}; +const std::vector> strides1d = {{1}, {2}}; +const std::vector> padBegins1d = {{0}, {1}}; +const std::vector> padEnds1d = {{0}}; +const std::vector> dilations1d = {{1}, {2}}; /* ============= GroupConvolution params (2D) ============= */ -const std::vector kernels2d = {{3, 3}, {1, 1}}; -const std::vector strides2d = {{1, 1}, {2, 2}}; +const std::vector> kernels2d = {{3, 3}, {1, 1}}; +const std::vector> strides2d = {{1, 1}, {2, 2}}; const std::vector> padBegins2d = {{0, 0}, {1, 1}}; const std::vector> padEnds2d = {{0, 0}}; -const std::vector dilations2d = {{1, 1}, {2, 2}}; +const std::vector> dilations2d = {{1, 1}, {2, 2}}; /* ============= GroupConvolution params (3D) ============= */ -const std::vector kernels3d = {{3, 3, 3}, {1, 1, 1}}; -const std::vector strides3d = {{1, 1, 1}, {2, 2, 2}}; +const std::vector> kernels3d = {{3, 3, 3}, {1, 1, 1}}; +const std::vector> strides3d = {{1, 1, 1}, {2, 2, 2}}; const std::vector> padBegins3d = {{0, 0, 0}, {1, 1, 1}}; const std::vector> padEnds3d = {{0, 0, 0}}; -const std::vector dilations3d = {{1, 1, 1}, {2, 2, 2}}; +const std::vector> dilations3d = {{1, 1, 1}, {2, 2, 2}}; /* ============= */ - /* INSTANCES */ /* ============= GroupConvolution (GEMM 1D) ============= */ -const auto groupConvParams_ExplicitPadding_Gemm_1D = ::testing::Combine( - ::testing::ValuesIn(kernels1d), - ::testing::ValuesIn(strides1d), - ::testing::ValuesIn(padBegins1d), - ::testing::ValuesIn(padEnds1d), - ::testing::ValuesIn(dilations1d), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::ValuesIn(numGroups_Gemm), - 
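// Sizing note for the generators below: each groupConvParams_* Combine yields
// the cartesian product of its value lists. For the 1D GEMM case that is
// 2 kernels * 2 strides * 2 padBegins * 1 padEnd * 2 dilations * 1 channel
// count * 2 group counts * 1 pad type = 32 tuples, and every tuple is further
// multiplied by the precision, shape, CPU-info and fusing axes at
// INSTANTIATE_TEST_SUITE_P time.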
::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_Gemm_1D = { - conv_gemm_1D, - conv_gemm_1D_nspc -}; +const auto groupConvParams_ExplicitPadding_Gemm_1D = ::testing::Combine(::testing::ValuesIn(kernels1d), + ::testing::ValuesIn(strides1d), + ::testing::ValuesIn(padBegins1d), + ::testing::ValuesIn(padEnds1d), + ::testing::ValuesIn(dilations1d), + ::testing::ValuesIn(numOutChannels_Gemm), + ::testing::ValuesIn(numGroups_Gemm), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_Gemm_1D = {conv_gemm_1D, conv_gemm_1D_nspc}; + +std::vector inShapesGemm1D = {{{}, {{2, 12, 7}}}, + {// dynamic shape + {{1, 200}, 12, {1, 200}}, + {// target static shapes + {2, 12, 7}, + {1, 12, 5}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_Gemm_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm1D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_1D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -std::vector inShapesGemm1D = { - {{}, {{ 2, 12, 7 }}}, - { - //dynamic shape - {{1, 200}, 12, {1, 200}}, - { //target static shapes - { 2, 12, 7 }, - { 1, 12, 5 } - } - } -}; +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_Gemm_with_bias_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm1D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_1D)), + ::testing::Values(fusingAddPerChannel), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_Gemm_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_1D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_Gemm_with_bias_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_1D)), - ::testing::Values(fusingAddPerChannel), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_Gemm_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_1D, - ::testing::Values(ElementType::f32), - 
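// How to read the InputShape entries above: the first member is a
// PartialShape (-1 = fully dynamic dimension, {1, 200} = bounded interval),
// the second lists the concrete shapes actually fed through inference; each
// static shape must satisfy the bounds. A standalone reading aid:
ov::test::InputShape readingAid{
    {{1, 200}, 12, {1, 200}},    // batch in [1,200], 12 channels, width in [1,200]
    {{2, 12, 7}, {1, 12, 5}}};   // target static shapes exercised by the test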
::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_1D})), // todo: [AV] what about conv_gemm_1D_nspc? - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_Gemm_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm1D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice( + {conv_gemm_1D})), // todo: [AV] what about conv_gemm_1D_nspc? + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (GEMM 2D) ============= */ -const auto groupConvParams_ExplicitPadding_Gemm_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::ValuesIn(numGroups_Gemm), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_Gemm_2D = { - conv_gemm_2D, - conv_gemm_2D_nspc -}; - -std::vector inShapesGemm2D = { - {{}, {{ 2, 12, 7, 7 }}}, - { - //dynamic shape - {{1, 200}, 12, -1, {1, 200}}, - { //target static shapes - { 2, 12, 7, 7 }, - { 1, 12, 5, 5 } - } - } -}; +const auto groupConvParams_ExplicitPadding_Gemm_2D = ::testing::Combine(::testing::ValuesIn(kernels2d), + ::testing::ValuesIn(strides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_Gemm), + ::testing::ValuesIn(numGroups_Gemm), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_Gemm_2D = {conv_gemm_2D, conv_gemm_2D_nspc}; + +std::vector inShapesGemm2D = {{{}, {{2, 12, 7, 7}}}, + {// dynamic shape + {{1, 200}, 12, -1, {1, 200}}, + {// target static shapes + {2, 12, 7, 7}, + {1, 12, 5, 5}}}}; + +std::vector inShapesGemm2D_cache = {{{}, {{2, 12, 7, 7}}}, + {// dynamic shape + {{1, 200}, 12, -1, {1, 200}}, + {// target static shapes + {1, 12, 5, 5}, + {1, 12, 7, 7}, + {1, 12, 5, 5}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_Gemm_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D_cache), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_2D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -std::vector inShapesGemm2D_cache = { - {{}, {{ 2, 12, 7, 7 }}}, - { - //dynamic shape - {{1, 200}, 12, -1, {1, 200}}, - { //target static shapes - { 1, 12, 5, 5 }, - { 1, 12, 7, 7 }, - { 1, 12, 5, 5 } - } - } -}; +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_Gemm_with_bias_FP32, + GroupConvolutionLayerCPUTest, + 
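// The BF16 suites differ from the FP32 ones only in the trailing config
// argument: cpu_bf16_plugin_config instead of empty_plugin_config. Its
// definition lives in the shared test utilities rather than in this diff,
// but judging by the SetUp() check above it presumably amounts to:
const ov::AnyMap assumedBf16Config = {{ov::hint::inference_precision.name(), ov::element::bf16}};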
::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D_cache), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_2D)), + ::testing::Values(fusingAddPerChannel), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_Gemm_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_Gemm_with_bias_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_2D)), - ::testing::Values(fusingAddPerChannel), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_Gemm_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_2D)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_Gemm_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_2D)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (Gemm 3D) ============= */ -const auto groupConvParams_ExplicitPadding_Gemm_3D = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::ValuesIn(numOutChannels_Gemm), - ::testing::ValuesIn(numGroups_Gemm), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_Gemm_3D = { - conv_gemm_3D, - conv_gemm_3D_nspc -}; +const auto groupConvParams_ExplicitPadding_Gemm_3D = ::testing::Combine(::testing::ValuesIn(kernels3d), + 
::testing::ValuesIn(strides3d), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::ValuesIn(numOutChannels_Gemm), + ::testing::ValuesIn(numGroups_Gemm), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_Gemm_3D = {conv_gemm_3D, conv_gemm_3D_nspc}; + +std::vector inShapesGemm3D = {{{}, {{2, 12, 7, 7, 7}}}, + {// dynamic shape + {{1, 200}, 12, -1, {1, 200}, -1}, + {// target static shapes + {2, 12, 7, 7, 7}, + {1, 12, 5, 5, 5}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_Gemm_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm3D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_3D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -std::vector inShapesGemm3D = { - {{}, {{ 2, 12, 7, 7, 7 }}}, - { - //dynamic shape - {{1, 200}, 12, -1, {1, 200}, -1}, - { //target static shapes - { 2, 12, 7, 7, 7 }, - { 1, 12, 5, 5, 5 } - } - } -}; +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_Gemm_with_bias_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm3D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_3D)), + ::testing::Values(fusingAddPerChannel), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_Gemm_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_3D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_Gemm_with_bias_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_3D)), - ::testing::Values(fusingAddPerChannel), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_Gemm_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_Gemm_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - 
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_3D)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_Gemm_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_Gemm_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm3D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_Gemm_3D)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution params (brgemm_1D) ============= */ -const std::vector kernels_brgemm_1d = {{3}}; -const std::vector strides_brgemm_1d = { {1}, {2} }; -const std::vector> padBegins_brgemm_1d = { {0}, {1} }; -const std::vector> padEnds_brgemm_1d = { {0} }; -const std::vector dilations_brgemm_1d = { {1}, {2} }; +const std::vector> kernels_brgemm_1d = {{3}}; +const std::vector> strides_brgemm_1d = {{1}, {2}}; +const std::vector> padBegins_brgemm_1d = {{0}, {1}}; +const std::vector> padEnds_brgemm_1d = {{0}}; +const std::vector> dilations_brgemm_1d = {{1}, {2}}; /* ============= GroupConvolution params (brgemm_2D) ============= */ -const std::vector kernels_brgemm_2d = {{3, 3}}; -const std::vector strides_brgemm_2d = {{1, 1}, {2, 2}}; +const std::vector> kernels_brgemm_2d = {{3, 3}}; +const std::vector> strides_brgemm_2d = {{1, 1}, {2, 2}}; const std::vector> padBegins_brgemm_2d = {{0, 0}, {1, 1}}; const std::vector> padEnds_brgemm_2d = {{0, 0}}; -const std::vector dilations_brgemm_2d = {{1, 1}, {2, 2}}; +const std::vector> dilations_brgemm_2d = {{1, 1}, {2, 2}}; /* ============= GroupConvolution params (brgemm_3D) ============= */ -const std::vector kernels_brgemm_3d = {{3, 3, 3}}; -const std::vector strides_brgemm_3d = {{1, 1, 1}, {2, 2, 2}}; +const std::vector> kernels_brgemm_3d = {{3, 3, 3}}; +const std::vector> strides_brgemm_3d = {{1, 1, 1}, {2, 2, 2}}; const std::vector> padBegins_brgemm_3d = {{0, 0, 0}, {1, 1, 1}}; const std::vector> padEnds_brgemm_3d = {{0, 0, 0}}; -const std::vector dilations_brgemm_3d = {{1, 1, 1}, {2, 2, 2}}; +const std::vector> dilations_brgemm_3d = {{1, 1, 1}, {2, 2, 2}}; /* ============= */ -const SizeVector numGroups_brgemm_Blocked = {2}; +const std::vector numGroups_brgemm_Blocked = {2}; /* ============= GroupConvolution (brgemm 1D) ============= */ -const auto groupConvParams_ExplicitPadding_brgemm_1D = ::testing::Combine( - ::testing::ValuesIn(kernels_brgemm_1d), - ::testing::ValuesIn(strides_brgemm_1d), - ::testing::ValuesIn(padBegins_brgemm_1d), - ::testing::ValuesIn(padEnds_brgemm_1d), - ::testing::ValuesIn(dilations_brgemm_1d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_brgemm_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_brgemm_1D_BF16 = { - conv_avx512_1D_nspc_brgconv -}; - -const std::vector CPUParams_brgemm_1D_FP32 = { - conv_avx512_1D_nspc_brgconv -}; - -std::vector inputShapes_brgemm_1d = { - {{}, {{ 2, 64, 7 }}}, - { - //dynamic shapes - {-1, 64, {1, 200}}, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 9 } - } - }, - { - //dynamic shapes - { {-1, 64, -1} }, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 14 } - } - } -}; +const auto 
groupConvParams_ExplicitPadding_brgemm_1D = ::testing::Combine(::testing::ValuesIn(kernels_brgemm_1d), + ::testing::ValuesIn(strides_brgemm_1d), + ::testing::ValuesIn(padBegins_brgemm_1d), + ::testing::ValuesIn(padEnds_brgemm_1d), + ::testing::ValuesIn(dilations_brgemm_1d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_brgemm_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_brgemm_1D_BF16 = {conv_avx512_1D_nspc_brgconv}; + +const std::vector CPUParams_brgemm_1D_FP32 = {conv_avx512_1D_nspc_brgconv}; + +std::vector inputShapes_brgemm_1d = {{{}, {{2, 64, 7}}}, + {// dynamic shapes + {-1, 64, {1, 200}}, + {// target static shapes + {2, 64, 7}, + {1, 64, 9}}}, + {// dynamic shapes + {{-1, 64, -1}}, + {// target static shapes + {2, 64, 7}, + {1, 64, 14}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1D_FP32_fusingBias, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1D_FP32)), - ::testing::Values(fusingAddPerChannel), - ::testing::Values(cpuEmptyPluginConfig)), +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1D_FP32_fusingBias, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1D_FP32)), + ::testing::Values(fusingAddPerChannel), + ::testing::Values(empty_plugin_config)), GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - 
::testing::ValuesIn(inputShapes_brgemm_1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1D_BF16)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_brgemm_1D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1D_BF16)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (brgemm_2D) ============= */ -const auto groupConvParams_ExplicitPadding_brgemm_2D = ::testing::Combine( - ::testing::ValuesIn(kernels_brgemm_2d), - ::testing::ValuesIn(strides_brgemm_2d), - ::testing::ValuesIn(padBegins_brgemm_2d), - ::testing::ValuesIn(padEnds_brgemm_2d), - ::testing::ValuesIn(dilations_brgemm_2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_brgemm_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_brgemm_2D_FP32 = { - conv_avx512_2D_nspc_brgconv -}; - -const std::vector CPUParams_brgemm_2D_BF16 = { - conv_avx512_2D_nspc_brgconv, - conv_avx512_2D_nspc_brgconv_amx -}; - -std::vector inputShapes_brgemm_2d = { - {{}, {{ 1, 64, 7, 7 }}}, - { - //dynamic shapes - {-1, 64, -1, {1, 200}}, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9 } - } - } -}; +const auto groupConvParams_ExplicitPadding_brgemm_2D = ::testing::Combine(::testing::ValuesIn(kernels_brgemm_2d), + ::testing::ValuesIn(strides_brgemm_2d), + ::testing::ValuesIn(padBegins_brgemm_2d), + ::testing::ValuesIn(padEnds_brgemm_2d), + ::testing::ValuesIn(dilations_brgemm_2d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_brgemm_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_brgemm_2D_FP32 = {conv_avx512_2D_nspc_brgconv}; + +const std::vector CPUParams_brgemm_2D_BF16 = {conv_avx512_2D_nspc_brgconv, + conv_avx512_2D_nspc_brgconv_amx}; + +std::vector inputShapes_brgemm_2d = {{{}, {{1, 64, 7, 7}}}, + {// dynamic shapes + {-1, 64, -1, {1, 200}}, + {// target static shapes + {2, 64, 7, 7}, + {1, 64, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_2D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_2d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_2D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_2D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), 
- ::testing::ValuesIn(inputShapes_brgemm_2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_2D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes_brgemm_2d_dynBatch = { - { - //dynamic shapes - { {1, 10}, 64, {7, 9}, {7, 9}}, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9 }, - { 3, 64, 9, 9 } - } - } -}; +std::vector inputShapes_brgemm_2d_dynBatch = {{// dynamic shapes + {{1, 10}, 64, {7, 9}, {7, 9}}, + {// target static shapes + {2, 64, 7, 7}, + {1, 64, 9, 9}, + {3, 64, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_2D_FP32_dynBatch, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_2d_dynBatch), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_2D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_2D_FP32_dynBatch, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_2d_dynBatch), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_2D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes_brgemm_2d_cache = { - { - //dynamic shapes - {-1, 64, -1, {1, 200}}, - { //target static shapes - { 1, 64, 7, 7 }, - { 1, 64, 9, 9 }, - { 1, 64, 7, 7 }, - } - } -}; +std::vector inputShapes_brgemm_2d_cache = {{// dynamic shapes + {-1, 64, -1, {1, 200}}, + { + // target static shapes + {1, 64, 7, 7}, + {1, 64, 9, 9}, + {1, 64, 7, 7}, + }}}; + +INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_2D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_2d_cache), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_2D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_2D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_2d_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_2D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - 
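// Note on the *_cache shape lists used by the nightly suites above: the first
// static shape reappears after a different one ({1,64,7,7} -> {1,64,9,9} ->
// {1,64,7,7}), so the run re-enters an already-seen shape. Presumably this is
// there to exercise the CPU plugin's primitive/shape caching on dynamic
// shapes rather than plain reshape handling.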
-INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_2D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_2D_BF16)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_brgemm_2D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_2d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_2D_BF16)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (brgemm_3D) ============= */ -const auto groupConvParams_ExplicitPadding_brgemm_3D = ::testing::Combine( - ::testing::ValuesIn(kernels_brgemm_3d), - ::testing::ValuesIn(strides_brgemm_3d), - ::testing::ValuesIn(padBegins_brgemm_3d), - ::testing::ValuesIn(padEnds_brgemm_3d), - ::testing::ValuesIn(dilations_brgemm_3d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_brgemm_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - - -const std::vector CPUParams_brgemm_3D_FP32 = { - conv_avx512_3D_nspc_brgconv -}; - -const std::vector CPUParams_brgemm_3D_BF16 = { - conv_avx512_3D_nspc_brgconv, - conv_avx512_3D_nspc_brgconv_amx -}; - -std::vector inputShapes_brgemm_3d = { - {{}, {{ 1, 64, 7, 7, 7 }}}, - { - //dynamic shapes - {-1, 64, -1, {1, 200}, -1}, - { //target static shapes - { 2, 64, 7, 7, 7 }, - { 1, 64, 9, 9, 9 } - } - } -}; +const auto groupConvParams_ExplicitPadding_brgemm_3D = ::testing::Combine(::testing::ValuesIn(kernels_brgemm_3d), + ::testing::ValuesIn(strides_brgemm_3d), + ::testing::ValuesIn(padBegins_brgemm_3d), + ::testing::ValuesIn(padEnds_brgemm_3d), + ::testing::ValuesIn(dilations_brgemm_3d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_brgemm_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_brgemm_3D_FP32 = {conv_avx512_3D_nspc_brgconv}; + +const std::vector CPUParams_brgemm_3D_BF16 = {conv_avx512_3D_nspc_brgconv, + conv_avx512_3D_nspc_brgconv_amx}; + +std::vector inputShapes_brgemm_3d = {{{}, {{1, 64, 7, 7, 7}}}, + {// dynamic shapes + {-1, 64, -1, {1, 200}, -1}, + {// target static shapes + {2, 64, 7, 7, 7}, + {1, 64, 9, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_3D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_3d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_3D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + 
GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_3D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_3D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_3D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_3D_BF16)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_brgemm_3D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_3d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_3D_BF16)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution params (brgemm_1x1_1D) ============= */ -const std::vector kernels_brgemm_1x1_1d = {{1}}; -const std::vector strides_brgemm_1x1_1d = { {1}, {2} }; -const std::vector> padBegins_brgemm_1x1_1d = { {0}}; -const std::vector> padEnds_brgemm_1x1_1d = { {0} }; -const std::vector dilations_brgemm_1x1_1d = { {1}, {2} }; +const std::vector> kernels_brgemm_1x1_1d = {{1}}; +const std::vector> strides_brgemm_1x1_1d = {{1}, {2}}; +const std::vector> padBegins_brgemm_1x1_1d = {{0}}; +const std::vector> padEnds_brgemm_1x1_1d = {{0}}; +const std::vector> dilations_brgemm_1x1_1d = {{1}, {2}}; /* ============= GroupConvolution params (brgemm_1x1_2D) ============= */ -const std::vector kernels_brgemm_1x1_2d = {{1, 1}}; -const std::vector strides_brgemm_1x1_2d = {{1, 1}, {2, 2}}; +const std::vector> kernels_brgemm_1x1_2d = {{1, 1}}; +const std::vector> strides_brgemm_1x1_2d = {{1, 1}, {2, 2}}; const std::vector> padBegins_brgemm_1x1_2d = {{0, 0}}; const std::vector> padEnds_brgemm_1x1_2d = {{0, 0}}; -const std::vector dilations_brgemm_1x1_2d = {{1, 1}, {2, 2}}; +const std::vector> dilations_brgemm_1x1_2d = {{1, 1}, {2, 2}}; /* ============= GroupConvolution params (brgemm_1x1_3D) ============= */ -const std::vector kernels_brgemm_1x1_3d = {{1, 1, 1}}; -const std::vector strides_brgemm_1x1_3d = {{1, 1, 1}, {2, 2, 2}}; +const std::vector> kernels_brgemm_1x1_3d = {{1, 1, 1}}; +const std::vector> strides_brgemm_1x1_3d = {{1, 1, 1}, {2, 2, 2}}; const std::vector> padBegins_brgemm_1x1_3d = {{0, 0, 0}}; const std::vector> padEnds_brgemm_1x1_3d = {{0, 0, 0}}; -const std::vector dilations_brgemm_1x1_3d = {{1, 1, 
1}, {2, 2, 2}}; +const std::vector> dilations_brgemm_1x1_3d = {{1, 1, 1}, {2, 2, 2}}; /* ============= */ -const SizeVector numGroups_brgemm_1x1_Blocked = {2}; +const std::vector numGroups_brgemm_1x1_Blocked = {2}; /* ============= GroupConvolution (brgemm_1x1 1D) ============= */ -const auto groupConvParams_ExplicitPadding_brgemm_1x1_1D = ::testing::Combine( - ::testing::ValuesIn(kernels_brgemm_1x1_1d), - ::testing::ValuesIn(strides_brgemm_1x1_1d), - ::testing::ValuesIn(padBegins_brgemm_1x1_1d), - ::testing::ValuesIn(padEnds_brgemm_1x1_1d), - ::testing::ValuesIn(dilations_brgemm_1x1_1d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_brgemm_1x1_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); +const auto groupConvParams_ExplicitPadding_brgemm_1x1_1D = + ::testing::Combine(::testing::ValuesIn(kernels_brgemm_1x1_1d), + ::testing::ValuesIn(strides_brgemm_1x1_1d), + ::testing::ValuesIn(padBegins_brgemm_1x1_1d), + ::testing::ValuesIn(padEnds_brgemm_1x1_1d), + ::testing::ValuesIn(dilations_brgemm_1x1_1d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_brgemm_1x1_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_brgemm_1x1_1D_BF16 = { - conv_avx512_1D_1x1_nspc_brgconv, + conv_avx512_1D_1x1_nspc_brgconv, }; const std::vector CPUParams_brgemm_1x1_1D_FP32 = { - conv_avx512_1D_1x1_nspc_brgconv, + conv_avx512_1D_1x1_nspc_brgconv, }; -std::vector inputShapes_brgemm_1x1_1d = { - {{}, {{ 2, 64, 7 }}}, - { - //dynamic shapes - {-1, 64, {1, 200}}, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 9 } - } - }, - { - //dynamic shapes - { {-1, 64, -1} }, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 14 } - } - } -}; +std::vector inputShapes_brgemm_1x1_1d = {{{}, {{2, 64, 7}}}, + {// dynamic shapes + {-1, 64, {1, 200}}, + {// target static shapes + {2, 64, 7}, + {1, 64, 9}}}, + {// dynamic shapes + {{-1, 64, -1}}, + {// target static shapes + {2, 64, 7}, + {1, 64, 14}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_1D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_1D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_1D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_1D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_1D_FP32_fusingBias, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - 
::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_1D_FP32)), - ::testing::Values(fusingAddPerChannel), - ::testing::Values(cpuEmptyPluginConfig)), +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_1D_FP32_fusingBias, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_1D_FP32)), + ::testing::Values(fusingAddPerChannel), + ::testing::Values(empty_plugin_config)), GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_1D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1x1_1D_BF16)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_brgemm_1x1_1D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1x1_1D_BF16)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (brgemm_1x1_2D) ============= */ -const auto groupConvParams_ExplicitPadding_brgemm_1x1_2D = ::testing::Combine( - ::testing::ValuesIn(kernels_brgemm_1x1_2d), - ::testing::ValuesIn(strides_brgemm_1x1_2d), - ::testing::ValuesIn(padBegins_brgemm_1x1_2d), - ::testing::ValuesIn(padEnds_brgemm_1x1_2d), - ::testing::ValuesIn(dilations_brgemm_1x1_2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_brgemm_1x1_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_brgemm_1x1_2D_FP32 = { - conv_avx512_2D_1x1_nspc_brgconv -}; - -const std::vector CPUParams_brgemm_1x1_2D_BF16 = { - conv_avx512_2D_1x1_nspc_brgconv, - conv_avx512_2D_1x1_nspc_brgconv_amx -}; - -std::vector inputShapes_brgemm_1x1_2d = { - {{}, {{ 1, 64, 7, 7 }}}, - { - //dynamic shapes - {-1, 64, -1, {1, 200}}, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9 } - } - } -}; +const auto groupConvParams_ExplicitPadding_brgemm_1x1_2D = + ::testing::Combine(::testing::ValuesIn(kernels_brgemm_1x1_2d), + ::testing::ValuesIn(strides_brgemm_1x1_2d), + ::testing::ValuesIn(padBegins_brgemm_1x1_2d), + ::testing::ValuesIn(padEnds_brgemm_1x1_2d), + ::testing::ValuesIn(dilations_brgemm_1x1_2d), + ::testing::ValuesIn(numOutChannels_Blocked), + 
::testing::ValuesIn(numGroups_brgemm_1x1_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_brgemm_1x1_2D_FP32 = {conv_avx512_2D_1x1_nspc_brgconv}; + +const std::vector CPUParams_brgemm_1x1_2D_BF16 = {conv_avx512_2D_1x1_nspc_brgconv, + conv_avx512_2D_1x1_nspc_brgconv_amx}; + +std::vector inputShapes_brgemm_1x1_2d = {{{}, {{1, 64, 7, 7}}}, + {// dynamic shapes + {-1, 64, -1, {1, 200}}, + {// target static shapes + {2, 64, 7, 7}, + {1, 64, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_2D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_2d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_2D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_2D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_2D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes_brgemm_1x1_2d_dynBatch = { - { - //dynamic shapes - { {1, 10}, 64, {7, 9}, {7, 9}}, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9 }, - { 3, 64, 9, 9 } - } - } -}; +std::vector inputShapes_brgemm_1x1_2d_dynBatch = {{// dynamic shapes + {{1, 10}, 64, {7, 9}, {7, 9}}, + {// target static shapes + {2, 64, 7, 7}, + {1, 64, 9, 9}, + {3, 64, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_1x1_2D_FP32_dynBatch, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_2d_dynBatch), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_2D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_1x1_2D_FP32_dynBatch, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_2d_dynBatch), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_2D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes_brgemm_1x1_2d_cache = { - { - //dynamic shapes - {-1, 64, -1, {1, 200}}, - { //target static shapes - { 1, 
64, 7, 7 }, - { 1, 64, 9, 9 }, - { 1, 64, 7, 7 }, - } - } -}; +std::vector inputShapes_brgemm_1x1_2d_cache = {{// dynamic shapes + {-1, 64, -1, {1, 200}}, + { + // target static shapes + {1, 64, 7, 7}, + {1, 64, 9, 9}, + {1, 64, 7, 7}, + }}}; + +INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_1x1_2D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_2d_cache), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_2D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_brgemm_1x1_2D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_2d_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_2D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_2D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1x1_2D_BF16)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_brgemm_1x1_2D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_2d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1x1_2D_BF16)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (brgemm_1x1_3D) ============= */ -const auto groupConvParams_ExplicitPadding_brgemm_1x1_3D = ::testing::Combine( - ::testing::ValuesIn(kernels_brgemm_1x1_3d), - ::testing::ValuesIn(strides_brgemm_1x1_3d), - ::testing::ValuesIn(padBegins_brgemm_1x1_3d), - ::testing::ValuesIn(padEnds_brgemm_1x1_3d), - ::testing::ValuesIn(dilations_brgemm_1x1_3d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_brgemm_1x1_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - +const auto groupConvParams_ExplicitPadding_brgemm_1x1_3D = + ::testing::Combine(::testing::ValuesIn(kernels_brgemm_1x1_3d), + ::testing::ValuesIn(strides_brgemm_1x1_3d), + ::testing::ValuesIn(padBegins_brgemm_1x1_3d), + 
::testing::ValuesIn(padEnds_brgemm_1x1_3d), + ::testing::ValuesIn(dilations_brgemm_1x1_3d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_brgemm_1x1_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_brgemm_1x1_3D_FP32 = { - conv_avx512_3D_1x1_nspc_brgconv, + conv_avx512_3D_1x1_nspc_brgconv, }; const std::vector CPUParams_brgemm_1x1_3D_BF16 = { - conv_avx512_3D_1x1_nspc_brgconv, - conv_avx512_3D_1x1_nspc_brgconv_amx, + conv_avx512_3D_1x1_nspc_brgconv, + conv_avx512_3D_1x1_nspc_brgconv_amx, }; -std::vector inputShapes_brgemm_1x1_3d = { - {{}, {{ 1, 64, 7, 7, 7 }}}, - { - //dynamic shapes - {-1, 64, -1, {1, 200}, -1}, - { //target static shapes - { 2, 64, 7, 7, 7 }, - { 1, 64, 9, 9, 9 } - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_3D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_3D_FP32)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_3D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_brgemm_1x1_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_brgemm_1x1_3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1x1_3D_BF16)), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - +std::vector inputShapes_brgemm_1x1_3d = {{{}, {{1, 64, 7, 7, 7}}}, + {// dynamic shapes + {-1, 64, -1, {1, 200}, -1}, + {// target static shapes + {2, 64, 7, 7, 7}, + {1, 64, 9, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_brgemm_1x1_3D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_3d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_brgemm_1x1_3D_FP32)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_brgemm_1x1_3D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_brgemm_1x1_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_brgemm_1x1_3d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(CPUParams_brgemm_1x1_3D_BF16)), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (1D) ============= 
*/ -const auto groupConvParams_ExplicitPadding_1D = ::testing::Combine( - ::testing::ValuesIn(kernels1d), - ::testing::ValuesIn(strides1d), - ::testing::ValuesIn(padBegins1d), - ::testing::ValuesIn(padEnds1d), - ::testing::ValuesIn(dilations1d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_1D = { - conv_sse42_1D, - conv_avx2_1D, - conv_avx512_1D, - conv_sse42_1D_nspc, - conv_avx2_1D_nspc, - conv_avx512_1D_nspc -}; - -std::vector inputShapes1d = { - {{}, {{ 2, 64, 7 }}}, - { - //dynamic shapes - {-1, 64, {1, 200}}, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 9 } - } - }, - { - //dynamic shapes - { {-1, 64, -1} }, - { //target static shapes - { 2, 64, 7 }, - { 1, 64, 14 } - } - } -}; +const auto groupConvParams_ExplicitPadding_1D = ::testing::Combine(::testing::ValuesIn(kernels1d), + ::testing::ValuesIn(strides1d), + ::testing::ValuesIn(padBegins1d), + ::testing::ValuesIn(padEnds1d), + ::testing::ValuesIn(dilations1d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_1D = + {conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx512_1D_nspc}; + +std::vector inputShapes1d = {{{}, {{2, 64, 7}}}, + {// dynamic shapes + {-1, 64, {1, 200}}, + {// target static shapes + {2, 64, 7}, + {1, 64, 9}}}, + {// dynamic shapes + {{-1, 64, -1}}, + {// target static shapes + {2, 64, 7}, + {1, 64, 14}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_FP32_fusingBias, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)), - ::testing::Values(fusingAddPerChannel), - ::testing::Values(cpuEmptyPluginConfig)), +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_FP32_fusingBias, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::f32), + 
::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_1D)), + ::testing::Values(fusingAddPerChannel), + ::testing::Values(empty_plugin_config)), GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_1D})), // todo: [AV] what about conv_avx512_1D_nspc? - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice( + {conv_avx512_1D})), // todo: [AV] what about conv_avx512_1D_nspc? + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (2D) ============= */ -const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_2D = { - conv_sse42_2D, - conv_avx2_2D, - conv_avx512_2D, - conv_sse42_2D_nspc, - conv_avx2_2D_nspc, - conv_avx512_2D_nspc -}; - -std::vector inputShapes2d = { - {{}, {{ 1, 64, 7, 7 }}}, - { - //dynamic shapes - {-1, 64, -1, {1, 200}}, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9 } - } - } -}; +const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine(::testing::ValuesIn(kernels2d), + ::testing::ValuesIn(strides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_2D = + {conv_sse42_2D, conv_avx2_2D, conv_avx512_2D, conv_sse42_2D_nspc, conv_avx2_2D_nspc, conv_avx512_2D_nspc}; + +std::vector inputShapes2d = {{{}, {{1, 64, 7, 7}}}, + {// dynamic shapes + {-1, 64, -1, {1, 200}}, + {// target static shapes + {2, 64, 7, 7}, + {1, 64, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), + ::testing::ValuesIn(fusingParamsSet), + 
::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes2d_dynBatch = { - { - //dynamic shapes - { {1, 10}, 64, {7, 9}, {7, 9}}, - { //target static shapes - { 2, 64, 7, 7 }, - { 1, 64, 9, 9 }, - { 3, 64, 9, 9 } - } - } -}; +std::vector inputShapes2d_dynBatch = {{// dynamic shapes + {{1, 10}, 64, {7, 9}, {7, 9}}, + {// target static shapes + {2, 64, 7, 7}, + {1, 64, 9, 9}, + {3, 64, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_2D_FP32_dynBatch, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d_dynBatch), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_2D_FP32_dynBatch, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d_dynBatch), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -std::vector inputShapes2d_cache = { - { - //dynamic shapes - {-1, 64, -1, {1, 200}}, - { //target static shapes - { 1, 64, 7, 7 }, - { 1, 64, 9, 9 }, - { 1, 64, 7, 7 }, - } - } -}; +std::vector inputShapes2d_cache = {{// dynamic shapes + {-1, 64, -1, {1, 200}}, + { + // target static shapes + {1, 64, 7, 7}, + {1, 64, 9, 9}, + {1, 64, 7, 7}, + }}}; + +INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_2D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d_cache), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(nightly_GroupConv_2D_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d_cache), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - 
::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx512_2D_nspc})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, + conv_avx512_2D_nspc})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (3D) ============= */ -const auto groupConvParams_ExplicitPadding_3D = ::testing::Combine( - ::testing::ValuesIn(kernels3d), - ::testing::ValuesIn(strides3d), - ::testing::ValuesIn(padBegins3d), - ::testing::ValuesIn(padEnds3d), - ::testing::ValuesIn(dilations3d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); +const auto groupConvParams_ExplicitPadding_3D = ::testing::Combine(::testing::ValuesIn(kernels3d), + ::testing::ValuesIn(strides3d), + ::testing::ValuesIn(padBegins3d), + ::testing::ValuesIn(padEnds3d), + ::testing::ValuesIn(dilations3d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_3D = { -// conv_sse42_3D, // not supported jit_sse42 for 3d - conv_avx2_3D, - conv_avx512_3D, - conv_avx2_3D_nspc, - conv_avx512_3D_nspc -}; - -std::vector inputShapes3d = { - {{}, {{ 1, 64, 7, 7, 7 }}}, - { - //dynamic shapes - {-1, 64, -1, {1, 200}, -1}, - { //target static shapes - { 2, 64, 7, 7, 7 }, - { 1, 64, 9, 9, 9 } - } - } -}; + // conv_sse42_3D, // not supported jit_sse42 for 3d + conv_avx2_3D, + conv_avx512_3D, + conv_avx2_3D_nspc, + conv_avx512_3D_nspc}; + +std::vector inputShapes3d = {{{}, {{1, 64, 7, 7, 7}}}, + {// dynamic shapes + {-1, 64, -1, {1, 200}, -1}, + {// target static shapes + {2, 64, 7, 7, 7}, + {1, 64, 9, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes3d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_FP32, 
GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_3D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D, conv_avx512_3D_nspc})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes3d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D, + conv_avx512_3D_nspc})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (DW 1D) ============= */ -const auto groupConvParams_ExplicitPadding_DW_1D = ::testing::Combine( - ::testing::ValuesIn(kernels1d), - ::testing::ValuesIn(strides1d), - ::testing::ValuesIn(padBegins1d), - ::testing::ValuesIn(padEnds1d), - ::testing::ValuesIn(dilations1d), - ::testing::ValuesIn(numOutChannels_DW), - ::testing::ValuesIn(numGroups_DW), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_DW_1D = { - conv_sse42_dw_1D, - conv_avx2_dw_1D, - conv_avx512_dw_1D, - conv_sse42_dw_1D_nspc, - conv_avx2_dw_1D_nspc, - conv_avx512_dw_1D_nspc -}; - -std::vector inputShapes1dDW = { - {{}, {{ 2, 32, 7 }}}, - { - //dynamic shapes - {-1, 32, {1, 200}}, - { //target static shapes - { 2, 32, 7 }, - { 1, 32, 9 } - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_DW_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_DW_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1dDW), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_sse42_dw_1D, - conv_avx2_dw_1D, - conv_avx512_dw_1D})), // todo: [AV] what about conv_sse42_dw_1D_nspc, - // conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? 
- ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_DW_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_DW_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1dDW), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_1D})), // todo: [AV] what about conv_avx512_dw_1D_nspc? - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +const auto groupConvParams_ExplicitPadding_DW_1D = ::testing::Combine(::testing::ValuesIn(kernels1d), + ::testing::ValuesIn(strides1d), + ::testing::ValuesIn(padBegins1d), + ::testing::ValuesIn(padEnds1d), + ::testing::ValuesIn(dilations1d), + ::testing::ValuesIn(numOutChannels_DW), + ::testing::ValuesIn(numGroups_DW), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_DW_1D = {conv_sse42_dw_1D, + conv_avx2_dw_1D, + conv_avx512_dw_1D, + conv_sse42_dw_1D_nspc, + conv_avx2_dw_1D_nspc, + conv_avx512_dw_1D_nspc}; + +std::vector inputShapes1dDW = {{{}, {{2, 32, 7}}}, + {// dynamic shapes + {-1, 32, {1, 200}}, + {// target static shapes + {2, 32, 7}, + {1, 32, 9}}}}; + +INSTANTIATE_TEST_SUITE_P( + smoke_GroupConv_1D_DW_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine(groupConvParams_ExplicitPadding_DW_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes1dDW), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice( + {conv_sse42_dw_1D, conv_avx2_dw_1D, conv_avx512_dw_1D})), // todo: [AV] what about conv_sse42_dw_1D_nspc, + // conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_DW_BF16, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes1dDW), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice( + {conv_avx512_dw_1D})), // todo: [AV] what about conv_avx512_dw_1D_nspc? 
+ ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= GroupConvolution (DW 2D) ============= */ -const auto groupConvParams_ExplicitPadding_DW_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_DW), - ::testing::ValuesIn(numGroups_DW), - ::testing::Values(ngraph::op::PadType::EXPLICIT) -); - -const std::vector CPUParams_DW_2D = { - conv_sse42_dw_2D, - conv_avx2_dw_2D, - conv_avx512_dw_2D, - conv_sse42_dw_2D_nspc, - conv_avx2_dw_2D_nspc, - conv_avx512_dw_2D_nspc -}; - -std::vector inputShapes2dDW = { - {{}, {{ 2, 32, 7, 7 }}}, - { - //dynamic shapes - {-1, 32, -1, {1, 200}}, - { //target static shapes - { 2, 32, 7, 7 }, - { 1, 32, 9, 9 } - } - } -}; +const auto groupConvParams_ExplicitPadding_DW_2D = ::testing::Combine(::testing::ValuesIn(kernels2d), + ::testing::ValuesIn(strides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_DW), + ::testing::ValuesIn(numGroups_DW), + ::testing::Values(ov::op::PadType::EXPLICIT)); + +const std::vector CPUParams_DW_2D = {conv_sse42_dw_2D, + conv_avx2_dw_2D, + conv_avx512_dw_2D, + conv_sse42_dw_2D_nspc, + conv_avx2_dw_2D_nspc, + conv_avx512_dw_2D_nspc}; + +std::vector inputShapes2dDW = {{{}, {{2, 32, 7, 7}}}, + {// dynamic shapes + {-1, 32, -1, {1, 200}}, + {// target static shapes + {2, 32, 7, 7}, + {1, 32, 9, 9}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2dDW), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_2D)), + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_FP32, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_DW_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2dDW), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_2D)), - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(cpuEmptyPluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); - - -INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_BF16, GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - groupConvParams_ExplicitPadding_DW_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2dDW), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D, conv_avx512_dw_2D_nspc})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpuBF16PluginConfig)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_BF16, + GroupConvolutionLayerCPUTest, + 
::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D,
+                                                               ::testing::Values(ElementType::f32),
+                                                               ::testing::Values(ElementType::undefined),
+                                                               ::testing::Values(ElementType::undefined),
+                                                               ::testing::ValuesIn(inputShapes2dDW),
+                                                               ::testing::Values(ov::test::utils::DEVICE_CPU)),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D,
+                                                                                        conv_avx512_dw_2D_nspc})),
+                                            ::testing::ValuesIn(fusingParamsSetBF16),
+                                            ::testing::Values(cpu_bf16_plugin_config)),
+                         GroupConvolutionLayerCPUTest::getTestCaseName);
 
 /* ============= GroupConvolution (DW 3D) ============= */
-const auto groupConvParams_ExplicitPadding_DW_3D = ::testing::Combine(
-        ::testing::ValuesIn(kernels3d),
-        ::testing::ValuesIn(strides3d),
-        ::testing::ValuesIn(padBegins3d),
-        ::testing::ValuesIn(padEnds3d),
-        ::testing::ValuesIn(dilations3d),
-        ::testing::ValuesIn(numOutChannels_DW),
-        ::testing::ValuesIn(numGroups_DW),
-        ::testing::Values(ngraph::op::PadType::EXPLICIT)
-);
-
-const std::vector<CPUSpecificParams> CPUParams_DW_3D = {
-        conv_sse42_dw_3D,
-        conv_avx2_dw_3D,
-        conv_avx512_dw_3D,
-        conv_sse42_dw_3D_nspc,
-        conv_avx2_dw_3D_nspc,
-        conv_avx512_dw_3D_nspc
-};
-
-std::vector<InputShape> inputShapes3dDW = {
-        {{}, {{ 2, 32, 7, 7, 7 }}},
-        {
-            //dynamic shapes
-            {-1, 32, -1, {1, 200}, -1},
-            { //target static shapes
-                { 2, 32, 7, 7, 7 },
-                { 1, 32, 9, 9, 9 }
-            }
-        }
-};
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_DW_FP32, GroupConvolutionLayerCPUTest,
-                         ::testing::Combine(
-                                 ::testing::Combine(
-                                         groupConvParams_ExplicitPadding_DW_3D,
-                                         ::testing::Values(ElementType::f32),
-                                         ::testing::Values(ElementType::undefined),
-                                         ::testing::Values(ElementType::undefined),
-                                         ::testing::ValuesIn(inputShapes3dDW),
-                                         ::testing::Values(ov::test::utils::DEVICE_CPU)),
-                                 ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_3D)),
-                                 ::testing::ValuesIn(fusingParamsSet),
-                                 ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupConvolutionLayerCPUTest::getTestCaseName);
+const auto groupConvParams_ExplicitPadding_DW_3D = ::testing::Combine(::testing::ValuesIn(kernels3d),
+                                                                      ::testing::ValuesIn(strides3d),
+                                                                      ::testing::ValuesIn(padBegins3d),
+                                                                      ::testing::ValuesIn(padEnds3d),
+                                                                      ::testing::ValuesIn(dilations3d),
+                                                                      ::testing::ValuesIn(numOutChannels_DW),
+                                                                      ::testing::ValuesIn(numGroups_DW),
+                                                                      ::testing::Values(ov::op::PadType::EXPLICIT));
+
+const std::vector<CPUSpecificParams> CPUParams_DW_3D = {conv_sse42_dw_3D,
+                                                        conv_avx2_dw_3D,
+                                                        conv_avx512_dw_3D,
+                                                        conv_sse42_dw_3D_nspc,
+                                                        conv_avx2_dw_3D_nspc,
+                                                        conv_avx512_dw_3D_nspc};
+
+std::vector<InputShape> inputShapes3dDW = {{{}, {{2, 32, 7, 7, 7}}},
+                                           {// dynamic shapes
+                                            {-1, 32, -1, {1, 200}, -1},
+                                            {// target static shapes
+                                             {2, 32, 7, 7, 7},
+                                             {1, 32, 9, 9, 9}}}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_DW_FP32,
+                         GroupConvolutionLayerCPUTest,
+                         ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_3D,
+                                                               ::testing::Values(ElementType::f32),
+                                                               ::testing::Values(ElementType::undefined),
+                                                               ::testing::Values(ElementType::undefined),
+                                                               ::testing::ValuesIn(inputShapes3dDW),
+                                                               ::testing::Values(ov::test::utils::DEVICE_CPU)),
+                                            ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_3D)),
+                                            ::testing::ValuesIn(fusingParamsSet),
+                                            ::testing::Values(empty_plugin_config)),
+                         GroupConvolutionLayerCPUTest::getTestCaseName);
 
 /* ========= */
-
 /* ============= SINGLE TEST CASES ============= */
 using VecFusingParams = std::vector<fusingSpecificParams>;
-using ConfigRelatedParams = std::tuple<Config, VecFusingParams>;  // Plugin config FusingParamsSet
+using ConfigRelatedParams = std::tuple<ov::AnyMap, VecFusingParams>;  // Plugin config FusingParamsSet
 using VecConfigRelatedParams = std::vector<ConfigRelatedParams>;
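// A minimal sketch of how the SINGLE TEST CASES machinery is meant to be used, assuming the helpers
// defined just below: each ConfigRelatedParams entry pairs a plugin config with a set of fusing
// variants, and makeSingleGroupConvCPUTestCases() crosses every fusing variant with every
// CPUSpecificParams entry, yielding one groupConvLayerCPUTestParamsSet per combination. The concrete
// kernel/shape values here are illustrative only and are not part of the patch:
//
//     const VecConfigRelatedParams configs = {ConfigRelatedParams{empty_plugin_config, fusingParamsSet}};
//     auto cases = makeSingleGroupConvCPUTestCases({3, 3},          // kernels
//                                                  {1, 1},          // strides
//                                                  {1, 1},          // dilations
//                                                  {0, 0},          // pads begin
//                                                  {0, 0},          // pads end
//                                                  ov::op::PadType::VALID,
//                                                  2,               // groups
//                                                  1,               // batch
//                                                  {5, 5},          // spatial dims
//                                                  2,               // in channels per group
//                                                  2,               // out channels per group
//                                                  {conv_avx2_2D, conv_avx2_2D_nspc},
//                                                  configs);        // -> 2 x |fusingParamsSet| cases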
-std::vector<groupConvLayerCPUTestParamsSet> makeSingleGroupConvCPUTestCases(SizeVector kernels, SizeVector strides, SizeVector dilations,
-                                                                            std::vector<ptrdiff_t> padBegins, std::vector<ptrdiff_t> padEnds,
-                                                                            ngraph::op::PadType padType, int groups, int mb, SizeVector spDims,
-                                                                            int inGroupSize, int outGroupSize,
-                                                                            const std::vector<CPUSpecificParams>& CPUParams,
-                                                                            const VecConfigRelatedParams& vecConfigRelatedParams) {
+std::vector<groupConvLayerCPUTestParamsSet> makeSingleGroupConvCPUTestCases(
+    std::vector<size_t> kernels,
+    std::vector<size_t> strides,
+    std::vector<size_t> dilations,
+    std::vector<ptrdiff_t> padBegins,
+    std::vector<ptrdiff_t> padEnds,
+    ov::op::PadType padType,
+    int groups,
+    int mb,
+    std::vector<size_t> spDims,
+    int inGroupSize,
+    int outGroupSize,
+    const std::vector<CPUSpecificParams>& CPUParams,
+    const VecConfigRelatedParams& vecConfigRelatedParams) {
     int inChannels = groups * inGroupSize;
     int outChannels = groups * outGroupSize;
 
     InputShape inputShapes;
-    SizeVector targetShape;
+    std::vector<size_t> targetShape;
     targetShape.push_back(mb);
     targetShape.push_back(inChannels);
     targetShape.insert(targetShape.end(), spDims.begin(), spDims.end());
     inputShapes.second.push_back({targetShape});
 
-    groupConvSpecificParams specificParams(kernels, strides, padBegins, padEnds, dilations, outChannels, groups, padType);
+    groupConvSpecificParams
+        specificParams(kernels, strides, padBegins, padEnds, dilations, outChannels, groups, padType);
     std::vector<groupConvLayerCPUTestParamsSet> retVector;
 
     for (auto& configRelatedParams : vecConfigRelatedParams) {
         VecFusingParams fusingParams;
-        Config config;
+        ov::AnyMap config;
         std::tie(config, fusingParams) = configRelatedParams;
 
-        groupConvLayerTestParamsSet basicParamsSet(specificParams, ElementType::f32, ElementType::undefined, ElementType::undefined,
-                                                   inputShapes, ov::test::utils::DEVICE_CPU);
+        groupConvLayerTestsParamsSet basicParamsSet(specificParams,
+                                                    ElementType::f32,
+                                                    ElementType::undefined,
+                                                    ElementType::undefined,
+                                                    inputShapes,
+                                                    ov::test::utils::DEVICE_CPU);
 
-        for (auto &item : CPUParams) {
-            for (auto &fusingParam : fusingParams) {
+        for (auto& item : CPUParams) {
+            for (auto& fusingParam : fusingParams) {
                 retVector.push_back(groupConvLayerCPUTestParamsSet(basicParamsSet, item, fusingParam, config));
             }
         }
    }
-   return retVector;
+    return retVector;
 }
 
-template<typename T>
+template <typename T>
 void concatTestCases(std::vector<groupConvLayerCPUTestParamsSet>& resultVec, T tesCase) {
-    resultVec.insert(resultVec.begin(), std::make_move_iterator(tesCase.begin()), std::make_move_iterator(tesCase.end()));
+    resultVec.insert(resultVec.begin(),
+                     std::make_move_iterator(tesCase.begin()),
+                     std::make_move_iterator(tesCase.end()));
 }
 
-template<typename T, typename... Args>
+template <typename T, typename... Args>
 void concatTestCases(std::vector<groupConvLayerCPUTestParamsSet>& resultVec, T&& tesCase, Args&&... args) {
     concatTestCases(resultVec, std::forward<T>(tesCase));
     concatTestCases(resultVec, std::forward<Args>(args)...);
 }
 
-template<typename... Args>
+template <typename... Args>
 std::vector<groupConvLayerCPUTestParamsSet> generateSingleGroupConvCPUTestCases(Args&&... args) {
     std::vector<groupConvLayerCPUTestParamsSet> retVec;
     concatTestCases(retVec, std::forward<Args>(args)...);
@@ -1625,287 +1421,960 @@ std::vector<groupConvLayerCPUTestParamsSet> generateSingleGroupConvCPUTestCases(
 
 /* COMMON PARAMS */
-const VecConfigRelatedParams vecPrcConnectParamsFP32 = {ConfigRelatedParams{cpuEmptyPluginConfig, fusingParamsSet}};
-const VecConfigRelatedParams vecPrcConnectParams = {ConfigRelatedParams{cpuEmptyPluginConfig, fusingParamsSet},
-                                                    ConfigRelatedParams{cpuBF16PluginConfig, fusingParamsSetBF16}};
-const VecConfigRelatedParams vecPrcConnectParamsBF16 = {ConfigRelatedParams{cpuBF16PluginConfig, fusingParamsSetBF16}};
+const VecConfigRelatedParams vecPrcConnectParamsFP32 = {ConfigRelatedParams{empty_plugin_config, fusingParamsSet}};
+const VecConfigRelatedParams vecPrcConnectParams = {ConfigRelatedParams{empty_plugin_config, fusingParamsSet},
+                                                    ConfigRelatedParams{cpu_bf16_plugin_config, fusingParamsSetBF16}};
+const VecConfigRelatedParams vecPrcConnectParamsBF16 = {
+    ConfigRelatedParams{cpu_bf16_plugin_config, fusingParamsSetBF16}};
 
-const VecConfigRelatedParams vecPrcConnectParamsFP32Default = {ConfigRelatedParams{cpuEmptyPluginConfig, VecFusingParams{emptyFusingSpec}}};
-const VecConfigRelatedParams vecPrcConnectParamsDefault = {ConfigRelatedParams{cpuEmptyPluginConfig, VecFusingParams{emptyFusingSpec}},
-                                                           ConfigRelatedParams{cpuBF16PluginConfig, VecFusingParams{emptyFusingSpec}}};
+const VecConfigRelatedParams vecPrcConnectParamsFP32Default = {
+    ConfigRelatedParams{empty_plugin_config, VecFusingParams{emptyFusingSpec}}};
+const VecConfigRelatedParams vecPrcConnectParamsDefault = {
+    ConfigRelatedParams{empty_plugin_config, VecFusingParams{emptyFusingSpec}},
+    ConfigRelatedParams{cpu_bf16_plugin_config, VecFusingParams{emptyFusingSpec}}};
 
 /* ============= GEMM GroupConvolution ============= */
 const std::vector<groupConvLayerCPUTestParamsSet> gemmGroupConvTestCases = generateSingleGroupConvCPUTestCases(
-    // 1. is_depthwise (true, false)
-    // 2. jcp.im2col_sz (=0,>0)
-    // 3.
is_blocking_applicable (true, false) - - // is_depthwise == false, im2col_sz > 0 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 2, 2, CPUParams_Gemm_2D, vecPrcConnectParams), - // is_depthwise == true - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 2, 1, {5, 5}, 1, 1, - CPUParams_Gemm_2D, vecPrcConnectParams), - // im2col_sz == 0, is_blocking_applicable == true - makeSingleGroupConvCPUTestCases({1, 1}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 2, 2, CPUParams_Gemm_2D, vecPrcConnectParams), - // is_blocking_applicable == false ((jcp.im2col_sz == 0) && (jcp.ic / jcp.oc >= 42)) - makeSingleGroupConvCPUTestCases({1, 1}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 42, 1, CPUParams_Gemm_2D, vecPrcConnectParams), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, - 3, 2, {129, 129}, 4, 2, CPUParams_Gemm_2D, vecPrcConnectParamsDefault), - makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10}, 3, 3, CPUParams_Gemm_2D, vecPrcConnectParamsDefault), - makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, - 3, 2, {33, 33, 33}, 4, 2, CPUParams_Gemm_3D, vecPrcConnectParamsDefault), - makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10, 10}, 3, 3, CPUParams_Gemm_3D, vecPrcConnectParams) -); - -INSTANTIATE_TEST_SUITE_P(smoke_GEMM_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(gemmGroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); + // 1. is_depthwise (true, false) + // 2. jcp.im2col_sz (=0,>0) + // 3. 
is_blocking_applicable (true, false) + + // is_depthwise == false, im2col_sz > 0 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 2, + 2, + CPUParams_Gemm_2D, + vecPrcConnectParams), + // is_depthwise == true + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 1, + 1, + CPUParams_Gemm_2D, + vecPrcConnectParams), + // im2col_sz == 0, is_blocking_applicable == true + makeSingleGroupConvCPUTestCases({1, 1}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 2, + 2, + CPUParams_Gemm_2D, + vecPrcConnectParams), + // is_blocking_applicable == false ((jcp.im2col_sz == 0) && (jcp.ic / jcp.oc >= 42)) + makeSingleGroupConvCPUTestCases({1, 1}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 42, + 1, + CPUParams_Gemm_2D, + vecPrcConnectParams), + + // "hard" cases + makeSingleGroupConvCPUTestCases({3, 3}, + {2, 2}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {129, 129}, + 4, + 2, + CPUParams_Gemm_2D, + vecPrcConnectParamsDefault), + makeSingleGroupConvCPUTestCases({2, 4}, + {1, 2}, + {3, 2}, + {2, 1}, + {1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10}, + 3, + 3, + CPUParams_Gemm_2D, + vecPrcConnectParamsDefault), + makeSingleGroupConvCPUTestCases({3, 3, 3}, + {2, 2, 2}, + {1, 1, 1}, + {1, 1, 1}, + {1, 1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {33, 33, 33}, + 4, + 2, + CPUParams_Gemm_3D, + vecPrcConnectParamsDefault), + makeSingleGroupConvCPUTestCases({2, 3, 4}, + {1, 2, 2}, + {3, 1, 2}, + {2, 2, 1}, + {1, 1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10, 10}, + 3, + 3, + CPUParams_Gemm_3D, + vecPrcConnectParams)); + +INSTANTIATE_TEST_SUITE_P(smoke_GEMM_GroupConv, + GroupConvolutionLayerCPUTest, + ::testing::ValuesIn(filterParamsSetForDevice(gemmGroupConvTestCases)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= JIT SSE42 GroupConvolution ============= */ const std::vector sse42_GroupConv = {conv_sse42_2D, conv_sse42_2D_nspc}; const std::vector JIT_SSE42_GroupConvTestCases = generateSingleGroupConvCPUTestCases( - // 1. jcp.ur_w (=3,<3) - // 2. jcp.ur_w_tail (=0,>0) - // 3. jcp.kw (>7,<=7) - // 4. jcp.nb_oc = jcp.oc / jcp.oc_block; - // 5. jcp.nb_ic = jcp.ic / jcp.ic_block; - // 6. 
ocb_work - - // jcp.ur_w == 3, jcp.ur_w_tail == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 10}, 8, 8, sse42_GroupConv, vecPrcConnectParamsFP32), - // jcp.ur_w < 3 (jcp.ur_w == jcp.ow) - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 4}, 8, 8, sse42_GroupConv, vecPrcConnectParamsFP32), - // jcp.ur_w == 3, jcp.ur_w_tail == 0 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 11}, 8, 8, sse42_GroupConv, vecPrcConnectParamsFP32), - // jcp.kw > 7 - makeSingleGroupConvCPUTestCases({3, 8}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 10}, 8, 8, sse42_GroupConv, vecPrcConnectParamsFP32), - // jcp.nb_oc == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 8, 16, sse42_GroupConv, vecPrcConnectParamsFP32), - // jcp.nb_ic == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 16, 8, sse42_GroupConv, vecPrcConnectParamsFP32), - // ocb_work > 1 (ocb_work == 2) - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 8, 40, sse42_GroupConv, vecPrcConnectParamsFP32), - // jcp.nb_ic == 2, ocb_work == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 16, 40, sse42_GroupConv, vecPrcConnectParamsFP32), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, - 3, 2, {129, 129}, 8, 8, sse42_GroupConv, vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10}, 8, 8, sse42_GroupConv, vecPrcConnectParamsFP32Default) - - // not supported jit_sse42 for 3d - // makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, - // 3, 2, {33, 33, 33}, 8, 8, cpuParams_sse42_3D), - // makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, - // 2, 1, {10, 10, 10}, 8, 8, cpuParams_sse42_3D), + // 1. jcp.ur_w (=3,<3) + // 2. jcp.ur_w_tail (=0,>0) + // 3. jcp.kw (>7,<=7) + // 4. jcp.nb_oc = jcp.oc / jcp.oc_block; + // 5. jcp.nb_ic = jcp.ic / jcp.ic_block; + // 6. 
ocb_work + + // jcp.ur_w == 3, jcp.ur_w_tail == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 10}, + 8, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // jcp.ur_w < 3 (jcp.ur_w == jcp.ow) + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 4}, + 8, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // jcp.ur_w == 3, jcp.ur_w_tail == 0 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 11}, + 8, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // jcp.kw > 7 + makeSingleGroupConvCPUTestCases({3, 8}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 10}, + 8, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // jcp.nb_oc == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 8, + 16, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // jcp.nb_ic == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 16, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // ocb_work > 1 (ocb_work == 2) + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 8, + 40, + sse42_GroupConv, + vecPrcConnectParamsFP32), + // jcp.nb_ic == 2, ocb_work == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 16, + 40, + sse42_GroupConv, + vecPrcConnectParamsFP32), + + // "hard" cases + makeSingleGroupConvCPUTestCases({3, 3}, + {2, 2}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {129, 129}, + 8, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32Default), + makeSingleGroupConvCPUTestCases({2, 4}, + {1, 2}, + {3, 2}, + {2, 1}, + {1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10}, + 8, + 8, + sse42_GroupConv, + vecPrcConnectParamsFP32Default) + + // not supported jit_sse42 for 3d + // makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, + // ov::op::PadType::EXPLICIT, + // 3, 2, {33, 33, 33}, 8, 8, cpuParams_sse42_3D), + // makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, + // ov::op::PadType::EXPLICIT, + // 2, 1, {10, 10, 10}, 8, 8, cpuParams_sse42_3D), ); -INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_GroupConv, + GroupConvolutionLayerCPUTest, + ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_GroupConvTestCases)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= JIT AVX2 GroupConvolution ============= */ const std::vector avx2_GroupConv_2D = {conv_avx2_2D, conv_avx2_2D_nspc}; const std::vector avx2_GroupConv_3D = {conv_avx2_3D, conv_avx2_3D_nspc}; const std::vector JIT_AVX2_GroupConvTestCases = generateSingleGroupConvCPUTestCases( - // 1. jcp.ur_w (=3,<3) - // 2. jcp.ur_w_tail (=0,>0) - // 3. jcp.kw (>7,<=7) - // 4. jcp.nb_oc = jcp.oc / jcp.oc_block; - // 5. jcp.nb_ic = jcp.ic / jcp.ic_block; - // 6. 
ocb_work - - // jcp.ur_w == 3, jcp.ur_w_tail == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 10}, 8, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // jcp.ur_w < 3 (jcp.ur_w == jcp.ow) - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 4}, 8, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // jcp.ur_w == 3, jcp.ur_w_tail == 0 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 11}, 8, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // jcp.kw > 7 - makeSingleGroupConvCPUTestCases({3, 8}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 10}, 8, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // jcp.nb_oc == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 8, 16, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // jcp.nb_ic == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 16, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // ocb_work > 1 (ocb_work == 2) - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 8, 40, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - // jcp.nb_ic == 2, ocb_work == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 16, 40, avx2_GroupConv_2D, vecPrcConnectParamsFP32), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, - 3, 2, {129, 129}, 8, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10}, 8, 8, avx2_GroupConv_2D, vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, - 3, 2, {33, 33, 33}, 8, 8, avx2_GroupConv_3D, vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10, 10}, 8, 8, avx2_GroupConv_3D, vecPrcConnectParamsFP32) -); - -INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX2_GroupConv, GroupConvolutionLayerCPUTest, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX2_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); + // 1. jcp.ur_w (=3,<3) + // 2. jcp.ur_w_tail (=0,>0) + // 3. jcp.kw (>7,<=7) + // 4. jcp.nb_oc = jcp.oc / jcp.oc_block; + // 5. jcp.nb_ic = jcp.ic / jcp.ic_block; + // 6. 
ocb_work + + // jcp.ur_w == 3, jcp.ur_w_tail == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 10}, + 8, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // jcp.ur_w < 3 (jcp.ur_w == jcp.ow) + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 4}, + 8, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // jcp.ur_w == 3, jcp.ur_w_tail == 0 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 11}, + 8, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // jcp.kw > 7 + makeSingleGroupConvCPUTestCases({3, 8}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 10}, + 8, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // jcp.nb_oc == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 8, + 16, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // jcp.nb_ic == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 16, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // ocb_work > 1 (ocb_work == 2) + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 8, + 40, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + // jcp.nb_ic == 2, ocb_work == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 16, + 40, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32), + + // "hard" cases + makeSingleGroupConvCPUTestCases({3, 3}, + {2, 2}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {129, 129}, + 8, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32Default), + makeSingleGroupConvCPUTestCases({2, 4}, + {1, 2}, + {3, 2}, + {2, 1}, + {1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10}, + 8, + 8, + avx2_GroupConv_2D, + vecPrcConnectParamsFP32Default), + makeSingleGroupConvCPUTestCases({3, 3, 3}, + {2, 2, 2}, + {1, 1, 1}, + {1, 1, 1}, + {1, 1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {33, 33, 33}, + 8, + 8, + avx2_GroupConv_3D, + vecPrcConnectParamsFP32Default), + makeSingleGroupConvCPUTestCases({2, 3, 4}, + {1, 2, 2}, + {3, 1, 2}, + {2, 2, 1}, + {1, 1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10, 10}, + 8, + 8, + avx2_GroupConv_3D, + vecPrcConnectParamsFP32)); + +INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX2_GroupConv, + GroupConvolutionLayerCPUTest, + ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX2_GroupConvTestCases)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= JIT AVX512 GroupConvolution ============= */ const std::vector avx512_GroupConv_2D = {conv_avx512_2D, conv_avx512_2D_nspc}; const std::vector avx512_GroupConv_3D = {conv_avx512_3D, conv_avx512_3D_nspc}; const std::vector JIT_AVX512_GroupConvTestCases = generateSingleGroupConvCPUTestCases( - // 1. "blocked to blocked" or "planar to blocked" - // 2. 
jcp.nb_ic, jcp.nb_oc - - // blocked to blocked - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 16, 16, avx512_GroupConv_2D, vecPrcConnectParams), - // jcp.nb_ic == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 32, 16, avx512_GroupConv_2D, vecPrcConnectParams), - // jcp.nb_oc == 2 - makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, - 2, 1, {5, 5}, 16, 32, avx512_GroupConv_2D, vecPrcConnectParams), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 3, 2, {129, 129}, 16, 16, - avx512_GroupConv_2D, vecPrcConnectParams), - makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10}, 16, 16, avx512_GroupConv_2D, vecPrcConnectParamsDefault), - makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, - 3, 2, {33, 33, 33}, 16, 16, avx512_GroupConv_3D, vecPrcConnectParamsDefault), - makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, - 2, 1, {10, 10, 10}, 16, 16, avx512_GroupConv_3D, vecPrcConnectParams) -); - -INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_GroupConv, GroupConvolutionLayerCPUTest, - ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); + // 1. "blocked to blocked" or "planar to blocked" + // 2. jcp.nb_ic, jcp.nb_oc + + // blocked to blocked + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 16, + 16, + avx512_GroupConv_2D, + vecPrcConnectParams), + // jcp.nb_ic == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 32, + 16, + avx512_GroupConv_2D, + vecPrcConnectParams), + // jcp.nb_oc == 2 + makeSingleGroupConvCPUTestCases({3, 3}, + {1, 1}, + {1, 1}, + {0, 0}, + {0, 0}, + ov::op::PadType::VALID, + 2, + 1, + {5, 5}, + 16, + 32, + avx512_GroupConv_2D, + vecPrcConnectParams), + + // "hard" cases + makeSingleGroupConvCPUTestCases({3, 3}, + {2, 2}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {129, 129}, + 16, + 16, + avx512_GroupConv_2D, + vecPrcConnectParams), + makeSingleGroupConvCPUTestCases({2, 4}, + {1, 2}, + {3, 2}, + {2, 1}, + {1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10}, + 16, + 16, + avx512_GroupConv_2D, + vecPrcConnectParamsDefault), + makeSingleGroupConvCPUTestCases({3, 3, 3}, + {2, 2, 2}, + {1, 1, 1}, + {1, 1, 1}, + {1, 1, 1}, + ov::op::PadType::EXPLICIT, + 3, + 2, + {33, 33, 33}, + 16, + 16, + avx512_GroupConv_3D, + vecPrcConnectParamsDefault), + makeSingleGroupConvCPUTestCases({2, 3, 4}, + {1, 2, 2}, + {3, 1, 2}, + {2, 2, 1}, + {1, 1, 0}, + ov::op::PadType::EXPLICIT, + 2, + 1, + {10, 10, 10}, + 16, + 16, + avx512_GroupConv_3D, + vecPrcConnectParams)); + +INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_GroupConv, + GroupConvolutionLayerCPUTest, + ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_GroupConvTestCases)), + GroupConvolutionLayerCPUTest::getTestCaseName); /* ============= JIT SSE42 DW GroupConvolution ============= */ const std::vector sse42_DW_2D = {conv_sse42_dw_2D, conv_sse42_dw_2D_nspc}; const std::vector sse42_DW_3D = {conv_sse42_dw_3D, 
 /* ============= JIT SSE42 DW GroupConvolution ============= */
 const std::vector<CPUSpecificParams> sse42_DW_2D = {conv_sse42_dw_2D, conv_sse42_dw_2D_nspc};
 const std::vector<CPUSpecificParams> sse42_DW_3D = {conv_sse42_dw_3D, conv_sse42_dw_3D_nspc};
 const std::vector JIT_SSE42_DW_GroupConvTestCases = generateSingleGroupConvCPUTestCases(
-    // 1. jcp.ngroups % simd_w (=0,!=0)
-    // 2. jcp.nb_ch
-    // 3. jcp.nb_ch_blocking (=2,<2)
-    // 4. jcp.ur_w == 3
-
-    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 5}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
-    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 2, jcp.nb_ch_blocking == 2 (jcp.ngroups == 16)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 16, 1, {5, 5}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
-    // jcp.ngroups % simd_w != 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 2 (jcp.ngroups == 17) TODO: pad channels not supported for SSE42
-    // makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 17, 1, {5, 5}, 1, 1, conv_sse42_DW_2D, vecPrcConnectParamsFP32only),
-    // jcp.ow > jcp.ur_w (jcp.ow == 7)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
-
-    // "hard" cases
-    makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 8, 2, {129, 129}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
-    makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 8, 1, {10, 10}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32Default),
-    makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, 8, 2, {33, 33, 33}, 1, 1, sse42_DW_3D, vecPrcConnectParamsFP32Default),
-    makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, 8, 1, {10, 10, 10}, 1, 1, sse42_DW_3D, vecPrcConnectParamsFP32)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_DW_GroupConv, GroupConvolutionLayerCPUTest,
-                         ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_DW_GroupConvTestCases)),
-                         GroupConvolutionLayerCPUTest::getTestCaseName);
+    // 1. jcp.ngroups % simd_w (=0,!=0)
+    // 2. jcp.nb_ch
+    // 3. jcp.nb_ch_blocking (=2,<2)
+    // 4. jcp.ur_w == 3
+
+    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 8, 1, {5, 5}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
+    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 2, jcp.nb_ch_blocking == 2 (jcp.ngroups == 16)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 16, 1, {5, 5}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
+    // jcp.ngroups % simd_w != 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 2 (jcp.ngroups == 17) TODO: pad channels not
+    // supported for SSE42
+    // makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 17, 1, {5, 5}, 1, 1, conv_sse42_DW_2D, vecPrcConnectParamsFP32only),
+    // jcp.ow > jcp.ur_w (jcp.ow == 7)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
+
+    // "hard" cases
+    makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, 8, 2, {129, 129}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32),
+    makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ov::op::PadType::EXPLICIT, 8, 1, {10, 10}, 1, 1, sse42_DW_2D, vecPrcConnectParamsFP32Default),
+    makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ov::op::PadType::EXPLICIT, 8, 2, {33, 33, 33}, 1, 1, sse42_DW_3D, vecPrcConnectParamsFP32Default),
+    makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ov::op::PadType::EXPLICIT, 8, 1, {10, 10, 10}, 1, 1, sse42_DW_3D, vecPrcConnectParamsFP32));
+
+INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_DW_GroupConv,
+                         GroupConvolutionLayerCPUTest,
+                         ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_DW_GroupConvTestCases)),
+                         GroupConvolutionLayerCPUTest::getTestCaseName);
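The jcp.ur_w comments describe register-level unrolling over the output row: a kernel that unrolls three output columns at a time must emit a tail iteration whenever ow is not a multiple of ur_w, which is why the {5, 9} input (ow == 7) appears in the list. A rough sketch of that tiling, with loop bounds invented purely for illustration:

#include <cstddef>
#include <iostream>

// Illustrative only: walk an output row of width `ow` in chunks of
// `ur_w` columns, the way a JIT kernel unrolls its inner loop.
void walk_output_row(std::size_t ow, std::size_t ur_w) {
    std::size_t x = 0;
    for (; x + ur_w <= ow; x += ur_w)
        std::cout << "full block [" << x << ", " << x + ur_w << ")\n";
    if (x < ow)  // tail block, exercised when ow % ur_w != 0
        std::cout << "tail block [" << x << ", " << ow << ")\n";
}

int main() {
    walk_output_row(7, 3);  // the SSE42 DW case above: ow == 7, ur_w == 3
}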
 /* ============= JIT AVX2 DW GroupConvolution ============= */
 const std::vector<CPUSpecificParams> avx2_DW_2D = {conv_avx2_dw_2D, conv_avx2_dw_2D_nspc};
 const std::vector<CPUSpecificParams> avx2_DW_3D = {conv_avx2_dw_3D, conv_avx2_dw_3D_nspc};
 const std::vector JIT_AVX2_DW_GroupConvTestCases = generateSingleGroupConvCPUTestCases(
-    // 1. jcp.ngroups % simd_w (=0,!=0)
-    // 2. jcp.nb_ch
-    // 3. jcp.nb_ch_blocking (=3,<3)
-    // 4. jcp.ur_w == 4
-
-    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 5}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
-    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 3 (jcp.ngroups == 24)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 24, 1, {5, 5}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
-    // jcp.ngroups % simd_w != 0, jcp.nb_ch == 4, jcp.nb_ch_blocking == 3 (jcp.ngroups == 25)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 25, 1, {5, 5}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
-    // jcp.ow > jcp.ur_w (jcp.ow == 7)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
-
-    // "hard" cases
-    makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 8, 2, {129, 129}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32Default),
-    makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 8, 1, {10, 10}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32Default),
-    makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, 8, 2, {33, 33, 33}, 1, 1, avx2_DW_3D, vecPrcConnectParamsFP32Default),
-    makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, 8, 1, {10, 10, 10}, 1, 1, avx2_DW_3D, vecPrcConnectParamsFP32)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX2_DW_GroupConv, GroupConvolutionLayerCPUTest,
-                         ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX2_DW_GroupConvTestCases)),
-                         GroupConvolutionLayerCPUTest::getTestCaseName);
+    // 1. jcp.ngroups % simd_w (=0,!=0)
+    // 2. jcp.nb_ch
+    // 3. jcp.nb_ch_blocking (=3,<3)
+    // 4. jcp.ur_w == 4
+
+    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 8, 1, {5, 5}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
+    // jcp.ngroups % simd_w == 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 3 (jcp.ngroups == 24)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 24, 1, {5, 5}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
+    // jcp.ngroups % simd_w != 0, jcp.nb_ch == 4, jcp.nb_ch_blocking == 3 (jcp.ngroups == 25)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 25, 1, {5, 5}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
+    // jcp.ow > jcp.ur_w (jcp.ow == 7)
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32),
+
+    // "hard" cases
+    makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, 8, 2, {129, 129}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32Default),
+    makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ov::op::PadType::EXPLICIT, 8, 1, {10, 10}, 1, 1, avx2_DW_2D, vecPrcConnectParamsFP32Default),
+    makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ov::op::PadType::EXPLICIT, 8, 2, {33, 33, 33}, 1, 1, avx2_DW_3D, vecPrcConnectParamsFP32Default),
+    makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ov::op::PadType::EXPLICIT, 8, 1, {10, 10, 10}, 1, 1, avx2_DW_3D, vecPrcConnectParamsFP32));
+
+INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX2_DW_GroupConv,
+                         GroupConvolutionLayerCPUTest,
+                         ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX2_DW_GroupConvTestCases)),
+                         GroupConvolutionLayerCPUTest::getTestCaseName);
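For the depthwise cases, each run of simd_w groups forms one channel block (nb_ch), and blocks are themselves processed nb_ch_blocking at a time, so ngroups == 25 with the AVX2 simd width of 8 yields four blocks handled as 3 + 1, exactly as the comment above states. A small sketch of that arithmetic (variable names are illustrative):

#include <cstddef>
#include <cstdio>

int main() {
    const std::size_t ngroups = 25, simd_w = 8, nb_ch_blocking = 3;
    const std::size_t nb_ch = (ngroups + simd_w - 1) / simd_w;  // 4 blocks, last one padded
    const std::size_t full_steps = nb_ch / nb_ch_blocking;      // 1 step covering 3 blocks
    const std::size_t tail_blocks = nb_ch % nb_ch_blocking;     // 1 leftover block
    std::printf("nb_ch=%zu, full steps=%zu, tail blocks=%zu\n", nb_ch, full_steps, tail_blocks);
}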
 /* ============= JIT AVX512 DW GroupConvolution ============= */
 const std::vector<CPUSpecificParams> avx512_DW_2D = {conv_avx512_dw_2D, conv_avx512_dw_2D_nspc};
 const std::vector<CPUSpecificParams> avx512_DW_3D = {conv_avx512_dw_3D, conv_avx512_dw_3D_nspc};
-const std::vector JIT_AVX512_DW_GroupConvTestCases = generateSingleGroupConvCPUTestCases(
+const std::vector JIT_AVX512_DW_GroupConvTestCases =
+    generateSingleGroupConvCPUTestCases(
     // 1. jcp.ngroups % simd_w (=0,!=0)
     // 2. jcp.nb_ch
     // 3. jcp.nb_ch_blocking (=4,<4)
     // 4. jcp.ur_w == 6

     // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 16)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 16, 1, {5, 5}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 16, 1, {5, 5}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
     // jcp.ngroups % simd_w == 0, jcp.nb_ch == 4, jcp.nb_ch_blocking == 4 (jcp.ngroups == 64)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 64, 1, {5, 5}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 64, 1, {5, 5}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
     // jcp.ngroups % simd_w != 0, jcp.nb_ch == 5, jcp.nb_ch_blocking == 4 (jcp.ngroups == 65)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 65, 1, {5, 5}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 65, 1, {5, 5}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
     // jcp.ow > jcp.ur_w (jcp.ow == 7)
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, avx512_DW_2D, vecPrcConnectParams),
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::VALID, 8, 1, {5, 9}, 1, 1, avx512_DW_2D, vecPrcConnectParams),

     // "hard" cases
-    makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 16, 2, {129, 129}, 1, 1, avx512_DW_2D, vecPrcConnectParamsDefault),
-    makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ngraph::op::PadType::EXPLICIT, 16, 1, {10, 10}, 1, 1, avx512_DW_2D, vecPrcConnectParamsDefault),
-    makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ngraph::op::PadType::EXPLICIT, 16, 2, {33, 33, 33}, 1, 1, avx512_DW_3D, vecPrcConnectParamsDefault),
-    makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ngraph::op::PadType::EXPLICIT, 16, 1, {10, 10, 10}, 1, 1, avx512_DW_3D, vecPrcConnectParams)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_DW_GroupConv, GroupConvolutionLayerCPUTest,
-                         ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_DW_GroupConvTestCases)),
-                         GroupConvolutionLayerCPUTest::getTestCaseName);
+    makeSingleGroupConvCPUTestCases({3, 3}, {2, 2}, {1, 1}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, 16, 2, {129, 129}, 1, 1, avx512_DW_2D, vecPrcConnectParamsDefault),
+    makeSingleGroupConvCPUTestCases({2, 4}, {1, 2}, {3, 2}, {2, 1}, {1, 0}, ov::op::PadType::EXPLICIT, 16, 1, {10, 10}, 1, 1, avx512_DW_2D, vecPrcConnectParamsDefault),
+    makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, ov::op::PadType::EXPLICIT, 16, 2, {33, 33, 33}, 1, 1, avx512_DW_3D, vecPrcConnectParamsDefault),
+    makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, ov::op::PadType::EXPLICIT, 16, 1, {10, 10, 10}, 1, 1, avx512_DW_3D, vecPrcConnectParams));
+
+INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_DW_GroupConv,
+                         GroupConvolutionLayerCPUTest,
+                         ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_DW_GroupConvTestCases)),
+                         GroupConvolutionLayerCPUTest::getTestCaseName);

 /* ============= JIT SSE42 1x1 Convolution (not supported with groups) ============= */
 /* ============= JIT AVX2 1x1 Convolution (not supported with groups) ============= */

@@ -1920,24 +2389,48 @@ const std::vector<CPUSpecificParams> CPUParams_Fallback_Brgemm_2D = {
     CPUSpecificParams{{nhwc}, {nhwc}, {/* non-brgconv_avx512_amx is expected */}, "brgconv_avx512_amx"},
 };
 const std::vector<CPUSpecificParams> CPUParams_Fallback_Brgemm_1D_Small_Shape = {
-    CPUSpecificParams{{nwc}, {nwc}, {/* non-brgconv_avx512_amx is expected */}, "brgconv_avx512_amx"}
-};
-const std::vector BRGEMM_EXPECT_FALLBACK_GroupConvTestCases = generateSingleGroupConvCPUTestCases(
+    CPUSpecificParams{{nwc}, {nwc}, {/* non-brgconv_avx512_amx is expected */}, "brgconv_avx512_amx"}};
+const std::vector BRGEMM_EXPECT_FALLBACK_GroupConvTestCases =
+    generateSingleGroupConvCPUTestCases(
     // channel <= 16
     // https://github.com/openvinotoolkit/oneDNN/blob/6df930dab5ab0a7dfaea6100acd03b479e2fa0a8/src/cpu/x64/jit_brgemm_conv_utils.cpp#L1712
-    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ngraph::op::PadType::EXPLICIT, 4, 1, {5, 5}, 16, 16, CPUParams_Fallback_Brgemm_2D, vecPrcConnectParamsFP32),
+    makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, ov::op::PadType::EXPLICIT, 4, 1, {5, 5}, 16, 16, CPUParams_Fallback_Brgemm_2D, vecPrcConnectParamsFP32),
     // small shape on amx
    // https://github.com/openvinotoolkit/oneDNN/blob/6df930dab5ab0a7dfaea6100acd03b479e2fa0a8/src/cpu/x64/jit_brgemm_conv_utils.cpp#L1719
-    makeSingleGroupConvCPUTestCases({3}, {1}, {1}, {0}, {0}, ngraph::op::PadType::EXPLICIT, 4, 1, {3}, 32, 32, CPUParams_Fallback_Brgemm_1D_Small_Shape, vecPrcConnectParamsBF16)
-);
-
-INSTANTIATE_TEST_SUITE_P(smoke_BRGEMM_EXPECT_FALLBACK_GroupConv, ExpectFallbackGroupConvolutionLayerCPUTest,
-                         ::testing::ValuesIn(filterParamsSetForDevice(BRGEMM_EXPECT_FALLBACK_GroupConvTestCases)),
-                         ExpectFallbackGroupConvolutionLayerCPUTest::getTestCaseName);
+    makeSingleGroupConvCPUTestCases({3}, {1}, {1}, {0}, {0}, ov::op::PadType::EXPLICIT, 4, 1, {3}, 32, 32, CPUParams_Fallback_Brgemm_1D_Small_Shape, vecPrcConnectParamsBF16));
+
+INSTANTIATE_TEST_SUITE_P(smoke_BRGEMM_EXPECT_FALLBACK_GroupConv,
+                         ExpectFallbackGroupConvolutionLayerCPUTest,
+                         ::testing::ValuesIn(filterParamsSetForDevice(BRGEMM_EXPECT_FALLBACK_GroupConvTestCases)),
+                         ExpectFallbackGroupConvolutionLayerCPUTest::getTestCaseName);
 /* ============= brgemm GroupConvolution test, expect fallback to other implementation, end ============= */
-}  // namespace
+}  // namespace
-}  // namespace CPULayerTestsDefinitions
+}  // namespace test
+}  // namespace ov
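The two fallback cases mirror the heuristics referenced by the oneDNN links above: brgconv_avx512_amx is not selected when a group carries few channels or the spatial shape is very small. A hedged paraphrase of that selection (the predicate and the threshold for "small shape" below are simplifications for illustration, not the actual jit_brgemm_conv_utils logic):

#include <cstddef>

// Illustrative stand-in for the oneDNN heuristic the tests poke at:
// small channel counts or tiny spatial shapes make the brgemm AMX
// convolution unprofitable, so another implementation is expected.
bool expect_brgemm_amx_fallback(std::size_t ic_per_group, std::size_t spatial_elems) {
    const bool few_channels = ic_per_group <= 16;  // the "channel <= 16" case
    const bool tiny_shape = spatial_elems < 8;     // the "small shape on amx" case (threshold invented)
    return few_channels || tiny_shape;
}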
"openvino/core/preprocess/pre_post_process.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/convolution_params.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/filter_cpu_info.hpp" -#include "test_utils/convolution_params.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include -#include -#include "openvino/core/preprocess/pre_post_process.hpp" -#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp" using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using GroupDeconvSpecParams = LayerTestsDefinitions::groupConvBackpropSpecificParams; +using GroupDeconvSpecParams = ov::test::groupConvBackpropSpecificParams; -using DeconvInputData = std::tuple>>; // values for 'output_shape' +using DeconvInputData = std::tuple>>; // values for 'output_shape' -using GroupDeconvLayerCPUTestParamsSet = std::tuple>; +using GroupDeconvLayerCPUTestParamsSet = std:: + tuple; class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CpuTestWithFusing { + virtual public SubgraphBaseTest, + public CpuTestWithFusing { public: static std::string getTestCaseName(testing::TestParamInfo obj) { GroupDeconvSpecParams basicParamsSet; @@ -39,17 +36,18 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface additionalConfig; + ov::AnyMap additionalConfig; std::tie(basicParamsSet, inputData, prec, fusingParams, cpuParams, additionalConfig) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels, groupNum; - std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet; + std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = + basicParamsSet; InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::vector> outShapeData; std::tie(inputShape, outShapeType, outShapeData) = inputData; @@ -86,20 +84,20 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface, ov::Tensor>& params) { + return params.first->get_friendly_name() == "param_1"; + }); OPENVINO_ASSERT(pos != inputs.end()); inputs.erase(pos); } auto expectedOutputs = calculate_refs(); if (expectedOutputs.empty()) { - return; + return; } ASSERT_EQ(actualOutputs.size(), expectedOutputs.size()) - << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size(); + << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size(); compare(expectedOutputs, actualOutputs); } @@ -162,17 +168,21 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface createGraph(const std::vector& inShapes, ngraph::helpers::InputLayerType outShapeType) { + std::shared_ptr createGraph(const std::vector& inShapes, + ov::test::utils::InputLayerType outShapeType) { ov::ParameterVector params{std::make_shared(prec, inShapes.front())}; std::shared_ptr outShapeNode; if (!outShapeData.empty()) { - if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { OPENVINO_ASSERT(inputDynamicShapes.size() 
-                auto outShapeParam = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i32, inputDynamicShapes.back());
+                auto outShapeParam =
+                    std::make_shared<ov::op::v0::Parameter>(ov::element::i32, inputDynamicShapes.back());
                 params.push_back(outShapeParam);
                 outShapeNode = outShapeParam;
             } else {
-                outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]);
+                outShapeNode = ov::op::v0::Constant::create(ov::element::i32,
+                                                            {outShapeData[inferRequestNum].size()},
+                                                            outShapeData[inferRequestNum]);
             }
         }

@@ -183,18 +193,37 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface<GroupDeconvLayerCPUTestParamsSet>,
         std::shared_ptr<ov::Node> deconv;
         if (!outShapeData.empty()) {
             OPENVINO_ASSERT(outShapeNode != nullptr);
-            deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, groupNum);
+            deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel,
+                                                                           stride, padBegin, padEnd, dilation,
+                                                                           padType, convOutChannels, groupNum);
         } else {
-            deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], prec, kernel, stride, padBegin, padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding);
+            deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], prec, kernel, stride, padBegin,
+                                                                           padEnd, dilation, padType, convOutChannels,
+                                                                           groupNum, false, outPadding);
         }

         return makeNgraphFunction(prec, params, deconv, "GroupDeconvCPU");
     }

 protected:
-    InferenceEngine::SizeVector kernel, stride;
+    std::vector<size_t> kernel, stride;

     void SetUp() override {
         rel_threshold = 1e-4f;

@@ -205,21 +234,23 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface<GroupDeconvLayerCPUTestParamsSet>,
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
         std::tie(basicParamsSet, inputData, prec, fusingParams, cpuParams, additionalConfig) = this->GetParam();

         configuration.insert(additionalConfig.begin(), additionalConfig.end());
         std::tie(postOpMgrPtr, fusedOps) = fusingParams;
-        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet;
+        std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) =
+            basicParamsSet;

         InputShape inputShape;
-        ngraph::helpers::InputLayerType outShapeType;
+        ov::test::utils::InputLayerType outShapeType;
         std::tie(inputShape, outShapeType, outShapeData) = inputData;

         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;

-        if (additionalConfig[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] == InferenceEngine::PluginConfigParams::YES) {
+        if (additionalConfig[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] ==
+            InferenceEngine::PluginConfigParams::YES) {
             inType = outType = prec = ElementType::bf16;
             rel_threshold = 1e-2f;
         } else {

@@ -230,9 +261,10 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface<GroupDeconvLayerCPUTestParamsSet>,
         std::vector<InputShape> paramsShapes;
         paramsShapes.push_back(inputShape);
-        if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) {
+        if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) {
             const auto outShapeDims = ov::Shape{outShapeData.front().size()};
-            paramsShapes.push_back(InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
+            paramsShapes.push_back(
+                InputShape{outShapeDims, std::vector<ov::Shape>(inputShape.second.size(), outShapeDims)});
         }

         init_input_shapes(paramsShapes);
@@ -242,8 +274,8 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface<GroupDeconvLayerCPUTestParamsSet>,
     std::vector<size_t> dilation;
     std::vector<ptrdiff_t> padBegin, padEnd, outPadding;
     size_t convOutChannels, groupNum;
     std::vector<std::vector<int32_t>> outShapeData;

@@ -258,7 +290,9 @@ TEST_P(GroupDeconvolutionLayerCPUTest, CompareWithRefs) {
     if (stride.size() > 2)
         isSupportedParams &= stride[stride.size() - 3] <= kernel[kernel.size() - 3];
     if (!isSupportedParams) {
-        GTEST_SKIP() << "Fusing with strides more than kernel size was disabled, because oneDNN deconvolution doesn't support it" << std::endl;
+        GTEST_SKIP() << "Fusing with strides more than kernel size was disabled, because oneDNN deconvolution "
+                        "doesn't support it"
+                     << std::endl;
     }
 }

@@ -270,635 +304,534 @@ namespace {
 std::vector<CPUSpecificParams> filterCPUInfoForDevice_BF16(std::vector<CPUSpecificParams> allParams) {
     std::vector<CPUSpecificParams> specificParams;
-    bool with_bf16 = InferenceEngine::with_cpu_x86_bfloat16();
-    std::copy_if(allParams.begin(), allParams.end(), std::back_inserter(specificParams), [with_bf16](const CPUSpecificParams& item) {
-        const auto &selected = std::get<3>(item);
-        // when no bf16 hardware amx will not work
-        if (!with_bf16 && selected.find("amx") != std::string::npos) {
-            return false;
-        }
-        return true;
-    });
+    bool with_bf16 = ov::with_cpu_x86_bfloat16();
+    std::copy_if(allParams.begin(), allParams.end(), std::back_inserter(specificParams),
+                 [with_bf16](const CPUSpecificParams& item) {
+                     const auto& selected = std::get<3>(item);
+                     // when no bf16 hardware amx will not work
+                     if (!with_bf16 && selected.find("amx") != std::string::npos) {
+                         return false;
+                     }
+                     return true;
+                 });
     return filterCPUInfoForDevice(specificParams);
 }

 /* COMMON PARAMS */
-std::vector<fusingSpecificParams> fusingParamsSet {
-    emptyFusingSpec,
-    fusingScaleShift,
+std::vector<fusingSpecificParams> fusingParamsSet{
+    emptyFusingSpec,
+    fusingScaleShift,
 };

-const std::map<std::string, std::string> cpuEmptyPluginConfig;
-const std::map<std::string, std::string> cpuBF16PluginConfig = { { InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES } };
+const ov::AnyMap cpuEmptyPluginConfig;
+const ov::AnyMap cpuBF16PluginConfig = {
+    {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}};

-const std::vector<std::vector<size_t>> emptyOutputShape = {{}};
+const std::vector<std::vector<size_t>> emptyOutputShape = {{}};
 const std::vector<std::vector<size_t>> emptyOutputPadding = {{}};

 /* ============= GroupConvolution params (planar layout) ============= */
-const InferenceEngine::SizeVector numOutChannels_Planar = {6};
-const InferenceEngine::SizeVector numGroups_Planar = {2, 3};
+const std::vector<size_t> numOutChannels_Planar = {6};
+const std::vector<size_t> numGroups_Planar = {2, 3};

 /* ============= GroupConvolution params (blocked layout) ============= */
-const InferenceEngine::SizeVector numOutChannels_Blocked = {64};
-const InferenceEngine::SizeVector numGroups_Blocked = {2, 4};
+const std::vector<size_t> numOutChannels_Blocked = {64};
+const std::vector<size_t> numGroups_Blocked = {2, 4};

 /* ============= GroupConvolution params (nspc layout) ============= */
-const InferenceEngine::SizeVector numOutChannels_nspc = {64};
-const InferenceEngine::SizeVector numGroups_nspc = {2};
+const std::vector<size_t> numOutChannels_nspc = {64};
+const std::vector<size_t> numGroups_nspc = {2};

 /* ============= GroupConvolution params (DW) ============= */
-const InferenceEngine::SizeVector numOutChannels_DW = {32};
-const InferenceEngine::SizeVector numGroups_DW = {32};
+const std::vector<size_t> numOutChannels_DW = {32};
+const std::vector<size_t> numGroups_DW = {32};
 /* ============= GroupConvolution params (2D) ============= */
-const std::vector<InferenceEngine::SizeVector> kernels2d = {{3, 3}, {1, 1}};
-const std::vector<InferenceEngine::SizeVector> strides2d = {{1, 1}, {2, 2}};
+const std::vector<std::vector<size_t>> kernels2d = {{3, 3}, {1, 1}};
+const std::vector<std::vector<size_t>> strides2d = {{1, 1}, {2, 2}};
 const std::vector<std::vector<ptrdiff_t>> padBegins2d = {{0, 0}};
 const std::vector<std::vector<ptrdiff_t>> padEnds2d = {{0, 0}};
-const std::vector<InferenceEngine::SizeVector> dilations2d = {{1, 1}};
+const std::vector<std::vector<size_t>> dilations2d = {{1, 1}};

 /* ============= GroupConvolution params (3D) ============= */
-const std::vector<InferenceEngine::SizeVector> kernels3d = {{3, 3, 3}, {1, 1, 1}};
-const std::vector<InferenceEngine::SizeVector> strides3d = {{1, 1, 1}, {2, 2, 2}};
+const std::vector<std::vector<size_t>> kernels3d = {{3, 3, 3}, {1, 1, 1}};
+const std::vector<std::vector<size_t>> strides3d = {{1, 1, 1}, {2, 2, 2}};
 const std::vector<std::vector<ptrdiff_t>> padBegins3d = {{0, 0, 0}};
 const std::vector<std::vector<ptrdiff_t>> padEnds3d = {{0, 0, 0}};
-const std::vector<InferenceEngine::SizeVector> dilations3d = {{1, 1, 1}};
+const std::vector<std::vector<size_t>> dilations3d = {{1, 1, 1}};
 /* ============= */
-
 /* INSTANCES */
 /* ============= GroupConvolution (Planar 2D) ============= */
 const std::vector<DeconvInputData> Planar_2D_inputs_smoke = {
-    DeconvInputData{InputShape{{}, {{ 2, 12, 7, 7 }}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 12, -1, -1}, {{ 1, 12, 7, 7}, { 2, 12, 5, 7}, { 1, 12, 7, 7}}}, ngraph::helpers::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {15, 15}}}
-};
+    DeconvInputData{InputShape{{}, {{2, 12, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{15, 15}, {9, 10}, {15, 15}}}};

 const std::vector<DeconvInputData> Planar_2D_inputs_nightly = {
-    DeconvInputData{InputShape{{-1, 12, -1, -1}, {{ 2, 12, 7, 7}, { 2, 12, 5, 7}, { 1, 12, 9, 4}}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 12, -1, -1}, {{ 2, 12, 7, 7}, { 2, 12, 5, 7}, { 1, 12, 9, 4}, { 2, 12, 5, 7}}}, ngraph::helpers::InputLayerType::CONSTANT, {{15, 15}}},
-    DeconvInputData{InputShape{{{1, 10}, 12, 7, 7}, {{ 1, 12, 7, 7}, { 3, 12, 7, 7}, { 2, 12, 7, 7}}}, ngraph::helpers::InputLayerType::CONSTANT, {{15, 15}}}
-};
-
-const auto groupConvParams_ExplicitPadding_Planar_2D = ::testing::Combine(
-    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
-    ::testing::ValuesIn(numOutChannels_Planar), ::testing::ValuesIn(numGroups_Planar), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {}},
+    DeconvInputData{InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15}}},
+    DeconvInputData{InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15}}}};
+
+const auto groupConvParams_ExplicitPadding_Planar_2D = ::testing::Combine(
+    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
+    ::testing::ValuesIn(numOutChannels_Planar), ::testing::ValuesIn(numGroups_Planar), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_2D, ::testing::ValuesIn(Planar_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
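Each ::testing::Combine above is a cartesian product, so the Planar 2D parameter set alone expands to 2 kernels x 2 strides x 1 pad-begin x 1 pad-end x 1 dilation x 1 out-channel count x 2 group counts = 8 tuples per input/precision/fusing/config combination. A self-contained miniature of the same gtest pattern:

#include <gtest/gtest.h>
#include <tuple>
#include <vector>

// Miniature of the pattern used above: Combine() forms the cartesian
// product of its generators, one test instance per resulting tuple.
class TinyCombineTest : public testing::TestWithParam<std::tuple<int, int>> {};

TEST_P(TinyCombineTest, ProductIsEnumerated) {
    const auto [kernel, stride] = GetParam();
    EXPECT_GT(kernel, 0);
    EXPECT_GT(stride, 0);
}

const std::vector<int> tinyKernels = {1, 3};
const std::vector<int> tinyStrides = {1, 2};

// 2 kernels x 2 strides -> 4 instantiated tests.
INSTANTIATE_TEST_SUITE_P(TinyProduct, TinyCombineTest,
                         testing::Combine(testing::ValuesIn(tinyKernels), testing::ValuesIn(tinyStrides)));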
 /* ============= GroupConvolution (Planar 3D) ============= */
 const std::vector<DeconvInputData> Planar_3D_inputs_smoke = {
-    DeconvInputData{InputShape{{}, {{ 2, 12, 7, 7, 7 }}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 12, -1, -1, -1}, {{ 2, 12, 7, 7, 7}, { 2, 12, 5, 7, 7}, { 1, 12, 9, 4, 9}}}, ngraph::helpers::InputLayerType::PARAMETER, {{15, 15, 15}, {9, 10, 10}, {9, 9, 9}}}
-};
+    DeconvInputData{InputShape{{}, {{2, 12, 7, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 12, -1, -1, -1}, {{2, 12, 7, 7, 7}, {2, 12, 5, 7, 7}, {1, 12, 9, 4, 9}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{15, 15, 15}, {9, 10, 10}, {9, 9, 9}}}};

 const std::vector<DeconvInputData> Planar_3D_inputs_nightly = {
-    DeconvInputData{InputShape{{-1, 12, -1, -1, -1}, {{ 2, 12, 7, 7, 7}, { 2, 12, 5, 7, 7}, { 1, 12, 9, 4, 9}, { 2, 12, 5, 7, 7}}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 12, -1, -1, -1}, {{ 2, 12, 7, 7, 7}, { 2, 12, 5, 7, 7}, { 1, 12, 9, 4, 9}}}, ngraph::helpers::InputLayerType::CONSTANT, {{15, 15, 15}}},
-    DeconvInputData{InputShape{{{1, 10}, 12, 7, 7, 7}, {{ 3, 12, 7, 7, 7}, { 2, 12, 7, 7, 7}, { 1, 12, 7, 7, 7}}}, ngraph::helpers::InputLayerType::CONSTANT, {{15, 15, 15}}}
-};
-
-const auto groupConvParams_ExplicitPadding_Planar_3D = ::testing::Combine(
-    ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(padBegins3d), ::testing::ValuesIn(padEnds3d), ::testing::ValuesIn(dilations3d),
-    ::testing::ValuesIn(numOutChannels_Planar), ::testing::ValuesIn(numGroups_Planar), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{
+        InputShape{{-1, 12, -1, -1, -1}, {{2, 12, 7, 7, 7}, {2, 12, 5, 7, 7}, {1, 12, 9, 4, 9}, {2, 12, 5, 7, 7}}},
+        ov::test::utils::InputLayerType::CONSTANT,
+        {}},
+    DeconvInputData{InputShape{{-1, 12, -1, -1, -1}, {{2, 12, 7, 7, 7}, {2, 12, 5, 7, 7}, {1, 12, 9, 4, 9}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15, 15}}},
+    DeconvInputData{InputShape{{{1, 10}, 12, 7, 7, 7}, {{3, 12, 7, 7, 7}, {2, 12, 7, 7, 7}, {1, 12, 7, 7, 7}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15, 15}}}};
+
+const auto groupConvParams_ExplicitPadding_Planar_3D = ::testing::Combine(
+    ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(padBegins3d), ::testing::ValuesIn(padEnds3d), ::testing::ValuesIn(dilations3d),
+    ::testing::ValuesIn(numOutChannels_Planar), ::testing::ValuesIn(numGroups_Planar), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Planar_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Planar_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Planar_3D, ::testing::ValuesIn(Planar_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_3D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
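Each DeconvInputData entry pairs a (possibly dynamic) bound shape with the concrete shapes fed on successive inference requests: {-1, 12, -1, -1} leaves batch and spatial dimensions dynamic while pinning channels, and {{1, 10}, ...} bounds the batch to [1, 10]. A minimal sketch of the same idea with OpenVINO's PartialShape (illustrative, not the test harness itself):

#include <openvino/core/partial_shape.hpp>

int main() {
    // Dynamic batch and spatial dims, static channel count of 12.
    ov::PartialShape bounds{-1, 12, -1, -1};
    // Batch constrained to the interval [1, 10].
    ov::PartialShape bounded{ov::Dimension(1, 10), 12, 7, 7};

    // Each concrete shape used at inference time must be compatible
    // with the declared bounds.
    ov::PartialShape concrete{2, 12, 5, 7};
    return bounds.compatible(concrete) && bounded.compatible(ov::PartialShape{3, 12, 7, 7}) ? 0 : 1;
}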
 /* ============= GroupConvolution (Blocked 2D) ============= */
 const std::vector<DeconvInputData> Blocked_2D_inputs_smoke = {
-    DeconvInputData{InputShape{{}, {{ 2, 64, 7, 7 }}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{ 2, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 5}}}, ngraph::helpers::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {19, 9}}}
-};
-
-const auto groupConvParams_ExplicitPadding_Blocked_2D_nightly = ::testing::Combine(
-    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn({strides2d[1]}), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
-    ::testing::ValuesIn(numOutChannels_Blocked), ::testing::ValuesIn(numGroups_Blocked), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+    DeconvInputData{InputShape{{}, {{2, 64, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{2, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 5}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{15, 15}, {9, 10}, {19, 9}}}};
+
+const auto groupConvParams_ExplicitPadding_Blocked_2D_nightly = ::testing::Combine(
+    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn({strides2d[1]}), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
+    ::testing::ValuesIn(numOutChannels_Blocked), ::testing::ValuesIn(numGroups_Blocked), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));

 const std::vector<DeconvInputData> Blocked_2D_inputs_nightly = {
-    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{ 2, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 4}}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{ 2, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 4}, { 2, 64, 7, 7}}}, ngraph::helpers::InputLayerType::CONSTANT, {{15, 15}}},
-    DeconvInputData{InputShape{{{1, 10}, 64, 7, 7}, {{ 2, 64, 7, 7}, { 3, 64, 7, 7}, { 1, 64, 7, 7}}}, ngraph::helpers::InputLayerType::CONSTANT, {{15, 15}}}
-};
-
-const auto groupConvParams_ExplicitPadding_Blocked_2D = ::testing::Combine(
-    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
-    ::testing::ValuesIn(numOutChannels_Blocked), ::testing::ValuesIn(numGroups_Blocked), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D, ::testing::ValuesIn(Blocked_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D, ::testing::ValuesIn(Blocked_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D_nightly, ::testing::ValuesIn(Blocked_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D_nightly, ::testing::ValuesIn(Blocked_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{2, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 4}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {}},
+    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{2, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 4}, {2, 64, 7, 7}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15}}},
+    DeconvInputData{InputShape{{{1, 10}, 64, 7, 7}, {{2, 64, 7, 7}, {3, 64, 7, 7}, {1, 64, 7, 7}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{15, 15}}}};
+
+const auto groupConvParams_ExplicitPadding_Blocked_2D = ::testing::Combine(
+    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
+    ::testing::ValuesIn(numOutChannels_Blocked), ::testing::ValuesIn(numGroups_Blocked), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D, ::testing::ValuesIn(Blocked_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D, ::testing::ValuesIn(Blocked_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D_nightly, ::testing::ValuesIn(Blocked_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D, conv_avx2_2D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_2D_nightly, ::testing::ValuesIn(Blocked_2D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
 /* ============= GroupConvolution (nspc 2D) ============= */
 const std::vector<DeconvInputData> nspc_2D_inputs_smoke = {
-    DeconvInputData{InputShape{{}, {{ 2, 64, 7, 7 }}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{ 2, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 5}}}, ngraph::helpers::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {19, 9}}}
-};
-
-const auto groupConvParams_ExplicitPadding_nspc_2D = ::testing::Combine(
-    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
-    ::testing::ValuesIn(numOutChannels_nspc), ::testing::ValuesIn(numGroups_nspc), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_AMX_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_nspc_2D, ::testing::ValuesIn(nspc_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::Values(emptyFusingSpec), ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D_nspc, conv_avx512_2D_nspc_amx})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{InputShape{{}, {{2, 64, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 64, -1, -1}, {{2, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 5}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{15, 15}, {9, 10}, {19, 9}}}};
+
+const auto groupConvParams_ExplicitPadding_nspc_2D = ::testing::Combine(
+    ::testing::ValuesIn(kernels2d), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d),
+    ::testing::ValuesIn(numOutChannels_nspc), ::testing::ValuesIn(numGroups_nspc), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_AMX_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_nspc_2D, ::testing::ValuesIn(nspc_2D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::Values(emptyFusingSpec), ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D_nspc, conv_avx512_2D_nspc_amx})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
 /* ============= GroupConvolution (Blocked 3D) ============= */
 const std::vector<DeconvInputData> Blocked_3D_inputs_smoke = {
-    DeconvInputData{InputShape{{}, {{ 2, 64, 7, 7, 7 }}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{ 1, 64, 5, 5, 5}, { 2, 64, 5, 7, 5}}}, ngraph::helpers::InputLayerType::PARAMETER, {{7, 7, 7}, {7, 9, 7}}}
-};
+    DeconvInputData{InputShape{{}, {{2, 64, 7, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{1, 64, 5, 5, 5}, {2, 64, 5, 7, 5}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{7, 7, 7}, {7, 9, 7}}}};

 const std::vector<DeconvInputData> Blocked_3D_inputs_nightly = {
-    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{ 1, 64, 5, 5, 5}, { 2, 64, 5, 7, 5}, { 1, 64, 5, 5, 5}}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{ 1, 64, 5, 5, 5}, { 2, 64, 5, 7, 5}}}, ngraph::helpers::InputLayerType::CONSTANT, {{7, 7, 7}}},
-    DeconvInputData{InputShape{{{1, 10}, 64, -1, -1, -1}, {{ 1, 64, 5, 5, 5}, { 2, 64, 5, 5, 5}}}, ngraph::helpers::InputLayerType::CONSTANT, {{7, 7, 7}}}
-};
-
-const auto groupConvParams_ExplicitPadding_Blocked_3D = ::testing::Combine(
-    ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(padBegins3d), ::testing::ValuesIn(padEnds3d), ::testing::ValuesIn(dilations3d),
-    ::testing::ValuesIn(numOutChannels_Blocked), ::testing::ValuesIn(numGroups_Blocked), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuEmptyPluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{1, 64, 5, 5, 5}, {2, 64, 5, 7, 5}, {1, 64, 5, 5, 5}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {}},
+    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{1, 64, 5, 5, 5}, {2, 64, 5, 7, 5}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{7, 7, 7}}},
+    DeconvInputData{InputShape{{{1, 10}, 64, -1, -1, -1}, {{1, 64, 5, 5, 5}, {2, 64, 5, 5, 5}}},
+                    ov::test::utils::InputLayerType::CONSTANT,
+                    {{7, 7, 7}}}};
+
+const auto groupConvParams_ExplicitPadding_Blocked_3D = ::testing::Combine(
+    ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(padBegins3d), ::testing::ValuesIn(padEnds3d), ::testing::ValuesIn(dilations3d),
+    ::testing::ValuesIn(numOutChannels_Blocked), ::testing::ValuesIn(numGroups_Blocked), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Blocked_FP32, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuEmptyPluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_3D_Blocked_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_Blocked_3D, ::testing::ValuesIn(Blocked_3D_inputs_nightly), ::testing::Values(ElementType::f32), ::testing::ValuesIn(fusingParamsSet), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
 /* ============= GroupConvolution (nspc 3D) ============= */
 const std::vector<DeconvInputData> nspc_3D_inputs_smoke = {
-    DeconvInputData{InputShape{{}, {{ 2, 64, 7, 7, 7 }}}, ngraph::helpers::InputLayerType::CONSTANT, {}},
-    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{ 1, 64, 5, 5, 5}, { 2, 64, 5, 7, 5}}}, ngraph::helpers::InputLayerType::PARAMETER, {{7, 7, 7}, {7, 9, 7}}}
-};
-
-const auto groupConvParams_ExplicitPadding_nspc_3D = ::testing::Combine(
-    ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(padBegins3d), ::testing::ValuesIn(padEnds3d), ::testing::ValuesIn(dilations3d),
-    ::testing::ValuesIn(numOutChannels_nspc), ::testing::ValuesIn(numGroups_nspc), ::testing::Values(ngraph::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
-
-INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_nspc_BF16, GroupDeconvolutionLayerCPUTest,
-                         ::testing::Combine(groupConvParams_ExplicitPadding_nspc_3D, ::testing::ValuesIn(nspc_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::Values(emptyFusingSpec), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D_nspc, conv_avx512_3D_nspc_amx})), ::testing::Values(cpuBF16PluginConfig)),
-                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
+    DeconvInputData{InputShape{{}, {{2, 64, 7, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}},
+    DeconvInputData{InputShape{{-1, 64, -1, -1, -1}, {{1, 64, 5, 5, 5}, {2, 64, 5, 7, 5}}},
+                    ov::test::utils::InputLayerType::PARAMETER,
+                    {{7, 7, 7}, {7, 9, 7}}}};
+
+const auto groupConvParams_ExplicitPadding_nspc_3D = ::testing::Combine(
+    ::testing::ValuesIn(kernels3d), ::testing::ValuesIn(strides3d), ::testing::ValuesIn(padBegins3d), ::testing::ValuesIn(padEnds3d), ::testing::ValuesIn(dilations3d),
+    ::testing::ValuesIn(numOutChannels_nspc), ::testing::ValuesIn(numGroups_nspc), ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding));
+
+INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_3D_nspc_BF16, GroupDeconvolutionLayerCPUTest,
+                         ::testing::Combine(groupConvParams_ExplicitPadding_nspc_3D, ::testing::ValuesIn(nspc_3D_inputs_smoke), ::testing::Values(ElementType::f32), ::testing::Values(emptyFusingSpec), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_3D_nspc, conv_avx512_3D_nspc_amx})), ::testing::Values(cpuBF16PluginConfig)),
+                         GroupDeconvolutionLayerCPUTest::getTestCaseName);
::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_DW), + ::testing::ValuesIn(numGroups_DW), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_DW_FP32, + GroupDeconvolutionLayerCPUTest, + ::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, + ::testing::ValuesIn(dw_2D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D, + conv_avx2_dw_2D})), + ::testing::Values(cpuEmptyPluginConfig)), + GroupDeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_DW_BF16, + GroupDeconvolutionLayerCPUTest, + ::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, + ::testing::ValuesIn(dw_2D_inputs_smoke), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D})), + ::testing::Values(cpuBF16PluginConfig)), + GroupDeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_DW_FP32, + GroupDeconvolutionLayerCPUTest, + ::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, + ::testing::ValuesIn(dw_2D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D, + conv_avx2_dw_2D})), + ::testing::Values(cpuEmptyPluginConfig)), + GroupDeconvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_DW_BF16, + GroupDeconvolutionLayerCPUTest, + ::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, + ::testing::ValuesIn(dw_2D_inputs_nightly), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(fusingParamsSet), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D})), + ::testing::Values(cpuBF16PluginConfig)), + GroupDeconvolutionLayerCPUTest::getTestCaseName); /* ============= Reorder + GroupDeconvolution ============= */ -INSTANTIATE_TEST_SUITE_P(smoke_reorder_GroupDeconv_2D, GroupDeconvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine(::testing::ValuesIn(kernels2d), - ::testing::Values(InferenceEngine::SizeVector{1, 1}), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_Blocked), - ::testing::Values(ngraph::op::PadType::EXPLICIT), - ::testing::ValuesIn(emptyOutputPadding)), - ::testing::Values(DeconvInputData{InputShape{{-1, 64, -1, -1}, {{ 1, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 4}, { 1, 64, 7, 7}}}, - ngraph::helpers::InputLayerType::PARAMETER, - {{15, 15}, {9, 10}, {9, 9}, {15, 15}}}), - ::testing::Values(ElementType::f32), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), - ::testing::Values(cpuEmptyPluginConfig)), +INSTANTIATE_TEST_SUITE_P( + smoke_reorder_GroupDeconv_2D, + GroupDeconvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(kernels2d), + ::testing::Values(std::vector{1, 1}), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_Blocked), + ::testing::Values(ov::op::PadType::EXPLICIT), + ::testing::ValuesIn(emptyOutputPadding)), + 
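// The input data fed in just below is the interesting part of this suite: the
// target shape sequence {15, 15} -> {9, 10} -> {9, 9} -> {15, 15} changes
// between infer requests, which is what exercises the Reorder nodes the suite
// is named for. Roughly (assuming the usual SubgraphBaseTest flow):
//
//   for (const auto& targets : targetStaticShapes) {
//       // reshape to `targets`, then infer(); a layout mismatch inserts a Reorder
//   }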
::testing::Values(DeconvInputData{ + InputShape{{-1, 64, -1, -1}, {{1, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 4}, {1, 64, 7, 7}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{15, 15}, {9, 10}, {9, 9}, {15, 15}}}), + ::testing::Values(ElementType::f32), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_2D})), + ::testing::Values(cpuEmptyPluginConfig)), GroupDeconvolutionLayerCPUTest::getTestCaseName); /* ============= GroupDeconvolution auto padding tests ============= */ const std::vector inputs_2D_AutoPadding = { - DeconvInputData{ - InputShape{{}, {{ 2, 64, 7, 7 }}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 64, -1, -1}, {{ 2, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {} - }, - DeconvInputData{ - InputShape{{-1, 64, -1, -1}, {{ 1, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, - {{15, 15}} - }, - DeconvInputData{ - InputShape{{-1, 64, -1, -1}, {{ 2, 64, 7, 7}, { 2, 64, 5, 7}, { 1, 64, 9, 5}}}, - ngraph::helpers::InputLayerType::PARAMETER, - {{15, 15}, {9, 10}, {19, 9}} - } -}; - -const auto groupDeconvParams_AutoPadding_2D = ::testing::Combine( - ::testing::ValuesIn(kernels2d), - ::testing::ValuesIn(strides2d), - ::testing::ValuesIn(padBegins2d), - ::testing::ValuesIn(padEnds2d), - ::testing::ValuesIn(dilations2d), - ::testing::ValuesIn(numOutChannels_Blocked), - ::testing::ValuesIn(numGroups_Blocked), - ::testing::Values(ngraph::op::PadType::SAME_UPPER, ngraph::op::PadType::SAME_LOWER), - ::testing::ValuesIn(emptyOutputPadding) -); - -INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_AutoPadding_FP32, GroupDeconvolutionLayerCPUTest, - ::testing::Combine( - groupDeconvParams_AutoPadding_2D, - ::testing::ValuesIn(inputs_2D_AutoPadding), - ::testing::Values(ElementType::f32), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D, conv_avx512_2D})), - ::testing::Values(cpuEmptyPluginConfig)), - GroupDeconvolutionLayerCPUTest::getTestCaseName); - -} // namespace - -} // namespace CPULayerTestsDefinitions + DeconvInputData{InputShape{{}, {{2, 64, 7, 7}}}, ov::test::utils::InputLayerType::CONSTANT, {}}, + DeconvInputData{InputShape{{-1, 64, -1, -1}, {{2, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 4}}}, + ov::test::utils::InputLayerType::CONSTANT, + {}}, + DeconvInputData{InputShape{{-1, 64, -1, -1}, {{1, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 7, 7}}}, + ov::test::utils::InputLayerType::CONSTANT, + {{15, 15}}}, + DeconvInputData{InputShape{{-1, 64, -1, -1}, {{2, 64, 7, 7}, {2, 64, 5, 7}, {1, 64, 9, 5}}}, + ov::test::utils::InputLayerType::PARAMETER, + {{15, 15}, {9, 10}, {19, 9}}}}; + +const auto groupDeconvParams_AutoPadding_2D = + ::testing::Combine(::testing::ValuesIn(kernels2d), + ::testing::ValuesIn(strides2d), + ::testing::ValuesIn(padBegins2d), + ::testing::ValuesIn(padEnds2d), + ::testing::ValuesIn(dilations2d), + ::testing::ValuesIn(numOutChannels_Blocked), + ::testing::ValuesIn(numGroups_Blocked), + ::testing::Values(ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER), + ::testing::ValuesIn(emptyOutputPadding)); + +INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_AutoPadding_FP32, + GroupDeconvolutionLayerCPUTest, + ::testing::Combine(groupDeconvParams_AutoPadding_2D, + ::testing::ValuesIn(inputs_2D_AutoPadding), + ::testing::Values(ElementType::f32), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(filterCPUInfoForDevice({conv_gemm_2D, 
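// SAME_UPPER and SAME_LOWER in the auto-padding params above differ only in
// where the odd element of the automatically computed total padding is placed:
// SAME_UPPER appends it to pads_end, SAME_LOWER prepends it to pads_begin.
// Illustration for a total pad of 3 on one axis:
//
//   // SAME_UPPER: pads_begin = 1, pads_end = 2
//   // SAME_LOWER: pads_begin = 2, pads_end = 1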
conv_avx512_2D})), + ::testing::Values(cpuEmptyPluginConfig)), + GroupDeconvolutionLayerCPUTest::getTestCaseName); + +} // namespace + +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp index 94768354adbc6d..495d97e6271940 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_cell.cpp @@ -2,39 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/gru_cell.hpp" + #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { - -using GRUCellCpuSpecificParams = typename std::tuple< - std::vector, // Shapes - bool, // Using decompose to sub-ops transformation - std::vector, // Activations - float, // Clip - bool, // Linear before reset - ElementType, // Network precision - CPUSpecificParams, // CPU specific params - std::map // Additional config ->; +namespace ov { +namespace test { + +using GRUCellCpuSpecificParams = typename std::tuple, // Shapes + bool, // Using decompose to sub-ops transformation + std::vector, // Activations + float, // Clip + bool, // Linear before reset + ElementType, // Network precision + CPUSpecificParams, // CPU specific params + ov::AnyMap // Additional config + >; class GRUCellCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShapes; bool decompose, linearBeforeReset; std::vector activations; float clip = 0.f; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; - std::tie(inputShapes, decompose, activations, clip, linearBeforeReset, netPrecision, cpuParams, additionalConfig) = obj.param; + std::tie(inputShapes, + decompose, + activations, + clip, + linearBeforeReset, + netPrecision, + cpuParams, + additionalConfig) = obj.param; std::ostringstream result; result << "IS=("; @@ -50,7 +57,7 @@ class GRUCellCPUTest : public testing::WithParamInterface> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; CPUSpecificParams cpuParams{{nc, nc}, {nc}, {"ref_any"}, "ref_any"}; @@ -126,64 +142,65 @@ std::vector> activations = {{"sigmoid", "tanh"}}; // oneDNN supports only zero clip std::vector clip = {0.f}; std::vector linearBeforeReset = {true, false}; -std::vector netPrecisions = { ElementType::f32 }; - -const std::vector> staticShapes = { - { { {}, { {1, 1} } }, // Static shapes - { {}, { {1, 1} } } }, - { { {}, { {1, 1} } }, // Static shapes - { {}, { {1, 10} } } }, - { { {}, { {1, 30} } }, // Static shapes - { {}, { {1, 10} } } }, - { { {}, { {1, 30} } }, // Static shapes - { {}, { {1, 1} } } }, - { { {}, { {3, 1} } }, // Static 
shapes - { {}, { {3, 1} } } }, - { { {}, { {5, 1} } }, // Static shapes - { {}, { {5, 1} } } }, - { { {}, { {5, 30} } }, // Static shapes - { {}, { {5, 10} } } } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_static, GRUCellCPUTest, - ::testing::Combine(::testing::ValuesIn(staticShapes), - ::testing::ValuesIn(shouldDecompose), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::ValuesIn(additionalConfig)), - GRUCellCPUTest::getTestCaseName); +std::vector netPrecisions = {ElementType::f32}; + +const std::vector> staticShapes = {{{{}, {{1, 1}}}, // Static shapes + {{}, {{1, 1}}}}, + {{{}, {{1, 1}}}, // Static shapes + {{}, {{1, 10}}}}, + {{{}, {{1, 30}}}, // Static shapes + {{}, {{1, 10}}}}, + {{{}, {{1, 30}}}, // Static shapes + {{}, {{1, 1}}}}, + {{{}, {{3, 1}}}, // Static shapes + {{}, {{3, 1}}}}, + {{{}, {{5, 1}}}, // Static shapes + {{}, {{5, 1}}}}, + {{{}, {{5, 30}}}, // Static shapes + {{}, {{5, 10}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_static, + GRUCellCPUTest, + ::testing::Combine(::testing::ValuesIn(staticShapes), + ::testing::ValuesIn(shouldDecompose), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::ValuesIn(additionalConfig)), + GRUCellCPUTest::getTestCaseName); const std::vector> dynamicShapes = { - { { { {-1}, 1 }, // Dynamic shape 0 - { {1, 1}, {3, 1}, {5, 1} } }, // Target shapes - { { {-1}, 1 }, // Dynamic shape 1 - { {1, 1}, {3, 1}, {5, 1} } } }, // Target shapes - { { { {1, 10}, 30 }, // Dynamic shape 0 - { {2, 30}, {5, 30}, {8, 30} } }, // Target shapes - { { {1, 10}, 10 }, // Dynamic shape 1 - { {2, 10}, {5, 10}, {8, 10} } } }, // Target shapes - { { { {1, 10}, {25, 35} }, // Dynamic shape 0 - { {2, 30}, {5, 30}, {8, 30} } }, // Target shapes - { { {1, 10}, -1 }, // Dynamic shape 1 - { {2, 10}, {5, 10}, {8, 10} } } }, // Target shapes - { { { {1, 10}, {25, 35} }, // Dynamic shape 0 - { {2, 30}, {5, 30}, {8, 30}, {2, 30}, {5, 30}, {8, 30} } }, // Target shapes - { { {1, 10}, -1 }, // Dynamic shape 1 - { {2, 10}, {5, 10}, {8, 10}, {2, 10}, {5, 10}, {8, 10} } } } // Target shapes + {{{{-1}, 1}, // Dynamic shape 0 + {{1, 1}, {3, 1}, {5, 1}}}, // Target shapes + {{{-1}, 1}, // Dynamic shape 1 + {{1, 1}, {3, 1}, {5, 1}}}}, // Target shapes + {{{{1, 10}, 30}, // Dynamic shape 0 + {{2, 30}, {5, 30}, {8, 30}}}, // Target shapes + {{{1, 10}, 10}, // Dynamic shape 1 + {{2, 10}, {5, 10}, {8, 10}}}}, // Target shapes + {{{{1, 10}, {25, 35}}, // Dynamic shape 0 + {{2, 30}, {5, 30}, {8, 30}}}, // Target shapes + {{{1, 10}, -1}, // Dynamic shape 1 + {{2, 10}, {5, 10}, {8, 10}}}}, // Target shapes + {{{{1, 10}, {25, 35}}, // Dynamic shape 0 + {{2, 30}, {5, 30}, {8, 30}, {2, 30}, {5, 30}, {8, 30}}}, // Target shapes + {{{1, 10}, -1}, // Dynamic shape 1 + {{2, 10}, {5, 10}, {8, 10}, {2, 10}, {5, 10}, {8, 10}}}} // Target shapes }; -INSTANTIATE_TEST_SUITE_P(smoke_dynamic, GRUCellCPUTest, - ::testing::Combine(::testing::ValuesIn(dynamicShapes), - ::testing::ValuesIn(shouldDecompose), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::ValuesIn(additionalConfig)), - GRUCellCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions 
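// The config change threaded through this file is the same one made in the
// hunks above: the legacy string-keyed map
//
//   std::map<std::string, std::string> cfg = {
//       {InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16,
//        InferenceEngine::PluginConfigParams::YES}};
//
// is replaced by a typed property map (both spellings appear verbatim in this
// diff):
//
//   ov::AnyMap cfg = {ov::hint::inference_precision(ov::element::bf16)};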
+INSTANTIATE_TEST_SUITE_P(smoke_dynamic, + GRUCellCPUTest, + ::testing::Combine(::testing::ValuesIn(dynamicShapes), + ::testing::ValuesIn(shouldDecompose), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::ValuesIn(additionalConfig)), + GRUCellCPUTest::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp index 60596e8bd697c7..9481f04b8ec9ce 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/gru_sequence.cpp @@ -3,43 +3,51 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" +#include "common_test_utils/node_builders/gru_cell.hpp" using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { - -using GRUSequenceCpuSpecificParams = typename std::tuple< - std::vector, // Shapes - ngraph::helpers::SequenceTestsMode, // Pure Sequence or TensorIterator - std::vector, // Activations - float, // Clip - bool, // Linear_before_reset - ov::op::RecurrentSequenceDirection, // Direction - ElementType, // Network precision - CPUSpecificParams, // CPU specific params - std::map // Additional config ->; +namespace ov { +namespace test { + +using GRUSequenceCpuSpecificParams = + typename std::tuple, // Shapes + ov::test::utils::SequenceTestsMode, // Pure Sequence or TensorIterator + std::vector, // Activations + float, // Clip + bool, // Linear_before_reset + ov::op::RecurrentSequenceDirection, // Direction + ElementType, // Network precision + CPUSpecificParams, // CPU specific params + ov::AnyMap // Additional config + >; class GRUSequenceCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShapes; - ngraph::helpers::SequenceTestsMode seqMode; + ov::test::utils::SequenceTestsMode seqMode; std::vector activations; float clip; bool linearBeforeRest; ov::op::RecurrentSequenceDirection direction; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; - - std::tie(inputShapes, seqMode, activations, clip, linearBeforeRest, direction, netPrecision, cpuParams, additionalConfig) = obj.param; + ov::AnyMap additionalConfig; + + std::tie(inputShapes, + seqMode, + activations, + clip, + linearBeforeRest, + direction, + netPrecision, + cpuParams, + additionalConfig) = obj.param; std::ostringstream result; result << "IS=("; @@ -55,7 +63,7 @@ class GRUSequenceCPUTest : public testing::WithParamInterface(netPrecision, shape)); } - const size_t batchSize = inputDynamicShapes[0][0].is_static() ? inputDynamicShapes[0][0].get_length() : - inputDynamicShapes[1][0].is_static() ? inputDynamicShapes[1][0].get_length() : - inputDynamicShapes.size() > 2 && inputDynamicShapes[2][0].is_static() ? 
inputDynamicShapes[2][0].get_length() : - 1lu; + const size_t batchSize = inputDynamicShapes[0][0].is_static() ? inputDynamicShapes[0][0].get_length() + : inputDynamicShapes[1][0].is_static() ? inputDynamicShapes[1][0].get_length() + : inputDynamicShapes.size() > 2 && inputDynamicShapes[2][0].is_static() + ? inputDynamicShapes[2][0].get_length() + : 1lu; if (inputDynamicShapes.size() > 2) { if (!inputDynamicShapes[2].is_dynamic() && - seqMode != ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM && - seqMode != ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM) { + seqMode != ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_PARAM && + seqMode != ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM) { params.pop_back(); } else { params[2]->set_element_type(ElementType::i64); @@ -124,25 +142,27 @@ class GRUSequenceCPUTest : public testing::WithParamInterface WRB = {{numDirections, 3 * hiddenSize, inputSize}, {numDirections, 3 * hiddenSize, hiddenSize}, - {numDirections, (linearBeforeReset ? 4 : 3) * hiddenSize}, {batchSize}}; - auto gruSequenceOp = ngraph::builder::makeGRU(paramsOuts, - WRB, - hiddenSize, - activations, - {}, - {}, - clip, - linearBeforeReset, - true, - direction, - seqMode); + paramsOuts.push_back(param); + + std::vector WRB = {{numDirections, 3 * hiddenSize, inputSize}, + {numDirections, 3 * hiddenSize, hiddenSize}, + {numDirections, (linearBeforeReset ? 4 : 3) * hiddenSize}, + {batchSize}}; + auto gruSequenceOp = ov::test::utils::make_gru(paramsOuts, + WRB, + hiddenSize, + activations, + {}, + {}, + clip, + linearBeforeReset, + true, + direction, + seqMode); function = makeNgraphFunction(netPrecision, params, gruSequenceOp, "gruSequenceOp"); - if (seqMode != ngraph::helpers::SequenceTestsMode::PURE_SEQ) { + if (seqMode != ov::test::utils::SequenceTestsMode::PURE_SEQ) { ov::pass::Manager manager; if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) manager.register_pass(); @@ -180,180 +200,187 @@ TEST_P(GRUSequenceCPUTest, CompareWithRefs) { namespace { /* CPU PARAMS */ -std::vector> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; CPUSpecificParams cpuParams{{ntc, tnc}, {ntc, tnc}, {"ref_any"}, "ref_any"}; -CPUSpecificParams cpuParamsBatchSizeOne{{tnc, tnc}, {tnc, tnc}, {"ref_any"}, "ref_any"};; +CPUSpecificParams cpuParamsBatchSizeOne{{tnc, tnc}, {tnc, tnc}, {"ref_any"}, "ref_any"}; -std::vector mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ}; +std::vector mode{ov::test::utils::SequenceTestsMode::PURE_SEQ}; // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector> activations = {{"sigmoid", "tanh"}}; std::vector linearBeforeReset = {true, false}; std::vector clip{0.f}; std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD}; -std::vector netPrecisions = { ElementType::f32 }; - -const std::vector> staticShapes = { - { { {}, { {10, 2, 10} } }, // #0. Static shapes - { {}, { {10, 1, 1} } }, - { {}, { {10} } } }, - { { {}, { {10, 2, 10} } }, // #1. Static shapes - { {}, { {10, 1, 10} } }, - { {}, { {10} } } }, - { { {}, { {1, 2, 10} } }, // #2. 
Static shapes - { {}, { {1, 1, 1} } }, - { {}, { {1} } } }, - { { {}, { {1, 2, 10} } }, // #3. Static shapes - { {}, { {1, 1, 10} } }, - { {}, { {1} } } }, - { { {}, { {10, 2, 10} } }, // #4. Static shapes - { {}, { {10, 1, 1} } } }, - { { {}, { {10, 2, 10} } }, // #5. Static shapes - { {}, { {10, 1, 10} } } } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_static, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[0], staticShapes[1]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(std::map{})), - GRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[3]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), - GRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_static_bf16, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[4], staticShapes[5]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(additionalConfig[1])), - GRUSequenceCPUTest::getTestCaseName); +std::vector netPrecisions = {ElementType::f32}; + +const std::vector> staticShapes = {{{{}, {{10, 2, 10}}}, // #0. Static shapes + {{}, {{10, 1, 1}}}, + {{}, {{10}}}}, + {{{}, {{10, 2, 10}}}, // #1. Static shapes + {{}, {{10, 1, 10}}}, + {{}, {{10}}}}, + {{{}, {{1, 2, 10}}}, // #2. Static shapes + {{}, {{1, 1, 1}}}, + {{}, {{1}}}}, + {{{}, {{1, 2, 10}}}, // #3. Static shapes + {{}, {{1, 1, 10}}}, + {{}, {{1}}}}, + {{{}, {{10, 2, 10}}}, // #4. Static shapes + {{}, {{10, 1, 1}}}}, + {{{}, {{10, 2, 10}}}, // #5. 
Static shapes + {{}, {{10, 1, 10}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_static, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[0], + staticShapes[1]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::Values(ov::AnyMap{})), + GRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[3]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParamsBatchSizeOne), + ::testing::Values(ov::AnyMap{})), + GRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_static_bf16, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn(std::vector>{staticShapes[4], + staticShapes[5]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::Values(additionalConfig[1])), + GRUSequenceCPUTest::getTestCaseName); const std::vector> dynamicShapes = { - { { {-1, {1, 5}, 10}, // #0. Dynamic shape 0 - { {10, 2, 10}, {8, 3, 10}, {5, 4, 10} } }, // Target shapes - { {{0, 15}, 1, 1}, // Dynamic shape 1 - { {10, 1, 1}, {8, 1, 1}, {5, 1, 1} } }, // Target shapes - { {{0, 12}}, // Dynamic shape 2 - { {10}, {8}, {5} } } }, // Target shapes - { { {{0, 11}, -1, 10}, // #1. Dynamic shape 0 - { {10, 2, 10}, {3, 4, 10}, {5, 5, 10} } }, // Target shapes - { {-1, 1, 10}, // Dynamic shape 1 - { {10, 1, 10}, {3, 1, 10}, {5, 1, 10} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {10}, {3}, {5} } } }, // Target shapes - { { {{0, 11}, -1, {7, 11}}, // #2. Dynamic shape 0 - { {10, 2, 10}, {3, 4, 10}, {5, 5, 10} } }, // Target shapes - { {-1, 1, {8, 12}}, // Dynamic shape 1 - { {10, 1, 10}, {3, 1, 10}, {5, 1, 10} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {10}, {3}, {5} } } }, // Target shapes - { { {-1, {0, 7}, 10}, // #3. Dynamic shape 0 - { {1, 2, 10}, {1, 3, 10}, {1, 6, 10} } }, // Target shapes - { {-1, 1, 1}, // Dynamic shape 1 - { {1, 1, 1}, {1, 1, 1}, {1, 1, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {1}, {1}, {1} } } }, // Target shapes - { { {1, -1, 10}, // #4. Dynamic shape 0 - { {1, 2, 10}, {1, 4, 10}, {1, 8, 10} } }, // Target shapes - { {1, 1, 10}, // Dynamic shape 1 - { {1, 1, 10}, {1, 1, 10}, {1, 1, 10} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {1}, {1}, {1} } } }, // Target shapes - { { {-1, -1, -1}, // #5. Dynamic shape 0 - { {1, 2, 10}, {1, 4, 10}, {1, 8, 10} } }, // Target shapes - { {-1, -1, -1}, // Dynamic shape 1 - { {1, 1, 10}, {1, 1, 10}, {1, 1, 10} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {1}, {1}, {1} } } }, // Target shapes - { { {2, {1, 5}, 10}, // #6. Dynamic shape 0 - { {2, 2, 10}, {2, 3, 10}, {2, 4, 10} } }, // Target shapes - { {2, 1, 1}, // Dynamic shape 1 - { {2, 1, 1}, {2, 1, 1}, {2, 1, 1} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {2}, {2}, {2} } }}, // Target shapes - { { {5, -1, 10}, // #7. 
Dynamic shape 0 - { {5, 2, 10}, {5, 4, 10}, {5, 5, 10} } }, // Target shapes - { {5, 1, 10}, // Dynamic shape 1 - { {5, 1, 10}, {5, 1, 10}, {5, 1, 10} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {5}, {5}, {5} } }}, // Target shapes - { { {{0, 11}, -1, {7, 11}}, // #8. Dynamic shape 0 - { {10, 2, 10}, {3, 4, 10}, {5, 5, 10}, {10, 2, 10}, {5, 5, 10} } }, // Target shapes - { {-1, 1, {8, 12}}, // Dynamic shape 1 - { {10, 1, 10}, {3, 1, 10}, {5, 1, 10}, {10, 1, 10}, {5, 1, 10} } }, // Target shapes - { {-1}, // Dynamic shape 2 - { {10}, {3}, {5}, {10}, {5} } } } // Target shapes + {{{-1, {1, 5}, 10}, // #0. Dynamic shape 0 + {{10, 2, 10}, {8, 3, 10}, {5, 4, 10}}}, // Target shapes + {{{0, 15}, 1, 1}, // Dynamic shape 1 + {{10, 1, 1}, {8, 1, 1}, {5, 1, 1}}}, // Target shapes + {{{0, 12}}, // Dynamic shape 2 + {{10}, {8}, {5}}}}, // Target shapes + {{{{0, 11}, -1, 10}, // #1. Dynamic shape 0 + {{10, 2, 10}, {3, 4, 10}, {5, 5, 10}}}, // Target shapes + {{-1, 1, 10}, // Dynamic shape 1 + {{10, 1, 10}, {3, 1, 10}, {5, 1, 10}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{10}, {3}, {5}}}}, // Target shapes + {{{{0, 11}, -1, {7, 11}}, // #2. Dynamic shape 0 + {{10, 2, 10}, {3, 4, 10}, {5, 5, 10}}}, // Target shapes + {{-1, 1, {8, 12}}, // Dynamic shape 1 + {{10, 1, 10}, {3, 1, 10}, {5, 1, 10}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{10}, {3}, {5}}}}, // Target shapes + {{{-1, {0, 7}, 10}, // #3. Dynamic shape 0 + {{1, 2, 10}, {1, 3, 10}, {1, 6, 10}}}, // Target shapes + {{-1, 1, 1}, // Dynamic shape 1 + {{1, 1, 1}, {1, 1, 1}, {1, 1, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{1}, {1}, {1}}}}, // Target shapes + {{{1, -1, 10}, // #4. Dynamic shape 0 + {{1, 2, 10}, {1, 4, 10}, {1, 8, 10}}}, // Target shapes + {{1, 1, 10}, // Dynamic shape 1 + {{1, 1, 10}, {1, 1, 10}, {1, 1, 10}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{1}, {1}, {1}}}}, // Target shapes + {{{-1, -1, -1}, // #5. Dynamic shape 0 + {{1, 2, 10}, {1, 4, 10}, {1, 8, 10}}}, // Target shapes + {{-1, -1, -1}, // Dynamic shape 1 + {{1, 1, 10}, {1, 1, 10}, {1, 1, 10}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{1}, {1}, {1}}}}, // Target shapes + {{{2, {1, 5}, 10}, // #6. Dynamic shape 0 + {{2, 2, 10}, {2, 3, 10}, {2, 4, 10}}}, // Target shapes + {{2, 1, 1}, // Dynamic shape 1 + {{2, 1, 1}, {2, 1, 1}, {2, 1, 1}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{2}, {2}, {2}}}}, // Target shapes + {{{5, -1, 10}, // #7. Dynamic shape 0 + {{5, 2, 10}, {5, 4, 10}, {5, 5, 10}}}, // Target shapes + {{5, 1, 10}, // Dynamic shape 1 + {{5, 1, 10}, {5, 1, 10}, {5, 1, 10}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{5}, {5}, {5}}}}, // Target shapes + {{{{0, 11}, -1, {7, 11}}, // #8. 
Dynamic shape 0 + {{10, 2, 10}, {3, 4, 10}, {5, 5, 10}, {10, 2, 10}, {5, 5, 10}}}, // Target shapes + {{-1, 1, {8, 12}}, // Dynamic shape 1 + {{10, 1, 10}, {3, 1, 10}, {5, 1, 10}, {10, 1, 10}, {5, 1, 10}}}, // Target shapes + {{-1}, // Dynamic shape 2 + {{10}, {3}, {5}, {10}, {5}}}} // Target shapes }; -INSTANTIATE_TEST_SUITE_P(smoke_dynamic, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[0], dynamicShapes[1], dynamicShapes[2]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(std::map{})), - GRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[4]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), - GRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_dynamic, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[5], dynamicShapes[8]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(std::map{})), - GRUSequenceCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, GRUSequenceCPUTest, - ::testing::Combine(::testing::ValuesIn({dynamicShapes[6], dynamicShapes[7]}), - ::testing::ValuesIn(mode), - ::testing::ValuesIn(activations), - ::testing::ValuesIn(clip), - ::testing::ValuesIn(linearBeforeReset), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(cpuParams), - ::testing::Values(additionalConfig[1])), - GRUSequenceCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_dynamic, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn({dynamicShapes[0], dynamicShapes[1], dynamicShapes[2]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::Values(ov::AnyMap{})), + GRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn({dynamicShapes[4]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParamsBatchSizeOne), + ::testing::Values(ov::AnyMap{})), + GRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_dynamic, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn({dynamicShapes[5], dynamicShapes[8]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + 
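// A note on reading the dynamicShapes table above: inside an ov::PartialShape,
// -1 is a fully dynamic dimension, {lo, hi} is a bounded interval, and a bare
// number is static. Every listed target static shape must satisfy its dynamic
// shape, e.g. for {-1, {1, 5}, 10}:
//
//   // {10, 2, 10} -> ok: any batch, 2 lies in [1, 5], 10 == 10
//   // {8, 3, 10}  -> ok
//   // {5, 4, 10}  -> ok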
::testing::Values(cpuParams), + ::testing::Values(ov::AnyMap{})), + GRUSequenceCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, + GRUSequenceCPUTest, + ::testing::Combine(::testing::ValuesIn({dynamicShapes[6], dynamicShapes[7]}), + ::testing::ValuesIn(mode), + ::testing::ValuesIn(activations), + ::testing::ValuesIn(clip), + ::testing::ValuesIn(linearBeforeReset), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(cpuParams), + ::testing::Values(additionalConfig[1])), + GRUSequenceCPUTest::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/conversion.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/conversion.cpp index e7e8c25dee812c..fb3a2854c0f6b2 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/conversion.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/conversion.cpp @@ -6,12 +6,10 @@ #include "shared_test_classes/single_layer/conversion.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Conversion { INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_7D_Dynamic, ConvertCPULayerTest, @@ -31,4 +29,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_7D_Static, ConvertCPULayerTes ConvertCPULayerTest::getTestCaseName); } // namespace Conversion -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/eltwise.cpp index b71716831db8a4..823f4a7686af9c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/arm/eltwise.cpp @@ -7,12 +7,10 @@ #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Eltwise { namespace { @@ -45,9 +43,9 @@ const auto params_4D_bitwise = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), ::testing::ValuesIn({ - ngraph::helpers::EltwiseTypes::BITWISE_AND, - ngraph::helpers::EltwiseTypes::BITWISE_OR, - ngraph::helpers::EltwiseTypes::BITWISE_XOR + ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR }), ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), @@ -69,9 +67,9 @@ const auto params_4D_bitwise_i32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), ::testing::ValuesIn({ - ngraph::helpers::EltwiseTypes::BITWISE_AND, - ngraph::helpers::EltwiseTypes::BITWISE_OR, - ngraph::helpers::EltwiseTypes::BITWISE_XOR + ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR }), ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ 
ov::test::utils::OpType::VECTOR }), @@ -93,8 +91,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_i32, EltwiseLayerCPUTe const auto params_4D_bitwise_NOT = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ngraph::helpers::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ngraph::helpers::InputLayerType::CONSTANT }), + ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), + ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), ::testing::ValuesIn({ ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32 }), ::testing::Values(ov::element::Type_t::undefined), @@ -113,8 +111,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT, EltwiseLayerCPUTe const auto params_4D_bitwise_NOT_i32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ngraph::helpers::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ngraph::helpers::InputLayerType::CONSTANT }), + ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), + ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), ::testing::ValuesIn({ ov::element::Type_t::i16 }), ::testing::Values(ov::element::Type_t::undefined), @@ -130,6 +128,7 @@ const auto params_4D_bitwise_NOT_i32 = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT_i32, EltwiseLayerCPUTest, params_4D_bitwise_NOT_i32, EltwiseLayerCPUTest::getTestCaseName); -} // namespace -} // namespace Eltwise -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace Eltwise +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/activation.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/activation.cpp index 08e27dba864e5f..0defef891fd6f3 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/activation.cpp @@ -6,12 +6,10 @@ #include "shared_test_classes/single_layer/activation.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Activation { /* ============= Activation (1D) ============= */ @@ -20,8 +18,8 @@ const auto basicCases3D = ::testing::Combine( ::testing::Values(activationShapes()), ::testing::ValuesIn(ov::test::utils::combineParams(activationTypes())), ::testing::ValuesIn(netPrc()), - ::testing::Values(Precision::FP32), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams3D())) ); @@ -33,8 +31,8 @@ const auto basicCases4D = ::testing::Combine( ::testing::Values(activationShapes()), ::testing::ValuesIn(ov::test::utils::combineParams(activationTypes())), ::testing::ValuesIn(netPrc()), - ::testing::Values(Precision::FP32), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams4D())) ); @@ -46,8 +44,8 @@ const auto basicCases5D = ::testing::Combine( 
::testing::Values(activationShapes()), ::testing::ValuesIn(ov::test::utils::combineParams(activationTypes())), ::testing::ValuesIn(netPrc()), - ::testing::Values(Precision::FP32), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(filterCPUSpecificParams(cpuParams5D())) ); @@ -58,12 +56,13 @@ const auto dynamicMathBasicCases = ::testing::Combine( ::testing::Values(activationShapes()), ::testing::ValuesIn(ov::test::utils::combineParams(activationTypesDynamicMath())), ::testing::ValuesIn(netPrecisions()), - ::testing::Values(Precision::FP32), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(cpuParamsDynamicMath()) ); INSTANTIATE_TEST_SUITE_P(smoke_Activation5D_dynamicMath_CPU, ActivationLayerCPUTest, dynamicMathBasicCases, ActivationLayerCPUTest::getTestCaseName); } // namespace Activation -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/conversion.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/conversion.cpp index 4a29a6d542cdcc..e7123986ffa363 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/conversion.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/conversion.cpp @@ -6,12 +6,10 @@ #include "shared_test_classes/single_layer/conversion.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Conversion { static std::string expectedPrimitiveType() { @@ -51,4 +49,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_4D_Static, ConvertCPULayerTes ConvertCPULayerTest::getTestCaseName); } // namespace Conversion -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/convolution.cpp new file mode 100644 index 00000000000000..1c6a4232f89154 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/convolution.cpp @@ -0,0 +1,544 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_layer_tests/classes/convolution.hpp" +#include "shared_test_classes/single_layer/convolution.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "test_utils/filter_cpu_info.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { +namespace Convolution { +/* ============= Convolution (Gemm 1D) ============= */ +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_GEMM_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm1D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), + ::testing::ValuesIn(fusingParamsSetWithEmpty()), + ::testing::Values(empty_plugin_config)), + 
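// All instantiations in this new file share one mechanic: ::testing::Combine
// yields the cartesian product of its generators, and each resulting tuple
// becomes one gtest case. A rough sizing sketch for a suite like the one above:
//
//   size_t cases = n_conv_params * n_precisions * n_input_shapes *
//                  n_cpu_infos * n_fusing_params;  // one test per tuple
//
// getTestCaseName then renders every tuple into the reported test name.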
ConvolutionLayerCPUTest::getTestCaseName); + +std::vector inputShapesPlain2Blocked3d = { + {{}, {{ 1, 1, 7, 7, 7 }}}, + {{}, {{ 1, 2, 7, 7, 7 }}}, + {{}, {{ 1, 3, 7, 7, 7 }}}, + { + //dynamic shapes + { -1, 1, -1, {1, 200}, -1 }, + { //target static shapes + { 2, 1, 7, 7, 7 }, + { 1, 1, 9, 9, 9 } + } + }, + { + //dynamic shapes + { -1, 3, -1, {1, 200}, -1 }, + { //target static shapes + { 2, 3, 7, 7, 7 }, + { 1, 3, 9, 9, 9 } + } + } +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_GEMM_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D_cache()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_I8, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_GEMM_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated_empty_fusing, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_GEMM_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_I8_dilated, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_GEMM_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +/* ============= Convolution (2D) ============= */ +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_empty_fusing, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d_cache()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +const std::vector fusingParamsSet_dynBatch{ + emptyFusingSpec, + fusingSum, + fusingAddPerChannel, + fusingReluScaleShift +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_dynBatch, 
ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d_dynBatch()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::ValuesIn(fusingParamsSet_dynBatch), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_I8, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_empty_fusing, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_I8_dilated, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +const std::vector CPUParams_2D_plain_to_blocked = { + conv_sse42_plain_to_blocked_2D, + conv_avx2_plain_to_blocked_2D, + conv_avx512_plain_to_blocked_2D, +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_FP32, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapesPlain2Blocked2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D_plain_to_blocked)), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_FP32_dilated, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapesPlain2Blocked2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D_plain_to_blocked)), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +/* 
============= Reorder + Convolution ============= */ +const auto convParams_Reorder_2D = ::testing::Combine( + ::testing::Values(ov::Shape{1, 1}), + ::testing::Values(ov::Shape{2, 2}), + ::testing::Values(std::vector{0, 0}), + ::testing::Values(std::vector{0, 0}), + ::testing::Values(ov::Shape{1, 1}), + ::testing::Values(64), + ::testing::Values(ov::op::PadType::EXPLICIT) +); + +std::vector inputShapes_Reorder_2D = { + { + // dynamic shape + { -1, 32, -1, -1 }, + // target static shapes + { + { 1, 32, 39, 40 }, + { 2, 32, 20, 20 }, + { 1, 32, 39, 40 }, + { 2, 32, 20, 20 } + } + } +}; + +const std::vector fusingParamsSet_reorder{ + emptyFusingSpec, + fusingReluScaleShift, + fusingAddPerChannel +}; + +INSTANTIATE_TEST_SUITE_P(smoke_reorder_Conv_2D, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_Reorder_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes_Reorder_2D), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo({conv_avx512_2D_1x1})), + ::testing::ValuesIn(fusingParamsSet_reorder), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +/* ============= Convolution (3D) ============= */ +const std::vector CPUParams_3D = { + //conv_sse42_3D, // not supported jit_sse42 for 3d + conv_avx2_3D, + conv_avx512_3D, + conv_avx2_3D_nspc, + conv_avx2_3D_nspc_brgconv, + conv_avx512_3D_nspc, + conv_avx512_3D_nspc_brgconv +}; + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_3D)), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32_fusingScaleShiftAndFakeQuantizePerChannel, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_3D)), + ::testing::Values(fusingScaleShiftAndFakeQuantizePerChannel), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_I8, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::undefined), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_3D)), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_3D_FP32_dilated, ConvolutionLayerCPUTest, + ::testing::Combine( + ::testing::Combine( + convParams_ExplicitPadding_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::undefined), + 
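// For the *_dilated suites below, recall that a kernel of size k with dilation
// d spans (k - 1) * d + 1 input elements per axis; e.g. k = 3 with d = 2 covers
// 5 input columns:
//
//   x . x . x   // filter taps, '.' = skipped input element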
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_FP32_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes3d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_3D)),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_I8_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes3d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_3D)),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+const std::vector<CPUSpecificParams> CPUParams_3D_plain_to_blocked = {
+    conv_avx2_plain_to_blocked_3D,
+    conv_avx512_plain_to_blocked_3D,
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_PlainToBlocked_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked3d),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_3D_plain_to_blocked)),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_PlainToBlocked_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked3d),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_3D})),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_3D_FP32_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked3d),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_3D_plain_to_blocked)),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_3D_BF16_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked3d),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_3D})),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Kernel_1x1 (1D) ============= */
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP32_empty_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_I8, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Kernel_1x1 (2D) ============= */
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32_empty_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_I8, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Convolution auto padding tests ============= */
+
+const auto convParams_AutoPadding_2D = ::testing::Combine(
+    ::testing::Values(kernels2d().front()),
+    ::testing::ValuesIn(strides2d()),
+    ::testing::ValuesIn(padBegins2d()),
+    ::testing::ValuesIn(padEnds2d()),
+    ::testing::ValuesIn(dilations2d()),
+    ::testing::ValuesIn(numOutChannels()),
+    ::testing::Values(ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_AutoPad_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_AutoPadding_2D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Large Filter Test ============= */
+namespace {
+
+const size_t outChannels = 80;
+
+const ov::Shape kernel = { 251 };
+const ov::Shape stride = { 10 };
+const std::vector<ptrdiff_t> padBegins = { 0 };
+const std::vector<ptrdiff_t> padEnds = { 0 };
+const ov::Shape dilations = { 1 };
+
+const auto convParams_1D = ::testing::Combine(
+    ::testing::Values(kernel),
+    ::testing::Values(stride),
+    ::testing::Values(padBegins),
+    ::testing::Values(padEnds),
+    ::testing::Values(dilations),
+    ::testing::Values(outChannels),
+    ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+std::vector<InputShape> inShapes = {
+    {{}, {{ 1, 1, 600 }}},
+    {
+        //dynamic shape
+        { -1, 1, -1 },
+        { //target static shapes
+            { 1, 1, 600 },
+            { 10, 1, 700 },
+            { 1, 1, 600 }
+        }
+    }
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_Large_Filter, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_1D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapes),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::Values(CPUSpecificParams{{}, {}, {}, CPUTestsBase::any_type}),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+}  // namespace
+}  // namespace Convolution
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
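The inShapes list above mixes one fully static case ({{}, {{1, 1, 600}}}) with a dynamic one: a partial shape where -1 marks batch and spatial length as dynamic, followed by the static shapes each inference actually runs. The real ov::test::InputShape type lives in the shared test classes; the DemoInputShape below is an invented stand-in that just mirrors its two-part layout:

// Illustrative only — not the OpenVINO InputShape type itself.
#include <cstdint>
#include <iostream>
#include <vector>

struct DemoInputShape {
    std::vector<int64_t> dynamicShape;               // -1 marks a dynamic dimension
    std::vector<std::vector<int64_t>> targetShapes;  // static shapes run in sequence
};

int main() {
    // Mirrors the large-filter case above: batch and length are dynamic,
    // the channel count is pinned to 1.
    DemoInputShape shape{{-1, 1, -1}, {{1, 1, 600}, {10, 1, 700}, {1, 1, 600}}};
    for (const auto& target : shape.targetShapes)
        std::cout << target[0] << 'x' << target[1] << 'x' << target[2] << '\n';
    return 0;
}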
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/eltwise.cpp
index bf61d07e289d25..4268d252697103 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/eltwise.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/eltwise.cpp
@@ -7,12 +7,10 @@
 #include "test_utils/cpu_test_utils.hpp"
 #include "test_utils/fusing_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Eltwise {
 
 const auto params_4D = ::testing::Combine(
@@ -86,8 +84,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D, EltwiseLayerCPUTest, params_5D
 const auto params_4D_1D_constant_mode = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())),
-        ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY),
-        ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
+        ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY),
+        ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -103,8 +101,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant, EltwiseLayerCPUTe
 const auto params_4D_1D_parameter_mode = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())),
-        ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+        ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -120,8 +118,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter, EltwiseLayerCPUT
 const auto params_5D_1D_constant = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())),
-        ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY),
-        ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
+        ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY),
+        ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -137,8 +135,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant, EltwiseLayerCPUTe
 const auto params_5D_1D_parameter = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())),
-        ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+        ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -155,7 +153,7 @@ const auto params_4D_dyn_const = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(inShapes_4D_dyn_const()),
         ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
+        ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -172,7 +170,7 @@ const auto params_4D_dyn_param = ::testing::Combine(
     ::testing::Combine(
         ::testing::Values(inShapes_4D_dyn_param()),
         ::testing::ValuesIn(eltwiseOpTypesBinDyn()),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -189,7 +187,7 @@ const auto params_5D_dyn_const = ::testing::Combine(
     ::testing::Combine(
         ::testing::Values(inShapes_5D_dyn_const()),
         ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
+        ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -206,7 +204,7 @@ const auto params_5D_dyn_param = ::testing::Combine(
     ::testing::Combine(
         ::testing::Values(inShapes_5D_dyn_param()),
         ::testing::ValuesIn(eltwiseOpTypesBinDyn()),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
         ::testing::ValuesIn(opTypes()),
         ::testing::ValuesIn(netType()),
         ::testing::Values(ov::element::undefined),
@@ -219,5 +217,6 @@ const auto params_5D_dyn_param = ::testing::Combine(
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_5D_dyn_param, EltwiseLayerCPUTest::getTestCaseName);
 
-} // namespace Eltwise
-} // namespace CPULayerTestsDefinitions
+}  // namespace Eltwise
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/matmul.cpp
index a1eaf7ac28db09..ba9be6b8a916db 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/matmul.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/matmul.cpp
@@ -8,12 +8,10 @@
 #include "test_utils/filter_cpu_info.hpp"
 #include "test_utils/fusing_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace MatMul {
 /* ============= MatMul ============= */
 namespace matmul {
@@ -291,7 +289,7 @@ const auto matMulParams = ::testing::Combine(::testing::ValuesIn(IS),
                                              ::testing::ValuesIn(netPRCs()),
                                              ::testing::Values(ElementType::undefined),
                                              ::testing::Values(ElementType::undefined),
-                                             ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                             ::testing::Values(utils::InputLayerType::PARAMETER),
                                              ::testing::Values(ov::test::utils::DEVICE_CPU),
                                              ::testing::ValuesIn(additionalConfig()));
 
@@ -306,7 +304,7 @@ const auto matMulParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynam
                                                     ::testing::ValuesIn(netPRCs()),
                                                     ::testing::Values(ElementType::undefined),
                                                     ::testing::Values(ElementType::undefined),
-                                                    ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                                    ::testing::Values(utils::InputLayerType::PARAMETER),
                                                     ::testing::Values(ov::test::utils::DEVICE_CPU),
                                                     ::testing::ValuesIn(additionalConfig()));
 
@@ -321,7 +319,7 @@ const auto matMulParamsDynamic_nightly = ::testing::Combine(::testing::ValuesIn(
                                                             ::testing::ValuesIn(netPRCs()),
                                                             ::testing::Values(ElementType::undefined),
                                                             ::testing::Values(ElementType::undefined),
-                                                            ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                                            ::testing::Values(utils::InputLayerType::PARAMETER),
                                                             ::testing::Values(ov::test::utils::DEVICE_CPU),
                                                             ::testing::ValuesIn(additionalConfig()));
 
@@ -332,6 +330,7 @@ const auto testParamsDynamic_nightly = ::testing::Combine(matMulParamsDynamic_ni
 INSTANTIATE_TEST_SUITE_P(nightly_MM_Dynamic, MatMulLayerCPUTest, testParamsDynamic_nightly, MatMulLayerCPUTest::getTestCaseName);
 
-} // namespace matmul
-} // namespace MatMul
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace matmul
+}  // namespace MatMul
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/multinomial.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/multinomial.cpp
index 911b0085a39f5e..cf806b009d0c01 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/multinomial.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/multinomial.cpp
@@ -11,8 +11,6 @@ namespace {
 using ov::test::MultinomialLayerTest;
 
-std::vector<std::pair<uint64_t, uint64_t>> global_op_seed = {{1ul, 2ul}, {0ul, 0ul}};
-
 std::vector<float> probs_4x4_f32 = {0.00001f,
                                     0.001f,
                                     0.1f,
@@ -53,61 +51,69 @@ std::vector<ov::bfloat16> probs_1x3_bf16_log = {ov::bfloat16(3.0f), ov::bfloat16
 
 std::vector<int32_t> num_samples_scalar_i32 = {1};
 std::vector<int64_t> num_samples_1x1_i64 = {2};
-std::vector<int64_t> num_samples_scalar_i64 = {3};
-
-const std::vector<ov::Tensor> probs = {ov::Tensor(ov::element::f32, {4, 4}, probs_4x4_f32.data()),
-                                       ov::Tensor(ov::element::f16, {2, 3}, probs_2x3_f16.data()),
-                                       ov::Tensor(ov::element::bf16, {1, 3}, probs_1x3_bf16.data())};
-
-const std::vector<ov::Tensor> probs_log = {ov::Tensor(ov::element::f32, {4, 4}, probs_4x4_f32_log.data()),
-                                           ov::Tensor(ov::element::f16, {2, 3}, probs_2x3_f16_log.data()),
-                                           ov::Tensor(ov::element::bf16, {1, 3}, probs_1x3_bf16_log.data())};
-
-const std::vector<ov::Tensor> num_samples = {ov::Tensor(ov::element::i32, {}, num_samples_scalar_i32.data()),
-                                             ov::Tensor(ov::element::i64, {1}, num_samples_1x1_i64.data()),
-                                             ov::Tensor(ov::element::i64, {}, num_samples_scalar_i64.data())};
-
-const std::vector<ov::test::ElementType> convert_type = {ov::test::ElementType::i32};
-
-const std::vector<bool> with_replacement = {
-    // true,
-    false};
-
-const auto params_static = ::testing::Combine(::testing::Values("static"),
-                                              ::testing::ValuesIn(probs),
-                                              ::testing::ValuesIn(num_samples),
-                                              ::testing::ValuesIn(convert_type),
-                                              ::testing::ValuesIn(with_replacement),
-                                              ::testing::Values(false), // log_probs
-                                              ::testing::ValuesIn(global_op_seed),
-                                              ::testing::Values(ov::test::utils::DEVICE_CPU));
-
-const auto params_static_log = ::testing::Combine(::testing::Values("static"),
-                                                  ::testing::ValuesIn(probs_log),
-                                                  ::testing::ValuesIn(num_samples),
-                                                  ::testing::ValuesIn(convert_type),
-                                                  ::testing::ValuesIn(with_replacement),
-                                                  ::testing::Values(true), // log_probs
-                                                  ::testing::ValuesIn(global_op_seed),
-                                                  ::testing::Values(ov::test::utils::DEVICE_CPU));
-
-const auto params_dynamic = ::testing::Combine(::testing::Values("dynamic"),
-                                               ::testing::ValuesIn(probs),
-                                               ::testing::ValuesIn(num_samples),
-                                               ::testing::ValuesIn(convert_type),
-                                               ::testing::ValuesIn(with_replacement),
-                                               ::testing::Values(false), // log_probs
-                                               ::testing::ValuesIn(global_op_seed),
-                                               ::testing::Values(ov::test::utils::DEVICE_CPU));
-
-const auto params_dynamic_log = ::testing::Combine(::testing::Values("dynamic"),
-                                                   ::testing::ValuesIn(probs_log),
-                                                   ::testing::ValuesIn(num_samples),
-                                                   ::testing::ValuesIn(convert_type),
-                                                   ::testing::ValuesIn(with_replacement),
-                                                   ::testing::Values(true), // log_probs
-                                                   ::testing::ValuesIn(global_op_seed),
-                                                   ::testing::Values(ov::test::utils::DEVICE_CPU));
+
+const auto probs = testing::Values(ov::Tensor(ov::element::f32, {4, 4}, probs_4x4_f32.data()),
+                                   ov::Tensor(ov::element::f16, {2, 3}, probs_2x3_f16.data()),
+                                   ov::Tensor(ov::element::bf16, {1, 3}, probs_1x3_bf16.data()));
+
+const auto probs_log = testing::Values(ov::Tensor(ov::element::f32, {4, 4}, probs_4x4_f32_log.data()),
+                                       ov::Tensor(ov::element::f16, {2, 3}, probs_2x3_f16_log.data()),
+                                       ov::Tensor(ov::element::bf16, {1, 3}, probs_1x3_bf16_log.data()));
+
+const auto num_samples = testing::Values(ov::Tensor(ov::element::i32, {}, num_samples_scalar_i32.data()),
+                                         ov::Tensor(ov::element::i64, {1}, num_samples_1x1_i64.data()));
+
+const auto convert_type = testing::Values(ov::test::ElementType::i32, ov::test::ElementType::i64);
+
+const auto with_replacement = testing::Values(true, false);
+
+const auto log_probs_true = testing::Values(true);
+const auto log_probs_false = testing::Values(false);
+
+const auto test_type_static = testing::Values("static");
+const auto test_type_dynamic = testing::Values("dynamic");
+
+// NOTE: (0,0) seeds are skipped (ticket 126095)
+const auto global_op_seed =
+    testing::Values(std::pair<uint64_t, uint64_t>{1ul, 2ul}, std::pair<uint64_t, uint64_t>{0ul, 0ul});
+
+const auto device_cpu = testing::Values(ov::test::utils::DEVICE_CPU);
+
+const auto params_static = ::testing::Combine(test_type_static,
+                                              probs,
+                                              num_samples,
+                                              convert_type,
+                                              with_replacement,
+                                              log_probs_false,
+                                              global_op_seed,
+                                              device_cpu);
+
+const auto params_static_log = ::testing::Combine(test_type_static,
+                                                  probs_log,
+                                                  num_samples,
+                                                  convert_type,
+                                                  with_replacement,
+                                                  log_probs_true,
+                                                  global_op_seed,
+                                                  device_cpu);
+
+const auto params_dynamic = ::testing::Combine(test_type_dynamic,
+                                               probs,
+                                               num_samples,
+                                               convert_type,
+                                               with_replacement,
+                                               log_probs_false,
+                                               global_op_seed,
+                                               device_cpu);
+
+const auto params_dynamic_log = ::testing::Combine(test_type_dynamic,
+                                                   probs_log,
+                                                   num_samples,
+                                                   convert_type,
+                                                   with_replacement,
+                                                   log_probs_true,
+                                                   global_op_seed,
+                                                   device_cpu);
 
 INSTANTIATE_TEST_SUITE_P(smoke_MultinomialStatic,
                          MultinomialLayerTest,
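The multinomial rewrite above folds the old std::vector-plus-::testing::ValuesIn pairs into named ::testing::Values holders. The two generator forms enumerate the same parameter sets; a minimal gtest sketch (names invented) showing the equivalence:

#include <gtest/gtest.h>
#include <vector>

class DemoParamTest : public ::testing::TestWithParam<int> {};
TEST_P(DemoParamTest, AlwaysPasses) { SUCCEED() << GetParam(); }

// ::testing::ValuesIn draws parameters from an existing container...
static const std::vector<int> demo_values = {1, 2, 3};
INSTANTIATE_TEST_SUITE_P(FromContainer, DemoParamTest, ::testing::ValuesIn(demo_values));

// ...while ::testing::Values lists the elements inline; both enumerate 1, 2, 3.
INSTANTIATE_TEST_SUITE_P(Inline, DemoParamTest, ::testing::Values(1, 2, 3));

The inline form drops the intermediate vectors entirely, which is the space saving the refactor above is after.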
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/mvn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/mvn.cpp
index c0f05b261d33a0..6d2cf965aaa340 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/mvn.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/mvn.cpp
@@ -7,12 +7,10 @@
 #include "test_utils/cpu_test_utils.hpp"
 #include "test_utils/fusing_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace MVN {
 
 const std::vector<bool> normalizeVariance = {
@@ -210,5 +208,6 @@ const auto Mvn5DStatic = ::testing::Combine(
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn5D_Static, MvnLayerCPUTest, Mvn5DStatic, MvnLayerCPUTest::getTestCaseName);
 
-} // namespace MVN
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace MVN
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/nms_rotated.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/nms_rotated.cpp
index 7888a88a60221d..12c10e0087f2f8 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/nms_rotated.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/nms_rotated.cpp
@@ -5,8 +5,9 @@
 #include "single_op_tests/nms_rotated.hpp"
 
 using namespace LayerTestsDefinitions;
-using namespace ov::test;
 
+namespace ov {
+namespace test {
 static const std::vector<std::vector<InputShape>> input_shapes = {
     {
@@ -93,3 +94,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_, NmsRotatedOpTest,
                                             ::testing::Values(empty_plugin_config),
                                             ::testing::Values(utils::DEVICE_CPU)),
                          NmsRotatedOpTest::getTestCaseName);
+
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp
index e15408a6085b9d..c4c0121461830c 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/pooling.cpp
@@ -7,12 +7,10 @@
 #include "test_utils/cpu_test_utils.hpp"
 #include "test_utils/fusing_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Pooling {
 
 static CPUSpecificParams expectedCpuConfig() {
@@ -24,9 +22,9 @@ static CPUSpecificParams expectedCpuConfig() {
 }
 
 const std::vector<CPUSpecificParams> vecCpuConfigs = {expectedCpuConfig()};
-const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg3D_RefOnly = {
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2}, {2}, {2}, {2},
-                            expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false },
+const std::vector<poolSpecificParams> paramsAvg3D_RefOnly = {
+        poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2}, {2}, {2}, {2},
+                            expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, false },
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_3D, PoolingLayerCPUTest,
@@ -59,9 +57,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_3D_NotOptimized, PoolingLayerCPUTest,
                              ::testing::Values(emptyFusingSpec)),
                          PoolingLayerCPUTest::getTestCaseName);
 
-const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg4D_RefOnly = {
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {2, 2}, {2, 2},
-                            expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false },
+const std::vector<poolSpecificParams> paramsAvg4D_RefOnly = {
+        poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {2, 2}, {2, 2},
+                            expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, false },
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_CPU_4D, PoolingLayerCPUTest,
@@ -112,21 +110,21 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_Large, PoolingLayerCPUTest,
                              ::testing::Values(emptyFusingSpec)),
                          PoolingLayerCPUTest::getTestCaseName);
 
-const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV85D_ref = {
-        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
-                                                        ngraph::element::Type_t::i32, 0,
-                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER },
-        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
-                                                        ngraph::element::Type_t::i32, 0,
-                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
-        LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2},
-                                                        ngraph::element::Type_t::i32, 0,
-                                                        ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT },
+const std::vector<maxPoolV8SpecificParams> paramsMaxV85D_ref = {
+        maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0},
+                                 ov::element::Type_t::i32, 0,
+                                 ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER },
+        maxPoolV8SpecificParams{ {2, 2, 2}, {1, 1, 1}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1},
+                                 ov::element::Type_t::i32, 0,
+                                 ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT },
+        maxPoolV8SpecificParams{ {2, 3, 4}, {2, 2, 2}, {2, 1, 1}, {1, 1, 1}, {1, 2, 2},
+                                 ov::element::Type_t::i32, 0,
+                                 ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT },
 };
 
-const std::vector<LayerTestsDefinitions::poolSpecificParams> paramsAvg5D_RefOnly = {
-        LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
-                            expectedAvgRoundingType(), ngraph::op::PadType::EXPLICIT, false },
+const std::vector<poolSpecificParams> paramsAvg5D_RefOnly = {
+        poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2},
+                            expectedAvgRoundingType(), ov::op::PadType::EXPLICIT, false },
 };
 
 //FIXME: 5D cases are temporarily disabled on ARM because ACL support check in Pooling::getSupportedDescriptors() can't check layout
@@ -177,5 +175,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_NotOptimized, PoolingLayerCPUTest,
                              ::testing::Values(emptyFusingSpec)),
                          PoolingLayerCPUTest::getTestCaseName);
 #endif
-} // namespace Pooling
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace Pooling
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
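The AVG pooling cases above pin kernel {2}, stride {2} and symmetric pads {2}, and inject expectedAvgRoundingType() because the reference and the optimized paths must agree on how the output length is rounded. A self-contained sketch of the standard output-size arithmetic (not OpenVINO code; the formula is the conventional pooling one):

#include <cmath>
#include <cstdio>

// Standard pooling output-length formula: the fractional step count is
// floored or ceiled, depending on the rounding mode, before the final "+ 1".
static int pooledLength(int in, int kernel, int stride, int padBegin, int padEnd, bool ceilMode) {
    const double steps = static_cast<double>(in + padBegin + padEnd - kernel) / stride;
    return static_cast<int>(ceilMode ? std::ceil(steps) : std::floor(steps)) + 1;
}

int main() {
    // Kernel 2, stride 2, pads 2/2 on a length-7 input: the two modes diverge
    // because (7 + 4 - 2) is not a multiple of the stride.
    std::printf("floor rounding: %d\n", pooledLength(7, 2, 2, 2, 2, false));  // 5
    std::printf("ceil  rounding: %d\n", pooledLength(7, 2, 2, 2, 2, true));   // 6
    return 0;
}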
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp
index f319fb6ada2719..61101375bb5598 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/random_uniform.cpp
@@ -5,9 +5,9 @@
 #include "single_layer_tests/classes/random_uniform.hpp"
 
 using namespace CPUTestUtils;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace RandomUniform {
 
 static const std::vector<ov::test::ElementType> shape_prc = {
@@ -64,5 +64,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_ParamConst, RandomUniformLayerTestCPU,
                              ::testing::Values(empty_plugin_config)),
                          RandomUniformLayerTestCPU::getTestCaseName);
 
-} // namespace RandomUniform
-} // namespace CPULayerTestsDefinitions
+}  // namespace RandomUniform
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/reduce.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/reduce.cpp
index 22c49156eba333..3de2c8e4f2bbdc 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/reduce.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/reduce.cpp
@@ -7,12 +7,10 @@
 #include "test_utils/cpu_test_utils.hpp"
 #include "test_utils/fusing_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Reduce {
 
 std::vector<std::vector<ov::test::InputShape>> inputShapes = {
@@ -161,5 +159,6 @@ INSTANTIATE_TEST_SUITE_P(
     ReduceCPULayerTest::getTestCaseName
 );
 
-} // namespace Reduce
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace Reduce
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/softmax.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/softmax.cpp
index 03655fd40a536d..6a17df004e92ce 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/softmax.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/softmax.cpp
@@ -5,12 +5,10 @@
 #include "single_layer_tests/classes/softmax.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace SoftMax {
 
 const auto notOptimizedCPUSpec = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"};
@@ -146,5 +144,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_SoftMax_Unsupported_CPU, SoftMaxLayerCPUTest, UnsupportedParams, SoftMaxLayerCPUTest::getTestCaseName);
 
-} // namespace SoftMax
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace SoftMax
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
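The transpose instances in the next file switch additional_config from raw string maps to ov::AnyMap entries keyed by ov::hint::inference_precision. A minimal sketch of how such a map is consumed, assuming a standard OpenVINO installation; the one-op Relu model is invented so the example stays self-contained:

// Sketch only: builds a throwaway model and compiles it with an AnyMap config.
#include <openvino/openvino.hpp>
#include <openvino/op/parameter.hpp>
#include <openvino/op/relu.hpp>

int main() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 8, 8});
    auto relu = std::make_shared<ov::op::v0::Relu>(input);
    auto model = std::make_shared<ov::Model>(relu->outputs(), ov::ParameterVector{input});

    // The same key/value style the instances put into additional_config.
    ov::AnyMap config{{ov::hint::inference_precision.name(), ov::element::f32.to_string()}};

    ov::Core core;
    auto compiled = core.compile_model(model, "CPU", config);
    (void)compiled;
    return 0;
}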
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/transpose.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/transpose.cpp
index c1260a923b09a1..23ef684f1076f2 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/transpose.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/common/transpose.cpp
@@ -6,14 +6,12 @@
 #include "shared_test_classes/single_layer/transpose.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Transpose {
-std::vector<std::map<std::string, std::string>> additional_config = {
+std::vector<ov::AnyMap> additional_config = {
     {{ov::hint::inference_precision.name(), ov::element::f32.to_string()}},
     {{ov::hint::inference_precision.name(), ov::element::f16.to_string()}}
 };
@@ -21,9 +19,9 @@ std::vector<std::map<std::string, std::string>> additional_config = {
 const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {}, {}, {}};
 const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {}, {}, {}};
 
-const std::vector<Precision> netPrecisions = {
-    Precision::I8,
-    Precision::FP32
+const std::vector<ov::element::Type> netPrecisions = {
+    ov::element::i8,
+    ov::element::f32
 };
 
 const std::vector<std::vector<size_t>> inputOrderPerChannels4D = {
@@ -97,5 +95,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes4D_PermutePerChannels, TransposeLaye
                              ::testing::Values(CPUSpecificParams{})),
                          TransposeLayerCPUTest::getTestCaseName);
 
-} // namespace Transpose
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace Transpose
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/activation.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/activation.cpp
index 84b5b401de171c..89d8a7c0250294 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/activation.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/activation.cpp
@@ -6,19 +6,18 @@
 #include "shared_test_classes/single_layer/activation.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
+using namespace ov::test::utils;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Activation {
 namespace {
 
-const std::vector<Precision>& netPrc() {
-    static const std::vector<Precision> netPrc {
-        Precision::FP32,
-        Precision::BF16,
+const std::vector<ov::element::Type>& netPrc() {
+    static const std::vector<ov::element::Type> netPrc {
+        ov::element::f32,
+        ov::element::bf16,
     };
 
     return netPrc;
@@ -46,8 +45,8 @@ const auto blockedCases3D = ::testing::Combine(
     ::testing::Values(activationShapes()),
     ::testing::ValuesIn(ov::test::utils::combineParams(activationTypesBlocked())),
     ::testing::ValuesIn(netPrc()),
-    ::testing::Values(Precision::FP32),
-    ::testing::Values(Precision::FP32),
+    ::testing::Values(ov::element::f32),
+    ::testing::Values(ov::element::f32),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams3Dblocked()))
 );
 
@@ -67,8 +66,8 @@ const auto basicCases4D = ::testing::Combine(
     ::testing::Values(activationShapes()),
    ::testing::ValuesIn(ov::test::utils::combineParams(activationTypes())),
     ::testing::ValuesIn(netPrc()),
-    ::testing::Values(Precision::FP32),
-    ::testing::Values(Precision::FP32),
+    ::testing::Values(ov::element::f32),
+    ::testing::Values(ov::element::f32),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams4Dblocked()))
 );
 
@@ -88,13 +87,14 @@ const auto basicCases5D = ::testing::Combine(
     ::testing::Values(activationShapes()),
     ::testing::ValuesIn(ov::test::utils::combineParams(activationTypes())),
     ::testing::ValuesIn(netPrc()),
-    ::testing::Values(Precision::FP32),
-    ::testing::Values(Precision::FP32),
+    ::testing::Values(ov::element::f32),
+    ::testing::Values(ov::element::f32),
     ::testing::ValuesIn(filterCPUSpecificParams(cpuParams5Dblocked()))
 );
 
 INSTANTIATE_TEST_SUITE_P(smoke_Activation5D_Eltwise_CPU_Blocked, ActivationLayerCPUTest, basicCases5D, ActivationLayerCPUTest::getTestCaseName);
 
-} // namespace
-} // namespace Activation
-} // namespace CPULayerTestsDefinitions
+}  // namespace
+}  // namespace Activation
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/conversion.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/conversion.cpp
index 9206eca36d7352..258b0ef53a361e 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/conversion.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/conversion.cpp
@@ -6,12 +6,10 @@
 #include "shared_test_classes/single_layer/conversion.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Conversion {
 namespace {
 
@@ -36,9 +34,9 @@ std::vector<CPUSpecificParams> memForm4D_static_blocked = {
     CPUSpecificParams({nChw16c}, {nChw16c}, {}, {})
 };
 
-const std::vector<Precision> precisions_floating_point = {
-    Precision::FP32,
-    Precision::BF16
+const std::vector<ov::element::Type> precisions_floating_point = {
+    ov::element::f32,
+    ov::element::bf16
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_Blocked, ConvertCPULayerTest,
@@ -53,7 +51,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL_Static, ConvertCPULayerT
                          ::testing::Combine(
                              ::testing::ValuesIn(inShapes_4D_static()),
                              ::testing::ValuesIn(precisions_floating_point),
-                             ::testing::Values(Precision::BOOL),
+                             ::testing::Values(ov::element::boolean),
                              ::testing::Values(CPUSpecificParams({nchw}, {nchw}, {}, {}))),
                          ConvertCPULayerTest::getTestCaseName);
 
@@ -61,10 +59,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertCPULayerTest_BOOL_Dynamic, ConvertCPULayer
                          ::testing::Combine(
                              ::testing::ValuesIn(inShapes_4D_dynamic()),
                              ::testing::ValuesIn(precisions_floating_point),
-                             ::testing::Values(Precision::BOOL),
+                             ::testing::Values(ov::element::boolean),
                              ::testing::Values(CPUSpecificParams({nchw}, {nchw}, {}, "ref"))),
                          ConvertCPULayerTest::getTestCaseName);
 
 } // namespace
 } // namespace Conversion
-} // namespace CPULayerTestsDefinitions
\ No newline at end of file
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/convolution.cpp
new file mode 100644
index 00000000000000..f322e1088edb62
--- /dev/null
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/convolution.cpp
@@ -0,0 +1,667 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/classes/convolution.hpp"
+#include "shared_test_classes/single_op/convolution.hpp"
+#include "test_utils/cpu_test_utils.hpp"
+#include "test_utils/filter_cpu_info.hpp"
+
+using namespace CPUTestUtils;
+
+namespace ov {
+namespace test {
+namespace Convolution {
+namespace {
+
+std::vector<CPUSpecificParams> filterCPUInfoForDevice_BF16(std::vector<CPUSpecificParams> allParams) {
+    std::vector<CPUSpecificParams> specificParams;
+    bool with_bf16 = with_cpu_x86_bfloat16();
+    std::copy_if(allParams.begin(), allParams.end(), std::back_inserter(specificParams), [with_bf16](const CPUSpecificParams& item) {
+        const auto &selected = std::get<3>(item);
+        // when no bf16 hardware brgconv will not work
+        if (!with_bf16 && selected.find("brgconv") != std::string::npos) {
+            return false;
+        }
+        return true;
+    });
+
+    return filterCPUInfoForDevice(specificParams);
+}
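filterCPUInfoForDevice_BF16 above is a plain std::copy_if keyed on the selected primitive name: brgconv entries are dropped when the host lacks bf16 support. For readers skimming the diff, a stripped-down, runnable sketch of the same idiom, with plain strings standing in for CPUSpecificParams (names invented):

#include <algorithm>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

// Keep only implementations that can run on the current machine: when the
// bf16 feature flag is off, drop every entry whose name mentions "brgconv".
static std::vector<std::string> filterImpls(const std::vector<std::string>& all, bool withBf16) {
    std::vector<std::string> kept;
    std::copy_if(all.begin(), all.end(), std::back_inserter(kept), [withBf16](const std::string& impl) {
        return withBf16 || impl.find("brgconv") == std::string::npos;
    });
    return kept;
}

int main() {
    const std::vector<std::string> impls = {"jit_avx512", "brgconv_avx512", "jit_avx2"};
    for (const auto& impl : filterImpls(impls, /*withBf16=*/false))
        std::cout << impl << '\n';  // prints jit_avx512 and jit_avx2 only
    return 0;
}

Filtering at instantiation time, rather than skipping inside the test body, keeps unsupported configurations out of the reported test count entirely.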
+
+const std::vector<fusingSpecificParams> fusingParamsSetWithoutEmpty{
+    // eltwise
+    fusingRelu,
+    fusingPRelu1DScaleShift,
+    // depthwise
+    fusingReluScaleShift,
+    // fake quantize
+    fusingFakeQuantizePerTensorRelu,
+    fusingFakeQuantizePerChannelRelu,
+    // sum
+    fusingSumEluFQ,
+    fusingSum,
+    // bias
+    fusingAddPerChannel
+};
+
+const std::vector<fusingSpecificParams> fusingParamsSetBF16{
+    emptyFusingSpec,
+    // eltwise
+    fusingRelu,
+    // depthwise
+    fusingPRelu1DScaleShift,
+    // sum
+    fusingSum,
+    // bias
+    fusingAddPerChannel
+};
+
+/* ============= Convolution (Gemm 1D) ============= */
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm1D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())),
+        ::testing::ValuesIn(fusingParamsSetWithEmpty()),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32_ImproperPriorityList,
+                         ConvolutionLayerCPUTest,
+                         ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_1D(),
+                                                               ::testing::Values(ElementType::f32),
+                                                               ::testing::Values(ElementType::undefined),
+                                                               ::testing::Values(ElementType::undefined),
+                                                               ::testing::ValuesIn(inShapesGemm1D()),
+                                                               ::testing::Values(ov::test::utils::DEVICE_CPU)),
+                                            ::testing::ValuesIn(filterCPUInfo({conv_gemm_1D})),
+                                            ::testing::Values(emptyFusingSpec),
+                                            ::testing::Values(empty_plugin_config)),
+                         ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm1D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo({conv_gemm_1D})), // todo: [AV] what about conv_gemm_1D_nspc?
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_I8, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm1D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm2D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_BF16_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_2D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm2D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm2D_cache()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())),
+        ::testing::ValuesIn(fusingParamsSetWithoutEmpty),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_2D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm2D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())),
+        ::testing::ValuesIn(fusingParamsSetWithoutEmpty),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d_cache()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())),
+        ::testing::ValuesIn(fusingParamsSetWithoutEmpty),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_2D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())),
+        ::testing::ValuesIn(fusingParamsSetWithoutEmpty),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Kernel_1x1 (1D) ============= */
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP32_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())),
+        ::testing::ValuesIn(fusingParamsSetWithoutEmpty),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32_fusing, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())),
+        ::testing::ValuesIn(fusingParamsSetWithoutEmpty),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Convolution (1D) ============= */
+const auto convParams_ExplicitPadding_1D = ::testing::Combine(
+    ::testing::ValuesIn(kernels1d()),
+    ::testing::ValuesIn(strides1d()),
+    ::testing::ValuesIn(padBegins1d()),
+    ::testing::ValuesIn(padEnds1d()),
+    ::testing::ValuesIn(dilations1d()),
+    ::testing::ValuesIn(numOutChannels()),
+    ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+const std::vector<CPUSpecificParams> CPUParams_1D = {
+    conv_sse42_1D,
+    conv_avx2_1D,
+    conv_avx512_1D,
+    conv_sse42_1D_nspc,
+    conv_avx2_1D_nspc,
+    conv_avx2_1D_nspc_brgconv,
+    conv_avx512_1D_nspc,
+    conv_avx512_1D_nspc_brgconv
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1D)),
+        ::testing::ValuesIn(fusingParamsSetWithEmpty()),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D,
+                                                         conv_avx512_1D_nspc_brgconv,
+                                                         conv_avx512_1D_nspc_brgconv_amx})), // todo: [AV] what about conv_avx512_1D_nspc?
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1D)),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+const std::vector<CPUSpecificParams> CPUParams_1D_plain_to_blocked = {
+    conv_sse42_plain_to_blocked_1D,
+    conv_avx2_plain_to_blocked_1D,
+    conv_avx512_plain_to_blocked_1D,
+};
+
+std::vector<InputShape> inputShapesPlain2Blocked1d = {
+    {{}, {{1, 1, 7}}},
+    {{}, {{1, 2, 7}}},
+    {{}, {{1, 3, 7}}},
+    {
+        //dynamic shapes
+        {-1, 1, {1, 200}},
+        { //target static shapes
+            {2, 1, 7},
+            {1, 1, 9}
+        }
+    },
+    {
+        //dynamic shapes
+        {-1, 3, {1, 200}},
+        { //target static shapes
+            {2, 3, 7},
+            {1, 3, 9}
+        }
+    }
+};
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked1d),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_plain_to_blocked)),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked1d),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_1D})),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, conv_avx512_2D_nspc,
+                                                         conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_2D_BF16_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_2D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, conv_avx512_2D_nspc,
+                                                         conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_2D})),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_BF16_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_2D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapesPlain2Blocked2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_2D})),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_1D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes1d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D_1x1, conv_avx512_2D_1x1_nspc,
+                                                         conv_avx512_1D_1x1_nspc_brgconv, conv_avx512_1D_1x1_nspc_brgconv_amx})),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_1x1_2D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D_1x1, conv_avx512_2D_1x1_nspc,
+                                                         conv_avx512_2D_1x1_nspc_brgconv, conv_avx512_2D_1x1_nspc_brgconv_amx})),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Jit Planar ============= */
+/* ============= Convolution planar params (2D) ============= */
+const std::vector<CPUSpecificParams> CPUParams_Jit_Planar_2D = {
+    // sse42 is not supported
+    conv_avx2_planar_2D,
+    conv_avx512_planar_2D,
+};
+
+const auto convParams_Planar_ExplicitPadding_2D = ::testing::Combine(
+    ::testing::ValuesIn(kernels2d()),
+    ::testing::Values(ov::Shape{1, 1}),
+    ::testing::ValuesIn(padBegins2d()),
+    ::testing::ValuesIn(padEnds2d()),
+    ::testing::ValuesIn(dilations2d()),
+    ::testing::Values(1),
+    ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+const auto convParams_Planar_ExplicitPadding_2D_dilated = ::testing::Combine(
+    ::testing::ValuesIn(kernels2d()),
+    ::testing::Values(ov::Shape{1, 1}),
+    ::testing::ValuesIn(padBegins2d()),
+    ::testing::ValuesIn(padEnds2d()),
+    ::testing::Values(ov::Shape{2, 2}),
+    ::testing::Values(1),
+    ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_Jit_Planar_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_Planar_ExplicitPadding_2D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_2D)),
+        ::testing::Values(emptyFusingSpec, fusingRelu),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_2D_Jit_Planar_FP32_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_Planar_ExplicitPadding_2D_dilated,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes2d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_2D)),
+        ::testing::Values(emptyFusingSpec, fusingRelu),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Convolution (GEMM 3D) ============= */
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_3D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm3D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_I8, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_3D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm3D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_FP32_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm3D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())),
+        ::testing::Values(emptyFusingSpec),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_I8_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::i8),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm3D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())),
+        ::testing::Values(fusingSum),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+/* ============= Convolution planar params (3D) ============= */
+const std::vector<CPUSpecificParams> CPUParams_Jit_Planar_3D = {
+    // sse42 is not supported
+    conv_avx2_planar_3D,
+    conv_avx512_planar_3D,
+};
+
+const auto convParams_Planar_ExplicitPadding_3D = ::testing::Combine(
+    ::testing::ValuesIn(kernels3d()),
+    ::testing::Values(ov::Shape{1, 1, 1}),
+    ::testing::ValuesIn(padBegins3d()),
+    ::testing::ValuesIn(padEnds3d()),
+    ::testing::ValuesIn(dilations3d()),
+    ::testing::Values(1),
+    ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+const auto convParams_Planar_ExplicitPadding_3D_dilated = ::testing::Combine(
+    ::testing::ValuesIn(kernels3d()),
+    ::testing::Values(ov::Shape{1, 1, 1}),
+    ::testing::ValuesIn(padBegins3d()),
+    ::testing::ValuesIn(padEnds3d()),
+    ::testing::Values(ov::Shape{2, 2, 2}),
+    ::testing::Values(1),
+    ::testing::Values(ov::op::PadType::EXPLICIT)
+);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_3D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm3D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_BF16_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_GEMM_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inShapesGemm3D()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_BF16, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes3d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, conv_avx512_3D_nspc,
+                                                         conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_BF16_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_ExplicitPadding_3D_dilated(),
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes3d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, conv_avx512_3D_nspc,
+                                                         conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})),
+        ::testing::ValuesIn(fusingParamsSetBF16),
+        ::testing::Values(cpu_bf16_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_Jit_Planar_FP32, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_Planar_ExplicitPadding_3D,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes3d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_3D)),
+        ::testing::Values(emptyFusingSpec, fusingRelu),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(Conv_3D_Jit_Planar_FP32_dilated, ConvolutionLayerCPUTest,
+    ::testing::Combine(
+        ::testing::Combine(
+            convParams_Planar_ExplicitPadding_3D_dilated,
+            ::testing::Values(ElementType::f32),
+            ::testing::Values(ElementType::undefined),
+            ::testing::Values(ElementType::undefined),
+            ::testing::ValuesIn(inputShapes3d()),
+            ::testing::Values(ov::test::utils::DEVICE_CPU)),
+        ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_3D)),
+        ::testing::Values(emptyFusingSpec, fusingRelu),
+        ::testing::Values(empty_plugin_config)),
+    ConvolutionLayerCPUTest::getTestCaseName);
+
+}  // namespace
+}  // namespace Convolution
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp
index 3136cfc3f606a3..9d44fb111d9eca 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/eltwise.cpp
@@ -9,13 +9,10 @@
 #include
 #include
 
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ngraph::helpers;
-using namespace ov::test;
-
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 namespace Eltwise {
 namespace {
 
@@ -114,8 +111,8 @@ const std::vector<std::vector<ngraph::Shape>>& inShapes_5D_Blocked_Planar() {
     return inShapes_5D_Blocked_Planar;
 }
 
-const std::vector<std::vector<ngraph::Shape>>& inShapes_5D_Planar_Blocked() {
-    static const std::vector<std::vector<ngraph::Shape>> inShapes_5D_Planar_Blocked = {
+const std::vector<std::vector<ov::Shape>>& inShapes_5D_Planar_Blocked() {
+    static const std::vector<std::vector<ov::Shape>> inShapes_5D_Planar_Blocked = {
         {{2, 1, 31, 1, 3}, {2, 17, 31, 4, 3}},
         {{2, 1, 1, 3, 4}, {2, 17, 5, 3, 1}},
     };
@@ -177,7 +174,7 @@ const auto params_4D_fusing = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())),
         ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
         ::testing::ValuesIn(opTypes()),
        ::testing::Values(ElementType::f32),
         ::testing::Values(ov::element::undefined),
@@ -194,7 +191,7 @@ const auto params_4D_fusing_blocked_blocked = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())),
         ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
         ::testing::ValuesIn(opTypes()),
         ::testing::Values(ElementType::f32),
         ::testing::Values(ov::element::undefined),
@@ -212,7 +209,7 @@ const auto params_4D_blocked_blocked_fusing = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())),
         ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
+ ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::Values(ElementType::f32), ::testing::Values(ov::element::undefined), @@ -286,7 +283,7 @@ const auto params_4D_Blocked_Planar = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_Blocked_Planar())), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -303,7 +300,7 @@ const auto params_4D_Planar_Blocked = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_Planar_Blocked())), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -320,7 +317,7 @@ const auto params_5D_Blocked_Planar = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_Blocked_Planar())), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -337,7 +334,7 @@ const auto params_5D_Planar_Blocked = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_Planar_Blocked())), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -353,8 +350,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_Planar_Blocked_x64, EltwiseLay const auto params_4D_1D_constant_mode = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), - ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -370,8 +367,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant_x64, EltwiseLayerC const auto params_4D_1D_parameter_mode = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), - ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -387,8 +384,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter_x64, EltwiseLayer const auto 
params_5D_1D_constant = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), - ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -404,8 +401,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant_x64, EltwiseLayerC const auto params_5D_1D_parameter = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), - ::testing::Values(ngraph::helpers::EltwiseTypes::ADD, ngraph::helpers::EltwiseTypes::MULTIPLY), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -424,7 +421,7 @@ const auto params_4D_dyn_const = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_4D_dyn_const()), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -441,7 +438,7 @@ const auto params_4D_blocked_blocked_dyn_const = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(inShapes_4D_dyn_const()), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -459,7 +456,7 @@ const auto params_4D_dyn_param = ::testing::Combine( ::testing::Combine( ::testing::Values(inShapes_4D_dyn_param()), ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -476,7 +473,7 @@ const auto params_4D_blocked_blocked_dyn_param = ::testing::Combine( ::testing::Combine( ::testing::Values(inShapes_4D_dyn_param()), ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -494,7 +491,7 @@ const auto params_4D_dyn_param_fusing = ::testing::Combine( ::testing::Combine( ::testing::Values(inShapes_4D_dyn_param_fusing()), ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::Values(ElementType::f32), ::testing::Values(ov::element::undefined), @@ -511,7 +508,7 @@ const auto params_4D_dyn_param_fusing_Blocked_Blocked = ::testing::Combine( ::testing::Combine( 
::testing::Values(inShapes_4D_dyn_param_fusing()), ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::Values(ElementType::f32), ::testing::Values(ov::element::undefined), @@ -529,7 +526,7 @@ const auto params_4D_blocked_blocked_dyn_param_fusing = ::testing::Combine( ::testing::Combine( ::testing::Values(inShapes_4D_dyn_param_fusing()), ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::Values(ElementType::f32), ::testing::Values(ov::element::undefined), @@ -549,7 +546,7 @@ const auto params_5D_dyn_const_Blocked_Blocked = ::testing::Combine( ::testing::Combine( ::testing::Values(inShapes_5D_dyn_const()), ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -567,7 +564,7 @@ const auto params_5D_dyn_param_Blocked_Blocked = ::testing::Combine( ::testing::Combine( ::testing::Values(inShapes_5D_dyn_param()), ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(opTypes()), ::testing::ValuesIn(netType()), ::testing::Values(ov::element::undefined), @@ -611,9 +608,9 @@ const auto params_4D_bitwise = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), ::testing::ValuesIn({ - ngraph::helpers::EltwiseTypes::BITWISE_AND, - ngraph::helpers::EltwiseTypes::BITWISE_OR, - ngraph::helpers::EltwiseTypes::BITWISE_XOR + ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR }), ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), @@ -636,9 +633,9 @@ const auto params_4D_bitwise_i32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), ::testing::ValuesIn({ - ngraph::helpers::EltwiseTypes::BITWISE_AND, - ngraph::helpers::EltwiseTypes::BITWISE_OR, - ngraph::helpers::EltwiseTypes::BITWISE_XOR + ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR }), ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), @@ -660,8 +657,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_i32, EltwiseLayerCPUTe const auto params_4D_bitwise_NOT = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ngraph::helpers::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ngraph::helpers::InputLayerType::CONSTANT }), + ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), + ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), ::testing::ValuesIn({ ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32 }), ::testing::Values(ov::element::Type_t::undefined), @@ -681,8 +678,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT, EltwiseLayerCPUTe const auto 
params_4D_bitwise_NOT_i32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ngraph::helpers::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ngraph::helpers::InputLayerType::CONSTANT }), + ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), + ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), ::testing::ValuesIn({ ov::element::Type_t::i16 }), ::testing::Values(ov::element::Type_t::undefined), @@ -698,6 +695,7 @@ const auto params_4D_bitwise_NOT_i32 = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT_i32, EltwiseLayerCPUTest, params_4D_bitwise_NOT_i32, EltwiseLayerCPUTest::getTestCaseName); -} // namespace -} // namespace Eltwise -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace Eltwise +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp index bc357d158f2b57..14d18faa55d879 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/matmul.cpp @@ -7,15 +7,11 @@ #include "test_utils/cpu_test_utils.hpp" #include "test_utils/filter_cpu_info.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" -#include -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace MatMul { namespace { const std::vector IS_x64 = { @@ -55,7 +51,7 @@ const auto matMulParams_x64 = ::testing::Combine(::testing::ValuesIn(IS_x64), ::testing::ValuesIn(netPRCs()), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())); @@ -70,7 +66,7 @@ const auto testParams2D_smoke = ::testing::Combine(::testing::Combine(::testing: ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())), ::testing::Values(MatMulNodeType::FullyConnected), @@ -81,7 +77,7 @@ const auto testParams2DBF16_smoke = ::testing::Combine(::testing::Combine(::test ::testing::ValuesIn(netPRCs()), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())), ::testing::Values(MatMulNodeType::FullyConnected), @@ -95,24 +91,24 @@ const auto testParams2D_nightly = ::testing::Combine(::testing::Combine(::testin ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), 
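Note: the InputLayerType value threaded through these Combine() stacks controls how a test materializes its second input. A minimal sketch of the distinction, assuming the ov::test::utils enum and opset v0 Constant/Parameter; the helper name and the zero fill value are made up:

#include <memory>
#include "common_test_utils/test_enums.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::Node> make_second_input(ov::test::utils::InputLayerType kind,
                                            const ov::element::Type& prc,
                                            const ov::Shape& shape) {
    if (kind == ov::test::utils::InputLayerType::CONSTANT) {
        // Folded into the graph: the plugin sees fixed data it can constant-fold or pre-pack.
        return ov::op::v0::Constant::create(prc, shape, {0});
    }
    // A real graph input: data arrives at infer time, so no folding or weight pre-packing.
    return std::make_shared<ov::op::v0::Parameter>(prc, shape);
}

This is why the FullyConnected suites pin CONSTANT (weights) while the dynamic MatMul suites use PARAMETER.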
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values((emptyAdditionalConfig()))), ::testing::Values(MatMulNodeType::FullyConnected), ::testing::ValuesIn(fusingParamsSet2D_nightly), ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); -std::vector> filterAdditionalConfig_Brgemm() { +std::vector filterAdditionalConfig_Brgemm() { #ifndef OV_CPU_WITH_MLAS // FP32 precision is covered by MLAS - std::vector> additionalConfig = { - std::map{/* empty config */} + std::vector additionalConfig = { + ov::AnyMap{/* empty config */} }; #else - std::vector> additionalConfig = {}; + std::vector additionalConfig = {}; #endif if (with_cpu_x86_bfloat16()) { - additionalConfig.push_back({{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}}); + additionalConfig.push_back({ov::hint::inference_precision(ov::element::bf16)}); } return additionalConfig; @@ -202,7 +198,7 @@ const auto fullyConnectedParams2D_Brgemm_smoke = ::testing::Combine(::testing::V ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -231,7 +227,7 @@ const auto matMulBrgemmParams_smoke = ::testing::Combine(::testing::ValuesIn(IS_ ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -260,7 +256,7 @@ const auto matMulBrgemmParams_nightly = ::testing::Combine(::testing::ValuesIn(I ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -334,7 +330,7 @@ const auto matMulBrgemmParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -398,7 +394,7 @@ const auto matMulParamsDynamicFusing = ::testing::Combine(::testing::ValuesIn(IS ::testing::ValuesIn(netPRCs()), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())); @@ -413,7 +409,7 @@ const auto matMulParamsBrgemmDynamicFusing = ::testing::Combine(::testing::Value ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -424,10 +420,10 @@ const auto testParamsBrgemmDynamicFusing = 
::testing::Combine(matMulParamsBrgemm INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Dynamic_Fusing, MatMulLayerCPUTest, testParamsBrgemmDynamicFusing, MatMulLayerCPUTest::getTestCaseName); -std::vector> filterAdditionalConfig_BrgemmAmx() { - std::vector> additionalConfig; +std::vector filterAdditionalConfig_BrgemmAmx() { + std::vector additionalConfig; if (with_cpu_x86_bfloat16()) { - additionalConfig.push_back({{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}}); + additionalConfig.push_back({ov::hint::inference_precision(ov::element::bf16)}); } return additionalConfig; @@ -460,7 +456,7 @@ const auto matMulBrgemmAmxParams_smoke = ::testing::Combine(::testing::ValuesIn( ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); @@ -482,7 +478,7 @@ const auto matMulBrgemmAmxParams_nightly = ::testing::Combine(::testing::ValuesI ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); @@ -497,7 +493,7 @@ const auto matMulBrgemmAmxParamsDynamic = ::testing::Combine(::testing::ValuesIn ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); @@ -544,7 +540,7 @@ const auto fullyConnectedParams2D_Brgconv1x1_smoke = ::testing::Combine(::testin ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())); @@ -597,7 +593,7 @@ const auto fullyConnectedParams3D_Brgconv1x1_smoke = ::testing::Combine(::testin ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())); @@ -612,7 +608,7 @@ const auto fullyConnectedParams2D_Brgemm_Amx_smoke = ::testing::Combine(::testin ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); @@ -650,7 +646,7 @@ const auto fullyConnectedParams2D_Brgemm_nightly = ::testing::Combine(::testing: ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), 
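Note: the filterAdditionalConfig_* helpers nearby capture the config migration this diff applies throughout: a stringly-typed InferenceEngine map becomes a typed ov::AnyMap property. A minimal before/after sketch; the literal "ENFORCE_BF16" is an assumption about what PluginConfigParams::KEY_ENFORCE_BF16 expands to, and the variable names are illustrative:

#include <map>
#include <string>
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/properties.hpp"

// Legacy: raw strings; a typo in the key or value only fails at runtime.
std::map<std::string, std::string> legacy_config{{"ENFORCE_BF16", "YES"}};

// OpenVINO 2.0: a typed property; key and value type are checked at compile time.
ov::AnyMap modern_config{ov::hint::inference_precision(ov::element::bf16)};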
::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -665,7 +661,7 @@ const auto fullyConnectedParams2D_Brgemm_Amx_nightly = ::testing::Combine(::test ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); @@ -680,7 +676,7 @@ const auto testParams2DBF16_nightly = ::testing::Combine(::testing::Combine(::te ::testing::ValuesIn(netPRCs()), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())), ::testing::Values(MatMulNodeType::FullyConnected), @@ -711,7 +707,7 @@ const auto fullyConnectedParams3DBF16_smoke = ::testing::Combine(::testing::Valu ::testing::ValuesIn(netPRCs()), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())); @@ -726,7 +722,7 @@ const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())); @@ -775,7 +771,7 @@ const auto fullyConnectedParams3D_nightly = ::testing::Combine(::testing::Values ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())); @@ -783,7 +779,7 @@ const auto fullyConnectedParams3DBF16_nightly = ::testing::Combine(::testing::Va ::testing::ValuesIn(netPRCs()), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())); @@ -800,6 +796,7 @@ const auto testParams3D_nightly = ::testing::Combine(fullyConnectedParams3D_nigh ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); INSTANTIATE_TEST_SUITE_P(nightly_FC_3D, MatMulLayerCPUTest, testParams3D_nightly, MatMulLayerCPUTest::getTestCaseName); -} // namespace -} // namespace MatMul -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace MatMul +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mlas/matmul.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mlas/matmul.cpp index 3be916593f0b7b..1d918f031f3696 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mlas/matmul.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mlas/matmul.cpp @@ -7,12 +7,10 @@ #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace MatMul { namespace { #ifdef OV_CPU_WITH_MLAS @@ -33,7 +31,7 @@ const auto testParams3D_MLAS_smoke = ::testing::Combine(::testing::Combine(::tes ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())), ::testing::Values(MatMulNodeType::FullyConnected), @@ -48,7 +46,7 @@ const auto testParams2D_MLAS_nightly = ::testing::Combine(::testing::Combine(::t ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())), ::testing::Values(MatMulNodeType::FullyConnected), @@ -67,7 +65,7 @@ const auto testParams2D_MLAS_smoke = ::testing::Combine(::testing::Combine(::tes ::testing::Values(ElementType::f32), ::testing::Values(ElementType::undefined), ::testing::Values(ElementType::undefined), - ::testing::Values(helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())), ::testing::Values(MatMulNodeType::FullyConnected), @@ -75,6 +73,7 @@ const auto testParams2D_MLAS_smoke = ::testing::Combine(::testing::Combine(::tes ::testing::ValuesIn(filterSpecificParams_MLAS())); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_MLAS, MatMulLayerCPUTest, testParams2D_MLAS_smoke, MatMulLayerCPUTest::getTestCaseName); #endif -} // namespace -} // namespace MatMul -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace MatMul +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp index 7e45470c395d48..e432dd5f0773cb 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/mvn.cpp @@ -6,16 +6,12 @@ #include "shared_test_classes/single_layer/mvn.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include -#include +#include "common_test_utils/ov_tensor_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace MVN { namespace { @@ -179,6 +175,38 @@ const auto Mvn4DStatic = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn4D_Static, MvnLayerCPUTest, Mvn4DStatic, MvnLayerCPUTest::getTestCaseName); +// test cases for tail processing of blocked layout with f32 precision.
+// could cover the SSE41 code path on SSE41 platforms (current bf16 cases are skipped on non-AVX512 machines) +const std::vector<ov::Shape>& inputShapesStatic_4D_CTails() { + static const std::vector<ov::Shape> inputShapesStatic_4D = { + {1, 3, 2, 2}, + {1, 4, 5, 5}, + {1, 7, 2, 5}, + }; + return inputShapesStatic_4D; +} + +ov::AnyMap additionalConfigCTails = { + {ov::hint::inference_precision.name(), ov::element::f32} +}; + +const auto Mvn4DStaticCTails = ::testing::Combine( + ::testing::Combine( + ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic_4D_CTails())), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(emptyReductionAxes()), + ::testing::Values(false), + ::testing::ValuesIn(normalizeVariance), + ::testing::ValuesIn(epsilon())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + ::testing::Values(emptyFusingSpec), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::f32), + ::testing::Values(additionalConfigCTails)); + +INSTANTIATE_TEST_SUITE_P(CompareWithRefs_Mvn4D_Static_CTails, MvnLayerCPUTest, Mvn4DStaticCTails, MvnLayerCPUTest::getTestCaseName); +// end + const auto Mvn5DStatic = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic_5D())), @@ -219,6 +247,7 @@ const auto MvnSmallSpatial = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_MvnSmallSpatial, MvnLayerCPUTest, MvnSmallSpatial, MvnLayerCPUTest::getTestCaseName); -} // namespace -} // namespace MVN -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace MVN +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp index 89406d9c6a43b1..15f43df6ed663d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/pooling.cpp @@ -7,16 +7,11 @@ #include "test_utils/cpu_test_utils.hpp" #include "test_utils/filter_cpu_info.hpp" #include "test_utils/fusing_test_utils.hpp" -#include -#include -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Pooling { namespace { @@ -27,16 +22,16 @@ const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}; const std::vector<CPUSpecificParams> vecCpuConfigs = {sse42, avx, avx512}; -const std::vector<LayerTestsDefinitions::maxPoolV8SpecificParams> paramsMaxV84D_ref = { - LayerTestsDefinitions::maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER }, - LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, - LayerTestsDefinitions::maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0}, - ngraph::element::Type_t::i32, 0, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT }, +const std::vector<maxPoolV8SpecificParams> paramsMaxV84D_ref = { + maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::element::Type_t::i32, 0, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER }, + maxPoolV8SpecificParams{ {4, 2}, {2, 2}, {1, 2}, {0, 0}, {0, 0}, + ov::element::Type_t::i32,
0, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT }, + maxPoolV8SpecificParams{ {4, 2}, {2, 1}, {2, 2}, {0, 0}, {0, 0}, + ov::element::Type_t::i32, 0, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT }, }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPoolV8_CPU_4D_ref, MaxPoolingV8LayerCPUTest, @@ -144,6 +139,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_CPU_5D_I8, PoolingLayerCPUTest, ::testing::ValuesIn(filterCPUInfoForDevice(vecCpuConfigsFusing_5D)), ::testing::ValuesIn(fusingParamsSet)), PoolingLayerCPUTest::getTestCaseName); -} // namespace -} // namespace Pooling -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace Pooling +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp index 8fec42f382464d..bb6c6282c1fc9a 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/random_uniform.cpp @@ -5,9 +5,9 @@ #include "single_layer_tests/classes/random_uniform.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace RandomUniform { static const std::vector output_prc_nightly = { @@ -42,5 +42,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_Param, RandomUniformLayerTestCPU, ::testing::Values(empty_plugin_config)), RandomUniformLayerTestCPU::getTestCaseName); -} // namespace RandomUniform -} // namespace CPULayerTestsDefinitions +} // namespace RandomUniform +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp index 4291894af8e68b..657adbae683995 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/reduce.cpp @@ -8,13 +8,10 @@ #include "test_utils/fusing_test_utils.hpp" #include "ov_lpt_models/common/builders.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Reduce { namespace { @@ -100,15 +97,13 @@ std::vector cpuParams_NHWC_4D = { CPUSpecificParams({nhwc}, {nhwc}, {}, {}) }; -const std::vector reductionLogicalTypes = { - ngraph::helpers::ReductionType::LogicalOr, - ngraph::helpers::ReductionType::LogicalAnd -}; +const std::vector reductionLogicalTypes = {ov::test::utils::ReductionType::LogicalOr, + ov::test::utils::ReductionType::LogicalAnd}; -const std::vector reductionTypesFusing = { - ngraph::helpers::ReductionType::Mean, - ngraph::helpers::ReductionType::Max, - ngraph::helpers::ReductionType::L2, +const std::vector reductionTypesFusing = { + ov::test::utils::ReductionType::Mean, + ov::test::utils::ReductionType::Max, + ov::test::utils::ReductionType::L2, }; // This custom subgraph is used to test post-ops fusing case with U8/I8 precision on output, @@ -116,7 +111,7 @@ const std::vector reductionTypesFusing = { const auto fusingFakeQuantizeTranspose = fusingSpecificParams{std::make_shared(std::vector{ {[](postNodeConfig& cfg){ auto localPrc = cfg.input->get_element_type(); - ngraph::Shape 
newShape(cfg.input->get_output_partial_shape(0).size(), 1); + ov::Shape newShape(cfg.input->get_output_partial_shape(0).size(), 1); const auto fakeQuantize = ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape); std::vector order(newShape.size()); std::iota(order.begin(), order.end(), 0); @@ -138,7 +133,7 @@ const std::vector fusingParamsSet { }; // Exclude cases of fusingFakeQuantizePerChannelRelu, where FQ for non-1 channel fallbacks -// to decomposed ngraph reference implementation, so such fusing tests are N/A +// to decomposed reference implementation, so such fusing tests are N/A const std::vector fusingParamsSet_KeepNoDims { /* activations */ fusingSwish, @@ -619,6 +614,7 @@ INSTANTIATE_TEST_SUITE_P( ReduceCPULayerTest::getTestCaseName ); -} // namespace -} // namespace Reduce -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace Reduce +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/scaled_attn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/scaled_attn.cpp index a4b993c58a7117..9fe53cdbed6d46 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/scaled_attn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/scaled_attn.cpp @@ -5,12 +5,10 @@ #include "single_layer_tests/classes/scaled_attn.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace ScaledAttn { const auto cpuSpec = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}; @@ -75,5 +73,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_ScaledAttn_CPU, params, ScaledAttnLayerCPUTest::getTestCaseName); -} // namespace ScaledAttn -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace ScaledAttn +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/transpose.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/transpose.cpp index e577a8f7a5e6bd..db5f673a6a2eea 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/transpose.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/instances/x64/transpose.cpp @@ -6,16 +6,13 @@ #include "shared_test_classes/single_layer/transpose.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace Transpose { namespace { -std::map additional_config; +ov::AnyMap additional_config; const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {}, {}, {}}; const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {}, {}, {}}; @@ -27,10 +24,10 @@ const auto cpuParams_nChw8c = CPUSpecificParams {{nChw8c}, {}, {}, {}}; const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {}, {}, {}}; const auto cpuParams_nspc = CPUSpecificParams {{acdb}, {}, {}, {}}; -const std::vector netPrecisions = { - Precision::I8, - Precision::BF16, - Precision::FP32 +const std::vector netPrecisions = { + ov::element::i8, + ov::element::bf16, + ov::element::f32 }; const std::vector CPUParams4D_blocked = { 
@@ -62,7 +59,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes4D_Transpose, TransposeLayerCPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes4D()), ::testing::ValuesIn(inputOrder4D()), - ::testing::Values(Precision::BF16), + ::testing::Values(ov::element::bf16), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config), ::testing::ValuesIn({CPUSpecificParams{}, cpuParams_nspc})), @@ -142,7 +139,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes5D_Transpose, TransposeLayerCPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes5D), ::testing::ValuesIn(inputOrder5D), - ::testing::Values(Precision::BF16), + ::testing::Values(ov::element::bf16), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(additional_config), ::testing::Values(CPUSpecificParams{})), @@ -177,6 +174,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamicShapes5D_PermutePerChannels, TransposeLaye ::testing::Values(additional_config), ::testing::Values(CPUSpecificParams{})), TransposeLayerCPUTest::getTestCaseName); -} // namespace -} // namespace Transpose -} // namespace CPULayerTestsDefinitions \ No newline at end of file +} // namespace +} // namespace Transpose +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp index bcf1bb5cc1a2e7..123cf64e3979e8 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/interpolate.cpp @@ -2,41 +2,40 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include -#include "openvino/core/preprocess/pre_post_process.hpp" -#include +#include "transformations/op_conversions/convert_interpolate11_downgrade.hpp" -using namespace ov::test; using namespace CPUTestUtils; -using ngraph::helpers::operator<<; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using InterpolateSpecificParams = std::tuple, // PadBegin - std::vector, // PadEnd - double>; // Cube coef +using InterpolateSpecificParams = + std::tuple, // PadBegin + std::vector, // PadEnd + double>; // Cube coef -using ShapeParams = std::tuple>, // scales or sizes values - std::vector>; // axes + ov::test::utils::InputLayerType, // input type + std::vector>, // scales or sizes values + std::vector>; // axes using InterpolateLayerCPUTestParamsSet = std::tuple>; + ov::AnyMap>; class InterpolateLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { @@ -47,7 +46,7 @@ class InterpolateLayerCPUTest : public testing::WithParamInterface additionalConfig; + ov::AnyMap additionalConfig; std::tie(specificParams, shapeParams, prec, cpuParams, fusingParams, additionalConfig) = obj.param; ov::op::v11::Interpolate::InterpolateMode mode; @@ -61,11 +60,12 @@ class InterpolateLayerCPUTest : public testing::WithParamInterface> shapeDataForInput; std::vector axes; std::tie(shapeCalcMode, inputShapes, shapeInputType, shapeDataForInput, axes) = shapeParams; + using ov::test::utils::operator<<; std::ostringstream result; result << "ShapeCalcMode=" << shapeCalcMode << "_"; result << "IS="; @@ 
-99,7 +99,7 @@ class InterpolateLayerCPUTest : public testing::WithParamInterface filterCPUInfoForDevice3D() { return resCPUParams; } -std::vector> filterAdditionalConfig3D() { +std::vector filterAdditionalConfig3D() { return { {} }; @@ -362,14 +364,14 @@ const std::vector shapeParams3D = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1}, {{1, 3, 4}, {2, 4, 6}, {1, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f}, {1.f, 1.f, 1.25f}, {1.f, 1.f, 1.5f}}, defaultAxes3D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1}, {{1, 3, 4}, {2, 4, 6}, {1, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 3, 6}, {2, 4, 8}, {1, 3, 6}}, defaultAxes3D.front() } @@ -414,7 +416,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_Test_3D, InterpolateLayerCPUTest, #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) const std::vector interpolateFusingParamsSet3D_fixed_C() { std::vector fuseParams; - if (InferenceEngine::with_cpu_x86_avx2()) { + if (ov::with_cpu_x86_avx2()) { fuseParams.push_back(fusingFakeQuantizePerChannelRelu); fuseParams.push_back(fusingMultiplyPerChannel); } @@ -426,14 +428,14 @@ const std::vector shapeParams3D_fixed_C = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 3, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f}}, defaultAxes3D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, 3, -1}, {{1, 3, 4}, {1, 3, 6}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 3, 8}}, defaultAxes3D.front() } @@ -539,14 +541,14 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout3D_Test, InterpolateLayerCPUTest // 4D std::vector filterCPUInfoForDevice() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nChw16c, x, x, x}, {nChw16c}, {"jit_avx512"}, "jit_avx512"}); resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x, x}, {nhwc}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nChw8c, x, x, x}, {nChw8c}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x, x}, {nhwc}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{nchw, x, x, x}, {nchw}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nChw8c, x, x, x}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}); resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x, x}, {nhwc}, {"jit_sse42"}, "jit_sse42"}); } else { @@ -568,28 +570,28 @@ const std::vector shapeParams4D_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() }, ShapeParams{ 
ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 11, 6, 7}, {2, 7, 8, 7}, {1, 11, 6, 7}}, defaultAxes4D.front() } @@ -599,14 +601,14 @@ const std::vector shapeParams4D_Full = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {1, 11, 5, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() } @@ -660,14 +662,14 @@ const std::vector shapeParams4D_fixed_C = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, 16, -1, -1}, {{1, 16, 4, 4}, {1, 16, 6, 5}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 16, 6, 7}}, defaultAxes4D.front() } @@ -811,14 +813,14 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout_Test, InterpolateLayerCPUTest, ////////////////////////5D///////////////////////////// std::vector filterCPUInfoForDevice5D() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nCdhw16c, x, x, x}, {nCdhw16c}, {"jit_avx512"}, "jit_avx512"}); resCPUParams.push_back(CPUSpecificParams{{ndhwc, x, x, x}, {ndhwc}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nCdhw8c, x, x, x}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{ndhwc, x, x, x}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}); resCPUParams.push_back(CPUSpecificParams{{ncdhw, x, x, x}, {ncdhw}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nCdhw8c, x, x, x}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}); resCPUParams.push_back(CPUSpecificParams{{ndhwc, x, x, x}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}); } else { @@ -839,28 +841,28 @@ const std::vector shapeParams5D_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 2}}, defaultAxes5D.front() }, 
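Note: every ShapeParams entry above is one five-field tuple. An annotated example with made-up shape and scale values, following the ShapeParams typedef declared earlier in this diff:

ShapeParams example{
    ov::op::v11::Interpolate::ShapeCalcMode::SCALES,   // whether field 4 holds scales or sizes
    InputShape{{}, {{1, 8, 6, 6, 6}}},                 // static 5D input: empty dynamic part, one shape
    ov::test::utils::InputLayerType::CONSTANT,         // scales enter the graph as a Constant
    {{1.f, 1.f, 2.f, 2.f, 2.f}},                       // one scale set: upsample D/H/W by 2x
    defaultAxes5D.front()                              // axes the scales apply to
};

With InputLayerType::PARAMETER the scales/sizes input is a Parameter instead, and one value set from field 4 is consumed per inference, which is why the dynamic-shape entries list several sets.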
ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}, {1.f, 1.f, 1.25f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 11, 6, 7, 2}, {2, 7, 8, 7, 4}, {1, 11, 6, 7, 2}}, defaultAxes5D.front() }, @@ -870,14 +872,14 @@ const std::vector shapeParams5D_Full = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 4}}, defaultAxes5D.front() } @@ -963,14 +965,14 @@ const std::vector shapeParams4D_corner = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{1, 11, 4, 4}, {{1, 11, 4, 4}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}}, defaultAxes4D.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{1, 11, 4, 4}, {{1, 11, 4, 4}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 11, 6, 7}, {1, 11, 8, 7}}, defaultAxes4D.front() } @@ -1016,56 +1018,56 @@ const std::vector shapeParams4D_Pillow_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 3, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2.0f, 4.0f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{2, 4, 16, 16}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{0.25f, 0.5f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{1, 3, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{5, 6}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{2, 4, 16, 16}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2, 8}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.25f, 1.5f}, {0.5f, 0.75f}, {1.25f, 1.5f}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.25f, 0.75f}}, 
defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 17, 4, 4}, {2, 3, 10, 12}, {1, 17, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{6, 8}, {5, 4}, {6, 8}}, defaultAxes4D_pillow.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 17, 4, 4}, {2, 3, 10, 12}, {1, 17, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{6, 8}}, defaultAxes4D_pillow.front() }, @@ -1073,7 +1075,7 @@ const std::vector shapeParams4D_Pillow_Smoke = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 17, 4, 4}, {2, 3, 10, 12}, {1, 17, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{4, 4}, {10, 20}, {10, 4}}, defaultAxes4D_pillow.front() } @@ -1081,20 +1083,18 @@ const std::vector shapeParams4D_Pillow_Smoke = { std::vector filterCPUInfoForDevice_pillow() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x}, {nhwc}, {"jit_avx512"}, "jit_avx512"}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x}, {nhwc}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nhwc, x, x}, {nhwc}, {"jit_sse42"}, "jit_sse42"}); } resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"ref"}, "ref"}); return resCPUParams; } -std::vector> filterPillowAdditionalConfig() { - return { - {{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO}} - }; +std::vector filterPillowAdditionalConfig() { + return {{}}; } const auto interpolateCasesBilinearPillow_Smoke = ::testing::Combine( @@ -1144,28 +1144,28 @@ const std::vector shapeParams4D_Pillow_Smoke_nchw_as_nhwc = { ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{}, {{1, 4, 4, 3}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2.0f, 4.0f}}, defaultAxes4D_pillow_nchw_as_nhwc.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{}, {{2, 16, 16, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{2, 8}}, defaultAxes4D_pillow_nchw_as_nhwc.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, -1, -1, {2, 20}}, {{1, 4, 4, 11}, {2, 6, 5, 7}, {1, 4, 4, 11}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.25f, 0.75f}}, defaultAxes4D_pillow_nchw_as_nhwc.front() }, ShapeParams{ ov::op::v11::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, -1, -1, {2, 20}}, {{1, 4, 4, 17}, {2, 10, 12, 3}, {1, 4, 4, 17}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{6, 8}}, defaultAxes4D_pillow_nchw_as_nhwc.front() } @@ -1177,11 +1177,11 @@ const std::vector> pads4D_nchw_as_nhwc = { std::vector filterCPUInfoForDevice_pillow_nchw_as_nhwc() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"jit_avx512"}, "jit_avx512"}); 
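Note: the CPUSpecificParams literals in these filter helpers pack four positional fields. The field names below are taken from the std::tie(inFmts, outFmts, priority, selectedType) unpacking that appears later in this diff; the concrete values are illustrative:

const auto annotated = CPUSpecificParams{
    {nchw, x, x},    // inFmts: expected memory format per input ('x' marks the auxiliary scalar inputs)
    {nchw},          // outFmts: expected memory format of the output
    {"jit_avx2"},    // priority: implementation priorities handed to the plugin
    "jit_avx2"       // selectedType: the primitive implementation the test asserts was selected
};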
- } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"jit_avx2"}, "jit_avx2"}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nchw, x, x}, {nchw}, {"jit_sse42"}, "jit_sse42"}); } return resCPUParams; @@ -1225,5 +1225,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateBicubicPillow_LayoutAlign_Test, Interp ::testing::ValuesIn(filterPillowAdditionalConfig())), InterpolateLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp index 4ba51e71dec1e2..8a5fe083cb1537 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/log_softmax.cpp @@ -3,20 +3,16 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using logSoftmaxLayerTestParams = std::tuple< - std::vector, // inputShape - Precision, // netPrecision - int64_t>; // axis +using logSoftmaxLayerTestParams = std::tuple, // inputShape + ov::element::Type, // netPrecision + int64_t>; // axis class LogSoftmaxLayerCPUTest : public testing::WithParamInterface, @@ -25,7 +21,7 @@ class LogSoftmaxLayerCPUTest public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector inputShapes; - Precision netPrecision; + ov::element::Type netPrecision; int64_t axis; std::tie(inputShapes, netPrecision, axis) = obj.param; @@ -44,7 +40,7 @@ class LogSoftmaxLayerCPUTest result << ov::test::utils::vec2str(item) << "_"; } } - result << "netPRC=" << netPrecision.name(); + result << "netPRC=" << netPrecision.to_string(); result << "Axis=" << axis; return result.str(); } @@ -54,20 +50,20 @@ class LogSoftmaxLayerCPUTest targetDevice = ov::test::utils::DEVICE_CPU; std::vector inputShapes; - Precision netPrecision; + ov::element::Type netPrecision; int64_t axis; std::tie(inputShapes, netPrecision, axis) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + auto ngPrc = netPrecision; inType = outType = ngPrc; - selectedType = std::string("unknown_") + netPrecision.name(); + selectedType = std::string("unknown_") + netPrecision.to_string(); init_input_shapes(inputShapes); ov::ParameterVector params{std::make_shared(ngPrc, inputDynamicShapes.front())}; - const auto logSoftmax = std::make_shared(params[0], axis); - const ngraph::ResultVector results{std::make_shared(logSoftmax)}; - function = std::make_shared(results, params, "logSoftmax"); + const auto logSoftmax = std::make_shared(params[0], axis); + const ov::ResultVector results{std::make_shared(logSoftmax)}; + function = std::make_shared(results, params, "logSoftmax"); } }; @@ -77,8 +73,8 @@ TEST_P(LogSoftmaxLayerCPUTest, CompareWithRefs) { } namespace { -const std::vector netPrecisions = { - Precision::FP32 +const std::vector netPrecisions = { + ov::element::f32 }; const std::vector> inputShapes2D = { @@ -120,4 +116,5 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_LogSoftmax4D_dynamic, LogSoftmaxLayerCPUTest, par LogSoftmaxLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp index 699d4ef1ab7504..1a333305cb6d1e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/logical.cpp @@ -2,33 +2,32 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "ov_models/builders.hpp" -#include "test_utils/cpu_test_utils.hpp" +#include "shared_test_classes/single_op/logical.hpp" + +#include "common_test_utils/node_builders/logical.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::helpers; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - LayerTestsDefinitions::LogicalTestParams, - CPUSpecificParams> -LogicalLayerCPUTestParamSet; +typedef std::tuple LogicalLayerCPUTestParamSet; class LogicalLayerCPUTest : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - LayerTestsDefinitions::LogicalTestParams basicParamsSet; + ov::test::LogicalTestParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; std::ostringstream result; - result << LayerTestsDefinitions::LogicalLayerTest::getTestCaseName(testing::TestParamInfo( - basicParamsSet, 0)); + result << ov::test::LogicalLayerTest::getTestCaseName( + testing::TestParamInfo(basicParamsSet, 0)); result << CPUTestsBase::getTestCaseName(cpuParams); @@ -37,125 +36,132 @@ class LogicalLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - LayerTestsDefinitions::LogicalParams::InputShapesTuple inputShapes; - ngraph::helpers::LogicalTypes logicalOpType; - ngraph::helpers::InputLayerType secondInputType; - InferenceEngine::Precision netPrecision; + std::vector inputShapes; + ov::test::utils::LogicalTypes logicalOpType; + ov::test::utils::InputLayerType secondInputType; + ov::element::Type netPrecision; std::string targetName; std::map additional_config; - std::tie(inputShapes, logicalOpType, secondInputType, netPrecision, inPrc, outPrc, - inLayout, outLayout, targetDevice, additional_config) = basicParamsSet; + std::tie(inputShapes, logicalOpType, secondInputType, netPrecision, targetDevice, additional_config) = + basicParamsSet; + init_input_shapes(inputShapes); - selectedType = getPrimitiveType() + "_" + inPrc.name(); + selectedType = getPrimitiveType() + "_" + ov::element::Type(inType).get_type_name(); - auto ngInputsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(Precision::BOOL); // Because ngraph supports only boolean input for logical ops + auto ngInputsPrc = ov::element::boolean; // Because ngraph supports only boolean input for logical ops configuration.insert(additional_config.begin(), additional_config.end()); - ov::ParameterVector inputs{std::make_shared(ngInputsPrc, ov::Shape(inputShapes.first))}; - std::shared_ptr 
logicalNode; - if (logicalOpType != ngraph::helpers::LogicalTypes::LOGICAL_NOT) { + ov::ParameterVector inputs{std::make_shared(ngInputsPrc, inputDynamicShapes[0])}; + std::shared_ptr logicalNode; + if (logicalOpType != ov::test::utils::LogicalTypes::LOGICAL_NOT) { std::shared_ptr secondInput; - if (secondInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto param = std::make_shared(ngInputsPrc, ov::Shape(inputShapes.second)); + if (secondInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto param = std::make_shared(ngInputsPrc, inputDynamicShapes[1]); secondInput = param; inputs.push_back(param); } else { - auto tensor = ov::test::utils::create_and_fill_tensor(ngInputsPrc, ov::Shape(inputShapes.second)); + auto tensor = ov::test::utils::create_and_fill_tensor(ngInputsPrc, targetStaticShapes[0][1]); secondInput = std::make_shared(tensor); } - logicalNode = ngraph::builder::makeLogical(inputs[0], secondInput, logicalOpType); + logicalNode = ov::test::utils::make_logical(inputs[0], secondInput, logicalOpType); } else { - logicalNode = ngraph::builder::makeLogical(inputs[0], ngraph::Output(), logicalOpType); + logicalNode = ov::test::utils::make_logical(inputs[0], ov::Output(), logicalOpType); } logicalNode->get_rt_info() = getCPUInfo(); - function = std::make_shared(logicalNode, inputs, "Logical"); + function = std::make_shared(logicalNode, inputs, "Logical"); } }; TEST_P(LogicalLayerCPUTest, CompareWithRefs) { - Run(); - CheckPluginRelatedResults(executableNetwork, "Eltwise"); + run(); + CheckPluginRelatedResults(compiledModel, "Eltwise"); } namespace { -std::map, std::vector>> inputShapes = { - {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, - {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, - {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, - {{1, 3, 20}, {{20}, {2, 1, 1}}}, - {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, - {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, -}; - -std::map, std::vector>> inputShapesNot = { - {{1}, {}}, - {{5}, {}}, - {{2, 200}, {}}, - {{1, 3, 20}, {}}, - {{2, 17, 3, 4}, {}}, - {{2, 1, 1, 3, 1}, {}}, +std::map> inputShapes = { + {{1}, {{1}, {17}, {1, 1}, {2, 18}, {1, 1, 2}, {2, 2, 3}, {1, 1, 2, 3}}}, + {{5}, {{1}, {1, 1}, {2, 5}, {1, 1, 1}, {2, 2, 5}}}, + {{2, 200}, {{1}, {200}, {1, 200}, {2, 200}, {2, 2, 200}}}, + {{1, 3, 20}, {{20}, {2, 1, 1}}}, + {{2, 17, 3, 4}, {{4}, {1, 3, 4}, {2, 1, 3, 4}}}, + {{2, 1, 1, 3, 1}, {{1}, {1, 3, 4}, {2, 1, 3, 4}, {1, 1, 1, 1, 1}}}, }; -std::vector inputsPrecisions = { - InferenceEngine::Precision::BOOL, +std::map> inputShapesNot = { + {{1}, {}}, + {{5}, {}}, + {{2, 200}, {}}, + {{1, 3, 20}, {}}, + {{2, 17, 3, 4}, {}}, + {{2, 1, 1, 3, 1}, {}}, }; -std::vector logicalOpTypes = { - ngraph::helpers::LogicalTypes::LOGICAL_AND, - ngraph::helpers::LogicalTypes::LOGICAL_OR, - ngraph::helpers::LogicalTypes::LOGICAL_XOR, +std::vector logicalOpTypes = { + ov::test::utils::LogicalTypes::LOGICAL_AND, + ov::test::utils::LogicalTypes::LOGICAL_OR, + ov::test::utils::LogicalTypes::LOGICAL_XOR, }; -std::vector secondInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, +std::vector secondInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, }; std::map additional_config; -std::vector bf16InpOutPrc = {Precision::BF16, Precision::FP32}; +std::vector> combine_shapes( + const std::map>& input_shapes_static) { + std::vector> result; + for (const auto& 
input_shape : input_shapes_static) { + for (auto& item : input_shape.second) { + result.push_back({input_shape.first, item}); + } + + if (input_shape.second.empty()) { + result.push_back({input_shape.first, {}}); + } + } + return result; +} const auto LogicalTestParams = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(LayerTestsDefinitions::LogicalLayerTest::combineShapes(inputShapes)), - ::testing::ValuesIn(logicalOpTypes), - ::testing::ValuesIn(secondInputTypes), - ::testing::Values(Precision::BF16), - ::testing::ValuesIn(bf16InpOutPrc), - ::testing::ValuesIn(bf16InpOutPrc), - ::testing::Values(Layout::ANY), - ::testing::Values(Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(additional_config)), - ::testing::Values(emptyCPUSpec)); + ::testing::Combine(::testing::ValuesIn(ov::test::static_shapes_to_test_representation(combine_shapes(inputShapes))), + ::testing::ValuesIn(logicalOpTypes), + ::testing::ValuesIn(secondInputTypes), + ::testing::Values(ov::element::bf16), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::Values(emptyCPUSpec)); const auto LogicalTestParamsNot = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(LayerTestsDefinitions::LogicalLayerTest::combineShapes(inputShapesNot)), - ::testing::Values(ngraph::helpers::LogicalTypes::LOGICAL_NOT), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::Values(Precision::BF16), - ::testing::ValuesIn(bf16InpOutPrc), - ::testing::ValuesIn(bf16InpOutPrc), - ::testing::Values(Layout::ANY), - ::testing::Values(Layout::ANY), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(additional_config)), - ::testing::Values(emptyCPUSpec)); - - -INSTANTIATE_TEST_SUITE_P(smoke_Logical_Eltwise_CPU_BF16, LogicalLayerCPUTest, LogicalTestParams, LogicalLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Logical_Not_Eltwise_CPU_BF16, LogicalLayerCPUTest, LogicalTestParamsNot, LogicalLayerCPUTest::getTestCaseName); - -} // namespace -} // namespace CPULayerTestsDefinitions + ::testing::Combine( + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(combine_shapes(inputShapesNot))), + ::testing::Values(ov::test::utils::LogicalTypes::LOGICAL_NOT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::element::bf16), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(additional_config)), + ::testing::Values(emptyCPUSpec)); + +INSTANTIATE_TEST_SUITE_P(smoke_Logical_Eltwise_CPU_BF16, + LogicalLayerCPUTest, + LogicalTestParams, + LogicalLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Logical_Not_Eltwise_CPU_BF16, + LogicalLayerCPUTest, + LogicalTestParamsNot, + LogicalLayerCPUTest::getTestCaseName); + +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp index 00d887206696ae..5073d8f96624fb 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/loop.cpp @@ -2,17 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "shared_test_classes/single_layer/loop.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "ov_models/builders.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" -using namespace InferenceEngine; -using namespace 
ov; -using namespace test; -using namespace ngraph::helpers; +using namespace ov::test::utils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { enum LOOP_IN_TYPE { INVARIANT, @@ -68,8 +66,10 @@ class LoopLayerCPUTest : public testing::WithParamInterface, size_t i = 0; if (funcInputs[i].get_node_shared_ptr()->get_friendly_name() == "trip_count") { const auto& funcInput = funcInputs[i]; - ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), - funcInput.get_shape(), 10, 1); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 1; + in_data.range = 10; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), funcInput.get_shape(), in_data); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); i++; } @@ -77,8 +77,11 @@ class LoopLayerCPUTest : public testing::WithParamInterface, // parameters for body for (; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; - ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), - targetInputStaticShapes[i], 15, 0, 32768); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 15; + in_data.resolution = 32768; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } } @@ -101,39 +104,39 @@ class LoopLayerCPUTest : public testing::WithParamInterface, } // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic()); - ngraph::ParameterVector body_params; + const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic()); + ov::ParameterVector body_params; for (const auto &pshape : body_params_shapes) { - body_params.emplace_back(std::make_shared(netType, pshape)); + body_params.emplace_back(std::make_shared(netType, pshape)); } - auto body_condition_const = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, exec_cond); - std::shared_ptr trip_count_input; + auto body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, exec_cond); + std::shared_ptr trip_count_input; int shift = 0; if (trip_count_type == InputLayerType::PARAMETER) { for (auto& target : targetStaticShapes) - target.insert(target.begin(), ngraph::Shape{}); - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); + target.insert(target.begin(), ov::Shape{}); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}); trip_count_input->set_friendly_name("trip_count"); - params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); + params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); shift++; } else { - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); } // Body - std::shared_ptr Zo = body_params[0]; + std::shared_ptr Zo = body_params[0]; for (size_t i = 1; i < body_params.size(); ++i) { - Zo = std::make_shared(body_params[i], Zo); + Zo = std::make_shared(body_params[i], Zo); } - auto body = std::make_shared(ngraph::OutputVector{body_condition_const, Zo}, + auto body = 
std::make_shared(ov::OutputVector{body_condition_const, Zo}, body_params); - auto loop = std::make_shared(trip_count_input, exec_condition); + auto loop = std::make_shared(trip_count_input, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); for (size_t i = 0; i < body_params.size(); ++i) { if (types[i] == LOOP_IN_TYPE::INVARIANT) { @@ -152,10 +155,10 @@ class LoopLayerCPUTest : public testing::WithParamInterface, // start=0, stride=1, part_size=1, end=-1, axis=1 auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1); - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - auto result2 = std::make_shared(out2); - function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop"); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + auto result2 = std::make_shared(out2); + function = std::make_shared(ov::ResultVector{result0, result1, result2}, params, "loop"); } }; @@ -177,45 +180,45 @@ class LoopWhileLayerCPUTest : public LoopLayerCPUTest { targetDevice = ov::test::utils::DEVICE_CPU; init_input_shapes(shapes); for (auto& target : targetStaticShapes) - target.insert(target.begin(), ngraph::Shape{}); + target.insert(target.begin(), ov::Shape{}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(inType, shape)); } // Body parameters - const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic()); - ngraph::ParameterVector body_params = { std::make_shared(ngraph::element::i64, ngraph::Shape{}) }; + const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic()); + ov::ParameterVector body_params = { std::make_shared(ov::element::i64, ov::Shape{}) }; for (const auto &pshape : body_params_shapes) { - body_params.emplace_back(std::make_shared(inType, pshape)); + body_params.emplace_back(std::make_shared(inType, pshape)); } - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, exec_cond); - auto trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{}); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{}, exec_cond); + auto trip_count_input = std::make_shared(ov::element::i64, ov::Shape{}); trip_count_input->set_friendly_name("trip_count"); params.insert(params.begin(), trip_count_input); // Body - auto const_body_cond = std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto const_body_step = std::make_shared(ngraph::element::i64, ngraph::Shape{}, 2); - auto less = std::make_shared(body_params[0], const_body_cond); - auto exec_idx = std::make_shared(body_params[0], const_body_step); + auto const_body_cond = std::make_shared(ov::element::i64, ov::Shape{}, 10); + auto const_body_step = std::make_shared(ov::element::i64, ov::Shape{}, 2); + auto less = std::make_shared(body_params[0], const_body_cond); + auto exec_idx = std::make_shared(body_params[0], const_body_step); - auto node_const = std::make_shared(inType, ngraph::Shape{}, 2); - auto node = std::make_shared(body_params[1], node_const); + auto node_const = std::make_shared(inType, ov::Shape{}, 2); + auto node = std::make_shared(body_params[1], node_const); - // reference ngraph function is resized by input static shapes in tests but + // reference model is resized by input static shapes in tests but // loop with pad in body has different 
input shape in each infer request so tests don't support it. // Alternative - eltwise instead of pad // const std::vector begin(inputDynamicShapes[0].rank().get_length(), 1); // const std::vector end(inputDynamicShapes[0].rank().get_length(), 0); // auto node = ngraph::builder::makePad(body_params[1], begin, end, .0f, PadMode::CONSTANT); - auto body = std::make_shared(ngraph::OutputVector{less, exec_idx, node}, body_params); + auto body = std::make_shared(ov::OutputVector{less, exec_idx, node}, body_params); - auto loop = std::make_shared(params[0], exec_condition); + auto loop = std::make_shared(params[0], exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_merged_input(body_params[0], params[0], exec_idx); loop->set_merged_input(body_params[1], params[1], node); @@ -223,9 +226,9 @@ class LoopWhileLayerCPUTest : public LoopLayerCPUTest { auto out0 = loop->get_iter_value(exec_idx, -1); auto out1 = loop->get_iter_value(node, -1); - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - function = std::make_shared(ngraph::ResultVector{ result0, result1 }, params, "loop"); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + function = std::make_shared(ov::ResultVector{ result0, result1 }, params, "loop"); } }; @@ -256,25 +259,25 @@ class LoopForDiffShapesLayerCPUTest : public LoopLayerCPUTest { } // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic()); - ngraph::ParameterVector body_params; + const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic()); + ov::ParameterVector body_params; for (const auto &pshape : body_params_shapes) { - body_params.emplace_back(std::make_shared(inType, pshape)); + body_params.emplace_back(std::make_shared(inType, pshape)); } - auto body_condition_const = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, exec_cond); - std::shared_ptr trip_count_input; + auto body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, exec_cond); + std::shared_ptr trip_count_input; int shift = 0; if (trip_count_type == InputLayerType::PARAMETER) { for (auto& target : targetStaticShapes) - target.insert(target.begin(), ngraph::Shape{}); - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); + target.insert(target.begin(), ov::Shape{}); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}); trip_count_input->set_friendly_name("trip_count"); - params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); + params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); shift++; } else { - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); } // Body @@ -288,11 +291,11 @@ class LoopForDiffShapesLayerCPUTest : public LoopLayerCPUTest { auto constant = ngraph::builder::makeConstant(inType, std::vector{1}, std::vector{0.5}); auto eltwise = std::make_shared(body_params[0], constant); - auto body = std::make_shared(ngraph::OutputVector{body_condition_const, s, eltwise}, body_params); + 
auto body = std::make_shared(ov::OutputVector{body_condition_const, s, eltwise}, body_params); - auto loop = std::make_shared(trip_count_input, exec_condition); + auto loop = std::make_shared(trip_count_input, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); loop->set_merged_input(body_params[0], params[shift], eltwise); @@ -303,10 +306,10 @@ class LoopForDiffShapesLayerCPUTest : public LoopLayerCPUTest { // start=0, stride=1, part_size=1, end=-1, axis=1 auto out2 = loop->get_concatenated_slices(s, 0, 1, 1, -1, 1); - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - auto result2 = std::make_shared(out2); - function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop"); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + auto result2 = std::make_shared(out2); + function = std::make_shared(ov::ResultVector{result0, result1, result2}, params, "loop"); } }; @@ -333,36 +336,36 @@ class LoopForConcatLayerCPUTest : public LoopLayerCPUTest { params.push_back(std::make_shared(inType, shape)); } // Body parameters - const std::vector body_params_shapes(shapes.size(), ngraph::PartialShape::dynamic()); + const std::vector body_params_shapes(shapes.size(), ov::PartialShape::dynamic()); ov::ParameterVector body_params; for (auto&& shape : inputDynamicShapes) { body_params.push_back(std::make_shared(inType, shape)); } - auto body_condition_const = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, true); - auto exec_condition = std::make_shared(ngraph::element::boolean, ngraph::Shape{1}, exec_cond); - std::shared_ptr trip_count_input; + auto body_condition_const = std::make_shared(ov::element::boolean, ov::Shape{1}, true); + auto exec_condition = std::make_shared(ov::element::boolean, ov::Shape{1}, exec_cond); + std::shared_ptr trip_count_input; int shift = 0; if (trip_count_type == InputLayerType::PARAMETER) { for (auto& target : targetStaticShapes) - target.insert(target.begin(), ngraph::Shape{}); - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}); + target.insert(target.begin(), ov::Shape{}); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}); trip_count_input->set_friendly_name("trip_count"); - params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); + params.insert(params.begin(), ov::as_type_ptr(trip_count_input)); shift++; } else { - trip_count_input = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count); + trip_count_input = std::make_shared(ov::element::i64, ov::Shape{1}, trip_count); } // Body auto constant = ngraph::builder::makeConstant(inType, std::vector{1}, std::vector{10}); - auto add = std::make_shared(body_params[0], constant); + auto add = std::make_shared(body_params[0], constant); auto concat = std::make_shared(ov::NodeVector{body_params[1], add}, 0); - auto body = std::make_shared(ngraph::OutputVector{body_condition_const, concat}, body_params); + auto body = std::make_shared(ov::OutputVector{body_condition_const, concat}, body_params); - auto loop = std::make_shared(trip_count_input, exec_condition); + auto loop = std::make_shared(trip_count_input, exec_condition); loop->set_function(body); - loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0}); + loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0}); 
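// A minimal self-contained sketch of the ov::op::v5::Loop wiring pattern these hunks
// migrate to: the body is a separate ov::Model whose output 0 is the continue-condition,
// and SpecialBodyPorts{-1, 0} declares exactly that mapping. All names below are
// illustrative and not part of the test suite.
#include "openvino/core/model.hpp"
#include "openvino/op/ops.hpp"

std::shared_ptr<ov::Model> make_counted_loop(int64_t trip_count_value) {
    using namespace ov;
    auto x = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());

    // Body: output 0 is the condition (always true, so the loop runs exactly
    // trip_count iterations); output 1 is x + 1, fed back as a merged input.
    auto body_x = std::make_shared<op::v0::Parameter>(element::f32, PartialShape::dynamic());
    auto cond = std::make_shared<op::v0::Constant>(element::boolean, Shape{1}, true);
    auto one = std::make_shared<op::v0::Constant>(element::f32, Shape{}, 1.0f);
    auto next = std::make_shared<op::v1::Add>(body_x, one);
    auto body = std::make_shared<Model>(OutputVector{cond, next}, ParameterVector{body_x});

    auto trip = std::make_shared<op::v0::Constant>(element::i64, Shape{1}, trip_count_value);
    auto exec = std::make_shared<op::v0::Constant>(element::boolean, Shape{1}, true);
    auto loop = std::make_shared<op::v5::Loop>(trip, exec);
    loop->set_function(body);
    // {-1, 0}: no current-iteration input; body output 0 is the condition.
    loop->set_special_body_ports(op::v5::Loop::SpecialBodyPorts{-1, 0});
    loop->set_merged_input(body_x, x, next);  // x on iteration 0, next afterwards

    auto result = std::make_shared<op::v0::Result>(loop->get_iter_value(next, -1));
    return std::make_shared<Model>(ResultVector{result}, ParameterVector{x}, "counted_loop");
}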
loop->set_invariant_input(body_params[0], params[shift]); loop->set_merged_input(body_params[1], params[shift + 1], concat); @@ -370,9 +373,9 @@ class LoopForConcatLayerCPUTest : public LoopLayerCPUTest { auto out0 = loop->get_iter_value(body_condition_const, -1); auto out1 = loop->get_iter_value(concat, -1); - auto result0 = std::make_shared(out0); - auto result1 = std::make_shared(out1); - function = std::make_shared(ngraph::ResultVector{result0, result1}, params, "loop"); + auto result0 = std::make_shared(out0); + auto result1 = std::make_shared(out1); + function = std::make_shared(ov::ResultVector{result0, result1}, params, "loop"); } }; @@ -427,7 +430,11 @@ class StaticLoopDynamicSubgraphCPUTest : public SubgraphBaseTest { auto* dataPtr = tensor.data(); *dataPtr = true; } else { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 2560, 0, 256); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 2560; + in_data.resolution = 256; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -669,4 +676,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_LoopForConcat, LoopForConcatLayerCPUTest, LoopLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp index 2d47fe19f18a8d..2e0e5b9541338e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lrn.cpp @@ -2,17 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "test_utils/cpu_test_utils.hpp" - +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using LRNParams = std::tuple< ElementType, // data precision InputShape, // data shape @@ -59,8 +56,8 @@ class LRNLayerCPUTest : public testing::WithParamInterface, public ov for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(inputPrecision, shape)); } - auto axesNode = ngraph::opset1::Constant::create(ngraph::element::i32, { axes.size() }, axes); - auto lrn = std::make_shared(params[0], axesNode, alpha, beta, bias, size); + auto axesNode = ov::op::v0::Constant::create(ov::element::i32, { axes.size() }, axes); + auto lrn = std::make_shared(params[0], axesNode, alpha, beta, bias, size); function = makeNgraphFunction(inputPrecision, params, lrn, "LRN"); } }; @@ -71,7 +68,7 @@ TEST_P(LRNLayerCPUTest, CompareWithRefs) { } const std::vector inputPrecisions = { - ngraph::element::f32, + ov::element::f32, }; const std::vector> axes = { @@ -120,4 +117,5 @@ const auto testCases = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, LRNLayerCPUTest, testCases, LRNLayerCPUTest::getTestCaseName); -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp index b03a7a35e53666..83a0126444f36a 100644 
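// Sketch of the ov::op::v0::LRN construction the lrn.cpp hunk above moves to, with the
// reduction axes passed as an explicit constant input. Shapes and attribute values here
// are illustrative only.
auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 16, 24, 24});
auto axes = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {1});  // normalize across channels
const double alpha = 9.9e-5, beta = 2.0, bias = 1.0;
const size_t size = 5;  // local window size
auto lrn = std::make_shared<ov::op::v0::LRN>(data, axes, alpha, beta, bias, size);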
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_cell.cpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/lstm_cell.hpp" + #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using LSTMCellCpuSpecificParams = typename std::tuple< std::vector, // Shapes @@ -18,7 +19,7 @@ using LSTMCellCpuSpecificParams = typename std::tuple< float, // clip ElementType, // Network precision CPUSpecificParams, // CPU specific params - std::map // Additional config + ov::AnyMap // Additional config >; class LSTMCellLayerCPUTest : public testing::WithParamInterface, @@ -31,7 +32,7 @@ class LSTMCellLayerCPUTest : public testing::WithParamInterface additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, decompose, activations, clip, netPrecision, cpuParams, additionalConfig) = obj.param; @@ -57,8 +58,7 @@ class LSTMCellLayerCPUTest : public testing::WithParamInterface> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::bf16)}}, + {{ov::hint::inference_precision(ov::element::f32)}}}; CPUSpecificParams cpuParams{{nc, nc, nc}, {nc}, {"ref_any"}, "ref_any"}; @@ -201,4 +201,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, LSTMCellLayerCPUTest, ::testing::ValuesIn(additionalConfig)), LSTMCellLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp index 206c29731b7353..d484942d899559 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/lstm_sequence.cpp @@ -2,41 +2,42 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/node_builders/lstm_cell.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" +#include + using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using LSTMSequenceCpuSpecificParams = typename std::tuple< - std::vector, // Shapes - ngraph::helpers::SequenceTestsMode, // Pure Sequence or TensorIterator - std::vector, // Activations - float, // Clip - ngraph::op::RecurrentSequenceDirection, // Direction - ElementType, // Network precision - CPUSpecificParams, // CPU specific params - std::map // Additional config ->; +using LSTMSequenceCpuSpecificParams = + typename std::tuple, // Shapes + ov::test::utils::SequenceTestsMode, // Pure Sequence or TensorIterator + std::vector, // Activations + float, // Clip + ov::op::RecurrentSequenceDirection, // Direction + ElementType, // Network precision + CPUSpecificParams, // CPU 
specific params + ov::AnyMap // Additional config + >; class LSTMSequenceCPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { std::vector inputShapes; - ngraph::helpers::SequenceTestsMode seqMode; + ov::test::utils::SequenceTestsMode seqMode; std::vector activations; float clip; ov::op::RecurrentSequenceDirection direction; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, seqMode, activations, clip, direction, netPrecision, cpuParams, additionalConfig) = obj.param; @@ -62,9 +63,8 @@ class LSTMSequenceCPUTest : public testing::WithParamInterface(); manager.register_pass(); manager.run_passes(function); @@ -188,15 +189,14 @@ TEST_P(LSTMSequenceCPUTest, CompareWithRefs) { namespace { /* CPU PARAMS */ -std::vector> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; CPUSpecificParams cpuParams{{ntc, tnc, tnc}, {ntc, tnc, tnc}, {"ref_any"}, "ref_any"}; // CPUSpecificParams cpuParamsBatchSizeOne{{tnc, ntc, ntc}, {tnc, ntc, ntc}, {"ref_any"}, "ref_any"}; CPUSpecificParams cpuParamsBatchSizeOne{{tnc, tnc, tnc}, {tnc, tnc, tnc}, {"ref_any"}, "ref_any"}; -std::vector mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ}; +std::vector mode{ov::test::utils::SequenceTestsMode::PURE_SEQ}; // oneDNN supports only sigmoid-tanh-tanh std::vector> activations = {{"sigmoid", "tanh", "tanh"}}; // oneDNN supports only zero clip @@ -236,7 +236,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_static, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, LSTMSequenceCPUTest, @@ -247,7 +247,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_static_bf16, LSTMSequenceCPUTest, @@ -345,7 +345,7 @@ namespace dynamicShapesBatchSwitch { const int seq_length = 1; const int hidden_size = 1024; const int num_directions = 1; - const ngraph::helpers::SequenceTestsMode mode = ngraph::helpers::SequenceTestsMode::PURE_SEQ; + const ov::test::utils::SequenceTestsMode mode = ov::test::utils::SequenceTestsMode::PURE_SEQ; CPUSpecificParams cpuParams{{ntc, tnc, tnc}, {ntc, tnc, tnc}, {"ref_any"}, "ref_any"}; const std::vector shapes = { @@ -396,7 +396,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_batch, LSTMSequenceCPUTest, ::testing::Values(ov::op::RecurrentSequenceDirection::FORWARD), ::testing::ValuesIn(netPrecisions), ::testing::Values(dynamicShapesBatchSwitch::cpuParams), - ::testing::Values(std::map{{"_dynamic_batch_test", "yes"}})), + ::testing::Values(ov::AnyMap{{"_dynamic_batch_test", "yes"}})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_dynamic, LSTMSequenceCPUTest, @@ -407,7 +407,7 @@ 
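// The ov::AnyMap entries in additionalConfig above replace the legacy KEY_ENFORCE_BF16
// plugin key. Outside the test harness, the same hint is applied at compile time roughly
// as sketched below (the model path and helper name are hypothetical).
#include <string>
#include "openvino/openvino.hpp"

ov::CompiledModel compile_bf16(ov::Core& core, const std::string& model_path) {
    auto model = core.read_model(model_path);
    // Request bf16 floating-point math on CPU; pass ov::element::f32 instead to
    // keep full fp32 precision (the two entries in additionalConfig above).
    return core.compile_model(model, "CPU",
                              ov::hint::inference_precision(ov::element::bf16));
}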
INSTANTIATE_TEST_SUITE_P(smoke_dynamic, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, LSTMSequenceCPUTest, @@ -418,7 +418,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_dynamic, LSTMSequenceCPUTest, @@ -429,7 +429,7 @@ INSTANTIATE_TEST_SUITE_P(nightly_dynamic, LSTMSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), LSTMSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, LSTMSequenceCPUTest, @@ -453,5 +453,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16_BatchSizeOne, LSTMSequenceCPUTest, ::testing::Values(cpuParamsBatchSizeOne), ::testing::Values(additionalConfig[1])), LSTMSequenceCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp index ef99871333e9f9..8f602c697f59b7 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/matmul_sparse.cpp @@ -2,23 +2,20 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/mat_mul.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "test_utils/fusing_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "cpu/cpu_config.hpp" +#include "openvino/runtime/intel_cpu/properties.hpp" #include "ov_models/builders.hpp" -#include -#include +#include "ov_ops/type_relaxed.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/utils/generate_inputs.hpp" -#include "cpu/cpu_config.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/single_layer/mat_mul.hpp" +#include "test_utils/fusing_test_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { struct ShapeRelatedParams { std::vector inputShapes; @@ -32,7 +29,7 @@ typedef std::tuple< ElementType, // Output precision fusingSpecificParams, CPUSpecificParams, - std::map, // Additional config + ov::AnyMap, // Additional config float // Weights sparse rate > MatMulSparseParamSet; @@ -44,7 +41,7 @@ class MatMulSparseCPUTest : public testing::WithParamInterface additionalConfig; + ov::AnyMap additionalConfig; float weiSparseRate; std::tie(shapeRelatedParams, inType, weiType, outType, fusingParams, cpuParams, additionalConfig, weiSparseRate) = obj.param; @@ -76,7 +73,7 @@ class MatMulSparseCPUTest : public testing::WithParamInterface SparseRate80 = {{CPUConfigParams::KEY_CPU_SPARSE_WEIGHTS_DECOMPRESSION_RATE, "0.8"}}; +const ov::AnyMap emptyConfig = {}; +const ov::AnyMap SparseRate50 = 
{{ov::intel_cpu::sparse_weights_decompression_rate(0.5)}}; +const ov::AnyMap SparseRate80 = {{ov::intel_cpu::sparse_weights_decompression_rate(0.8)}}; const std::vector IS2D_sparse_smoke = { {static_shapes_to_test_representation({{64, 64}, {64, 64}}), {false, true}}, @@ -340,4 +334,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_I8_sparse, MatMulSparseCPUTest, testParams3 } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp index 07b1e9c3405603..dfc7a48bf2559c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/non_max_suppression.cpp @@ -2,20 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include - -#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "ov_models/builders.hpp" -#include -#include "test_utils/cpu_test_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "shared_test_classes/base/utils/ranges.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { enum { BATCHES, @@ -42,7 +38,7 @@ using NmsParams = std::tuple, virtual p InputShapeParams inShapeParams; InputPrecisions inPrecisions; int32_t maxOutBoxesPerClass; - ngraph::helpers::InputLayerType maxOutBoxesType; + ov::test::utils::InputLayerType maxOutBoxesType; ThresholdValues thrValues; float iouThr, scoreThr, softNmsSigma; ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding; @@ -82,6 +78,7 @@ class NmsLayerCPUTest : public testing::WithParamInterface, virtual p std::tie(numBatches, numBoxes, numClasses) = ts; result << "(nB=" << numBatches << "_nBox=" << numBoxes << "_nC=" << numClasses << ")_"; } + using ov::operator<<; result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_"; result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_"; result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_"; @@ -113,7 +110,7 @@ class NmsLayerCPUTest : public testing::WithParamInterface, virtual p InputShapeParams inShapeParams; InputPrecisions inPrecisions; ThresholdValues thrValues; - ngraph::helpers::InputLayerType maxOutBoxesType; + ov::test::utils::InputLayerType maxOutBoxesType; float iouThr, scoreThr, softNmsSigma; ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding; bool sortResDescend; @@ -129,24 +126,24 @@ class NmsLayerCPUTest : public testing::WithParamInterface, virtual p std::tie(bounds, targetInDims) = inShapeParams; if (!bounds.empty()) { - inputDynamicShapes = std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}}; + inputDynamicShapes = std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}}; } else { size_t batches, boxes, classes; std::tie(batches, boxes, classes) = targetInDims.front(); ov::Dimension numBatches(batches), numBoxes(boxes), numClasses(classes); - inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}; + inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}; } for (const auto &ts : 
targetInDims) { size_t numBatches, numBoxes, numClasses; std::tie(numBatches, numBoxes, numClasses) = ts; - targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}); - if (maxOutBoxesType == ngraph::helpers::InputLayerType::PARAMETER) { - targetStaticShapes.back().push_back(ngraph::Shape{1}); + targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}}); + if (maxOutBoxesType == ov::test::utils::InputLayerType::PARAMETER) { + targetStaticShapes.back().push_back(ov::Shape{1}); } } - std::shared_ptr maxOutBoxesPerClassNode; + std::shared_ptr maxOutBoxesPerClassNode; ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(paramsPrec, shape)); @@ -154,18 +151,18 @@ class NmsLayerCPUTest : public testing::WithParamInterface, virtual p params[0]->set_friendly_name("param_1"); params[1]->set_friendly_name("param_2"); - if (maxOutBoxesType == ngraph::helpers::InputLayerType::PARAMETER) { - inputDynamicShapes.push_back(ngraph::PartialShape{1}); - params.push_back(std::make_shared(ElementType::i32, inputDynamicShapes.back())); + if (maxOutBoxesType == ov::test::utils::InputLayerType::PARAMETER) { + inputDynamicShapes.push_back(ov::PartialShape{1}); + params.push_back(std::make_shared(ElementType::i32, inputDynamicShapes.back())); params[1]->set_friendly_name("param_3"); maxOutBoxesPerClassNode = params.back(); } else { - maxOutBoxesPerClassNode = builder::makeConstant(maxBoxPrec, ngraph::Shape{}, std::vector{maxOutBoxesPerClass}); + maxOutBoxesPerClassNode = ngraph::builder::makeConstant(maxBoxPrec, ov::Shape{}, std::vector{maxOutBoxesPerClass}); } - auto iouThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{iouThr})->output(0); - auto scoreThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{scoreThr})->output(0); - auto softNmsSigmaNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{softNmsSigma})->output(0); + auto iouThrNode = ngraph::builder::makeConstant(thrPrec, ov::Shape{}, std::vector{iouThr})->output(0); + auto scoreThrNode = ngraph::builder::makeConstant(thrPrec, ov::Shape{}, std::vector{scoreThr})->output(0); + auto softNmsSigmaNode = ngraph::builder::makeConstant(thrPrec, ov::Shape{}, std::vector{softNmsSigma})->output(0); auto nms = std::make_shared(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode, softNmsSigmaNode, boxEncoding, sortResDescend, outType); @@ -419,7 +416,7 @@ const std::vector encodType = {o ov::op::v9::NonMaxSuppression::BoxEncodingType::CORNER}; const std::vector sortResDesc = {true, false}; const std::vector outType = {ElementType::i32, ElementType::i64}; -const std::vector maxBoxInputTypes = {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT}; +const std::vector maxBoxInputTypes = {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT}; const auto nmsParams = ::testing::Combine(::testing::ValuesIn(inShapeParams), ::testing::Combine(::testing::Values(ElementType::f32), @@ -438,4 +435,5 @@ const auto nmsParams = ::testing::Combine(::testing::ValuesIn(inShapeParams), INSTANTIATE_TEST_SUITE_P(smoke_NmsLayerCPUTest, NmsLayerCPUTest, nmsParams, NmsLayerCPUTest::getTestCaseName); -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp index ecce08dedb2690..b3b47e3d12c2ec 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/nonzero.cpp @@ -2,18 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple< InputShape, // Input shape definition @@ -59,7 +55,10 @@ class NonZeroLayerCPUTest : public testing::WithParamInterfaceinputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; - ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], range, startFrom); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = startFrom; + in_data.range = range; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } } @@ -87,7 +86,7 @@ class NonZeroLayerCPUTest : public testing::WithParamInterface(netType, shape)); } - auto nonZero = std::make_shared(inputParams[0]); + auto nonZero = std::make_shared(inputParams[0]); // I8 was used as a special placeholder during calculating of primitive type if input was U8, // real runtime precision is still U8 selectedType = makeSelectedTypeStr("ref", netType == ElementType::u8 ? 
ElementType::i8 : netType); @@ -199,7 +198,7 @@ std::vector inShapesDynamic = { } } }; -std::vector inShapesStatic = { +std::vector inShapesStatic = { { 100 }, { 4, 100 }, { 4, 2, 100 }, @@ -227,4 +226,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_NonZeroDynamicCPUTest, NonZeroLayerCPUTest, } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp index b80b2175603f03..9c82f0bdf93dab 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/normalize.cpp @@ -4,26 +4,21 @@ #include "shared_test_classes/single_layer/normalize_l2.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace LayerTestsDefinitions; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using NormalizeL2LayerCPUTestParamSet = std::tuple< - InputShape, // input shape - ElementType, // input element type - std::vector, // axes - float, // eps - ngraph::op::EpsMode, // eps_mode - CPUSpecificParams, - fusingSpecificParams>; +using NormalizeL2LayerCPUTestParamSet = std::tuple, // axes + float, // eps + ov::op::EpsMode, // eps_mode + CPUSpecificParams, + fusingSpecificParams>; class NormalizeL2LayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { @@ -33,7 +28,7 @@ class NormalizeL2LayerCPUTest : public testing::WithParamInterface axes; float eps; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; std::tie(shapes, inType, axes, eps, epsMode, cpuParams, fusingParams) = obj.param; @@ -60,7 +55,7 @@ class NormalizeL2LayerCPUTest : public testing::WithParamInterface axes; float eps; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; CPUSpecificParams cpuParams; fusingSpecificParams fusingParams; std::tie(shapes, inType, axes, eps, epsMode, cpuParams, fusingParams) = this->GetParam(); @@ -88,15 +83,19 @@ class NormalizeL2LayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; ov::Tensor tensor; if (funcInput.get_element_type().is_real()) { - tensor = ov::test::utils::create_and_fill_tensor( - funcInput.get_element_type(), targetInputStaticShapes[i], 10, -5, 7, 222); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -5; + in_data.range = 10; + in_data.resolution = 7; + in_data.seed = 222; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); } else { tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); } @@ -329,6 +328,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Dynamic_4D_FusingPerChannel, NormalizeL2LayerCPUT ::testing::ValuesIn(fusingParamsSetPerChannel)), NormalizeL2LayerCPUTest::getTestCaseName); -} // namespace +} // namespace -} // namespace 
CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp index 60d9a417454782..25e4e7011be40e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/one_hot.cpp @@ -7,21 +7,20 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using oneHotCPUTestParams = std::tuple< - InputShape, // Input shape - int, // axis to extend - std::pair, // secondary input type && need to generate depth - size_t, // depth - float, // on_value - float, // off_value - InferenceEngine::Precision, // Output precision - CPUSpecificParams>; +using oneHotCPUTestParams = + std::tuple, // secondary input type && need to generate depth + size_t, // depth + float, // on_value + float, // off_value + ov::element::Type, // Output precision + CPUSpecificParams>; class OneHotLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -29,10 +28,10 @@ class OneHotLayerCPUTest : public testing::WithParamInterface& obj) { InputShape inputShape; int axis; - std::pair inputType; + std::pair inputType; size_t depth; float onValue, offValue; - InferenceEngine::Precision outPrc; + ov::element::Type outPrc; CPUSpecificParams cpuParams; std::tie(inputShape, axis, inputType, depth, onValue, offValue, outPrc, cpuParams) = obj.param; @@ -45,20 +44,20 @@ class OneHotLayerCPUTest : public testing::WithParamInterfaceget_parameters().size() == 2) { generateDepth(); functionRefs = createFunction(true); @@ -125,22 +122,22 @@ class OneHotLayerCPUTest : public testing::WithParamInterface createFunction(bool depthConst) { - ov::ParameterVector params{std::make_shared(ngraph::element::i32, inputDynamicShapes.front())}; + std::shared_ptr createFunction(bool depthConst) { + ov::ParameterVector params{std::make_shared(ov::element::i32, inputDynamicShapes.front())}; params.front()->set_friendly_name("ParamsIndices"); std::shared_ptr depth; if (depthConst) { - depth = ngraph::op::Constant::create(ngraph::element::i32, ngraph::Shape{ }, {Depth}); + depth = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{ }, {Depth}); } else { - auto depthParam = std::make_shared(ngraph::element::i32, ngraph::Shape{ }); + auto depthParam = std::make_shared(ov::element::i32, ov::Shape{ }); depthParam->set_friendly_name("ParamDepth"); params.push_back(depthParam); depth = depthParam; } - auto on_value_const = std::make_shared(outType, ngraph::Shape{ }, OnValue); - auto off_value_const = std::make_shared(outType, ngraph::Shape{ }, OffValue); - auto oneHot = std::make_shared(params[0], depth, on_value_const, off_value_const, Axis); - return makeNgraphFunction(ngraph::element::i32, params, oneHot, "OneHot"); + auto on_value_const = std::make_shared(outType, ov::Shape{ }, OnValue); + auto off_value_const = std::make_shared(outType, ov::Shape{ }, OffValue); + auto oneHot = std::make_shared(params[0], depth, on_value_const, off_value_const, Axis); + return makeNgraphFunction(ov::element::i32, params, oneHot, "OneHot"); } void generateDepth() { testing::internal::Random random(time(nullptr)); @@ -159,21 +156,21 @@ TEST_P(OneHotLayerCPUTest, CompareWithRefs) { } 
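// A minimal sketch of the ov::op::v1::OneHot construction mirrored by createFunction()
// above, with depth and on/off values supplied as scalar constants. Values are illustrative.
auto indices = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::PartialShape{-1, -1});
auto depth = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, {16});
auto on_value = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{}, 1.0f);
auto off_value = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{}, 0.0f);
const int64_t axis = -1;  // extend the innermost dimension
auto one_hot = std::make_shared<ov::op::v1::OneHot>(indices, depth, on_value, off_value, axis);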
namespace { -const std::vector outPrc = { - Precision::FP32, - Precision::BF16, - Precision::I8 - // Precision::U8 // Precision cannot be wrapped to constant one hot +const std::vector outPrc = { + ov::element::f32, + ov::element::bf16, + ov::element::i8 + // ov::element::u8 // Precision cannot be wrapped to constant one hot }; -std::vector> secondaryInputTypesStaticCase = { - {ngraph::helpers::InputLayerType::CONSTANT, true}, - {ngraph::helpers::InputLayerType::CONSTANT, false} +std::vector> secondaryInputTypesStaticCase = { + {utils::InputLayerType::CONSTANT, true}, + {utils::InputLayerType::CONSTANT, false} }; -std::vector> secondaryInputTypesDynamicCase = { - {ngraph::helpers::InputLayerType::CONSTANT, true}, - {ngraph::helpers::InputLayerType::CONSTANT, false}, - {ngraph::helpers::InputLayerType::PARAMETER, true} +std::vector> secondaryInputTypesDynamicCase = { + {utils::InputLayerType::CONSTANT, true}, + {utils::InputLayerType::CONSTANT, false}, + {utils::InputLayerType::PARAMETER, true} }; const std::vector staticInputShapes0D = { @@ -328,5 +325,6 @@ const auto testCase_5d_dynamic = ::testing::Combine( ); INSTANTIATE_TEST_SUITE_P(smoke_OneHotCPU_5D_Dynamic, OneHotLayerCPUTest, testCase_5d_dynamic, OneHotLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp index 227933e2a3fe2d..c5124d9cb95028 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/pad.cpp @@ -2,21 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "test_utils/cpu_test_utils.hpp" +#include "shared_test_classes/single_layer/pad.hpp" + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/pad.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include -#include +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using PadLayerCPUTestParamSet = std::tuple< InputShape, // Input shape - ngraph::helpers::InputLayerType, // Secondary input types + ov::test::utils::InputLayerType, // Secondary input types ElementType, // Input element type std::vector, // padsBegin std::vector, // padsEnd @@ -30,7 +30,7 @@ class PadLayerCPUTest : public testing::WithParamInterface obj) { InputShape shapes; - ngraph::helpers::InputLayerType secondaryInputType; + ov::test::utils::InputLayerType secondaryInputType; ElementType elementType; std::vector padsBegin, padsEnd; ov::op::PadMode padMode; @@ -67,7 +67,10 @@ class PadLayerCPUTest : public testing::WithParamInterfaceget_friendly_name() == "pad_value") tensor = ov::Tensor{funcInput.get_element_type(), ov::Shape{}, &padValue}; @@ -79,7 +82,7 @@ class PadLayerCPUTest : public testing::WithParamInterface(dataType, shape)); } std::shared_ptr pad; - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (secondaryInputType == ov::test::utils::InputLayerType::PARAMETER) { ov::Shape inShape = {padsBegin.size()}; auto beginNode = std::make_shared(ElementType::i64, inShape); @@ -155,13 +158,13 @@ const std::vector inputPrecisions = { ElementType::i8 }; -const std::vector inputLayerTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - 
ngraph::helpers::InputLayerType::PARAMETER +const std::vector inputLayerTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; -const std::vector inputLayerTypesBlocked = { - ngraph::helpers::InputLayerType::CONSTANT, +const std::vector inputLayerTypesBlocked = { + ov::test::utils::InputLayerType::CONSTANT, }; const std::vector argPadValue = {0.f, 2.5f}; @@ -747,5 +750,6 @@ INSTANTIATE_TEST_SUITE_P( /* *======================* *=====================* *======================* */ } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp index 13142ae4f395f1..76cc3498f8389d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box.cpp @@ -2,21 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include - -#include +#include "openvino/core/partial_shape.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using priorBoxSpecificParams = std::tuple< std::vector, // min_size @@ -32,16 +27,14 @@ using priorBoxSpecificParams = std::tuple< std::vector, // variance bool>; // scale_all_sizes -typedef std::tuple< - priorBoxSpecificParams, - ov::test::ElementType, // net precision - ov::test::ElementType, // Input precision - ov::test::ElementType, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - ov::test::InputShape, // input shape - ov::test::InputShape, // image shape - std::string> priorBoxLayerParams; +typedef std::tuple + priorBoxLayerParams; class PriorBoxLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -49,19 +42,18 @@ class PriorBoxLayerCPUTest : public testing::WithParamInterface& obj) { ov::test::ElementType netPrecision; ov::test::ElementType inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; ov::test::InputShape inputShapes; ov::test::InputShape imageShapes; std::string targetDevice; priorBoxSpecificParams specParams; std::tie(specParams, netPrecision, - inPrc, outPrc, inLayout, outLayout, + inPrc, outPrc, inputShapes, imageShapes, targetDevice) = obj.param; - ngraph::op::PriorBoxAttrs attributes; + ov::op::v0::PriorBox::Attributes attributes; std::tie( attributes.min_size, attributes.max_size, @@ -83,8 +75,6 @@ class PriorBoxLayerCPUTest : public testing::WithParamInterface(netPrecision, shape)); } - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBox = std::make_shared( + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBox = std::make_shared( shape_of_1, shape_of_2, attributes); - ngraph::ResultVector results{std::make_shared(priorBox)}; - function = std::make_shared (results, params, "priorBox"); + ov::ResultVector results{std::make_shared(priorBox)}; + function = std::make_shared (results, params, "priorBox"); } }; @@ -219,12 +205,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_PriorBox, 
PriorBoxLayerCPUTest, ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::ElementType::undefined), ::testing::Values(ov::test::ElementType::undefined), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), ::testing::ValuesIn(inputShape), ::testing::ValuesIn(imageShape), ::testing::Values(ov::test::utils::DEVICE_CPU)), PriorBoxLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp index 06fe498f62b551..5aa4df776dd752 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/prior_box_clustered.cpp @@ -2,21 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include - -#include -#include "ov_models/builders.hpp" +#include "openvino/core/partial_shape.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple< std::vector, // widths @@ -28,16 +22,14 @@ typedef std::tuple< float, // offset std::vector> priorBoxClusteredSpecificParams; -typedef std::tuple< - priorBoxClusteredSpecificParams, - ov::test::ElementType, // net precision - ov::test::ElementType, // Input precision - ov::test::ElementType, // Output precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - ov::test::InputShape, // input shape - ov::test::InputShape, // image shape - std::string> priorBoxClusteredLayerParams; +typedef std::tuple + priorBoxClusteredLayerParams; class PriorBoxClusteredLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -45,18 +37,12 @@ class PriorBoxClusteredLayerCPUTest : public testing::WithParamInterface& obj) { ov::test::ElementType netPrecision; ov::test::ElementType inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; ov::test::InputShape inputShapes, imageShapes; std::string targetDevice; priorBoxClusteredSpecificParams specParams; - std::tie(specParams, - netPrecision, - inPrc, outPrc, inLayout, outLayout, - inputShapes, - imageShapes, - targetDevice) = obj.param; - - ngraph::op::PriorBoxClusteredAttrs attributes; + std::tie(specParams, netPrecision, inPrc, outPrc, inputShapes, imageShapes, targetDevice) = obj.param; + + ov::op::v0::PriorBoxClustered::Attributes attributes; std::tie( attributes.widths, attributes.heights, @@ -75,8 +61,6 @@ class PriorBoxClusteredLayerCPUTest : public testing::WithParamInterface(netPrecision, shape)); } - auto shape_of_1 = std::make_shared(params[0]); - auto shape_of_2 = std::make_shared(params[1]); - auto priorBoxClustered = std::make_shared( + auto shape_of_1 = std::make_shared(params[0]); + auto shape_of_2 = std::make_shared(params[1]); + auto priorBoxClustered = std::make_shared( shape_of_1, shape_of_2, attributes); - ngraph::ResultVector results{ std::make_shared(priorBoxClustered) }; - function = std::make_shared(results, params, "priorBoxClustered"); + ov::ResultVector results{ std::make_shared(priorBoxClustered) }; + function = 
std::make_shared(results, params, "priorBoxClustered"); } }; @@ -217,8 +196,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, PriorBoxClusteredLayerCPUTest, ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::ElementType::undefined), ::testing::Values(ov::test::ElementType::undefined), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), ::testing::ValuesIn(inputShapes), ::testing::ValuesIn(imageShapes), ::testing::Values(ov::test::utils::DEVICE_CPU)), @@ -226,4 +203,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, PriorBoxClusteredLayerCPUTest, ); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp index 03240dcfdebafb..6f6c1c270d7d1a 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/proposal.cpp @@ -2,17 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace proposalTypes { @@ -51,10 +48,9 @@ using proposalSpecificParams = std::tuple< ratio_type, scale_type>; -using proposalLayerTestCPUParams = std::tuple< - std::vector, // Input shapes - proposalSpecificParams, // Node attributes - Precision>; // Network precision +using proposalLayerTestCPUParams = std::tuple, // Input shapes + proposalSpecificParams, // Node attributes + ov::element::Type>; // Network precision class ProposalLayerCPUTest : public testing::WithParamInterface, public SubgraphBaseTest, public CPUTestsBase { @@ -62,7 +58,7 @@ class ProposalLayerCPUTest : public testing::WithParamInterface obj) { std::vector inputShapes; proposalSpecificParams proposalParams; - Precision netPrecision; + ov::element::Type netPrecision; std::tie(inputShapes, proposalParams, netPrecision) = obj.param; base_size_type base_size; @@ -103,7 +99,7 @@ class ProposalLayerCPUTest : public testing::WithParamInterface(ngPrc, shape)); + params.push_back(std::make_shared(netPrecision, shape)); } - ngraph::op::ProposalAttrs attrs; + ov::op::v0::Proposal::Attributes attrs; attrs.base_size = base_size; attrs.pre_nms_topn = pre_nms_topn; attrs.post_nms_topn = post_nms_topn; @@ -161,14 +156,14 @@ class ProposalLayerCPUTest : public testing::WithParamInterface(params[0], params[1], params[2], attrs); + auto proposal = std::make_shared(params[0], params[1], params[2], attrs); - ngraph::ResultVector results{ - std::make_shared(proposal->output(0)), - std::make_shared(proposal->output(1)) + ov::ResultVector results{ + std::make_shared(proposal->output(0)), + std::make_shared(proposal->output(1)) }; - function = std::make_shared(results, params, "Proposal"); + function = std::make_shared(results, params, "Proposal"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); @@ -185,7 +180,11 @@ class ProposalLayerCPUTest : public testing::WithParamInterface netPrecision = { - Precision::FP32 +const std::vector netPrecision = { + ov::element::f32 }; std::vector> staticInputShapesCase1 = { @@ 
-338,4 +337,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_Proposal_Dynamic_Test_Case2, ProposalLayerCPUTest ProposalLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp index 9d0841f15fb6f0..8217db402b4e04 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/psroi_pooling.cpp @@ -89,13 +89,13 @@ class PSROIPoolingLayerCPUTest : public testing::WithParamInterface(ngraph::element::f32, proposalShape, proposal); ov::ParameterVector params {std::make_shared(ngraph::element::f32, ov::Shape(featureMapShape))}; - auto psroi = std::make_shared(params[0], coords, outputDim, groupSize, + auto psroi = std::make_shared(params[0], coords, outputDim, groupSize, spatialScale, spatialBinsX, spatialBinsY, mode); psroi->get_rt_info() = getCPUInfo(); selectedType = getPrimitiveType() + "_" + inPrc.name(); threshold = 1e-2f; - const ngraph::ResultVector results{std::make_shared(psroi)}; + const ngraph::ResultVector results{std::make_shared(psroi)}; function = std::make_shared(results, params, "PSROIPooling"); } }; diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp index 2596f18b459550..cc6fb0277dc1e0 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/range.cpp @@ -112,17 +112,17 @@ // step = std::get<2>(rangeInputs); // auto ngOutPr = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); // auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inPrc); -// auto startPar = std::make_shared(ngNetPrc, ngraph::Shape{}); -// auto stopPar = std::make_shared(ngNetPrc, ngraph::Shape{}); -// auto stepPar = std::make_shared(ngNetPrc, ngraph::Shape{}); -// auto range = std::make_shared(startPar, stopPar, stepPar, ngOutPr); +// auto startPar = std::make_shared(ngNetPrc, ngraph::Shape{}); +// auto stopPar = std::make_shared(ngNetPrc, ngraph::Shape{}); +// auto stepPar = std::make_shared(ngNetPrc, ngraph::Shape{}); +// auto range = std::make_shared(startPar, stopPar, stepPar, ngOutPr); // range->get_rt_info() = getCPUInfo(); // selectedType = std::string("ref_any_") + (inPrc == outPrc ? 
inPrc.name() : "FP32"); // startPar->set_friendly_name("start"); // stopPar->set_friendly_name("stop"); // stepPar->set_friendly_name("step"); // -// const ngraph::ResultVector results{std::make_shared(range)}; +// const ngraph::ResultVector results{std::make_shared(range)}; // function = std::make_shared(results, ngraph::ParameterVector { // startPar, stopPar, stepPar}, "Range"); // functionRefs = ngraph::clone_function(*function); diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp index b80dfc66cf691c..18471afff43294 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rdft.cpp @@ -3,32 +3,29 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" -#include -#include +#include "common_test_utils/ov_tensor_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; -using namespace ov; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -std::vector precisions{element::f32}; +std::vector precisions{ov::element::f32}; using RDFTTestCPUParams = std::tuple, std::vector>, // axes std::vector>, // signal sizes - bool, // inverse - bool, // const axes if true - bool, // const signal sizes if true + bool, // inverse + bool, // const axes if true + bool, // const signal sizes if true CPUSpecificParams>; -class RDFTTestCPU : public testing::WithParamInterface>, +class RDFTTestCPU : public testing::WithParamInterface>, virtual public test::SubgraphBaseTest, public CPUTestsBase { public: - static std::string getTestCaseName(testing::TestParamInfo> obj) { - element::Type precision; + static std::string getTestCaseName(testing::TestParamInfo> obj) { + ov::element::Type precision; RDFTTestCPUParams params; std::vector shapes; std::vector> axes; @@ -76,7 +73,7 @@ class RDFTTestCPU : public testing::WithParamInterface shapes; bool inverse; @@ -113,14 +110,14 @@ class RDFTTestCPU : public testing::WithParamInterface(precision, *inputShapeIt++); + auto param = std::make_shared(precision, *inputShapeIt++); inputs.push_back(param); std::shared_ptr axesNode; if (constAxes) { - axesNode = opset9::Constant::create(element::i64, Shape{axes[0].size()}, axes[0]); + axesNode = ov::op::v0::Constant::create(element::i64, Shape{axes[0].size()}, axes[0]); } else { ASSERT_NE(inputShapeIt, inputDynamicShapes.end()); - auto param = std::make_shared(element::i64, *inputShapeIt++); + auto param = std::make_shared(element::i64, *inputShapeIt++); axesNode = param; inputs.push_back(param); } @@ -129,23 +126,23 @@ class RDFTTestCPU : public testing::WithParamInterface 0) { std::shared_ptr signalSizesNode; if (constSignalSizes) { - signalSizesNode = opset9::Constant::create(element::i64, Shape{signalSizes[0].size()}, signalSizes[0]); + signalSizesNode = ov::op::v0::Constant::create(element::i64, Shape{signalSizes[0].size()}, signalSizes[0]); } else { ASSERT_NE(inputShapeIt, inputDynamicShapes.end()); - auto param = std::make_shared(element::i64, *inputShapeIt); + auto param = std::make_shared(element::i64, *inputShapeIt); signalSizesNode = param; inputs.push_back(param); } if (inverse) { - rdft = std::make_shared(param, axesNode, signalSizesNode); + rdft = std::make_shared(param, axesNode, signalSizesNode); } else { - rdft = std::make_shared(param, axesNode, signalSizesNode); + rdft = std::make_shared(param, axesNode, signalSizesNode); } } 
else { if (inverse) { - rdft = std::make_shared(param, axesNode); + rdft = std::make_shared(param, axesNode); } else { - rdft = std::make_shared(param, axesNode); + rdft = std::make_shared(param, axesNode); } } function = std::make_shared(rdft, inputs); @@ -191,11 +188,11 @@ TEST_P(RDFTTestCPU, CompareWithRefs) { namespace { CPUSpecificParams getCPUSpecificParams() { - if (InferenceEngine::with_cpu_x86_avx512_core()) { + if (ov::with_cpu_x86_avx512_core()) { return CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}; - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { return CPUSpecificParams{{}, {}, {"jit_avx2"}, "jit_avx2"}; - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { return CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}; } else { return CPUSpecificParams{{}, {}, {"ref"}, "ref"}; @@ -206,7 +203,7 @@ CPUSpecificParams getCPUSpecificParams() { auto cpuParams = getCPUSpecificParams(); std::vector getParams1D() { - if (InferenceEngine::with_cpu_x86_avx512_core()) { + if (ov::with_cpu_x86_avx512_core()) { return { {static_shapes_to_test_representation({Shape{14}}), {{0}}, {}, false, true, true, cpuParams}, {static_shapes_to_test_representation({Shape{13}}), {{0}}, {}, false, true, true, cpuParams}, @@ -243,7 +240,7 @@ std::vector getParams1D() { {static_shapes_to_test_representation({Shape{25, 2}}), {{0}}, {{32}}, true, true, true, cpuParams}, {static_shapes_to_test_representation({Shape{24, 2}}), {{0}}, {{16}}, true, true, true, cpuParams}, }; - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { return { {static_shapes_to_test_representation({Shape{6}}), {{0}}, {}, false, true, true, cpuParams}, {static_shapes_to_test_representation({Shape{5}}), {{0}}, {}, false, true, true, cpuParams}, @@ -312,13 +309,13 @@ std::vector getParams1D() { return {}; } -INSTANTIATE_TEST_SUITE_P(smoke_RDFT_CPU_1D, RDFTTestCPU, - ::testing::Combine(::testing::ValuesIn(precisions), - ::testing::ValuesIn(getParams1D())), +INSTANTIATE_TEST_SUITE_P(smoke_RDFT_CPU_1D, + RDFTTestCPU, + ::testing::Combine(::testing::ValuesIn(precisions), ::testing::ValuesIn(getParams1D())), RDFTTestCPU::getTestCaseName); std::vector getParams2D() { - if (InferenceEngine::with_cpu_x86_avx512_core()) { + if (ov::with_cpu_x86_avx512_core()) { return { {static_shapes_to_test_representation({{46, 10}}), {{0}}, {}, false, true, true, cpuParams}, {static_shapes_to_test_representation({{45, 10}}), {{0}}, {}, false, true, true, cpuParams}, @@ -360,7 +357,7 @@ std::vector getParams2D() { {static_shapes_to_test_representation({{32, 513, 2}}), {{0, 1}}, {{32, 600}}, true, true, true, cpuParams}, {static_shapes_to_test_representation({{72, 1025, 2}}), {{0, 1}}, {{72, 100}}, true, true, true, cpuParams}, }; - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { return { {static_shapes_to_test_representation({{38, 16}}), {{0}}, {}, false, true, true, cpuParams}, {static_shapes_to_test_representation({{37, 8}}), {{0}}, {}, false, true, true, cpuParams}, @@ -460,14 +457,14 @@ std::vector getParams2D() { return {}; } -INSTANTIATE_TEST_SUITE_P(smoke_RDFT_CPU_2D, RDFTTestCPU, - ::testing::Combine(::testing::ValuesIn(precisions), - ::testing::ValuesIn(getParams2D())), +INSTANTIATE_TEST_SUITE_P(smoke_RDFT_CPU_2D, + RDFTTestCPU, + ::testing::Combine(::testing::ValuesIn(precisions), ::testing::ValuesIn(getParams2D())), RDFTTestCPU::getTestCaseName); std::vector getParams4D() { 
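(Annotation, not part of the patch.) The ISA checks in these rdft.cpp hunks are renamed from InferenceEngine::with_cpu_x86_*() to ov::with_cpu_x86_*(); in current releases the helpers are declared in openvino/runtime/system_conf.hpp. A standalone sketch of the probe the parameter builders rely on, with expected_rdft_primitive as a hypothetical helper name:

#include <string>

#include "openvino/runtime/system_conf.hpp"

// Resolve the JIT flavour the CPU plugin is expected to report on this host,
// mirroring getCPUSpecificParams() above: widest ISA first, then reference.
std::string expected_rdft_primitive() {
    if (ov::with_cpu_x86_avx512_core())
        return "jit_avx512";
    if (ov::with_cpu_x86_avx2())
        return "jit_avx2";
    if (ov::with_cpu_x86_sse42())
        return "jit_sse42";
    return "ref";
}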
std::vector params; - if (InferenceEngine::with_cpu_x86_avx512_core()) { + if (ov::with_cpu_x86_avx512_core()) { params = { {static_shapes_to_test_representation({{10, 46, 128, 65}}), {{1}}, {}, false, true, true, cpuParams}, {static_shapes_to_test_representation({{10, 46, 128, 65}}), {{0, 1}}, {}, false, true, true, cpuParams}, @@ -488,7 +485,7 @@ std::vector getParams4D() { // TODO: FIXME //{static_shapes_to_test_representation({{46, 10, 128, 65, 2}}), {{0, 1, 2, 3}, {12, 15, 130, 40}, true, true, true, cpuParams}, }; - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { params = { {static_shapes_to_test_representation({{9, 16, 32, 126}}), {{1}}, {}, false, true, true, cpuParams}, {static_shapes_to_test_representation({{9, 16, 32, 126}}), {{1, 0}}, {}, false, true, true, cpuParams}, @@ -593,10 +590,11 @@ std::vector getParams4D() { return params; } -INSTANTIATE_TEST_SUITE_P(smoke_RDFT_CPU_4D, RDFTTestCPU, - ::testing::Combine(::testing::ValuesIn(precisions), - ::testing::ValuesIn(getParams4D())), +INSTANTIATE_TEST_SUITE_P(smoke_RDFT_CPU_4D, + RDFTTestCPU, + ::testing::Combine(::testing::ValuesIn(precisions), ::testing::ValuesIn(getParams4D())), RDFTTestCPU::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp index 2dc172b24f0d7d..9fb1b65ab95392 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/region_yolo.cpp @@ -2,15 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { -using namespace ov::test; +namespace ov { +namespace test { struct regionYoloAttributes { size_t classes; @@ -21,15 +19,13 @@ struct regionYoloAttributes { int end_axis; }; -using regionYoloParamsTuple = std::tuple< - InputShape, // Input Shape - regionYoloAttributes, // Params - std::vector, // mask - ov::test::ElementType, // Network input precision - ov::test::ElementType, // Network output precision - std::map, // Additional network configuration - std::string>; // Device name - +using regionYoloParamsTuple = std::tuple, // mask + ov::test::ElementType, // Network input precision + ov::test::ElementType, // Network output precision + ov::AnyMap, // Additional network configuration + std::string>; // Device name class RegionYoloCPULayerTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { @@ -41,7 +37,7 @@ class RegionYoloCPULayerTest : public testing::WithParamInterface additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShape, attributes, mask, inpPrecision, outPrecision, additionalConfig, targetName) = obj.param; @@ -65,7 +61,7 @@ class RegionYoloCPULayerTest : public testing::WithParamInterface mask; ov::test::ElementType inPrc; ov::test::ElementType outPrc; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShape, attributes, mask, inPrc, outPrc, additionalConfig, targetDevice) = this->GetParam(); @@ -78,14 +74,19 @@ class RegionYoloCPULayerTest : public testing::WithParamInterface(inPrc, shape)); } - const auto region_yolo = 
std::make_shared(paramRegionYolo[0], - attributes.coordinates, attributes.classes, attributes.num_regions, - attributes.do_softmax, mask, attributes.start_axis, attributes.end_axis); + const auto region_yolo = std::make_shared(paramRegionYolo[0], + attributes.coordinates, + attributes.classes, + attributes.num_regions, + attributes.do_softmax, + mask, + attributes.start_axis, + attributes.end_axis); function = makeNgraphFunction(inPrc, paramRegionYolo, region_yolo, "RegionYolo"); } @@ -99,15 +100,15 @@ TEST_P(RegionYoloCPULayerTest, CompareWithRefs) { namespace { const std::vector inpOutPrc = {ov::test::ElementType::bf16, ov::test::ElementType::f32}; -const std::map additional_config; +const ov::AnyMap additional_config; /* *======================* Static Shapes *======================* */ -const std::vector inShapes_caffe = { +const std::vector inShapes_caffe = { {1, 125, 13, 13} }; -const std::vector inShapes_mxnet = { +const std::vector inShapes_mxnet = { {1, 75, 52, 52}, {1, 75, 32, 32}, {1, 75, 26, 26}, @@ -119,7 +120,7 @@ const std::vector inShapes_mxnet = { {1, 303, 28, 28}, }; -const std::vector inShapes_v3 = { +const std::vector inShapes_v3 = { {1, 255, 52, 52}, {1, 255, 26, 26}, {1, 255, 13, 13} @@ -225,5 +226,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsRegionYoloMxnetCPUStatic, RegionYoloCPULayer INSTANTIATE_TEST_SUITE_P(smoke_TestsRegionYoloMxnetCPUDynamic, RegionYoloCPULayerTest, testCase_yolov3_mxnet_dynamic, RegionYoloCPULayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsRegionYoloCaffeCPUStatic, RegionYoloCPULayerTest, testCase_yolov2_caffe, RegionYoloCPULayerTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TestsRegionYoloCaffeCPUDynamic, RegionYoloCPULayerTest, testCase_yolov2_caffe_dynamic, RegionYoloCPULayerTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp index ae6d5e5b7e85ab..91d97d9e679b40 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reorg_yolo.cpp @@ -2,17 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::opset3; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using ReorgYoloCPUParamsTuple = typename std::tuple(ngraph::element::f32, inputDynamicShapes[0]); - auto reorg_yolo = std::make_shared(param, stride); - function = std::make_shared(std::make_shared(reorg_yolo), - ngraph::ParameterVector{param}, - "ReorgYolo"); + auto param = std::make_shared(ov::element::f32, inputDynamicShapes[0]); + auto reorg_yolo = std::make_shared(param, stride); + function = std::make_shared(std::make_shared(reorg_yolo), + ov::ParameterVector{param}, + "ReorgYolo"); } }; @@ -87,4 +84,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_TestsReorgYolo_stride3_DynamicShape, testCase_stride3_Dynamic, ReorgYoloLayerCPUTest::getTestCaseName); -}; // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp index d78a8780f774cf..e440d7496fecf4 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/reverse_sequence.cpp @@ -2,24 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using ReverseSequenceCPUTestParams = typename std::tuple< - int64_t, // Index of the batch dimension - int64_t, // Index of the sequence dimension - InputShape, // Input shape - InputShape, // Shape of the input vector with sequence lengths to be reversed - ngraph::helpers::InputLayerType, // Secondary input type - InferenceEngine::Precision, // Network precision - std::string>; // Device name +using ReverseSequenceCPUTestParams = + typename std::tuple; // Device name class ReverseSequenceLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -27,11 +27,11 @@ class ReverseSequenceLayerCPUTest : public testing::WithParamInterface obj) { int64_t batchAxisIndex; int64_t seqAxisIndex; - InferenceEngine::Precision netPrecision; + ov::element::Type netPrecision; std::string targetName; InputShape dataInputShape; InputShape seqLengthsShape; - ngraph::helpers::InputLayerType secondaryInputType; + utils::InputLayerType secondaryInputType; std::tie(batchAxisIndex, seqAxisIndex, dataInputShape, seqLengthsShape, secondaryInputType, netPrecision, targetName) = obj.param; @@ -49,7 +49,7 @@ class ReverseSequenceLayerCPUTest : public testing::WithParamInterface(paramsIn.front(), seqLengthsInput, batchAxisIndex, seqAxisIndex); + const ov::ResultVector results{std::make_shared(reverse)}; + function = std::make_shared(results, paramsIn, "ReverseSequence"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -100,10 +99,11 @@ class ReverseSequenceLayerCPUTest : public testing::WithParamInterface netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32 +const std::vector netPrecisions = { + ov::element::f32, + ov::element::i32 }; const int64_t batchAxisIndex = 0L; @@ -147,9 +147,9 @@ const std::vector dataInputDynamicShapes5D = const std::vector seqLengthsDynamicShapes = {{{-1}, {{7}, {10}}}}; -const std::vector secondaryInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector secondaryInputTypes = { + utils::InputLayerType::CONSTANT, + utils::InputLayerType::PARAMETER }; INSTANTIATE_TEST_SUITE_P(smoke_ReverseSequenceCPUStatic3D, ReverseSequenceLayerCPUTest, @@ -191,7 +191,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ReverseSequenceCPUDynamic3D, ReverseSequenceLayer ::testing::ValuesIn(seqAxisIndices), ::testing::ValuesIn(dataInputDynamicShapes3D), ::testing::ValuesIn(seqLengthsDynamicShapes), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), ReverseSequenceLayerCPUTest::getTestCaseName); @@ -202,7 +202,7 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_ReverseSequenceCPUDynamic4D, ReverseSequenceLayer ::testing::ValuesIn(seqAxisIndices), ::testing::ValuesIn(dataInputDynamicShapes4D), ::testing::ValuesIn(seqLengthsDynamicShapes), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), ReverseSequenceLayerCPUTest::getTestCaseName); @@ -213,10 +213,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_ReverseSequenceCPUDynamic5D, ReverseSequenceLayer ::testing::ValuesIn(seqAxisIndices), ::testing::ValuesIn(dataInputDynamicShapes5D), ::testing::ValuesIn(seqLengthsDynamicShapes), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::ValuesIn(netPrecisions), ::testing::Values(ov::test::utils::DEVICE_CPU)), ReverseSequenceLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp index af94855c414e1d..e8d9bb2359417e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_cell.cpp @@ -2,23 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/rnn_cell.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using RNNCellCPUParams = typename std::tuple< - std::vector, // Shapes - std::vector, // Activations - float, // Clip - ElementType, // Network precision - CPUSpecificParams, // CPU specific params - std::map // Additional config ->; +using RNNCellCPUParams = typename std::tuple, // Shapes + std::vector, // Activations + float, // Clip + ElementType, // Network precision + CPUSpecificParams, // CPU specific params + ov::AnyMap // Additional config + >; class RNNCellCPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { @@ -29,7 +28,7 @@ class RNNCellCPUTest : public testing::WithParamInterface, float clip = 0.f; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, activations, clip, netPrecision, cpuParams, additionalConfig) = obj.param; @@ -53,9 +52,8 @@ class RNNCellCPUTest : public testing::WithParamInterface, if (!additionalConfig.empty()) { result << "_PluginConf"; - for (auto &item : additionalConfig) { - if (item.second == InferenceEngine::PluginConfigParams::YES) - result << "_" << item.first << "=" << item.second; + for (auto& item : additionalConfig) { + result << "_" << item.first << "=" << item.second.as(); } } @@ -69,7 +67,7 @@ class RNNCellCPUTest : public testing::WithParamInterface, float clip = 0.f; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, activations, clip, netPrecision, cpuParams, additionalConfig) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; @@ -82,7 +80,8 @@ class RNNCellCPUTest : public testing::WithParamInterface, 
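(Annotation, not part of the patch.) The rnn_cell.cpp hunk below replaces the string-keyed std::map config holding KEY_ENFORCE_BF16 with a typed ov::AnyMap queried through ov::hint::inference_precision; since the new code compares against ov::element::bf16, the template argument stripped from its as() call was presumably ov::element::Type. A minimal sketch of the new lookup, with wants_bf16 as a hypothetical helper name:

#include "openvino/core/any.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/properties.hpp"

// Post-migration plugin config: an ov::AnyMap keyed by typed properties
// replaces std::map<std::string, std::string> with KEY_ENFORCE_BF16 flags.
bool wants_bf16(const ov::AnyMap& config) {
    auto it = config.find(ov::hint::inference_precision.name());
    return it != config.end() && it->second.as<ov::element::Type>() == ov::element::bf16;
}

// Usage: wants_bf16({ov::hint::inference_precision(ov::element::bf16)}) returns true.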
configuration.insert(additionalConfig.begin(), additionalConfig.end()); - if (additionalConfig[InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16] == InferenceEngine::PluginConfigParams::YES) { + auto it = additionalConfig.find(ov::hint::inference_precision.name()); + if (it != additionalConfig.end() && it->second.as() == ov::element::bf16) { selectedType = makeSelectedTypeStr(selectedType, ElementType::bf16); } else { selectedType = makeSelectedTypeStr(selectedType, netPrecision); @@ -96,7 +95,7 @@ class RNNCellCPUTest : public testing::WithParamInterface, paramsOuts.push_back(param); } std::vector WRB = {{hiddenSize, inputSize}, {hiddenSize, hiddenSize}, {hiddenSize}}; - auto rnnCellOp = ngraph::builder::makeRNN(paramsOuts, WRB, hiddenSize, activations, {}, {}, clip); + auto rnnCellOp = utils::make_rnn(paramsOuts, WRB, hiddenSize, activations, {}, {}, clip); function = makeNgraphFunction(netPrecision, params, rnnCellOp, "RNNCellCPU"); } @@ -109,10 +108,8 @@ TEST_P(RNNCellCPUTest, CompareWithRefs) { namespace { /* CPU PARAMS */ -std::vector> additionalConfig = { - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}} -}; +std::vector additionalConfig = {{ov::hint::inference_precision(ov::element::f32)}, + {ov::hint::inference_precision(ov::element::bf16)}}; CPUSpecificParams cpuParams{{nc, nc}, {nc}, {"ref_any"}, "ref_any"}; std::vector> activations = {{"relu"}, {"sigmoid"}, {"tanh"}}; @@ -167,5 +164,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, RNNCellCPUTest, ::testing::Values(cpuParams), ::testing::ValuesIn(additionalConfig)), RNNCellCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp index e3c05700d7243b..41bed3e3b5d171 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/rnn_sequence.cpp @@ -3,25 +3,26 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "common_test_utils/node_builders/rnn_cell.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using RNNSequenceCpuSpecificParams = typename std::tuple< std::vector, // Shapes - ngraph::helpers::SequenceTestsMode, // Pure Sequence or TensorIterator + ov::test::utils::SequenceTestsMode, // Pure Sequence or TensorIterator std::vector, // Activations float, // Clip ov::op::RecurrentSequenceDirection, // Direction ElementType, // Network precision CPUSpecificParams, // CPU specific params - std::map // Additional config + ov::AnyMap // Additional config >; class RNNSequenceCPUTest : public testing::WithParamInterface, @@ -29,13 +30,13 @@ class RNNSequenceCPUTest : public testing::WithParamInterface &obj) { std::vector inputShapes; - ngraph::helpers::SequenceTestsMode seqMode; + ov::test::utils::SequenceTestsMode seqMode; std::vector activations; float clip; 
ov::op::RecurrentSequenceDirection direction; ElementType netPrecision; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapes, seqMode, activations, clip, direction, netPrecision, cpuParams, additionalConfig) = obj.param; @@ -61,9 +62,8 @@ class RNNSequenceCPUTest : public testing::WithParamInterface(); manager.register_pass(); @@ -180,14 +173,13 @@ TEST_P(RNNSequenceCPUTest, CompareWithRefs) { namespace { /* CPU PARAMS */ -std::vector> additionalConfig - = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}}, - {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}}; +std::vector additionalConfig = {{ov::hint::inference_precision(ov::element::f32)}, + {ov::hint::inference_precision(ov::element::bf16)}}; CPUSpecificParams cpuParams{{ntc, tnc}, {ntc, tnc}, {"ref_any"}, "ref_any"}; CPUSpecificParams cpuParamsBatchSizeOne{{tnc, ntc}, {tnc, tnc}, {"ref_any"}, "ref_any"}; -std::vector mode{ngraph::helpers::SequenceTestsMode::PURE_SEQ}; +std::vector mode{ov::test::utils::SequenceTestsMode::PURE_SEQ}; // output values increase rapidly without clip, so use only seq_lengths = 2 std::vector seq_lengths_zero_clip{ 2 }; std::vector> activations = {{"relu"}, {"sigmoid"}, {"tanh"}}; @@ -221,7 +213,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_static, RNNSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), RNNSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, RNNSequenceCPUTest, @@ -232,7 +224,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_static_BatchSizeOne, RNNSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), RNNSequenceCPUTest::getTestCaseName); const std::vector> dynamicShapes = { @@ -300,7 +292,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, RNNSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), RNNSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, RNNSequenceCPUTest, @@ -311,7 +303,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic_BatchSizeOne, RNNSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParamsBatchSizeOne), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), RNNSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_dynamic, RNNSequenceCPUTest, @@ -322,7 +314,7 @@ INSTANTIATE_TEST_SUITE_P(nightly_dynamic, RNNSequenceCPUTest, ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), ::testing::Values(cpuParams), - ::testing::Values(std::map{})), + ::testing::Values(ov::AnyMap{})), RNNSequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, RNNSequenceCPUTest, @@ -335,5 +327,6 @@ INSTANTIATE_TEST_SUITE_P(nightly_dynamic_bf16, RNNSequenceCPUTest, ::testing::Values(cpuParams), ::testing::Values(additionalConfig[1])), RNNSequenceCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp 
b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp index 5b96e040c7db6d..fe8a8ff49a1fc8 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roi_pooling.cpp @@ -1,38 +1,35 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include +#include "shared_test_classes/single_layer/roi_pooling.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/file_utils.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "common_test_utils/data_utils.hpp" -#include "ie_common.h" #include "test_utils/cpu_test_utils.hpp" #include "utils/bfloat16.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { enum ProposalGenerationMode { RANDOM, ULTIMATE_RIGHT_BORDER }; using roiPoolingShapes = std::vector; -using roiPoolingParams = std::tuple< - roiPoolingShapes, // Input shapes - std::vector, // Pooled shape {pooled_h, pooled_w} - float, // Spatial scale - ngraph::helpers::ROIPoolingTypes, // ROIPooling method - InferenceEngine::Precision, // Net precision - LayerTestsUtils::TargetDevice>; // Device name +using roiPoolingParams = std::tuple, // Pooled shape {pooled_h, pooled_w} + float, // Spatial scale + utils::ROIPoolingTypes, // ROIPooling method + ov::element::Type, // Net precision + LayerTestsUtils::TargetDevice>; // Device name using ROIPoolingCPUTestParamsSet = std::tuple>; + ov::AnyMap>; class ROIPoolingCPULayerTest : public testing::WithParamInterface, public ov::test::SubgraphBaseTest, @@ -42,20 +39,20 @@ class ROIPoolingCPULayerTest : public testing::WithParamInterface additionalConfig; + ov::AnyMap additionalConfig; std::tie(basicParamsSet, cpuParams, propMode, additionalConfig) = obj.param; roiPoolingShapes inputShapes; std::vector poolShape; float spatial_scale; - ngraph::helpers::ROIPoolingTypes pool_method; - InferenceEngine::Precision netPrecision; + utils::ROIPoolingTypes pool_method; + ov::element::Type netPrecision; std::string targetDevice; std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision, targetDevice) = basicParamsSet; std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; + result << "netPRC=" << netPrecision.to_string() << "_"; for (const auto& shape : inputShapes) { result << ov::test::utils::partialShape2str({ shape.first }) << "_"; } @@ -74,19 +71,18 @@ class ROIPoolingCPULayerTest : public testing::WithParamInterface(roi_pooling)}; + ov::ResultVector results{std::make_shared(roi_pooling)}; - function = makeNgraphFunction(ngPrc, params, roi_pooling, "ROIPooling"); - functionRefs = ngraph::clone_function(*function); + function = makeNgraphFunction(netPrecision, params, roi_pooling, "ROIPooling"); + functionRefs = ov::clone_model(*function); } }; @@ -226,11 +226,8 @@ TEST_P(ROIPoolingCPULayerTest, CompareWithRefs) { namespace { -std::vector> additionalConfig{ - {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}}, - {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}} -}; - +std::vector additionalConfig = {{ov::hint::inference_precision(ov::element::f32)}, + {ov::hint::inference_precision(ov::element::bf16)}}; /* have to select particular implementation type, since currently * nodes always choose the best one */ std::vector selectCPUInfoForDevice() { @@ -329,21 +326,21 @@ const std::vector> 
pooledShapes_bilinear = { {6, 6} }; -const std::vector netPRCs = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::BF16}; +const std::vector netPRCs = {ov::element::f32, ov::element::bf16}; const std::vector spatial_scales = {0.625f, 1.f}; const auto test_ROIPooling_max = ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::ValuesIn(pooledShapes_max), ::testing::ValuesIn(spatial_scales), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX), + ::testing::Values(utils::ROIPoolingTypes::ROI_MAX), ::testing::ValuesIn(netPRCs), ::testing::Values(ov::test::utils::DEVICE_CPU)); const auto test_ROIPooling_bilinear = ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::ValuesIn(pooledShapes_bilinear), ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), + ::testing::Values(utils::ROIPoolingTypes::ROI_BILINEAR), ::testing::ValuesIn(netPRCs), ::testing::Values(ov::test::utils::DEVICE_CPU)); @@ -363,18 +360,19 @@ INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingCPU_bilinear, ::testing::ValuesIn(additionalConfig)), ROIPoolingCPULayerTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingCPU_bilinear_ultimateRightBorderProposal, - ROIPoolingCPULayerTest, - ::testing::Combine(::testing::Combine(::testing::Values(roiPoolingShapes{{{}, {{1, 1, 50, 50}}}, {{}, {{1, 5}}}}), - ::testing::Values(std::vector { 4, 4 }), - ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), - ::testing::Values(InferenceEngine::Precision::FP32), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(selectCPUInfoForDevice()), - ::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER), - ::testing::Values(std::map{ - {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}}})), - ROIPoolingCPULayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_ROIPoolingCPU_bilinear_ultimateRightBorderProposal, + ROIPoolingCPULayerTest, + ::testing::Combine(::testing::Combine(::testing::Values(roiPoolingShapes{{{}, {{1, 1, 50, 50}}}, {{}, {{1, 5}}}}), + ::testing::Values(std::vector{4, 4}), + ::testing::Values(spatial_scales[1]), + ::testing::Values(utils::ROIPoolingTypes::ROI_BILINEAR), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(selectCPUInfoForDevice()), + ::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER), + ::testing::Values(ov::AnyMap{{ov::hint::inference_precision(ov::element::f32)}})), + ROIPoolingCPULayerTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp index 707f0b2a8f3b69..475607a743fe07 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roialign.cpp @@ -3,17 +3,13 @@ // #include "test_utils/cpu_test_utils.hpp" -#include - +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using ROIAlignShapes = std::vector; using ROIAlignSpecificParams = std::tuple< @@ -33,14 
+29,14 @@ using ROIAlignLayerTestParams = std::tuple< >; using ROIAlignLayerCPUTestParamsSet = std::tuple< - CPULayerTestsDefinitions::ROIAlignLayerTestParams, + ROIAlignLayerTestParams, CPUSpecificParams>; class ROIAlignLayerCPUTest : public testing::WithParamInterface, public SubgraphBaseTest, public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - CPULayerTestsDefinitions::ROIAlignLayerTestParams basicParamsSet; + ROIAlignLayerTestParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; std::string td; @@ -82,14 +78,18 @@ class ROIAlignLayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); ov::Tensor data_tensor; const auto& dataPrecision = funcInputs[0].get_element_type(); const auto& dataShape = targetInputStaticShapes.front(); - data_tensor = ov::test::utils::create_and_fill_tensor(dataPrecision, dataShape, 10, 0, 1000); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 10; + in_data.resolution = 1000; + data_tensor = ov::test::utils::create_and_fill_tensor(dataPrecision, dataShape, in_data); const auto& coordsET = funcInputs[1].get_element_type(); auto coordsTensor = ov::Tensor{ coordsET, targetInputStaticShapes[1] }; @@ -104,10 +104,10 @@ class ROIAlignLayerCPUTest : public testing::WithParamInterface(coordsTensor.data()); for (size_t i = 0; i < coordsTensor.get_size(); i += 4) { - coordsTensorData[i] = static_cast(ngraph::bfloat16(1.f).to_bits()); - coordsTensorData[i + 1] = static_cast(ngraph::bfloat16(1.f).to_bits()); - coordsTensorData[i + 2] = static_cast(ngraph::bfloat16(19.f).to_bits()); - coordsTensorData[i + 3] = static_cast(ngraph::bfloat16(19.f).to_bits()); + coordsTensorData[i] = static_cast(ov::bfloat16(1.f).to_bits()); + coordsTensorData[i + 1] = static_cast(ov::bfloat16(1.f).to_bits()); + coordsTensorData[i + 2] = static_cast(ov::bfloat16(19.f).to_bits()); + coordsTensorData[i + 3] = static_cast(ov::bfloat16(19.f).to_bits()); } } else { OPENVINO_THROW("roi align. 
Unsupported precision: ", coordsET); @@ -127,12 +127,12 @@ class ROIAlignLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - CPULayerTestsDefinitions::ROIAlignSpecificParams roiAlignParams; + ROIAlignSpecificParams roiAlignParams; ElementType inputPrecision; std::tie(roiAlignParams, inputPrecision, targetDevice) = basicParamsSet; @@ -151,19 +151,26 @@ class ROIAlignLayerCPUTest : public testing::WithParamInterface(inputPrecision, shape)); } - auto int_param = std::make_shared(ngraph::element::i32, inputDynamicShapes[2]); - auto pooling_mode = ngraph::EnumNames::as_enum(mode); - auto aligned_mode = ngraph::EnumNames::as_enum(alignedMode); - - auto roialign = std::make_shared(float_params[0], float_params[1], int_param, pooledH, pooledW, - samplingRatio, spatialScale, pooling_mode, aligned_mode); + auto int_param = std::make_shared(ov::element::i32, inputDynamicShapes[2]); + auto pooling_mode = ov::EnumNames::as_enum(mode); + auto aligned_mode = ov::EnumNames::as_enum(alignedMode); + + auto roialign = std::make_shared(float_params[0], + float_params[1], + int_param, + pooledH, + pooledW, + samplingRatio, + spatialScale, + pooling_mode, + aligned_mode); selectedType = makeSelectedTypeStr(selectedType, inputPrecision); if (inputPrecision == ElementType::bf16) { rel_threshold = 1e-2; } - ngraph::ParameterVector params{ float_params[0], float_params[1], int_param }; + ov::ParameterVector params{ float_params[0], float_params[1], int_param }; function = makeNgraphFunction(inputPrecision, params, roialign, "ROIAlign"); } }; @@ -178,15 +185,15 @@ namespace { /* CPU PARAMS */ std::vector filterCPUInfoForDevice() { std::vector resCPUParams; - if (InferenceEngine::with_cpu_x86_avx512f()) { + if (ov::with_cpu_x86_avx512f()) { resCPUParams.push_back(CPUSpecificParams{{nchw, nc, x}, {nchw}, {"jit_avx512"}, {"jit_avx512"}}); resCPUParams.push_back(CPUSpecificParams{{nhwc, nc, x}, {nhwc}, {"jit_avx512"}, {"jit_avx512"}}); resCPUParams.push_back(CPUSpecificParams{{nChw16c, nc, x}, {nChw16c}, {"jit_avx512"}, {"jit_avx512"}}); - } else if (InferenceEngine::with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { resCPUParams.push_back(CPUSpecificParams{{nchw, nc, x}, {nchw}, {"jit_avx2"}, {"jit_avx2"}}); resCPUParams.push_back(CPUSpecificParams{{nhwc, nc, x}, {nhwc}, {"jit_avx2"}, {"jit_avx2"}}); resCPUParams.push_back(CPUSpecificParams{{nChw8c, nc, x}, {nChw8c}, {"jit_avx2"}, {"jit_avx2"}}); - } else if (InferenceEngine::with_cpu_x86_sse42()) { + } else if (ov::with_cpu_x86_sse42()) { resCPUParams.push_back(CPUSpecificParams{{nchw, nc, x}, {nchw}, {"jit_sse42"}, {"jit_sse42"}}); resCPUParams.push_back(CPUSpecificParams{{nhwc, nc, x}, {nhwc}, {"jit_sse42"}, {"jit_sse42"}}); resCPUParams.push_back(CPUSpecificParams{{nChw8c, nc, x}, {nChw8c}, {"jit_sse42"}, {"jit_sse42"}}); @@ -265,5 +272,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_ROIAlignLayoutTest, ROIAlignLayerCPUTest, ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice())), ROIAlignLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp index c66384281982ef..eae2668ff5c16c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp +++ 
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp
index c66384281982ef..eae2668ff5c16c 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/roll.cpp
@@ -7,9 +7,9 @@
 #include "test_utils/cpu_test_utils.hpp"

 using namespace CPUTestUtils;
-using namespace ov::test;

-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {

 using RollCPUTestParams = typename std::tuple<
     InputShape,   // Input shape
@@ -58,12 +58,14 @@ class RollLayerCPUTest : public testing::WithParamInterface<RollCPUTestParams>,
         for (auto&& shape : inputDynamicShapes) {
             paramsIn.push_back(std::make_shared<ov::op::v0::Parameter>(inputPrecision, shape));
         }
-        auto shiftNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0);
-        auto axesNode = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0);
-
-        const auto roll = std::make_shared<ngraph::opset7::Roll>(paramsIn[0], shiftNode, axesNode);
-        const ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(roll)};
-        function = std::make_shared<ngraph::Function>(results, paramsIn, "roll");
+        auto shiftNode =
+            std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{shift.size()}, shift)->output(0);
+        auto axesNode =
+            std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{axes.size()}, axes)->output(0);
+
+        const auto roll = std::make_shared<ov::op::v7::Roll>(paramsIn[0], shiftNode, axesNode);
+        const ov::ResultVector results{std::make_shared<ov::op::v0::Result>(roll)};
+        function = std::make_shared<ov::Model>(results, paramsIn, "roll");
     }
 };
@@ -146,5 +148,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_RollCPU_5DRepeatingAxesNegativeShift, RollLayerCP
                          ::testing::Values(ov::test::utils::DEVICE_CPU)),
                          RollLayerCPUTest::getTestCaseName);
-}  // namespace
-}  // namespace CPULayerTestsDefinitions
+}  // namespace
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp
index f6905ebd4146b9..33dce880d9c8f9 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_ND_update.cpp
@@ -2,17 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include <common_test_utils/ov_tensor_utils.hpp>
-#include "test_utils/cpu_test_utils.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
+#include "test_utils/cpu_test_utils.hpp"

-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ov::test;
-
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {

 using ScatterNDUpdateShapes = std::vector<InputShape>;
 using IndicesValues = std::vector<std::int64_t>;
@@ -21,12 +17,13 @@ struct ScatterNDUpdateLayerParams {
     IndicesValues indicesValues;
 };

-using scatterUpdateParams = std::tuple<
-    ScatterNDUpdateLayerParams,
-    ElementType,  // input precision
-    ElementType>; // indices precision
+using scatterUpdateParams = std::tuple<ScatterNDUpdateLayerParams,
+                                       ElementType,   // input precision
+                                       ElementType>;  // indices precision

-class ScatterNDUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams>, public SubgraphBaseTest, public CPUTestsBase {
+class ScatterNDUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams>,
+                                    public SubgraphBaseTest,
+                                    public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<scatterUpdateParams> obj) {
         ScatterNDUpdateLayerParams scatterParams;
@@ -39,7 +36,7 @@ class ScatterNDUpdateLayerCPUTest : public testing::WithParamInterface<scatterU
             dataParams.push_back(std::make_shared<ov::op::v0::Parameter>(inputPrecision, shape));
         }
         auto indicesParam = std::make_shared<ov::op::v0::Parameter>(idxPrecision, inputDynamicShapes[1]);
@@ -110,9 +111,9 @@ class ScatterNDUpdateLayerCPUTest : public testing::WithParamInterface<scatterU
         dataParams[0]->set_friendly_name("Param_2");
         dataParams[1]->set_friendly_name("Param_3");

-        auto scatter = std::make_shared<ngraph::opset4::ScatterNDUpdate>(dataParams[0], indicesParam, dataParams[1]);
+        auto scatter = std::make_shared<ov::op::v3::ScatterNDUpdate>(dataParams[0], indicesParam, dataParams[1]);

-        ngraph::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] };
+        ov::ParameterVector allParams{dataParams[0], indicesParam, dataParams[1]};
         function = makeNgraphFunction(inputPrecision, allParams, scatter, "ScatterNDUpdateLayerCPUTest");
     }
 };
@@ -129,40 +130,26 @@ const std::vector<ScatterNDUpdateLayerParams> scatterParams = {
             {{2, 2, 1}, {{2, 2, 1}, {2, 2, 1}, {2, 2, 1}}},
             {{-1, -1, -1, -1, -1, -1}, {{2, 2, 9, 10, 9, 10}, {2, 2, 1, 11, 2, 5}, {2, 2, 15, 8, 1, 7}}},
         },
-        IndicesValues{ 5, 6, 2, 8 }
-    },
-    ScatterNDUpdateLayerParams{
-        ScatterNDUpdateShapes{
-            {{-1, -1, -1, -1}, {{ 10, 9, 9, 11 }, { 7, 5, 3, 12 }, { 3, 4, 9, 8 }}},
-            {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
-            {{-1, -1}, {{2, 11}, {2, 12}, {2, 8}}}
-        },
-        IndicesValues{ 0, 1, 1, 2, 2, 2 }
-    },
+        IndicesValues{5, 6, 2, 8}},
+    ScatterNDUpdateLayerParams{ScatterNDUpdateShapes{{{-1, -1, -1, -1}, {{10, 9, 9, 11}, {7, 5, 3, 12}, {3, 4, 9, 8}}},
+                                                     {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
+                                                     {{-1, -1}, {{2, 11}, {2, 12}, {2, 8}}}},
+                               IndicesValues{0, 1, 1, 2, 2, 2}},
     ScatterNDUpdateLayerParams{
-        ScatterNDUpdateShapes{
-            {{{3, 10}, -1, {3, 9}, -1}, {{ 10, 9, 9, 11 }, { 7, 5, 3, 12 }, { 3, 4, 9, 8 }}},
-            {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
-            {{{2, 4}, -1}, {{2, 11}, {2, 12}, {2, 8}}}
-        },
-        IndicesValues{ 0, 1, 1, 2, 2, 2 }
-    },
+        ScatterNDUpdateShapes{{{{3, 10}, -1, {3, 9}, -1}, {{10, 9, 9, 11}, {7, 5, 3, 12}, {3, 4, 9, 8}}},
+                              {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
+                              {{{2, 4}, -1}, {{2, 11}, {2, 12}, {2, 8}}}},
+        IndicesValues{0, 1, 1, 2, 2, 2}},
     ScatterNDUpdateLayerParams{
-        ScatterNDUpdateShapes{
-            {{{3, 10}, {4, 11}, {3, 9}, {8, 15}}, {{ 10, 9, 9, 11 }, { 7, 5, 3, 12 }, { 3, 4, 9, 8 }}},
-            {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
-            {{{2, 4}, -1}, {{2, 11}, {2, 12}, {2, 8}}}
-        },
-        IndicesValues{ 0, 1, 1, 2, 2, 2 }
-    },
+        ScatterNDUpdateShapes{{{{3, 10}, {4, 11}, {3, 9}, {8, 15}}, {{10, 9, 9, 11}, {7, 5, 3, 12}, {3, 4, 9, 8}}},
+                              {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
+                              {{{2, 4}, -1}, {{2, 11}, {2, 12}, {2, 8}}}},
+        IndicesValues{0, 1, 1, 2, 2, 2}},
     ScatterNDUpdateLayerParams{
-        ScatterNDUpdateShapes{
-            {{{3, 10}, {4, 11}, {3, 9}, {8, 15}}, {{ 10, 9, 9, 11 }, { 7, 5, 3, 12 }, { 3, 4, 9, 8 }}},
-            {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
-            {{{2, 4}, -1}, {{2, 11}, {2, 12}, {2, 8}}}
-        },
-        IndicesValues{ -1, -1, -1, -2, -2, -2 }
-    },
+        ScatterNDUpdateShapes{{{{3, 10}, {4, 11}, {3, 9}, {8, 15}}, {{10, 9, 9, 11}, {7, 5, 3, 12}, {3, 4, 9, 8}}},
+                              {{2, 3}, {{2, 3}, {2, 3}, {2, 3}}},
+                              {{{2, 4}, -1}, {{2, 11}, {2, 12}, {2, 8}}}},
+        IndicesValues{-1, -1, -1, -2, -2, -2}},
 };

 const std::vector<ElementType> inputPrecisions = {
@@ -175,10 +162,11 @@ const std::vector<ElementType> constantPrecisions = {
     ElementType::i64,
 };

-INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ScatterNDUpdateLayerCPUTest,
-                         ::testing::Combine(
-                             ::testing::ValuesIn(scatterParams),
-                             ::testing::ValuesIn(inputPrecisions),
-                             ::testing::ValuesIn(constantPrecisions)),
-                         ScatterNDUpdateLayerCPUTest::getTestCaseName);
-}  // namespace CPULayerTestsDefinitions
+INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs,
+                         ScatterNDUpdateLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(scatterParams),
+                                            ::testing::ValuesIn(inputPrecisions),
+                                            ::testing::ValuesIn(constantPrecisions)),
+                         ScatterNDUpdateLayerCPUTest::getTestCaseName);
+}  // namespace test
+}  // namespace ov
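// [Editor's note] The same mechanical namespace move repeats in every file of
// this patch: the legacy CPULayerTestsDefinitions / SubgraphTestsDefinitions
// namespace (plus a `using namespace ov::test;`) is replaced by nesting the
// code directly inside ov::test. Schematically:
//
//     namespace ov {
//     namespace test {
//
//     // test classes, parameter tables, INSTANTIATE_TEST_SUITE_P calls ...
//
//     }  // namespace test
//     }  // namespace ov
//
// which is why every file's closing braces change throughout this patch.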
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp
index bdbee3447f0c0b..9d384723f6eff1 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_elements_update.cpp
@@ -2,17 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "test_utils/cpu_test_utils.hpp"
-#include <common_test_utils/ov_tensor_utils.hpp>
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
+#include "test_utils/cpu_test_utils.hpp"

-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ov::test;
-
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {

 using ScatterElementsUpdateShapes = std::vector<InputShape>;
 using IndicesValues = std::vector<std::int64_t>;
@@ -21,13 +17,14 @@ struct ScatterElementsUpdateLayerParams {
     IndicesValues indicesValues;
 };

-using scatterUpdateParams = std::tuple<
-    ScatterElementsUpdateLayerParams,
-    std::int64_t, // axis
-    ElementType,  // input precision
-    ElementType>; // indices precision
+using scatterUpdateParams = std::tuple<ScatterElementsUpdateLayerParams,
+                                       std::int64_t,  // axis
+                                       ElementType,   // input precision
+                                       ElementType>;  // indices precision

-class ScatterElementsUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams>, public SubgraphBaseTest, public CPUTestsBase {
+class ScatterElementsUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams>,
+                                          public SubgraphBaseTest,
+                                          public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<scatterUpdateParams> obj) {
         ScatterElementsUpdateLayerParams scatterParams;
@@ -41,7 +38,7 @@ class ScatterElementsUpdateLayerCPUTest : public testing::WithParamInterface<sc
             dataParams.push_back(std::make_shared<ov::op::v0::Parameter>(inputPrecision, shape));
         }
         auto indicesParam = std::make_shared<ov::op::v0::Parameter>(idxPrecision, inputDynamicShapes[1]);
@@ -114,10 +115,11 @@ class ScatterElementsUpdateLayerCPUTest : public testing::WithParamInterface<sc
         dataParams[0]->set_friendly_name("Param_2");
         dataParams[1]->set_friendly_name("Param_3");

-        auto axisNode = ngraph::opset3::Constant::create(idxPrecision, {}, { axis });
-        auto scatter = std::make_shared<ngraph::opset3::ScatterElementsUpdate>(dataParams[0], indicesParam, dataParams[1], axisNode);
+        auto axisNode = ov::op::v0::Constant::create(idxPrecision, {}, {axis});
+        auto scatter =
+            std::make_shared<ov::op::v3::ScatterElementsUpdate>(dataParams[0], indicesParam, dataParams[1], axisNode);

-        ngraph::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] };
+        ov::ParameterVector allParams{dataParams[0], indicesParam, dataParams[1]};
         function = makeNgraphFunction(inputPrecision, allParams, scatter, "ScatterElementsUpdateLayerCPUTest");
     }
 };
@@ -127,7 +129,7 @@ TEST_P(ScatterElementsUpdateLayerCPUTest, CompareWithRefs) {
     CheckPluginRelatedResults(compiledModel, "ScatterUpdate");
 }

-const std::vector<std::int64_t> axes = { -3, -2, -1, 0, 1, 2 };
+const std::vector<std::int64_t> axes = {-3, -2, -1, 0, 1, 2};

 const std::vector<ScatterElementsUpdateLayerParams> scatterParams = {
     ScatterElementsUpdateLayerParams{
@@ -167,11 +169,12 @@ const std::vector<ElementType> constantPrecisions = {
     ElementType::i64,
 };

-INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ScatterElementsUpdateLayerCPUTest,
-                         ::testing::Combine(
-                             ::testing::ValuesIn(scatterParams),
-                             ::testing::ValuesIn(axes),
-                             ::testing::ValuesIn(inputPrecisions),
-                             ::testing::ValuesIn(constantPrecisions)),
-                         ScatterElementsUpdateLayerCPUTest::getTestCaseName);
-}  // namespace CPULayerTestsDefinitions
+INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs,
+                         ScatterElementsUpdateLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(scatterParams),
+                                            ::testing::ValuesIn(axes),
+                                            ::testing::ValuesIn(inputPrecisions),
+                                            ::testing::ValuesIn(constantPrecisions)),
+                         ScatterElementsUpdateLayerCPUTest::getTestCaseName);
+}  // namespace test
+}  // namespace ov
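// [Editor's note] Constants migrate from the ngraph::opset* aliases to the
// concrete ov::op::vN types, as in the axisNode change above. Minimal sketch
// of the (otherwise unchanged) factory signature; the scalar i64 axis = 1 is
// illustrative:

#include "openvino/op/constant.hpp"

std::shared_ptr<ov::op::v0::Constant> make_axis_constant() {
    // create(element type, shape, values); an empty ov::Shape{} means a scalar
    return ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {int64_t{1}});
}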
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp
index 858a27a94109c1..3c43939e654bf2 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/scatter_update.cpp
@@ -2,32 +2,31 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "test_utils/cpu_test_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
+#include "test_utils/cpu_test_utils.hpp"

-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ov::test;

-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
+
 using ScatterUpdateShapes = std::vector<InputShape>;
 using IndicesDescription = std::pair<ov::Shape, std::vector<std::int64_t>>;
 using Axis = std::int64_t;

 struct ScatterUpdateLayerParams {
-    ScatterUpdateShapes inputShapes; // shapes for "data" and "updates" inputs
-    IndicesDescription indicesDescriprion; // indices shapes and values
+    ScatterUpdateShapes inputShapes;        // shapes for "data" and "updates" inputs
+    IndicesDescription indicesDescriprion;  // indices shapes and values
     Axis axis;
 };

-using scatterUpdateParams = std::tuple<
-    ScatterUpdateLayerParams,
-    ElementType,  // input precision
-    ElementType>; // indices precision
+using scatterUpdateParams = std::tuple<ScatterUpdateLayerParams,
+                                       ElementType,   // input precision
+                                       ElementType>;  // indices precision

-class ScatterUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams>, public SubgraphBaseTest, public CPUTestsBase {
+class ScatterUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams>,
+                                  public SubgraphBaseTest,
+                                  public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<scatterUpdateParams> obj) {
         ScatterUpdateLayerParams scatterParams;
@@ -41,7 +40,7 @@ class ScatterUpdateLayerCPUTest : public testing::WithParamInterface<scatterUpdateParams
 scatterParams = {
+    ScatterUpdateLayerParams{ScatterUpdateShapes{{{-1, -1, -1, -1}, {{4, 12, 3, 11}, {7, 11, 2, 3}, {3, 9, 4, 10}}},
+                                                 {{-1, -1, -1, -1}, {{4, 8, 3, 11}, {7, 8, 2, 3}, {3, 8, 4, 10}}}},
+                             IndicesDescription{{8}, {0, 2, 4, 6, 1, 3, 5, 7}},
+                             Axis{1}},
+    ScatterUpdateLayerParams{ScatterUpdateShapes{{{-1, -1, -1, -1}, {{4, 12, 3, 11}, {7, 9, 1, 12}, {3, 2, 1, 9}}},
+                                                 {{-1, -1, -1, -1}, {{4, 12, 3, 8}, {7, 9, 1, 8}, {3, 2, 1, 8}}}},
+                             IndicesDescription{{8}, {0, 2, 4, 6, 1, 3, 5, 7}},
+                             Axis{3}},
     ScatterUpdateLayerParams{
-        ScatterUpdateShapes{
-            {{-1, -1, -1, -1}, {{4, 12, 3, 11}, {7, 11, 2, 3}, {3, 9, 4, 10}}},
-            {{-1, -1, -1, -1}, {{4, 8, 3, 11}, {7, 8, 2, 3}, {3, 8, 4, 10}}}
-        },
-        IndicesDescription{{8}, {0, 2, 4, 6, 1, 3, 5, 7}},
-        Axis{1}
-    },
+        ScatterUpdateShapes{{{-1, -1, -1, -1, -1}, {{5, 9, 10, 3, 4}, {7, 8, 11, 2, 2}, {11, 3, 12, 2, 2}}},
+                            {{-1, -1, -1, -1, -1, -1}, {{5, 9, 4, 2, 3, 4}, {7, 8, 4, 2, 2, 2}, {11, 3, 4, 2, 2, 2}}}},
+        IndicesDescription{{4, 2}, {0, 2, 4, 6, 1, 3, 5, 7}},
+        Axis{2}},
     ScatterUpdateLayerParams{
-        ScatterUpdateShapes{
-            {{-1, -1, -1, -1}, {{4, 12, 3, 11}, {7, 9, 1, 12}, {3, 2, 1, 9}}},
-            {{-1, -1, -1, -1}, {{4, 12, 3, 8}, {7, 9, 1, 8}, {3, 2, 1, 8}}}
-        },
-        IndicesDescription{{8}, {0, 2, 4, 6, 1, 3, 5, 7}},
-        Axis{3}
-    },
-    ScatterUpdateLayerParams{
-        ScatterUpdateShapes{
-            {{-1, -1, -1, -1, -1}, {{5, 9, 10, 3, 4}, {7, 8, 11, 2, 2}, {11, 3, 12, 2, 2}}},
-            {{-1, -1, -1, -1, -1, -1}, {{5, 9, 4, 2, 3, 4}, {7, 8, 4, 2, 2, 2}, {11, 3, 4, 2, 2, 2}}}
-        },
-        IndicesDescription{{ 4, 2 }, { 0, 2, 4, 6, 1, 3, 5, 7 }},
-        Axis{2}
-    },
-    ScatterUpdateLayerParams{
-        ScatterUpdateShapes{
-            {{-1, -1, -1, -1, -1}, {{8, 9, 10, 3, 4}, {11, 3, 4, 3, 4}, {12, 9, 11, 2, 2}}},
-            {{-1, -1, -1, -1, -1, -1}, {{4, 2, 9, 10, 3, 4}, {4, 2, 3, 4, 3, 4}, {4, 2, 9, 11, 2, 2}}}
-        },
-        IndicesDescription{{ 4, 2 }, { 0, 2, 4, 6, 1, 3, 5, 7 }},
-        Axis{0}
-    },
+        ScatterUpdateShapes{{{-1, -1, -1, -1, -1}, {{8, 9, 10, 3, 4}, {11, 3, 4, 3, 4}, {12, 9, 11, 2, 2}}},
+                            {{-1, -1, -1, -1, -1, -1}, {{4, 2, 9, 10, 3, 4}, {4, 2, 3, 4, 3, 4}, {4, 2, 9, 11, 2, 2}}}},
+        IndicesDescription{{4, 2}, {0, 2, 4, 6, 1, 3, 5, 7}},
+        Axis{0}},
     ScatterUpdateLayerParams{
         ScatterUpdateShapes{
             {{{8, 12}, {3, 9}, {4, 11}, {2, 3}, {2, 4}}, {{8, 9, 10, 3, 4}, {11, 3, 4, 3, 4}, {12, 9, 11, 2, 2}}},
-            {{4, 2, {3, 9}, {4, 11}, {2, 3}, {2, 4}}, {{4, 2, 9, 10, 3, 4}, {4, 2, 3, 4, 3, 4}, {4, 2, 9, 11, 2, 2}}}
-        },
-        IndicesDescription{{ 4, 2 }, { 0, 2, 4, 6, 1, 3, 5, 7 }},
-        Axis{0}
-    },
+            {{4, 2, {3, 9}, {4, 11}, {2, 3}, {2, 4}}, {{4, 2, 9, 10, 3, 4}, {4, 2, 3, 4, 3, 4}, {4, 2, 9, 11, 2, 2}}}},
+        IndicesDescription{{4, 2}, {0, 2, 4, 6, 1, 3, 5, 7}},
+        Axis{0}},
 };

 const std::vector<ElementType> inputPrecisions = {
@@ -140,10 +124,11 @@ const std::vector<ElementType> constantPrecisions = {
     ElementType::i64,
 };

-INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs, ScatterUpdateLayerCPUTest,
-                         ::testing::Combine(
-                             ::testing::ValuesIn(scatterParams),
-                             ::testing::ValuesIn(inputPrecisions),
-                             ::testing::ValuesIn(constantPrecisions)),
-                         ScatterUpdateLayerCPUTest::getTestCaseName);
-}  // namespace CPULayerTestsDefinitions
+INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs,
+                         ScatterUpdateLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(scatterParams),
+                                            ::testing::ValuesIn(inputPrecisions),
+                                            ::testing::ValuesIn(constantPrecisions)),
+                         ScatterUpdateLayerCPUTest::getTestCaseName);
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp
index f66768c314c36e..0bda49805341c1 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp
@@ -71,21 +71,21 @@ class SelectLayerCPUTest : public testing::WithParamInterface,
     void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
         inputs.clear();
         const auto& modelInputs = function->inputs();
-        auto condTensor = ov::test::utils::create_and_fill_tensor(modelInputs[0].get_element_type(),
-                                                                  targetInputStaticShapes[0],
-                                                                  3,
-                                                                  -1,
-                                                                  2);
-        auto thenTensor = ov::test::utils::create_and_fill_tensor(modelInputs[1].get_element_type(),
-                                                                  targetInputStaticShapes[1],
-                                                                  10,
-                                                                  -10,
-                                                                  2);
-        auto elseTensor = ov::test::utils::create_and_fill_tensor(modelInputs[2].get_element_type(),
-                                                                  targetInputStaticShapes[2],
-                                                                  10,
-                                                                  0,
-                                                                  2);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = -1;
+        in_data.range = 3;
+        in_data.resolution = 2;
+        auto condTensor = ov::test::utils::create_and_fill_tensor(modelInputs[0].get_element_type(), targetInputStaticShapes[0], in_data);
+
+        in_data.start_from = -10;
+        in_data.range = 10;
+        in_data.resolution = 2;
+        auto thenTensor = ov::test::utils::create_and_fill_tensor(modelInputs[1].get_element_type(), targetInputStaticShapes[1], in_data);
+
+        in_data.start_from = 0;
+        in_data.range = 10;
+        in_data.resolution = 2;
+        auto elseTensor = ov::test::utils::create_and_fill_tensor(modelInputs[2].get_element_type(), targetInputStaticShapes[2], in_data);
         inputs.insert({modelInputs[0].get_node_shared_ptr(), condTensor});
         inputs.insert({modelInputs[1].get_node_shared_ptr(), thenTensor});
         inputs.insert({modelInputs[2].get_node_shared_ptr(), elseTensor});
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp
index 196281d1572380..6b195a7086e74a 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp
@@ -103,20 +103,20 @@ class ShapeOpsCPUTest : public testing::WithParamInterface,
 #undef RESHAPE_TEST_CASE
             }
         } else {
+            ov::test::utils::InputGenerateData in_data;
             if (isWithNonZero) {
                 // fill tensor with all zero, so the NonZero op will create 0 shape as the input of reshape op
-                tensor =
-                    utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 1, 0);
+                in_data.start_from = 0;
+                in_data.range = 1;
+                tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data);
             } else {
                 if (funcInput.get_element_type().is_real()) {
-                    tensor = utils::create_and_fill_tensor(funcInput.get_element_type(),
-                                                           targetInputStaticShapes[i],
-                                                           10,
-                                                           0,
-                                                           1000);
+                    in_data.start_from = 0;
+                    in_data.range = 10;
+                    in_data.resolution = 1000;
+                    tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data);
                 } else {
-                    tensor =
-                        utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
+                    tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
                 }
             }
         }
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp
index 45b68a6c8379e6..3629c5b28c2993 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp
@@ -71,16 +71,16 @@ class Slice8LayerCPUTest : public testing::WithParamInterface
 ();
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp
index ac2b9cef88ec44..6d947ce9f3e1d1 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp
@@ -25,7 +25,7 @@ class SplitLayerCPUTest : public testing::WithParamInterface
         int64_t axis;
         ElementType netPrecision;
         InputShape inputShapes;
-        InferenceEngine::SizeVector outIndices;
+        std::vector<size_t> outIndices;
         CPUSpecificParams cpuParams;
         std::tie(numSplits, axis, netPrecision, inputShapes, outIndices, cpuParams) = obj.param;
@@ -54,7 +54,7 @@ class SplitLayerCPUTest : public testing::WithParamInterface
         int axis;
         ElementType netPrecision;
         InputShape inputShapes;
-        InferenceEngine::SizeVector outIndices;
+        ov::Shape outIndices;
         CPUSpecificParams cpuParams;
         std::tie(numSplits, axis, netPrecision, inputShapes, outIndices, cpuParams) = this->GetParam();
         if (outIndices.empty()) {
@@ -64,7 +64,7 @@ class SplitLayerCPUTest : public testing::WithParamInterface
         }
         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;

-        selectedType += std::string("_") + InferenceEngine::details::convertPrecision(netPrecision).name();
+        selectedType += std::string("_") + ov::element::Type(netPrecision).to_string();

         init_input_shapes({inputShapes});
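// [Editor's note] The legacy InferenceEngine::details::convertPrecision(...).name()
// lookup is replaced by the ov::element::Type string form, as in the
// selectedType hunks above. Self-contained sketch ("_f32" is the value
// produced for f32):

#include "openvino/core/type/element_type.hpp"
#include <string>

std::string precision_suffix(ov::element::Type_t prec) {
    return std::string("_") + ov::element::Type(prec).to_string();  // e.g. "_f32"
}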
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp
index 58abdabb2c5b9d..f76100141cb7e4 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp
@@ -76,11 +76,11 @@ class StridedSliceLayerCPUTest : public testing::WithParamInterface
         if (funcInput.get_node_shared_ptr()->get_friendly_name() == "data") {
-            int32_t range = std::accumulate(targetInputStaticShapes[0].begin(),
-                                            targetInputStaticShapes[0].end(),
-                                            1,
-                                            std::multiplies<int32_t>());
-            tensor = utils::create_and_fill_tensor(funcInput.get_element_type(),
-                                                   targetInputStaticShapes[0],
-                                                   range,
-                                                   -range / 2,
-                                                   1);
+            int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1, std::multiplies<int32_t>());
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = -range / 2;
+            in_data.range = range;
+            tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[0], in_data);
         }
         inputs.insert({funcInput.get_node_shared_ptr(), tensor});
     }
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp
index 4bd2b50f3042a0..de04d053932260 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp
@@ -61,7 +61,7 @@ class VariadicSplitLayerCPUTest : public testing::WithParamInterface
 GetParam();
         std::tie(inFmts, outFmts, priority, selectedType) = cpuParams;

-        selectedType += std::string("_") + InferenceEngine::details::convertPrecision(netPrecision).name();
+        selectedType += std::string("_") + ov::element::Type(netPrecision).to_string();

         std::vector<InputShape> shapesToInit{inputShapes};
         if (lengthsType == ov::test::utils::InputLayerType::PARAMETER) {
diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/any_layout.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/any_layout.cpp
index 84984dda2bf972..c37704699216d6 100644
--- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/any_layout.cpp
+++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/any_layout.cpp
@@ -3,6 +3,7 @@
 //

 #include "test_utils/cpu_test_utils.hpp"
+#include "openvino/core/preprocess/pre_post_process.hpp"

 namespace ov {
 namespace test {
@@ -41,6 +42,11 @@ class AnyLayoutOnInputsAndOutputs : public ::testing::TestWithParam<ov::Shape> {
         // Create model
         auto function = create_test_function(shape);

+        auto ppp_model = ov::preprocess::PrePostProcessor(function);
+        ppp_model.input().tensor().set_layout("...");
+        ppp_model.output().tensor().set_layout("...");
+        function = ppp_model.build();
+
         auto input = ov::Tensor(ov::element::f32, shape, input_data.data());
         auto output = ov::Tensor(ov::element::f32, shape, output_data.data());
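// [Editor's note] The block added to any_layout.cpp above builds a
// PrePostProcessor that marks the input and output tensor layouts as "..."
// (any layout / no fixed dimension order). Hedged standalone sketch, assuming
// `model` is a valid std::shared_ptr<ov::Model>:

#include "openvino/core/preprocess/pre_post_process.hpp"

std::shared_ptr<ov::Model> mark_any_layout(std::shared_ptr<ov::Model> model) {
    auto ppp = ov::preprocess::PrePostProcessor(model);
    ppp.input().tensor().set_layout("...");   // accept any input layout
    ppp.output().tensor().set_layout("...");  // accept any output layout
    return ppp.build();  // returns the rewritten model, hence the reassignment above
}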
"ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; -using namespace ov::test; using namespace CPUTestUtils; -using namespace InferenceEngine; +namespace ov { +namespace test { -namespace SubgraphTestsDefinitions { using BroadcastEltwiseParams = std::tuple< ElementType, // input precision InputShape, // input shape @@ -134,4 +128,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_BroadcastEltwise, ::testing::ValuesIn(target_shapes)), BroadcastEltwise::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions \ No newline at end of file +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp index fa0c73c2d7381c..73ffc34651042c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_reshape_concat.cpp @@ -6,7 +6,6 @@ #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" - /*This test runs the following subgraph: param1 param2 param3 param4 @@ -39,10 +38,8 @@ Softmax is used as a model of an arbitrary subgraph preceding the pattern. */ -using namespace InferenceEngine; -using namespace ov::test; - -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using VectorShapes = std::vector; @@ -86,32 +83,32 @@ class ConcatReshapeConcatSubgraphTest : public testing::WithParamInterface(input_params[i], softmax_axis); + auto soft_max = std::make_shared(input_params[i], softmax_axis); auto reshape_param = ngraph::builder::makeConstant(ov::element::i32, {1}, {0}); - auto reshape = std::make_shared(soft_max, reshape_param); + auto reshape = std::make_shared(soft_max, reshape_param); first_level_reshapes.push_back(reshape); } - auto concat1 = std::make_shared(ov::NodeVector{first_level_reshapes[0], first_level_reshapes[1]}, concat_axis); - auto concat2 = std::make_shared(ov::NodeVector{first_level_reshapes[2], first_level_reshapes[3]}, concat_axis); + auto concat1 = std::make_shared(ov::NodeVector{first_level_reshapes[0], first_level_reshapes[1]}, concat_axis); + auto concat2 = std::make_shared(ov::NodeVector{first_level_reshapes[2], first_level_reshapes[3]}, concat_axis); ov::NodeVector second_level_reshapes; ov::NodeVector first_level_concats = {concat1, concat2}; for (size_t i = 0; i < number_of_params / 2; ++i) { auto reshape_param = ngraph::builder::makeConstant(ov::element::i32, {1}, {0}); - auto reshape = std::make_shared(first_level_concats[i], reshape_param); + auto reshape = std::make_shared(first_level_concats[i], reshape_param); second_level_reshapes.push_back(reshape); } - auto concat3 = std::make_shared(second_level_reshapes, concat_axis); - auto soft_max = std::make_shared(concat3, softmax_axis); + auto concat3 = std::make_shared(second_level_reshapes, concat_axis); + auto soft_max = std::make_shared(concat3, softmax_axis); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < soft_max->get_output_size(); i++) - results.push_back(std::make_shared(soft_max->output(i))); + results.push_back(std::make_shared(soft_max->output(i))); - function = std::make_shared(results, input_params, 
"ConcatReshapeConcatPattern"); + function = std::make_shared(results, input_params, "ConcatReshapeConcatPattern"); } }; @@ -142,4 +139,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_Concat_Reshape_Concat, ConcatReshapeConcatSubgrap ::testing::ValuesIn(inputShapes), ConcatReshapeConcatSubgraphTest::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions \ No newline at end of file +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp index 96b9a8d1732ca0..f5514c623172ec 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/concat_sdp.cpp @@ -1,23 +1,19 @@ // Copyright (C) 2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/opsets/opset13.hpp" +#include "transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp" -#include -#include - -#include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "common_test_utils/include/common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" -using namespace ov::test; -using namespace ngraph; using namespace CPUTestUtils; -using namespace InferenceEngine; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using ConcatSDPTestParams = std::tuple, @@ -112,7 +108,7 @@ class ConcatSDPTest : public testing::WithParamInterface, v auto concatV = std::make_shared(OutputVector{gatherV, inputParams[2]}, 2); auto sdp = std::make_shared(inputParams[0], concatK, concatV, false); sdp->set_friendly_name("mha"); - auto add = std::make_shared(sdp, op::v0::Constant::create(inType, {1}, {1.0f})); + auto add = std::make_shared(sdp, op::v0::Constant::create(inType, {1}, {1.0f})); auto pastk_assign = std::make_shared(concatK, var_k); auto pastv_assign = std::make_shared(concatV, var_v); pastk_assign->set_friendly_name("pastk_w"); @@ -124,7 +120,7 @@ class ConcatSDPTest : public testing::WithParamInterface, v results.push_back(std::make_shared(pastv_shapeof)); } SinkVector sinks{pastk_assign, pastv_assign}; - function = std::make_shared(results, sinks, inputParams, "ConcatSDP"); + function = std::make_shared(results, sinks, inputParams, "ConcatSDP"); targetDevice = ov::test::utils::DEVICE_CPU; functionRefs = function->clone(); @@ -248,4 +244,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConcatSDPTest, ConcatSDPTest::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_concat.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_concat.cpp index a6d6f0501f1287..796cfea709eb8f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_concat.cpp @@ -11,7 +11,6 @@ #include "test_utils/convolution_params.hpp" #include "test_utils/filter_cpu_info.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; namespace ov { diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp index 
e0f3473794b772..71fcb4122cb739 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_dw_conv.cpp @@ -7,9 +7,12 @@ #include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/node_builders/convolution.hpp" +#include "common_test_utils/node_builders/group_convolution.hpp" + +namespace ov { +namespace test { -using namespace ngraph; -namespace SubgraphTestsDefinitions { class ConvDWConv : virtual public ov::test::SubgraphBaseTest { protected: void SetUp() override { @@ -24,27 +27,27 @@ class ConvDWConv : virtual public ov::test::SubgraphBaseTest { params.push_back(std::make_shared(precision, shape)); } auto conv_weights = ngraph::builder::makeConstant(precision, std::vector{32, 32, 1, 1}, std::vector{}, true); - auto conv = ngraph::builder::makeConvolution(params[0], - conv_weights, - precision, - std::vector{1, 1}, - std::vector{1, 1}, - ov::CoordinateDiff{0, 0}, - ov::CoordinateDiff{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::EXPLICIT, - 32, - true); + auto conv = ov::test::utils::make_convolution(params[0], + conv_weights, + precision, + std::vector{1, 1}, + std::vector{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + std::vector{1, 1}, + ov::op::PadType::EXPLICIT, + 32, + true); auto dw_conv_weights = ngraph::builder::makeConstant(precision, std::vector{32, 1, 1, 3, 3}, std::vector{}, true); - auto dw_conv = ngraph::builder::makeGroupConvolution(conv, - dw_conv_weights, - precision, - std::vector{1, 1}, - ov::CoordinateDiff{1, 1}, - ov::CoordinateDiff{1, 1}, - std::vector{1, 1}, - ngraph::op::PadType::EXPLICIT); + auto dw_conv = ov::test::utils::make_group_convolution(conv, + dw_conv_weights, + precision, + std::vector{1, 1}, + ov::CoordinateDiff{1, 1}, + ov::CoordinateDiff{1, 1}, + std::vector{1, 1}, + ov::op::PadType::EXPLICIT); auto bias_const = ngraph::builder::makeConstant(precision, {1, 32 , 1, 1}, std::vector{}, true); auto bias = std::make_shared(dw_conv, bias_const); function = std::make_shared(bias, params, "ConvDWConv"); @@ -55,4 +58,5 @@ TEST_F(ConvDWConv, smoke_CompareWithRefs) { run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp index 564a6fd4d2c21c..8fed084c64015c 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp @@ -2,28 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include "test_utils/fusing_test_utils.hpp" -#include "test_utils/convolution_params.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/utils/ov_helpers.hpp" +#include "common_test_utils/node_builders/activation.hpp" +#include "common_test_utils/node_builders/convolution.hpp" +#include "internal_properties.hpp" #include "ov_models/builders.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "ov_ops/type_relaxed.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/convolution_params.hpp" +#include "test_utils/fusing_test_utils.hpp" + +#include using namespace CPUTestUtils; -using 
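// [Editor's note] Node-builder helpers move from ngraph::builder::makeConvolution
// to ov::test::utils::make_convolution with the same argument order, as in the
// conv_dw_conv.cpp hunk above. Sketch of the weight-less overload also used
// later in this patch (shapes, 32 channels and f32 are illustrative):

#include "common_test_utils/node_builders/convolution.hpp"

std::shared_ptr<ov::Node> example_conv(const ov::Output<ov::Node>& input) {
    return ov::test::utils::make_convolution(input,
                                             ov::element::f32,
                                             {1, 1},                     // kernel
                                             {1, 1},                     // strides
                                             {0, 0},                     // pads_begin
                                             {0, 0},                     // pads_end
                                             {1, 1},                     // dilations
                                             ov::op::PadType::EXPLICIT,
                                             32);                        // output channels
}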
diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp
index 564a6fd4d2c21c..8fed084c64015c 100644
--- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp
+++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_sum_broadcast.cpp
@@ -2,28 +2,30 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
-#include
-#include "test_utils/fusing_test_utils.hpp"
-#include "test_utils/convolution_params.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
+#include "common_test_utils/node_builders/activation.hpp"
+#include "common_test_utils/node_builders/convolution.hpp"
+#include "internal_properties.hpp"
 #include "ov_models/builders.hpp"
-#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
+#include "ov_models/utils/ov_helpers.hpp"
+#include "ov_ops/type_relaxed.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "test_utils/convolution_params.hpp"
+#include "test_utils/fusing_test_utils.hpp"
+
+#include

 using namespace CPUTestUtils;
-using namespace InferenceEngine;
-using namespace ov::test;

-namespace SubgraphTestsDefinitions {
-typedef std::tuple<
-    InputShape,                          //convShape
-    InputShape,                          //second term shape
-    bool,                                // bias flag
-    fusingSpecificParams,
-    std::map<std::string, std::string>   // config
-> convSumBroadcastParamSet;
+namespace ov {
+namespace test {
+typedef std::tuple<InputShape,            // convShape
+                   InputShape,            // second term shape
+                   bool,                  // bias flag
+                   fusingSpecificParams,
+                   ov::AnyMap>            // config
+    convSumBroadcastParamSet;

 class ConvSumInPlaceTest : public testing::WithParamInterface<convSumBroadcastParamSet>,
                            virtual public SubgraphBaseTest, public CpuTestWithFusing {
@@ -33,7 +35,7 @@ class ConvSumInPlaceTest : public testing::WithParamInterface<convSumBroadcastP
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
         std::tie(convShape, secondShape, bias, fusingParams, additionalConfig) = obj.param;

         std::ostringstream result;
@@ -55,30 +57,30 @@ class ConvSumInPlaceTest : public testing::WithParamInterface<convSumBroadcastP
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
         std::tie(convShape, secondShape, bias, fusingParams, additionalConfig) = this->GetParam();

         std::tie(postOpMgrPtr, fusedOps) = fusingParams;
@@ -108,20 +110,20 @@ class ConvSumInPlaceTest : public testing::WithParamInterface<convSumBroadcastP
-            auto biasNode = ngraph::builder::makeConstant<float>(ngraph::element::Type_t::f32, ngraph::Shape({1, _convOutChannels, 1, 1}), {}, true);
-            conv = std::make_shared<ngraph::opset3::Add>(conv, biasNode);
+            auto biasNode = ngraph::builder::makeConstant<float>(ov::element::Type_t::f32, ov::Shape({1, _convOutChannels, 1, 1}), {}, true);
+            conv = std::make_shared<ov::op::v1::Add>(conv, biasNode);
         }

         auto sum = addSum(conv, inputParams);

         runtimeType = getNetType();
-        if (configuration.count(PluginConfigParams::KEY_ENFORCE_BF16) &&
-            PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16].as<std::string>()) {
-            runtimeType = ngraph::element::Type_t::bf16;
+        auto it = configuration.find(ov::hint::inference_precision.name());
+        if (it != configuration.end() && it->second.as<ov::element::Type>() == ov::element::bf16) {
+            runtimeType = ov::element::Type_t::bf16;
         }

-        if (inputParams.front()->get_element_type() == ngraph::element::i8 || inputParams.front()->get_element_type() == ngraph::element::u8) {
-            runtimeType = ngraph::element::i8;
+        if (inputParams.front()->get_element_type() == ov::element::i8 || inputParams.front()->get_element_type() == ov::element::u8) {
+            runtimeType = ov::element::i8;
         }

         selectedType = "?";
@@ -129,10 +131,8 @@ class ConvSumInPlaceTest : public testing::WithParamInterface<convSumBroadcastP
     std::vector<ptrdiff_t> _padBegin = {0, 0};
     std::vector<ptrdiff_t> _padEnd = {0, 0};
     size_t _convOutChannels = 64;
@@ -194,45 +194,51 @@ TEST_P(ConvSumInPlaceStrided, CompareWithRefs) {

 class ConvSumInPlaceTestInt8 : public ConvSumInPlaceTest {
 public:
-    ngraph::ParameterVector makeParams() override {
-        ngraph::ParameterVector outs(2);
-        outs[0] = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::u8, inputDynamicShapes[0]);
-        outs[1] = std::make_shared<ngraph::opset3::Parameter>(ngraph::element::f32, inputDynamicShapes[1]);
+    ov::ParameterVector makeParams() override {
+        ov::ParameterVector outs(2);
+        outs[0] = std::make_shared<ov::op::v0::Parameter>(ov::element::u8, inputDynamicShapes[0]);
+        outs[1] = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, inputDynamicShapes[1]);
         return outs;
     }

-    std::shared_ptr<ngraph::Node> makeConv(const ngraph::ParameterVector& inputParams) override {
-        using namespace ngraph;
+    std::shared_ptr<ov::Node> makeConv(const ov::ParameterVector& inputParams) override {
         auto inputParamsFP32 = std::make_shared<ov::op::v0::Parameter>(element::f32, inputParams.front()->get_partial_shape());

-        auto convolutionNodeRelaxed = std::make_shared<op::TypeRelaxed<opset1::Convolution>>(
-            *as_type_ptr<opset1::Convolution>(builder::makeConvolution(inputParamsFP32, element::f32, _kernel, _stride, _padBegin,
-                                                                       _padEnd, _dilation, ngraph::op::PadType::EXPLICIT, _convOutChannels)),
-            element::f32);
+        auto convolutionNodeRelaxed = std::make_shared<ov::op::TypeRelaxed<ov::op::v1::Convolution>>(
+            *as_type_ptr<ov::op::v1::Convolution>(ov::test::utils::make_convolution(inputParamsFP32,
+                                                                                    element::f32,
+                                                                                    _kernel,
+                                                                                    _stride,
+                                                                                    _padBegin,
+                                                                                    _padEnd,
+                                                                                    _dilation,
+                                                                                    ov::op::PadType::EXPLICIT,
+                                                                                    _convOutChannels)),
+            ov::element::f32);

         auto inpShape = inputParams.front()->get_partial_shape();
         Shape filterShape = {_convOutChannels, static_cast<size_t>(inpShape[1].get_length())};
         filterShape.insert(filterShape.end(), _kernel.begin(), _kernel.end());
-        auto filterWeightsNode = builder::makeConstant(element::i8, filterShape, {}, true);
+        auto filterWeightsNode = ngraph::builder::makeConstant(ov::element::i8, filterShape, {}, true);

         auto conv = convolutionNodeRelaxed->copy_with_new_inputs({inputParams.front(), filterWeightsNode});

         return conv;
     }

-    std::shared_ptr<ngraph::Node> addSum(std::shared_ptr<ngraph::Node> lastNode, const ngraph::ParameterVector& inputParams) override {
+    std::shared_ptr<ov::Node> addSum(std::shared_ptr<ov::Node> lastNode, const ov::ParameterVector& inputParams) override {
         std::vector<std::string> additionalFusedOps;

-        lastNode = ngraph::builder::makeActivation(lastNode, ngraph::element::f32, ngraph::helpers::Relu);
+        lastNode = ov::test::utils::make_activation(lastNode, ov::element::f32, ov::test::utils::Relu);
         //additionalFusedOps.push_back("Relu");

-        auto fqShape = ngraph::Shape(lastNode->get_output_partial_shape(0).size(), 1);
-        lastNode = ngraph::builder::makeFakeQuantize(lastNode, ngraph::element::f32, 256, fqShape);
+        auto fqShape = ov::Shape(lastNode->get_output_partial_shape(0).size(), 1);
+        lastNode = ngraph::builder::makeFakeQuantize(lastNode, ov::element::f32, 256, fqShape);
         additionalFusedOps.push_back("FakeQuantize");

-        auto secondTerm = ngraph::builder::makeFakeQuantize(inputParams[1], ngraph::element::f32, 256, fqShape);
+        auto secondTerm = ngraph::builder::makeFakeQuantize(inputParams[1], ov::element::f32, 256, fqShape);

-        auto sum = std::make_shared<ngraph::opset3::Add>(lastNode, secondTerm);
+        auto sum = std::make_shared<ov::op::v1::Add>(lastNode, secondTerm);
         additionalFusedOps.push_back("Add");

         fusedOps.insert(fusedOps.begin(), additionalFusedOps.begin(), additionalFusedOps.end());
@@ -241,12 +247,10 @@ class ConvSumInPlaceTestInt8 : public ConvSumInPlaceTest {
     void SetUp() override {
         abs_threshold = 1.001f;
-        using ngraph::pass::ConvertPrecision;
         ConvSumInPlaceTest::SetUp();
         functionRefs = function->clone();
-        ngraph::pass::ConvertPrecision<ngraph::element::Type_t::i8, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
-        ngraph::pass::ConvertPrecision<ngraph::element::Type_t::u8, ngraph::element::Type_t::f32>().run_on_model(functionRefs);
-        functionRefs->validate_nodes_and_infer_types();
+        convert_precisions.insert({ov::element::i8, ov::element::f32});
+        convert_precisions.insert({ov::element::u8, ov::element::f32});
     }
 };
@@ -258,12 +262,12 @@ TEST_P(ConvSumInPlaceTestInt8, CompareWithRefs) {

 class ConvSumInPlaceTestSeveralConsumers : public ConvSumInPlaceTest {
 public:
-    std::shared_ptr<ngraph::Node> addSum(std::shared_ptr<ngraph::Node> lastNode, const ngraph::ParameterVector& inputParams) override {
-        auto sum = std::make_shared<ngraph::opset3::Add>(lastNode, inputParams[1]);
+    std::shared_ptr<ov::Node> addSum(std::shared_ptr<ov::Node> lastNode, const ov::ParameterVector& inputParams) override {
+        auto sum = std::make_shared<ov::op::v1::Add>(lastNode, inputParams[1]);
         fusedOps.insert(fusedOps.begin(), "Add");

-        auto shapeOf = std::make_shared<ngraph::opset3::ShapeOf>(sum);
-        return std::make_shared<ngraph::opset1::Reshape>(sum, shapeOf, true);
+        auto shapeOf = std::make_shared<ov::op::v3::ShapeOf>(sum);
+        return std::make_shared<ov::op::v1::Reshape>(sum, shapeOf, true);
     }
 };
@@ -278,70 +282,70 @@ TEST_P(ConvSumInPlaceTestSeveralConsumers, CompareWithRefs) {

 namespace {
 const auto fusingMulAddFQMullAdd = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
             {[](postNodeConfig& cfg) {
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
-                return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
+                return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
             }, "Multiply(PerChannel)"},
            {[](postNodeConfig& cfg) {
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
-                return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
+                return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
            }, "Add(PerChannel)"},
            {[](postNodeConfig& cfg){
                 auto localPrc = cfg.input->get_element_type();
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
            }, "FakeQuantize(PerChannel)"},
            {[](postNodeConfig& cfg) {
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
-                return std::make_shared<ngraph::opset1::Multiply>(cfg.input, constNode);
+                return std::make_shared<ov::op::v1::Multiply>(cfg.input, constNode);
            }, "Multiply(PerChannel)"},
            {[](postNodeConfig& cfg) {
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 auto constNode = ngraph::builder::makeConstant(cfg.type, newShape, std::vector<float>{}, true);
-                return std::make_shared<ngraph::opset1::Add>(cfg.input, constNode);
+                return std::make_shared<ov::op::v1::Add>(cfg.input, constNode);
            }, "Add(PerChannel)"}}), {"Add"} };

 const auto fusingDivSubFQ = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
             {[](postNodeConfig& cfg){
-                ngraph::Shape secondMultInShape = generatePerChannelShape(cfg.input);
+                ov::Shape secondMultInShape = generatePerChannelShape(cfg.input);
                 auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
-                return std::make_shared<ngraph::opset1::Divide>(cfg.input, secondMultInput);
+                return std::make_shared<ov::op::v1::Divide>(cfg.input, secondMultInput);
            }, "Divide(PerChannel)"},
            {[](postNodeConfig& cfg){
-                ngraph::Shape secondMultInShape = generatePerChannelShape(cfg.input);
+                ov::Shape secondMultInShape = generatePerChannelShape(cfg.input);
                 auto secondMultInput = ngraph::builder::makeConstant(cfg.type, secondMultInShape, std::vector<float>{}, true);
-                return std::make_shared<ngraph::opset1::Subtract>(cfg.input, secondMultInput);
+                return std::make_shared<ov::op::v1::Subtract>(cfg.input, secondMultInput);
            }, "Subtract(PerChannel)"},
            {[](postNodeConfig& cfg){
                 auto localPrc = cfg.input->get_element_type();
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
            }, "FakeQuantize(PerChannel)"}}), {"FakeQuantize"} };

 const auto fusingSigmoidFQFQ = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
             {[](postNodeConfig& cfg){
-                return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Sigmoid);
+                return ov::test::utils::make_activation(cfg.input, cfg.type, ov::test::utils::Sigmoid);
            }, "Sigmoid"},
            {[](postNodeConfig& cfg){
                 auto localPrc = cfg.input->get_element_type();
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
            }, "FakeQuantize(PerChannel)"},
            {[](postNodeConfig& cfg){
                 auto localPrc = cfg.input->get_element_type();
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
            }, "FakeQuantize(PerChannel)"}}), {"Sigmoid", "FakeQuantize", "FakeQuantize"} };

 const auto fusingClampFQ = fusingSpecificParams{ std::make_shared<postNodesMgr>(std::vector<postNodeBuilder>{
             {[](postNodeConfig& cfg){
-                return ngraph::builder::makeActivation(cfg.input, cfg.type, ngraph::helpers::Clamp, {}, {3.0f, 6.0f});
+                return ov::test::utils::make_activation(cfg.input, cfg.type, ov::test::utils::Clamp, {}, {3.0f, 6.0f});
            }, "Clamp"},
            {[](postNodeConfig& cfg){
                 auto localPrc = cfg.input->get_element_type();
-                ngraph::Shape newShape = generatePerChannelShape(cfg.input);
+                ov::Shape newShape = generatePerChannelShape(cfg.input);
                 return ngraph::builder::makeFakeQuantize(cfg.input, localPrc, 256, newShape);
            }, "FakeQuantize(PerChannel)"}}), {"FakeQuantize"} };
@@ -403,16 +407,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_FP32, ConvSumInPlaceTest,
                          ::testing::ValuesIn(secondInp),
                          ::testing::Values(true, false),
                          ::testing::ValuesIn(fusingParamsSet),
-                         ::testing::Values(cpuEmptyPluginConfig)),
+                         ::testing::Values(empty_plugin_config)),
                          ConvSumInPlaceTest::getTestCaseName);

-INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_BF16, ConvSumInPlaceTest,
-                         ::testing::Combine(
-                             ::testing::Values(convInpShape),
-                             ::testing::ValuesIn(secondInp),
-                             ::testing::Values(true, false),
-                             ::testing::ValuesIn(fusingParamsSetBF16),
-                             ::testing::Values(cpuBF16PluginConfig)),
+INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_BF16,
+                         ConvSumInPlaceTest,
+                         ::testing::Combine(::testing::Values(convInpShape),
+                                            ::testing::ValuesIn(secondInp),
+                                            ::testing::Values(true, false),
+                                            ::testing::ValuesIn(fusingParamsSetBF16),
+                                            ::testing::Values(cpu_bf16_plugin_config)),
                          ConvSumInPlaceTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_INT8, ConvSumInPlaceTestInt8,
@@ -421,7 +425,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_INT8, ConvSumInPlaceTestInt8,
                          ::testing::ValuesIn(secondInp),
                          ::testing::Values(true, false),
                          ::testing::ValuesIn(fusingParamsSet),
-                         ::testing::Values(cpuEmptyPluginConfig)),
+                         ::testing::Values(empty_plugin_config)),
                          ConvSumInPlaceTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_Several_Consumers, ConvSumInPlaceTestSeveralConsumers,
@@ -430,7 +434,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_Several_Consumers, ConvSumInPl
                          ::testing::ValuesIn(secondInp),
                          ::testing::Values(true),
                          ::testing::Values(emptyFusingSpec),
-                         ::testing::Values(cpuEmptyPluginConfig)),
+                         ::testing::Values(empty_plugin_config)),
                          ConvSumInPlaceTest::getTestCaseName);

 InputShape convInpShapeStrided = {
@@ -457,8 +461,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Sum_Broadcast_Strided, ConvSumInPlaceStrided
                          ::testing::Values(secondInpStrided),
                          ::testing::Values(true),
                          ::testing::Values(emptyFusingSpec),
-                         ::testing::Values(cpuEmptyPluginConfig)),
+                         ::testing::Values(empty_plugin_config)),
                          ConvSumInPlaceTest::getTestCaseName);

 }  // namespace
-}  // namespace SubgraphTestsDefinitions
+}  // namespace test
+}  // namespace ov
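// [Editor's note] Plugin configs move from std::map<std::string, std::string>
// with KEY_ENFORCE_BF16 to a typed ov::AnyMap keyed by
// ov::hint::inference_precision, as in the runtimeType hunk above. Sketch of
// the new lookup (the helper name is ours, not from the patch):

#include "openvino/runtime/properties.hpp"

bool bf16_enforced(const ov::AnyMap& configuration) {
    auto it = configuration.find(ov::hint::inference_precision.name());
    return it != configuration.end() &&
           it->second.as<ov::element::Type>() == ov::element::bf16;
}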
b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/conv_with_zero_point_fuse.cpp @@ -52,7 +52,7 @@ void ConvWithZeroPointFuseSubgraphTest::SetUp() { {-12.8f}, {12.7f}); - std::vector> branches(2); + std::vector> branches(2); { ov::Strides strides{1, 1}; ov::Shape pads_begin{0, 0}, pads_end{0, 0}, kernel{1, 1}; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp index 43c793230ac85a..02eb3d6409e17f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_fq_rnn_to_quantized_rnn.cpp @@ -2,31 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ngraph/output_vector.hpp" -#include "ngraph/type/element_type.hpp" +#include "common_test_utils/common_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "openvino/core/node.hpp" #include "openvino/core/type/element_type.hpp" -#include "openvino/op/gru_sequence.hpp" -#include "openvino/op/lstm_sequence.hpp" #include "openvino/runtime/tensor.hpp" -#include "test_utils/cpu_test_utils.hpp" +#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/common_utils.hpp" -#include #include #include #include #include -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -using namespace ov; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using ConvertFqRnnToQuantizedRnnTestParams = std::tuple, bool>; @@ -68,22 +62,30 @@ class ConvertFqRnnToQuantizedRnn : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); const auto& shapeX = targetInputStaticShapes[0]; const auto& shapeH = targetInputStaticShapes[1]; - ov::Tensor tensorX = utils::create_and_fill_tensor(funcInputs[0].get_element_type(), shapeX, 1, 0, 16); - ov::Tensor tensorH = utils::create_and_fill_tensor(funcInputs[1].get_element_type(), shapeH, 1, 0, 16); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 1; + in_data.resolution = 16; + ov::Tensor tensorX = utils::create_and_fill_tensor(funcInputs[0].get_element_type(), shapeX, in_data); + ov::Tensor tensorH = utils::create_and_fill_tensor(funcInputs[1].get_element_type(), shapeH, in_data); inputs.insert({funcInputs[0].get_node_shared_ptr(), tensorX}); inputs.insert({funcInputs[1].get_node_shared_ptr(), tensorH}); if (hasCell) { const auto& shapeC = targetInputStaticShapes[cellIdx]; - ov::Tensor tensorC = utils::create_and_fill_tensor(funcInputs[cellIdx].get_element_type(), shapeC, 2, -1, 128, 2); + in_data.start_from = -1; + in_data.range = 2; + in_data.resolution = 128; + in_data.seed = 2; + ov::Tensor tensorC = utils::create_and_fill_tensor(funcInputs[cellIdx].get_element_type(), shapeC, in_data); inputs.insert({funcInputs[cellIdx].get_node_shared_ptr(), tensorC}); } } @@ -116,9 +118,9 @@ class ConvertFqRnnToQuantizedRnn : public testing::WithParamInterface(ngPrec, shape)); - auto makeDataFQ = [](const ngraph::Output& input) { + auto makeDataFQ = [](const ov::Output& input) { const auto fqLevels = 256; - return 
ngraph::builder::makeFakeQuantize(input, ngraph::element::f32, fqLevels, {}, + return ngraph::builder::makeFakeQuantize(input, ov::element::f32, fqLevels, {}, {-128.f/127}, {1.f}, {-128.f/127}, {1.f}); }; @@ -128,16 +130,16 @@ class ConvertFqRnnToQuantizedRnn : public testing::WithParamInterface weight) { const auto fqLevelsW = 255; - return ngraph::builder::makeFakeQuantize(weight, ngraph::element::f32, + return ngraph::builder::makeFakeQuantize(weight, ov::element::f32, fqLevelsW, std::vector{}, {-127.f/63}, {127.f/63}, {-127.f/63}, {127.f/63}); @@ -152,7 +154,7 @@ class ConvertFqRnnToQuantizedRnn : public testing::WithParamInterface lengths(batchSize, static_cast(maxSeqLen)); - auto seq_lengths = ngraph::opset1::Constant::create(element::i64, Shape{batchSize}, lengths); + auto seq_lengths = ov::op::v0::Constant::create(element::i64, Shape{batchSize}, lengths); if (rnnType == "LSTMSequence") { hasCell = true; @@ -215,4 +217,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_static, ConvertFqRnnToQuantizedRnn, ConvertFqRnnToQuantizedRnn::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp index 86a0e2323dbfc4..f67cc147e6d1a3 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/convert_range.cpp @@ -9,10 +9,9 @@ #include using namespace CPUTestUtils; -using namespace InferenceEngine; -using namespace ov::test; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { /* This test runs the following subgraph: @@ -149,4 +148,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvertRangeSubgraphCPUTest, ConvertRangeSubgraphCPUTest::getTestCaseName); } // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp index 518676a15e63b3..9ca6af1f8d96f0 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_insert_convert_i64.cpp @@ -11,7 +11,8 @@ using namespace ov::test; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using CustomOpI64CPUTestParams = std::tuple; class CustomOpI64 : public ov::op::Op { @@ -151,4 +152,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_CustomOp, ::testing::Combine(::testing::Values(ElementType::i32), ::testing::Values(inputShapes)), CustomOpConvertI64CPUTest::getTestCaseName); -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp index c108d067a47311..4b6b237886ade3 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/custom_op_scalar.cpp @@ -2,15 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/op.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include 
"test_utils/cpu_test_utils.hpp" using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using CustomOpScalarCPUTestParams = std::tuple; @@ -149,4 +149,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_CustomOp, ::testing::Combine(::testing::Values(ElementType::u8), ::testing::ValuesIn(inputShapes)), CustomOpScalarCPUTest::getTestCaseName); -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp index 5b9e5ae550c236..2c68b14cb41508 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/denormal_check.cpp @@ -8,9 +8,8 @@ #include "ov_models/builders.hpp" #include "ngraph/runtime/aligned_buffer.hpp" -using namespace InferenceEngine; -using namespace ov::test; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { template class AlignedBufferWrapper { @@ -67,13 +66,13 @@ void SetUp() override { pConstStorage.reset(new AlignedBufferWrapper(elemsCount, alignment)); auto constTensor = std::make_shared(rtPrc, inpShape, pConstStorage->get_ptr()); - auto constNode = std::make_shared(constTensor); + auto constNode = std::make_shared(constTensor); ov::NodeVector input = {params[0], constNode}; - auto concat = std::make_shared(input, 1); + auto concat = std::make_shared(input, 1); - ov::ResultVector results{std::make_shared(concat->output(0))}; + ov::ResultVector results{std::make_shared(concat->output(0))}; - function = std::make_shared(results, params, "denormal_check"); + function = std::make_shared(results, params, "denormal_check"); } }; @@ -110,4 +109,5 @@ TEST_F(DenormalNullifyCheck, smoke_CPU_Denormal_Check) { } } -}// namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp index 487235fa8dd15b..9ea9bc8e266f9a 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp @@ -128,7 +128,10 @@ class EltwiseCacheTest : public testing::WithParamInterface, for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; ov::Tensor tensor; - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 10, 1, 1); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 1; + in_data.range = 10; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } } @@ -92,7 +94,7 @@ class EltwiseChainTest : public testing::WithParamInterface, ov::ParameterVector paramVec; std::vector> inputNodes; - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (secondaryInputType == utils::InputLayerType::PARAMETER) { for (size_t i = 0; i < inputDynamicShapes.size(); i++) { paramVec.push_back(std::make_shared(inputPrecisions[i], inputDynamicShapes[i])); inputNodes.push_back(paramVec.back()); @@ -116,8 +118,9 @@ class EltwiseChainTest : public testing::WithParamInterface, std::vector constShape(targetStaticShapes[0][0].size(), 1); constShape[1] 
= targetStaticShapes[0][0][1]; auto fq = ngraph::builder::makeFakeQuantize(eltwiseOps[eltwiseOps.size() - 1], - ::ngraph::element::Type(::ngraph::element::Type_t::f32), - 256, constShape); + ov::element::Type(ov::element::f32), + 256, + constShape); eltwiseOps.push_back(makeEltwise(fq, inputNodes[eltwiseOpTypes.size() - 1], eltwiseOpTypes[eltwiseOpTypes.size() - 1])); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp index a0bd14db02893a..d8940dc4dfad6d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_caching.cpp @@ -27,24 +27,21 @@ // |Output| // -------- -#include -#include -#include +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" +#include "internal_properties.hpp" using namespace CPUTestUtils; -using namespace ov::test; -using namespace InferenceEngine; -using namespace ngraph; -namespace CPUSubgraphTestsDefinitions { +namespace ov { +namespace test { -using InputShapesTuple = std::tuple< - std::vector, // fq dynamic data shapes - std::vector>, // fq range input shapes - std::vector // reshape shape ->; +using InputShapesTuple = std::tuple, // fq dynamic data shapes + std::vector>, // fq range input shapes + std::vector // reshape shape + >; using FqSpecificParams = std::tuple, // output high size_t>; // levels -typedef std::tuple< - InputShapesTuple, // fq input shapes and reshape shape - FqSpecificParams, // fq specific params - std::pair, std::vector>, // il and ih values - CPUSpecificParams, - std::map // Additional config (disable snippets or no) -> FakeQuantizeCacheTestParams; +typedef std::tuple, std::vector>, // il and ih values + CPUSpecificParams, + ov::AnyMap // Additional config (disable snippets or no) + > + FakeQuantizeCacheTestParams; class FakeQuantizeCacheTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -68,11 +65,11 @@ class FakeQuantizeCacheTest : public testing::WithParamInterface, std::vector> inputRangesValues; CPUSpecificParams cpuParams; - std::map additionalConfig; + ov::AnyMap additionalConfig; std::tie(inputShapesTuple, fqParams, inputRangesValues, cpuParams, additionalConfig) = obj.param; std::vector shapes; - std::vector> ranges; + std::vector> ranges; std::vector reshapeShape; std::tie(shapes, ranges, reshapeShape) = inputShapesTuple; @@ -113,7 +110,7 @@ class FakeQuantizeCacheTest : public testing::WithParamInterface(concat, ngraphParam, "fq_cache"); + function = std::make_shared(concat, paramVect, "fq_cache"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -209,10 +206,10 @@ class FakeQuantizeCacheTest : public testing::WithParamInterface emptyConfig = {}; -const std::map disableSnippets = { - {PluginConfigInternalParams::KEY_SNIPPETS_MODE, PluginConfigInternalParams::DISABLE}}; - +const ov::AnyMap emptyConfig = {}; +const ov::AnyMap disableSnippets = {ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::DISABLE)}; // 3D std::vector cpuParams_3D = { @@ -525,5 +520,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeCache_5D, FakeQuantizeCacheTest, ::testing::Values(disableSnippets)), FakeQuantizeCacheTest::getTestCaseName); -} // namespace -} // namespace 
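// For reference, the fq_caching.cpp hunk above swaps the stringly-typed
// std::map<std::string, std::string> plugin options for ov::AnyMap plus the typed
// ov::intel_cpu::snippets_mode property from internal_properties.hpp (added to the
// includes by this PR). Sketch of the new configuration style:
#include "internal_properties.hpp"

const ov::AnyMap emptyConfig = {};
// replaces {{PluginConfigInternalParams::KEY_SNIPPETS_MODE, PluginConfigInternalParams::DISABLE}}
const ov::AnyMap disableSnippets = {ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::DISABLE)};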
CPUSubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp index bc2c3e93614860..c497bb3cdb99d1 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fq_fused_with_ss.cpp @@ -3,7 +3,6 @@ // #include "common_test_utils/node_builders/eltwise.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp index 24381dcc185724..196029c32e3ef8 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/fuse_scaleshift_and_fakequantize.cpp @@ -5,6 +5,7 @@ #include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" +#include "openvino/util/common_util.hpp" namespace ov { namespace test { @@ -29,10 +30,10 @@ class FuseScaleShiftAndFakeQuantizeTest : public testing::WithParamInterfaceGather->Subgraph->AvgPool when input blob precision is forced to U8. there is a precision mismatch between Gather and Subgraph, diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp index 572c4e80706f2a..a23fb6953a2aa4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/interaction.cpp @@ -134,11 +134,11 @@ class IntertactionCPUTest : public testing::WithParamInterface>; typedef std::tuple, // Input shapes @@ -25,19 +27,19 @@ typedef std::tuple, // Input shapes static std::shared_ptr initMHASubgraph0(std::vector& inputDynamicShapes, std::vector& inputPrecisions) { - ov::ParameterVector ngraphParam; + ov::ParameterVector paramVect; auto transpose0Param = std::make_shared(inputPrecisions[0], inputDynamicShapes[0]); - ngraphParam.push_back(transpose0Param); + paramVect.push_back(transpose0Param); auto transpose1Param = std::make_shared(inputPrecisions[1], inputDynamicShapes[1]); - ngraphParam.push_back(transpose1Param); + paramVect.push_back(transpose1Param); auto addParam = std::make_shared(inputPrecisions[2], inputDynamicShapes[2]); - ngraphParam.push_back(addParam); + paramVect.push_back(addParam); auto transpose2Param = std::make_shared(inputPrecisions[3], inputDynamicShapes[3]); - ngraphParam.push_back(transpose2Param); + paramVect.push_back(transpose2Param); std::vector constantShapes; constantShapes.push_back(ov::Shape({inputDynamicShapes[0].get_shape().size()})); @@ -90,24 +92,24 @@ static std::shared_ptr initMHASubgraph0(std::vector const auto transpose3 = std::make_shared(matMul1, transpose3Const); ov::ResultVector results{std::make_shared(transpose3)}; - return std::make_shared(results, ngraphParam, "mha"); + return std::make_shared(results, paramVect, "mha"); } static std::shared_ptr initMHASubgraph1(std::vector& inputDynamicShapes, std::vector& inputPrecisions) { - ov::ParameterVector ngraphParam; + 
ov::ParameterVector paramVect; auto transpose0Param = std::make_shared(inputPrecisions[0], inputDynamicShapes[0]); - ngraphParam.push_back(transpose0Param); + paramVect.push_back(transpose0Param); auto transpose1Param = std::make_shared(inputPrecisions[1], inputDynamicShapes[1]); - ngraphParam.push_back(transpose1Param); + paramVect.push_back(transpose1Param); auto addParam = std::make_shared(inputPrecisions[2], inputDynamicShapes[2]); - ngraphParam.push_back(addParam); + paramVect.push_back(addParam); auto transpose2Param = std::make_shared(inputPrecisions[3], inputDynamicShapes[3]); - ngraphParam.push_back(transpose2Param); + paramVect.push_back(transpose2Param); std::vector constantShapes; constantShapes.push_back(ov::Shape({inputDynamicShapes[0].get_shape().size()})); @@ -142,7 +144,7 @@ static std::shared_ptr initMHASubgraph1(std::vector const auto transpose3 = std::make_shared(matMul1, transpose3Const); ov::ResultVector results{std::make_shared(transpose3)}; - return std::make_shared(results, ngraphParam, "mha"); + return std::make_shared(results, paramVect, "mha"); } class MHATest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -187,17 +189,15 @@ class MHATest : public testing::WithParamInterface, virtual public Sub for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; ov::Tensor tensor; - if (funcInput.get_element_type() == ov::element::bf16) - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), - targetInputStaticShapes[i], - 2, - -1, - 256); - else - tensor = ov::test::utils::create_and_fill_tensor_unique_sequence(funcInput.get_element_type(), - targetInputStaticShapes[i], - -1, - 5); + if (funcInput.get_element_type() == ov::element::bf16) { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 2; + in_data.resolution = 256; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); + } else { + tensor = ov::test::utils::create_and_fill_tensor_unique_sequence(funcInput.get_element_type(), targetInputStaticShapes[i], -1, 5); + } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -235,8 +235,8 @@ class MHATest : public testing::WithParamInterface, virtual public Sub // Snippets MHA tokenization has limitations to avoid performance degradations. These limitations depend on // target machine. Just for testing, we disable these limitations to allow Snippets to tokenize pattern on all // machines for validation. 
- if (!configuration.count("SNIPPETS_MODE")) { - configuration.insert({"SNIPPETS_MODE", "IGNORE_CALLBACK"}); + if (!configuration.count(ov::intel_cpu::snippets_mode.name())) { + configuration.insert(ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK)); } } }; @@ -313,19 +313,19 @@ INSTANTIATE_TEST_SUITE_P( static std::shared_ptr initMHAQuantSubgraph0(std::vector& inputDynamicShapes, std::vector& inputPrecisions, std::vector& matMulIn0Precisions) { - ov::ParameterVector ngraphParam; + ov::ParameterVector paramVect; auto transpose0Param = std::make_shared(inputPrecisions[0], inputDynamicShapes[0]); - ngraphParam.push_back(transpose0Param); + paramVect.push_back(transpose0Param); auto transpose1Param = std::make_shared(inputPrecisions[1], inputDynamicShapes[1]); - ngraphParam.push_back(transpose1Param); + paramVect.push_back(transpose1Param); auto addParam = std::make_shared(inputPrecisions[2], inputDynamicShapes[2]); - ngraphParam.push_back(addParam); + paramVect.push_back(addParam); auto transpose2Param = std::make_shared(inputPrecisions[3], inputDynamicShapes[3]); - ngraphParam.push_back(transpose2Param); + paramVect.push_back(transpose2Param); std::vector constantShapes; constantShapes.push_back(ov::Shape({inputDynamicShapes[0].get_shape().size()})); @@ -435,26 +435,26 @@ static std::shared_ptr initMHAQuantSubgraph0(std::vector(fakeQuantize5, transpose3Const); ov::ResultVector results{std::make_shared(transpose3)}; - return std::make_shared(results, ngraphParam, "mha"); + return std::make_shared(results, paramVect, "mha"); } static std::shared_ptr initMHAQuantSubgraph1(const std::vector& inputDynamicShapes, const std::vector& inputPrecisions, const std::vector& matMulIn0Precisions, const bool fakeQuantize3Exists) { - ov::ParameterVector ngraphParam; + ov::ParameterVector paramVect; auto transpose0Param = std::make_shared(inputPrecisions[0], inputDynamicShapes[0]); - ngraphParam.push_back(transpose0Param); + paramVect.push_back(transpose0Param); auto transpose1Param = std::make_shared(inputPrecisions[1], inputDynamicShapes[1]); - ngraphParam.push_back(transpose1Param); + paramVect.push_back(transpose1Param); auto addParam = std::make_shared(inputPrecisions[2], inputDynamicShapes[2]); - ngraphParam.push_back(addParam); + paramVect.push_back(addParam); auto transpose2Param = std::make_shared(inputPrecisions[3], inputDynamicShapes[3]); - ngraphParam.push_back(transpose2Param); + paramVect.push_back(transpose2Param); std::vector constantShapes; constantShapes.push_back(ov::Shape({inputDynamicShapes[0].get_shape().size()})); @@ -524,7 +524,7 @@ static std::shared_ptr initMHAQuantSubgraph1(const std::vector(transpose3)}; - return std::make_shared(results, ngraphParam, "mha"); + return std::make_shared(results, paramVect, "mha"); } class MHAQuantTest : public testing::WithParamInterface, @@ -574,17 +574,15 @@ class MHAQuantTest : public testing::WithParamInterface, for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; ov::Tensor tensor; - if (funcInput.get_element_type().is_real()) - tensor = ov::test::utils::create_and_fill_tensor_normal_distribution(funcInput.get_element_type(), - targetInputStaticShapes[i], - 0.0f, - 1.5f); - else - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), - targetInputStaticShapes[i], - 255, - 0, - 1); + if (funcInput.get_element_type().is_real()) { + tensor = ov::test::utils::create_and_fill_tensor_normal_distribution(funcInput.get_element_type(), targetInputStaticShapes[i], 0.0f, 
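// For reference, MHATest above and MHAQuantTest below guard the snippets override
// identically: an ov::Property is callable, so invoking it produces the name/value
// pair to store in the ov::AnyMap, while .name() supplies the lookup key. This removes
// the raw "SNIPPETS_MODE"/"IGNORE_CALLBACK" string literals:
if (!configuration.count(ov::intel_cpu::snippets_mode.name())) {
    configuration.insert(ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK));
}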
1.5f); + } else { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 255; + in_data.resolution = 1; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); + } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -617,8 +615,8 @@ class MHAQuantTest : public testing::WithParamInterface, // Snippets MHA tokenization has limitations to avoid performance degradations. These limitations depend on // target machine. Just for testing, we disable these limitations to allow Snippets to tokenize pattern on all // machines for validation. - if (!configuration.count("SNIPPETS_MODE")) { - configuration.insert({"SNIPPETS_MODE", "IGNORE_CALLBACK"}); + if (!configuration.count(ov::intel_cpu::snippets_mode.name())) { + configuration.insert(ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK)); } } }; @@ -704,3 +702,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_MHAQuant_Pattern2, MHAQuantTest::getTestCaseName); } // namespace +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp index 865268c500a31b..25e7703c92f418 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/ngram.cpp @@ -4,6 +4,7 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "internal_properties.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" @@ -176,7 +177,10 @@ class NgramCPUTest : public testing::WithParamInterface, const auto& indices_et = model_inputs[1].get_element_type(); const auto& indices_shape = targetInputStaticShapes[1]; const size_t batch_size = data_shape[0]; - auto indices_tensor = ov::test::utils::create_and_fill_tensor(indices_et, indices_shape, batch_size, 0); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = batch_size; + auto indices_tensor = ov::test::utils::create_and_fill_tensor(indices_et, indices_shape, in_data); if (indices_et == ov::element::i32) { auto* indices_data = indices_tensor.data(); @@ -201,8 +205,8 @@ class NgramCPUTest : public testing::WithParamInterface, init_input_shapes(inputShapes); function = initNgram(inputDynamicShapes, data_et, idces_et, k); - if (!configuration.count("SNIPPETS_MODE")) { - configuration.insert({"SNIPPETS_MODE", "DISABLE"}); + if (!configuration.count(ov::intel_cpu::snippets_mode.name())) { + configuration.insert(ov::intel_cpu::snippets_mode(ov::intel_cpu::SnippetsMode::DISABLE)); } } }; diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp index 4229e15282648d..e25350d1adfb50 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/not_fused_conv_simple_op.cpp @@ -2,21 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/convolution.hpp" +#include "common_test_utils/node_builders/eltwise.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; -using ngraph::helpers::EltwiseTypes; 
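// For reference, not_fused_conv_simple_op.cpp here, with subgraph_with_blocked_format.cpp
// and tile_with_two_output_edges.cpp further down, all follow one recipe when leaving
// LayerTestsUtils::LayerTestsCommon. A minimal sketch of the target skeleton, assuming
// only SubgraphBaseStaticTest from shared_test_classes/base/ov_subgraph.hpp (the class
// and test names are illustrative):
#include "shared_test_classes/base/ov_subgraph.hpp"

namespace ov {
namespace test {

class ExampleCpuSubgraphTest : public SubgraphBaseStaticTest {
protected:
    void SetUp() override {
        targetDevice = utils::DEVICE_CPU;
        // build the ov::Model under test and assign it to `function` here
    }
};

TEST_F(ExampleCpuSubgraphTest, smoke_CompareWithRefs) {
    run();  // replaces the legacy Run()
}

}  // namespace test
}  // namespace ov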
+namespace ov { +namespace test { -namespace SubgraphTestsDefinitions { - -class NotFusedConvSimpleOp : virtual public LayerTestsUtils::LayerTestsCommon { +class NotFusedConvSimpleOp : virtual public ov::test::SubgraphBaseStaticTest { protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_CPU; - ov::ParameterVector inputParams{std::make_shared(ov::element::f32, ov::Shape{1, 3, 12, 9}), - std::make_shared(ov::element::f32, ov::Shape{1, 16, 12, 9})}; + ov::ParameterVector inputParams{ + std::make_shared(ov::element::f32, ov::Shape{1, 3, 12, 9}), + std::make_shared(ov::element::f32, ov::Shape{1, 16, 12, 9})}; std::shared_ptr conv; { @@ -27,20 +29,29 @@ class NotFusedConvSimpleOp : virtual public LayerTestsUtils::LayerTestsCommon { const std::vector dilation = {1, 1}; const size_t numOutChannels = 16; const op::PadType paddingType = op::PadType::EXPLICIT; - conv = builder::makeConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels); + conv = ov::test::utils::make_convolution(inputParams[0], + element::f32, + kernelSize, + strides, + padBegin, + padEnd, + dilation, + paddingType, + numOutChannels); } - const auto sharedNode = builder::makeConstant(element::f32, {1, 16, 1, 1}, std::vector{}, true); - const auto postOpCandidate = builder::makeEltwise(conv, sharedNode, EltwiseTypes::ADD); + const auto sharedNode = ngraph::builder::makeConstant(element::f32, {1, 16, 1, 1}, std::vector{}, true); + const auto postOpCandidate = ov::test::utils::makeEltwise(conv, sharedNode, utils::EltwiseTypes::ADD); - const auto secondConsumpt = builder::makeEltwise(inputParams[1], sharedNode, EltwiseTypes::ADD); + const auto secondConsumpt = ov::test::utils::makeEltwise(inputParams[1], sharedNode, utils::EltwiseTypes::ADD); NodeVector results{postOpCandidate, secondConsumpt}; - function = std::make_shared(results, inputParams, "NotFusedConvSimpleOp"); + function = std::make_shared(results, inputParams, "NotFusedConvSimpleOp"); } }; TEST_F(NotFusedConvSimpleOp, smoke_CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp index 52caa4fdabc54e..ad97f392b0002f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/reshape_inplace.cpp @@ -64,11 +64,11 @@ class InPlaceReshapeFromConstantCheck : public SubgraphBaseTest { } } else { if (funcInput.get_element_type().is_real()) { - tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), - targetInputStaticShapes[i], - 10, - 0, - 1000); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 10; + in_data.resolution = 1000; + tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); } else { tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); } diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp index c9b367599ab5b7..e03e0672c31267 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp @@ -14,7 
+14,6 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "ie_precision.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/fusing_test_utils.hpp" @@ -66,29 +65,29 @@ static std::shared_ptr buildROPE_Llama2(const int batch, auto Constant585 = cos_sin_cache[1]; // concat KV length - auto transpose_Transpose = makeOP({input, {0, 2, 1, 3}}); - auto slice_Unsqueeze_426 = makeOP({pos_id_end, 0}); - auto ScatterUpdate_152236 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); - auto slice_Slice = makeOP({Constant582, {0, 0, 0}, ScatterUpdate_152236, {1, 1, 1}}, + auto transpose_Transpose = makeOP({input, {0, 2, 1, 3}}); + auto slice_Unsqueeze_426 = makeOP({pos_id_end, 0}); + auto ScatterUpdate_152236 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); + auto slice_Slice = makeOP({Constant582, {0, 0, 0}, ScatterUpdate_152236, {1, 1, 1}}, {{"begin_mask", {1, 1, 0}}, {"end_mask", {1, 1, 0}}, {"new_axis_mask", {}}, {"shrink_axis_mask", {}}, {"ellipsis_mask", {}}}); - auto squeeze_Squeeze = makeOP({slice_Slice, 1}); - auto squeeze_Squeeze_435 = makeOP({squeeze_Squeeze, 0}); - auto index_441_Gather = makeOP({squeeze_Squeeze_435, pos_ids, 0}, {{"batch_dims", 0}}); - auto unsqueeze_Unsqueeze = makeOP({index_441_Gather, 1}); + auto squeeze_Squeeze = makeOP({slice_Slice, 1}); + auto squeeze_Squeeze_435 = makeOP({squeeze_Squeeze, 0}); + auto index_441_Gather = makeOP({squeeze_Squeeze_435, pos_ids, 0}, {{"batch_dims", 0}}); + auto unsqueeze_Unsqueeze = makeOP({index_441_Gather, 1}); auto mul_Multiply = - makeOP({transpose_Transpose, unsqueeze_Unsqueeze}, {{"auto_broadcast", "numpy"}}); - auto size_ShapeOf_448 = makeOP({transpose_Transpose}, {{"output_type", "i32"}}); - auto size_Gather_450 = makeOP({size_ShapeOf_448, 3, 0}, {{"batch_dims", 0}}); + makeOP({transpose_Transpose, unsqueeze_Unsqueeze}, {{"auto_broadcast", "numpy"}}); + auto size_ShapeOf_448 = makeOP({transpose_Transpose}, {{"output_type", "i32"}}); + auto size_Gather_450 = makeOP({size_ShapeOf_448, 3, 0}, {{"batch_dims", 0}}); auto floor_divide_Divide = - makeOP({size_Gather_450, 2}, {{"auto_broadcast", "numpy"}, {"m_pythondiv", true}}); - auto floor_divide_Floor = makeOP({floor_divide_Divide}); - auto slice_Unsqueeze_452 = makeOP({floor_divide_Floor, 0}); - auto ScatterUpdate_152312 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); - auto slice_Slice_459 = makeOP( + makeOP({size_Gather_450, 2}, {{"auto_broadcast", "numpy"}, {"m_pythondiv", true}}); + auto floor_divide_Floor = makeOP({floor_divide_Divide}); + auto slice_Unsqueeze_452 = makeOP({floor_divide_Floor, 0}); + auto ScatterUpdate_152312 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); + auto slice_Slice_459 = makeOP( {transpose_Transpose, ScatterUpdate_152312, {0ll, 0ll, 0ll, LLONG_MAX}, {1, 1, 1, 1}}, {{"begin_mask", {1, 1, 1, 0}}, {"end_mask", {1, 1, 1, 0}}, @@ -103,30 +102,30 @@ static std::shared_ptr buildROPE_Llama2(const int batch, 1, }), {-1.000000f}); - auto neg_Multiply = makeOP({slice_Slice_459, Constant_182988}, {{"auto_broadcast", "numpy"}}); - auto ScatterUpdate_152368 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); + auto neg_Multiply = makeOP({slice_Slice_459, Constant_182988}, {{"auto_broadcast", "numpy"}}); + auto ScatterUpdate_152368 = makeOP({{0, 0, 0, 0}, {3}, slice_Unsqueeze_452, {0}}); auto slice_Slice2 = - makeOP({transpose_Transpose, {0, 0, 0, 0}, ScatterUpdate_152368, {1, 1, 1, 1}}, + 
makeOP({transpose_Transpose, {0, 0, 0, 0}, ScatterUpdate_152368, {1, 1, 1, 1}}, {{"begin_mask", {1, 1, 1, 0}}, {"end_mask", {1, 1, 1, 0}}, {"new_axis_mask", {}}, {"shrink_axis_mask", {}}, {"ellipsis_mask", {}}}); - auto cat_Concat = makeOP({neg_Multiply, slice_Slice2}, {{"axis", -1}}); - auto ScatterUpdate_152421 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); - auto slice_Slice_433 = makeOP({Constant585, {0, 0, 0}, ScatterUpdate_152421, {1, 1, 1}}, + auto cat_Concat = makeOP({neg_Multiply, slice_Slice2}, {{"axis", -1}}); + auto ScatterUpdate_152421 = makeOP({{0, 0, 0}, {2}, slice_Unsqueeze_426, {0}}); + auto slice_Slice_433 = makeOP({Constant585, {0, 0, 0}, ScatterUpdate_152421, {1, 1, 1}}, {{"begin_mask", {1, 1, 0}}, {"end_mask", {1, 1, 0}}, {"new_axis_mask", {}}, {"shrink_axis_mask", {}}, {"ellipsis_mask", {}}}); - auto squeeze_Squeeze_436 = makeOP({slice_Slice_433, 1}); - auto squeeze_Squeeze_437 = makeOP({squeeze_Squeeze_436, 0}); - auto index_446_Gather = makeOP({squeeze_Squeeze_437, pos_ids, 0}, {{"batch_dims", 0}}); - auto unsqueeze_Unsqueeze_447 = makeOP({index_446_Gather, 1}); + auto squeeze_Squeeze_436 = makeOP({slice_Slice_433, 1}); + auto squeeze_Squeeze_437 = makeOP({squeeze_Squeeze_436, 0}); + auto index_446_Gather = makeOP({squeeze_Squeeze_437, pos_ids, 0}, {{"batch_dims", 0}}); + auto unsqueeze_Unsqueeze_447 = makeOP({index_446_Gather, 1}); auto mul_Multiply_463 = - makeOP({cat_Concat, unsqueeze_Unsqueeze_447}, {{"auto_broadcast", "numpy"}}); - auto add_Add = makeOP({mul_Multiply, mul_Multiply_463}, {{"auto_broadcast", "numpy"}}); + makeOP({cat_Concat, unsqueeze_Unsqueeze_447}, {{"auto_broadcast", "numpy"}}); + auto add_Add = makeOP({mul_Multiply, mul_Multiply_463}, {{"auto_broadcast", "numpy"}}); return std::make_shared(ov::NodeVector{add_Add}, ov::ParameterVector{input, pos_id_end, pos_ids}); } @@ -150,8 +149,11 @@ class RoPECPUTestLlama2 : public SubgraphBaseTest { auto& input_shape = targetInputStaticShapes[0]; auto seq_length = input_shape[1]; - ov::Tensor t_input = - utils::create_and_fill_tensor(funcInputs[0].get_element_type(), input_shape, 2, -1.0f, 32768); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 2; + in_data.resolution = 32768; + ov::Tensor t_input = utils::create_and_fill_tensor(funcInputs[0].get_element_type(), input_shape, in_data); ov::Tensor t_position_id_end = create_i32_tensor(ov::Shape({}), position_id_start + seq_length); ov::Tensor t_position_ids = create_i32_tensor(ov::Shape({1, seq_length}), position_id_start); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shapeof_any_layout.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shapeof_any_layout.cpp index 68439615ec581d..2d8b7f0438cad8 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shapeof_any_layout.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/shapeof_any_layout.cpp @@ -2,19 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" - +#include "common_test_utils/node_builders/activation.hpp" #include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; using InputShape = ov::test::InputShape; using ElementType = ov::element::Type_t; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { // ┌────────┐ // │ Param │ @@ -52,7 +52,7 @@ class 
ShapeOfAnyLayoutCPUTest : public testing::WithParamInterface obj) { - SubgraphTestsDefinitions::ShapeOfAnyLayoutParams basicParamsSet; + ShapeOfAnyLayoutParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; ElementType netPr; @@ -97,10 +97,10 @@ class ShapeOfAnyLayoutCPUTest : public testing::WithParamInterface(inType, shape)); //make a stub eltwise node to enforce layout, since ShapeOf just mimic any input layout - auto eltwise = ngraph::builder::makeActivation(params[0], inType, ov::test::utils::ActivationTypes::Relu); + auto eltwise = utils::make_activation(params[0], inType, ov::test::utils::ActivationTypes::Relu); eltwise->get_rt_info() = makeCPUInfo(eltwiseInFmts, eltwiseOutFmts, {}); - auto shapeOf = std::make_shared(eltwise, ngraph::element::i32); + auto shapeOf = std::make_shared(eltwise, ov::element::i32); function = makeNgraphFunction(netPrecision, params, shapeOf, "ShapeOf"); } @@ -197,5 +197,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf4dAnyLayoutTest, ShapeOfAnyLayoutCPUTest, params4dDynamic, ShapeOfAnyLayoutCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf5dAnyLayoutTest, ShapeOfAnyLayoutCPUTest, params5dDynamic, ShapeOfAnyLayoutCPUTest::getTestCaseName); -} // namespace -} // namespace SubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp index 807e458ee4deb6..8eefc9e644ba72 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/subgraph_with_blocked_format.cpp @@ -2,44 +2,64 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" #include "ov_models/builders.hpp" -#include - -using namespace ngraph; +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" +#include "openvino/opsets/opset8.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -class SubgraphWithBlockedFormat : virtual public LayerTestsUtils::LayerTestsCommon { +class SubgraphWithBlockedFormat : virtual public SubgraphBaseStaticTest { protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_CPU; + abs_threshold = 1e-2; auto type = element::f32; - auto param = std::make_shared(type, Shape{1, 32, 64, 32}); - auto weights = builder::makeConstant(type, Shape{32, 32, 1, 1}, std::vector{}, true); - auto conv = std::make_shared(param, weights, Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); - auto mean = std::make_shared(conv, opset8::Constant::create(element::i32, Shape{2}, {2, 3}), true); - auto reshape_before = std::make_shared(mean, opset8::Constant::create(element::i32, Shape{3}, {0, 16, -1}), true); - auto mvn = std::make_shared(reshape_before, opset8::Constant::create(element::i32, Shape{1}, {2}), - false, 0.1, op::MVNEpsMode::INSIDE_SQRT); - auto reshape_after = std::make_shared(mvn, std::make_shared(mean), false); - auto mul = std::make_shared(reshape_after, builder::makeConstant(type, Shape{32, 1, 1}, std::vector{}, true)); - auto add = std::make_shared(mul, builder::makeConstant(type, Shape{32, 1, 1}, std::vector{}, true)); - auto sigmoid = std::make_shared(add); - auto mul2 = std::make_shared(conv, sigmoid); - - function = std::make_shared(mul2, ParameterVector{param}); + auto 
param = std::make_shared(type, Shape{1, 32, 64, 32}); + auto weights = ngraph::builder::makeConstant(type, Shape{32, 32, 1, 1}, std::vector{}, true); + auto conv = std::make_shared(param, + weights, + Strides{1, 1}, + CoordinateDiff{0, 0}, + CoordinateDiff{0, 0}, + Strides{1, 1}); + auto mean = + std::make_shared(conv, + ov::opset8::Constant::create(element::i32, Shape{2}, {2, 3}), + true); + auto reshape_before = + std::make_shared(mean, + ov::opset8::Constant::create(element::i32, Shape{3}, {0, 16, -1}), + true); + auto mvn = std::make_shared(reshape_before, + ov::opset8::Constant::create(element::i32, Shape{1}, {2}), + false, + 0.1, + op::MVNEpsMode::INSIDE_SQRT); + auto reshape_after = + std::make_shared(mvn, std::make_shared(mean), false); + auto mul = std::make_shared( + reshape_after, + ngraph::builder::makeConstant(type, Shape{32, 1, 1}, std::vector{}, true)); + auto add = std::make_shared( + mul, + ngraph::builder::makeConstant(type, Shape{32, 1, 1}, std::vector{}, true)); + auto sigmoid = std::make_shared(add); + auto mul2 = std::make_shared(conv, sigmoid); + + function = std::make_shared(mul2, ParameterVector{param}); } void TearDown() override { - auto runtime_function = executableNetwork.GetExecGraphInfo().getFunction(); + auto runtime_function = compiledModel.get_runtime_model(); int nodes_found = 0; for (const auto& n : runtime_function->get_ordered_ops()) { - auto layer_type = n->get_rt_info().at(ExecGraphInfoSerialization::LAYER_TYPE).as(); + auto layer_type = n->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); if (layer_type == "Subgraph") { nodes_found++; - auto output_layout = n->get_rt_info().at(ExecGraphInfoSerialization::OUTPUT_LAYOUTS).as(); + auto output_layout = n->get_rt_info().at(ov::exec_model_info::OUTPUT_LAYOUTS).as(); // convolution maybe chooses 'nhwc' and the subgraph will follow it ASSERT_TRUE(output_layout == "aBcd8b" || output_layout == "aBcd16b" || output_layout == "acdb"); } @@ -49,7 +69,8 @@ class SubgraphWithBlockedFormat : virtual public LayerTestsUtils::LayerTestsComm }; TEST_F(SubgraphWithBlockedFormat, smoke_CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp index 97c12a7131cfb6..c3c861d1efef4f 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/tile_with_two_output_edges.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/eltwise.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; -using ngraph::helpers::EltwiseTypes; +namespace ov { +namespace test { -namespace SubgraphTestsDefinitions { - -class TileWithTwoOutputEdges : public LayerTestsUtils::LayerTestsCommon { +class TileWithTwoOutputEdges : public SubgraphBaseStaticTest { protected: void SetUp() override { - targetDevice = ov::test::utils::DEVICE_CPU; + targetDevice = utils::DEVICE_CPU; auto ngPrc = element::f32; ov::ParameterVector inputParams {std::make_shared(ngPrc, ov::Shape{1, 3, 12, 9})}; @@ -25,16 +25,17 @@ class TileWithTwoOutputEdges : public LayerTestsUtils::LayerTestsCommon { const auto const1 = ngraph::builder::makeConstant(ngPrc, 
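// For reference, the TearDown above moves from
// executableNetwork.GetExecGraphInfo().getFunction() to the API 2.0 runtime model. The
// standalone inspection pattern (rt_info keys come from ov::exec_model_info; the
// header is assumed to be openvino/runtime/exec_model_info.hpp):
auto runtime_model = compiledModel.get_runtime_model();
for (const auto& node : runtime_model->get_ordered_ops()) {
    const auto layer_type = node->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as<std::string>();
    if (layer_type == "Subgraph") {
        const auto layout = node->get_rt_info().at(ov::exec_model_info::OUTPUT_LAYOUTS).as<std::string>();
        // convolution may choose nhwc, so "aBcd8b", "aBcd16b" and "acdb" are all acceptable
    }
}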
std::vector{1, 6, 1, 1}, std::vector{}, true); const auto const2 = ngraph::builder::makeConstant(ngPrc, std::vector{1, 6, 1, 1}, std::vector{}, true); - const auto add1 = ngraph::builder::makeEltwise(tile->output(0), const1, ngraph::helpers::EltwiseTypes::ADD); - const auto add2 = ngraph::builder::makeEltwise(tile->output(0), const2, ngraph::helpers::EltwiseTypes::ADD); + const auto add1 = utils::makeEltwise(tile->output(0), const1, utils::EltwiseTypes::ADD); + const auto add2 = utils::makeEltwise(tile->output(0), const2, utils::EltwiseTypes::ADD); NodeVector results{add1, add2}; - function = std::make_shared(results, inputParams, "TileWithTwoOutputEdges"); + function = std::make_shared(results, inputParams, "TileWithTwoOutputEdges"); } }; TEST_F(TileWithTwoOutputEdges, smoke_CompareWithRefs) { - Run(); + run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/arm/filter_cpu_info.cpp b/src/plugins/intel_cpu/tests/functional/test_utils/arm/filter_cpu_info.cpp index 34f1575d9bb3f3..21a3e8d76b8529 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/arm/filter_cpu_info.cpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/arm/filter_cpu_info.cpp @@ -26,7 +26,11 @@ std::vector filterCPUInfoForArch(const std::vector cpuEmptyPluginConfig; const ov::AnyMap empty_plugin_config{}; -const std::map cpuFP32PluginConfig = - { { InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO } }; const std::map cpuBF16PluginConfig = { { InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES } }; - +const ov::AnyMap cpu_bf16_plugin_config = {{ov::hint::inference_precision(ov::element::bf16)}}; // utility functions std::vector filterCPUSpecificParams(const std::vector& paramsVector); diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp index e8abbfe879d872..b7dd89c763ca16 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/properties_test.hpp @@ -7,7 +7,7 @@ #include "openvino/runtime/core.hpp" #include "openvino/runtime/compiled_model.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "ov_models/subgraph_builders.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" class OVClassConfigTestCPU : public ::testing::Test { public: @@ -16,6 +16,6 @@ class OVClassConfigTestCPU : public ::testing::Test { void SetUp() override { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - model = ngraph::builder::subgraph::makeConvPoolRelu(); + model = ov::test::utils::make_conv_pool_relu(); } }; diff --git a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt index be9c4a1c5da408..6bb37da132021a 100644 --- a/src/plugins/intel_cpu/tests/unit/CMakeLists.txt +++ b/src/plugins/intel_cpu/tests/unit/CMakeLists.txt @@ -53,8 +53,6 @@ ov_add_test_target( gtest_main gmock dnnl - inference_engine_transformations - inference_engine_lp_transformations openvino::shape_inference inference_engine_s unit_test_utils @@ -62,6 +60,8 @@ ov_add_test_target( ov_snippets_models snippets_test_utils ${MLAS_LIBRARY} + inference_engine_transformations + inference_engine_lp_transformations ADD_CPPLINT LABELS OV UNIT CPU diff --git 
a/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp index 66a28bd1cb3e72..2c2c4d7cf2c4b5 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/grid_sample_shape_inference_test.cpp @@ -11,11 +11,11 @@ using namespace ov; using namespace ov::intel_cpu; -class GridSampleStaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; +class GridSampleStaticShapeInferenceTest : public OpStaticShapeInferenceTest {}; TEST_F(GridSampleStaticShapeInferenceTest, GridSample) { - const auto data = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); - const auto grid = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); + const auto data = std::make_shared(element::i32, PartialShape{-1, -1, -1, -1}); + const auto grid = std::make_shared(element::f32, PartialShape{-1, -1, -1, -1}); op = make_op(data, grid, opset9::GridSample::Attributes{}); diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/lowered/buffer_allocation.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/lowered/buffer_allocation.cpp index 6202fdc77efd5f..de5b02c3c8349f 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/lowered/buffer_allocation.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/lowered/buffer_allocation.cpp @@ -70,8 +70,8 @@ class BufferAllocationCPUTest : public testing::TestWithParam(); pipeline.register_pass(m_vector_size); + pipeline.register_pass(); pipeline.register_pass(m_vector_size); pipeline.register_pass(); if (with_split_loops) @@ -120,7 +120,7 @@ class BufferAllocationCPUTest : public testing::TestWithParam GetModel() const override { - const auto subtensor_scalar = std::vector{1, 1}; + const auto subtensor_scalar = std::vector{1}; const auto subtensor_softmax = std::vector{1, ov::snippets::lowered::PortDescriptor::ServiceDimensions::FULL_DIM}; const auto subtensor_full = std::vector(2, ov::snippets::lowered::PortDescriptor::ServiceDimensions::FULL_DIM); @@ -136,10 +136,12 @@ class MHABF16AMXBufferAllocationTest : public BufferAllocationCPUTest { const auto brgemm_copyb0 = std::make_shared( convert1, ov::element::bf16, ov::intel_cpu::BrgemmCopyB::OnlyRepacking, 0, 0, 0); - const auto scratch0 = std::make_shared(ov::Shape{ov::intel_cpu::BrgemmCPU::SCRATCH_BYTE_SIZE}); + const auto scratch0 = std::make_shared(ov::Shape{ov::intel_cpu::BrgemmCPU::SCRATCH_BYTE_SIZE}); const auto brgemm_cpu0 = std::make_shared( parameter0, brgemm_copyb0->output(0), scratch0, ov::intel_cpu::BrgemmCPU::Type::AMX); brgemm_cpu0->set_m_block_size(32); + brgemm_cpu0->set_k_block_size(16); + brgemm_cpu0->set_n_block_size(64); const auto relu1 = std::make_shared(brgemm_cpu0); const auto softmax = std::make_shared(relu1, 3); @@ -147,10 +149,12 @@ class MHABF16AMXBufferAllocationTest : public BufferAllocationCPUTest { const auto brgemm_copyb1 = std::make_shared( parameter2, ov::element::bf16, ov::intel_cpu::BrgemmCopyB::OnlyRepacking, 0, 0, 0); - const auto scratch1 = std::make_shared(ov::Shape{ov::intel_cpu::BrgemmCPU::SCRATCH_BYTE_SIZE}); + const auto scratch1 = std::make_shared(ov::Shape{ov::intel_cpu::BrgemmCPU::SCRATCH_BYTE_SIZE}); const auto brgemm_cpu1 = std::make_shared( convert2, brgemm_copyb1->output(0), scratch1, ov::intel_cpu::BrgemmCPU::Type::AMX); brgemm_cpu1->set_m_block_size(32); + 
brgemm_cpu1->set_k_block_size(16); + brgemm_cpu1->set_n_block_size(64); const auto relu2 = std::make_shared(brgemm_cpu1); @@ -191,7 +195,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHAOptimizedWSplit, MHA ::testing::Values(true), ::testing::Values(true), ::testing::Values(90112), - ::testing::Values(4)), + ::testing::Values(5)), BufferAllocationCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Snippets_BufferAllocation_MHANotOptimizedWOSplit, MHABF16AMXBufferAllocationTest, diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp index ced190761843de..5912cb9debfc83 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/mul_add_to_fma.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include "snippets/op/scalar.hpp" diff --git a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp index f9406850b4cc67..13b8bba7f848f8 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/x64/convert_to_interaction.cpp @@ -25,12 +25,12 @@ using namespace testing; using namespace ov::intel_cpu; using namespace ov; -static std::shared_ptr createFQ(const std::shared_ptr& input) { - auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); - auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); - auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); - auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); - return std::make_shared(input, input_low, input_high, output_low, output_high, 256); +static std::shared_ptr createFQ(const std::shared_ptr& input) { + auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); + auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); + auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{0}); + auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{49.4914f}); + return std::make_shared(input, input_low, input_high, output_low, output_high, 256); } static std::shared_ptr makeInteraction(const ov::PartialShape& inputShape, bool intraFQ = false, bool postFQ = false) { @@ -55,24 +55,24 @@ static std::shared_ptr makeInteraction(const ov::PartialShape& inputS features.push_back(sparse_feat); inputsParams.push_back(sparse_input); } - auto shapeof = std::make_shared(dense_feature); - auto gather_batch_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{0}); - auto gather_batch_axis = std::make_shared(element::i32, ov::Shape{}, 0); - auto gather_batch = std::make_shared(shapeof, gather_batch_indices, gather_batch_axis); + auto shapeof = std::make_shared(dense_feature); + auto gather_batch_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{0}); + auto gather_batch_axis = std::make_shared(element::i32, ov::Shape{}, 0); + auto gather_batch = std::make_shared(shapeof, gather_batch_indices, gather_batch_axis); - auto gather_feature_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{1}); - auto gather_feature_axis = std::make_shared(element::i32, ov::Shape{1}, 0); - auto 
gather_feature = std::make_shared(shapeof, gather_feature_indices, gather_feature_axis); + auto gather_feature_indices = std::make_shared(element::i32, ov::Shape{1}, std::vector{1}); + auto gather_feature_axis = std::make_shared(element::i32, ov::Shape{1}, 0); + auto gather_feature = std::make_shared(shapeof, gather_feature_indices, gather_feature_axis); - auto reshape_dim2 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); - auto reshape_shape = std::make_shared(NodeVector{gather_batch, reshape_dim2, gather_feature}, 0); + auto reshape_dim2 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); + auto reshape_shape = std::make_shared(NodeVector{gather_batch, reshape_dim2, gather_feature}, 0); - auto concat1 = std::make_shared(features, 1); - auto reshape = std::make_shared(concat1, reshape_shape, true); + auto concat1 = std::make_shared(features, 1); + auto reshape = std::make_shared(concat1, reshape_shape, true); std::vector transpose1_value = {0, 2, 1}; - auto transpose1_shape = std::make_shared(element::i32, ov::Shape{3}, transpose1_value); - auto transpose1 = std::make_shared(reshape, transpose1_shape); - auto matmul = std::make_shared(reshape, transpose1); + auto transpose1_shape = std::make_shared(element::i32, ov::Shape{3}, transpose1_value); + auto transpose1 = std::make_shared(reshape, transpose1_shape); + auto matmul = std::make_shared(reshape, transpose1); std::shared_ptr inter = nullptr; if (intraFQ) { inter = createFQ(matmul); @@ -80,11 +80,11 @@ static std::shared_ptr makeInteraction(const ov::PartialShape& inputS inter = matmul; } std::vector transpose2_value = {1, 2, 0}; - auto transpose2_shape = std::make_shared(element::i32, ov::Shape{3}, transpose2_value); - auto transpose2 = std::make_shared(inter, transpose2_shape); + auto transpose2_shape = std::make_shared(element::i32, ov::Shape{3}, transpose2_value); + auto transpose2 = std::make_shared(inter, transpose2_shape); std::vector reshape2_value = {729, -1}; - auto reshape2_shape = std::make_shared(element::i32, ov::Shape{2}, reshape2_value); - auto reshape2 = std::make_shared(transpose2, reshape2_shape, true); + auto reshape2_shape = std::make_shared(element::i32, ov::Shape{2}, reshape2_value); + auto reshape2 = std::make_shared(transpose2, reshape2_shape, true); std::vector gather_indices_value; for (int i = 1; i < 27; i++) { @@ -92,29 +92,29 @@ static std::shared_ptr makeInteraction(const ov::PartialShape& inputS gather_indices_value.push_back(i * 27 + j); } } - auto gather_indices = std::make_shared(element::i32, ov::Shape{351}, gather_indices_value); - auto gather_axis = std::make_shared(element::i32, ov::Shape{}, 0); - auto gather = std::make_shared(reshape2, gather_indices, gather_axis); - auto reshape3_dim1 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); - auto reshape3_shape = std::make_shared(NodeVector{reshape3_dim1, gather_batch}, 0); - auto reshape3 = std::make_shared(gather, reshape3_shape, true); + auto gather_indices = std::make_shared(element::i32, ov::Shape{351}, gather_indices_value); + auto gather_axis = std::make_shared(element::i32, ov::Shape{}, 0); + auto gather = std::make_shared(reshape2, gather_indices, gather_axis); + auto reshape3_dim1 = std::make_shared(element::i64, ov::Shape{1}, std::vector{-1}); + auto reshape3_shape = std::make_shared(NodeVector{reshape3_dim1, gather_batch}, 0); + auto reshape3 = std::make_shared(gather, reshape3_shape, true); std::vector transpose3_value = {1, 0}; - auto transpose3_shape = std::make_shared(element::i32, 
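// For reference, convert_to_interaction.cpp above (and the GNA tests below) replace
// opset8::* aliases with explicitly versioned op types, pinning each node to a
// concrete operation version rather than whatever the opset alias currently resolves
// to. A small illustrative build using the standard versions of these ops:
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
auto order = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{2}, {1, 0});
auto transpose = std::make_shared<ov::op::v1::Transpose>(param, order);  // was opset8::Transpose
auto result = std::make_shared<ov::op::v0::Result>(transpose);           // was opset8::Result
auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});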
ov::Shape{2}, transpose3_value); - auto transpose3 = std::make_shared(reshape3, transpose3_shape); + auto transpose3_shape = std::make_shared(element::i32, ov::Shape{2}, transpose3_value); + auto transpose3 = std::make_shared(reshape3, transpose3_shape); std::vector reshape4_value = {-1, 351}; - auto reshape4_shape = std::make_shared(element::i32, ov::Shape{2}, reshape4_value); - auto reshape4 = std::make_shared(transpose3, reshape4_shape, true); - auto concat2 = std::make_shared(NodeVector{dense_feature, reshape4}, 1); + auto reshape4_shape = std::make_shared(element::i32, ov::Shape{2}, reshape4_value); + auto reshape4 = std::make_shared(transpose3, reshape4_shape, true); + auto concat2 = std::make_shared(NodeVector{dense_feature, reshape4}, 1); std::shared_ptr model; if (postFQ) { - auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-5.12978f}); - auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{5.08965f}); - auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-128}); - auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{127}); - auto fq = std::make_shared>( - opset8::FakeQuantize(concat2, input_low, input_high, output_low, output_high, 256), + auto input_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-5.12978f}); + auto input_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{5.08965f}); + auto output_low = std::make_shared(element::f32, ov::Shape{1}, std::vector{-128}); + auto output_high = std::make_shared(element::f32, ov::Shape{1}, std::vector{127}); + auto fq = std::make_shared>( + ov::op::v0::FakeQuantize(concat2, input_low, input_high, output_low, output_high, 256), element::i8); model = std::make_shared(fq, inputsParams, "interaction"); } else { diff --git a/src/plugins/intel_cpu/thirdparty/CMakeLists.txt b/src/plugins/intel_cpu/thirdparty/CMakeLists.txt index ef8e5cd1da8985..4a45a07021a9fd 100644 --- a/src/plugins/intel_cpu/thirdparty/CMakeLists.txt +++ b/src/plugins/intel_cpu/thirdparty/CMakeLists.txt @@ -126,10 +126,12 @@ function(ov_add_onednn) # but for this we need to install library files install(FILES $ DESTINATION ${OV_CPACK_ARCHIVEDIR} - COMPONENT ${OV_CPACK_COMP_CORE}) + COMPONENT ${OV_CPACK_COMP_CORE} + ${OV_CPACK_COMP_CORE_EXCLUDE_ALL}) install(FILES "${intel_cpu_thirdparty_SOURCE_DIR}/ACLConfig.cmake" DESTINATION ${OV_CPACK_OPENVINO_CMAKEDIR} - COMPONENT ${OV_CPACK_COMP_CORE_DEV}) + COMPONENT ${OV_CPACK_COMP_CORE_DEV} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) endif() endfunction() diff --git a/src/plugins/intel_gna/CMakeLists.txt b/src/plugins/intel_gna/CMakeLists.txt index ecb94ece6b3db2..ce51c8fc88a156 100644 --- a/src/plugins/intel_gna/CMakeLists.txt +++ b/src/plugins/intel_gna/CMakeLists.txt @@ -96,7 +96,7 @@ if(BUILD_SHARED_LIBS) set(gna_component gna) else() # during static build all plugins are part of the core, thus the dependencies as well - set(gna_component core) + set(gna_component ${OV_CPACK_COMP_CORE}) endif() file(GLOB_RECURSE gna_libraries "${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_SHARED_LIBRARY_SUFFIX}*") @@ -125,14 +125,16 @@ if(NOT BUILD_SHARED_LIBS) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/libGNAConfig.cmake ${CMAKE_BINARY_DIR} COPYONLY) install(FILES "${CMAKE_BINARY_DIR}/libGNAConfig.cmake" DESTINATION ${OV_CPACK_IE_CMAKEDIR} - COMPONENT ${gna_component}) + COMPONENT ${gna_component} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) # install .lib file on Windows if(WIN32) file(GLOB_RECURSE gna_libraries 
"${libGNA_LIBRARIES_BASE_PATH}/*${CMAKE_STATIC_LIBRARY_SUFFIX}") install(FILES ${gna_libraries} DESTINATION ${OV_CPACK_LIBRARYDIR} - COMPONENT ${gna_component}) + COMPONENT ${gna_component} + ${OV_CPACK_COMP_CORE_DEV_EXCLUDE_ALL}) endif() endif() diff --git a/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp b/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp index 1dcdfd17d32900..e8c1852eb0cb23 100644 --- a/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp +++ b/src/plugins/intel_gna/legacy/tests/keep_constant_inputs_tests.cpp @@ -21,6 +21,8 @@ #include #include +#include "common_test_utils/subgraph_builders/conv_bias.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "ov_models/subgraph_builders.hpp" #include "shared_test_classes/base/low_precision_transformations/layer_transformation.hpp" @@ -60,7 +62,7 @@ void transformNetwork(InferenceEngine::CNNNetwork& clonedNetwork, bool keep_cons TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithTrue) { std::shared_ptr f_ptr; - f_ptr = ngraph::builder::subgraph::makeConvPoolRelu(); + f_ptr = ov::test::utils::make_conv_pool_relu(); InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network; transformNetwork(originalNetwork, true); ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 2); @@ -68,7 +70,7 @@ TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithTrue) { TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithFalse) { std::shared_ptr f_ptr; - f_ptr = ngraph::builder::subgraph::makeConvPoolRelu(); + f_ptr = ov::test::utils::make_conv_pool_relu(); InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network; transformNetwork(originalNetwork, false); ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 1); @@ -76,7 +78,7 @@ TEST(KeepConstantInputsTests, ConvertConvolutionPoolReluNetworkWithFalse) { TEST(KeepConstantInputsTests, ConvertConvolutionBiasNetworkWithTrue) { std::shared_ptr f_ptr; - f_ptr = ngraph::builder::subgraph::makeConvBias(); + f_ptr = ov::test::utils::make_conv_bias(); InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network; transformNetwork(originalNetwork, true); ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 3); @@ -84,7 +86,7 @@ TEST(KeepConstantInputsTests, ConvertConvolutionBiasNetworkWithTrue) { TEST(KeepConstantInputsTests, ConvertConvolutionBiasNetworkWithFalse) { std::shared_ptr f_ptr; - f_ptr = ngraph::builder::subgraph::makeConvBias(); + f_ptr = ov::test::utils::make_conv_bias(); InferenceEngine::CNNNetwork network(f_ptr), originalNetwork = network; transformNetwork(originalNetwork, false); ASSERT_EQ(numberOfInputsForLayerInCNNNetwork(originalNetwork, "Convolution"), 1); diff --git a/src/plugins/intel_gna/src/gna_graph_compiler.cpp b/src/plugins/intel_gna/src/gna_graph_compiler.cpp index cb86a83d38f5af..a3b7939677a57f 100644 --- a/src/plugins/intel_gna/src/gna_graph_compiler.cpp +++ b/src/plugins/intel_gna/src/gna_graph_compiler.cpp @@ -2404,22 +2404,15 @@ void GNAGraphCompiler::connectOutput(InferenceEngine::CNNLayerPtr layer, void* p log::debug() << "Connecting output " << layer->name << " ...\n"; // in case of Memory Layer it's input allocated in meminput layer if (layer->outData.size() == 1) { - for (int j = 0; j != static_cast(getInputTo(layer->outData.front()).size()); j++) { - auto isNonFunctional = [](CNNLayerPtr l) { - return LayerInfo(l).isNonFunctional(); - }; - - if 
(!CNNNetHasNextLayerSkipCertain(layer, 0, j, isNonFunctional)) { - continue; - } - auto nextLayer = CNNNetGetNextLayerSkipCertain(layer, 0, j, isNonFunctional); + auto isNonFunctional = [](CNNLayerPtr l) { + return LayerInfo(l).isNonFunctional(); + }; - if (!nextLayer.first) { - log::debug() << "for layer: " << layer->name << "outData[0] has non functional connection at " << j; - } + auto next_layers = CNNNetGetAllNextLayersSkipCertain(layer, -1, isNonFunctional); + for (auto& next_layer : next_layers) { auto nextMemoryLayerIt = std::find_if(begin(memory_connection), end(memory_connection), [&](MemoryConnection::value_type& comp) { - return comp.second.getOutput()->name == nextLayer.first->name; + return comp.second.getOutput()->name == next_layer->name; }); if (nextMemoryLayerIt != memory_connection.end()) { auto& nextMemoryLayer = nextMemoryLayerIt->second; diff --git a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp index 5a64a2c3bff795..252952a44b2e50 100644 --- a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp +++ b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp @@ -14,6 +14,7 @@ #include "frontend/model_quantizer.hpp" #include "gna_matcher.hpp" #include "ov_models/builders.hpp" +#include "openvino/opsets/opset8.hpp" using namespace InferenceEngine; using namespace ov::intel_gna::limitations; @@ -223,12 +224,12 @@ TEST_F(I16QuantisationTest, EltwiseToMemory_ActivationInsertion) { } TEST_F(I16QuantisationTest, SplitFollowedByActivation_DummyDiagonalAffineInsertion) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 20}); - const auto axis_node = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); - auto split = std::make_shared(input_params, axis_node, 2); - auto tanh = std::make_shared(split->outputs()[0]); - auto add = std::make_shared(split->outputs()[1], tanh); - auto result = std::make_shared(add); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 20}); + const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1}); + auto split = std::make_shared(input_params, axis_node, 2); + auto tanh = std::make_shared(split->outputs()[0]); + auto add = std::make_shared(split->outputs()[1], tanh); + auto result = std::make_shared(add); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); assert_that() @@ -328,10 +329,10 @@ TEST_F(I16QuantisationTest, ScaleShift_Affine_WillResultInIdentityInsertion) { } TEST_F(I16QuantisationTest, ClampFollowedByTanh_ResultInDiagonalInsertion) { - auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); - auto clamp = std::make_shared(input_params, -50, 50); - auto tanh = std::make_shared(clamp); - auto result = std::make_shared(tanh); + auto input_params = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}); + auto clamp = std::make_shared(input_params, -50, 50); + auto tanh = std::make_shared(clamp); + auto result = std::make_shared(tanh); auto function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); assert_that() @@ -346,16 +347,16 @@ TEST_F(I16QuantisationTest, ClampFollowedByTanh_ResultInDiagonalInsertion) { } TEST_F(I16QuantisationTest, EltwiseWithMemoryAndActivationInput_ResultInTwoDiagonalsInsertion) { - auto input_params = 
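// For reference, the gna_graph_compiler.cpp hunk above replaces the per-edge walk
// (CNNNetGetNextLayerSkipCertain for each output edge j) with one query returning
// every successor while skipping non-functional layers; -1 requests successors across
// all output ports. The new control flow in isolation (CNNLayerPtr, LayerInfo and the
// CNNNet* helpers are the legacy InferenceEngine utilities already used in that file):
auto isNonFunctional = [](CNNLayerPtr l) {
    return LayerInfo(l).isNonFunctional();
};
auto next_layers = CNNNetGetAllNextLayersSkipCertain(layer, -1, isNonFunctional);
for (auto& next_layer : next_layers) {
    // match next_layer->name against memory_connection, as before
}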
diff --git a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp
index 5a64a2c3bff795..252952a44b2e50 100644
--- a/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp
+++ b/src/plugins/intel_gna/tests/deprecated/unit/engines/gna/i16_quantisation_test.cpp
@@ -14,6 +14,7 @@
 #include "frontend/model_quantizer.hpp"
 #include "gna_matcher.hpp"
 #include "ov_models/builders.hpp"
+#include "openvino/opsets/opset8.hpp"
 
 using namespace InferenceEngine;
 using namespace ov::intel_gna::limitations;
@@ -223,12 +224,12 @@ TEST_F(I16QuantisationTest, EltwiseToMemory_ActivationInsertion) {
 }
 
 TEST_F(I16QuantisationTest, SplitFollowedByActivation_DummyDiagonalAffineInsertion) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 20});
-    const auto axis_node = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
-    auto split = std::make_shared<ngraph::opset8::Split>(input_params, axis_node, 2);
-    auto tanh = std::make_shared<ngraph::opset8::Tanh>(split->outputs()[0]);
-    auto add = std::make_shared<ngraph::opset8::Add>(split->outputs()[1], tanh);
-    auto result = std::make_shared<ngraph::opset8::Result>(add);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 20});
+    const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
+    auto split = std::make_shared<ov::op::v1::Split>(input_params, axis_node, 2);
+    auto tanh = std::make_shared<ov::op::v0::Tanh>(split->outputs()[0]);
+    auto add = std::make_shared<ov::op::v1::Add>(split->outputs()[1], tanh);
+    auto result = std::make_shared<ov::op::v0::Result>(add);
     auto function =
         std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     assert_that()
@@ -328,10 +329,10 @@ TEST_F(I16QuantisationTest, ScaleShift_Affine_WillResultInIdentityInsertion) {
 }
 
 TEST_F(I16QuantisationTest, ClampFollowedByTanh_ResultInDiagonalInsertion) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
-    auto clamp = std::make_shared<ngraph::opset8::Clamp>(input_params, -50, 50);
-    auto tanh = std::make_shared<ngraph::opset8::Tanh>(clamp);
-    auto result = std::make_shared<ngraph::opset8::Result>(tanh);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
+    auto clamp = std::make_shared<ov::op::v0::Clamp>(input_params, -50, 50);
+    auto tanh = std::make_shared<ov::op::v0::Tanh>(clamp);
+    auto result = std::make_shared<ov::op::v0::Result>(tanh);
     auto function =
         std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     assert_that()
@@ -346,16 +347,16 @@ TEST_F(I16QuantisationTest, ClampFollowedByTanh_ResultInDiagonalInsertion) {
 }
 
 TEST_F(I16QuantisationTest, EltwiseWithMemoryAndActivationInput_ResultInTwoDiagonalsInsertion) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
-    const auto constant = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1});
-    auto matmul = std::make_shared<ngraph::opset8::MatMul>(input_params, constant);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
+    const auto constant = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1});
+    auto matmul = std::make_shared<ov::op::v0::MatMul>(input_params, constant);
     auto mem_i = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}, 0);
     auto mem_r = std::make_shared(mem_i, "r_27-28");
-    auto tanh = std::make_shared<ngraph::opset8::Tanh>(matmul);
-    auto add = std::make_shared<ngraph::opset8::Add>(tanh, mem_r);
+    auto tanh = std::make_shared<ov::op::v0::Tanh>(matmul);
+    auto add = std::make_shared<ov::op::v1::Add>(tanh, mem_r);
     tanh->add_control_dependency(mem_r);
     auto mem_w = std::make_shared(tanh, "r_27-28");
-    auto result = std::make_shared<ngraph::opset8::Result>(add);
+    auto result = std::make_shared<ov::op::v0::Result>(add);
     mem_w->add_control_dependency(mem_r);
     result->add_control_dependency(mem_w);
     auto function =
@@ -490,16 +491,16 @@ TEST_F(I16QuantisationTest, fp16tofp32_on_fullyConnected_model) {
 
 TEST_F(I16QuantisationTest,
        MultipleActivationsAfterAffineWithIdentityActivation_MultipleDiagonalLayersWithActivaitons) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
-    const auto constant = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1});
-    auto matmul1 = std::make_shared<ngraph::opset8::MatMul>(input_params, constant);
-    auto matmul2 = std::make_shared<ngraph::opset8::MatMul>(input_params, constant);
-    auto add = std::make_shared<ngraph::opset8::Add>(matmul2, matmul1);
-    auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(matmul2);
-    auto relu = std::make_shared<ngraph::opset8::Relu>(matmul2);
-    auto mul = std::make_shared<ngraph::opset8::Multiply>(sigmoid, relu);
-    auto add2 = std::make_shared<ngraph::opset8::Add>(add, mul);
-    auto result = std::make_shared<ngraph::opset8::Result>(add);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
+    const auto constant = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1});
+    auto matmul1 = std::make_shared<ov::op::v0::MatMul>(input_params, constant);
+    auto matmul2 = std::make_shared<ov::op::v0::MatMul>(input_params, constant);
+    auto add = std::make_shared<ov::op::v1::Add>(matmul2, matmul1);
+    auto sigmoid = std::make_shared<ov::op::v0::Sigmoid>(matmul2);
+    auto relu = std::make_shared<ov::op::v0::Relu>(matmul2);
+    auto mul = std::make_shared<ov::op::v1::Multiply>(sigmoid, relu);
+    auto add2 = std::make_shared<ov::op::v1::Add>(add, mul);
+    auto result = std::make_shared<ov::op::v0::Result>(add);
     auto function =
         std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     // identiy came from automatic insertion due to
@@ -514,13 +515,13 @@ TEST_F(I16QuantisationTest,
 }
 
 TEST_F(I16QuantisationTest, MultipleActivationsAfterAffine_ResultInMultipleDiagonalLayersWithActivaitons) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
-    const auto constant = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1});
-    auto matmul = std::make_shared<ngraph::opset8::MatMul>(input_params, constant);
-    auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(matmul);
-    auto relu = std::make_shared<ngraph::opset8::Relu>(matmul);
-    auto mul = std::make_shared<ngraph::opset8::Multiply>(sigmoid, relu);
-    auto result = std::make_shared<ngraph::opset8::Result>(mul);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
+    const auto constant = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{10, 10}, {1});
+    auto matmul = std::make_shared<ov::op::v0::MatMul>(input_params, constant);
+    auto sigmoid = std::make_shared<ov::op::v0::Sigmoid>(matmul);
+    auto relu = std::make_shared<ov::op::v0::Relu>(matmul);
+    auto mul = std::make_shared<ov::op::v1::Multiply>(sigmoid, relu);
+    auto result = std::make_shared<ov::op::v0::Result>(mul);
     auto function =
         std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     // extra identity inserted for affine
@@ -596,13 +597,13 @@ TEST_F(I16QuantisationTest, PowerWithScaleFactorPropagateForward) {
 }
 
 TEST_F(I16QuantisationTest, ConcatWithDifferentInputScaleFactorsPropagateForward) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 20});
-    const auto axis_node = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
-    auto split = std::make_shared<ngraph::opset8::Split>(input_params, axis_node, 2);
-    auto sigmoid = std::make_shared<ngraph::opset8::Sigmoid>(split->outputs()[0]);
-    auto tanh = std::make_shared<ngraph::opset8::Tanh>(split->outputs()[1]);
-    auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{sigmoid, tanh}, 1);
-    auto result = std::make_shared<ngraph::opset8::Result>(concat);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 20});
+    const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
+    auto split = std::make_shared<ov::op::v1::Split>(input_params, axis_node, 2);
+    auto sigmoid = std::make_shared<ov::op::v0::Sigmoid>(split->outputs()[0]);
+    auto tanh = std::make_shared<ov::op::v0::Tanh>(split->outputs()[1]);
+    auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{sigmoid, tanh}, 1);
+    auto result = std::make_shared<ov::op::v0::Result>(concat);
     auto function =
         std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     assert_that()
@@ -627,14 +628,14 @@ TEST_F(I16QuantisationTest, TI_quantize) {
 }
 
 TEST_F(I16QuantisationTest, TI_PropagateForward) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
-    auto mul = std::make_shared<ngraph::opset8::Multiply>(
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 10});
+    auto mul = std::make_shared<ov::op::v1::Multiply>(
         input_params,
         std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}));
-    auto add = std::make_shared<ngraph::opset8::Add>(
+    auto add = std::make_shared<ov::op::v1::Add>(
         mul,
         std::make_shared(ngraph::element::f32, ngraph::Shape{1, 10}));
-    auto reshape = std::make_shared<ngraph::opset8::Reshape>(
+    auto reshape = std::make_shared<ov::op::v1::Reshape>(
         add,
         std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector{1, 1, 10}),
         false);
@@ -646,11 +647,11 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) {
     auto H_init = ngraph::builder::makeConstant(ngraph::element::f32, {batch_size, hiddenSize}, {}, true);
     auto C_init = ngraph::builder::makeConstant(ngraph::element::f32, {batch_size, hiddenSize}, {}, true);
 
-    auto H_t = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize});
-    auto C_t = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize});
+    auto H_t = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize});
+    auto C_t = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{batch_size, hiddenSize});
 
     // Body
-    auto X = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32,
+    auto X = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32,
                                                      ngraph::Shape{batch_size, 1, reshape_shape[2]});
     auto weightsNode =
         ngraph::builder::makeConstant(ngraph::element::f32, {4 * hiddenSize, reshape_shape[2]}, {}, true);
@@ -659,9 +660,9 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) {
 
     // lstm
     auto constantX =
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {batch_size, reshape_shape[2]});
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {batch_size, reshape_shape[2]});
     auto lstm1 =
-        std::make_shared<ngraph::opset8::LSTMCell>(std::make_shared<ngraph::opset8::Reshape>(X, constantX, false),
+        std::make_shared<ov::op::v4::LSTMCell>(std::make_shared<ov::op::v1::Reshape>(X, constantX, false),
                                                H_t,
                                                C_t,
                                                weightsNode,
@@ -674,7 +675,7 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) {
     auto body =
         std::make_shared<ngraph::Function>(ngraph::OutputVector{H_o, C_o}, ngraph::ParameterVector{X, H_t, C_t});
-    auto tensor_iterator = std::make_shared<ngraph::opset8::TensorIterator>();
+    auto tensor_iterator = std::make_shared<ov::op::v0::TensorIterator>();
     tensor_iterator->set_body(body);
 
     tensor_iterator->set_sliced_input(X, reshape, 0, 1, 1, -1, 1);
@@ -691,7 +692,7 @@ TEST_F(I16QuantisationTest, TI_PropagateForward) {
                                              {hiddenSize, output_size},
                                              {1},
                                              {1});
-    auto result = std::make_shared<ngraph::opset8::Result>(fc);
+    auto result = std::make_shared<ov::op::v0::Result>(fc);
     auto function =
         std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
     assert_that()
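The test migration above is mechanical: each `ngraph::opset8::X` alias is replaced with the explicitly versioned `ov::op::vN::X` type it resolves to. The Split/Tanh/Add graph from `SplitFollowedByActivation`, written standalone in the new style (shapes mirror the test; the helper name is ours):

// The migrated pattern, self-contained: versioned ov::op types replace opset aliases.
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/split.hpp"
#include "openvino/op/tanh.hpp"

std::shared_ptr<ov::Model> split_tanh_add() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 20});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {1});
    auto split = std::make_shared<ov::op::v1::Split>(input, axis, 2);  // two 1x10 halves
    auto tanh = std::make_shared<ov::op::v0::Tanh>(split->output(0));
    auto add = std::make_shared<ov::op::v1::Add>(split->output(1), tanh);
    return std::make_shared<ov::Model>(ov::OutputVector{add}, ov::ParameterVector{input});
}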
diff --git a/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp b/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp
index 38abd6b94ba981..253036bfaf5d6c 100644
--- a/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp
+++ b/src/plugins/intel_gna/tests/deprecated/unit/inference_engine_tests/cnn_ngraph_impl_tests.cpp
@@ -25,9 +25,9 @@
 #include
 #include
 #include
-#include
-#include
-#include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -47,10 +47,10 @@ using namespace InferenceEngine;
 TEST(CNNNGraphImplTests, TestReshapeWithSameShape) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
         input->set_friendly_name("input");
-        auto shape = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 4000});
-        auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, shape, true);
+        auto shape = ov::op::v0::Constant::create(ngraph::element::i64, {2}, {1, 4000});
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(input, shape, true);
         f = std::make_shared<ngraph::Function>(ngraph::OutputVector{reshape}, ngraph::ParameterVector{input});
     }
 
@@ -81,10 +81,10 @@ TEST(CNNNGraphImplTests, TestTwoResultsFromOneTensor) {
 TEST(CNNNGraphImplTests, TestInvalidReshape) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
         input->set_friendly_name("input");
-        auto shape = ngraph::opset5::Constant::create(ngraph::element::i64, {2}, {1, 4000});
-        auto reshape = std::make_shared<ngraph::opset5::Reshape>(input, shape, true);
+        auto shape = ov::op::v0::Constant::create(ngraph::element::i64, {2}, {1, 4000});
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(input, shape, true);
         f = std::make_shared<ngraph::Function>(ngraph::OutputVector{reshape}, ngraph::ParameterVector{input});
     }
 
@@ -100,18 +100,18 @@ TEST(CNNNGraphImplTests, TestInvalidReshape) {
 TEST(CNNNGraphImplTests, TestNMS5OutputNames) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto boxes = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
-        auto scores = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
-        auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
-        auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
-        auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+        auto boxes = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto scores = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
+        auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
+        auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
+        auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
+        auto nms = std::make_shared<ov::opset5::NonMaxSuppression>(
             boxes,
             scores,
             max_output_boxes_per_class,
             iou_threshold,
             score_threshold,
-            ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+            ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
             true);
         nms->set_friendly_name("nms");
         f = std::make_shared<ngraph::Function>(ngraph::OutputVector{nms->output(0), nms->output(1), nms->output(2)},
@@ -1072,15 +1072,15 @@ TEST(CNNNGraphImplTests, TestCheckStats) {
 TEST(CNNNGraphImplTests, CanSetBatchReadValue) {
     std::shared_ptr<ngraph::Function> ngraph;
     {
-        auto input = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2});
-        auto constant = std::make_shared<ngraph::opset5::Constant>(ngraph::element::f32,
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 2});
+        auto constant = std::make_shared<ov::op::v0::Constant>(ngraph::element::f32,
                                                                ngraph::Shape{1, 2},
                                                                std::vector{1, 2});
 
-        auto read_value = std::make_shared<ngraph::opset5::ReadValue>(constant, "variable_id");
-        auto assign = std::make_shared<ngraph::opset5::Assign>(read_value, "variable_id");
+        auto read_value = std::make_shared<ov::op::v3::ReadValue>(constant, "variable_id");
+        auto assign = std::make_shared<ov::op::v3::Assign>(read_value, "variable_id");
         assign->add_control_dependency(read_value);
-        auto add = std::make_shared<ngraph::opset5::Add>(input, read_value);
+        auto add = std::make_shared<ov::op::v1::Add>(input, read_value);
         auto result = std::make_shared(add);
 
         ngraph::ParameterVector params = {input};
@@ -1101,10 +1101,10 @@ TEST(CNNNGraphImplTests, addSameOutput) {
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
         ngraph::element::Type type(ngraph::element::Type_t::f32);
-        auto param = std::make_shared<ngraph::opset5::Parameter>(type, shape);
-        auto relu = std::make_shared<ngraph::opset5::Relu>(param);
-        auto shapeof = std::make_shared<ngraph::opset5::ShapeOf>(param);
-        auto reshape = std::make_shared<ngraph::opset5::Reshape>(relu, shapeof, true);
+        auto param = std::make_shared<ov::op::v0::Parameter>(type, shape);
+        auto relu = std::make_shared<ov::op::v0::Relu>(param);
+        auto shapeof = std::make_shared<ov::op::v3::ShapeOf>(param);
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(relu, shapeof, true);
         reshape->set_friendly_name("reshape");
         auto result = std::make_shared(reshape);
 
@@ -1128,12 +1128,12 @@ TEST(CNNNGraphImplTests, addOutput) {
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
         ngraph::element::Type type(ngraph::element::Type_t::f32);
-        auto param = std::make_shared<ngraph::opset5::Parameter>(type, shape);
-        auto relu = std::make_shared<ngraph::opset5::Relu>(param);
-        auto shapeof = std::make_shared<ngraph::opset5::ShapeOf>(param);
-        auto reshape = std::make_shared<ngraph::opset5::Reshape>(relu, shapeof, true);
+        auto param = std::make_shared<ov::op::v0::Parameter>(type, shape);
+        auto relu = std::make_shared<ov::op::v0::Relu>(param);
+        auto shapeof = std::make_shared<ov::op::v3::ShapeOf>(param);
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(relu, shapeof, true);
         reshape->set_friendly_name("reshape");
-        auto relu2 = std::make_shared<ngraph::opset5::Relu>(reshape);
+        auto relu2 = std::make_shared<ov::op::v0::Relu>(reshape);
         auto result = std::make_shared(relu2);
 
         ngraph::ParameterVector params = {param};
@@ -1156,9 +1156,9 @@ TEST(CNNNGraphImplTests, addOutputForParameter) {
     {
         ngraph::PartialShape shape({1, 3, 22, 22});
         ngraph::element::Type type(ngraph::element::Type_t::f32);
-        auto param = std::make_shared<ngraph::opset5::Parameter>(type, shape);
+        auto param = std::make_shared<ov::op::v0::Parameter>(type, shape);
         param->set_friendly_name("param");
-        auto relu = std::make_shared<ngraph::opset5::Relu>(param);
+        auto relu = std::make_shared<ov::op::v0::Relu>(param);
         auto result = std::make_shared(relu);
 
         ngraph::ParameterVector params = {param};
@@ -1855,27 +1855,27 @@ TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOpOpset6) {
 TEST(CNNNGraphImplTests, CheckUniqueNames) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto boxes = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto boxes = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
         boxes->set_friendly_name("boxes");
-        auto scores = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
+        auto scores = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
         scores->set_friendly_name("scores");
-        auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
-        auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
-        auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+        auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
+        auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
+        auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
+        auto nms = std::make_shared<ov::opset5::NonMaxSuppression>(
            boxes,
            scores,
            max_output_boxes_per_class,
            iou_threshold,
            score_threshold,
-           ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+           ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
            true);
-        auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
+        auto result1 = std::make_shared<ov::op::v0::Result>(nms->output(0));
         result1->set_friendly_name("result1");
-        auto result2 = std::make_shared<ngraph::opset5::Result>(nms->output(1));
+        auto result2 = std::make_shared<ov::op::v0::Result>(nms->output(1));
         result2->set_friendly_name("result2");
-        auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
+        auto result3 = std::make_shared<ov::op::v0::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
         f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
@@ -1888,27 +1888,27 @@ TEST(CNNNGraphImplTests, CheckUniqueNames) {
 TEST(CNNNGraphImplTests, CheckNonUniqueParameterName) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto boxes = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto boxes = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
         boxes->set_friendly_name("boxes");
-        auto scores = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
+        auto scores = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
         scores->set_friendly_name("boxes");
-        auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
-        auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
-        auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+        auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
+        auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
+        auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
+        auto nms = std::make_shared<ov::opset5::NonMaxSuppression>(
            boxes,
            scores,
           max_output_boxes_per_class,
           iou_threshold,
           score_threshold,
-          ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+          ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
           true);
-        auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
+        auto result1 = std::make_shared<ov::op::v0::Result>(nms->output(0));
         result1->set_friendly_name("result1");
-        auto result2 = std::make_shared<ngraph::opset5::Result>(nms->output(1));
+        auto result2 = std::make_shared<ov::op::v0::Result>(nms->output(1));
         result2->set_friendly_name("result2");
-        auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
+        auto result3 = std::make_shared<ov::op::v0::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
         f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
@@ -1921,27 +1921,27 @@ TEST(CNNNGraphImplTests, CheckNonUniqueParameterName) {
 TEST(CNNNGraphImplTests, CheckNonUniqueResultName) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto boxes = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto boxes = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
         boxes->set_friendly_name("nms.1");
-        auto scores = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
+        auto scores = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
         scores->set_friendly_name("scores");
-        auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
-        auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
-        auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+        auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
+        auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
+        auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
+        auto nms = std::make_shared<ov::opset5::NonMaxSuppression>(
           boxes,
           scores,
           max_output_boxes_per_class,
           iou_threshold,
          score_threshold,
-         ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+         ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
          true);
-        auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
+        auto result1 = std::make_shared<ov::op::v0::Result>(nms->output(0));
         result1->set_friendly_name("result1");
-        auto result2 = std::make_shared<ngraph::opset5::Result>(nms->output(1));
+        auto result2 = std::make_shared<ov::op::v0::Result>(nms->output(1));
         result2->set_friendly_name("result2");
-        auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
+        auto result3 = std::make_shared<ov::op::v0::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
         f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
@@ -1954,25 +1954,25 @@ TEST(CNNNGraphImplTests, CheckNonUniqueResultName) {
 TEST(CNNNGraphImplTests, CheckNonUniqueNewResultName) {
     std::shared_ptr<ngraph::Function> f;
     {
-        auto boxes = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
+        auto boxes = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
         boxes->set_friendly_name("nms.1");
-        auto scores = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
+        auto scores = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1000});
         scores->set_friendly_name("scores");
-        auto max_output_boxes_per_class = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
-        auto iou_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
-        auto score_threshold = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
-        auto nms = std::make_shared<ngraph::opset5::NonMaxSuppression>(
+        auto max_output_boxes_per_class = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {10});
+        auto iou_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.75});
+        auto score_threshold = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {0.7});
+        auto nms = std::make_shared<ov::opset5::NonMaxSuppression>(
          boxes,
          scores,
          max_output_boxes_per_class,
         iou_threshold,
         score_threshold,
-        ngraph::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
+        ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER,
         true);
-        auto result1 = std::make_shared<ngraph::opset5::Result>(nms->output(0));
+        auto result1 = std::make_shared<ov::op::v0::Result>(nms->output(0));
         result1->set_friendly_name("result1");
-        auto result3 = std::make_shared<ngraph::opset5::Result>(nms->output(2));
+        auto result3 = std::make_shared<ov::op::v0::Result>(nms->output(2));
         result3->set_friendly_name("result3");
         nms->set_friendly_name("nms");
         f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result3},
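All four naming tests build the same opset5 NonMaxSuppression graph; its three outputs are what the unique-name checks iterate over. The construction as a standalone helper, under the assumption that the wrapper name is ours and default output types are acceptable:

// NMS-5 has three outputs (selected_indices, selected_scores, valid_outputs).
#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/opsets/opset5.hpp"

std::shared_ptr<ov::Model> nms5_model() {
    auto boxes = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1000, 4});
    auto scores = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 1000});
    auto max_boxes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {10});
    auto iou = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.75f});
    auto score = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.7f});
    auto nms = std::make_shared<ov::opset5::NonMaxSuppression>(
        boxes, scores, max_boxes, iou, score,
        ov::opset5::NonMaxSuppression::BoxEncodingType::CORNER, true);
    // Expose every NMS output as a model result, as the tests do.
    return std::make_shared<ov::Model>(nms->outputs(), ov::ParameterVector{boxes, scores});
}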
"id"); - auto relu = std::make_shared(mul); + auto relu = std::make_shared(mul); mem_w->add_control_dependency(mem_r); relu->add_control_dependency(mem_w); - ngraph::ResultVector results{std::make_shared(relu)}; + ngraph::ResultVector results{std::make_shared(relu)}; function = std::make_shared(results, params, "ExportImportNetwork"); } diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp index fd207630f4d5f5..005e2b5350f6e8 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_export_multi_inputs.cpp @@ -25,7 +25,7 @@ class ImportMultiInput : public FuncTestUtils::ImportNetworkTestBase { ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape(inputShape)), std::make_shared(ngPrc, ov::Shape(inputShape))}; auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(mul1); + auto result = std::make_shared(mul1); function = std::make_shared(ngraph::ResultVector{result}, input, "multiple_input"); } diff --git a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp index b57cbb822950e9..da9ee1d225a196 100644 --- a/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp +++ b/src/plugins/intel_gna/tests/functional/Import_export_tests/import_reshape_permute_conv.cpp @@ -20,12 +20,12 @@ class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase { std::vector outFormShapes1 = {1, 1, 168, 2}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto permute1 = std::make_shared( + auto permute1 = std::make_shared( reshape1, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})); auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, @@ -37,16 +37,16 @@ class ImportReshapePermuteConv : public FuncTestUtils::ImportNetworkTestBase { ngraph::op::PadType::VALID, 12); - auto permute2 = std::make_shared( + auto permute2 = std::make_shared( conv1, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})); std::vector outFormShapes2 = {1, 1932}; auto pattern2 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); - auto reshape2 = std::make_shared(permute2, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "ExportImportNetwork"); }; }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp index 
5fb77c679a3451..c3ade845a12ba6 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/4d_eltwise.cpp @@ -57,18 +57,18 @@ class Eltwise4dBroadcast : public testing::WithParamInterface, pu std::vector outFormShapes1 = {1, 1, 6, 12}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto constant1 = ngraph::builder::makeConstant(ngPrc, {1, 1, 1, 12}, {}, true); auto eltwise = ngraph::builder::makeEltwise(reshape1, constant1, eltwiseType); std::vector outFormShapes2 = {1, 72}; auto pattern2 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); - auto reshape2 = std::make_shared(eltwise, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); + auto reshape2 = std::make_shared(eltwise, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "Eltwise4dBroadcast"); } }; @@ -106,19 +106,19 @@ class Eltwise4dMultipleInput : public testing::WithParamInterface std::make_shared(ngPrc, ov::Shape{1, 72})}; std::vector outFormShapes1 = {1, 1, 6, 12}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, outFormShapes1); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto reshape2 = std::make_shared(params[1], pattern1, false); + auto reshape2 = std::make_shared(params[1], pattern1, false); auto eltwise = ngraph::builder::makeEltwise(reshape1, reshape2, eltwiseType); std::vector outFormShapes2 = {1, 72}; auto pattern2 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); - auto reshape3 = std::make_shared(eltwise, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, outFormShapes2); + auto reshape3 = std::make_shared(eltwise, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "Eltwise4dMultipleInput"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp index b92438407d7289..2c95884493e754 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/act_maxpool_reordering.cpp @@ -101,7 +101,7 @@ class ActMaxpoolReordering : public testing::WithParamInterface(maxpool)}; + ngraph::ResultVector results{std::make_shared(maxpool)}; function = std::make_shared(results, inputVector, "ActMaxpoolReordering"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp index b28926831e5657..46fb55bf594f90 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp 
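Several of the import/export tests above build the same prologue: a flat 1xN input is reshaped to 4D and then transposed with order {0, 3, 1, 2} so the convolution sees NCHW data. A standalone sketch of that pair; the concrete 1x336 factorization mirrors the tests, but any factorization of N works the same way:

// Flat -> NHWC reshape -> NCHW transpose, as in ImportReshapePermuteConv.
#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/transpose.hpp"

ov::Output<ov::Node> flat_to_nchw(const ov::Output<ov::Node>& flat) {
    // 1x336 -> 1x1x168x2 (NHWC layout), then permute to 1x2x1x168 (NCHW).
    auto shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 1, 168, 2});
    auto reshaped = std::make_shared<ov::op::v1::Reshape>(flat, shape, false);
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
    return std::make_shared<ov::op::v1::Transpose>(reshaped, order);
}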
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp
index b28926831e5657..46fb55bf594f90 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/broadcast_const_with_fq.cpp
@@ -61,8 +61,8 @@ class BroadcastConstWithFq : public testing::WithParamInterface
         auto constant = ngraph::builder::makeConstant(ngPrc, inputShape2, {}, true);
         auto fakeQuantize2 = ngraph::builder::makeFakeQuantize(constant, ngPrc, level, {}, {-0.5}, {0.5}, {-0.5}, {0.5});
-        auto add = std::make_shared(fakeQuantize1, fakeQuantize2);
-        ngraph::ResultVector results{std::make_shared(add)};
+        auto add = std::make_shared<ov::op::v1::Add>(fakeQuantize1, fakeQuantize2);
+        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(add)};
         function = std::make_shared<ngraph::Function>(results, params, "BroadcastConstWithFq");
     }
 };
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp
index ce3945dc849723..0f182484e33dbb 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/concat_memory_param.cpp
@@ -11,6 +11,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/blob_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
+#include "openvino/opsets/opset9.hpp"
 #include "ov_models/builders.hpp"
 #include "ov_models/pass/convert_prc.hpp"
 #include "ov_models/utils/ov_helpers.hpp"
@@ -68,8 +69,8 @@ class ConcatMemoryTest : public testing::WithParamInterface
             std::make_shared<ov::op::v0::Parameter>(ng_prc, ov::Shape{1, in_total_dims_size})};
         auto reshape_pattern =
-            std::make_shared(ov::element::Type_t::i64, ov::Shape{2}, input_shape);
-        auto reshape = std::make_shared(params[0], reshape_pattern, false);
+            std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{2}, input_shape);
+        auto reshape = std::make_shared<ov::op::v1::Reshape>(params[0], reshape_pattern, false);
 
         ov::op::util::VariableInfo vi{};
         vi.data_shape = ov::PartialShape(input_shape);
@@ -78,7 +79,7 @@ class ConcatMemoryTest : public testing::WithParamInterface
         auto var = std::make_shared<ov::op::util::Variable>(vi);
         std::vector initial_state = ov::test::utils::generate_float_numbers(in_total_dims_size, -3.f, 3.f);
         auto initial_state_node = ngraph::builder::makeConstant(ov::element::Type_t::f32, input_shape, initial_state);
-        auto readValue = std::make_shared(initial_state_node, var);
+        auto readValue = std::make_shared<ov::op::v6::ReadValue>(initial_state_node, var);
 
         const int axis = 1;
         ov::OutputVector to_concat{readValue, reshape};
@@ -89,14 +90,14 @@ class ConcatMemoryTest : public testing::WithParamInterface
-        auto etlwise_result_node = std::make_shared(concat, etlwise_node);
+        auto etlwise_result_node = std::make_shared(concat, etlwise_node);
 
-        ov::ResultVector results{std::make_shared(etlwise_result_node)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(etlwise_result_node)};
         auto split_axis_op =
             std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis});
         auto split_node = std::make_shared<ov::op::v1::Split>(concat, split_axis_op, 2);
 
-        auto assign_node = std::make_shared(split_node->output(1), var);
+        auto assign_node = std::make_shared<ov::op::v6::Assign>(split_node->output(1), var);
         ngraph::SinkVector sinks{assign_node};
         function = std::make_shared<ngraph::Function>(results, sinks, params);
     }
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp
index f749287d38ee7c..f261b5a5e9cda2 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/concat_restrictions.cpp
@@ -64,7 +64,7 @@ struct ReLUConcatAxis {
         concatInputs.push_back(constNode);
         auto concat = std::make_shared(concatInputs, axis);
 
-        ov::ResultVector results{std::make_shared(concat)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
         return std::make_shared<ngraph::Function>(results, params, getName());
     }
     static const char* getMatch() {
@@ -110,7 +110,7 @@ struct MatmulConcatAxis {
         concatInputs.push_back(matmul2);
         auto concat = std::make_shared(concatInputs, axis);
 
-        ov::ResultVector results{std::make_shared(concat)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
         return std::make_shared<ngraph::Function>(results, params, getName());
     }
     static const char* getMatch() {
@@ -152,7 +152,7 @@ struct ConvNCHWConcatAxis {
         concatInputs.push_back(constNode);
         auto concat = std::make_shared(concatInputs, axis);
 
-        ov::ResultVector results{std::make_shared(concat)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
         return std::make_shared<ngraph::Function>(results, params, getName());
     }
     static const char* getMatch() {
@@ -171,7 +171,7 @@ struct ConvNHWCConcatAxis {
         ov::OutputVector concatInputs;
 
         ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto transposeInOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
+        auto transposeInOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
         auto transposeIn = std::make_shared<ov::opset10::Transpose>(params[0], transposeInOrder);
         size_t numOutChannels = 8;
         size_t kernelSize = 1;
@@ -188,7 +188,7 @@ struct ConvNHWCConcatAxis {
                                                      numOutChannels,
                                                      true,
                                                      filterWeights);
-        auto transposeOutOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
+        auto transposeOutOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
        auto transposeOut = std::make_shared<ov::opset10::Transpose>(conv, transposeOutOrder);
         concatInputs.push_back(transposeOut);
@@ -198,7 +198,7 @@ struct ConvNHWCConcatAxis {
         concatInputs.push_back(constNode);
         auto concat = std::make_shared(concatInputs, axis);
 
-        ov::ResultVector results{std::make_shared(concat)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(concat)};
         return std::make_shared<ngraph::Function>(results, params, getName());
     }
     static const char* getMatch() {
@@ -217,7 +217,7 @@ struct ConvConcatNHWCAxis {
         ov::OutputVector concatInputs;
 
         ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto transposeInOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
+        auto transposeInOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
         auto transposeIn1 = std::make_shared<ov::opset10::Transpose>(params[0], transposeInOrder);
         auto transposeIn2 = std::make_shared<ov::opset10::Transpose>(params[0], transposeInOrder);
         size_t numOutChannels = 8;
@@ -253,10 +253,10 @@ struct ConvConcatNHWCAxis {
         concatInputs.push_back(conv2);
         auto concat = std::make_shared(concatInputs, axis);
 
-        auto transposeOutOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
+        auto transposeOutOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
         auto transposeOut = std::make_shared<ov::opset10::Transpose>(concat, transposeOutOrder);
 
-        ov::ResultVector results{std::make_shared(transposeOut)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(transposeOut)};
         return std::make_shared<ngraph::Function>(results, params, getName());
     }
     static const char* getMatch() {
@@ -275,7 +275,7 @@ struct ConvConcatConcatNHWCAxis {
         ov::OutputVector concat1Inputs, concat2Inputs;
 
         ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto transposeInOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
+        auto transposeInOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
         auto transposeIn1 = std::make_shared<ov::opset10::Transpose>(params[0], transposeInOrder);
         auto transposeIn2 = std::make_shared<ov::opset10::Transpose>(params[0], transposeInOrder);
         size_t numOutChannels = 64;
@@ -307,7 +307,7 @@ struct ConvConcatConcatNHWCAxis {
                                                       true,
                                                       filterWeights2);
 
-        auto transposeOutOrder = ov::opset10::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
+        auto transposeOutOrder = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
         auto transposeOut1 = std::make_shared<ov::opset10::Transpose>(conv1, transposeOutOrder);
         auto transposeOut2 = std::make_shared<ov::opset10::Transpose>(conv2, transposeOutOrder);
@@ -316,7 +316,7 @@ struct ConvConcatConcatNHWCAxis {
         auto concat1 = std::make_shared(concat1Inputs, 2);
         auto squeeze = std::make_shared<ov::opset10::Squeeze>(
             concat1,
-            ov::opset10::Constant::create(ov::element::i64, ov::Shape{2}, {0, 1}));
+            ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 1}));
 
         size_t totalSize = ov::shape_size(squeeze->get_shape());
         auto constValues = ov::test::utils::generate_float_numbers(totalSize, -0.0001f, 0.0001f);
@@ -327,12 +327,12 @@ struct ConvConcatConcatNHWCAxis {
         auto concat2 = std::make_shared(concat2Inputs, axis);
         auto reshape = std::make_shared<ov::opset10::Reshape>(
             concat2,
-            ov::opset10::Constant::create(ov::element::i64,
-                                          ov::Shape{2},
-                                          ov::Shape{1, shape_size(concat2->get_shape())}),
+            ov::op::v0::Constant::create(ov::element::i64,
+                                         ov::Shape{2},
+                                         ov::Shape{1, shape_size(concat2->get_shape())}),
             false);
 
-        ov::ResultVector results{std::make_shared(reshape)};
+        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(reshape)};
         return std::make_shared<ngraph::Function>(results, params, getName());
     }
     static const char* getMatch() {
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp
index 999dd3574126f7..fc77b4ab6f0515 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/conv_with_padding.cpp
@@ -70,16 +70,16 @@ class ConvWithPadding : public testing::WithParamInterface
-        auto input = std::make_shared(ng_precision, ngraph::Shape{input_shape});
+        auto input = std::make_shared<ov::op::v0::Parameter>(ng_precision, ngraph::Shape{input_shape});
         auto filter = ngraph::builder::makeConstant(ng_precision, filter_shape, {1.f});
-        auto conv = std::make_shared(input,
-                                     filter,
-                                     ov::Strides{1, 1},
-                                     padding_size,
-                                     padding_size,
-                                     ov::Strides{});
-
-        auto res = std::make_shared(conv);
+        auto conv = std::make_shared<ov::op::v1::Convolution>(input,
+                                                              filter,
+                                                              ov::Strides{1, 1},
+                                                              padding_size,
+                                                              padding_size,
+                                                              ov::Strides{});
+
+        auto res = std::make_shared<ov::op::v0::Result>(conv);
         function = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{input});
     }
 };
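ConcatMemoryTest above is the one stateful model in this batch: a `ReadValue`/`Assign` pair sharing an `ov::op::util::Variable`, so part of the concat result is fed back as state on the next inference. A compact standalone version of that wiring; the 1x8 shape and the "state" variable id are illustrative assumptions:

// Variable-backed state with the v6 ReadValue/Assign pair, as in ConcatMemoryTest.
#include <memory>
#include <vector>
#include "openvino/core/model.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/concat.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/util/variable.hpp"

std::shared_ptr<ov::Model> stateful_concat() {
    ov::Shape shape{1, 8};
    auto var = std::make_shared<ov::op::util::Variable>(
        ov::op::util::VariableInfo{shape, ov::element::f32, "state"});
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
    auto init = ov::op::v0::Constant::create(ov::element::f32, shape, std::vector<float>(8, 0.f));
    // ReadValue yields the variable's current value (init on the first run).
    auto read = std::make_shared<ov::op::v6::ReadValue>(init, var);
    auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector{read, input}, 1);
    // Assign stores the fresh input back into the same variable; it is a sink.
    auto assign = std::make_shared<ov::op::v6::Assign>(input, var);
    return std::make_shared<ov::Model>(ov::OutputVector{concat}, ov::SinkVector{assign},
                                       ov::ParameterVector{input});
}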
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp
index 33446d1577f417..fb7843e04706d3 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_dwsc_to_scaleshifts.cpp
@@ -17,7 +17,7 @@
 #include "transformations/init_node_info.hpp"
 
 using namespace ngraph;
-using namespace ngraph::opset7;
+using namespace ov::opset1;
 
 namespace LayerTestsDefinitions {
@@ -99,7 +99,7 @@ class DWSCToScaleShiftsTest : public testing::WithParamInterface
         ov::ParameterVector input{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto transposeInOrder = op::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2});
+        auto transposeInOrder = Constant::create(element::i64, Shape{4}, {0, 3, 1, 2});
         auto transposeIn = std::make_shared<Transpose>(input[0], transposeInOrder);
         auto filterSize = std::accumulate(std::begin(filter), std::end(filter), 1ull, std::multiplies());
         auto filterWeights =
@@ -118,7 +118,7 @@ class DWSCToScaleShiftsTest : public testing::WithParamInterface
         auto transposeOut = std::make_shared(dwsc, transposeOutOrder);
 
         if (model == modelType::TranspDWSCBiasTransp) {
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp
index 65db77afeaa207..08c81e80862766 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_fullyconnected.cpp
@@ -67,9 +67,9 @@ class ConvertMatmulToFcPass : public testing::WithParamInterface
         auto const_mult2 = ngraph::builder::makeConstant(ngPrc, inputShape[0], weights);
 
         auto const_eltwise = ngraph::builder::makeConstant(ngPrc, {inputShape[0][0], inputShape[1][1]}, {1.0f});
-        auto matmul = std::make_shared(const_mult2, params[0], false, false);
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(const_mult2, params[0], false, false);
 
-        auto eltwise = std::make_shared(matmul, const_eltwise);
+        auto eltwise = std::make_shared(matmul, const_eltwise);
         function = std::make_shared<ngraph::Function>(eltwise, params, "ConvertMatmulToFC");
     }
 };
@@ -116,26 +116,26 @@ class ConvertMatmulToFcWithTransposesPass : public testing::WithParamInterface
             std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape({1, inputShape[1][0] * inputShape[1][1]}))};
-        auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(
+        auto reshape1 = std::make_shared<ov::op::v1::Reshape>(
             params[0],
             ngraph::builder::makeConstant(ngraph::element::i64, {inputShape[1].size()}, inputShape[1]),
             false);
-        auto transpose1 = std::make_shared<ngraph::opset1::Transpose>(
+        auto transpose1 = std::make_shared<ov::op::v1::Transpose>(
             reshape1,
-            ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}));
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}));
 
         std::vector weights =
             ov::test::utils::generate_float_numbers(inputShape[0][0] * inputShape[0][1], -0.1f, 0.1f);
         auto const_mult2 = ngraph::builder::makeConstant(ngPrc, inputShape[0], weights);
-        auto matmul = std::make_shared<ngraph::opset1::MatMul>(const_mult2, transpose1, false, false);
-        auto relu = std::make_shared<ngraph::opset1::Relu>(matmul);
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(const_mult2, transpose1, false, false);
+        auto relu = std::make_shared<ov::op::v0::Relu>(matmul);
 
-        auto transpose2 = std::make_shared<ngraph::opset1::Transpose>(
+        auto transpose2 = std::make_shared<ov::op::v1::Transpose>(
             relu,
-            ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}));
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector{1, 0}));
 
         auto transpose_output_shape = transpose2->get_output_shape(0);
         ngraph::Shape output_shape = {1, transpose_output_shape[0] * transpose_output_shape[1]};
-        auto reshape2 = std::make_shared<ngraph::opset1::Reshape>(
+        auto reshape2 = std::make_shared<ov::op::v1::Reshape>(
             transpose2,
             ngraph::builder::makeConstant(ngraph::element::i64, {output_shape.size()}, output_shape),
             false);
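The ConvertMatmulToFC tests above wrap a MatMul in a transpose(1,0) sandwich so GNA can treat it as a fully connected layer. The same chain as a small self-contained helper; the 16x16 weight shape and fill value are placeholders of ours:

// transpose(1,0) -> MatMul -> Relu -> transpose(1,0), as in the FC conversion tests.
#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/transpose.hpp"

ov::Output<ov::Node> fc_with_transposes(const ov::Output<ov::Node>& activations_2d) {
    // Assumes activations_2d is [16, batch] after the leading transpose.
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 0});
    auto t_in = std::make_shared<ov::op::v1::Transpose>(activations_2d, order);
    auto weights = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{16, 16},
                                                std::vector<float>(256, 0.01f));
    auto matmul = std::make_shared<ov::op::v0::MatMul>(weights, t_in, false, false);
    auto relu = std::make_shared<ov::op::v0::Relu>(matmul);
    return std::make_shared<ov::op::v1::Transpose>(relu, order);
}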
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp
index 85dc52b42f1fe2..702bfbb628c2ec 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_matmul_to_pointwise_conv.cpp
@@ -13,6 +13,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/blob_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "ov_models/builders.hpp"
 #include "ov_models/pass/convert_prc.hpp"
 #include "ov_models/utils/ov_helpers.hpp"
@@ -79,19 +80,19 @@ class ConvertMatmulToPointwiseConv : public testing::WithParamInterface
         std::vector weights = ov::test::utils::generate_float_numbers(elemNum * elemNum, -0.1f, 0.1f);
-        auto weightsNode = std::make_shared(ngPrc, ngraph::Shape{elemNum, elemNum}, weights);
+        auto weightsNode = std::make_shared<ov::opset7::Constant>(ngPrc, ngraph::Shape{elemNum, elemNum}, weights);
         auto matmul = std::make_shared(params[0], weightsNode, false, true);
 
         auto bias = ngraph::builder::makeConstant(ngPrc, std::vector{1, batch, 1}, std::vector{1.0f});
         auto add = ngraph::builder::makeEltwise(matmul, bias, ngraph::helpers::EltwiseTypes::ADD);
 
-        auto pattern = std::make_shared(ngraph::element::Type_t::i64,
-                                        ngraph::Shape{inputShape.size()},
-                                        inputShape);
-        auto reshape = std::make_shared(matmul, pattern, false);
-        auto relu = std::make_shared(reshape);
+        auto pattern = std::make_shared<ov::opset7::Constant>(ngraph::element::Type_t::i64,
+                                                              ngraph::Shape{inputShape.size()},
+                                                              inputShape);
+        auto reshape = std::make_shared<ov::opset7::Reshape>(matmul, pattern, false);
+        auto relu = std::make_shared<ov::opset7::Relu>(reshape);
 
-        ngraph::ResultVector results{std::make_shared(relu)};
+        ngraph::ResultVector results{std::make_shared<ov::opset7::Result>(relu)};
         function = std::make_shared<ngraph::Function>(results, params, "ConvertMatmulToPointwiseConv");
     }
 };
@@ -145,29 +146,29 @@ class ConvertMatmulToPointwiseConvWithFqNeg
             ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{inputDataMin});
         auto inputHighNode =
             ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{inputDataMax});
-        auto inputFQ = std::make_shared(params[0],
-                                        inputLowNode,
-                                        inputHighNode,
-                                        inputLowNode,
-                                        inputHighNode,
-                                        UINT16_MAX);
+        auto inputFQ = std::make_shared<ov::opset7::FakeQuantize>(params[0],
+                                                                  inputLowNode,
+                                                                  inputHighNode,
+                                                                  inputLowNode,
+                                                                  inputHighNode,
+                                                                  UINT16_MAX);
 
         size_t elemNum = inputShape[inputShape.size() - 1];
 
         const float weightsMin = -0.2f;
         const float weightsMax = 0.2f;
         std::vector weights =
             ov::test::utils::generate_float_numbers(elemNum * elemNum, weightsMin, weightsMax);
-        auto weightsNode = std::make_shared(ngPrc, ngraph::Shape{elemNum, elemNum}, weights);
+        auto weightsNode = std::make_shared<ov::opset7::Constant>(ngPrc, ngraph::Shape{elemNum, elemNum}, weights);
         auto weightsLowNode =
             ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{weightsMin});
         auto weightsHighNode =
             ngraph::builder::makeConstant(ngPrc, std::vector{1}, std::vector{weightsMax});
-        auto weightsFQNode = std::make_shared(weightsNode,
-                                              weightsLowNode,
-                                              weightsHighNode,
-                                              weightsLowNode,
-                                              weightsHighNode,
-                                              UINT16_MAX);
+        auto weightsFQNode = std::make_shared<ov::opset7::FakeQuantize>(weightsNode,
+                                                                        weightsLowNode,
+                                                                        weightsHighNode,
+                                                                        weightsLowNode,
+                                                                        weightsHighNode,
+                                                                        UINT16_MAX);
         auto matmul = std::make_shared(inputFQ, weightsFQNode, false, true);
 
         auto bias = ngraph::builder::makeConstant(ngPrc, std::vector{1, 1, 1}, std::vector{1.0f});
@@ -179,21 +180,21 @@ class ConvertMatmulToPointwiseConvWithFqNeg
         auto outputHighNode = ngraph::builder::makeConstant(ngPrc,
                                                             std::vector{1},
                                                             std::vector{inputDataMax * weightsMax * elemNum});
-        auto outputFQ = std::make_shared(add,
-                                         outputLowNode,
-                                         outputHighNode,
-                                         outputLowNode,
-                                         outputHighNode,
-                                         UINT16_MAX);
+        auto outputFQ = std::make_shared<ov::opset7::FakeQuantize>(add,
+                                                                   outputLowNode,
+                                                                   outputHighNode,
+                                                                   outputLowNode,
+                                                                   outputHighNode,
+                                                                   UINT16_MAX);
 
-        auto pattern = std::make_shared(ngraph::element::Type_t::i64,
-                                        ngraph::Shape{inputShape.size()},
-                                        inputShape);
-        auto reshape = std::make_shared(outputFQ, pattern, false);
+        auto pattern = std::make_shared<ov::opset7::Constant>(ngraph::element::Type_t::i64,
+                                                              ngraph::Shape{inputShape.size()},
+                                                              inputShape);
+        auto reshape = std::make_shared<ov::opset7::Reshape>(outputFQ, pattern, false);
 
-        auto relu = std::make_shared(reshape);
+        auto relu = std::make_shared<ov::opset7::Relu>(reshape);
 
-        ngraph::ResultVector results{std::make_shared(relu)};
+        ngraph::ResultVector results{std::make_shared<ov::opset7::Result>(relu)};
         function = std::make_shared<ngraph::Function>(results, params, "ConvertMatmulToPointwiseConv");
     }
 };
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp
index b862a60f2d7a91..905ac67d98ab34 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/convert_padded_to_valid_conv.cpp
@@ -13,6 +13,7 @@
 
 #include "../shared_tests_instances/skip_tests_check.hpp"
 #include "common_test_utils/test_common.hpp"
+#include "ngraph/opsets/opset7.hpp"
 #include "ov_models/builders.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
 #include "transformations/init_node_info.hpp"
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp
index 65a23654f87909..0e23c8954d4240 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_align_filter.cpp
@@ -11,6 +11,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "functional_test_utils/blob_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "ov_models/builders.hpp"
 #include "ov_models/pass/convert_prc.hpp"
 #include "ov_models/utils/ov_helpers.hpp"
@@ -68,22 +69,22 @@ class ConvolutionAlignFilterTest : public testing::WithParamInterface
         ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape{1, in_total_dims_size})};
         auto pattern1 =
-            std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, splitInputShape);
-        auto reshape1 = std::make_shared(params[0], pattern1, false);
+            std::make_shared<ov::opset8::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{2}, splitInputShape);
+        auto reshape1 = std::make_shared<ov::opset8::Reshape>(params[0], pattern1, false);
 OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(reshape1, ngPrc, 2, 0);
 OPENVINO_SUPPRESS_DEPRECATED_END
-        auto relu1 = std::make_shared(split->output(0));
-        auto relu2 = std::make_shared(split->output(1));
+        auto relu1 = std::make_shared<ov::opset8::Relu>(split->output(0));
+        auto relu2 = std::make_shared<ov::opset8::Relu>(split->output(1));
 
-        auto concat = std::make_shared(ngraph::OutputVector{relu1, relu2}, 0);
-        auto pattern2 = std::make_shared(ngraph::element::Type_t::i64,
-                                         ngraph::Shape{2},
-                                         ngraph::Shape{1, in_total_dims_size});
-        auto reshape2 = std::make_shared(concat, pattern2, false);
+        auto concat = std::make_shared<ov::opset8::Concat>(ngraph::OutputVector{relu1, relu2}, 0);
+        auto pattern2 = std::make_shared<ov::opset8::Constant>(ngraph::element::Type_t::i64,
+                                                               ngraph::Shape{2},
+                                                               ngraph::Shape{1, in_total_dims_size});
+        auto reshape2 = std::make_shared<ov::opset8::Reshape>(concat, pattern2, false);
 
-        ngraph::ResultVector results{std::make_shared(reshape2)};
+        ngraph::ResultVector results{std::make_shared<ov::opset8::Result>(reshape2)};
         function = std::make_shared<ngraph::Function>(results, params, "ConvAlignFilter");
         functionRefs = ngraph::clone_function(*function);
     }
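A recurring idiom in the FQ-based tests above is wrapping a tensor in a FakeQuantize whose input and output bounds are identical, so the node only quantizes, never rescales. That wrapper, factored out as a sketch (the helper name is ours):

// FQ wrapper with shared low/high bounds, as these GNA tests construct it.
#include <cstddef>
#include <memory>
#include "openvino/op/constant.hpp"
#include "openvino/op/fake_quantize.hpp"

std::shared_ptr<ov::Node> wrap_fq(const ov::Output<ov::Node>& node, float lo, float hi,
                                  std::size_t levels) {
    auto low = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {lo});
    auto high = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {hi});
    // Same constants serve as input and output ranges.
    return std::make_shared<ov::op::v0::FakeQuantize>(node, low, high, low, high, levels);
}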
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp
index e415b69e7588f9..936b7a9af9f35e 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/convolution_crop_axis_h.cpp
@@ -55,7 +55,7 @@ class CropAfterConvolutionTest : public testing::WithParamInterface,
         auto reshape_pattern_size = ngraph::Shape{inputShape.size()};
         auto reshape_pattern = ngraph::builder::makeConstant(ov::element::i64, reshape_pattern_size, inputShape);
         ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto input_reshape = std::make_shared(params[0], reshape_pattern, false);
+        auto input_reshape = std::make_shared<ov::op::v1::Reshape>(params[0], reshape_pattern, false);
 
         const std::vector filterSize{1, 1};
         const std::vector strides{1, 1};
@@ -102,7 +102,7 @@ class CropAfterConvolutionTest : public testing::WithParamInterface,
                                                               numOutChannels,
                                                               false,
                                                               weights2_values);
-        ngraph::ResultVector results{std::make_shared(convolution_node2)};
+        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(convolution_node2)};
         function = std::make_shared<ngraph::Function>(results, params, "CropAfterConvolutionTest");
     }
 };
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp
index a61d23b14a0a2d..bff69b08bcc3f9 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_2d_conv.cpp
@@ -18,7 +18,7 @@
 #include "transformations/init_node_info.hpp"
 
 using namespace ngraph;
-using namespace opset1;
+using namespace ov::opset1;
 
 namespace LayerTestsDefinitions {
@@ -130,7 +130,7 @@ class Decompose2DConvTest : public testing::WithParamInterface
         ov::ParameterVector input{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto transposeInOrder = opset7::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2});
+        auto transposeInOrder = ov::op::v0::Constant::create(element::i64, Shape{4}, {0, 3, 1, 2});
         auto transposeIn = std::make_shared<Transpose>(input[0], transposeInOrder);
         auto filterSize = std::accumulate(std::begin(kernel), std::end(kernel), 1ull, std::multiplies());
         auto filterWeights =
@@ -146,7 +146,7 @@ class Decompose2DConvTest : public testing::WithParamInterface
         Output<Node> biasConst = std::make_shared<Constant>(ngPrc, biasShape, biasWeights);
 
         Output<Node> lastOp = std::make_shared<Transpose>(conv, transposeOutOrder);
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp
index 07c99e7a23df6e..7ad1d05ea4cad3 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/decompose_mvn.cpp
@@ -12,6 +12,8 @@
 #include
 
 #include "common_test_utils/test_common.hpp"
+#include "ngraph/opsets/opset2.hpp"
+#include "ngraph/opsets/opset8.hpp"
 #include "ov_models/builders.hpp"
 #include "shared_test_classes/base/layer_test_utils.hpp"
 #include "transformations/init_node_info.hpp"
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp
index c92aad2e8a11e3..cc2b903ceb2a8c 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/diagonal_insertion_test.cpp
@@ -24,7 +24,6 @@
 using namespace ngraph;
 using namespace ngraph::builder;
 using namespace ngraph::element;
 using namespace ngraph::op;
-using namespace ngraph::opset9;
 using namespace std;
 
 using DiagonalInsertionTestParams = tuple,  // Configuration
@@ -68,25 +67,26 @@ class DiagonalInsertionTest : public testing::WithParamInterface
         return {std::make_shared(type, ov::Shape(shapes))};
     }
 
-    shared_ptr CreateFQNode(const Type& type,
-                            const shared_ptr& node,
-                            float fq_min,
-                            float fq_max,
-                            std::size_t levels) {
+    shared_ptr<ov::op::v0::FakeQuantize> CreateFQNode(const Type& type,
+                                                      const shared_ptr<ov::Node>& node,
+                                                      float fq_min,
+                                                      float fq_max,
+                                                      std::size_t levels) {
         //
         auto fq_inp_min = makeConstant(type, {1}, {fq_min});
         auto fq_inp_max = makeConstant(type, {1}, {fq_max});
         auto fq_out_min = makeConstant(type, {1}, {fq_min});
         auto fq_out_max = makeConstant(type, {1}, {fq_max});
 
-        return make_shared(node, fq_inp_min, fq_inp_max, fq_out_min, fq_out_max, levels);
+        return make_shared<ov::op::v0::FakeQuantize>(node, fq_inp_min, fq_inp_max, fq_out_min, fq_out_max, levels);
     }
 
-    std::shared_ptr CreateReshapeNode(element::Type in_type,
-                                      shared_ptr input_node,
-                                      std::vector target_shape_vect) {
+    std::shared_ptr<ov::op::v1::Reshape> CreateReshapeNode(element::Type in_type,
+                                                           shared_ptr<ov::Node> input_node,
+                                                           std::vector target_shape_vect) {
         //
-        const auto target_shape_const = Constant::create(in_type, Shape{target_shape_vect.size()}, target_shape_vect);
-        return std::make_shared(input_node, target_shape_const, false);
+        const auto target_shape_const =
+            ov::op::v0::Constant::create(in_type, Shape{target_shape_vect.size()}, target_shape_vect);
+        return std::make_shared<ov::op::v1::Reshape>(input_node, target_shape_const, false);
     }
 
     bool IsDebugEnabled(map<string, string>& configuration) {
@@ -143,10 +143,10 @@ class DiagonalInsertionTest : public testing::WithParamInterface
         auto add_const = makeConstant(precision, {height}, {}, true);
 
         auto add_const_fq = CreateFQNode(precision, add_const, fq_min_max[3][0], fq_min_max[3][1], fq_levels);
-        auto add = make_shared(add_const_fq, add_mm_reshape);
+        auto add = make_shared<ov::op::v1::Add>(add_const_fq, add_mm_reshape);
         auto add_fq = CreateFQNode(precision, add, fq_min_max[4][0], fq_min_max[4][1], fq_levels);
 
-        auto relu = make_shared(add_fq);
+        auto relu = make_shared<ov::op::v0::Relu>(add_fq);
 
         function = make_shared<Function>(relu, input_vect, "DiagonalInsertion");
     }
diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp
index b672884319a7fa..5527be1a36dc68 100644
--- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp
+++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_activation.cpp
@@ -77,23 +77,23 @@ class FQActivation : public testing::WithParamInterface, pub
         auto inputHighNode = ngraph::builder::makeConstant(ngPrc, {1}, {inputMinMax.second});
 
         ov::ParameterVector inputVector{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShape))};
-        auto inputFQNode = std::make_shared(inputVector[0],
-                                            inputLowNode,
-                                            inputHighNode,
-                                            inputLowNode,
-                                            inputHighNode,
-                                            levels.first);
+        auto inputFQNode = std::make_shared<ov::op::v0::FakeQuantize>(inputVector[0],
+                                                                      inputLowNode,
+                                                                      inputHighNode,
+                                                                      inputLowNode,
+                                                                      inputHighNode,
+                                                                      levels.first);
 
         auto relu =
             ngraph::builder::makeActivation(inputFQNode, ngraph::element::f32, ngraph::helpers::ActivationTypes::Relu);
-        auto reluFQNode = std::make_shared(relu,
-                                           inputLowNode,
-                                           inputHighNode,
-                                           inputLowNode,
-                                           inputHighNode,
-                                           levels.second);
-
-        ngraph::ResultVector results{std::make_shared(reluFQNode)};
+        auto reluFQNode = std::make_shared<ov::op::v0::FakeQuantize>(relu,
+                                                                     inputLowNode,
+                                                                     inputHighNode,
+                                                                     inputLowNode,
+                                                                     inputHighNode,
+                                                                     levels.second);
+
+        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(reluFQNode)};
         function = std::make_shared<ngraph::Function>(results, inputVector, "FQActivation");
     }
 };
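The decompose tests above hand NHWC models to the GNA plugin through a transpose "sandwich": permute to NCHW, convolve, permute back. That shape as a compact sketch, using a 1x1 kernel so the spatial math stays trivial; the channel counts and fill value are assumptions of ours:

// NHWC -> NCHW -> Convolution -> NHWC, the layout sandwich the decompose tests build.
#include <cstddef>
#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/convolution.hpp"
#include "openvino/op/transpose.hpp"

ov::Output<ov::Node> nhwc_conv(const ov::Output<ov::Node>& nhwc, size_t in_ch, size_t out_ch) {
    auto to_nchw = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 3, 1, 2});
    auto to_nhwc = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {0, 2, 3, 1});
    auto t_in = std::make_shared<ov::op::v1::Transpose>(nhwc, to_nchw);
    // 1x1 pointwise filter: out_ch x in_ch x 1 x 1.
    auto filters = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{out_ch, in_ch, 1, 1},
                                                std::vector<float>(out_ch * in_ch, 0.1f));
    auto conv = std::make_shared<ov::op::v1::Convolution>(t_in, filters, ov::Strides{1, 1},
                                                          ov::CoordinateDiff{0, 0},
                                                          ov::CoordinateDiff{0, 0},
                                                          ov::Strides{1, 1});
    return std::make_shared<ov::op::v1::Transpose>(conv, to_nhwc);
}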
b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp index 168e411ddfe9f1..63be0f64a7316a 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_multiple_weights.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -75,31 +76,31 @@ class FQFusionWithMultipleWeights : public testing::WithParamInterface(ngPrc, {1}, {weightsMinMax.first * 2}); auto weightsHighNode = ngraph::builder::makeConstant(ngPrc, {1}, {weightsMinMax.second * 2}); - auto weightsFQ = std::make_shared(weights, - weightsLowNode, - weightsHighNode, - weightsLowNode, - weightsHighNode, - levels); - - auto conv1 = std::make_shared(params[0], - weightsFQ, - std::vector{1, 1}, - std::vector{0, 0}, - std::vector{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::VALID); - auto add1 = std::make_shared( + auto weightsFQ = std::make_shared(weights, + weightsLowNode, + weightsHighNode, + weightsLowNode, + weightsHighNode, + levels); + + auto conv1 = std::make_shared(params[0], + weightsFQ, + std::vector{1, 1}, + std::vector{0, 0}, + std::vector{0, 0}, + std::vector{1, 1}, + ngraph::op::PadType::VALID); + auto add1 = std::make_shared( conv1, ngraph::builder::makeConstant(ngPrc, {}, std::vector{0.0f})); - auto conv2 = std::make_shared(params[1], - weightsFQ, - std::vector{1, 1}, - std::vector{0, 0}, - std::vector{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::VALID); - auto add2 = std::make_shared( + auto conv2 = std::make_shared(params[1], + weightsFQ, + std::vector{1, 1}, + std::vector{0, 0}, + std::vector{0, 0}, + std::vector{1, 1}, + ngraph::op::PadType::VALID); + auto add2 = std::make_shared( conv2, ngraph::builder::makeConstant(ngPrc, {}, std::vector{0.0f})); @@ -107,22 +108,14 @@ class FQFusionWithMultipleWeights : public testing::WithParamInterface(ngPrc, {1}, {-weightsMinMax.second * kernelSize * 10.0f}); auto outHighNode = ngraph::builder::makeConstant(ngPrc, {1}, {weightsMinMax.second * kernelSize * 10.0f}); - auto fq1 = std::make_shared(add1, - outLowNode, - outHighNode, - outLowNode, - outHighNode, - levels); - auto fq2 = std::make_shared(add2, - outLowNode, - outHighNode, - outLowNode, - outHighNode, - levels); - - auto add3 = std::make_shared(fq1, fq2); - - ngraph::ResultVector results{std::make_shared(add3)}; + auto fq1 = + std::make_shared(add1, outLowNode, outHighNode, outLowNode, outHighNode, levels); + auto fq2 = + std::make_shared(add2, outLowNode, outHighNode, outLowNode, outHighNode, levels); + + auto add3 = std::make_shared(fq1, fq2); + + ngraph::ResultVector results{std::make_shared(add3)}; function = std::make_shared(results, params, "FQFusionWithMultipleWeights"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp index 0222ac0bb7d487..3b3824c19bc9b4 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_fusion_with_sigmoid.cpp @@ -35,7 +35,7 @@ class FqFusionWithSigmoidTest : public LayerTestsUtils::LayerTestsCommon, ov::ParameterVector input{std::make_shared(ngPrc, 
ov::Shape{1, inputSize})}; auto constant = ngraph::builder::makeConstant(ngPrc, {1, inputSize}, std::vector{1}); auto mul1 = ngraph::builder::makeEltwise(input[0], constant, ngraph::helpers::EltwiseTypes::ADD); - auto sigmoid1 = std::make_shared(mul1); + auto sigmoid1 = std::make_shared(mul1); auto mul2 = ngraph::builder::makeEltwise(input[0], sigmoid1, ngraph::helpers::EltwiseTypes::MULTIPLY); auto fake3 = ngraph::builder::makeFakeQuantize(sigmoid1, ngPrc, @@ -46,7 +46,7 @@ class FqFusionWithSigmoidTest : public LayerTestsUtils::LayerTestsCommon, {minMaxFq.first}, {minMaxFq.second}); auto mul3 = ngraph::builder::makeEltwise(mul2, fake3, ngraph::helpers::EltwiseTypes::ADD); - auto result = std::make_shared(mul3); + auto result = std::make_shared(mul3); function = std::make_shared(ngraph::ResultVector{result}, input, "fq_fusion_with_sigmoid"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp index f9a8204cedc00d..1af3289e17351a 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_maxpool_reordering.cpp @@ -94,12 +94,12 @@ class FQMaxpoolReordering : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto inputFQ = std::make_shared(inputVector[0], - inputLowNode1, - inputHighNode1, - inputLowNode1, - inputHighNode1, - levels); + auto inputFQ = std::make_shared(inputVector[0], + inputLowNode1, + inputHighNode1, + inputLowNode1, + inputHighNode1, + levels); auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {8, inputShape[1], 1, 8}, {1.0f}); auto convLowNode = ngraph::builder::makeConstant(ngraph::element::f32, @@ -108,30 +108,30 @@ class FQMaxpoolReordering : public testing::WithParamInterface{1}, std::vector{inputDataMax1 * 35}); - auto convWeightsFQNode = std::make_shared(filterWeightsNode, - convLowNode, - convHighNode, - convLowNode, - convHighNode, - levels); - auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); - - auto conv = std::make_shared(inputFQ, - convWeightsFQ, - std::vector{1, 1}, - std::vector{0, 0}, - std::vector{0, 0}, - std::vector{1, 1}, - ngraph::op::PadType::VALID); + auto convWeightsFQNode = std::make_shared(filterWeightsNode, + convLowNode, + convHighNode, + convLowNode, + convHighNode, + levels); + auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode); + + auto conv = std::make_shared(inputFQ, + convWeightsFQ, + std::vector{1, 1}, + std::vector{0, 0}, + std::vector{0, 0}, + std::vector{1, 1}, + ngraph::op::PadType::VALID); auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{0.0f}); - auto add = std::make_shared(conv, biasesWeightsNode); + auto add = std::make_shared(conv, biasesWeightsNode); - auto convFQNode = std::make_shared(add, - inputLowNode2, - inputHighNode2, - inputLowNode2, - inputHighNode2, - levels); + auto convFQNode = std::make_shared(add, + inputLowNode2, + inputHighNode2, + inputLowNode2, + inputHighNode2, + levels); std::shared_ptr node_before_pooling = convFQNode; if (reshape) { @@ -139,9 +139,9 @@ class FQMaxpoolReordering : public testing::WithParamInterface()); auto reshapeConst1 = ngraph::builder::makeConstant(ngraph::element::i64, std::vector{2}, ngraph::Shape{1, total}); - auto reshapeNode1 = std::make_shared(convFQNode, reshapeConst1, false); + auto reshapeNode1 = std::make_shared(convFQNode, reshapeConst1, false); auto reshapeConst2 = 
ngraph::builder::makeConstant(ngraph::element::i64, std::vector{4}, shape); - auto reshapeNode2 = std::make_shared(reshapeNode1, reshapeConst2, false); + auto reshapeNode2 = std::make_shared(reshapeNode1, reshapeConst2, false); node_before_pooling = reshapeNode2; } @@ -157,7 +157,7 @@ class FQMaxpoolReordering : public testing::WithParamInterface(maxpool)}; + ngraph::ResultVector results{std::make_shared(maxpool)}; function = std::make_shared(results, inputVector, "FQMaxPoolReorder"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp index d0fc0e250a4e8f..ea646f5e3dea00 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_outputs_activation_.cpp @@ -89,13 +89,13 @@ class FQOutputsActivation : public testing::WithParamInterfaceoutput(i), ngraph::element::f32, ngraph::helpers::ActivationTypes::Sigmoid); - auto reluFQNode = std::make_shared(relu, - inputLowNode, - inputHighNode, - inputLowNode, - inputHighNode, - levels); - results.push_back(std::make_shared(reluFQNode)); + auto reluFQNode = std::make_shared(relu, + inputLowNode, + inputHighNode, + inputLowNode, + inputHighNode, + levels); + results.push_back(std::make_shared(reluFQNode)); } function = std::make_shared(results, inputVector, "FQOutputsActivation"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp index 0704d75983a55d..752b78acffd97c 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/fq_with_multiple_out_connections.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -53,32 +54,31 @@ class FQWithMultipleOutConnections : public testing::WithParamInterface(ngPrc, ov::Shape(shape))}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{3}, - ngraph::Shape{1, 2, 64}); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{3}, + ngraph::Shape{1, 2, 64}); + auto reshape1 = std::make_shared(params[0], pattern1, false); - auto relu1 = std::make_shared(reshape1); + auto relu1 = std::make_shared(reshape1); auto lowNode = ngraph::builder::makeConstant(ngPrc, {1}, {-10.0f}); auto highNode = ngraph::builder::makeConstant(ngPrc, {1}, {10.0f}); - auto fq = std::make_shared(relu1, - lowNode, - highNode, - lowNode, - highNode, - std::numeric_limits::max()); - - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape.size()}, - shape); - auto reshape2 = std::make_shared(fq, pattern2, false); - - auto relu2 = std::make_shared(fq); - auto reshape3 = std::make_shared(relu2, pattern2, false); - - ngraph::ResultVector results{std::make_shared(reshape2), - std::make_shared(reshape3)}; + auto fq = std::make_shared(relu1, + lowNode, + highNode, + lowNode, + highNode, + std::numeric_limits::max()); + + auto pattern2 = + std::make_shared(ngraph::element::Type_t::i64, 
ngraph::Shape{shape.size()}, shape); + auto reshape2 = std::make_shared(fq, pattern2, false); + + auto relu2 = std::make_shared(fq); + auto reshape3 = std::make_shared(relu2, pattern2, false); + + ngraph::ResultVector results{std::make_shared(reshape2), + std::make_shared(reshape3)}; function = std::make_shared(results, params, "FQFusionWithMultipleWeights"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp index d347c226910fb1..b7162a36c35b64 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/insert_copy_layer_before_self_concat.cpp @@ -90,9 +90,9 @@ class InsertCopyBeforeSelfConcatTest : public testing::WithParamInterface(concatInputs, axis); - auto relu = std::make_shared(concat); - results.push_back(std::make_shared(relu)); + auto concat = std::make_shared(concatInputs, axis); + auto relu = std::make_shared(concat); + results.push_back(std::make_shared(relu)); } function = std::make_shared(results, params, "InsertCopyBeforeSelfConcat"); } diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp index 9acaef8ca98a3a..482161a72884cb 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_before_matmul.cpp @@ -72,24 +72,22 @@ class InsertTransposeBeforeMatmul : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); - auto reshape = std::make_shared(params[0], pattern, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); + auto reshape = std::make_shared(params[0], pattern, false); std::shared_ptr weights_node; if (firstInConst) { std::vector weights = ov::test::utils::generate_float_numbers(matmul_in_shape[0], -0.2f, 0.2f); - weights_node = - std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0]}, weights); + weights_node = std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0]}, weights); } else { std::vector weights = ov::test::utils::generate_float_numbers(matmul_in_shape[1], -0.2f, 0.2f); - weights_node = - std::make_shared(ngPrc, ngraph::Shape{matmul_in_shape[1], 1}, weights); + weights_node = std::make_shared(ngPrc, ngraph::Shape{matmul_in_shape[1], 1}, weights); } auto matmul = firstInConst ? 
std::make_shared(weights_node, reshape, false, false) : std::make_shared(reshape, weights_node, false, false); - ngraph::ResultVector results{std::make_shared(matmul)}; + ngraph::ResultVector results{std::make_shared(matmul)}; function = std::make_shared(results, params, "InsertTransposeBeforeMatmul"); } }; @@ -164,24 +162,23 @@ class InsertTransposeBeforeConcatConcat : public testing::WithParamInterface(ngPrc, ov::Shape{1, inputShape})}; auto matmul_in_shape = ngraph::Shape{inputShape / 8, 8}; auto pattern = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); - auto reshape = std::make_shared(params[0], pattern, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, matmul_in_shape); + auto reshape = std::make_shared(params[0], pattern, false); std::vector data = ov::test::utils::generate_float_numbers(ngraph::shape_size(matmul_in_shape), -0.2f, 0.2f); - auto concat_const = std::make_shared(ngPrc, matmul_in_shape, data); + auto concat_const = std::make_shared(ngPrc, matmul_in_shape, data); ngraph::OutputVector concat_chunks{reshape, concat_const}; - auto concat = std::make_shared(concat_chunks, 0); + auto concat = std::make_shared(concat_chunks, 0); std::shared_ptr weights_node; std::vector weights = ov::test::utils::generate_float_numbers(matmul_in_shape[0] * 2, -0.2f, 0.2f); - weights_node = - std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0] * 2}, weights); + weights_node = std::make_shared(ngPrc, ngraph::Shape{1, matmul_in_shape[0] * 2}, weights); auto matmul = firstInConst ? std::make_shared(weights_node, concat, false, false) : std::make_shared(concat, weights_node, false, false); - ngraph::ResultVector results{std::make_shared(matmul)}; + ngraph::ResultVector results{std::make_shared(matmul)}; function = std::make_shared(results, params, "InsertTransposeBeforeConcatConcat"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp index 9e8ec3a61b7f94..7f79573fb087f6 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/insert_transpose_between_convs.cpp @@ -68,8 +68,8 @@ class InsertTransposeBetweenConvs : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape_2d))}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); size_t num_out_channels = 8; size_t kernal_size = 8; @@ -92,8 +92,8 @@ class InsertTransposeBetweenConvs : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); - auto reshape2 = std::make_shared(conv1, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); + auto reshape2 = std::make_shared(conv1, pattern2, false); std::vector filter_weights_2 = ov::test::utils::generate_float_numbers(num_out_channels * kernal_size, -0.2f, 0.2f); @@ -113,10 +113,10 @@ class InsertTransposeBetweenConvs : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); - auto reshape3 = std::make_shared(conv2, pattern3, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); + 
auto reshape3 = std::make_shared(conv2, pattern3, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "InsertTransposeBetweenConvs"); } }; @@ -164,8 +164,8 @@ class InsertTransposeBetweenConvsWithPool : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape_2d))}; auto pattern1 = - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); size_t num_out_channels = 8; size_t kernal_size = 8; @@ -200,8 +200,8 @@ class InsertTransposeBetweenConvsWithPool : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); - auto reshape2 = std::make_shared(pool, pattern2, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{4}, pattern2_shape); + auto reshape2 = std::make_shared(pool, pattern2, false); std::vector filter_weights_2 = ov::test::utils::generate_float_numbers(num_out_channels * kernal_size, -0.2f, 0.2f); @@ -221,10 +221,10 @@ class InsertTransposeBetweenConvsWithPool : public testing::WithParamInterface(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); - auto reshape3 = std::make_shared(conv2, pattern3, false); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, pattern3_shape); + auto reshape3 = std::make_shared(conv2, pattern3, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "InsertTransposeBetweenConvs"); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp index 48a9724f25600c..47216d5bcc2005 100644 --- a/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/layers_restrictions.cpp @@ -31,7 +31,7 @@ struct FullyConnectedBatchSizeMoreThan8 { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto weights = ov::test::utils::generate_float_numbers(inputShape[1] * inputShape[1], -0.0001f, 0.0001f); auto fullyConnected = ngraph::builder::makeFullyConnected(params[0], ngPrc, inputShape[1], false, {}, weights); - ngraph::ResultVector results{std::make_shared(fullyConnected)}; + ngraph::ResultVector results{std::make_shared(fullyConnected)}; return std::make_shared(results, params, getName()); } static const char* getMatch() { @@ -49,7 +49,7 @@ struct FullyConnectedBatchSizeLessThanOrEqual8 { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto weights = ov::test::utils::generate_float_numbers(inputShape[1] * inputShape[1], -0.0001f, 0.0001f); auto fullyConnected = ngraph::builder::makeFullyConnected(params[0], ngPrc, inputShape[1], false, {}, weights); - ngraph::ResultVector results{std::make_shared(fullyConnected)}; + ngraph::ResultVector results{std::make_shared(fullyConnected)}; return std::make_shared(results, params, getName()); } }; diff --git a/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp b/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp index 4cad39bd6c9f70..0ada71e606928b 100644 --- 
a/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp +++ b/src/plugins/intel_gna/tests/functional/pass_tests/remove_permutations_NHWC_to_NCHW_pass.cpp @@ -66,9 +66,9 @@ std::shared_ptr CreateTranspose(std::shared_ptr inpu } else { permute_order = shape_size == 4 ? std::vector{0, 2, 3, 1} : std::vector{0, 2, 1}; } - return std::make_shared( + return std::make_shared( input, - ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{shape_size}, permute_order)); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{shape_size}, permute_order)); } ngraph::Shape GetLayerTransposedOutputShape(std::shared_ptr layer) { @@ -122,7 +122,7 @@ std::shared_ptr CreateConvolution(const ngraph::Output(pool) : pool; + return withActivation ? std::make_shared(pool) : pool; } class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface, @@ -175,10 +175,9 @@ class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface< std::accumulate(std::begin(inputShape), std::end(inputShape), 1, std::multiplies()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); auto conv = CreateConvolution(permute1, ngPrc, inputShape, output1D); @@ -188,12 +187,12 @@ class RemovePermutationsNHWCToNCHWPassTest : public testing::WithParamInterface< std::end(conv->get_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "RemovePermutationsTest"); if (transpose_to_reshape) { ngraph::pass::Manager manager; @@ -239,7 +238,7 @@ class RemovePermutationsNHWCToNCHWPassNoReshapesTest : public testing::WithParam auto conv = CreateConvolution(permute1, ngPrc, inputShape); auto permute2 = CreateTranspose(conv, shape_size, false); - ngraph::ResultVector results{std::make_shared(permute2)}; + ngraph::ResultVector results{std::make_shared(permute2)}; function = std::make_shared(results, params, "RemovePermutationPassNoReshapes"); } @@ -313,10 +312,9 @@ class RemovePermutationsWithPoolAndActTest : public testing::WithParamInterface< std::accumulate(std::begin(inputShape), std::end(inputShape), 1, std::multiplies()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); 
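// NOTE: the extracted patch text lost everything between angle brackets, which is why
// many removed (-) and added (+) lines above read identically. A hedged sketch of the
// substitution these hunks apply, using the one spelling that survives verbatim in
// CreateTranspose above (ov::op::v0::Constant on the added side, ngraph::opset1::Constant
// on the removed side) and assuming Reshape is the v1 op, as in the current OpenVINO op set:
//
//   // before (assumed):
//   auto reshape1 = std::make_shared<ngraph::opset1::Reshape>(params[0], pattern1, false);
//   // after (assumed):
//   auto pattern1 = std::make_shared<ov::op::v0::Constant>(ngraph::element::Type_t::i64,
//                                                          ngraph::Shape{shape_size},
//                                                          inputShape);
//   auto reshape1 = std::make_shared<ov::op::v1::Reshape>(params[0], pattern1, false);
//
// Only the template arguments are reconstructed here; the argument lists are copied from
// the surrounding context lines.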
auto conv = CreateConvolution(permute1, ngPrc, inputShape, false, true, true); @@ -326,12 +324,12 @@ class RemovePermutationsWithPoolAndActTest : public testing::WithParamInterface< std::end(conv->get_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "RemovePermutationsWithPoolAndActTest"); if (transpose_to_reshape) { @@ -400,10 +398,9 @@ class RemovePermutationsWithTwoConvTest : public testing::WithParamInterface()); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape{1, in_total_dims_size})}; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(params[0], pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(params[0], pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); auto conv1 = CreateConvolution(permute1, ngPrc, inputShape); @@ -414,12 +411,12 @@ class RemovePermutationsWithTwoConvTest : public testing::WithParamInterfaceget_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); - ngraph::ResultVector results{std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(reshape2)}; function = std::make_shared(results, params, "RemovePermutationPass"); } }; @@ -487,35 +484,33 @@ class RemovePermutationsWithEltwiseTest : public testing::WithParamInterface(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(params[0], split_axis_op, 2); - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape1 = std::make_shared(split->output(0), pattern1, false); + auto pattern1 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape1 = std::make_shared(split->output(0), pattern1, false); auto permute1 = CreateTranspose(reshape1, shape_size, true); auto conv1 = CreateConvolution(permute1, ngPrc, inputShape); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{shape_size}, - inputShape); - auto reshape2 = std::make_shared(split->output(1), pattern2, false); + auto pattern2 = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shape_size}, inputShape); + auto reshape2 = std::make_shared(split->output(1), pattern2, false); auto permute2 = CreateTranspose(reshape2, shape_size, true); auto conv2 = CreateConvolution(permute2, ngPrc, inputShape); - auto add = std::make_shared(conv1, conv2); + auto add = std::make_shared(conv1, conv2); auto permute3 = CreateTranspose(add, 
add->get_output_shape(0).size(), false); auto conv_out_size = std::accumulate(std::begin(add->get_output_shape(0)), std::end(add->get_output_shape(0)), size_t(1), std::multiplies()); - auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv_out_size}); - auto reshape3 = std::make_shared(permute3, pattern3, false); + auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv_out_size}); + auto reshape3 = std::make_shared(permute3, pattern3, false); - ngraph::ResultVector results{std::make_shared(reshape3)}; + ngraph::ResultVector results{std::make_shared(reshape3)}; function = std::make_shared(results, params, "RemovePermutationPass"); } }; @@ -587,10 +582,10 @@ class RemoveSharedPermutationTest : public testing::WithParamInterface 1 ? 1 : (inputShape.size() - 2); multipleInputShape[mul_dim] *= splits_num; - auto pattern = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{multipleInputShape.size()}, - multipleInputShape); - auto reshape = std::make_shared(params[0], pattern, false); + auto pattern = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{multipleInputShape.size()}, + multipleInputShape); + auto reshape = std::make_shared(params[0], pattern, false); auto permute = CreateTranspose(reshape, shape_size, true); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit( @@ -606,10 +601,10 @@ class RemoveSharedPermutationTest : public testing::WithParamInterfaceget_output_shape(0)), size_t(1), std::multiplies()); - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv1_out_size}); - auto reshape1 = std::make_shared(permute1, pattern1, false); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv1_out_size}); + auto reshape1 = std::make_shared(permute1, pattern1, false); auto conv2 = CreateConvolution(split->output(1), ngPrc, inputShape); auto permute2 = CreateTranspose(conv2, conv2->get_output_shape(0).size(), false); @@ -617,14 +612,14 @@ class RemoveSharedPermutationTest : public testing::WithParamInterfaceget_output_shape(0)), size_t(1), std::multiplies()); - auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{1, conv2_out_size}); - auto reshape2 = std::make_shared(permute2, pattern2, false); + auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{1, conv2_out_size}); + auto reshape2 = std::make_shared(permute2, pattern2, false); auto concat = std::make_shared(ov::NodeVector{reshape1, reshape2}, 1); - ngraph::ResultVector results{std::make_shared(concat)}; + ngraph::ResultVector results{std::make_shared(concat)}; function = std::make_shared(results, params, "RemoveSharedPermutationTest"); } }; diff --git a/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp b/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp index 2dffe679072639..eb224e20c78890 100644 --- a/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp +++ b/src/plugins/intel_gna/tests/functional/preprocess_tests/precision_convert.cpp @@ -61,8 +61,8 @@ class PreprocessGNATest : public testing::WithParamInterface(paramsOuts, 1); - ngraph::ResultVector results{std::make_shared(concat)}; + auto concat = std::make_shared(paramsOuts, 1); + ngraph::ResultVector results{std::make_shared(concat)}; function = 
std::make_shared(results, params, "concat"); } }; diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp index 6a11f56b7b308f..1642b372fcf98b 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/add_overload_correction.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -74,39 +75,31 @@ class AddOverloadCorrectionTest : public testing::WithParamInterface(ngPrc, {1}, {-10.0f}); auto highNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {10.0f}); - auto fqIn = std::make_shared(params[0], - lowNodeIn, - highNodeIn, - lowNodeIn, - highNodeIn, - levels16); + auto fqIn = std::make_shared(params[0], + lowNodeIn, + highNodeIn, + lowNodeIn, + highNodeIn, + levels16); auto constant = ngraph::builder::makeConstant(ngPrc, inputShape, ov::test::utils::generate_float_numbers(inputShape[1], -1.0f, 1.0f)); - auto mul = std::make_shared(params[1], constant); + auto mul = std::make_shared(params[1], constant); auto lowNodeMul = ngraph::builder::makeConstant(ngPrc, {1}, {-1.0f}); auto highNodeMul = ngraph::builder::makeConstant(ngPrc, {1}, {1.0f}); - auto fqMul = std::make_shared(mul, - lowNodeMul, - highNodeMul, - lowNodeMul, - highNodeMul, - levels16); + auto fqMul = + std::make_shared(mul, lowNodeMul, highNodeMul, lowNodeMul, highNodeMul, levels16); - auto add = std::make_shared(fqIn, fqMul); + auto add = std::make_shared(fqIn, fqMul); auto lowNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {-11.0f}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {11.0f}); - auto fqOut = std::make_shared(add, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels16); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = + std::make_shared(add, lowNodeOut, highNodeOut, lowNodeOut, highNodeOut, levels16); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "AddOverloadCorrection"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp index 56f2d879e7c2a2..7b561d5dbf1704 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/const_input_add.cpp @@ -71,7 +71,7 @@ class ConstInputAddTest : public testing::WithParamInterface(ngPrc, shape, {}, true, constRange.second, constRange.first); auto eltwise = ngraph::builder::makeEltwise(constant, params[0], ngraph::helpers::EltwiseTypes::ADD); - ngraph::ResultVector results{std::make_shared(eltwise)}; + ngraph::ResultVector results{std::make_shared(eltwise)}; function = std::make_shared(results, params, "InputConstAdd"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp index 70334b08f0e3c3..0106d80fec5820 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp +++ 
b/src/plugins/intel_gna/tests/functional/scale_factors_tests/eltwise_act_fq.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -91,35 +92,35 @@ class EltwiseActFqTest : public testing::WithParamInterface, auto lowNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {100 * -inputDataMax}); auto highNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {100 * inputDataMax}); - auto fqIn = std::make_shared(params[0], - lowNodeIn, - highNodeIn, - lowNodeIn, - highNodeIn, - levels16); + auto fqIn = std::make_shared(params[0], + lowNodeIn, + highNodeIn, + lowNodeIn, + highNodeIn, + levels16); auto constant = ngraph::builder::makeConstant( ngPrc, shape, ov::test::utils::generate_float_numbers(shape[1], inputDataMin, inputDataMax)); - auto add = std::make_shared(fqIn, constant); + auto add = std::make_shared(fqIn, constant); auto lowNode = ngraph::builder::makeConstant(ngPrc, {1}, {2 * inputDataMin}); auto highNode = ngraph::builder::makeConstant(ngPrc, {1}, {2 * inputDataMax}); - auto fq = std::make_shared(add, lowNode, highNode, lowNode, highNode, levels32); + auto fq = std::make_shared(add, lowNode, highNode, lowNode, highNode, levels32); auto tanh = ngraph::builder::makeActivation(fq, ngPrc, act); auto lowNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {std::tanh(2 * inputDataMin)}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {std::tanh(2 * inputDataMax)}); - auto fqOut = std::make_shared(tanh, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels16); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = std::make_shared(tanh, + lowNodeOut, + highNodeOut, + lowNodeOut, + highNodeOut, + levels16); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "TanhFq"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp index fd4112bf6c6b1f..afb739876c7793 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/matmul_overload_correction.cpp @@ -13,6 +13,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -67,7 +68,7 @@ class MatMulOverloadCorrectionNegTest : public testing::WithParamInterface(ngPrc, ov::Shape(shape1))}; - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); std::shared_ptr input2; if (isSecondInputConst) { @@ -77,33 +78,33 @@ class MatMulOverloadCorrectionNegTest : public testing::WithParamInterface(ngPrc, shape2); - params.push_back(std::dynamic_pointer_cast(input2)); + params.push_back(std::dynamic_pointer_cast(input2)); } auto lowNodeIn1 = ngraph::builder::makeConstant(ngPrc, {1}, {-maxInputValue}); auto highNodeIn1 = ngraph::builder::makeConstant(ngPrc, {1}, {maxInputValue}); - auto fqIn1 = std::make_shared(relu, - lowNodeIn1, - highNodeIn1, - lowNodeIn1, - highNodeIn1, - 
levels16); + auto fqIn1 = std::make_shared(relu, + lowNodeIn1, + highNodeIn1, + lowNodeIn1, + highNodeIn1, + levels16); auto lowNodeIn2 = ngraph::builder::makeConstant(ngPrc, {1}, {-maxInputValue}); auto highNodeIn2 = ngraph::builder::makeConstant(ngPrc, {1}, {maxInputValue}); - auto fqIn2 = std::make_shared(input2, - lowNodeIn2, - highNodeIn2, - lowNodeIn2, - highNodeIn2, - levels16); + auto fqIn2 = std::make_shared(input2, + lowNodeIn2, + highNodeIn2, + lowNodeIn2, + highNodeIn2, + levels16); std::shared_ptr matmul_input2 = fqIn2; if (!isSecondInputConst) { - auto pattern = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{shape1[1], shape1[1]}); - matmul_input2 = std::make_shared(fqIn2, pattern, false); + auto pattern = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{2}, + ngraph::Shape{shape1[1], shape1[1]}); + matmul_input2 = std::make_shared(fqIn2, pattern, false); } auto matmul = swapInputs ? std::make_shared(matmul_input2, fqIn1, false, true) @@ -113,14 +114,14 @@ class MatMulOverloadCorrectionNegTest : public testing::WithParamInterface(ngPrc, {1}, {-maxInputValue * maxInputValue * inputShape[1] / 10}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {maxInputValue * maxInputValue * inputShape[1] / 10}); - auto fqOut = std::make_shared(matmul, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels32); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = std::make_shared(matmul, + lowNodeOut, + highNodeOut, + lowNodeOut, + highNodeOut, + levels32); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "MatMulOverloadCorrection"); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp index 308c87e58805d3..5fd0cd60a7e69a 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/perchannel_quant_test.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/common_utils.hpp" #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -75,15 +76,15 @@ class PerchannelQuantTest : public testing::WithParamInterface, pu auto constant = ngraph::builder::makeConstant(ngPrc, constShape, weights); auto wLowNode = ngraph::builder::makeConstant(ngPrc, {constShape.front()}, {weightsMin}); auto wHighNode = ngraph::builder::makeConstant(ngPrc, {constShape.front()}, {weightsMax}); - auto wFq = std::make_shared(constant, - wLowNode, - wHighNode, - wLowNode, - wHighNode, - std::numeric_limits::max() - 1); - auto matmul = std::make_shared(params[0], wFq, false, true); - - ngraph::ResultVector results{std::make_shared(matmul)}; + auto wFq = std::make_shared(constant, + wLowNode, + wHighNode, + wLowNode, + wHighNode, + std::numeric_limits::max() - 1); + auto matmul = std::make_shared(params[0], wFq, false, true); + + ngraph::ResultVector results{std::make_shared(matmul)}; function = std::make_shared(results, params, "PerchannelQuantTest"); } }; diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp index 
b1e3b7f73e4fde..92897c230cafe0 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/test_fq_scale_factors.cpp @@ -12,6 +12,7 @@ #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" #include "openvino/opsets/opset10.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/pass/convert_prc.hpp" #include "ov_models/utils/ov_helpers.hpp" @@ -114,25 +115,17 @@ class TestFQScaleFactorsTest : public testing::WithParamInterface(ngPrc, {1}, {inputDataMin}); auto highNodeIn = ngraph::builder::makeConstant(ngPrc, {1}, {inputDataMax}); - auto fqIn = std::make_shared(test_node, - lowNodeIn, - highNodeIn, - lowNodeIn, - highNodeIn, - levels); + auto fqIn = + std::make_shared(test_node, lowNodeIn, highNodeIn, lowNodeIn, highNodeIn, levels); - auto mul = std::make_shared(fqIn, test_node); + auto mul = std::make_shared(fqIn, test_node); auto lowNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {-inputDataMin * inputDataMin}); auto highNodeOut = ngraph::builder::makeConstant(ngPrc, {1}, {inputDataMax * inputDataMax}); - auto fqOut = std::make_shared(mul, - lowNodeOut, - highNodeOut, - lowNodeOut, - highNodeOut, - levels); - - ngraph::ResultVector results{std::make_shared(fqOut)}; + auto fqOut = + std::make_shared(mul, lowNodeOut, highNodeOut, lowNodeOut, highNodeOut, levels); + + ngraph::ResultVector results{std::make_shared(fqOut)}; function = std::make_shared(results, params, "FQWithSmallScaleFactor"); functionRefs = ngraph::clone_function(*function); } diff --git a/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp b/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp index f9fa6be55195dc..ef2b2da8d29c1c 100644 --- a/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp +++ b/src/plugins/intel_gna/tests/functional/scale_factors_tests/weighable_layer_without_fq.cpp @@ -7,6 +7,7 @@ #include #include +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/layer_test_utils.hpp" @@ -55,22 +56,22 @@ class WeighableLayerWithoutFqTest : public testing::WithParamInterface(ngPrc, ov::Shape(inputShape))}; - auto relu = std::make_shared(params[0]); - auto fq1 = std::make_shared( - relu, - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - static_cast(std::numeric_limits::max()) + 1); + auto relu = std::make_shared(params[0]); + auto fq1 = + std::make_shared(relu, + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + static_cast(std::numeric_limits::max()) + 1); auto constant = ngraph::builder::makeConstant(ngPrc, constantShape, std::vector{}, true); - auto fq2 = std::make_shared( - constant, - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - 
ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-10.}), - ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {10.}), - static_cast(std::numeric_limits::max()) + 1); + auto fq2 = + std::make_shared(constant, + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-10.}), + ov::op::v0::Constant::create(ngraph::element::f32, {1}, {10.}), + static_cast(std::numeric_limits::max()) + 1); auto concat = std::make_shared(ov::NodeVector{fq1, fq2}, 1); function = std::make_shared(concat, params, "WeighableLayerWithoutFq"); } diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index 00db2c4ec2f37f..a84648cf4a809d 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -20,13 +20,13 @@ class OVClassNetworkTestGNA : public ::testing::Test { void SetUp() override { SKIP_IF_CURRENT_TEST_IS_DISABLED(); - auto param0 = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape(1, 1024)); - auto reshape = std::make_shared( - param0, - std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{4}, - ngraph::Shape{1, 1, 1, 1024}), - false); + auto param0 = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape(1, 1024)); + auto reshape = + std::make_shared(param0, + std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{4}, + ngraph::Shape{1, 1, 1, 1024}), + false); param0->set_friendly_name("input"); auto conv1 = ngraph::builder::makeConvolution(reshape, ngraph::element::Type_t::f32, @@ -37,7 +37,7 @@ class OVClassNetworkTestGNA : public ::testing::Test { {1, 1}, ngraph::op::PadType::EXPLICIT, 4); - auto result = std::make_shared(conv1); + auto result = std::make_shared(conv1); gnaSimpleNetwork = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param0}); gnaSimpleNetwork->set_friendly_name("GnaSingleConv"); diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 6f372a95c75e6f..da69df5a58fe3c 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -6,6 +6,8 @@ #include +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" + using namespace ov::test::behavior; namespace { @@ -14,7 +16,7 @@ const std::vector configs = {{}}; INSTANTIATE_TEST_SUITE_P( smoke_BehaviorTests, OVInferRequestDynamicTests, - ::testing::Combine(::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()), + ::testing::Combine(::testing::Values(ov::test::utils::make_split_conv_concat()), ::testing::Values(std::vector, std::vector>>{ {{1, 4, 20, 20}, {1, 10, 18, 18}}, {{2, 4, 20, 20}, {2, 10, 18, 18}}}), diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp 
b/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp index c56b77a0db892f..c729d94754029b 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/execution_graph_tests/add_output.cpp @@ -16,9 +16,9 @@ InferenceEngine::CNNNetwork getTargetNetwork() { auto input = std::make_shared(type, shape); auto mem_i = std::make_shared(type, shape, 0); auto mem_r = std::make_shared(mem_i, "r_1-3"); - auto mul = std::make_shared(mem_r, input); + auto mul = std::make_shared(mem_r, input); auto mem_w = std::make_shared(mul, "r_1-3"); - auto sigm = std::make_shared(mul); + auto sigm = std::make_shared(mul); mem_r->set_friendly_name("Memory_1"); mem_w->add_control_dependency(mem_r); sigm->add_control_dependency(mem_w); diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp index 210540b5ac6841..b3dd8026cb2b26 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/activation.cpp @@ -33,15 +33,14 @@ class ActivationLayerGNATest : public ActivationLayerTest { threshold = 1.0; } - const auto inputReshapePattern = std::make_shared(ngraph::element::i64, - ngraph::Shape{inputShape.size()}, - inputShape); - const auto inputReshape = std::make_shared(params[0], inputReshapePattern, false); + const auto inputReshapePattern = + std::make_shared(ngraph::element::i64, ngraph::Shape{inputShape.size()}, inputShape); + const auto inputReshape = std::make_shared(params[0], inputReshapePattern, false); const auto activation = ngraph::builder::makeActivation(inputReshape, ngPrc, activationType, shapes.second, constantsValue); const auto outputReshapePattern = - std::make_shared(ngraph::element::i64, ngraph::Shape{2}, inputDims); - const auto outputReshape = std::make_shared(activation, outputReshapePattern, false); + std::make_shared(ngraph::element::i64, ngraph::Shape{2}, inputDims); + const auto outputReshape = std::make_shared(activation, outputReshapePattern, false); function = std::make_shared(ngraph::NodeVector{outputReshape}, params); } diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp index 2efdb5d1ef3b65..81114099f46390 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_cell.cpp @@ -69,19 +69,19 @@ class GRUCellGNATest : public GRUCellTest { auto reccurrenceWeightsNode = ngraph::builder::makeConstant(ngPrc, WRB[1], reccurrenceWeights_vals); auto biasNode = ngraph::builder::makeConstant(ngPrc, WRB[2], bias_vals); - auto gru_cell = std::make_shared(params[0], - params[1], - weightsNode, - reccurrenceWeightsNode, - biasNode, - hidden_size, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset); - - ngraph::ResultVector results{std::make_shared(gru_cell->output(0))}; + auto gru_cell = std::make_shared(params[0], + params[1], + weightsNode, + reccurrenceWeightsNode, + biasNode, + hidden_size, + activations, + activations_alpha, + 
activations_beta, + clip, + linear_before_reset); + + ngraph::ResultVector results{std::make_shared(gru_cell->output(0))}; function = std::make_shared(results, params, "gru_cell"); if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp index 598ca5167420bb..97cd54070f1506 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp @@ -78,22 +78,22 @@ class GRUSequenceGNATest : public GRUSequenceTest { std::shared_ptr seq_length = ngraph::builder::makeConstant(ngraph::element::i64, WRB[3], lengths, false); - auto gru_sequence = std::make_shared(params[0], - params[1], - seq_length, - weightsNode, - reccurrenceWeightsNode, - biasNode, - hidden_size, - direction, - activations, - activations_alpha, - activations_beta, - clip, - linear_before_reset); - - ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)), - std::make_shared(gru_sequence->output(1))}; + auto gru_sequence = std::make_shared(params[0], + params[1], + seq_length, + weightsNode, + reccurrenceWeightsNode, + biasNode, + hidden_size, + direction, + activations, + activations_alpha, + activations_beta, + clip, + linear_before_reset); + + ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)), + std::make_shared(gru_sequence->output(1))}; function = std::make_shared(results, params, "gru_sequence"); bool is_pure_sequence = m_mode == SequenceTestsMode::PURE_SEQ; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index 0b7ea4daa080c8..aa846c0a9617a8 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -77,23 +77,23 @@ class LSTMSequenceGNATest : public LSTMSequenceTest { params[0]->get_partial_shape()[1].get_min_length()); std::shared_ptr seq_length = ngraph::builder::makeConstant(ngraph::element::i64, WRB[3], lengths, false); - auto lstm_sequence = std::make_shared(params[0], - params[1], - params[2], - seq_length, - weightsNode, - reccurrenceWeightsNode, - biasNode, - hidden_size, - direction, - activations_alpha, - activations_beta, - activations, - clip); - - ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)), - std::make_shared(lstm_sequence->output(1)), - std::make_shared(lstm_sequence->output(2))}; + auto lstm_sequence = std::make_shared(params[0], + params[1], + params[2], + seq_length, + weightsNode, + reccurrenceWeightsNode, + biasNode, + hidden_size, + direction, + activations_alpha, + activations_beta, + activations, + clip); + + ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)), + std::make_shared(lstm_sequence->output(1)), + std::make_shared(lstm_sequence->output(2))}; function = std::make_shared(results, params, "lstm_sequence"); bool is_pure_sequence = m_mode == SequenceTestsMode::PURE_SEQ; diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/skip_tests_config.cpp 
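(In the GRU and LSTM hunks above, the stripped template arguments would name the versioned sequence ops. Going by the opset in which each op was introduced, the added lines plausibly read ov::op::v3::GRUCell, ov::op::v5::GRUSequence, and ov::op::v5::LSTMSequence; these versions are inferred from the op-set history, not recovered from the patch itself.)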
index 50b796d7ccf98f..5ff4029de15207 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -68,9 +68,6 @@ std::vector disabledTestPatterns() { R"(.*OVCompiledModelBaseTest.*canGetInputsInfoAndCheck.*)", R"(.*OVCompiledModelBaseTest.*getOutputsFromSplitFunctionWithSeveralOutputs.*)", R"(.*OVCompiledModelBaseTest.*canCompileModelFromMemory.*)", - R"(.*OVCompiledModelBaseTest.*CanSetOutputPrecisionForNetwork.*)", - R"(.*OVCompiledModelBaseTest.*CanSetInputPrecisionForNetwork.*)", - R"(.*OVCompiledModelBaseTest.*CanCreateTwoCompiledModelsAndCheckRuntimeModel.*)", R"(.*(OVClass|IEClass)HeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK.*GetMetricNoThrow.*)", R"(.*LoadNetwork*.*LoadNetwork(HETEROWithDeviceIDNoThrow|WithBigDeviceID|WithInvalidDeviceID)*.*)", R"(.*QueryNetwork*.*QueryNetwork(HETEROWithDeviceIDNoThrow|WithBigDeviceID|WithInvalidDeviceID)*.*)", @@ -107,5 +104,10 @@ std::vector disabledTestPatterns() { R"(.*smoke_Decompose2DConv.*)", // TODO: Issue: 123306 R"(smoke_convert_matmul_to_fc/ConvertMatmulToFcWithTransposesPass.CompareWithRefImpl/netPRC=FP(32|16)_targetDevice=GNA__configItem=GNA_COMPACT_MODE_NO_configItem=GNA_DEVICE_MODE_GNA_SW_(FP32|EXACT)_IS=\(8.*)", + // TODO: CVS-125686 + R"(.*OVCompiledModelBaseTest.*CanSetOutputPrecisionForNetwork.*)", + R"(.*OVCompiledModelBaseTest.*CanSetInputPrecisionForNetwork.*)", + R"(.*OVCompiledModelBaseTest.*CanCreateTwoCompiledModelsAndCheckRuntimeModel.*)", + R"(.*OVInferRequestWaitTests.*FailedAsyncInferWithNegativeTimeForWait.*)", }; } diff --git a/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp b/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp index 838fa4b31b9ee9..bdcac55afb0d76 100644 --- a/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_executable_network_metrics_test.cpp @@ -39,10 +39,10 @@ class GnaExecutableNetworkMetricsTest : public ::testing::Test { protected: std::shared_ptr getFunction() { - auto firstInput = std::make_shared(net_precision, shape); - auto secondInput = std::make_shared(net_precision, shape); - auto matmul = std::make_shared(firstInput, secondInput, false, true); - auto result = std::make_shared(matmul); + auto firstInput = std::make_shared(net_precision, shape); + auto secondInput = std::make_shared(net_precision, shape); + auto matmul = std::make_shared(firstInput, secondInput, false, true); + auto result = std::make_shared(matmul); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({firstInput}), "MatMul"); return function; diff --git a/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp b/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp index 0cfc4caed8ae59..0ec3d2063b264b 100644 --- a/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_get_aligned_split_sizes.cpp @@ -57,12 +57,10 @@ void RunVariadicSplitSupportedTest(DeviceVersion device_version, std::vector( - std::make_shared(ngraph::element::f32, input_shape), - ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape({1}), {axis}), - ngraph::opset9::Constant::create(ngraph::element::i64, - ngraph::Shape({split_lengths.size()}), - split_lengths)); + auto split = std::make_shared( + std::make_shared(ngraph::element::f32, input_shape), + 
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({1}), {axis}),
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({split_lengths.size()}), split_lengths));
     ASSERT_TRUE(Limitations::is_split_supported(split, false) == result);
     }
     Limitations::deinit();
@@ -103,9 +101,9 @@ void RunSplitSupportedTest(DeviceVersion device_version, std::vector
-    auto split = std::make_shared<ngraph::opset9::Split>(
-        std::make_shared<ngraph::opset9::Parameter>(ngraph::element::f32, input_shape),
-        ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis}),
+    auto split = std::make_shared<ov::op::v1::Split>(
+        std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, input_shape),
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis}),
         num_splits);
     ASSERT_TRUE(Limitations::is_split_supported(split, false) == result);
     }
diff --git a/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp b/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp
index 3cca0989543e25..36c97f3c030d68 100644
--- a/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp
+++ b/src/plugins/intel_gna/tests/unit/gna_hw_precision_test.cpp
@@ -52,10 +52,10 @@ class GNAHwPrecisionTest : public ::testing::Test {
 protected:
     std::shared_ptr<ov::Model> getFunction() {
-        auto firstInput = std::make_shared(net_precision, shape);
-        auto secondInput = std::make_shared(net_precision, shape);
-        auto matmul = std::make_shared(firstInput, secondInput, false, true);
-        auto result = std::make_shared(matmul);
+        auto firstInput = std::make_shared<ov::op::v0::Parameter>(net_precision, shape);
+        auto secondInput = std::make_shared<ov::op::v0::Parameter>(net_precision, shape);
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(firstInput, secondInput, false, true);
+        auto result = std::make_shared<ov::op::v0::Result>(matmul);
         auto function =
             std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({firstInput}), "MatMul");
         return function;
diff --git a/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp b/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp
index c20c37b4f1f0fb..4077a86159c013 100644
--- a/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp
+++ b/src/plugins/intel_gna/tests/unit/gna_infer_request_test.cpp
@@ -49,7 +49,7 @@ class GNAInferRequestTest : public ::testing::Test {
                                                          ov::test::utils::generate_float_numbers(shape_size, -0.5f, 0.5f),
                                                          false);
-        auto add = std::make_shared(params[0], add_const);
+        auto add = std::make_shared<ov::op::v1::Add>(params[0], add_const);
         auto res = std::make_shared<ov::op::v0::Result>(add);
         auto function = std::make_shared<ov::Model>(res, params, "Add");
         return function;
     }
diff --git a/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp b/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp
index 0678ccbd2329f3..02c3b7ce0c6cf3 100644
--- a/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp
+++ b/src/plugins/intel_gna/tests/unit/gna_memory_alignment.cpp
@@ -97,10 +97,10 @@ class GNAPluginLoadNetworkTests : public ::testing::TestWithParam
     std::shared_ptr<ov::Model> getMulFunction(const ngraph::Shape input_shape) {
         const ngraph::element::Type net_precision = ngraph::element::f32;
-        auto input = std::make_shared(net_precision, input_shape);
-        auto multiplier = std::make_shared(net_precision, input_shape);
-        auto matmul = std::make_shared(input, multiplier, false, true);
-        auto result = std::make_shared(matmul);
+        auto input = std::make_shared<ov::op::v0::Parameter>(net_precision, input_shape);
+        auto multiplier = std::make_shared<ov::op::v0::Parameter>(net_precision, input_shape);
+        auto matmul = std::make_shared<ov::op::v0::MatMul>(input, multiplier, false, true);
+        auto result = std::make_shared<ov::op::v0::Result>(matmul);
         auto function = std::make_shared<ov::Model>(ov::ResultVector({result}), ov::ParameterVector({input}), "MatMul");
         return function;
     }
diff
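Note: the unit-test hunks above all reduce to the same two-Parameter MatMul builder. The same builder in isolation (a sketch; the f32 precision, the 2x2 shape, and the name make_matmul_model are assumed for illustration):

    #include <memory>
    #include "openvino/core/model.hpp"
    #include "openvino/op/matmul.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"

    std::shared_ptr<ov::Model> make_matmul_model() {
        const ov::Shape shape{2, 2};
        auto first = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
        auto second = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, shape);
        // transpose_a = false, transpose_b = true, as in the tests above
        auto matmul = std::make_shared<ov::op::v0::MatMul>(first, second, false, true);
        auto result = std::make_shared<ov::op::v0::Result>(matmul);
        return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{first, second}, "MatMul");
    }

Unlike the tests, which register only the first Parameter, the sketch registers both inputs in the ParameterVector, which is what ov::Model validation ordinarily requires.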
--git a/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp b/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp index 0673e7b11064c9..9918008f7997a7 100644 --- a/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_memory_compact_test.cpp @@ -14,6 +14,7 @@ #include "gna_fused_iterator.hpp" #include "gna_plugin.hpp" #include "memory/gna_memory.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" using namespace InferenceEngine; @@ -316,11 +317,11 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivation) { ov::CoordinateDiff pad_begin(0, 0), pad_end(0, 0); auto weights = ngraph::builder::makeConstant(ov::element::f32, {8, 16, 1, 1}, {1.f}); - auto input = std::make_shared(ov::element::f32, input_shape); - auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); + auto input = std::make_shared(ov::element::f32, input_shape); + auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); auto activation = ngraph::builder::makeActivation(conv, ov::element::f32, ngraph::helpers::ActivationTypes::Sigmoid); - auto result = std::make_shared(activation); + auto result = std::make_shared(activation); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution"); @@ -338,8 +339,8 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersMaxPool) { ov::CoordinateDiff pad_begin(0, 0), pad_end(0, 0); auto weights = ngraph::builder::makeConstant(ov::element::f32, {8, 16, 1, 1}, {1.f}); - auto input = std::make_shared(ov::element::f32, input_shape); - auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); + auto input = std::make_shared(ov::element::f32, input_shape); + auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); OPENVINO_SUPPRESS_DEPRECATED_START auto maxpool = ngraph::builder::makePooling(conv, {1, 1}, @@ -351,7 +352,7 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersMaxPool) { false, ngraph::helpers::PoolingTypes::MAX); OPENVINO_SUPPRESS_DEPRECATED_END - auto result = std::make_shared(maxpool); + auto result = std::make_shared(maxpool); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution"); @@ -369,8 +370,8 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivationMaxPool) { ov::CoordinateDiff pad_begin(0, 0), pad_end(0, 0); auto weights = ngraph::builder::makeConstant(ov::element::f32, {8, 16, 1, 1}, {1.f}); - auto input = std::make_shared(ov::element::f32, input_shape); - auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); + auto input = std::make_shared(ov::element::f32, input_shape); + auto conv = std::make_shared(input, weights, strides, pad_begin, pad_end, dilations); auto activation = ngraph::builder::makeActivation(conv, ov::element::f32, ngraph::helpers::ActivationTypes::Sigmoid); OPENVINO_SUPPRESS_DEPRECATED_START @@ -384,7 +385,7 @@ TEST_F(GNAMemoryOrderTest, orderingFusedLayersActivationMaxPool) { false, ngraph::helpers::PoolingTypes::MAX); OPENVINO_SUPPRESS_DEPRECATED_END - auto result = std::make_shared(maxpool); + auto result = std::make_shared(maxpool); auto function = std::make_shared(ov::ResultVector({result}), ov::ParameterVector({input}), "convolution"); diff --git a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp index 24d6a674104cfe..435f0bbab3d1bf 100644 
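Note: the ordering tests in gna_memory_compact_test.cpp above build short Convolution chains. The construction boils down to this sketch (the 1x16x1x8 input shape and the name make_conv_act_model are assumptions; the 8x16x1x1 kernel mirrors the constant visible in the hunk):

    #include <memory>
    #include <vector>
    #include "openvino/core/model.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/convolution.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"
    #include "openvino/op/sigmoid.hpp"

    std::shared_ptr<ov::Model> make_conv_act_model() {
        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16, 1, 8});
        // 8 output channels, 16 input channels, 1x1 kernel, constant weights
        auto weights = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{8, 16, 1, 1},
                                                    std::vector<float>(8 * 16, 1.0f));
        auto conv = std::make_shared<ov::op::v1::Convolution>(input,
                                                              weights,
                                                              ov::Strides{1, 1},         // strides
                                                              ov::CoordinateDiff{0, 0},  // pads_begin
                                                              ov::CoordinateDiff{0, 0},  // pads_end
                                                              ov::Strides{1, 1});        // dilations
        auto activation = std::make_shared<ov::op::v0::Sigmoid>(conv);
        auto result = std::make_shared<ov::op::v0::Result>(activation);
        return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{input}, "convolution");
    }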
--- a/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp +++ b/src/plugins/intel_gna/tests/unit/gna_plugin_load_network_test.cpp @@ -60,9 +60,9 @@ class GNAPluginLoadNetworkTest : public ::testing::Test, public ::testing::WithP const auto& model = GetParam().model; using ngraph::element::f32; - auto parameter = std::make_shared(f32, ngraph::Shape{model.input_size}); + auto parameter = std::make_shared(f32, ngraph::Shape{model.input_size}); - auto conv = std::dynamic_pointer_cast( + auto conv = std::dynamic_pointer_cast( ngraph::builder::makeConvolution(parameter, f32, model.filter_size, @@ -72,7 +72,7 @@ class GNAPluginLoadNetworkTest : public ::testing::Test, public ::testing::WithP c_dilations, ngraph::op::PadType::EXPLICIT, c_num_out_channels)); - auto result = std::make_shared(conv); + auto result = std::make_shared(conv); function = std::make_shared(result, ov::ParameterVector{parameter}, "convolution"); } }; diff --git a/src/plugins/intel_gna/tests/unit/ops/util_test.cpp b/src/plugins/intel_gna/tests/unit/ops/util_test.cpp index 2433b9864c3fbf..30a34995bd4483 100644 --- a/src/plugins/intel_gna/tests/unit/ops/util_test.cpp +++ b/src/plugins/intel_gna/tests/unit/ops/util_test.cpp @@ -80,9 +80,9 @@ TEST_P(GnaOpsUtilIsEltwiseAddTest, isEltwiseAddTest) { ov::NodeVector pooling_nodes_false = {std::make_shared(), std::make_shared(), std::make_shared(), - std::make_shared()}; + std::make_shared()}; -ov::NodeVector pooling_nodes_true = {std::make_shared()}; +ov::NodeVector pooling_nodes_true = {std::make_shared()}; ov::NodeVector eltwise_mul_nodes_false = { std::make_shared(), diff --git a/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp b/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp index a659bbb85878e9..d89eb0ee91f3bf 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gather_sinking_test_utils.hpp @@ -23,9 +23,9 @@ std::shared_ptr make_gather(std::shared_ptr input const ov::Shape& input_shape = input_node->get_output_shape(0); const std::vector indexes = create_indices_func(input_shape[axis], 0); - auto gather_indexes_node = ov::opset12::Constant::create(ov::element::i64, ov::Shape{indexes.size()}, indexes); + auto gather_indexes_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{indexes.size()}, indexes); - auto gather_axis_node = ov::opset12::Constant::create(ov::element::i64, ov::Shape{}, {axis}); + auto gather_axis_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {axis}); return std::make_shared(input_node->output(0), gather_indexes_node, gather_axis_node); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp index 184f20b8204525..7b3b59f55f2036 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_broadcast_const.cpp @@ -12,6 +12,7 @@ #include "common_test_utils/ov_test_utils.hpp" #include "legacy/ngraph_ops/eltwise.hpp" #include "legacy/ngraph_ops/scaleshift.hpp" +#include "openvino/opsets/opset8.hpp" #include "transformations/broadcast_const.hpp" namespace testing { @@ -26,17 +27,12 @@ std::unique_ptr createUnique(Args&&... 
args) { return std::unique_ptr(new T(std::forward(args)...)); } -std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { - auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {-0.5}); - auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {0.5}); - auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {-0.5}); - auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, {}, {0.5}); - return std::make_shared(parent_node, - input_low, - input_high, - output_low, - output_high, - 0); +std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, {}, {-0.5}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, {}, {0.5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, {}, {-0.5}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, {}, {0.5}); + return std::make_shared(parent_node, input_low, input_high, output_low, output_high, 0); } using Node = std::shared_ptr; @@ -133,23 +129,21 @@ std::shared_ptr CreateFunction(const ngraph::Shape& data_shape bool swap_outputs, bool add_scaleshift, EltwiseFactoryPtr eltwise_factory) { - const auto input_params_1 = std::make_shared(ngraph::element::Type_t::f32, data_shape); + const auto input_params_1 = std::make_shared(ngraph::element::Type_t::f32, data_shape); ngraph::ParameterVector params{input_params_1}; - const auto constant_1 = ngraph::opset8::Constant::create(ngraph::element::Type_t::f32, - ngraph::Shape{const_shape_dims}, - const_shape_values); + const auto constant_1 = + ov::op::v0::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{const_shape_dims}, const_shape_values); Node const_last_node = constant_1; if (add_scaleshift) { - const auto input_params_2 = - std::make_shared(ngraph::element::Type_t::f32, data_shape); + const auto input_params_2 = std::make_shared(ngraph::element::Type_t::f32, data_shape); params.push_back(input_params_2); - const auto constant_2 = ngraph::opset8::Constant::create(ngraph::element::Type_t::f32, - ngraph::Shape{const_shape_dims}, - const_shape_values); + const auto constant_2 = ov::op::v0::Constant::create(ngraph::element::Type_t::f32, + ngraph::Shape{const_shape_dims}, + const_shape_values); const_last_node = std::make_shared(input_params_2, constant_1, @@ -177,7 +171,7 @@ std::shared_ptr CreateFunction(const ngraph::Shape& data_shape const auto add = eltwise_factory->CreateNode(left_node, right_node); - const auto result = std::make_shared(add); + const auto result = std::make_shared(add); return std::make_shared(ngraph::ResultVector{result}, params); } @@ -268,13 +262,13 @@ void execute_cloned_test(std::shared_ptr function) { namespace { -std::vector opset8_eltwise_factories = {CreateEltwiseFactory(), - CreateEltwiseFactory(), - CreateEltwiseFactory()}; +std::vector opset8_eltwise_factories = {CreateEltwiseFactory(), + CreateEltwiseFactory(), + CreateEltwiseFactory()}; -std::vector all_eltwise_factories = {CreateEltwiseFactory(), - CreateEltwiseFactory(), - CreateEltwiseFactory(), +std::vector all_eltwise_factories = {CreateEltwiseFactory(), + CreateEltwiseFactory(), + CreateEltwiseFactory(), CreateEltwiseFactory()}; std::vector broadcast_passed_types = {ov::op::AutoBroadcastType::NONE, diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp 
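Note: createFakeQuantizeNode in the gna_broadcast_const.cpp hunk above wraps a node in a FakeQuantize whose ranges are scalar constants. The same helper in isolation (a sketch; the -0.5/0.5 ranges and the levels value of 0 are taken from the hunk, the name make_fake_quantize is an assumption):

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/fake_quantize.hpp"

    std::shared_ptr<ov::Node> make_fake_quantize(std::shared_ptr<ov::Node> parent) {
        // Scalar (rank-0) range constants, as in the test helper
        auto input_low = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {-0.5});
        auto input_high = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.5});
        auto output_low = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {-0.5});
        auto output_high = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {0.5});
        // levels == 0 mirrors the test helper above
        return std::make_shared<ov::op::v0::FakeQuantize>(parent, input_low, input_high,
                                                          output_low, output_high, 0);
    }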
b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp index 0b88e4a6439fa4..e453dfbad66837 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_dwsc_to_scaleshifts.cpp @@ -12,6 +12,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/convert_dwsc_to_scaleshifts.hpp" namespace testing { @@ -40,19 +41,19 @@ typedef std::tuple fqDWSCToScaleShiftsParams; -std::shared_ptr createFQ(std::shared_ptr& in_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); - return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); +std::shared_ptr createFQ(std::shared_ptr& in_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); + return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); } std::shared_ptr createBiasFQ(const std::shared_ptr& in_node, - std::shared_ptr& bias_const, + std::shared_ptr& bias_const, const bool& fq) { std::shared_ptr node; - node = std::make_shared(in_node, bias_const); + node = std::make_shared(in_node, bias_const); if (fq) { node = createFQ(node); @@ -61,64 +62,64 @@ std::shared_ptr createBiasFQ(const std::shared_ptr& return node; } -std::shared_ptr createFunction(const bool& fq, - const modelType& model, - const ngraph::Output& input_node, - const ngraph::Shape& filters_shape, - const ngraph::Strides& conv_stride, - const ngraph::CoordinateDiff& pads_begin, - const ngraph::CoordinateDiff& pads_end, - const ngraph::Strides& conv_dilation, - const ngraph::Shape& bias_shape, - const ngraph::op::PadType& pad_type, - std::shared_ptr& dwsc, - std::shared_ptr& bias_const, - std::shared_ptr& fq_bias) { +std::shared_ptr createFunction(const bool& fq, + const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::op::PadType& pad_type, + std::shared_ptr& dwsc, + std::shared_ptr& bias_const, + std::shared_ptr& fq_bias) { std::shared_ptr fq_filters; - auto transpose_in_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 3, 1, 2}); - auto transpose_in = std::make_shared(input_node, transpose_in_order); + auto transpose_in_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); if (fq) { - fq_filters = std::make_shared( + fq_filters = std::make_shared( ngraph::element::i64, ngraph::Shape{input_node.get_shape()[3], 1, filters_shape[0], filters_shape[1]}); fq_filters = 
createFQ(fq_filters); - fq_filters = std::make_shared( + fq_filters = std::make_shared( fq_filters, - ngraph::opset7::Constant::create( + ov::op::v0::Constant::create( ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{input_node.get_shape()[3], 1, 1, filters_shape[0], filters_shape[1]}), false); } else { - fq_filters = std::make_shared( + fq_filters = std::make_shared( ngraph::element::i64, ngraph::Shape{input_node.get_shape()[3], 1, 1, filters_shape[0], filters_shape[1]}); } - dwsc = std::make_shared(transpose_in, - fq_filters, - conv_stride, - pads_begin, - pads_end, - conv_dilation, - pad_type); - auto transpose_out_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 2, 3, 1}); - auto last_op = std::make_shared(dwsc, transpose_out_order); + dwsc = std::make_shared(transpose_in, + fq_filters, + conv_stride, + pads_begin, + pads_end, + conv_dilation, + pad_type); + auto transpose_out_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 2, 3, 1}); + auto last_op = std::make_shared(dwsc, transpose_out_order); if (model == modelType::TranspDWSCBiasTransp || fq) { - bias_const = std::make_shared(ngraph::element::i64, bias_shape); + bias_const = std::make_shared(ngraph::element::i64, bias_shape); auto bias = createBiasFQ(dwsc, bias_const, fq); - fq_bias = std::dynamic_pointer_cast(bias); - last_op = std::make_shared(bias, transpose_out_order); + fq_bias = std::dynamic_pointer_cast(bias); + last_op = std::make_shared(bias, transpose_out_order); } - return std::make_shared(last_op); + return std::make_shared(last_op); } std::shared_ptr get_initial_function(const bool& fq, @@ -131,10 +132,10 @@ std::shared_ptr get_initial_function(const bool& fq, const ngraph::Strides& conv_dilation, const ngraph::Shape& bias_shape, const ngraph::op::PadType& pad_type, - std::shared_ptr& dwsc, - std::shared_ptr& bias_const, - std::shared_ptr& fq_bias) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + std::shared_ptr& dwsc, + std::shared_ptr& bias_const, + std::shared_ptr& fq_bias) { + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto result = createFunction(fq, model, input_params, @@ -171,9 +172,9 @@ void ConvertDWSCToScaleShiftsTestInvalidFixture::SetUp() { ngraph::Strides conv_stride, conv_dilation; ngraph::CoordinateDiff pads_begin, pads_end; ngraph::op::PadType pad_type; - std::shared_ptr dwsc; - std::shared_ptr bias_const; - std::shared_ptr fq_bias; + std::shared_ptr dwsc; + std::shared_ptr bias_const; + std::shared_ptr fq_bias; std::tie(fq, params) = this->GetParam(); std::tie(model, input_shape, @@ -229,9 +230,9 @@ class ConvertDWSCToScaleShiftsTestFixture : public ov::test::TestsCommon, const ngraph::Strides& conv_dilation, const ngraph::Shape& bias_shape, const ngraph::op::PadType& pad_type, - const std::shared_ptr& dwsc, - const std::shared_ptr& bias_const, - const std::shared_ptr& fq_bias); + const std::shared_ptr& dwsc, + const std::shared_ptr& bias_const, + const std::shared_ptr& fq_bias); public: std::shared_ptr function, reference_function; @@ -246,9 +247,9 @@ void ConvertDWSCToScaleShiftsTestFixture::SetUp() { ngraph::Strides conv_stride, conv_dilation; ngraph::CoordinateDiff pads_begin, pads_end; ngraph::op::PadType pad_type; - std::shared_ptr dwsc; - std::shared_ptr bias_const; - std::shared_ptr fq_bias; + std::shared_ptr dwsc; + std::shared_ptr bias_const; + std::shared_ptr fq_bias; std::tie(fq, params) = this->GetParam(); std::tie(model, input_shape, @@ -288,55 +289,53 
@@ void ConvertDWSCToScaleShiftsTestFixture::SetUp() {
                         fq_bias);
 }
 
-std::shared_ptr<ngraph::Node> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
-    return std::make_shared<ngraph::opset7::StridedSlice>(
-        input,  // data
-        ngraph::opset7::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         {(size_t)0, offset}),  // begin slice index
-        ngraph::opset7::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         {(size_t)0, offset + size}),  // end slice index
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}),  // strides
-        std::vector<int64_t>{1, 0},  // begin mask
-        std::vector<int64_t>{1, 0});  // end mask
+std::shared_ptr<ngraph::Node> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
+    return std::make_shared<ov::op::v1::StridedSlice>(
+        input,  // data
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}),  // begin slice index
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     {(size_t)0, offset + size}),  // end slice index
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}),  // strides
+        std::vector<int64_t>{1, 0},  // begin mask
+        std::vector<int64_t>{1, 0});  // end mask
 }
 
-std::shared_ptr<ngraph::Node> InsertFQLayer(const std::shared_ptr<ngraph::opset7::FakeQuantize> fq_layer,
+std::shared_ptr<ngraph::Node> InsertFQLayer(const std::shared_ptr<ov::op::v0::FakeQuantize> fq_layer,
                                             std::shared_ptr<ngraph::Node> last_node) {
     if (fq_layer != nullptr) {
         return fq_layer->clone_with_new_inputs(
             {last_node,
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fq_layer->input_value(1).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fq_layer->input_value(1).get_node_shared_ptr())
                      ->cast_vector<float>()),
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fq_layer->input_value(2).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fq_layer->input_value(2).get_node_shared_ptr())
                      ->cast_vector<float>()),
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fq_layer->input_value(3).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fq_layer->input_value(3).get_node_shared_ptr())
                      ->cast_vector<float>()),
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fq_layer->input_value(4).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fq_layer->input_value(4).get_node_shared_ptr())
                      ->cast_vector<float>())});
     }
     return last_node;
 }
 
-std::shared_ptr<ngraph::Node> DecomposeDWSC(std::shared_ptr<ngraph::opset7::GroupConvolution> dwsc,
-                                            std::shared_ptr<ngraph::opset7::Constant> bias_const,
-                                            std::shared_ptr<ngraph::opset7::FakeQuantize> fq_bias,
-                                            std::shared_ptr<ngraph::opset7::Reshape> flat_input_plane,
+std::shared_ptr<ngraph::Node> DecomposeDWSC(std::shared_ptr<ov::op::v1::GroupConvolution> dwsc,
+                                            std::shared_ptr<ov::op::v0::Constant> bias_const,
+                                            std::shared_ptr<ov::op::v0::FakeQuantize> fq_bias,
+                                            std::shared_ptr<ov::op::v1::Reshape> flat_input_plane,
                                             std::shared_ptr<ngraph::Node> flat_filters_plane) {
-    std::shared_ptr<ngraph::opset7::Constant> const_zero_padding;
+    std::shared_ptr<ov::op::v0::Constant> const_zero_padding;
     std::shared_ptr<ngraph::Node> reshaped_bias;
     ngraph::OutputVector output_chunks;
     auto input_channel_count = dwsc->get_input_shape(0)[1];
@@ -349,17 +348,16 @@ std::shared_ptr DecomposeDWSC(std::shared_ptr
-        const_zero_padding = std::make_shared<ngraph::opset7::Constant>(dwsc->get_element_type(),
-                                                                        ngraph::Shape{1, input_channel_count},
-                                                                        0);
+        const_zero_padding =
+            std::make_shared<ov::op::v0::Constant>(dwsc->get_element_type(), ngraph::Shape{1, input_channel_count}, 0);
     }
 
     // Reshape bias const
     if (bias_const) {
         auto bias_size = shape_size(bias_const->get_shape());
-        reshaped_bias = ov::op::util::make_try_fold<ngraph::opset7::Reshape>(
+        reshaped_bias = ov::op::util::make_try_fold<ov::op::v1::Reshape>(
             bias_const,
-
ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, bias_size}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, bias_size}), false); } @@ -382,17 +380,15 @@ std::shared_ptr DecomposeDWSC(std::shared_ptr(conv_input_slice, conv_filter_slice); + previous_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); if (bias_const) { - previous_layer_output = - std::make_shared(previous_layer_output, reshaped_bias); + previous_layer_output = std::make_shared(previous_layer_output, reshaped_bias); previous_layer_output = InsertFQLayer(fq_bias, previous_layer_output); } last_layer_output = previous_layer_output; } else { - last_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); - last_layer_output = std::make_shared(last_layer_output, previous_layer_output); + last_layer_output = std::make_shared(conv_input_slice, conv_filter_slice); + last_layer_output = std::make_shared(last_layer_output, previous_layer_output); previous_layer_output = last_layer_output; } } @@ -408,7 +404,7 @@ std::shared_ptr DecomposeDWSC(std::shared_ptr 1 if (output_chunks.size() > 1) { - return std::make_shared(output_chunks, 0); + return std::make_shared(output_chunks, 0); } return output_chunks[0].get_node_shared_ptr(); @@ -425,46 +421,46 @@ std::shared_ptr ConvertDWSCToScaleShiftsTestFixture::get_refer const ngraph::Strides& conv_dilation, const ngraph::Shape& bias_shape, const ngraph::op::PadType& pad_type, - const std::shared_ptr& dwsc, - const std::shared_ptr& bias_const, - const std::shared_ptr& fq_bias) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + const std::shared_ptr& dwsc, + const std::shared_ptr& bias_const, + const std::shared_ptr& fq_bias) { + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto output_channel_count = dwsc->get_output_shape(0)[1]; auto output_width = dwsc->get_output_shape(0)[3]; // Prepare flat input data - auto flat_input_plane = std::make_shared( + auto flat_input_plane = std::make_shared( input_params, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1, ngraph::shape_size(input_shape)}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1, ngraph::shape_size(input_shape)}), false); // Prepare flat filter data auto filters_const = std::dynamic_pointer_cast(dwsc->get_input_node_shared_ptr(1)); auto filters_size = ngraph::shape_size(filters_const->get_shape()); - auto transposed_filters_const = ov::op::util::make_try_fold( + auto transposed_filters_const = ov::op::util::make_try_fold( filters_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{4, 1, 2, 3, 0})); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{5}, ngraph::Shape{4, 1, 2, 3, 0})); - auto flat_filters_plane = ov::op::util::make_try_fold( + auto flat_filters_plane = ov::op::util::make_try_fold( transposed_filters_const, - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, filters_size}), + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, filters_size}), false); // Convert DWSC to a set of diagonal layers auto output_plane = DecomposeDWSC(dwsc, bias_const, fq_bias, flat_input_plane, flat_filters_plane); // Restore the original output shape - auto result = std::make_shared( + auto result = std::make_shared( output_plane, - 
ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{4}, - ngraph::Shape{1, output_channel_count, 1, output_width}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{4}, + ngraph::Shape{1, output_channel_count, 1, output_width}), false); - return std::make_shared(ngraph::ResultVector{std::make_shared(result)}, + return std::make_shared(ngraph::ResultVector{std::make_shared(result)}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp index 4c8dc70f452cb5..3ec27e0b5e151e 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_matmul_to_pointwise_convolution.cpp @@ -12,6 +12,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "ov_models/builders.hpp" #include "transformations/convert_matmul_to_pointwise_convolution.hpp" @@ -22,12 +23,12 @@ namespace { struct Graph { std::shared_ptr createFunction(); - std::shared_ptr input_params; + std::shared_ptr input_params; std::shared_ptr output; }; std::shared_ptr Graph::createFunction() { - auto result = std::make_shared(output); + auto result = std::make_shared(output); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -84,8 +85,8 @@ class CreateBaseDecorator : public CreateGraphDecorator { Graph CreateBaseDecorator::build() { Graph graph; - graph.input_params = std::make_shared(ngraph::element::i64, input_data_shape_); - graph.output = ngraph::opset7::Constant::create(ngraph::element::i64, input_const_shape_, {1}); + graph.input_params = std::make_shared(ngraph::element::i64, input_data_shape_); + graph.output = ov::op::v0::Constant::create(ngraph::element::i64, input_const_shape_, {1}); return graph; } @@ -98,17 +99,12 @@ class CreateFakeQuantize : public CreateGraphDecorator { void updateGraph(Graph&) override; }; -std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - return std::make_shared(parent_node, - input_low, - input_high, - output_low, - output_high, - 11); +std::shared_ptr createFakeQuantizeNode(std::shared_ptr parent_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + return std::make_shared(parent_node, input_low, input_high, output_low, output_high, 11); } void CreateFakeQuantize::updateGraph(Graph& graph) { @@ -124,7 +120,7 @@ class CreateMatMul : public CreateGraphDecorator { }; void CreateMatMul::updateGraph(Graph& graph) { - auto matmul_node = std::make_shared(graph.input_params, graph.output); + auto matmul_node = 
std::make_shared(graph.input_params, graph.output); graph.output = matmul_node; } @@ -149,7 +145,7 @@ void CreateAdd::updateGraph(Graph& graph) { } auto bias = ngraph::builder::makeConstant(ngraph::element::i64, axes, {}, true); - auto add_node = std::make_shared(graph.output, bias); + auto add_node = std::make_shared(graph.output, bias); graph.output = add_node; } @@ -181,17 +177,17 @@ template (ngraph::element::i64, ngraph::Shape{16, 8}); - auto constant_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{8, 8}, {1}); + graph.input_params = std::make_shared(ngraph::element::i64, ngraph::Shape{16, 8}); + auto constant_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{8, 8}, {1}); - auto const_reshape_before = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{4}, - ngraph::Shape{1, 1, 16, 8}); - auto reshape_before = std::make_shared(graph.input_params, const_reshape_before, false); + auto const_reshape_before = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{4}, + ngraph::Shape{1, 1, 16, 8}); + auto reshape_before = std::make_shared(graph.input_params, const_reshape_before, false); auto const_transpose_before = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); - auto transpose_before = std::make_shared(reshape_before, const_transpose_before); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2}); + auto transpose_before = std::make_shared(reshape_before, const_transpose_before); std::shared_ptr parent_node = constant_node; if (std::is_same, @@ -199,18 +195,18 @@ Graph createReferenceGraph() { parent_node = createFakeQuantizeNode(constant_node); } - auto weights_reshape_const = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{4}, - ngraph::Shape{8, 8, 1, 1}); - auto weights_reshaped = std::make_shared(parent_node, weights_reshape_const, false); + auto weights_reshape_const = std::make_shared(ngraph::element::Type_t::i64, + ngraph::Shape{4}, + ngraph::Shape{8, 8, 1, 1}); + auto weights_reshaped = std::make_shared(parent_node, weights_reshape_const, false); - auto conv_node = std::make_shared(transpose_before, - weights_reshaped, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - ngraph::Strides{1, 1}, - ngraph::op::PadType::VALID); + auto conv_node = std::make_shared(transpose_before, + weights_reshaped, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 0}, + ngraph::Strides{1, 1}, + ngraph::op::PadType::VALID); parent_node = conv_node; if (std::is_same, std::integral_constant>::value) { @@ -221,7 +217,7 @@ Graph createReferenceGraph() { } auto bias = ngraph::builder::makeConstant(ngraph::element::i64, axes, {}, true); - auto add_node = std::make_shared(parent_node, bias); + auto add_node = std::make_shared(parent_node, bias); parent_node = add_node; } @@ -231,13 +227,12 @@ Graph createReferenceGraph() { } auto const_transpose_after = - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); - auto transpose_after = std::make_shared(parent_node, const_transpose_after); + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1}); + auto transpose_after = std::make_shared(parent_node, const_transpose_after); - auto const_reshape_after = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{2}, - ngraph::Shape{16, 8}); - graph.output = 
std::make_shared(transpose_after, const_reshape_after, false); + auto const_reshape_after = + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{2}, ngraph::Shape{16, 8}); + graph.output = std::make_shared(transpose_after, const_reshape_after, false); return graph; } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp index ce4ec6b702c95c..e0c1485e5691f8 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_convert_padded_to_valid_convolution.cpp @@ -11,6 +11,7 @@ #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset7.hpp" #include "transformations/convert_padded_to_valid_convolution.hpp" namespace testing { @@ -61,7 +62,7 @@ struct ConvData { size_t pads_end_height; }; -void GetConvParams(std::shared_ptr conv, ConvData& conv_data) { +void GetConvParams(std::shared_ptr conv, ConvData& conv_data) { conv_data.input_channel_count = conv->input_value(0).get_shape()[1]; conv_data.input_height = conv->input_value(0).get_shape()[2]; conv_data.input_width = conv->input_value(0).get_shape()[3]; @@ -71,18 +72,18 @@ void GetConvParams(std::shared_ptr conv, ConvData& conv_data.pads_end_width = conv->get_pads_end()[1]; } -std::shared_ptr createFQ(std::shared_ptr& in_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); - return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); +std::shared_ptr createFQ(std::shared_ptr& in_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10}); + return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); } ngraph::Output createBiasFQ(const ngraph::Output& in_node, - std::shared_ptr& bias_const, + std::shared_ptr& bias_const, const bool& fq) { - std::shared_ptr bcast_add = std::make_shared(in_node, bias_const); + std::shared_ptr bcast_add = std::make_shared(in_node, bias_const); if (fq) { bcast_add = createFQ(bcast_add); @@ -91,24 +92,24 @@ ngraph::Output createBiasFQ(const ngraph::Output& in return bcast_add; } -std::shared_ptr createFunction(const bool& fq, - const modelType& model, - const ngraph::Output& input_node, - const ngraph::Shape& filters_shape, - const ngraph::Strides& conv_stride, - const ngraph::CoordinateDiff& pads_begin, - const ngraph::CoordinateDiff& pads_end, - const ngraph::Strides& conv_dilation, - const ngraph::Shape& bias_shape, - const ngraph::Strides& maxpool_stride, - const ngraph::Shape& maxpool_shape, - const ngraph::op::PadType& pad_type, - ConvData* conv_data) { - auto transpose_in_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 3, 1, 2}); - auto transpose_in = 
std::make_shared(input_node, transpose_in_order); - std::shared_ptr filters = std::make_shared( +std::shared_ptr createFunction(const bool& fq, + const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::CoordinateDiff& pads_begin, + const ngraph::CoordinateDiff& pads_end, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::Strides& maxpool_stride, + const ngraph::Shape& maxpool_shape, + const ngraph::op::PadType& pad_type, + ConvData* conv_data) { + auto transpose_in_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); + std::shared_ptr filters = std::make_shared( ngraph::element::i64, ngraph::Shape{4, input_node.get_shape()[3], filters_shape[0], filters_shape[1]}); @@ -116,69 +117,69 @@ std::shared_ptr createFunction(const bool& fq, filters = createFQ(filters); } - auto conv = std::make_shared(transpose_in, - filters, - conv_stride, - pads_begin, - pads_end, - conv_dilation, - pad_type); + auto conv = std::make_shared(transpose_in, + filters, + conv_stride, + pads_begin, + pads_end, + conv_dilation, + pad_type); if (conv_data) GetConvParams(conv, *conv_data); - auto transpose_out_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 2, 3, 1}); - auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); + auto transpose_out_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 2, 3, 1}); + auto bias_const = std::make_shared(ngraph::element::i64, bias_shape); - ngraph::Output last_op = std::make_shared(conv, transpose_out_order); + ngraph::Output last_op = std::make_shared(conv, transpose_out_order); switch (model) { case modelType::TranspConvBcastAddTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - last_op = std::make_shared(bcast_add, transpose_out_order); + last_op = std::make_shared(bcast_add, transpose_out_order); } break; case modelType::TranspConvActTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - std::shared_ptr activation = std::make_shared(bcast_add); + std::shared_ptr activation = std::make_shared(bcast_add); if (fq) { activation = createFQ(activation); } - last_op = std::make_shared(activation, transpose_out_order); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvBcastAddMaxPoolTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - auto maxpool = std::make_shared(bcast_add, - maxpool_stride, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - maxpool_shape, - ngraph::op::RoundingType::FLOOR, - ngraph::op::PadType::VALID); - auto transpose = std::make_shared(maxpool, transpose_out_order); - last_op = std::make_shared(transpose); + auto maxpool = std::make_shared(bcast_add, + maxpool_stride, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + maxpool_shape, + ngraph::op::RoundingType::FLOOR, + ngraph::op::PadType::VALID); + auto transpose = std::make_shared(maxpool, transpose_out_order); + last_op = std::make_shared(transpose); } break; case modelType::TranspConvBcastAddActTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - auto activation = std::make_shared(bcast_add); - last_op = std::make_shared(activation, transpose_out_order); + auto activation = std::make_shared(bcast_add); + last_op = std::make_shared(activation, transpose_out_order); } break; case 
modelType::TranspConvBcastAddMaxPoolActTransp: { auto bcast_add = createBiasFQ(conv, bias_const, fq); - auto maxpool = std::make_shared(bcast_add, - maxpool_stride, - ngraph::Shape{0, 0}, - ngraph::Shape{0, 0}, - maxpool_shape, - ngraph::op::RoundingType::FLOOR, - ngraph::op::PadType::VALID); - auto activation = std::make_shared(maxpool); - last_op = std::make_shared(activation, transpose_out_order); + auto maxpool = std::make_shared(bcast_add, + maxpool_stride, + ngraph::Shape{0, 0}, + ngraph::Shape{0, 0}, + maxpool_shape, + ngraph::op::RoundingType::FLOOR, + ngraph::op::PadType::VALID); + auto activation = std::make_shared(maxpool); + last_op = std::make_shared(activation, transpose_out_order); } break; case modelType::TranspConvTranspBcastAdd: { @@ -187,7 +188,7 @@ std::shared_ptr createFunction(const bool& fq, case modelType::TranspConvTranspBcastAddAct: { auto bcast_add = createBiasFQ(last_op, bias_const, fq); - last_op = std::make_shared(bcast_add); + last_op = std::make_shared(bcast_add); } break; case modelType::TranspConvTransp: @@ -195,7 +196,7 @@ std::shared_ptr createFunction(const bool& fq, break; } - return std::make_shared(last_op); + return std::make_shared(last_op); } std::shared_ptr get_initial_function(const bool& fq, @@ -211,7 +212,7 @@ std::shared_ptr get_initial_function(const bool& fq, const ngraph::Shape& maxpool_shape, const ngraph::op::PadType& pad_type, ConvData& conv_data) { - auto input_params = std::make_shared(ngraph::element::i64, input_shape); + auto input_params = std::make_shared(ngraph::element::i64, input_shape); auto result = createFunction(fq, model, input_params, @@ -365,23 +366,21 @@ void ConvertPaddedToValidConvTestFixture::SetUp() { conv_data); } -std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { - return std::make_shared( - input, // data - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset}), // begin sice index - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - {(size_t)0, offset + size}), // end slice index - ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides - std::vector{1, 0}, // begin mask - std::vector{1, 0}); // end mask +std::shared_ptr FlatCrop(ngraph::Output input, size_t offset, size_t size) { + return std::make_shared( + input, // data + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}), // begin sice index + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + {(size_t)0, offset + size}), // end slice index + ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}), // strides + std::vector{1, 0}, // begin mask + std::vector{1, 0}); // end mask } void InsertPadding(ngraph::OutputVector& input_rows_to_concat, size_t size, - const std::shared_ptr padding_const, + const std::shared_ptr padding_const, size_t biggest_padding) { if (size == biggest_padding) { input_rows_to_concat.push_back(padding_const); @@ -409,16 +408,16 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output if (!biggest_padding) return nullptr; - auto flat_input = std::make_shared( + auto flat_input = std::make_shared( input_node, - ngraph::opset7::Constant::create(ngraph::element::i64, - ngraph::Shape{2}, - ngraph::Shape{1ull, shape_size(input_node.get_shape())}), + ov::op::v0::Constant::create(ngraph::element::i64, + ngraph::Shape{2}, + ngraph::Shape{1ull, shape_size(input_node.get_shape())}), false); // Constant 
with zero padding
     auto const_holding_padding =
-        std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, ngraph::Shape{1, biggest_padding}, 0);
+        std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{1, biggest_padding}, 0);
 
     std::shared_ptr<ngraph::Node> original_row = flat_input;
     ngraph::OutputVector input_rows_to_concat;
@@ -450,7 +449,7 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output
         if (flat_right_padding) {
             InsertPadding(single_row_concat_inputs, flat_right_padding, const_holding_padding, biggest_padding);
         }
-        auto padded_row_concat = std::make_shared<ngraph::opset7::Concat>(single_row_concat_inputs, 1);
+        auto padded_row_concat = std::make_shared<ov::op::v0::Concat>(single_row_concat_inputs, 1);
         input_rows_to_concat.push_back(padded_row_concat);
     }
@@ -463,7 +462,7 @@ std::shared_ptr CreatePaddedNet(const ngraph::Output
         InsertPadding(input_rows_to_concat, padded_row_size, const_holding_padding, biggest_padding);
     }
-    auto padded_input_plane = std::make_shared<ngraph::opset7::Concat>(input_rows_to_concat, 1);
+    auto padded_input_plane = std::make_shared<ov::op::v0::Concat>(input_rows_to_concat, 1);
     return padded_input_plane;
 }
@@ -481,7 +480,7 @@ std::shared_ptr ConvertPaddedToValidConvTestFixture::get_refer
                                                                const ngraph::Shape& maxpool_shape,
                                                                const ngraph::op::PadType& pad_type,
                                                                const ConvData& conv_data) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
 
     // Add padding where necessary
@@ -494,10 +493,10 @@ std::shared_ptr ConvertPaddedToValidConvTestFixture::get_refer
     // padding
     // padding
     auto padded_input_plane = CreatePaddedNet(input_params, conv_data);
-    std::shared_ptr<ngraph::opset7::Result> result;
+    std::shared_ptr<ov::op::v0::Result> result;
     if (padded_input_plane) {
-        auto shape_const = std::make_shared<ngraph::opset7::Constant>(
+        auto shape_const = std::make_shared<ov::op::v0::Constant>(
             ngraph::element::i64,
             ngraph::Shape{4},
             ngraph::Shape{static_cast<size_t>(1),
@@ -505,7 +504,7 @@ std::shared_ptr ConvertPaddedToValidConvTestFixture::get_refer
                           conv_data.pads_begin_width + conv_data.input_width + conv_data.pads_end_width,
                           conv_data.input_channel_count});
         auto padded_input_plane_reshaped =
-            std::make_shared<ngraph::opset7::Reshape>(padded_input_plane, shape_const, false);
+            std::make_shared<ov::op::v1::Reshape>(padded_input_plane, shape_const, false);
         result = createFunction(fq,
                                 model,
                                 padded_input_plane_reshaped,
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp
index 2eed9b367d034e..0f7c5a99b463ac 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_2d_convolution.cpp
@@ -13,6 +13,7 @@
 #include "backend/gna_limitations.hpp"
 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/decompose_2d_convolution.hpp"
 
 using namespace ov::intel_gna::limitations;
@@ -53,14 +54,14 @@ typedef std::tuple
     std::shared_ptr input_node;
-    std::shared_ptr<ngraph::opset7::FakeQuantize> fq_filters;
-    std::shared_ptr<ngraph::opset7::Convolution> conv;
-    std::shared_ptr<ngraph::opset7::Add> bias;
-    std::shared_ptr<ngraph::opset7::FakeQuantize> fq_conv;
-    std::shared_ptr<ngraph::opset7::FakeQuantize> fq_bias;
-    std::shared_ptr<ngraph::opset7::MaxPool> max_pool;
+    std::shared_ptr<ov::op::v0::FakeQuantize> fq_filters;
+    std::shared_ptr<ov::op::v1::Convolution> conv;
+    std::shared_ptr<ov::op::v1::Add> bias;
+    std::shared_ptr<ov::op::v0::FakeQuantize> fq_conv;
+    std::shared_ptr<ov::op::v0::FakeQuantize> fq_bias;
+    std::shared_ptr<ov::op::v1::MaxPool> max_pool;
     std::shared_ptr af;
-    std::shared_ptr<ngraph::opset7::FakeQuantize> fq_af;
+    std::shared_ptr<ov::op::v0::FakeQuantize> fq_af;
     std::shared_ptr bias_const;
     std::shared_ptr last_op_in_sequence_for_replacement;
     size_t conv_count;
@@ -85,7 +86,7 @@ struct ConvParams {
     size_t output_width;
 };
 
-void GetConvParams(std::shared_ptr<ngraph::opset7::Convolution> conv, ConvParams& conv_params) {
+void
GetConvParams(std::shared_ptr conv, ConvParams& conv_params) { conv_params.output_height = conv->get_output_shape(0)[2]; conv_params.output_width = conv->get_output_shape(0)[3]; conv_params.input_channel_count = conv->input_value(0).get_shape()[1]; @@ -102,20 +103,20 @@ void GetConvParams(std::shared_ptr conv, ConvParams conv_params.output_channel_count = conv_params.filter_count; } -std::shared_ptr createFQ(std::shared_ptr& in_node) { - auto input_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto input_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); - auto output_low = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto output_high = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); - return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); +std::shared_ptr createFQ(std::shared_ptr& in_node) { + auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5}); + auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10}); + return std::make_shared(in_node, input_low, input_high, output_low, output_high, 11); } std::shared_ptr createBiasFQ(const ngraph::Output& in_node, - std::shared_ptr& bias_const, - std::shared_ptr& bias, + std::shared_ptr& bias_const, + std::shared_ptr& bias, const bool& fq) { std::shared_ptr node; - bias = std::make_shared(in_node, bias_const); + bias = std::make_shared(in_node, bias_const); node = bias; if (fq) { @@ -125,22 +126,22 @@ std::shared_ptr createBiasFQ(const ngraph::Output& i return node; } -std::shared_ptr createFunction(const bool& fq, - const modelType& model, - const ngraph::Output& input_node, - const ngraph::Shape& filters_shape, - const ngraph::Strides& conv_stride, - const ngraph::Strides& conv_dilation, - const ngraph::Shape& bias_shape, - const ngraph::Strides& maxpool_stride, - const ngraph::Shape& maxpool_shape, - GraphData* graph_data, - ConvParams* conv_params) { - auto transpose_in_order = std::make_shared(ngraph::element::i64, - ngraph::Shape{4}, - std::vector{0, 3, 1, 2}); - auto transpose_in = std::make_shared(input_node, transpose_in_order); - std::shared_ptr fq_filters = std::make_shared( +std::shared_ptr createFunction(const bool& fq, + const modelType& model, + const ngraph::Output& input_node, + const ngraph::Shape& filters_shape, + const ngraph::Strides& conv_stride, + const ngraph::Strides& conv_dilation, + const ngraph::Shape& bias_shape, + const ngraph::Strides& maxpool_stride, + const ngraph::Shape& maxpool_shape, + GraphData* graph_data, + ConvParams* conv_params) { + auto transpose_in_order = std::make_shared(ngraph::element::i64, + ngraph::Shape{4}, + std::vector{0, 3, 1, 2}); + auto transpose_in = std::make_shared(input_node, transpose_in_order); + std::shared_ptr fq_filters = std::make_shared( ngraph::element::i64, ngraph::Shape{4, input_node.get_shape()[3], filters_shape[0], filters_shape[1]}); @@ -148,77 +149,77 @@ std::shared_ptr createFunction(const bool& fq, fq_filters = createFQ(fq_filters); } - auto conv = std::make_shared(transpose_in, - fq_filters, - conv_stride, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 0}, - conv_dilation, - ngraph::op::PadType::VALID); + auto conv = 
std::make_shared<ov::op::v1::Convolution>(transpose_in,
+                                                  fq_filters,
+                                                  conv_stride,
+                                                  ngraph::CoordinateDiff{0, 0},
+                                                  ngraph::CoordinateDiff{0, 0},
+                                                  conv_dilation,
+                                                  ngraph::op::PadType::VALID);
     if (conv_params)
         GetConvParams(conv, *conv_params);

-    auto transpose_out_order = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64,
-                                                                          ngraph::Shape{4},
-                                                                          std::vector<int64_t>{0, 2, 3, 1});
-    auto bias_const = std::make_shared<ngraph::opset7::Constant>(ngraph::element::i64, bias_shape);
-    std::shared_ptr<ngraph::opset7::Add> bias = nullptr;
+    auto transpose_out_order = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64,
+                                                                      ngraph::Shape{4},
+                                                                      std::vector<int64_t>{0, 2, 3, 1});
+    auto bias_const = std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, bias_shape);
+    std::shared_ptr<ov::op::v1::Add> bias = nullptr;
     std::shared_ptr<ngraph::Node> fq_bias = nullptr, fq_af = nullptr;
-    std::shared_ptr<ngraph::opset7::MaxPool> max_pool = nullptr;
+    std::shared_ptr<ov::op::v1::MaxPool> max_pool = nullptr;
     std::shared_ptr<ngraph::Node> activation = nullptr;
     std::shared_ptr<ngraph::Node> fq_conv = nullptr;
-    std::shared_ptr<ngraph::Node> last_op = std::make_shared<ngraph::opset7::Transpose>(conv, transpose_out_order);
+    std::shared_ptr<ngraph::Node> last_op = std::make_shared<ov::op::v1::Transpose>(conv, transpose_out_order);

     switch (model) {
     case modelType::TranspConvBcastAddTransp: {
         fq_bias = createBiasFQ(conv, bias_const, bias, fq);
-        last_op = std::make_shared<ngraph::opset7::Transpose>(fq_bias, transpose_out_order);
+        last_op = std::make_shared<ov::op::v1::Transpose>(fq_bias, transpose_out_order);
     } break;

     case modelType::TranspConvActTransp: {
         fq_bias = createBiasFQ(conv, bias_const, bias, fq);
-        std::shared_ptr<ngraph::Node> activation = std::make_shared<ngraph::opset7::Relu>(fq_bias);
+        std::shared_ptr<ngraph::Node> activation = std::make_shared<ov::op::v0::Relu>(fq_bias);
         if (fq) {
             activation = createFQ(activation);
         }
-        last_op = std::make_shared<ngraph::opset7::Transpose>(activation, transpose_out_order);
+        last_op = std::make_shared<ov::op::v1::Transpose>(activation, transpose_out_order);
     } break;

     case modelType::TranspConvBcastAddMaxPoolTransp: {
         fq_bias = createBiasFQ(conv, bias_const, bias, fq);
-        max_pool = std::make_shared<ngraph::opset7::MaxPool>(fq_bias,
-                                                             maxpool_stride,
-                                                             ngraph::Shape{0, 0},
-                                                             ngraph::Shape{0, 0},
-                                                             maxpool_shape,
-                                                             ngraph::op::RoundingType::FLOOR,
-                                                             ngraph::op::PadType::VALID);
-        auto transpose = std::make_shared<ngraph::opset7::Transpose>(max_pool, transpose_out_order);
-        last_op = std::make_shared<ngraph::opset7::Relu>(transpose);
+        max_pool = std::make_shared<ov::op::v1::MaxPool>(fq_bias,
+                                                         maxpool_stride,
+                                                         ngraph::Shape{0, 0},
+                                                         ngraph::Shape{0, 0},
+                                                         maxpool_shape,
+                                                         ngraph::op::RoundingType::FLOOR,
+                                                         ngraph::op::PadType::VALID);
+        auto transpose = std::make_shared<ov::op::v1::Transpose>(max_pool, transpose_out_order);
+        last_op = std::make_shared<ov::op::v0::Relu>(transpose);
     } break;

     case modelType::TranspConvBcastAddActTransp: {
         fq_bias = createBiasFQ(conv, bias_const, bias, fq);
-        activation = std::make_shared<ngraph::opset7::Relu>(fq_bias);
-        last_op = std::make_shared<ngraph::opset7::Transpose>(activation, transpose_out_order);
+        activation = std::make_shared<ov::op::v0::Relu>(fq_bias);
+        last_op = std::make_shared<ov::op::v1::Transpose>(activation, transpose_out_order);
     } break;

     case modelType::TranspConvBcastAddMaxPoolActTransp: {
         fq_bias = createBiasFQ(conv, bias_const, bias, fq);
-        max_pool = std::make_shared<ngraph::opset7::MaxPool>(fq_bias,
-                                                             maxpool_stride,
-                                                             ngraph::Shape{0, 0},
-                                                             ngraph::Shape{0, 0},
-                                                             maxpool_shape,
-                                                             ngraph::op::RoundingType::FLOOR,
-                                                             ngraph::op::PadType::VALID);
-        activation = std::make_shared<ngraph::opset7::Relu>(max_pool);
+        max_pool = std::make_shared<ov::op::v1::MaxPool>(fq_bias,
+                                                         maxpool_stride,
+                                                         ngraph::Shape{0, 0},
+                                                         ngraph::Shape{0, 0},
+                                                         maxpool_shape,
+                                                         ngraph::op::RoundingType::FLOOR,
+                                                         ngraph::op::PadType::VALID);
+        activation = std::make_shared<ov::op::v0::Relu>(max_pool);
         if (fq) {
             fq_af = createFQ(activation);
         }
-        last_op = std::make_shared<ngraph::opset7::Transpose>(fq_af ? fq_af : activation, transpose_out_order);
+        last_op = std::make_shared<ov::op::v1::Transpose>(fq_af ? fq_af : activation, transpose_out_order);
     } break;

     case modelType::TranspConvTranspBcastAdd: {
@@ -227,14 +228,14 @@ std::shared_ptr<ngraph::Function> createFunction(const bool& fq,

     case modelType::TranspConvTranspBcastAddAct: {
         fq_bias = createBiasFQ(last_op, bias_const, bias, fq);
-        last_op = std::make_shared<ngraph::opset7::Relu>(fq_bias);
+        last_op = std::make_shared<ov::op::v0::Relu>(fq_bias);
     } break;

     case modelType::TranspConvTransp: {
         if (fq) {
             auto conv_ptr = conv->shared_from_this();
             fq_conv = createFQ(conv_ptr);
-            last_op = std::make_shared<ngraph::opset7::Transpose>(fq_conv, transpose_out_order);
+            last_op = std::make_shared<ov::op::v1::Transpose>(fq_conv, transpose_out_order);
         }
     }
     default:
@@ -242,13 +243,13 @@ std::shared_ptr<ngraph::Function> createFunction(const bool& fq,
     }

     if (graph_data) {
-        graph_data->fq_filters = fq ? std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_filters) : nullptr;
+        graph_data->fq_filters = fq ? std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(fq_filters) : nullptr;
         graph_data->conv = conv;
         graph_data->bias = bias;
-        graph_data->fq_conv = fq ? std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_conv) : nullptr;
-        graph_data->fq_bias = fq ? std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_bias) : nullptr;
+        graph_data->fq_conv = fq ? std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(fq_conv) : nullptr;
+        graph_data->fq_bias = fq ? std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(fq_bias) : nullptr;
         graph_data->af = std::dynamic_pointer_cast<ov::op::util::UnaryElementwiseArithmetic>(activation);
-        graph_data->fq_af = fq ? std::dynamic_pointer_cast<ngraph::opset7::FakeQuantize>(fq_af) : nullptr;
+        graph_data->fq_af = fq ? std::dynamic_pointer_cast<ov::op::v0::FakeQuantize>(fq_af) : nullptr;
         graph_data->max_pool = max_pool;
         graph_data->last_op_in_sequence_for_replacement = last_op;
         graph_data->bias_const = nullptr;
@@ -260,7 +261,7 @@ std::shared_ptr<ngraph::Function> createFunction(const bool& fq,
         }
     }

-    return std::make_shared<ngraph::opset7::Result>(last_op);
+    return std::make_shared<ov::op::v0::Result>(last_op);
 }

 std::shared_ptr<ngraph::Function> get_initial_function(const bool& fq,
@@ -274,7 +275,7 @@ std::shared_ptr<ngraph::Function> get_initial_function(const bool& fq,
                                                        const ngraph::Shape& maxpool_shape,
                                                        GraphData& graph_data,
                                                        ConvParams& conv_params) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
     auto result = createFunction(fq,
                                  model,
                                  input_params,
@@ -395,36 +396,33 @@ void Decompose2DConvTestFixture::TearDown() {
     Limitations::deinit();
 }

-std::shared_ptr<ngraph::Node> ReshapeBiasConst(std::shared_ptr<ngraph::opset7::Add> conv_bias,
+std::shared_ptr<ngraph::Node> ReshapeBiasConst(std::shared_ptr<ov::op::v1::Add> conv_bias,
                                                const ConvParams& conv_params) {
-    auto add_const =
-        std::dynamic_pointer_cast<ngraph::opset7::Constant>(conv_bias->input_value(1).get_node_shared_ptr());
+    auto add_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(conv_bias->input_value(1).get_node_shared_ptr());

     IE_ASSERT(add_const);

     auto bias_size = shape_size(add_const->get_shape());
-    return ov::op::util::make_try_fold<ngraph::opset7::Reshape>(
+    return ov::op::util::make_try_fold<ov::op::v1::Reshape>(
         add_const,
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}),
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{1, bias_size, 1, 1}),
         false);
 }

-std::shared_ptr<ngraph::opset7::StridedSlice> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
+std::shared_ptr<ov::op::v1::StridedSlice> FlatCrop(ngraph::Output<ngraph::Node> input, size_t offset, size_t size) {
     auto shape = input.get_shape();
-    return std::make_shared<ngraph::opset7::StridedSlice>(
-        input,  // data
-        ngraph::opset7::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         {(size_t)0, offset}),  // begin slice index
-        ngraph::opset7::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         {(size_t)0, offset + size}),  // end slice index
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}),  // strides
-        std::vector<int64_t>{1, 0},  // begin mask
-        std::vector<int64_t>{1, 0});  // end mask
+    return std::make_shared<ov::op::v1::StridedSlice>(
+        input,  // data
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)0, offset}),  // begin slice index
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     {(size_t)0, offset + size}),  // end slice index
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {(size_t)1, (size_t)1}),  // strides
+        std::vector<int64_t>{1, 0},  // begin mask
+        std::vector<int64_t>{1, 0});  // end mask
 }

-static std::vector<std::shared_ptr<ngraph::Node>> Split2DConvFilters(std::shared_ptr<ngraph::opset7::Constant>& filters,
+static std::vector<std::shared_ptr<ngraph::Node>> Split2DConvFilters(std::shared_ptr<ov::op::v0::Constant>& filters,
                                                                      const bool& vertical_permute,
                                                                      const bool& horizontal_permute,
                                                                      const size_t& split_channels) {
@@ -438,17 +436,17 @@ static std::vector<std::shared_ptr<ngraph::Node>> Split2DConvFilters(std::shared
     IE_ASSERT(filter_shape.size() == 4);

     if (split_channels > 1) {
-        const auto axis_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
-        const auto split = std::make_shared<ngraph::opset7::Split>(filters, axis_node, split_channels);
+        const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {1});
+        const auto split = std::make_shared<ov::op::v1::Split>(filters, axis_node, split_channels);
         flat_filters = split->outputs();
     }

     if (horizontal_permute) {
         for (size_t split_index = 0; split_index < split_channels; split_index++) {
             ngraph::Output<ngraph::Node>& flat_filter = flat_filters[split_index];
-            result.push_back(std::make_shared<ngraph::opset7::Transpose>(
+            result.push_back(std::make_shared<ov::op::v1::Transpose>(
                 flat_filter,
-                ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 1, 3, 2})));
+                ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 1, 3, 2})));
         }
     }

@@ -467,9 +465,9 @@ static std::vector<std::shared_ptr<ngraph::Node>> Split2DConvFilters(std::shared
     }

     for (auto& new_filter : result)
-        new_filter = ov::op::util::make_try_fold<ngraph::opset7::Reshape>(
+        new_filter = ov::op::util::make_try_fold<ov::op::v1::Reshape>(
             new_filter,
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, reshape_shape),
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, reshape_shape),
             false);

     return result;
@@ -478,33 +476,32 @@ static std::vector<std::shared_ptr<ngraph::Node>> Split2DConvFilters(std::shared
 ngraph::OutputVector SplitInput(const GraphData& graph_data, ConvParams& conv_params) {
     // We need to have proper input shape first
     ngraph::OutputVector split_planes;
-    auto padded_input_plane = std::make_shared<ngraph::opset7::Reshape>(
+    auto padded_input_plane = std::make_shared<ov::op::v1::Reshape>(
         graph_data.input_node,
-        ngraph::opset7::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         ngraph::Shape{1, shape_size(graph_data.input_node->get_shape())}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     ngraph::Shape{1, shape_size(graph_data.input_node->get_shape())}),
         false);

     if (graph_data.conv_count > 1) {
         // If we split input plane and filters due to GNA limitations - we must sum their results at the end
         conv_params.input_channel_count /= graph_data.conv_count;

-        auto reshape_before_transpose = std::make_shared<ngraph::opset7::Reshape>(
+        auto reshape_before_transpose = std::make_shared<ov::op::v1::Reshape>(
             padded_input_plane,
-            ngraph::opset7::Constant::create(
+            ov::op::v0::Constant::create(
                 ngraph::element::i64,
                 ngraph::Shape{2},
                 {shape_size(padded_input_plane->get_shape()) / graph_data.conv_count, graph_data.conv_count}),
             false);

-        auto transpose_before_channel_wise_split = std::make_shared<ngraph::opset7::Transpose>(
+        auto transpose_before_channel_wise_split = std::make_shared<ov::op::v1::Transpose>(
             reshape_before_transpose,
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));

-        const auto axis_node = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
-        const auto split = std::make_shared<ngraph::opset7::Split>(transpose_before_channel_wise_split,
-                                                                   axis_node,
-                                                                   graph_data.conv_count);
+        const auto axis_node = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
+        const auto split =
+            std::make_shared<ov::op::v1::Split>(transpose_before_channel_wise_split, axis_node, graph_data.conv_count);
         split_planes = split->outputs();
     } else {
         split_planes.push_back(padded_input_plane);
@@ -518,7 +515,7 @@ std::vector<std::shared_ptr<ngraph::Node>> SplitFilters(const GraphData& graph_d
     // data as well; we also need to take filter height and potential dilation into account when modifying the filters

     // Take account of fake quantize when getting filter values
-    auto filter_values = std::dynamic_pointer_cast<ngraph::opset7::Constant>(
+    auto filter_values = std::dynamic_pointer_cast<ov::op::v0::Constant>(
         graph_data.fq_filters == nullptr ? graph_data.conv->input_value(1).get_node_shared_ptr()
                                          : graph_data.fq_filters->input_value(0).get_node_shared_ptr());
     bool vertical_permute = (conv_params.filter_height > 1);
@@ -571,14 +568,13 @@ void TransformInput(const GraphData& graph_data,
     }

     // Interleaving dilated input planes
-    std::shared_ptr<ngraph::Node> dilated_chunks_concat =
-        std::make_shared<ngraph::opset7::Concat>(dilated_input_planes, 0);
+    std::shared_ptr<ngraph::Node> dilated_chunks_concat = std::make_shared<ov::op::v0::Concat>(dilated_input_planes, 0);

     // Additional reshape is required for strided slices of input intended for each filter row
     if (conv_params.filter_stride_height > 1) {
-        dilated_chunks_concat = std::make_shared<ngraph::opset7::Reshape>(
+        dilated_chunks_concat = std::make_shared<ov::op::v1::Reshape>(
             dilated_chunks_concat,
-            ngraph::opset7::Constant::create(
+            ov::op::v0::Constant::create(
                 ngraph::element::i64,
                 ngraph::Shape{2},
                 {conv_params.filter_height,
@@ -586,47 +582,47 @@ void TransformInput(const GraphData& graph_data,
             false);
     }

-    auto transposed_dilated_chunks = std::make_shared<ngraph::opset7::Transpose>(
+    auto transposed_dilated_chunks = std::make_shared<ov::op::v1::Transpose>(
         dilated_chunks_concat,
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));

     // Flattening of interleaved input planes
-    auto flattened_dilated_transposed_input = std::make_shared<ngraph::opset7::Reshape>(
+    auto flattened_dilated_transposed_input = std::make_shared<ov::op::v1::Reshape>(
         transposed_dilated_chunks,
-        ngraph::opset7::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         {(size_t)1,
-                                          conv_params.input_width * conv_params.input_channel_count *
-                                              conv_params.output_height * conv_params.filter_height}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     {(size_t)1,
+                                      conv_params.input_width * conv_params.input_channel_count *
+                                          conv_params.output_height * conv_params.filter_height}),
         false);

     split_input_plane = flattened_dilated_transposed_input;
 }

-static void InsertFQLayer(const std::shared_ptr<ngraph::opset7::FakeQuantize> fqLayer,
+static void InsertFQLayer(const std::shared_ptr<ov::op::v0::FakeQuantize> fqLayer,
                           std::shared_ptr<ngraph::Node> lastNode) {
     if (fqLayer != nullptr) {
         lastNode = fqLayer->clone_with_new_inputs(
             {lastNode,
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fqLayer->input_value(1).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fqLayer->input_value(1).get_node_shared_ptr())
                      ->cast_vector<float>()),
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fqLayer->input_value(2).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fqLayer->input_value(2).get_node_shared_ptr())
                      ->cast_vector<float>()),
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fqLayer->input_value(3).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fqLayer->input_value(3).get_node_shared_ptr())
                      ->cast_vector<float>()),
-             ngraph::opset7::Constant::create(
+             ov::op::v0::Constant::create(
                  ngraph::element::f32,
                  ngraph::Shape{1},
-                 std::dynamic_pointer_cast<ngraph::opset7::Constant>(fqLayer->input_value(4).get_node_shared_ptr())
+                 std::dynamic_pointer_cast<ov::op::v0::Constant>(fqLayer->input_value(4).get_node_shared_ptr())
                      ->cast_vector<float>())});
     }
 }
@@ -638,21 +634,21 @@ std::shared_ptr<ngraph::Node> Create1DConv(const GraphData& graph_data,
                                            const size_t conv_index,
                                            const size_t h_index) {
     // Transpose NHWC => NCHW
-    std::shared_ptr<ngraph::Node> nchw_input = std::make_shared<ngraph::opset7::Transpose>(
+    std::shared_ptr<ngraph::Node> nchw_input = std::make_shared<ov::op::v1::Transpose>(
         input,
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})->output(0));
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2})->output(0));

     // Fake quantize
     InsertFQLayer(graph_data.fq_filters, filters);

     // 1D Convolution & fake quantize
-    auto conv = std::make_shared<ngraph::opset7::Convolution>(nchw_input,
-                                                              filters,
-                                                              ngraph::Strides{1, conv_params.filter_stride_width},
-                                                              ngraph::CoordinateDiff{0, 0},
-                                                              ngraph::CoordinateDiff{0, 0},
-                                                              ngraph::Strides{1, 1},
-                                                              ngraph::op::PadType::VALID);
+    auto conv = std::make_shared<ov::op::v1::Convolution>(nchw_input,
+                                                          filters,
+                                                          ngraph::Strides{1, conv_params.filter_stride_width},
+                                                          ngraph::CoordinateDiff{0, 0},
+                                                          ngraph::CoordinateDiff{0, 0},
+                                                          ngraph::Strides{1, 1},
+                                                          ngraph::op::PadType::VALID);
     std::string conv_name =
         graph_data.conv->get_friendly_name() + "_H_" + std::to_string(h_index) + "_CH_" + std::to_string(0);
     conv->set_friendly_name(conv_name);
@@ -661,19 +657,19 @@ std::shared_ptr<ngraph::Node> Create1DConv(const GraphData& graph_data,

     // Bias & fake quantize
     if (graph_data.bias_const && conv_index == 0) {
-        last_conv_block_op = std::make_shared<ngraph::opset7::Add>(conv, graph_data.bias_const);
+        last_conv_block_op = std::make_shared<ov::op::v1::Add>(conv, graph_data.bias_const);
         InsertFQLayer(graph_data.fq_bias, last_conv_block_op);
     }

     // Max pooling
     if (graph_data.pool_size_width > 1 || graph_data.pool_stride_width > 1) {
-        last_conv_block_op = std::make_shared<ngraph::opset7::MaxPool>(last_conv_block_op,
-                                                                       ngraph::Strides{1, graph_data.pool_stride_width},
-                                                                       ngraph::Shape{0, 0},
-                                                                       ngraph::Shape{0, 0},
-                                                                       ngraph::Shape{1, graph_data.pool_size_width},
-                                                                       graph_data.max_pool->get_rounding_type(),
-                                                                       ngraph::op::PadType::VALID);
+        last_conv_block_op = std::make_shared<ov::op::v1::MaxPool>(last_conv_block_op,
+                                                                   ngraph::Strides{1, graph_data.pool_stride_width},
+                                                                   ngraph::Shape{0, 0},
+                                                                   ngraph::Shape{0, 0},
+                                                                   ngraph::Shape{1, graph_data.pool_size_width},
+                                                                   graph_data.max_pool->get_rounding_type(),
+                                                                   ngraph::op::PadType::VALID);
     }
     // Activation function
     if (graph_data.af && graph_data.conv_count == 1) {
@@ -682,9 +678,9 @@ std::shared_ptr<ngraph::Node> Create1DConv(const GraphData& graph_data,
     }

     // Transpose NCHW => NHWC
-    auto nhwc_output = std::make_shared<ngraph::opset7::Transpose>(
+    auto nhwc_output = std::make_shared<ov::op::v1::Transpose>(
         last_conv_block_op,
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})->output(0));
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1})->output(0));
     return nhwc_output;
 }
@@ -740,16 +736,16 @@ std::shared_ptr<ngraph::Node> CreateDeomposedConv(const GraphData& graph_data,
             dilated_chunks.push_back(slice);
         }

-        dilated_chunks_concat = std::make_shared<ngraph::opset7::Concat>(dilated_chunks, 0);
+        dilated_chunks_concat = std::make_shared<ov::op::v0::Concat>(dilated_chunks, 0);
     }

-    auto transposed_dilated_chunks = std::make_shared<ngraph::opset7::Transpose>(
+    auto transposed_dilated_chunks = std::make_shared<ov::op::v1::Transpose>(
         dilated_chunks_concat,
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0})->output(0));

-    auto flattened_dilated_conv_input = std::make_shared<ngraph::opset7::Reshape>(
+    auto flattened_dilated_conv_input = std::make_shared<ov::op::v1::Reshape>(
         transposed_dilated_chunks,
-        ngraph::opset7::Constant::create(
+        ov::op::v0::Constant::create(
             ngraph::element::i64,
             ngraph::Shape{4},
             ngraph::Shape{1, 1, output_width, h_1_filter_channel_count * conv_params.filter_width}),
@@ -758,12 +754,11 @@ std::shared_ptr<ngraph::Node> CreateDeomposedConv(const GraphData& graph_data,
         nhwc_conv_y_input = flattened_dilated_conv_input;
     } else {
         // If no horizontal split is done, only reshape is required before decomposed convolution
-        nhwc_conv_y_input = std::make_shared<ngraph::opset7::Reshape>(
+        nhwc_conv_y_input = std::make_shared<ov::op::v1::Reshape>(
             nhwc_conv_y_input,
-            ngraph::opset7::Constant::create(
-                ngraph::element::i64,
-                ngraph::Shape{4},
-                ngraph::Shape{1, 1, conv_params.input_width, h_1_filter_channel_count}),
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape{4},
+                                         ngraph::Shape{1, 1, conv_params.input_width, h_1_filter_channel_count}),
             false);
     }

@@ -784,7 +779,7 @@ std::shared_ptr<ngraph::Node> CreateDeomposedConv(const GraphData& graph_data,
     if (result_chunks.size() > 1) {
         // Concat in horizontal dimension
         // In NHWC index of H is 1
-        auto concatenated_sub_results = std::make_shared<ngraph::opset7::Concat>(result_chunks, 1);
+        auto concatenated_sub_results = std::make_shared<ov::op::v0::Concat>(result_chunks, 1);
         last_op = concatenated_sub_results;
     }
     return last_op;
@@ -823,7 +818,7 @@ static bool ShouldDecompose(GraphData& graph_data, const ConvParams& conv_params
     return true;
 }

-std::shared_ptr<ngraph::opset7::Result> Decompose(const GraphData& graph_data, ConvParams& conv_params) {
+std::shared_ptr<ov::op::v0::Result> Decompose(const GraphData& graph_data, ConvParams& conv_params) {
     std::vector<std::shared_ptr<ngraph::Node>> partial_conv_results;

     // Split input and filters due to GNA filter element count limit
@@ -845,7 +840,7 @@ std::shared_ptr<ngraph::opset7::Result> Decompose(const GraphData& graph_data, C
     std::shared_ptr<ngraph::Node> conv_result = partial_conv_results.front();
     for (size_t i = 1; i < partial_conv_results.size(); i++) {
-        auto add_result = std::make_shared<ngraph::opset7::Add>(partial_conv_results[i], conv_result);
+        auto add_result = std::make_shared<ov::op::v1::Add>(partial_conv_results[i], conv_result);
         conv_result = add_result;
     }
@@ -859,7 +854,7 @@ std::shared_ptr<ngraph::opset7::Result> Decompose(const GraphData& graph_data, C
     ngraph::replace_node(graph_data.last_op_in_sequence_for_replacement, conv_result);
     conv_result->set_friendly_name(conv_result_name);

-    return std::make_shared<ngraph::opset7::Result>(conv_result);
+    return std::make_shared<ov::op::v0::Result>(conv_result);
 }

 std::shared_ptr<ngraph::Function> Decompose2DConvTestFixture::get_reference(const bool& fq,
@@ -867,18 +862,18 @@ std::shared_ptr<ngraph::Function> Decompose2DConvTestFixture::get_reference(cons
                                                                             const ngraph::PartialShape& input_shape,
                                                                             GraphData& graph_data,
                                                                             ConvParams& conv_params) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
     graph_data.input_node = input_params;
     ShouldDecompose(graph_data, conv_params);
     if (model != modelType::TranspConvTransp) {
         graph_data.bias_const =
-            ReshapeBiasConst(std::dynamic_pointer_cast<ngraph::opset7::Add>(graph_data.bias), conv_params);
+            ReshapeBiasConst(std::dynamic_pointer_cast<ov::op::v1::Add>(graph_data.bias), conv_params);
     }

     // Create decomposed reference function
-    std::shared_ptr<ngraph::opset7::Result> result;
+    std::shared_ptr<ov::op::v0::Result> result;
     result = Decompose(graph_data, conv_params);

     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
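[Editor's note] Every hunk in these test files follows one mechanical substitution: each ngraph::opsetN convenience alias is replaced by the versioned op class it stands for (Parameter, Constant, Result, Relu, Concat, MatMul and FakeQuantize live in ov::op::v0; Convolution, Transpose, Add, Reshape, MaxPool, Split and StridedSlice in ov::op::v1). Because the opset namespaces consist of using-declarations only, the rename cannot change behaviour. A minimal compile-time check of that claim — a sketch, not part of the patch; it assumes the legacy ngraph opset header is still on the include path, which holds on any branch where the pre-patch code built:

    // Sketch: the opset aliases and the versioned classes are the same types.
    #include <type_traits>
    #include "ngraph/opsets/opset7.hpp"      // legacy alias namespace
    #include "openvino/op/constant.hpp"      // versioned op classes
    #include "openvino/op/convolution.hpp"

    static_assert(std::is_same<ngraph::opset7::Constant, ov::op::v0::Constant>::value,
                  "opset7::Constant aliases the v0 op");
    static_assert(std::is_same<ngraph::opset7::Convolution, ov::op::v1::Convolution>::value,
                  "opset7::Convolution aliases the v1 op");

If either assertion failed, the renamed code in this diff could not compile either.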
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp
index a5e90ea86a6672..a568ab5d5fde84 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_decompose_mvn.cpp
@@ -10,6 +10,8 @@

 #include "backend/gna_limitations.hpp"
 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset2.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "transformations/decompose_mvn.hpp"
 #include "transformations/op_conversions/convert_mvn1_to_mvn6.hpp"
@@ -40,73 +42,73 @@ struct MVNParams {

 static std::shared_ptr<ngraph::Node> NormalizeVariance(
     const MVNParams& mvn_data,
-    const std::shared_ptr<ngraph::opset8::Add>& subtract_mean,
-    const std::shared_ptr<ngraph::opset8::Constant>& avg_broadcast_const) {
+    const std::shared_ptr<ov::op::v1::Add>& subtract_mean,
+    const std::shared_ptr<ov::op::v0::Constant>& avg_broadcast_const) {
     // Prepare consts
     auto combined_C_H = mvn_data.C * mvn_data.H;

     std::vector<float> avg_weights(8 * mvn_data.W / mvn_data.num_parts, 1.0f / mvn_data.W);
-    auto avg_weights_const = ngraph::opset8::Constant::create(ngraph::element::f32,
-                                                              ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1},
-                                                              avg_weights);
+    auto avg_weights_const = ov::op::v0::Constant::create(ngraph::element::f32,
+                                                          ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1},
+                                                          avg_weights);
     std::vector<float> eps_tensor(combined_C_H * mvn_data.W, mvn_data.eps);
     auto eps_tensor_const =
-        ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, eps_tensor);
+        ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, eps_tensor);
     std::vector<float> minus_half(combined_C_H * mvn_data.W, -0.5f);
     auto minus_half_const =
-        ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, minus_half);
+        ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1, combined_C_H * mvn_data.W}, minus_half);

     // Calculate square of the difference between input and its mean
-    auto squared_diff = std::make_shared<ngraph::opset8::Multiply>(subtract_mean, subtract_mean);
+    auto squared_diff = std::make_shared<ov::op::v1::Multiply>(subtract_mean, subtract_mean);
     squared_diff->set_friendly_name("MvnSqrDiff");

     // Calculate sum of the squares
-    auto squared_diff_reshape = std::make_shared<ngraph::opset8::Reshape>(
+    auto squared_diff_reshape = std::make_shared<ov::op::v1::Reshape>(
         squared_diff,
-        ngraph::opset8::Constant::create(
+        ov::op::v0::Constant::create(
             ngraph::element::i64,
             ngraph::Shape{4},
             ngraph::Shape{mvn_data.N, combined_C_H * mvn_data.num_parts, 1ull, mvn_data.W / mvn_data.num_parts}),
         false);
-    auto transposed_input_3 = std::make_shared<ngraph::opset8::Transpose>(
+    auto transposed_input_3 = std::make_shared<ov::op::v1::Transpose>(
         squared_diff_reshape,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
-    auto transposed_avg_conv_3 = std::make_shared<ngraph::opset8::Convolution>(transposed_input_3,
-                                                                               avg_weights_const,
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::op::PadType::VALID);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
+    auto transposed_avg_conv_3 = std::make_shared<ov::op::v1::Convolution>(transposed_input_3,
+                                                                           avg_weights_const,
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::op::PadType::VALID);
     transposed_avg_conv_3->set_friendly_name("MvnAvg3");
-    auto avg_conv_3 = std::make_shared<ngraph::opset8::Transpose>(
+    auto avg_conv_3 = std::make_shared<ov::op::v1::Transpose>(
         transposed_avg_conv_3,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
-    auto reshape_avg_conv_3 = std::make_shared<ngraph::opset8::Reshape>(
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
+    auto reshape_avg_conv_3 = std::make_shared<ov::op::v1::Reshape>(
         avg_conv_3,
-        ngraph::opset8::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{4},
-                                         ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{4},
+                                     ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}),
         false);
-    auto transposed_input_4 = std::make_shared<ngraph::opset8::Transpose>(
+    auto transposed_input_4 = std::make_shared<ov::op::v1::Transpose>(
         reshape_avg_conv_3,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
-    auto transposed_avg_conv_4 = std::make_shared<ngraph::opset8::Convolution>(transposed_input_4,
-                                                                               avg_broadcast_const,
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::op::PadType::VALID);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
+    auto transposed_avg_conv_4 = std::make_shared<ov::op::v1::Convolution>(transposed_input_4,
+                                                                           avg_broadcast_const,
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::op::PadType::VALID);
     transposed_avg_conv_4->set_friendly_name("MvnAvg4");
-    auto avg_conv_4 = std::make_shared<ngraph::opset8::Transpose>(
+    auto avg_conv_4 = std::make_shared<ov::op::v1::Transpose>(
         transposed_avg_conv_4,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
-    auto reshape_avg_conv_4 = std::make_shared<ngraph::opset8::Reshape>(
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
+    auto reshape_avg_conv_4 = std::make_shared<ov::op::v1::Reshape>(
         avg_conv_4,
-        ngraph::opset8::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         ngraph::Shape{1ull, combined_C_H * mvn_data.W}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     ngraph::Shape{1ull, combined_C_H * mvn_data.W}),
         false);

     std::shared_ptr<ngraph::Node> inv_stdev;
@@ -115,103 +117,102 @@ static std::shared_ptr<ngraph::Node> NormalizeVariance(
     // even though the built-in MVN1 to MVN6 transformation enforces outside setting

     // Add epsilon inside the square root
-    auto add_epsilon = std::make_shared<ngraph::opset8::Add>(eps_tensor_const, reshape_avg_conv_4);
+    auto add_epsilon = std::make_shared<ov::op::v1::Add>(eps_tensor_const, reshape_avg_conv_4);

     // Calculate square root and inversion
-    auto log_var_eps = std::make_shared<ngraph::opset8::Log>(add_epsilon);
+    auto log_var_eps = std::make_shared<ov::op::v0::Log>(add_epsilon);
     log_var_eps->set_friendly_name("MvnLogVarEps");
-    auto log_inv_stdev = std::make_shared<ngraph::opset8::Multiply>(log_var_eps, minus_half_const);
+    auto log_inv_stdev = std::make_shared<ov::op::v1::Multiply>(log_var_eps, minus_half_const);
     log_inv_stdev->set_friendly_name("MvnLogInvStdev");
-    inv_stdev = std::make_shared<ngraph::opset8::Exp>(log_inv_stdev);
+    inv_stdev = std::make_shared<ov::op::v0::Exp>(log_inv_stdev);
     inv_stdev->set_friendly_name("MvnInvStdev");

-    auto normalized_output = std::make_shared<ngraph::opset8::Multiply>(subtract_mean, inv_stdev);
+    auto normalized_output = std::make_shared<ov::op::v1::Multiply>(subtract_mean, inv_stdev);
     normalized_output->set_friendly_name("MvnOutput");

     return normalized_output;
 }

-static std::shared_ptr<ngraph::opset8::Result> Decompose(const std::shared_ptr<ngraph::Node> input_node,
-                                                         const MVNParams& mvn_data) {
+static std::shared_ptr<ov::op::v0::Result> Decompose(const std::shared_ptr<ngraph::Node> input_node,
+                                                     const MVNParams& mvn_data) {
     // Prepare data
     auto combined_C_H = mvn_data.C * mvn_data.H;

     std::vector<float> neg_avg_weights(8 * mvn_data.W / mvn_data.num_parts, -1.0f / mvn_data.W);
-    auto neg_avg_weights_const =
-        ngraph::opset8::Constant::create(ngraph::element::f32,
-                                         ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1},
-                                         neg_avg_weights);
+    auto neg_avg_weights_const = ov::op::v0::Constant::create(ngraph::element::f32,
+                                                              ngraph::Shape{8, mvn_data.W / mvn_data.num_parts, 1, 1},
+                                                              neg_avg_weights);

     std::vector<float> avg_broadcast(8 * mvn_data.W * mvn_data.num_parts, 0.0f);
     for (size_t i = 0; i < mvn_data.W * mvn_data.num_parts; i++) {
         avg_broadcast[i * 8] = 1.0f;
     }
-    auto avg_broadcast_const = ngraph::opset8::Constant::create(ngraph::element::f32,
-                                                                ngraph::Shape{mvn_data.W, 8 * mvn_data.num_parts, 1, 1},
-                                                                avg_broadcast);
+    auto avg_broadcast_const = ov::op::v0::Constant::create(ngraph::element::f32,
+                                                            ngraph::Shape{mvn_data.W, 8 * mvn_data.num_parts, 1, 1},
+                                                            avg_broadcast);

     // Create average calculation part of the graph
     // We assume C = 1 case (combined channels)
-    auto reshape = std::make_shared<ngraph::opset8::Reshape>(
+    auto reshape = std::make_shared<ov::op::v1::Reshape>(
         input_node,
-        ngraph::opset8::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{4},
-                                         ngraph::Shape{mvn_data.N, 1ull, combined_C_H, mvn_data.W}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{4},
+                                     ngraph::Shape{mvn_data.N, 1ull, combined_C_H, mvn_data.W}),
         false);
-    auto input_4d = std::make_shared<ngraph::opset8::Reshape>(
+    auto input_4d = std::make_shared<ov::op::v1::Reshape>(
         reshape,
-        ngraph::opset8::Constant::create(
+        ov::op::v0::Constant::create(
             ngraph::element::i64,
             ngraph::Shape{4},
             ngraph::Shape{mvn_data.N, combined_C_H * mvn_data.num_parts, 1ull, mvn_data.W / mvn_data.num_parts}),
         false);
-    auto input_2d = std::make_shared<ngraph::opset8::Reshape>(
+    auto input_2d = std::make_shared<ov::op::v1::Reshape>(
         reshape,
-        ngraph::opset8::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         ngraph::Shape{1ull, combined_C_H * mvn_data.W}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     ngraph::Shape{1ull, combined_C_H * mvn_data.W}),
         false);
-    auto transposed_input_1 = std::make_shared<ngraph::opset8::Transpose>(
+    auto transposed_input_1 = std::make_shared<ov::op::v1::Transpose>(
         input_4d,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
-    auto transposed_avg_conv_1 = std::make_shared<ngraph::opset8::Convolution>(transposed_input_1,
-                                                                               neg_avg_weights_const,
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::op::PadType::VALID);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
+    auto transposed_avg_conv_1 = std::make_shared<ov::op::v1::Convolution>(transposed_input_1,
+                                                                           neg_avg_weights_const,
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::op::PadType::VALID);
     transposed_avg_conv_1->set_friendly_name("MvnAvg1");
-    auto avg_conv_1 = std::make_shared<ngraph::opset8::Transpose>(
+    auto avg_conv_1 = std::make_shared<ov::op::v1::Transpose>(
         transposed_avg_conv_1,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
-    auto reshape_avg_conv_1 = std::make_shared<ngraph::opset8::Reshape>(
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
+    auto reshape_avg_conv_1 = std::make_shared<ov::op::v1::Reshape>(
         avg_conv_1,
-        ngraph::opset8::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{4},
-                                         ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{4},
+                                     ngraph::Shape{mvn_data.N, 1ull, combined_C_H, 8 * mvn_data.num_parts}),
         false);
-    auto transposed_input_2 = std::make_shared<ngraph::opset8::Transpose>(
+    auto transposed_input_2 = std::make_shared<ov::op::v1::Transpose>(
         reshape_avg_conv_1,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
-    auto transposed_avg_conv_2 = std::make_shared<ngraph::opset8::Convolution>(transposed_input_2,
-                                                                               avg_broadcast_const,
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::op::PadType::VALID);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
+    auto transposed_avg_conv_2 = std::make_shared<ov::op::v1::Convolution>(transposed_input_2,
+                                                                           avg_broadcast_const,
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::op::PadType::VALID);
     transposed_avg_conv_2->set_friendly_name("MvnAvg2");
-    auto avg_conv_2 = std::make_shared<ngraph::opset8::Transpose>(
+    auto avg_conv_2 = std::make_shared<ov::op::v1::Transpose>(
         transposed_avg_conv_2,
-        ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
-    auto avg_conv_2_2d = std::make_shared<ngraph::opset8::Reshape>(
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 2, 3, 1}));
+    auto avg_conv_2_2d = std::make_shared<ov::op::v1::Reshape>(
         avg_conv_2,
-        ngraph::opset8::Constant::create(ngraph::element::i64,
-                                         ngraph::Shape{2},
-                                         ngraph::Shape{1ull, combined_C_H * mvn_data.W}),
+        ov::op::v0::Constant::create(ngraph::element::i64,
+                                     ngraph::Shape{2},
+                                     ngraph::Shape{1ull, combined_C_H * mvn_data.W}),
         false);
-    auto subtract_mean = std::make_shared<ngraph::opset8::Add>(input_2d, avg_conv_2_2d);
+    auto subtract_mean = std::make_shared<ov::op::v1::Add>(input_2d, avg_conv_2_2d);
     subtract_mean->set_friendly_name("MvnSubMean");

     std::shared_ptr<ngraph::Node> mvn_output, pre_output = subtract_mean;
@@ -223,22 +224,20 @@ static std::shared_ptr<ov::op::v0::Result> Decompose(const std::shared_ptr<ngrap

     if (input_node->get_output_shape(0).size() == 3) {
-        mvn_output = std::make_shared<ngraph::opset8::Reshape>(
+        mvn_output = std::make_shared<ov::op::v1::Reshape>(
             pre_output,
-            ngraph::opset8::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape{3},
-                                             {mvn_data.C, mvn_data.H, mvn_data.W}),
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {mvn_data.C, mvn_data.H, mvn_data.W}),
             false);
     } else {
-        mvn_output = std::make_shared<ngraph::opset8::Reshape>(
+        mvn_output = std::make_shared<ov::op::v1::Reshape>(
             pre_output,
-            ngraph::opset8::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape{4},
-                                             {mvn_data.N, mvn_data.C, mvn_data.H, mvn_data.W}),
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape{4},
+                                         {mvn_data.N, mvn_data.C, mvn_data.H, mvn_data.W}),
             false);
     }

-    return std::make_shared<ngraph::opset8::Result>(mvn_output);
+    return std::make_shared<ov::op::v0::Result>(mvn_output);
 }

 std::shared_ptr<ngraph::Function> getReferenceFunction(const ngraph::Shape& input_shape,
@@ -271,8 +270,8 @@ std::shared_ptr<ngraph::Function> getReferenceFunction(const ngraph::Shape& inpu
     }

     // Create decomposed reference function
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, input_shape);
-    std::shared_ptr<ngraph::opset8::Result> result = Decompose(input_params, mvn_data);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, input_shape);
+    std::shared_ptr<ov::op::v0::Result> result = Decompose(input_params, mvn_data);

     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
@@ -284,18 +283,18 @@ std::shared_ptr<ngraph::Function> getInitialFunction(const ngraph::Shape& input_
                                                      const InferenceEngine::SizeVector& axes,
                                                      const bool& across_channels,
                                                      const bool& mvn_version_6) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, input_shape);
     std::shared_ptr<ngraph::Node> mvn;

     if (mvn_version_6) {
         const auto axesConst =
-            std::make_shared<ngraph::opset8::Constant>(ngraph::element::i64, ngraph::Shape{axes.size()}, axes);
-        mvn = std::make_shared<ngraph::opset8::MVN>(input_params, axesConst, normalize_variance, eps, eps_mode);
+            std::make_shared<ov::op::v0::Constant>(ngraph::element::i64, ngraph::Shape{axes.size()}, axes);
+        mvn = std::make_shared<ov::opset8::MVN>(input_params, axesConst, normalize_variance, eps, eps_mode);
     } else {
-        mvn = std::make_shared<ngraph::opset2::MVN>(input_params, across_channels, normalize_variance, eps);
+        mvn = std::make_shared<ov::opset2::MVN>(input_params, across_channels, normalize_variance, eps);
     }

-    auto result = std::make_shared<ngraph::opset8::Result>(mvn);
+    auto result = std::make_shared<ov::op::v0::Result>(mvn);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
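[Editor's note] This file is the one place where the replacement targets include opset namespaces rather than only ov::op::vN classes, which is what the two added openvino/opsets includes are for: the tests need both MVN-1 (first shipped in opset2, flag-based axis selection) and MVN-6 (opset8, explicit axes input). A short sketch of the two constructions mirrored by getInitialFunction above; the shape, axes and epsilon below are illustrative values, not the test parameters:

    #include <memory>
    #include "openvino/core/model.hpp"
    #include "openvino/opsets/opset2.hpp"
    #include "openvino/opsets/opset8.hpp"

    std::shared_ptr<ov::Model> make_mvn_model(bool mvn_version_6) {
        auto input = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 1, 5, 300});
        std::shared_ptr<ov::Node> mvn;
        if (mvn_version_6) {
            // MVN-6: the reduction axes are an explicit constant input
            auto axes = ov::opset8::Constant::create(ov::element::i64, ov::Shape{2}, {2, 3});
            mvn = std::make_shared<ov::opset8::MVN>(input, axes, true, 1.0e-5f, ov::op::MVNEpsMode::INSIDE_SQRT);
        } else {
            // MVN-1: axes are selected through the across_channels flag
            mvn = std::make_shared<ov::opset2::MVN>(input, false, true, 1.0e-5);
        }
        return std::make_shared<ov::Model>(ov::OutputVector{mvn}, ov::ParameterVector{input});
    }

The decomposition these tests verify replaces either variant with the reshape/convolution ladder built in Decompose and NormalizeVariance above.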
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp
index c4ae53b7255da6..7fbdfd7cf40966 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_handle_transposes_around_matmul.cpp
@@ -11,6 +11,7 @@
 #include <numeric>

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/handle_transposes_around_matmul.hpp"

 namespace handle_transpose_before_matmul {
@@ -19,30 +20,30 @@ std::shared_ptr<ngraph::Function> CreateTransposeMatmulFunction(const ngraph::Sh
                                                                 const ngraph::Shape& reshape_shape,
                                                                 const ngraph::Shape& matmul_shape,
                                                                 bool create_reshape_after_transpose) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);

     auto new_shape_const =
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
-    auto reshape = std::make_shared<ngraph::opset7::Reshape>(input_params, new_shape_const, false);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
+    auto reshape = std::make_shared<ov::op::v1::Reshape>(input_params, new_shape_const, false);

-    auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0});
-    auto transpose = std::make_shared<ngraph::opset7::Transpose>(reshape, transpose_order);
+    auto transpose_order = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0});
+    auto transpose = std::make_shared<ov::op::v1::Transpose>(reshape, transpose_order);

     std::vector<size_t> data(ngraph::shape_size(matmul_shape));
     std::iota(std::begin(data), std::end(data), 1);
-    auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data);
-    std::shared_ptr<ngraph::opset7::MatMul> matmul;
+    auto constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data);
+    std::shared_ptr<ov::op::v0::MatMul> matmul;
     if (create_reshape_after_transpose) {
         auto reshape_after_transpose_const =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
         auto reshape_after_transpose =
-            std::make_shared<ngraph::opset7::Reshape>(transpose, reshape_after_transpose_const, false);
-        matmul = std::make_shared<ngraph::opset7::MatMul>(reshape_after_transpose, constant);
+            std::make_shared<ov::op::v1::Reshape>(transpose, reshape_after_transpose_const, false);
+        matmul = std::make_shared<ov::op::v0::MatMul>(reshape_after_transpose, constant);
     } else {
-        matmul = std::make_shared<ngraph::opset7::MatMul>(transpose, constant);
+        matmul = std::make_shared<ov::op::v0::MatMul>(transpose, constant);
     }

-    auto result = std::make_shared<ngraph::opset7::Result>(matmul);
+    auto result = std::make_shared<ov::op::v0::Result>(matmul);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
@@ -50,27 +51,27 @@ std::shared_ptr<ngraph::Function> CreateMatmulFunction(const ngraph::Shape& inpu
                                                        const ngraph::Shape& reshape_shape,
                                                        const ngraph::Shape& matmul_shape,
                                                        bool create_reshape_instead_of_transpose) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);

-    std::shared_ptr<ngraph::opset7::Reshape> reshape;
+    std::shared_ptr<ov::op::v1::Reshape> reshape;
     auto const_shape =
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
     if (create_reshape_instead_of_transpose) {
-        auto new_reshape = std::make_shared<ngraph::opset7::Reshape>(input_params, const_shape, false);
-        auto new_shape_after_transpose = ngraph::opset7::Constant::create(ngraph::element::i64,
-                                                                          ngraph::Shape{reshape_shape.size()},
-                                                                          {reshape_shape[1], reshape_shape[0]});
-        reshape = std::make_shared<ngraph::opset7::Reshape>(new_reshape, new_shape_after_transpose, false);
+        auto new_reshape = std::make_shared<ov::op::v1::Reshape>(input_params, const_shape, false);
+        auto new_shape_after_transpose = ov::op::v0::Constant::create(ngraph::element::i64,
+                                                                      ngraph::Shape{reshape_shape.size()},
+                                                                      {reshape_shape[1], reshape_shape[0]});
+        reshape = std::make_shared<ov::op::v1::Reshape>(new_reshape, new_shape_after_transpose, false);
     } else {
-        reshape = std::make_shared<ngraph::opset7::Reshape>(input_params, const_shape, false);
+        reshape = std::make_shared<ov::op::v1::Reshape>(input_params, const_shape, false);
     }

     std::vector<size_t> data(ngraph::shape_size(matmul_shape));
     std::iota(std::begin(data), std::end(data), 1);
-    auto constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data);
-    auto matmul = std::make_shared<ngraph::opset7::MatMul>(reshape, constant);
+    auto constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data);
+    auto matmul = std::make_shared<ov::op::v0::MatMul>(reshape, constant);

-    auto result = std::make_shared<ngraph::opset7::Result>(matmul);
+    auto result = std::make_shared<ov::op::v0::Result>(matmul);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
@@ -79,43 +80,41 @@ std::shared_ptr<ngraph::Function> CreateConcatTransposeMatmulFunction(const ngra
                                                                       const ngraph::Shape& reshape1_shape,
                                                                       const ngraph::Shape& reshape2_shape,
                                                                       bool create_reshape_after_transpose) {
-    auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0});
+    auto transpose_order = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0});

-    auto input1_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input1_shape);
+    auto input1_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input1_shape);
     std::vector<size_t> data1(ngraph::shape_size(input1_shape));
     std::iota(std::begin(data1), std::end(data1), 1);
-    auto concat1_const = ngraph::opset7::Constant::create(ngraph::element::i64, input1_shape, data1);
+    auto concat1_const = ov::op::v0::Constant::create(ngraph::element::i64, input1_shape, data1);
     ngraph::OutputVector concat1_chunks{input1_params, concat1_const};
-    auto concat1 = std::make_shared<ngraph::opset7::Concat>(concat1_chunks, 0);
-    auto transpose1 = std::make_shared<ngraph::opset7::Transpose>(concat1, transpose_order);
+    auto concat1 = std::make_shared<ov::op::v0::Concat>(concat1_chunks, 0);
+    auto transpose1 = std::make_shared<ov::op::v1::Transpose>(concat1, transpose_order);

-    auto input2_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input2_shape);
+    auto input2_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input2_shape);
     std::vector<size_t> data2(ngraph::shape_size(input2_shape));
     std::iota(std::begin(data2), std::end(data2), 1);
-    auto concat2_const = ngraph::opset7::Constant::create(ngraph::element::i64, input2_shape, data2);
+    auto concat2_const = ov::op::v0::Constant::create(ngraph::element::i64, input2_shape, data2);
     ngraph::OutputVector concat2_chunks{input2_params, concat2_const};
-    auto concat2 = std::make_shared<ngraph::opset7::Concat>(concat2_chunks, 0);
-    auto transpose2 = std::make_shared<ngraph::opset7::Transpose>(concat2, transpose_order);
+    auto concat2 = std::make_shared<ov::op::v0::Concat>(concat2_chunks, 0);
+    auto transpose2 = std::make_shared<ov::op::v1::Transpose>(concat2, transpose_order);

-    std::shared_ptr<ngraph::opset7::MatMul> matmul;
+    std::shared_ptr<ov::op::v0::MatMul> matmul;
     if (create_reshape_after_transpose) {
-        auto reshape_after_transpose1_const = ngraph::opset7::Constant::create(ngraph::element::i64,
-                                                                               ngraph::Shape{reshape1_shape.size()},
-                                                                               reshape1_shape);
+        auto reshape_after_transpose1_const =
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape1_shape.size()}, reshape1_shape);
         auto reshape_after_transpose1 =
-            std::make_shared<ngraph::opset7::Reshape>(transpose1, reshape_after_transpose1_const, false);
-        auto reshape_after_transpose2_const = ngraph::opset7::Constant::create(ngraph::element::i64,
-                                                                               ngraph::Shape{reshape2_shape.size()},
-                                                                               reshape2_shape);
+            std::make_shared<ov::op::v1::Reshape>(transpose1, reshape_after_transpose1_const, false);
+        auto reshape_after_transpose2_const =
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape2_shape.size()}, reshape2_shape);
         auto reshape_after_transpose2 =
-            std::make_shared<ngraph::opset7::Reshape>(transpose2, reshape_after_transpose2_const, false);
-        matmul = std::make_shared<ngraph::opset7::MatMul>(reshape_after_transpose1, reshape_after_transpose2);
+            std::make_shared<ov::op::v1::Reshape>(transpose2, reshape_after_transpose2_const, false);
+        matmul = std::make_shared<ov::op::v0::MatMul>(reshape_after_transpose1, reshape_after_transpose2);
     } else {
-        matmul = std::make_shared<ngraph::opset7::MatMul>(transpose1, transpose2);
+        matmul = std::make_shared<ov::op::v0::MatMul>(transpose1, transpose2);
     }

-    auto result = std::make_shared<ngraph::opset7::Result>(matmul);
+    auto result = std::make_shared<ov::op::v0::Result>(matmul);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                               ngraph::ParameterVector{input1_params, input2_params});
 }
@@ -125,37 +124,37 @@ std::shared_ptr<ngraph::Function> CreateConcatMatmulFunction(const ngraph::Shape
                                                              const ngraph::Shape& reshape1_shape,
                                                              const ngraph::Shape& reshape2_shape,
                                                              bool create_reshape_instead_of_transpose) {
-    auto input1_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input1_shape);
+    auto input1_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input1_shape);
     std::vector<size_t> data1(ngraph::shape_size(input1_shape));
     std::iota(std::begin(data1), std::end(data1), 1);
-    auto concat1_const = ngraph::opset7::Constant::create(ngraph::element::i64, input1_shape, data1);
+    auto concat1_const = ov::op::v0::Constant::create(ngraph::element::i64, input1_shape, data1);
     ngraph::OutputVector concat1_chunks{input1_params, concat1_const};
-    auto concat1 = std::make_shared<ngraph::opset7::Concat>(concat1_chunks, 0);
+    auto concat1 = std::make_shared<ov::op::v0::Concat>(concat1_chunks, 0);

-    auto input2_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input2_shape);
+    auto input2_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input2_shape);
     std::vector<size_t> data2(ngraph::shape_size(input2_shape));
     std::iota(std::begin(data2), std::end(data2), 1);
-    auto concat2_const = ngraph::opset7::Constant::create(ngraph::element::i64, input2_shape, data2);
+    auto concat2_const = ov::op::v0::Constant::create(ngraph::element::i64, input2_shape, data2);
     ngraph::OutputVector concat2_chunks{input2_params, concat2_const};
-    auto concat2 = std::make_shared<ngraph::opset7::Concat>(concat2_chunks, 0);
+    auto concat2 = std::make_shared<ov::op::v0::Concat>(concat2_chunks, 0);

-    std::shared_ptr<ngraph::opset7::MatMul> matmul;
+    std::shared_ptr<ov::op::v0::MatMul> matmul;
     if (create_reshape_instead_of_transpose) {
-        auto new_shape_after_transpose1 = ngraph::opset7::Constant::create(ngraph::element::i64,
-                                                                           ngraph::Shape{reshape1_shape.size()},
-                                                                           {reshape1_shape[1], reshape1_shape[0]});
-        auto reshape1 = std::make_shared<ngraph::opset7::Reshape>(concat1, new_shape_after_transpose1, false);
-        auto new_shape_after_transpose2 = ngraph::opset7::Constant::create(ngraph::element::i64,
-                                                                           ngraph::Shape{reshape2_shape.size()},
-                                                                           {reshape2_shape[1], reshape2_shape[0]});
-        auto reshape2 = std::make_shared<ngraph::opset7::Reshape>(concat2, new_shape_after_transpose2, false);
-        matmul = std::make_shared<ngraph::opset7::MatMul>(reshape1, reshape2);
+        auto new_shape_after_transpose1 = ov::op::v0::Constant::create(ngraph::element::i64,
+                                                                       ngraph::Shape{reshape1_shape.size()},
+                                                                       {reshape1_shape[1], reshape1_shape[0]});
+        auto reshape1 = std::make_shared<ov::op::v1::Reshape>(concat1, new_shape_after_transpose1, false);
+        auto new_shape_after_transpose2 = ov::op::v0::Constant::create(ngraph::element::i64,
+                                                                       ngraph::Shape{reshape2_shape.size()},
+                                                                       {reshape2_shape[1], reshape2_shape[0]});
+        auto reshape2 = std::make_shared<ov::op::v1::Reshape>(concat2, new_shape_after_transpose2, false);
+        matmul = std::make_shared<ov::op::v0::MatMul>(reshape1, reshape2);
     } else {
-        matmul = std::make_shared<ngraph::opset7::MatMul>(concat1, concat2);
+        matmul = std::make_shared<ov::op::v0::MatMul>(concat1, concat2);
     }

-    auto result = std::make_shared<ngraph::opset7::Result>(matmul);
+    auto result = std::make_shared<ov::op::v0::Result>(matmul);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                               ngraph::ParameterVector{input1_params, input2_params});
 }
@@ -173,39 +172,39 @@ std::shared_ptr<ngraph::Function> CreateMatmulTransposeFunction(const ngraph::Sh
                                                                 bool matmul_on_left_side,
                                                                 bool enable_fq1,
                                                                 bool enable_fq2) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);

     std::vector<size_t> data(ngraph::shape_size(matmul_shape));
     std::iota(std::begin(data), std::end(data), 1);
-    auto matmul_constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data);
-    std::shared_ptr<ngraph::Node> node = std::make_shared<ngraph::opset7::MatMul>(input_params, matmul_constant);
+    auto matmul_constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data);
+    std::shared_ptr<ngraph::Node> node = std::make_shared<ov::op::v0::MatMul>(input_params, matmul_constant);
     const auto matmul_output_shape = node->get_output_shape(0);

     if (enable_fq1) {
-        node = std::make_shared<ngraph::opset7::FakeQuantize>(
-            node,
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
-            255);
+        node =
+            std::make_shared<ov::op::v0::FakeQuantize>(node,
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                                                       255);
     }

     if (enable_add) {
-        auto add_const = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_output_shape, {1});
+        auto add_const = ov::op::v0::Constant::create(ngraph::element::i64, matmul_output_shape, {1});
         if (matmul_on_left_side) {
-            node = std::make_shared<ngraph::opset7::Add>(add_const, node);
+            node = std::make_shared<ov::op::v1::Add>(add_const, node);
         } else {
-            node = std::make_shared<ngraph::opset7::Add>(node, add_const);
+            node = std::make_shared<ov::op::v1::Add>(node, add_const);
         }

         if (enable_fq2) {
-            node = std::make_shared<ngraph::opset7::FakeQuantize>(
+            node = std::make_shared<ov::op::v0::FakeQuantize>(
                 node,
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
                 255);
         }
     }
@@ -213,23 +212,22 @@ std::shared_ptr<ngraph::Function> CreateMatmulTransposeFunction(const ngraph::Sh
     if (create_reshape_before_transpose) {
         auto matmul_output_shape = node->get_output_shape(0);
         std::swap(matmul_output_shape[0], matmul_output_shape[1]);
-        auto reshape_before_transpose_const =
-            ngraph::opset7::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape{matmul_output_shape.size()},
-                                             matmul_output_shape);
-        node = std::make_shared<ngraph::opset7::Reshape>(node, reshape_before_transpose_const, false);
+        auto reshape_before_transpose_const = ov::op::v0::Constant::create(ngraph::element::i64,
+                                                                           ngraph::Shape{matmul_output_shape.size()},
+                                                                           matmul_output_shape);
+        node = std::make_shared<ov::op::v1::Reshape>(node, reshape_before_transpose_const, false);
     }

-    auto transpose_order = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0});
-    node = std::make_shared<ngraph::opset7::Transpose>(node, transpose_order);
+    auto transpose_order = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, 0});
+    node = std::make_shared<ov::op::v1::Transpose>(node, transpose_order);

     if (enable_last_reshape) {
         auto shape_const =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
-        node = std::make_shared<ngraph::opset7::Reshape>(node, shape_const, false);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
+        node = std::make_shared<ov::op::v1::Reshape>(node, shape_const, false);
     }

-    auto result = std::make_shared<ngraph::opset7::Result>(node);
+    auto result = std::make_shared<ov::op::v0::Result>(node);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
@@ -242,65 +240,65 @@ std::shared_ptr<ngraph::Function> CreateMatmulFunction(const ngraph::Shape& inpu
                                                        bool matmul_on_left_side,
                                                        bool enable_fq1,
                                                        bool enable_fq2) {
-    auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, input_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);

     std::vector<size_t> data(ngraph::shape_size(matmul_shape));
     std::iota(std::begin(data), std::end(data), 1);
-    auto matmul_constant = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_shape, data);
-    std::shared_ptr<ngraph::Node> node = std::make_shared<ngraph::opset7::MatMul>(input_params, matmul_constant);
+    auto matmul_constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_shape, data);
+    std::shared_ptr<ngraph::Node> node = std::make_shared<ov::op::v0::MatMul>(input_params, matmul_constant);
     const auto matmul_output_shape = node->get_output_shape(0);

     if (enable_fq1) {
-        node = std::make_shared<ngraph::opset7::FakeQuantize>(
-            node,
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-            ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
-            255);
+        node =
+            std::make_shared<ov::op::v0::FakeQuantize>(node,
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                                                       ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                                                       255);
     }

     if (enable_add) {
-        auto add_const = ngraph::opset7::Constant::create(ngraph::element::i64, matmul_output_shape, {1});
+        auto add_const = ov::op::v0::Constant::create(ngraph::element::i64, matmul_output_shape, {1});
         if (matmul_on_left_side) {
-            node = std::make_shared<ngraph::opset7::Add>(add_const, node);
+            node = std::make_shared<ov::op::v1::Add>(add_const, node);
         } else {
-            node = std::make_shared<ngraph::opset7::Add>(node, add_const);
+            node = std::make_shared<ov::op::v1::Add>(node, add_const);
         }

         if (enable_fq2) {
-            node = std::make_shared<ngraph::opset7::FakeQuantize>(
+            node = std::make_shared<ov::op::v0::FakeQuantize>(
                 node,
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-                ngraph::opset7::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
                 255);
         }
     }

     std::shared_ptr<ngraph::Node> reshape;
     auto shape_const =
-        ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{reshape_shape.size()}, reshape_shape);
     if (create_reshape_instead_of_transpose) {
         auto reshape_instead_of_transpose_const =
-            ngraph::opset7::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape{matmul_output_shape.size()},
-                                             {matmul_output_shape[1], matmul_output_shape[0]});
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape{matmul_output_shape.size()},
+                                         {matmul_output_shape[1], matmul_output_shape[0]});
         auto reshape_instead_of_transpose =
-            std::make_shared<ngraph::opset7::Reshape>(node, reshape_instead_of_transpose_const, false);
+            std::make_shared<ov::op::v1::Reshape>(node, reshape_instead_of_transpose_const, false);
         reshape = reshape_instead_of_transpose;
         if (enable_last_reshape) {
-            reshape = std::make_shared<ngraph::opset7::Reshape>(reshape_instead_of_transpose, shape_const, false);
+            reshape = std::make_shared<ov::op::v1::Reshape>(reshape_instead_of_transpose, shape_const, false);
         }
     } else {
         reshape = node;
         if (enable_last_reshape) {
-            reshape = std::make_shared<ngraph::opset7::Reshape>(node, shape_const, false);
+            reshape = std::make_shared<ov::op::v1::Reshape>(node, shape_const, false);
         }
     }

-    auto result = std::make_shared<ngraph::opset7::Result>(reshape);
+    auto result = std::make_shared<ov::op::v0::Result>(reshape);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
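[Editor's note] Six of the hunks above rebuild the same per-tensor FakeQuantize: identical input and output ranges of (-0.1, 0.1) and 255 quantization levels, i.e. a node that only snaps values onto a fixed grid. Condensed into a helper for readability; this is a sketch, the wrap_with_fq name is ours, and the test code intentionally inlines the expression each time:

    #include <memory>
    #include "openvino/op/constant.hpp"
    #include "openvino/op/fake_quantize.hpp"

    std::shared_ptr<ov::Node> wrap_with_fq(const std::shared_ptr<ov::Node>& node) {
        auto low = ov::op::v0::Constant::create(ov::element::f32, {1}, {-0.1f});
        auto high = ov::op::v0::Constant::create(ov::element::f32, {1}, {0.1f});
        // input range == output range, so the FQ only discretizes to 255 levels
        return std::make_shared<ov::op::v0::FakeQuantize>(node, low, high, low, high, 255);
    }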
std::make_shared(params, params); auto copy = std::make_shared(add); ngraph::OutputVector concat_inputs = {}; for (int i = 0; i < m_inputs_num - 1; ++i) { concat_inputs.push_back(copy); } concat_inputs.push_back(add); - auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, m_axis); + auto result = std::make_shared(concat); m_ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); @@ -145,7 +145,7 @@ class InsertCopyLayerSplitConcatTest : public InsertCopyLayerTest { InsertCopyLayerTest::SetUp(); { - auto params = std::make_shared(ngraph::element::i64, input_shape); + auto params = std::make_shared(ngraph::element::i64, input_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, m_inputs_num, m_axis); OPENVINO_SUPPRESS_DEPRECATED_END @@ -154,15 +154,15 @@ class InsertCopyLayerSplitConcatTest : public InsertCopyLayerTest { for (int i = 0; i < m_inputs_num; ++i) { concat_inputs.push_back(split->output(i)); } - auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, m_axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, input_shape); + auto params = std::make_shared(ngraph::element::i64, input_shape); OPENVINO_SUPPRESS_DEPRECATED_START auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, m_inputs_num, m_axis); OPENVINO_SUPPRESS_DEPRECATED_END @@ -178,9 +178,9 @@ class InsertCopyLayerSplitConcatTest : public InsertCopyLayerTest { else concat_inputs.push_back(split->output(i)); } - auto concat = std::make_shared(concat_inputs, m_axis); + auto concat = std::make_shared(concat_inputs, m_axis); - auto result = std::make_shared(concat); + auto result = std::make_shared(concat); m_ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); @@ -256,21 +256,21 @@ TEST_P(InsertCopyLayerMultiParamConcatTest, CompareWithRefs) { ngraph::Shape in_shape{10}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); ngraph::OutputVector concat_inputs{params, params}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(params); ngraph::OutputVector concat_inputs{copy, copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -305,27 +305,27 @@ TEST_P(InsertCopyLayerMultiParamNFLConcatTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = 
ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); ngraph::OutputVector concat_inputs{reshape1, reshape2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); auto copy1 = std::make_shared(reshape1); auto copy2 = std::make_shared(reshape2); ngraph::OutputVector concat_inputs{copy1, copy2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -361,34 +361,34 @@ TEST_P(InsertCopyLayerMultiParamMultiNFLConcatTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); ngraph::OutputVector concat_inputs{reshape1, reshape2}; - auto concat1 = std::make_shared(concat_inputs, axis); - auto concat2 = std::make_shared(concat_inputs, axis); - auto result1 = std::make_shared(concat1); - auto result2 = std::make_shared(concat2); - auto result3 = std::make_shared(reshape1); + auto concat1 = std::make_shared(concat_inputs, axis); + auto concat2 = std::make_shared(concat_inputs, axis); + auto result1 = std::make_shared(concat1); + auto result2 = std::make_shared(concat2); + auto result3 = std::make_shared(reshape1); m_func = std::make_shared(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); auto reshape1 = ov::op::util::reshapeTo(params, shape); auto reshape2 = ov::op::util::reshapeTo(params, shape); auto copy1 = std::make_shared(reshape1); auto copy2 = std::make_shared(reshape2); ngraph::OutputVector concat_inputs{copy1, copy2}; - auto concat1 = std::make_shared(concat_inputs, axis); - auto concat2 = std::make_shared(concat_inputs, axis); - auto result1 = std::make_shared(concat1); - auto result2 = std::make_shared(concat2); - auto result3 = std::make_shared(reshape1); + auto concat1 = std::make_shared(concat_inputs, axis); + auto concat2 = std::make_shared(concat_inputs, axis); + auto result1 = std::make_shared(concat1); + auto result2 = std::make_shared(concat2); + auto result3 = std::make_shared(reshape1); ref_func = std::make_shared(ngraph::ResultVector{result1, result2, result3}, ngraph::ParameterVector{params}, "Concat"); @@ -421,36 +421,36 @@ TEST_P(InsertCopyLayerMultiConstConcatTest, CompareWithRefs) { ngraph::Shape in_shape{10}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto constant = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto constant = std::make_shared(ngraph::element::i64, in_shape); 
ngraph::OutputVector concat_inputs{params, constant, constant}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto constant = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto constant = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(constant); ngraph::OutputVector concat_inputs{params, copy, constant}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func1 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto constant = std::make_shared(ngraph::element::i64, in_shape); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto constant = std::make_shared(ngraph::element::i64, in_shape); auto copy = std::make_shared(constant); ngraph::OutputVector concat_inputs{params, constant, copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func2 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -484,35 +484,35 @@ TEST_P(InsertCopyLayerMultiLayerConcatTest, CompareWithRefs) { ngraph::Shape in_shape{10}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); ngraph::OutputVector concat_inputs{add, add}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto copy = std::make_shared(add); ngraph::OutputVector concat_inputs{copy, add}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func1 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto copy = std::make_shared(add); ngraph::OutputVector concat_inputs{add, copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func2 = std::make_shared(ngraph::ResultVector{result}, 
ngraph::ParameterVector{params}, "Concat"); } @@ -547,41 +547,41 @@ TEST_P(InsertCopyLayerMultiLayerNFLConcatTest, CompareWithRefs) { ngraph::Shape in_shape = {1, 2, 4}; { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto reshape1 = ov::op::util::reshapeTo(add, shape); auto reshape2 = ov::op::util::reshapeTo(add, shape); ngraph::OutputVector concat_inputs{reshape1, reshape2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); m_func = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto reshape1 = ov::op::util::reshapeTo(add, shape); auto reshape_copy = std::make_shared(reshape1); auto reshape2 = ov::op::util::reshapeTo(add, shape); ngraph::OutputVector concat_inputs{reshape_copy, reshape2}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func1 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } { - auto params = std::make_shared(ngraph::element::i64, in_shape); - auto add = std::make_shared(params, params); + auto params = std::make_shared(ngraph::element::i64, in_shape); + auto add = std::make_shared(params, params); auto reshape1 = ov::op::util::reshapeTo(add, shape); auto reshape2 = ov::op::util::reshapeTo(add, shape); auto reshape_copy = std::make_shared(reshape2); ngraph::OutputVector concat_inputs{reshape1, reshape_copy}; - auto concat = std::make_shared(concat_inputs, axis); - auto result = std::make_shared(concat); + auto concat = std::make_shared(concat_inputs, axis); + auto result = std::make_shared(concat); ref_func2 = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat"); } @@ -617,12 +617,12 @@ TEST_P(InsertCopyLayerMultiParamMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = std::make_shared(ngraph::element::i64, in_shape); auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector{0}); - auto read_value = std::make_shared(init_value, variable); - auto add = std::make_shared(input, read_value); - auto result = std::make_shared(add); - auto assign = std::make_shared(input, variable); + auto read_value = std::make_shared(init_value, variable); + auto add = std::make_shared(input, read_value); + auto result = std::make_shared(add); + auto assign = std::make_shared(input, variable); assign->add_control_dependency(read_value); ngraph::ParameterVector params = {input}; @@ -634,14 +634,14 @@ TEST_P(InsertCopyLayerMultiParamMemoryTest, CompareWithRefs) { { auto variable = std::make_shared( ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name}); - auto input = std::make_shared(ngraph::element::i64, in_shape); + auto input = 
@@ -617,12 +617,12 @@ TEST_P(InsertCopyLayerMultiParamMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
             ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto add = std::make_shared<ngraph::opset8::Add>(input, read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(input, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto add = std::make_shared<ov::op::v1::Add>(input, read_value);
+        auto result = std::make_shared<ov::op::v0::Result>(add);
+        auto assign = std::make_shared<ov::op::v6::Assign>(input, variable);
         assign->add_control_dependency(read_value);

         ngraph::ParameterVector params = {input};
@@ -634,14 +634,14 @@ TEST_P(InsertCopyLayerMultiParamMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
             ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
         auto copy1 = std::make_shared<ov::intel_gna::op::Copy>(input);
-        auto add = std::make_shared<ngraph::opset8::Add>(copy1, read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto add = std::make_shared<ov::op::v1::Add>(copy1, read_value);
+        auto result = std::make_shared<ov::op::v0::Result>(add);
         auto copy2 = std::make_shared<ov::intel_gna::op::Copy>(input);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy2, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy2, variable);
         assign->add_control_dependency(read_value);

         ngraph::ParameterVector params = {input};
@@ -680,13 +680,13 @@ TEST_P(InsertCopyLayerMultiParamConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
             ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(input, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(input, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -697,15 +697,15 @@ TEST_P(InsertCopyLayerMultiParamConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
             ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto copy1 = std::make_shared<ov::intel_gna::op::Copy>(input);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy1, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy1, variable);
         assign->add_control_dependency(read_value);
         auto copy2 = std::make_shared<ov::intel_gna::op::Copy>(input);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{copy2, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{copy2, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -748,17 +748,17 @@ TEST_P(InsertCopyLayerMultiParamNFLConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape1 = ov::op::util::reshapeTo(input, shape1);
         auto reshape2 = ov::op::util::reshapeTo(input, shape2);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(reshape1, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(reshape1, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{reshape2, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{reshape2, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -769,19 +769,19 @@ TEST_P(InsertCopyLayerMultiParamNFLConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape1 = ov::op::util::reshapeTo(input, shape1);
         auto reshape2 = ov::op::util::reshapeTo(input, shape2);
         auto copy1 = std::make_shared<ov::intel_gna::op::Copy>(reshape1);
         auto copy2 = std::make_shared<ov::intel_gna::op::Copy>(reshape2);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy1, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy1, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{copy2, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{copy2, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -825,16 +825,16 @@ TEST_P(InsertCopyLayerMultiLayerConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(input, shape);
         auto crop = std::make_shared<ngraph::op::CropIE>(reshape, axes, dim, offset);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto mul = std::make_shared<ngraph::opset8::Multiply>(crop, read_value);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(crop, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto mul = std::make_shared<ov::op::v1::Multiply>(crop, read_value);
+        auto assign = std::make_shared<ov::op::v6::Assign>(crop, variable);
         assign->add_control_dependency(read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(mul);
+        auto result = std::make_shared<ov::op::v0::Result>(mul);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -845,17 +845,17 @@ TEST_P(InsertCopyLayerMultiLayerConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(input, shape);
         auto crop = std::make_shared<ngraph::op::CropIE>(reshape, axes, dim, offset);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(crop);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto mul = std::make_shared<ngraph::opset8::Multiply>(crop, read_value);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto mul = std::make_shared<ov::op::v1::Multiply>(crop, read_value);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(mul);
+        auto result = std::make_shared<ov::op::v0::Result>(mul);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -901,17 +901,17 @@ TEST_P(InsertCopyLayerCropMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(ov::op::util::VariableInfo{shape2, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape1 = ov::op::util::reshapeTo(input, shape1);
         auto crop = std::make_shared<ngraph::op::CropIE>(reshape1, axes, dim, offset);
         auto reshape2 = ov::op::util::reshapeTo(crop, shape2);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto add = std::make_shared<ngraph::opset8::Add>(reshape2, read_value);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(reshape2, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto add = std::make_shared<ov::op::v1::Add>(reshape2, read_value);
+        auto assign = std::make_shared<ov::op::v6::Assign>(reshape2, variable);
         assign->add_control_dependency(read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto result = std::make_shared<ov::op::v0::Result>(add);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -922,18 +922,18 @@ TEST_P(InsertCopyLayerCropMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(ov::op::util::VariableInfo{shape2, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape1 = ov::op::util::reshapeTo(input, shape1);
         auto crop = std::make_shared<ngraph::op::CropIE>(reshape1, axes, dim, offset);
         auto reshape2 = ov::op::util::reshapeTo(crop, shape2);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(reshape2);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape2, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto add = std::make_shared<ngraph::opset8::Add>(reshape2, read_value);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto add = std::make_shared<ov::op::v1::Add>(reshape2, read_value);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto result = std::make_shared<ov::op::v0::Result>(add);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -975,14 +975,14 @@ TEST_P(InsertCopyLayerCropNFLMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input, read_value}, axis);
-        auto axis_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
-        auto split = std::make_shared<ngraph::opset8::Split>(concat, axis_const, 2);
-        auto result = std::make_shared<ngraph::opset8::Result>(split);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(split, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input, read_value}, axis);
+        auto axis_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
+        auto split = std::make_shared<ov::op::v1::Split>(concat, axis_const, 2);
+        auto result = std::make_shared<ov::op::v0::Result>(split);
+        auto assign = std::make_shared<ov::op::v6::Assign>(split, variable);
         assign->add_control_dependency(read_value);

         ngraph::ParameterVector params = {input};
@@ -994,15 +994,15 @@ TEST_P(InsertCopyLayerCropNFLMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{in_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, in_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input, read_value}, axis);
-        auto axis_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
-        auto split = std::make_shared<ngraph::opset8::Split>(concat, axis_const, 2);
-        auto result = std::make_shared<ngraph::opset8::Result>(split);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input, read_value}, axis);
+        auto axis_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {0});
+        auto split = std::make_shared<ov::op::v1::Split>(concat, axis_const, 2);
+        auto result = std::make_shared<ov::op::v0::Result>(split);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(split);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);

         ngraph::ParameterVector params = {input};
@@ -1045,16 +1045,16 @@ TEST_P(InsertCopyLayerConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name});
-        auto input1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto input2 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input1, input2}, axis);
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input1, input2}, axis);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(concat, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(concat, variable);
         assign->add_control_dependency(read_value);
-        auto add = std::make_shared<ngraph::opset8::Add>(concat, read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto add = std::make_shared<ov::op::v1::Add>(concat, read_value);
+        auto result = std::make_shared<ov::op::v0::Result>(add);

         ngraph::ParameterVector params = {input1, input2};
         ngraph::ResultVector results = {result};
@@ -1065,17 +1065,17 @@ TEST_P(InsertCopyLayerConcatMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{out_shape, ngraph::element::i64, variable_name});
-        auto input1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto input2 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input1, input2}, axis);
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input1, input2}, axis);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(concat);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);
-        auto add = std::make_shared<ngraph::opset8::Add>(concat, read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto add = std::make_shared<ov::op::v1::Add>(concat, read_value);
+        auto result = std::make_shared<ov::op::v0::Result>(add);

         ngraph::ParameterVector params = {input1, input2};
         ngraph::ResultVector results = {result};
@@ -1120,17 +1120,17 @@ TEST_P(InsertCopyLayerConcatNFLMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(ov::op::util::VariableInfo{shape, ngraph::element::i64, variable_name});
-        auto input1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto input2 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input1, input2}, axis);
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input1, input2}, axis);
         auto reshape = ov::op::util::reshapeTo(concat, shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(reshape, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(reshape, variable);
         assign->add_control_dependency(read_value);
-        auto add = std::make_shared<ngraph::opset8::Add>(reshape, read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto add = std::make_shared<ov::op::v1::Add>(reshape, read_value);
+        auto result = std::make_shared<ov::op::v0::Result>(add);

         ngraph::ParameterVector params = {input1, input2};
         ngraph::ResultVector results = {result};
@@ -1141,18 +1141,18 @@ TEST_P(InsertCopyLayerConcatNFLMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(ov::op::util::VariableInfo{shape, ngraph::element::i64, variable_name});
-        auto input1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto input2 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{input1, input2}, axis);
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto input2 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{input1, input2}, axis);
         auto reshape = ov::op::util::reshapeTo(concat, shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(reshape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);
-        auto add = std::make_shared<ngraph::opset8::Add>(reshape, read_value);
-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto add = std::make_shared<ov::op::v1::Add>(reshape, read_value);
+        auto result = std::make_shared<ov::op::v0::Result>(add);

         ngraph::ParameterVector params = {input1, input2};
         ngraph::ResultVector results = {result};
@@ -1192,16 +1192,16 @@ TEST_P(InsertCopyLayerSplitMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 1, axis);
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(split, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(split, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{split, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{split, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -1212,17 +1212,17 @@ TEST_P(InsertCopyLayerSplitMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 1, axis);
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(split);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{split, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{split, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -1265,17 +1265,17 @@ TEST_P(InsertCopyLayerSplitNFLMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 2, axis);
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto reshape = ov::op::util::reshapeTo(split, shape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(reshape, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(reshape, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{split, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{split, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -1286,18 +1286,18 @@ TEST_P(InsertCopyLayerSplitNFLMemoryTest, CompareWithRefs) {
     {
         auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{allowed_shape, ngraph::element::i64, variable_name});
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(input, ngraph::element::i64, 2, axis);
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto reshape = ov::op::util::reshapeTo(split, shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(reshape);
         auto init_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{0});
-        auto read_value = std::make_shared<ngraph::opset8::ReadValue>(init_value, variable);
-        auto assign = std::make_shared<ngraph::opset8::Assign>(copy, variable);
+        auto read_value = std::make_shared<ov::op::v6::ReadValue>(init_value, variable);
+        auto assign = std::make_shared<ov::op::v6::Assign>(copy, variable);
         assign->add_control_dependency(read_value);
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{split, read_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{split, read_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);

         ngraph::ParameterVector params = {input};
         ngraph::ResultVector results = {result};
@@ -1341,24 +1341,24 @@ TEST_P(InsertCopyLayerCropConcatTest, CompareWithRefs) {
     ngraph::Shape out_shape = {1, 1, 2, 2};
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
         auto crop = std::make_shared<ngraph::op::CropIE>(reshape, axes, dim, offset);
         auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{1});
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{crop, const_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{crop, const_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);
         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat");
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
         auto crop = std::make_shared<ngraph::op::CropIE>(reshape, axes, dim, offset);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(crop);
         auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, out_shape, std::vector<size_t>{1});
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{copy, const_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{copy, const_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);
         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat");
     }
@@ -1393,19 +1393,19 @@ TEST_P(InsertCopyLayerNonfuncTest, CompareWithRefs) {
     ngraph::Shape in_shape = {1, 2, 4};
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
-        auto result = std::make_shared<ngraph::opset8::Result>(reshape);
+        auto result = std::make_shared<ov::op::v0::Result>(reshape);
         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                     ngraph::ParameterVector{params},
                                                     "nonfunc");
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(params);
         auto reshape = ov::op::util::reshapeTo(copy, shape);
-        auto result = std::make_shared<ngraph::opset8::Result>(reshape);
+        auto result = std::make_shared<ov::op::v0::Result>(reshape);
         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                       ngraph::ParameterVector{params},
                                                       "nonfunc");
@@ -1441,23 +1441,23 @@
 TEST_P(InsertCopyLayerNonfuncTwoSubgraphsTest, CompareWithRefs) {
     ngraph::Shape in_shape = {1, 2, 4};
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape1 = ov::op::util::reshapeTo(params, shape);
         auto reshape2 = ov::op::util::reshapeTo(params, shape);
-        auto result1 = std::make_shared<ngraph::opset8::Result>(reshape1);
-        auto result2 = std::make_shared<ngraph::opset8::Result>(reshape2);
+        auto result1 = std::make_shared<ov::op::v0::Result>(reshape1);
+        auto result2 = std::make_shared<ov::op::v0::Result>(reshape2);
         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                     ngraph::ParameterVector{params},
                                                     "nonfunc");
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(params);
         auto reshape1 = ov::op::util::reshapeTo(copy, shape);
         auto reshape2 = ov::op::util::reshapeTo(copy, shape);
-        auto result1 = std::make_shared<ngraph::opset8::Result>(reshape1);
-        auto result2 = std::make_shared<ngraph::opset8::Result>(reshape2);
+        auto result1 = std::make_shared<ov::op::v0::Result>(reshape1);
+        auto result2 = std::make_shared<ov::op::v0::Result>(reshape2);
         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                       ngraph::ParameterVector{params},
                                                       "nonfunc");
@@ -1493,21 +1493,21 @@ TEST_P(InsertCopyLayerNonfuncTwoResultsTest, CompareWithRefs) {
     ngraph::Shape in_shape = {1, 2, 4};
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
-        auto result1 = std::make_shared<ngraph::opset8::Result>(reshape);
-        auto result2 = std::make_shared<ngraph::opset8::Result>(reshape);
+        auto result1 = std::make_shared<ov::op::v0::Result>(reshape);
+        auto result2 = std::make_shared<ov::op::v0::Result>(reshape);
         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                     ngraph::ParameterVector{params},
                                                     "nonfunc");
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(params);
         auto reshape = ov::op::util::reshapeTo(copy, shape);
-        auto result1 = std::make_shared<ngraph::opset8::Result>(reshape);
-        auto result2 = std::make_shared<ngraph::opset8::Result>(reshape);
+        auto result1 = std::make_shared<ov::op::v0::Result>(reshape);
+        auto result2 = std::make_shared<ov::op::v0::Result>(reshape);
         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                       ngraph::ParameterVector{params},
                                                       "nonfunc");
@@ -1545,13 +1545,13 @@ TEST_P(InsertCopyLayerNFLBranchTest, CompareWithRefs) {
     ngraph::Shape in_shape = {1, 2, 4};
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
         auto reshape2 = ov::op::util::reshapeTo(reshape, shape);
-        auto result = std::make_shared<ngraph::opset8::Result>(reshape2);
+        auto result = std::make_shared<ov::op::v0::Result>(reshape2);

-        auto relu = std::make_shared<ngraph::opset8::Relu>(reshape);
-        auto result_relu = std::make_shared<ngraph::opset8::Result>(relu);
+        auto relu = std::make_shared<ov::op::v0::Relu>(reshape);
+        auto result_relu = std::make_shared<ov::op::v0::Result>(relu);

         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result, result_relu},
                                                     ngraph::ParameterVector{params},
@@ -1559,14 +1559,14 @@ TEST_P(InsertCopyLayerNFLBranchTest, CompareWithRefs) {
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(reshape);
         auto reshape2 = ov::op::util::reshapeTo(copy, shape);
-        auto result = std::make_shared<ngraph::opset8::Result>(reshape2);
+        auto result = std::make_shared<ov::op::v0::Result>(reshape2);

-        auto relu = std::make_shared<ngraph::opset8::Relu>(reshape);
-        auto result_relu = std::make_shared<ngraph::opset8::Result>(relu);
+        auto relu = std::make_shared<ov::op::v0::Relu>(reshape);
+        auto result_relu = std::make_shared<ov::op::v0::Result>(relu);

         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result, result_relu},
                                                       ngraph::ParameterVector{params},
@@ -1605,13 +1605,13 @@ TEST_P(InsertCopyLayerNFLvsFLSubgraphTest, CompareWithRefs) {
     ngraph::Shape in_shape = {1, 2, 4};
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto reshape = ov::op::util::reshapeTo(params, shape);
-        auto result = std::make_shared<ngraph::opset8::Result>(reshape);
+        auto result = std::make_shared<ov::op::v0::Result>(reshape);

-        auto relu = std::make_shared<ngraph::opset8::Relu>(params);
+        auto relu = std::make_shared<ov::op::v0::Relu>(params);
         auto reshape2 = ov::op::util::reshapeTo(relu, shape);
-        auto result_relu = std::make_shared<ngraph::opset8::Result>(reshape2);
+        auto result_relu = std::make_shared<ov::op::v0::Result>(reshape2);

         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result, result_relu},
                                                     ngraph::ParameterVector{params},
@@ -1619,14 +1619,14 @@ TEST_P(InsertCopyLayerNFLvsFLSubgraphTest, CompareWithRefs) {
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, in_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, in_shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(params);
         auto reshape = ov::op::util::reshapeTo(copy, shape);
-        auto result = std::make_shared<ngraph::opset8::Result>(reshape);
+        auto result = std::make_shared<ov::op::v0::Result>(reshape);

-        auto relu = std::make_shared<ngraph::opset8::Relu>(params);
+        auto relu = std::make_shared<ov::op::v0::Relu>(params);
         auto reshape2 = ov::op::util::reshapeTo(relu, shape);
-        auto result_relu = std::make_shared<ngraph::opset8::Result>(reshape2);
+        auto result_relu = std::make_shared<ov::op::v0::Result>(reshape2);

         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result, result_relu},
                                                       ngraph::ParameterVector{params},
@@ -1664,28 +1664,28 @@ TEST_P(InsertCopyLayerSplitNFLConcatTest, CompareWithRefs) {
     size_t axis = 0;
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, input_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
         OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, 1, axis);
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto reshape = ov::op::util::reshapeTo(split->output(0), shape);
         auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector<size_t>{1});
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{reshape, const_value}, axis);
-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{reshape, const_value}, axis);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);
         m_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat");
     }
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, input_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
         OPENVINO_SUPPRESS_DEPRECATED_START
         auto split = ngraph::builder::makeSplit(params, ngraph::element::i64, 1, axis);
         OPENVINO_SUPPRESS_DEPRECATED_END
         auto reshape = ov::op::util::reshapeTo(split->output(0), shape);
         auto copy = std::make_shared<ov::intel_gna::op::Copy>(reshape);
         auto const_value = ngraph::builder::makeConstant(ngraph::element::i64, shape, std::vector<size_t>{1});
-        auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{copy, const_value}, axis);
+        auto concat = std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector{copy, const_value}, axis);

-        auto result = std::make_shared<ngraph::opset8::Result>(concat);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);
         ref_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Concat");
     }
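[Editor's note, not part of the patch: the memory tests above all revolve around the ReadValue/Assign pair over a single variable, with an explicit control dependency so the state is read before it is overwritten. A minimal standalone sketch of that pattern, assuming the ov::op::v6 state ops (the versioned types behind opset8::ReadValue/Assign); make_stateful_model is a hypothetical helper, not part of the test suite:

#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/util/variable.hpp"

// Read the previous state, combine it with the input, and write the input
// back as the new state.
std::shared_ptr<ov::Model> make_stateful_model() {
    ov::Shape shape{1, 10};
    auto var = std::make_shared<ov::op::util::Variable>(
        ov::op::util::VariableInfo{shape, ov::element::i64, "state"});
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, shape);
    auto init = ov::op::v0::Constant::create(ov::element::i64, shape, {0});
    auto read = std::make_shared<ov::op::v6::ReadValue>(init, var);
    auto add = std::make_shared<ov::op::v1::Add>(input, read);
    auto assign = std::make_shared<ov::op::v6::Assign>(input, var);
    assign->add_control_dependency(read);  // keep read-before-write ordering
    return std::make_shared<ov::Model>(ov::ResultVector{std::make_shared<ov::op::v0::Result>(add)},
                                       ov::SinkVector{assign},
                                       ov::ParameterVector{input});
}]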
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp
index f23acb81887e89..8b4f021e8cb67b 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_reshape_around_matmul.cpp
@@ -5,9 +5,9 @@
 #include
 #include
-#include
 #include
 #include
+#include
 #include
 #include "common_test_utils/ov_test_utils.hpp"
@@ -23,8 +23,8 @@ struct InsertReshapeAroundMatmulTest {
                                                   const ngraph::Shape& constant_shape) {
         std::vector<int64_t> data(ngraph::shape_size(constant_shape));
         std::iota(std::begin(data), std::end(data), 1);
-        auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, constant_shape, data);
-        return std::make_shared<ngraph::opset8::Add>(input, constant);
+        auto constant = ov::op::v0::Constant::create(ngraph::element::i64, constant_shape, data);
+        return std::make_shared<ov::op::v1::Add>(input, constant);
     }

     static std::shared_ptr<ngraph::Node> CreateMatmul(std::shared_ptr<ngraph::Node> input,
@@ -32,9 +32,9 @@ struct InsertReshapeAroundMatmulTest {
                                                       const ngraph::Shape& permutation_shape) {
         std::vector<int64_t> data(ngraph::shape_size(matmul_constant_shape));
         std::iota(std::begin(data), std::end(data), 1);
-        auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, matmul_constant_shape, data);
+        auto constant = ov::op::v0::Constant::create(ngraph::element::i64, matmul_constant_shape, data);
         std::shared_ptr<ngraph::Node> node;
-        node = std::make_shared<ngraph::opset8::MatMul>(input, constant);
+        node = std::make_shared<ov::op::v0::MatMul>(input, constant);

         if (ADD) {
             std::vector<size_t> add_constant_shape(2, 1);
@@ -52,28 +52,28 @@ struct InsertReshapeAroundMatmulTest {
             }

             auto constant_add =
-                ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{add_constant_shape}, data);
+                ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{add_constant_shape}, data);
             if (ADD_FIRST_INPUT_NOT_CONSTANT) {
-                node = std::make_shared<ngraph::opset8::Add>(node, constant_add);
+                node = std::make_shared<ov::op::v1::Add>(node, constant_add);
             } else {
-                node = std::make_shared<ngraph::opset8::Add>(constant_add, node);
+                node = std::make_shared<ov::op::v1::Add>(constant_add, node);
             }
         }

         if (FQ) {
-            node = std::make_shared<ngraph::opset8::FakeQuantize>(
+            node = std::make_shared<ov::op::v0::FakeQuantize>(
                 node,
-                ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-                ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {0.1}),
-                ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {-0.1}),
-                ngraph::opset8::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {-0.1}),
+                ov::op::v0::Constant::create(ngraph::element::f32, {1}, {0.1}),
                 255);
         }

         if (TRANSPOSE) {
-            node = std::make_shared<ngraph::opset8::Transpose>(
+            node = std::make_shared<ov::op::v1::Transpose>(
                 node,
-                ngraph::opset8::Constant::create(ngraph::element::i64, {permutation_shape.size()}, permutation_shape));
+                ov::op::v0::Constant::create(ngraph::element::i64, {permutation_shape.size()}, permutation_shape));
         }

         return node;
@@ -82,11 +82,11 @@ struct InsertReshapeAroundMatmulTest {
     static std::shared_ptr<ngraph::Function> CreateFunction(const ngraph::Shape& input_shape,
                                                             const ngraph::Shape& matmul_constant_shape,
                                                             const ngraph::Shape& permutation_shape = ngraph::Shape()) {
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, input_shape);
-        auto before = std::make_shared<ngraph::opset8::Relu>(input);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
+        auto before = std::make_shared<ov::op::v0::Relu>(input);
         auto matmul = CreateMatmul(before, matmul_constant_shape, permutation_shape);
-        auto after = std::make_shared<ngraph::opset8::Relu>(matmul);
-        return std::make_shared<ngraph::Function>(ngraph::ResultVector{std::make_shared<ngraph::opset8::Result>(after)},
+        auto after = std::make_shared<ov::op::v0::Relu>(matmul);
+        return std::make_shared<ngraph::Function>(ngraph::ResultVector{std::make_shared<ov::op::v0::Result>(after)},
                                                   ngraph::ParameterVector{input});
     }
@@ -96,19 +96,19 @@ struct InsertReshapeAroundMatmulTest {
                                                                      const ngraph::Shape& matmul_constant_shape,
                                                                      const ngraph::Shape& reshape_after_shape,
                                                                      const ngraph::Shape& permutation_shape = ngraph::Shape()) {
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, input_shape);
-        auto before = std::make_shared<ngraph::opset8::Relu>(input);
-        auto reshape_before_constant = ngraph::opset8::Constant::create(ngraph::element::i64,
-                                                                        ngraph::Shape{reshape_before_shape.size()},
-                                                                        reshape_before_shape);
-        auto reshape_before = std::make_shared<ngraph::opset8::Reshape>(before, reshape_before_constant, false);
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input_shape);
+        auto before = std::make_shared<ov::op::v0::Relu>(input);
+        auto reshape_before_constant = ov::op::v0::Constant::create(ngraph::element::i64,
+                                                                    ngraph::Shape{reshape_before_shape.size()},
+                                                                    reshape_before_shape);
+        auto reshape_before = std::make_shared<ov::op::v1::Reshape>(before, reshape_before_constant, false);
         auto matmul = CreateMatmul(reshape_before, matmul_constant_shape, permutation_shape);
-        auto reshape_after_constant = ngraph::opset8::Constant::create(ngraph::element::i64,
-                                                                       ngraph::Shape{reshape_after_shape.size()},
-                                                                       reshape_after_shape);
-        auto reshape_after = std::make_shared<ngraph::opset8::Reshape>(matmul, reshape_after_constant, false);
-        auto after = std::make_shared<ngraph::opset8::Relu>(reshape_after);
-        return std::make_shared<ngraph::Function>(ngraph::ResultVector{std::make_shared<ngraph::opset8::Result>(after)},
+        auto reshape_after_constant = ov::op::v0::Constant::create(ngraph::element::i64,
+                                                                   ngraph::Shape{reshape_after_shape.size()},
+                                                                   reshape_after_shape);
+        auto reshape_after = std::make_shared<ov::op::v1::Reshape>(matmul, reshape_after_constant, false);
+        auto after = std::make_shared<ov::op::v0::Relu>(reshape_after);
+        return std::make_shared<ngraph::Function>(ngraph::ResultVector{std::make_shared<ov::op::v0::Result>(after)},
                                                   ngraph::ParameterVector{input});
     }
 };  // struct InsertReshapeAroundMatmulTest
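[Editor's note, not part of the patch: these fixtures drive a transformation that wraps a MatMul in explicit reshapes so the GNA plugin sees a 2D multiply. A rough standalone sketch of the before/after shape bookkeeping, with hypothetical shapes not taken from the test data:

#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"

// Flatten a [1, 2, 8] input to [2, 8], multiply by an [8, 4] constant, and
// restore the leading dimension afterwards: [1, 2, 4].
int main() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2, 8});
    auto to_2d = ov::op::v0::Constant::create(ov::element::i64, {2}, std::vector<int64_t>{2, 8});
    auto reshape_before = std::make_shared<ov::op::v1::Reshape>(input, to_2d, false);
    auto weights = ov::op::v0::Constant::create(ov::element::f32, {8, 4}, std::vector<float>(32, 1.0f));
    auto matmul = std::make_shared<ov::op::v0::MatMul>(reshape_before, weights);
    auto to_3d = ov::op::v0::Constant::create(ov::element::i64, {3}, std::vector<int64_t>{1, 2, 4});
    auto reshape_after = std::make_shared<ov::op::v1::Reshape>(matmul, to_3d, false);
    return reshape_after->get_output_shape(0) == ov::Shape({1, 2, 4}) ? 0 : 1;
}]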
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp
index 5e068db48219c5..a1ffcc75fe78bb 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_insert_transpose_after_convolution_or_pooling.cpp
@@ -10,6 +10,7 @@
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/insert_transpose_after_convolution_or_pooling.hpp"

 namespace testing {
@@ -19,29 +20,29 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartConvolution) {
     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});

-        auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1, 1},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::CoordinateDiff{0, 1},
-                                                                                   ngraph::Strides{1, 1});
+        auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1, 1},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::CoordinateDiff{0, 1},
+                                                                               ngraph::Strides{1, 1});

-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(convolution_operation, new_shape, true);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(convolution_operation, new_shape, true);

         auto weights_next_convolution =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
-        auto next_convolution_operation = std::make_shared<ngraph::opset7::Convolution>(reshape_operation,
-                                                                                        weights_next_convolution,
-                                                                                        ngraph::Strides{1, 1},
-                                                                                        ngraph::CoordinateDiff{0, 0},
-                                                                                        ngraph::CoordinateDiff{0, 1},
-                                                                                        ngraph::Strides{1, 1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(next_convolution_operation);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
+        auto next_convolution_operation = std::make_shared<ov::op::v1::Convolution>(reshape_operation,
+                                                                                    weights_next_convolution,
+                                                                                    ngraph::Strides{1, 1},
+                                                                                    ngraph::CoordinateDiff{0, 0},
+                                                                                    ngraph::CoordinateDiff{0, 1},
+                                                                                    ngraph::Strides{1, 1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(next_convolution_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                   ngraph::ParameterVector{input_params_convolution});
         ngraph::pass::Manager m;
@@ -53,37 +54,36 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartConvolution) {
     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});

-        auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1, 1},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::CoordinateDiff{0, 1},
-                                                                                   ngraph::Strides{1, 1});
+        auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3, 3, 1, 2}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1, 1},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::CoordinateDiff{0, 1},
+                                                                               ngraph::Strides{1, 1});

-        auto new_shape_out = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3});
-        auto reshape_out_operation =
-            std::make_shared<ngraph::opset7::Reshape>(convolution_operation, new_shape_out, false);
+        auto new_shape_out = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3});
+        auto reshape_out_operation = std::make_shared<ov::op::v1::Reshape>(convolution_operation, new_shape_out, false);

-        auto transpose = std::make_shared<ngraph::opset7::Transpose>(
+        auto transpose = std::make_shared<ov::op::v1::Transpose>(
             reshape_out_operation,
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));

-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(transpose, new_shape, true);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(transpose, new_shape, true);

         auto weights_next_convolution =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
-        auto next_convolution_operation = std::make_shared<ngraph::opset7::Convolution>(reshape_operation,
-                                                                                        weights_next_convolution,
-                                                                                        ngraph::Strides{1, 1},
-                                                                                        ngraph::CoordinateDiff{0, 0},
-                                                                                        ngraph::CoordinateDiff{0, 1},
-                                                                                        ngraph::Strides{1, 1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(next_convolution_operation);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
+        auto next_convolution_operation = std::make_shared<ov::op::v1::Convolution>(reshape_operation,
+                                                                                    weights_next_convolution,
+                                                                                    ngraph::Strides{1, 1},
+                                                                                    ngraph::CoordinateDiff{0, 0},
+                                                                                    ngraph::CoordinateDiff{0, 1},
+                                                                                    ngraph::Strides{1, 1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(next_convolution_operation);
         reference_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                             ngraph::ParameterVector{input_params_convolution});
     }
@@ -98,28 +98,27 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartMaxPool) {
     std::shared_ptr<ngraph::Function> func(nullptr), reference_func(nullptr);

     {
-        auto input_params =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});

-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(input_params,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{0, 0},
-                                                                            ngraph::Shape{0, 1},
-                                                                            ngraph::Shape{1, 2});
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(input_params,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{0, 0},
+                                                                        ngraph::Shape{0, 1},
+                                                                        ngraph::Shape{1, 2});

-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(max_pool_operation, new_shape, true);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(max_pool_operation, new_shape, true);

         auto weights_next_convolution =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
-        auto next_convolution_operation = std::make_shared<ngraph::opset7::Convolution>(reshape_operation,
-                                                                                        weights_next_convolution,
-                                                                                        ngraph::Strides{1, 1},
-                                                                                        ngraph::CoordinateDiff{0, 0},
-                                                                                        ngraph::CoordinateDiff{0, 1},
-                                                                                        ngraph::Strides{1, 1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(next_convolution_operation);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
+        auto next_convolution_operation = std::make_shared<ov::op::v1::Convolution>(reshape_operation,
+                                                                                    weights_next_convolution,
+                                                                                    ngraph::Strides{1, 1},
+                                                                                    ngraph::CoordinateDiff{0, 0},
+                                                                                    ngraph::CoordinateDiff{0, 1},
+                                                                                    ngraph::Strides{1, 1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(next_convolution_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
         ngraph::pass::Manager m;
         m.register_pass<ov::intel_gna::pass::InsertTransposeAfterConvOrPool>();
@@ -129,36 +128,34 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestStartMaxPool) {
     }

     {
-        auto input_params =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 1, 64});

-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(input_params,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{0, 0},
-                                                                            ngraph::Shape{0, 1},
-                                                                            ngraph::Shape{1, 2});
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(input_params,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{0, 0},
+                                                                        ngraph::Shape{0, 1},
+                                                                        ngraph::Shape{1, 2});

-        auto new_shape_out = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3});
-        auto reshape_out_operation =
-            std::make_shared<ngraph::opset7::Reshape>(max_pool_operation, new_shape_out, false);
+        auto new_shape_out = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 64, 1, 3});
+        auto reshape_out_operation = std::make_shared<ov::op::v1::Reshape>(max_pool_operation, new_shape_out, false);
-        auto transpose = std::make_shared<ngraph::opset7::Transpose>(
+        auto transpose = std::make_shared<ov::op::v1::Transpose>(
             reshape_out_operation,
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {0, 3, 1, 2}));

-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(transpose, new_shape, true);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 1, 1, 3 * 64});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(transpose, new_shape, true);

         auto weights_next_convolution =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
-        auto next_convolution_operation = std::make_shared<ngraph::opset7::Convolution>(reshape_operation,
-                                                                                        weights_next_convolution,
-                                                                                        ngraph::Strides{1, 1},
-                                                                                        ngraph::CoordinateDiff{0, 0},
-                                                                                        ngraph::CoordinateDiff{0, 1},
-                                                                                        ngraph::Strides{1, 1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(next_convolution_operation);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 1, 3 * 63}, {1});
+        auto next_convolution_operation = std::make_shared<ov::op::v1::Convolution>(reshape_operation,
+                                                                                    weights_next_convolution,
+                                                                                    ngraph::Strides{1, 1},
+                                                                                    ngraph::CoordinateDiff{0, 0},
+                                                                                    ngraph::CoordinateDiff{0, 1},
+                                                                                    ngraph::Strides{1, 1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(next_convolution_operation);
         reference_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                             ngraph::ParameterVector{input_params});
     }
@@ -174,29 +171,29 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestInputRank3) {
     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 64});
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 64});

-        auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1},
-                                                                                   ngraph::CoordinateDiff{0},
-                                                                                   ngraph::CoordinateDiff{1},
-                                                                                   ngraph::Strides{1});
+        auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1},
+                                                                               ngraph::CoordinateDiff{0},
+                                                                               ngraph::CoordinateDiff{1},
+                                                                               ngraph::Strides{1});

-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(convolution_operation, new_shape, true);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(convolution_operation, new_shape, true);

         auto weights_next_convolution =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1});
-        auto next_convolution_operation = std::make_shared<ngraph::opset7::Convolution>(reshape_operation,
-                                                                                        weights_next_convolution,
-                                                                                        ngraph::Strides{1},
-                                                                                        ngraph::CoordinateDiff{0},
-                                                                                        ngraph::CoordinateDiff{1},
-                                                                                        ngraph::Strides{1});
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1});
+        auto next_convolution_operation = std::make_shared<ov::op::v1::Convolution>(reshape_operation,
+                                                                                    weights_next_convolution,
+                                                                                    ngraph::Strides{1},
+                                                                                    ngraph::CoordinateDiff{0},
+                                                                                    ngraph::CoordinateDiff{1},
+                                                                                    ngraph::Strides{1});

-        auto result = std::make_shared<ngraph::opset7::Result>(next_convolution_operation);
+        auto result = std::make_shared<ov::op::v0::Result>(next_convolution_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                  ngraph::ParameterVector{input_params_convolution});
@@ -209,37 +206,36 @@ TEST(TransformationTests, InsertTransposeAfterConvOrPoolTestInputRank3) {
     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 64});
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 64});

-        auto weights = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1},
-                                                                                   ngraph::CoordinateDiff{0},
-                                                                                   ngraph::CoordinateDiff{1},
-                                                                                   ngraph::Strides{1});
+        auto weights = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2, 3, 2}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1},
+                                                                               ngraph::CoordinateDiff{0},
+                                                                               ngraph::CoordinateDiff{1},
+                                                                               ngraph::Strides{1});

-        auto new_shape_out = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 64, 2});
-        auto reshape_out_operation =
-            std::make_shared<ngraph::opset7::Reshape>(convolution_operation, new_shape_out, false);
+        auto new_shape_out = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 64, 2});
+        auto reshape_out_operation = std::make_shared<ov::op::v1::Reshape>(convolution_operation, new_shape_out, false);

-        auto transpose = std::make_shared<ngraph::opset7::Transpose>(
+        auto transpose = std::make_shared<ov::op::v1::Transpose>(
             reshape_out_operation,
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1}));
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {0, 2, 1}));

-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(transpose, new_shape, true);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 1, 128});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(transpose, new_shape, true);

         auto weights_next_convolution =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1});
-        auto next_convolution_operation = std::make_shared<ngraph::opset7::Convolution>(reshape_operation,
-                                                                                        weights_next_convolution,
-                                                                                        ngraph::Strides{1},
-                                                                                        ngraph::CoordinateDiff{0},
-                                                                                        ngraph::CoordinateDiff{1},
-                                                                                        ngraph::Strides{1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(next_convolution_operation);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1, 1, 63}, {1});
+        auto next_convolution_operation = std::make_shared<ov::op::v1::Convolution>(reshape_operation,
+                                                                                    weights_next_convolution,
+                                                                                    ngraph::Strides{1},
+                                                                                    ngraph::CoordinateDiff{0},
+                                                                                    ngraph::CoordinateDiff{1},
+                                                                                    ngraph::Strides{1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(next_convolution_operation);
         reference_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                             ngraph::ParameterVector{input_params_convolution});
     }
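[Editor's note, not part of the patch: the reference graphs above show what the pass inserts after a convolution or pooling: a reshape that exposes the layout, a transpose that reorders the axes, and a reshape back to the flat layout the next convolution expects. A hedged standalone sketch using the {1, 3, 1, 64} shape from these tests; the variable names are mine, not the test's:

#include <memory>
#include <vector>
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/transpose.hpp"

int main() {
    // Stand-in for the convolution output in the reference graphs above.
    auto conv_out = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 1, 64});
    // Expose the layout to be corrected: [1, 64, 1, 3].
    auto shape_out = ov::op::v0::Constant::create(ov::element::i64, {4}, std::vector<int64_t>{1, 64, 1, 3});
    auto reshape_out = std::make_shared<ov::op::v1::Reshape>(conv_out, shape_out, false);
    // Reorder with {0, 3, 1, 2}: [1, 64, 1, 3] becomes [1, 3, 64, 1].
    auto order = ov::op::v0::Constant::create(ov::element::i64, {4}, std::vector<int64_t>{0, 3, 1, 2});
    auto transpose = std::make_shared<ov::op::v1::Transpose>(reshape_out, order);
    // Flatten for the next convolution: [1, 1, 1, 192].
    auto flat = ov::op::v0::Constant::create(ov::element::i64, {4}, std::vector<int64_t>{1, 1, 1, 3 * 64});
    auto reshape_flat = std::make_shared<ov::op::v1::Reshape>(transpose, flat, true);
    return reshape_flat->get_output_shape(0) == ov::Shape({1, 1, 1, 192}) ? 0 : 1;
}]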
std::shared_ptr CreatePoolConvFunction(const ngraph::Shape& in 1, 1, std::accumulate(std::begin(pool_out_shape), std::end(pool_out_shape), size_t{1}, std::multiplies())}; - auto new_shape_const = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, new_shape); - auto reshape_operation = std::make_shared(max_pool_operation, new_shape_const, true); - - auto weights_next_convolution = ngraph::opset7::Constant::create(ngraph::element::i64, new_shape, {1}); - auto next_convolution_operation = std::make_shared(reshape_operation, - weights_next_convolution, - ngraph::Strides{1, 1}, - ngraph::CoordinateDiff{0, 0}, - ngraph::CoordinateDiff{0, 1}, - ngraph::Strides{1, 1}); - - auto result = std::make_shared(next_convolution_operation); + auto new_shape_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, new_shape); + auto reshape_operation = std::make_shared(max_pool_operation, new_shape_const, true); + + auto weights_next_convolution = ov::op::v0::Constant::create(ngraph::element::i64, new_shape, {1}); + auto next_convolution_operation = std::make_shared(reshape_operation, + weights_next_convolution, + ngraph::Strides{1, 1}, + ngraph::CoordinateDiff{0, 0}, + ngraph::CoordinateDiff{0, 1}, + ngraph::Strides{1, 1}); + + auto result = std::make_shared(next_convolution_operation); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp index 58f493a9f15659..24b846f2ef6ae6 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_pwl.cpp @@ -11,6 +11,7 @@ #include "common_test_utils/data_utils.hpp" #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset9.hpp" #include "transformations/pwl_approximation.hpp" using namespace ov::intel_gna::common; @@ -21,7 +22,7 @@ template struct Function {}; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return 0.5 * (1.0 + std::tanh(x / 2.0)); @@ -30,7 +31,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return std::tanh(x); @@ -39,7 +40,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return x / (1.0 + std::abs(x)); @@ -48,7 +49,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return std::log(x); @@ -57,7 +58,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function() { return [](const double x) { return std::exp(x); @@ -66,7 +67,7 @@ struct Function { }; template <> -struct Function { +struct Function { static std::function get_function(double exp) { return [exp](const double x) { return std::pow(x, exp); @@ -75,13 +76,12 @@ struct Function { }; template -using Enable = - std::enable_if::value || std::is_same::value || - std::is_same::value || - std::is_same::value || std::is_same::value, - int>; +using Enable = std::enable_if::value || std::is_same::value || + std::is_same::value || + std::is_same::value || std::is_same::value, + int>; template -using EnableWithExtraArg = std::enable_if::value, int>; +using EnableWithExtraArg = std::enable_if::value, int>; 
template class GnaPWlTestsFixture { @@ -152,9 +152,9 @@ template template inline std::shared_ptr GnaPWlTestsFixture::create_activation_function( const ngraph::Shape& input_shape) { - auto input_params = std::make_shared(ngraph::element::f32, input_shape); + auto input_params = std::make_shared(ngraph::element::f32, input_shape); auto f = std::make_shared(input_params); - auto result = std::make_shared(f); + auto result = std::make_shared(f); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -163,10 +163,10 @@ template inline std::shared_ptr GnaPWlTestsFixture::create_activation_function( const ngraph::Shape& input_shape, double exp) { - auto input_params = std::make_shared(ngraph::element::f32, input_shape); - auto exponents = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {exp}); + auto input_params = std::make_shared(ngraph::element::f32, input_shape); + auto exponents = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {exp}); auto f = std::make_shared(input_params, exponents); - auto result = std::make_shared(f); + auto result = std::make_shared(f); return std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params}); } @@ -242,37 +242,37 @@ inline void GnaPWlTestsFixture::validate_results(const std::vector& in } TEST(GnaPwlTest, Sigmoid) { - GnaPWlTestsFixture test_instance({1, 100}, -10.0, 10.0, 1.0); + GnaPWlTestsFixture test_instance({1, 100}, -10.0, 10.0, 1.0); test_instance.run(); } TEST(GnaPwlTest, Tanh) { - GnaPWlTestsFixture test_instance({1, 32}, -5.0, 5.0, 1.0); + GnaPWlTestsFixture test_instance({1, 32}, -5.0, 5.0, 1.0); test_instance.run(); } TEST(GnaPwlTest, Exp) { - GnaPWlTestsFixture test_instance({1, 32}, -std::log2(INT16_MAX), std::log10(INT16_MAX), 1.0); + GnaPWlTestsFixture test_instance({1, 32}, -std::log2(INT16_MAX), std::log10(INT16_MAX), 1.0); test_instance.run(); } TEST(GnaPwlTest, SoftSign) { - GnaPWlTestsFixture test_instance({1, 32}, -10, 10, 1.0); + GnaPWlTestsFixture test_instance({1, 32}, -10, 10, 1.0); test_instance.run(); } TEST(GnaPwlTest, Log) { - GnaPWlTestsFixture test_instance({1, 32}, 0.001, 2981, 1.0); + GnaPWlTestsFixture test_instance({1, 32}, 0.001, 2981, 1.0); test_instance.run(); } TEST(GnaPwlTest, Power) { for (float exp = 1; exp <= 2.2; exp += 0.1) { - GnaPWlTestsFixture test_instance({1, 32}, - AreFpEq(std::fmod(exp, 1.0), 0.0) ? -16 : 0, - 16, - exp, - 1.0); + GnaPWlTestsFixture test_instance({1, 32}, + AreFpEq(std::fmod(exp, 1.0), 0.0) ? 
-16 : 0, + 16, + exp, + 1.0); test_instance.run(); } } diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp index 0b31e229a525f3..5746f1dae21a50 100644 --- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp +++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp @@ -5,11 +5,11 @@ #include #include -#include #include #include #include "common_test_utils/ov_test_utils.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_models/builders.hpp" #include "transformations/remove_converts.hpp" @@ -54,12 +54,12 @@ void RemoveInputConvertTest::SetUp() { // test function { - auto params = std::make_shared(target_precision_, input_shape); + auto params = std::make_shared(target_precision_, input_shape); auto conversion = std::make_shared(params, net_precision_); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(conversion, add_const); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(conversion, add_const); - auto result = std::make_shared(add); + auto result = std::make_shared(add); func_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -67,11 +67,11 @@ void RemoveInputConvertTest::SetUp() { // ref function convert should be removed { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(params, add_const); - auto result = std::make_shared(add); + auto result = std::make_shared(add); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -111,11 +111,11 @@ class RemoveOutputConvertTest : public RemoveInputConvertTest { // test function { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(params, add_const); auto conversion = std::make_shared(add, target_precision_); - auto result = std::make_shared(conversion); + auto result = std::make_shared(conversion); func_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -123,11 +123,11 @@ class RemoveOutputConvertTest : public RemoveInputConvertTest { // ref function { - auto params = std::make_shared(net_precision_, input_shape); - auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10}); - auto add = std::make_shared(params, add_const); + auto params = std::make_shared(net_precision_, input_shape); + auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10}); + auto add = std::make_shared(params, add_const); - auto result = std::make_shared(add); + auto result = std::make_shared(add); ref_func_no_convert_ = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{params}, "Conversion"); @@ -167,12 +167,12 @@ 
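Every rename in these GNA test diffs is the same mechanical substitution: an opset alias is replaced by the versioned operation class it stands for. A minimal sketch of why this is behavior-preserving — the alias and the versioned type are literally the same C++ type (sketch only, assuming the standard OpenVINO headers; not part of the patch):

    // The opset namespaces are collections of using-declarations, so the
    // rename cannot change which class the tests instantiate.
    #include <type_traits>
    #include "openvino/op/constant.hpp"    // ov::op::v0::Constant
    #include "openvino/opsets/opset8.hpp"  // ov::opset8::Constant (alias)
    static_assert(std::is_same<ov::opset8::Constant, ov::op::v0::Constant>::value,
                  "the opset alias resolves to the versioned op class");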
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp
index 0b31e229a525f3..5746f1dae21a50 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_convert.cpp
@@ -5,11 +5,11 @@
 #include
 #include
-#include
 #include
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "ov_models/builders.hpp"
 #include "transformations/remove_converts.hpp"
@@ -54,12 +54,12 @@ void RemoveInputConvertTest::SetUp() {

     // test function
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(target_precision_, input_shape);
+        auto params = std::make_shared<ov::op::v0::Parameter>(target_precision_, input_shape);
         auto conversion = std::make_shared<ngraph::opset8::Convert>(params, net_precision_);
-        auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10});
-        auto add = std::make_shared<ngraph::opset8::Add>(conversion, add_const);
+        auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10});
+        auto add = std::make_shared<ov::op::v1::Add>(conversion, add_const);

-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto result = std::make_shared<ov::op::v0::Result>(add);
         func_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                    ngraph::ParameterVector{params},
                                                    "Conversion");
@@ -67,11 +67,11 @@ void RemoveInputConvertTest::SetUp() {

     // ref function convert should be removed
     {
-        auto params = std::make_shared<ngraph::opset8::Parameter>(net_precision_, input_shape);
-        auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10});
-        auto add = std::make_shared<ngraph::opset8::Add>(params, add_const);
+        auto params = std::make_shared<ov::op::v0::Parameter>(net_precision_, input_shape);
+        auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10});
+        auto add = std::make_shared<ov::op::v1::Add>(params, add_const);

-        auto result = std::make_shared<ngraph::opset8::Result>(add);
+        auto result = std::make_shared<ov::op::v0::Result>(add);
         ref_func_no_convert_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                                   ngraph::ParameterVector{params},
                                                                   "Conversion");
@@ -111,11 +111,11 @@ class RemoveOutputConvertTest : public RemoveInputConvertTest {

         // test function
         {
-            auto params = std::make_shared<ngraph::opset8::Parameter>(net_precision_, input_shape);
-            auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10});
-            auto add = std::make_shared<ngraph::opset8::Add>(params, add_const);
+            auto params = std::make_shared<ov::op::v0::Parameter>(net_precision_, input_shape);
+            auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10});
+            auto add = std::make_shared<ov::op::v1::Add>(params, add_const);
             auto conversion = std::make_shared<ngraph::opset8::Convert>(add, target_precision_);
-            auto result = std::make_shared<ngraph::opset8::Result>(conversion);
+            auto result = std::make_shared<ov::op::v0::Result>(conversion);
             func_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                        ngraph::ParameterVector{params},
                                                        "Conversion");
@@ -123,11 +123,11 @@ class RemoveOutputConvertTest : public RemoveInputConvertTest {

         // ref function
         {
-            auto params = std::make_shared<ngraph::opset8::Parameter>(net_precision_, input_shape);
-            auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10});
-            auto add = std::make_shared<ngraph::opset8::Add>(params, add_const);
+            auto params = std::make_shared<ov::op::v0::Parameter>(net_precision_, input_shape);
+            auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10});
+            auto add = std::make_shared<ov::op::v1::Add>(params, add_const);

-            auto result = std::make_shared<ngraph::opset8::Result>(add);
+            auto result = std::make_shared<ov::op::v0::Result>(add);
             ref_func_no_convert_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                                       ngraph::ParameterVector{params},
                                                                       "Conversion");
@@ -167,12 +167,12 @@ class LeaveConvertTest : public RemoveInputConvertTest {

         // test function
         {
-            auto params = std::make_shared<ngraph::opset8::Parameter>(net_precision_, input_shape);
-            auto add_const = ngraph::opset8::Constant::create(net_precision_, input_shape, {10});
-            auto add1 = std::make_shared<ngraph::opset8::Add>(params, add_const);
+            auto params = std::make_shared<ov::op::v0::Parameter>(net_precision_, input_shape);
+            auto add_const = ov::op::v0::Constant::create(net_precision_, input_shape, {10});
+            auto add1 = std::make_shared<ov::op::v1::Add>(params, add_const);
             auto conversion = std::make_shared<ngraph::opset8::Convert>(add1, net_precision_);
-            auto add2 = std::make_shared<ngraph::opset8::Add>(conversion, add_const);
-            auto result = std::make_shared<ngraph::opset8::Result>(add2);
+            auto add2 = std::make_shared<ov::op::v1::Add>(conversion, add_const);
+            auto result = std::make_shared<ov::op::v0::Result>(add2);
             func_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                        ngraph::ParameterVector{params},
                                                        "Conversion");
@@ -212,7 +212,7 @@ class RemoveMultiInputsConvertTest : public RemoveInputConvertTest {
             auto convert3 = std::make_shared<ngraph::opset8::Convert>(input[2], net_precision_);
             auto mul1 = ngraph::builder::makeEltwise(convert1, convert2, ngraph::helpers::EltwiseTypes::ADD);
             auto mul2 = ngraph::builder::makeEltwise(convert3, mul1, ngraph::helpers::EltwiseTypes::ADD);
-            auto result = std::make_shared<ngraph::opset8::Result>(mul2);
+            auto result = std::make_shared<ov::op::v0::Result>(mul2);
             func_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "multiple_input");
         }
@@ -223,7 +223,7 @@ class RemoveMultiInputsConvertTest : public RemoveInputConvertTest {
                 std::make_shared<ngraph::opset8::Parameter>(net_precision_, input_shape)};
             auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD);
             auto mul2 = ngraph::builder::makeEltwise(input[2], mul1, ngraph::helpers::EltwiseTypes::ADD);
-            auto result = std::make_shared<ngraph::opset8::Result>(mul2);
+            auto result = std::make_shared<ov::op::v0::Result>(mul2);
             ref_func_no_convert_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, input, "multiple_input");
         }
@@ -248,8 +248,8 @@ class RemoveMultiOutputsConvertTest : public RemoveOutputConvertTest {
             auto mul2 = ngraph::builder::makeEltwise(input[2], input[3], ngraph::helpers::EltwiseTypes::ADD);
             auto convert1 = std::make_shared<ngraph::opset8::Convert>(mul1, target_precision_);
             auto convert2 = std::make_shared<ngraph::opset8::Convert>(mul2, target_precision_);
-            auto result1 = std::make_shared<ngraph::opset8::Result>(convert1);
-            auto result2 = std::make_shared<ngraph::opset8::Result>(convert2);
+            auto result1 = std::make_shared<ov::op::v0::Result>(convert1);
+            auto result2 = std::make_shared<ov::op::v0::Result>(convert2);
             func_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                        input,
                                                        "multiple_output");
@@ -263,8 +263,8 @@ class RemoveMultiOutputsConvertTest : public RemoveOutputConvertTest {
                 std::make_shared<ngraph::opset8::Parameter>(net_precision_, input_shape)};
             auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD);
             auto mul2 = ngraph::builder::makeEltwise(input[2], input[3], ngraph::helpers::EltwiseTypes::ADD);
-            auto result1 = std::make_shared<ngraph::opset8::Result>(mul1);
-            auto result2 = std::make_shared<ngraph::opset8::Result>(mul2);
+            auto result1 = std::make_shared<ov::op::v0::Result>(mul1);
+            auto result2 = std::make_shared<ov::op::v0::Result>(mul2);
             ref_func_no_convert_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2},
                                                                       input,
                                                                       "multiple_output");
@@ -292,9 +292,9 @@ class RemoveOutputConvertConnectedToLayerTest : public RemoveOutputConvertTest {
             auto convert1 = std::make_shared<ngraph::opset8::Convert>(mul1, target_precision_);
             auto convert2 = std::make_shared<ngraph::opset8::Convert>(mul2, target_precision_);
             auto convert3 = std::make_shared<ngraph::opset8::Convert>(mul3, target_precision_);
-            auto result1 = std::make_shared<ngraph::opset8::Result>(convert1);
-            auto result2 = std::make_shared<ngraph::opset8::Result>(convert2);
-            auto result3 = std::make_shared<ngraph::opset8::Result>(convert3);
+            auto result1 = std::make_shared<ov::op::v0::Result>(convert1);
+            auto result2 = std::make_shared<ov::op::v0::Result>(convert2);
+            auto result3 = std::make_shared<ov::op::v0::Result>(convert3);
             func_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
                                                        input,
@@ -310,9 +310,9 @@ class RemoveOutputConvertConnectedToLayerTest : public RemoveOutputConvertTest {
             auto mul1 = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD);
             auto mul2 = ngraph::builder::makeEltwise(input[2], input[3], ngraph::helpers::EltwiseTypes::ADD);
             auto mul3 = ngraph::builder::makeEltwise(mul1, mul2, ngraph::helpers::EltwiseTypes::ADD);
-            auto result1 = std::make_shared<ngraph::opset8::Result>(mul1);
-            auto result2 = std::make_shared<ngraph::opset8::Result>(mul2);
-            auto result3 = std::make_shared<ngraph::opset8::Result>(mul3);
+            auto result1 = std::make_shared<ov::op::v0::Result>(mul1);
+            auto result2 = std::make_shared<ov::op::v0::Result>(mul2);
+            auto result3 = std::make_shared<ov::op::v0::Result>(mul3);
             ref_func_no_convert_ = std::make_shared<ngraph::Function>(ngraph::ResultVector{result1, result2, result3},
                                                                       input,
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp
index 842681b8d34f47..3ffecba3f60074 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_extra_reshapes.cpp
@@ -10,6 +10,7 @@
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/remove_extra_reshapes.hpp"

 namespace testing {
@@ -19,15 +20,15 @@ TEST(TransformationTests, RemoveExtraReshapesTestReshapeNotEqualInputOutput) {
     const ngraph::Shape data_shape{1, 3, 64, 64};

     {
-        auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, data_shape);
-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 3, 64 * 64});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(input_params, new_shape, true);
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(reshape_operation,
-                                                                            ngraph::Strides{1},
-                                                                            ngraph::Shape{0},
-                                                                            ngraph::Shape{0},
-                                                                            ngraph::Shape{3});
-        auto result = std::make_shared<ngraph::opset7::Result>(max_pool_operation);
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, data_shape);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 3, 64 * 64});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(input_params, new_shape, true);
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(reshape_operation,
+                                                                        ngraph::Strides{1},
+                                                                        ngraph::Shape{0},
+                                                                        ngraph::Shape{0},
+                                                                        ngraph::Shape{3});
+        auto result = std::make_shared<ov::op::v0::Result>(max_pool_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                   ngraph::ParameterVector{input_params});
         reference_func = ngraph::clone_function(*func);
@@ -50,15 +51,15 @@ TEST(TransformationTests, RemoveExtraReshapesTestReshapeEqualInputOutput) {
     const ngraph::Shape data_shape{1, 3, 64, 64};

     {
-        auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, data_shape);
-        auto new_shape = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 3, 64, 64});
-        auto reshape_operation = std::make_shared<ngraph::opset7::Reshape>(input_params, new_shape, true);
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(reshape_operation,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{0, 0},
-                                                                            ngraph::Shape{0, 0},
-                                                                            ngraph::Shape{3, 3});
-        auto result = std::make_shared<ngraph::opset7::Result>(max_pool_operation);
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, data_shape);
+        auto new_shape = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, {1, 3, 64, 64});
+        auto reshape_operation = std::make_shared<ov::op::v1::Reshape>(input_params, new_shape, true);
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(reshape_operation,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{0, 0},
+                                                                        ngraph::Shape{0, 0},
+                                                                        ngraph::Shape{3, 3});
+        auto result = std::make_shared<ov::op::v0::Result>(max_pool_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                   ngraph::ParameterVector{input_params});

         ngraph::pass::Manager m;
@@ -69,13 +70,13 @@ TEST(TransformationTests, RemoveExtraReshapesTestReshapeEqualInputOutput) {
     }

     {
-        auto input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, data_shape);
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(input_params,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{0, 0},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{4, 4});
-        auto result = std::make_shared<ngraph::opset7::Result>(max_pool_operation);
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, data_shape);
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(input_params,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{0, 0},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{4, 4});
+        auto result = std::make_shared<ov::op::v0::Result>(max_pool_operation);
         reference_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                             ngraph::ParameterVector{input_params});
     }
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp
index 7af95a3a67c277..28f97615dbf635 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_remove_single_input_concat.cpp
@@ -10,12 +10,13 @@
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "transformations/remove_single_input_concat.hpp"

 namespace testing {
 namespace {

-using GraphInputs = std::vector<std::shared_ptr<ngraph::opset8::Parameter>>;
+using GraphInputs = std::vector<std::shared_ptr<ov::op::v0::Parameter>>;
 using GraphOutputs = ngraph::OutputVector;

 struct Graph {
@@ -31,7 +32,7 @@ std::shared_ptr Graph::createFunction() {
                    outputs.end(),
                    std::back_inserter(results),
                    [](ngraph::Output<ngraph::Node> output) {
-                       return std::make_shared<ngraph::opset8::Result>(output);
+                       return std::make_shared<ov::op::v0::Result>(output);
                    });

     ngraph::ParameterVector params(inputs.begin(), inputs.end());
@@ -48,7 +49,7 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) {
     Operations outputs;

     for (int i = 0; i < n_inputs; ++i) {
-        auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 64});
+        auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 64});
         inputs.push_back(input);
         outputs.push_back(input);
     }
@@ -56,8 +57,8 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) {
     {
         Operations new_outputs;
         for (auto output : outputs) {
-            auto add_bias = ngraph::opset8::Constant::create(ngraph::element::i64, {1, 1, 1}, {2});
-            auto add_operation = std::make_shared<ngraph::opset8::Add>(output, add_bias);
+            auto add_bias = ov::op::v0::Constant::create(ngraph::element::i64, {1, 1, 1}, {2});
+            auto add_operation = std::make_shared<ov::op::v1::Add>(output, add_bias);
             new_outputs.push_back(add_operation);
         }
         outputs.swap(new_outputs);
@@ -65,7 +66,7 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) {

     if (has_concat) {
         auto concat_operation =
-            std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector(outputs.begin(), outputs.end()), 0);
+            std::make_shared<ov::op::v0::Concat>(ngraph::OutputVector(outputs.begin(), outputs.end()), 0);
         outputs = {concat_operation};
     }
@@ -73,8 +74,8 @@ Graph createGraph(int n_inputs, bool has_concat, int n_outputs) {
         Operations new_outputs;
         for (auto output : outputs) {
             for (int i = 0; i < n_outputs; ++i) {
-                auto add_bias = ngraph::opset8::Constant::create(ngraph::element::i64, {1, 1, 1}, {3});
-                auto add_operation = std::make_shared<ngraph::opset8::Add>(output, add_bias);
+                auto add_bias = ov::op::v0::Constant::create(ngraph::element::i64, {1, 1, 1}, {3});
+                auto add_operation = std::make_shared<ov::op::v1::Add>(output, add_bias);
                 new_outputs.push_back(add_operation);
             }
         }
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp
index d4a65bd8cd3c3d..fb989ab2369345 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_reorder_activation_and_pooling.cpp
@@ -5,11 +5,11 @@
 #include
 #include
-#include
 #include
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/reorder_activation_and_pooling.hpp"

 namespace testing {
@@ -36,11 +36,11 @@ class ActivationNodeFactory : public IActivationNodeFactory {
 };

 template <>
-class ActivationNodeFactory<ngraph::opset7::Clamp> : public IActivationNodeFactory {
+class ActivationNodeFactory<ov::op::v0::Clamp> : public IActivationNodeFactory {
 public:
     ActivationNodeFactory(const double min, const double max) : min_(min), max_(max) {}
     std::shared_ptr<ngraph::Node> createNode(const ngraph::Output<ngraph::Node>& operation_before) override {
-        return std::make_shared<ngraph::opset7::Clamp>(operation_before, min_, max_);
+        return std::make_shared<ov::op::v0::Clamp>(operation_before, min_, max_);
     }

 private:
@@ -67,7 +67,7 @@ ActivationFactoryPtr createActivationFactory(Args&&... args) {
  */
 typedef std::tuple ConvolutionActivationPoolTestOptions;
@@ -96,33 +96,32 @@ std::shared_ptr ConvolutionActivationPoolTestFixture::get_init
     ActivationFactoryPtr activation_factory,
     bool isAddNodeNeeded) {
     auto input_params_convolution =
-        std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
-    auto input_params_add =
-        std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
-
-    auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
-    auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
-    auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                               weights,
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::Strides{1, 1});
+        std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+    auto input_params_add = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+
+    auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
+    auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
+    auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                           weights,
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::Strides{1, 1});

     std::shared_ptr<ngraph::Node> last_operation = convolution_operation;
     if (isAddNodeNeeded) {
-        auto add_operation = std::make_shared<ngraph::opset7::Add>(convolution_operation, input_params_add);
+        auto add_operation = std::make_shared<ov::op::v1::Add>(convolution_operation, input_params_add);
         last_operation = add_operation;
     }

     auto activation = activation_factory->createNode(last_operation);

-    auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(activation,
-                                                                        ngraph::Strides{1, 1},
-                                                                        ngraph::Shape{1, 1},
-                                                                        ngraph::Shape{1, 1},
-                                                                        ngraph::Shape{1, 1});
+    auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(activation,
+                                                                    ngraph::Strides{1, 1},
+                                                                    ngraph::Shape{1, 1},
+                                                                    ngraph::Shape{1, 1},
+                                                                    ngraph::Shape{1, 1});

-    auto result = std::make_shared<ngraph::opset7::Result>(max_pool_operation);
+    auto result = std::make_shared<ov::op::v0::Result>(max_pool_operation);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                               ngraph::ParameterVector{input_params_convolution, input_params_add});
 }
@@ -131,35 +130,34 @@ std::shared_ptr ConvolutionActivationPoolTestFixture::get_refe
     ActivationFactoryPtr activation_factory,
     bool isAddNodeNeeded) {
     auto input_params_convolution =
-        std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+        std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});

-    auto input_params_add =
-        std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+    auto input_params_add = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});

-    auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
-    auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
-    auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                               weights,
-                                                                               ngraph::Strides{1, 1},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::CoordinateDiff{0, 0},
-                                                                               ngraph::Strides{1, 1});
+    auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
+    auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
+    auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                           weights,
+                                                                           ngraph::Strides{1, 1},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::CoordinateDiff{0, 0},
+                                                                           ngraph::Strides{1, 1});

     std::shared_ptr<ngraph::Node> last_operation = convolution_operation;
     if (isAddNodeNeeded) {
-        auto add_operation = std::make_shared<ngraph::opset7::Add>(convolution_operation, input_params_convolution);
+        auto add_operation = std::make_shared<ov::op::v1::Add>(convolution_operation, input_params_convolution);
         last_operation = add_operation;
     }

-    auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(last_operation,
-                                                                        ngraph::Strides{1, 1},
-                                                                        ngraph::Shape{1, 1},
-                                                                        ngraph::Shape{1, 1},
-                                                                        ngraph::Shape{1, 1});
+    auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(last_operation,
+                                                                    ngraph::Strides{1, 1},
+                                                                    ngraph::Shape{1, 1},
+                                                                    ngraph::Shape{1, 1},
+                                                                    ngraph::Shape{1, 1});

     auto activation = activation_factory->createNode(max_pool_operation);

-    auto result = std::make_shared<ngraph::opset7::Result>(activation);
+    auto result = std::make_shared<ov::op::v0::Result>(activation);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                               ngraph::ParameterVector{input_params_convolution, input_params_add});
 }
@@ -179,15 +177,14 @@ TEST_P(ConvolutionActivationPoolTestFixture, CompareFunctions) {
     execute_test(function, reference_function);
 }

-const std::vector<ActivationFactoryPtr> activationFactories = {
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(0.1, 0.2)};
+const std::vector<ActivationFactoryPtr> activationFactories = {createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(0.1, 0.2)};

 INSTANTIATE_TEST_SUITE_P(ConvolutionActivationPoolTestSuite,
                          ConvolutionActivationPoolTestFixture,
@@ -203,35 +200,35 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvFqMp) {

     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
-
-        auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
-        auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1, 1},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::Strides{1, 1});
-
-        auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-        auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-        auto fake_quantize_op = std::make_shared<ngraph::opset7::FakeQuantize>(convolution_operation,
-                                                                               input_low,
-                                                                               input_high,
-                                                                               output_low,
-                                                                               output_high,
-                                                                               11);
-
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(fake_quantize_op,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(max_pool_operation);
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+
+        auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
+        auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1, 1},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::Strides{1, 1});
+
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+        auto fake_quantize_op = std::make_shared<ov::op::v0::FakeQuantize>(convolution_operation,
+                                                                           input_low,
+                                                                           input_high,
+                                                                           output_low,
+                                                                           output_high,
+                                                                           11);
+
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(fake_quantize_op,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(max_pool_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                   ngraph::ParameterVector{input_params_convolution});
@@ -245,35 +242,35 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvFqMp) {

     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
-
-        auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
-        auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1, 1},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::Strides{1, 1});
-
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(convolution_operation,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1});
-
-        auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-        auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-        auto fake_quantize_op = std::make_shared<ngraph::opset7::FakeQuantize>(max_pool_operation,
-                                                                               input_low,
-                                                                               input_high,
-                                                                               output_low,
-                                                                               output_high,
-                                                                               11);
-
-        auto result = std::make_shared<ngraph::opset7::Result>(fake_quantize_op);
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+
+        auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
+        auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1, 1},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::Strides{1, 1});
+
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(convolution_operation,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1});
+
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+        auto fake_quantize_op = std::make_shared<ov::op::v0::FakeQuantize>(max_pool_operation,
+                                                                           input_low,
+                                                                           input_high,
+                                                                           output_low,
+                                                                           output_high,
+                                                                           11);
+
+        auto result = std::make_shared<ov::op::v0::Result>(fake_quantize_op);
         reference_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                             ngraph::ParameterVector{input_params_convolution});
     }
@@ -291,40 +288,40 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvAddFqMp) {

     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
         auto input_params_add =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
-
-        auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
-        auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1, 1},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::Strides{1, 1});
-
-        auto add_operation = std::make_shared<ngraph::opset7::Add>(convolution_operation, input_params_add);
-
-        auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-        auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-        auto fake_quantize_op = std::make_shared<ngraph::opset7::FakeQuantize>(add_operation,
-                                                                               input_low,
-                                                                               input_high,
-                                                                               output_low,
-                                                                               output_high,
-                                                                               11);
-
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(fake_quantize_op,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1});
-
-        auto result = std::make_shared<ngraph::opset7::Result>(max_pool_operation);
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+
+        auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
+        auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1, 1},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::Strides{1, 1});
+
+        auto add_operation = std::make_shared<ov::op::v1::Add>(convolution_operation, input_params_add);
+
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+        auto fake_quantize_op = std::make_shared<ov::op::v0::FakeQuantize>(add_operation,
+                                                                           input_low,
+                                                                           input_high,
+                                                                           output_low,
+                                                                           output_high,
+                                                                           11);
+
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(fake_quantize_op,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1});
+
+        auto result = std::make_shared<ov::op::v0::Result>(max_pool_operation);
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                   ngraph::ParameterVector{input_params_convolution, input_params_add});
@@ -338,40 +335,40 @@ TEST(TransformationTests, ReorderActivationAndPoolingTestConvAddFqMp) {

     {
         auto input_params_convolution =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
         auto input_params_add =
-            std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
-
-        auto weights = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
-        auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
-        auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(input_params_convolution,
-                                                                                   weights,
-                                                                                   ngraph::Strides{1, 1},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::CoordinateDiff{0, 0},
-                                                                                   ngraph::Strides{1, 1});
-
-        auto add_operation = std::make_shared<ngraph::opset7::Add>(convolution_operation, input_params_add);
-
-        auto max_pool_operation = std::make_shared<ngraph::opset7::MaxPool>(add_operation,
-                                                                            ngraph::Strides{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1},
-                                                                            ngraph::Shape{1, 1});
-
-        auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-        auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-        auto fake_quantize_op = std::make_shared<ngraph::opset7::FakeQuantize>(max_pool_operation,
-                                                                               input_low,
-                                                                               input_high,
-                                                                               output_low,
-                                                                               output_high,
-                                                                               11);
-
-        auto result = std::make_shared<ngraph::opset7::Result>(fake_quantize_op);
+            std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 3, 64, 64});
+
+        auto weights = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 3, 1, 1}, {1});
+        auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{3, 1, 1}, {1});
+        auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(input_params_convolution,
+                                                                               weights,
+                                                                               ngraph::Strides{1, 1},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::CoordinateDiff{0, 0},
+                                                                               ngraph::Strides{1, 1});
+
+        auto add_operation = std::make_shared<ov::op::v1::Add>(convolution_operation, input_params_add);
+
+        auto max_pool_operation = std::make_shared<ov::op::v1::MaxPool>(add_operation,
+                                                                        ngraph::Strides{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1},
+                                                                        ngraph::Shape{1, 1});
+
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+        auto fake_quantize_op = std::make_shared<ov::op::v0::FakeQuantize>(max_pool_operation,
+                                                                           input_low,
+                                                                           input_high,
+                                                                           output_low,
+                                                                           output_high,
+                                                                           11);
+
+        auto result = std::make_shared<ov::op::v0::Result>(fake_quantize_op);
         reference_func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                             ngraph::ParameterVector{input_params_convolution, input_params_add});
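The reorder tests above encode the property that lets the pass swap an activation with the max-pool that follows it: for a non-decreasing activation f, max-pooling commutes with f, i.e. f(max(a, b)) == max(f(a), f(b)). A toy standalone check of that identity (illustration only, not the GNA pass itself, which also inspects the surrounding ops):

    #include <algorithm>
    #include <cassert>
    int main() {
        auto relu = [](float x) { return std::max(x, 0.0f); };  // non-decreasing activation
        const float a = -1.5f, b = 2.0f;
        // activation-then-pool equals pool-then-activation
        assert(std::max(relu(a), relu(b)) == relu(std::max(a, b)));
        return 0;
    }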
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp
index 863e42d21a45bd..fe8aeedb43f529 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_split_convolution_with_large_buffer_size.cpp
@@ -12,6 +12,7 @@
 #include "backend/gna_limitations.hpp"
 #include "common/gna_target.hpp"
 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset7.hpp"
 #include "transformations/split_convolution_with_large_buffer_size.hpp"

 using namespace ov::intel_gna::limitations;
@@ -23,12 +24,12 @@ namespace {

 struct Graph {
     std::shared_ptr<ngraph::Function> createFunction();

-    std::shared_ptr<ngraph::opset7::Parameter> input_params;
+    std::shared_ptr<ov::op::v0::Parameter> input_params;
     ngraph::OutputVector output_nodes;
 };

 std::shared_ptr<ngraph::Function> Graph::createFunction() {
-    auto result = std::make_shared<ngraph::opset7::Result>(output_nodes.front());
+    auto result = std::make_shared<ov::op::v0::Result>(output_nodes.front());
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
@@ -102,7 +103,7 @@ using CreateBaseDecoratorPtr = std::unique_ptr;

 Graph CreateBaseDecorator::build() {
     Graph graph;
-    graph.input_params = std::make_shared<ngraph::opset7::Parameter>(ngraph::element::f32, input_data_shape_);
+    graph.input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, input_data_shape_);

     return graph;
 }
@@ -120,14 +121,14 @@ class CreateConvolution : public CreateAppendableGraphDecorator {
 };

 ngraph::Output<ngraph::Node> CreateConvolution::createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) {
-    auto kernel = ngraph::opset7::Constant::create(ngraph::element::f32, kernel_shape_, {1});
-
-    return std::make_shared<ngraph::opset7::Convolution>(parent_node,
-                                                         kernel,
-                                                         ngraph::Strides{1, 1},
-                                                         ngraph::CoordinateDiff{0, 0},
-                                                         ngraph::CoordinateDiff{0, 0},
-                                                         ngraph::Strides{1, 1});
+    auto kernel = ov::op::v0::Constant::create(ngraph::element::f32, kernel_shape_, {1});
+
+    return std::make_shared<ov::op::v1::Convolution>(parent_node,
+                                                     kernel,
+                                                     ngraph::Strides{1, 1},
+                                                     ngraph::CoordinateDiff{0, 0},
+                                                     ngraph::CoordinateDiff{0, 0},
+                                                     ngraph::Strides{1, 1});
 }

 // should be used only after CreateBaseDecorator
@@ -142,21 +143,20 @@ class CreateSplittedConvolution : public CreateGraphDecorator {
 protected:
     void updateGraph(Graph& graph) override {
         auto split_node_c1 =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector<int64_t>{3});
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({1}), std::vector<int64_t>{3});
         auto split_node_c2 =
-            ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({split_shape_.size()}), split_shape_);
-        auto split_node =
-            std::make_shared<ngraph::opset7::VariadicSplit>(graph.input_params, split_node_c1, split_node_c2);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({split_shape_.size()}), split_shape_);
+        auto split_node = std::make_shared<ov::op::v1::VariadicSplit>(graph.input_params, split_node_c1, split_node_c2);

-        auto kernel = ngraph::opset7::Constant::create(ngraph::element::f32, kernel_shape_, {1});
+        auto kernel = ov::op::v0::Constant::create(ngraph::element::f32, kernel_shape_, {1});

         for (int i = 0; i < split_shape_.size(); ++i) {
-            auto convolution_operation = std::make_shared<ngraph::opset7::Convolution>(split_node->output(i),
-                                                                                       kernel,
-                                                                                       ngraph::Strides{1, 1},
-                                                                                       ngraph::CoordinateDiff{0, 0},
-                                                                                       ngraph::CoordinateDiff{0, 0},
-                                                                                       ngraph::Strides{1, 1});
+            auto convolution_operation = std::make_shared<ov::op::v1::Convolution>(split_node->output(i),
+                                                                                   kernel,
+                                                                                   ngraph::Strides{1, 1},
+                                                                                   ngraph::CoordinateDiff{0, 0},
+                                                                                   ngraph::CoordinateDiff{0, 0},
+                                                                                   ngraph::Strides{1, 1});
             graph.output_nodes.push_back(convolution_operation);
         }
     }
@@ -175,8 +175,8 @@ class CreateAdd : public CreateAppendableGraphDecorator {
 };

 ngraph::Output<ngraph::Node> CreateAdd::createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) {
-    auto bias = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-    return std::make_shared<ngraph::opset7::Add>(parent_node, bias);
+    auto bias = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+    return std::make_shared<ov::op::v1::Add>(parent_node, bias);
 }

 class CreateFakeQuantize : public CreateAppendableGraphDecorator {
@@ -188,16 +188,11 @@ class CreateFakeQuantize : public CreateAppendableGraphDecorator {
 };

 ngraph::Output<ngraph::Node> CreateFakeQuantize::createOutputNode(const ngraph::Output<ngraph::Node>& parent_node) {
-    auto input_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-    auto input_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-    auto output_low = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-    auto output_high = ngraph::opset7::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-    return std::make_shared<ngraph::opset7::FakeQuantize>(parent_node,
-                                                          input_low,
-                                                          input_high,
-                                                          output_low,
-                                                          output_high,
-                                                          11);
+    auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+    auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+    auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+    auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+    return std::make_shared<ov::op::v0::FakeQuantize>(parent_node, input_low, input_high, output_low, output_high, 11);
 }

 class CreateConcat : public CreateGraphDecorator {
@@ -210,7 +205,7 @@ class CreateConcat : public CreateGraphDecorator {

 void CreateConcat::updateGraph(Graph& graph) {
     ngraph::OutputVector new_graph_output;
-    new_graph_output.emplace_back(std::make_shared<ngraph::opset7::Concat>(graph.output_nodes, 3));
+    new_graph_output.emplace_back(std::make_shared<ov::op::v0::Concat>(graph.output_nodes, 3));

     graph.output_nodes.swap(new_graph_output);
 }
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp
index fd92a12cf89eb2..b8415d6fc9f727 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_split_eltwise.cpp
@@ -7,7 +7,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
@@ -15,6 +14,7 @@
 #include "common/gna_target.hpp"
 #include "common_test_utils/common_utils.hpp"
 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset9.hpp"
 #include "transformations/split_eltwise.hpp"

 using namespace ov::intel_gna::limitations;
@@ -31,25 +31,25 @@ static std::shared_ptr createFunction(const ngraph::Shape& inp
     std::shared_ptr<ngraph::Node> last_node, last_node0, last_node1;
     ngraph::ParameterVector parameters;

-    auto input0 = std::make_shared<ngraph::opset9::Parameter>(ngraph::element::f32, input_shape);
+    auto input0 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, input_shape);
     parameters.push_back(input0);
     last_node0 = input0;
     std::shared_ptr<ngraph::Node> input1;
     if (with_const) {
-        auto const_input = ngraph::opset9::Constant::create(ngraph::element::f32, input_shape, {1});
+        auto const_input = ov::op::v0::Constant::create(ngraph::element::f32, input_shape, {1});
         last_node1 = const_input;
     } else {
-        auto input1 = std::make_shared<ngraph::opset9::Parameter>(ngraph::element::f32, input_shape);
+        auto input1 = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, input_shape);
         last_node1 = input1;
         parameters.push_back(input1);
     }

     auto add_fake_quantize = [&](const std::shared_ptr<ngraph::Node>& node) {
-        auto input_low = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5});
-        auto output_low = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10});
-        return std::make_shared<ngraph::opset9::FakeQuantize>(node, input_low, input_high, output_low, output_high, 11);
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10});
+        return std::make_shared<ov::op::v0::FakeQuantize>(node, input_low, input_high, output_low, output_high, 11);
     };

     if (with_fq) {
@@ -61,33 +61,33 @@ static std::shared_ptr createFunction(const ngraph::Shape& inp

     if (split) {
         auto split_sizes_per_axis = ov::intel_gna::AlignedSplitSizesPerAxis(input_shape);
-        auto split0 = std::make_shared<ngraph::opset9::VariadicSplit>(
+        auto split0 = std::make_shared<ov::op::v1::VariadicSplit>(
             last_node0,
-            ngraph::opset9::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape({1}),
-                                             std::vector<size_t>{split_sizes_per_axis.first}),
-            ngraph::opset9::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape({split_sizes_per_axis.second.size()}),
-                                             split_sizes_per_axis.second));
-        auto split1 = std::make_shared<ngraph::opset9::VariadicSplit>(
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape({1}),
+                                         std::vector<size_t>{split_sizes_per_axis.first}),
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape({split_sizes_per_axis.second.size()}),
+                                         split_sizes_per_axis.second));
+        auto split1 = std::make_shared<ov::op::v1::VariadicSplit>(
             last_node1,
-            ngraph::opset9::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape({1}),
-                                             std::vector<size_t>{split_sizes_per_axis.first}),
-            ngraph::opset9::Constant::create(ngraph::element::i64,
-                                             ngraph::Shape({split_sizes_per_axis.second.size()}),
-                                             split_sizes_per_axis.second));
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape({1}),
+                                         std::vector<size_t>{split_sizes_per_axis.first}),
+            ov::op::v0::Constant::create(ngraph::element::i64,
+                                         ngraph::Shape({split_sizes_per_axis.second.size()}),
+                                         split_sizes_per_axis.second));
         ov::NodeVector concat_inputs;
         for (size_t i = 0; i < split_sizes_per_axis.second.size(); i++) {
             auto eltwise_node_part = std::make_shared(split0->output(i), split1->output(i), type);
             concat_inputs.push_back(eltwise_node_part);
         }
-        auto concat = std::make_shared<ngraph::opset9::Concat>(concat_inputs, split_sizes_per_axis.first);
-        auto result = std::make_shared<ngraph::opset9::Result>(concat);
+        auto concat = std::make_shared<ov::op::v0::Concat>(concat_inputs, split_sizes_per_axis.first);
+        auto result = std::make_shared<ov::op::v0::Result>(concat);
         return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, parameters);
     } else {
         auto eltwise = std::make_shared(last_node0, last_node1, type);
-        auto result = std::make_shared<ngraph::opset9::Result>(eltwise);
+        auto result = std::make_shared<ov::op::v0::Result>(eltwise);
         return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, parameters);
     }
 }
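The split-eltwise tests build both forms of the same computation: the eltwise on the whole tensor, and the eltwise applied to VariadicSplit slices whose results are concatenated back. The reference identity, checked here on plain vectors (illustration only; the aligned slice sizes come from ov::intel_gna::AlignedSplitSizesPerAxis in the real tests):

    #include <cassert>
    #include <vector>
    int main() {
        const std::vector<int> a{1, 2, 3, 4}, b{5, 6, 7, 8};
        std::vector<int> whole, stitched;
        for (size_t i = 0; i < a.size(); ++i)
            whole.push_back(a[i] + b[i]);                     // Add on the whole tensor
        for (size_t part = 0; part < 2; ++part)               // "split" into two slices
            for (size_t i = 2 * part; i < 2 * part + 2; ++i)
                stitched.push_back(a[i] + b[i]);              // Add per slice, then "concat"
        assert(stitched == whole);
        return 0;
    }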
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp
index 6cd0be8bc4cdf5..91c610d1478f1a 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_substitute_softsign.cpp
@@ -5,9 +5,8 @@
 #include

-#include
-#include
 #include
+#include
 #include

 #include "common_test_utils/ov_test_utils.hpp"
@@ -17,7 +16,7 @@ namespace testing {
 namespace {

 std::shared_ptr createSoftSignFunction() {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});

     auto softsign = std::make_shared(input_params);
@@ -32,18 +31,17 @@ TEST(TransformationTests, SubstituteSoftSignMulPower) {
     std::shared_ptr func(nullptr), reference_func(nullptr);

     {
-        auto input_params =
-            std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});

-        auto abs = std::make_shared<ngraph::opset8::Abs>(input_params);
+        auto abs = std::make_shared<ov::op::v0::Abs>(input_params);

-        auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
-        auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1});
+        auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
+        auto const_neg_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1});

-        auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
-        auto power = std::make_shared<ngraph::opset8::Power>(add, const_neg_1);
+        auto add = std::make_shared<ov::op::v1::Add>(abs, const_1);
+        auto power = std::make_shared<ov::op::v1::Power>(add, const_neg_1);

-        auto mul = std::make_shared<ngraph::opset8::Multiply>(power, input_params);
+        auto mul = std::make_shared<ov::op::v1::Multiply>(power, input_params);

         ngraph::ResultVector results{std::make_shared(mul)};
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params});
@@ -66,16 +64,15 @@ TEST(TransformationTests, SubstituteSoftSignDivide) {
     std::shared_ptr func(nullptr), reference_func(nullptr);

     {
-        auto input_params =
-            std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});

-        auto abs = std::make_shared<ngraph::opset8::Abs>(input_params);
+        auto abs = std::make_shared<ov::op::v0::Abs>(input_params);

-        auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
-        auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
+        auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
+        auto add = std::make_shared<ov::op::v1::Add>(abs, const_1);

-        auto divide = std::make_shared<ngraph::opset8::Divide>(input_params, add);
-        ngraph::ResultVector results{std::make_shared<ngraph::opset8::Result>(divide)};
+        auto divide = std::make_shared<ov::op::v1::Divide>(input_params, add);
+        ngraph::ResultVector results{std::make_shared<ov::op::v0::Result>(divide)};
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params});

         ngraph::pass::Manager m;
@@ -97,18 +94,17 @@ TEST(TransformationTests, SubstituteSoftSignMulPowerInvalidAddConst) {
     std::shared_ptr func(nullptr), reference_func(nullptr);

     {
-        auto input_params =
-            std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});

-        auto abs = std::make_shared<ngraph::opset8::Abs>(input_params);
+        auto abs = std::make_shared<ov::op::v0::Abs>(input_params);

-        auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1.1});
-        auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1});
+        auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1.1});
+        auto const_neg_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1});

-        auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
-        auto power = std::make_shared<ngraph::opset8::Power>(add, const_neg_1);
+        auto add = std::make_shared<ov::op::v1::Add>(abs, const_1);
+        auto power = std::make_shared<ov::op::v1::Power>(add, const_neg_1);

-        auto mul = std::make_shared<ngraph::opset8::Multiply>(power, input_params);
+        auto mul = std::make_shared<ov::op::v1::Multiply>(power, input_params);

         ngraph::ResultVector results{std::make_shared(mul)};
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params});
@@ -131,18 +127,17 @@ TEST(TransformationTests, SubstituteSoftSignMulPowerInvalidPowerConst) {
     std::shared_ptr func(nullptr), reference_func(nullptr);

     {
-        auto input_params =
-            std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});
+        auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1, 1, 64});

-        auto abs = std::make_shared<ngraph::opset8::Abs>(input_params);
+        auto abs = std::make_shared<ov::op::v0::Abs>(input_params);

-        auto const_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
-        auto const_neg_1 = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1.1});
+        auto const_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {1});
+        auto const_neg_1 = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{}, {-1.1});

-        auto add = std::make_shared<ngraph::opset8::Add>(abs, const_1);
-        auto power = std::make_shared<ngraph::opset8::Power>(add, const_neg_1);
+        auto add = std::make_shared<ov::op::v1::Add>(abs, const_1);
+        auto power = std::make_shared<ov::op::v1::Power>(add, const_neg_1);

-        auto mul = std::make_shared<ngraph::opset8::Multiply>(power, input_params);
+        auto mul = std::make_shared<ov::op::v1::Multiply>(power, input_params);

         ngraph::ResultVector results{std::make_shared(mul)};
         func = std::make_shared<ngraph::Function>(ngraph::ResultVector{results}, ngraph::ParameterVector{input_params});
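Both graph shapes matched by these tests compute the same function: Mul(Power(Add(Abs(x), 1), -1), x) and Divide(x, Add(Abs(x), 1)) are two spellings of softsign(x) = x / (1 + |x|), which is why either one may be folded into a single SoftSign node. A numeric sketch of that equivalence (illustration only):

    #include <cassert>
    #include <cmath>
    int main() {
        for (double x : {-3.0, -0.5, 0.0, 2.0}) {
            const double via_power = x * std::pow(std::abs(x) + 1.0, -1.0);  // Mul/Power form
            const double via_divide = x / (std::abs(x) + 1.0);               // Divide form
            assert(std::abs(via_power - via_divide) < 1e-12);
        }
        return 0;
    }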
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp
index 8118ac2948ca19..6f361e5f4c74e8 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_swap_input_matmul.cpp
@@ -10,6 +10,7 @@
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "transformations/swap_input_matmul_gna.hpp"

 namespace testing {
@@ -24,30 +25,26 @@ static std::shared_ptr CreateMatMulFunction(const ngraph::Shap
                                                                bool swappedInputs,
                                                                bool needTranspose,
                                                                bool expected = false) {
-    auto input_params = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, input2_shape);
+    auto input_params = std::make_shared<ov::op::v0::Parameter>(ngraph::element::i64, input2_shape);
     std::shared_ptr<ngraph::Node> input = input_params;
     if (input->get_output_shape(0).size() == 2 && needTranspose) {
         auto transpose_order =
-            ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
-        input = std::make_shared<ngraph::opset8::Transpose>(input, transpose_order);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
+        input = std::make_shared<ov::op::v1::Transpose>(input, transpose_order);
     }

-    auto constant = ngraph::opset8::Constant::create(ngraph::element::i64, input1_shape, {1});
+    auto constant = ov::op::v0::Constant::create(ngraph::element::i64, input1_shape, {1});
     std::shared_ptr<ngraph::Node> const_input = constant;
     if (withWeightsFq) {
-        auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-        auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-        const_input = std::make_shared<ngraph::opset8::FakeQuantize>(const_input,
-                                                                     input_low,
-                                                                     input_high,
-                                                                     output_low,
-                                                                     output_high,
-                                                                     11);
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+        const_input =
+            std::make_shared<ov::op::v0::FakeQuantize>(const_input, input_low, input_high, output_low, output_high, 11);
     }

-    auto matmul = swappedInputs ? std::make_shared<ngraph::opset8::MatMul>(input, const_input, false, needTranspose)
-                                : std::make_shared<ngraph::opset8::MatMul>(const_input, input, needTranspose, false);
+    auto matmul = swappedInputs ? std::make_shared<ov::op::v0::MatMul>(input, const_input, false, needTranspose)
+                                : std::make_shared<ov::op::v0::MatMul>(const_input, input, needTranspose, false);

     std::shared_ptr<ngraph::Node> final_node = matmul;
     if (withBias) {
@@ -55,40 +52,36 @@ static std::shared_ptr CreateMatMulFunction(const ngraph::Shap
         if ((needTranspose && !expected || !needTranspose && expected) && bias_shape.size() > 1) {
             std::swap(shape[0], shape[1]);
         }
-        auto bias = ngraph::opset8::Constant::create(ngraph::element::i64, shape, {1});
+        auto bias = ov::op::v0::Constant::create(ngraph::element::i64, shape, {1});
         std::shared_ptr<ngraph::Node> bias_node = bias;
         if (expected && bias_shape.size() > 1) {
             auto transpose_order =
-                ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
-            bias_node = std::make_shared<ngraph::opset8::Transpose>(bias_node, transpose_order);
+                ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
+            bias_node = std::make_shared<ov::op::v1::Transpose>(bias_node, transpose_order);
         }
-        final_node = std::make_shared<ngraph::opset8::Add>(matmul, bias_node);
+        final_node = std::make_shared<ov::op::v1::Add>(matmul, bias_node);
     }

     if (withOutFq) {
-        auto input_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
-        auto output_low = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset8::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
-        final_node = std::make_shared<ngraph::opset8::FakeQuantize>(final_node,
-                                                                    input_low,
-                                                                    input_high,
-                                                                    output_low,
-                                                                    output_high,
-                                                                    11);
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {20});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {10});
+        final_node =
+            std::make_shared<ov::op::v0::FakeQuantize>(final_node, input_low, input_high, output_low, output_high, 11);
     }

     if (withAct) {
-        final_node = std::make_shared<ngraph::opset8::Relu>(final_node);
+        final_node = std::make_shared<ov::op::v0::Relu>(final_node);
     }

     if (final_node->get_output_shape(0).size() == 2 && needTranspose) {
         auto transpose_order =
-            ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
-        final_node = std::make_shared<ngraph::opset8::Transpose>(final_node, transpose_order);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, std::vector<int64_t>{1, 0});
+        final_node = std::make_shared<ov::op::v1::Transpose>(final_node, transpose_order);
     }

-    auto result = std::make_shared<ngraph::opset8::Result>(final_node);
+    auto result = std::make_shared<ov::op::v0::Result>(final_node);
     return std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input_params});
 }
diff --git a/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp b/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp
index f0312aa24d40ce..8be223526e39c8 100644
--- a/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp
+++ b/src/plugins/intel_gna/tests/unit/transformations/gna_unfuse_reshape_and_transpose.cpp
@@ -11,6 +11,7 @@
 #include

 #include "common_test_utils/ov_test_utils.hpp"
+#include "openvino/opsets/opset8.hpp"
 #include "transformations/unfuse_reshape_and_transpose.hpp"

 namespace testing {
@@ -36,11 +37,11 @@ class ActivationFactory : public IActivationFactory {
 };

 template <>
-class ActivationFactory<ngraph::opset8::Clamp> : public IActivationFactory {
+class ActivationFactory<ov::op::v0::Clamp> : public IActivationFactory {
 public:
     ActivationFactory(const double min, const double max) : min_(min), max_(max) {}
     std::shared_ptr<ngraph::Node> createNode(const ngraph::Output<ngraph::Node>& operation_before) override {
-        return std::make_shared<ngraph::opset8::Clamp>(operation_before, min_, max_);
+        return std::make_shared<ov::op::v0::Clamp>(operation_before, min_, max_);
     }

 private:
@@ -70,32 +71,31 @@ static std::shared_ptr createFunction(const ngraph::Shape& con
                                                               bool single_batch) {
     size_t total_in =
         std::accumulate(std::begin(conv_input_shape), std::end(conv_input_shape), 1, std::multiplies());
-    auto input = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::f32, ngraph::Shape{1, total_in});
+    auto input = std::make_shared<ov::op::v0::Parameter>(ngraph::element::f32, ngraph::Shape{1, total_in});
     std::shared_ptr<ngraph::Node> last_node, last_const;
     auto add_fake_quantize = [&](const std::shared_ptr<ngraph::Node>& node) {
-        auto input_low = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
-        auto input_high = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5});
-        auto output_low = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0});
-        auto output_high = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10});
-        return std::make_shared<ngraph::opset8::FakeQuantize>(node, input_low, input_high, output_low, output_high, 11);
+        auto input_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1});
+        auto input_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {5});
+        auto output_low = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0});
+        auto output_high = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {10});
+        return std::make_shared<ov::op::v0::FakeQuantize>(node, input_low, input_high, output_low, output_high, 11);
     };
     if (single_reshape_before) {
-        auto reshape_in_const =
-            ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, conv_input_shape);
-        auto reshape_in = std::make_shared<ngraph::opset8::Reshape>(input, reshape_in_const, false);
+        auto reshape_in_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, conv_input_shape);
+        auto reshape_in = std::make_shared<ov::op::v1::Reshape>(input, reshape_in_const, false);
         last_node = reshape_in;
     } else {
-        auto reshape_in_const = ngraph::opset8::Constant::create(
+        auto reshape_in_const = ov::op::v0::Constant::create(
             ngraph::element::i64,
             ngraph::Shape{4},
             ngraph::Shape{conv_input_shape[0], conv_input_shape[2], conv_input_shape[3], conv_input_shape[1]});
-        auto reshape_in = std::make_shared<ngraph::opset8::Reshape>(input, reshape_in_const, false);
+        auto reshape_in = std::make_shared<ov::op::v1::Reshape>(input, reshape_in_const, false);
         auto transpose_in_const =
-            ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2});
-        auto transpose_in = std::make_shared<ngraph::opset8::Transpose>(reshape_in, transpose_in_const);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 3, 1, 2});
+        auto transpose_in = std::make_shared<ov::op::v1::Transpose>(reshape_in, transpose_in_const);
         last_node = transpose_in;
     }
-    auto conv_weights = ngraph::opset8::Constant::create(ngraph::element::f32, conv_filter_shape, {1});
+    auto conv_weights = ov::op::v0::Constant::create(ngraph::element::f32, conv_filter_shape, {1});
     last_const = conv_weights;
     if (with_fq) {
         auto conv_input_fq = add_fake_quantize(last_node);
@@ -103,21 +103,20 @@ static std::shared_ptr createFunction(const ngraph::Shape& con
         auto conv_weights_fq = add_fake_quantize(conv_weights);
         last_const = conv_weights_fq;
     }
-    auto conv = std::make_shared<ngraph::opset8::Convolution>(last_node,
-                                                              last_const,
-                                                              ngraph::Strides{1, 1},
-                                                              ngraph::CoordinateDiff{0, 0},
-                                                              ngraph::CoordinateDiff{0, 0},
-                                                              ngraph::Strides{1, 1});
+    auto conv = std::make_shared<ov::op::v1::Convolution>(last_node,
+                                                          last_const,
+                                                          ngraph::Strides{1, 1},
+                                                          ngraph::CoordinateDiff{0, 0},
+                                                          ngraph::CoordinateDiff{0, 0},
+                                                          ngraph::Strides{1, 1});
     last_node = conv;
     auto conv_output_shape = conv->get_output_shape(0);
     size_t total_out =
         std::accumulate(std::begin(conv_output_shape), std::end(conv_output_shape), 1, std::multiplies());
     if (with_bias) {
-        auto add_const = ngraph::opset8::Constant::create(ngraph::element::f32,
-                                                          ngraph::Shape{1, conv_output_shape.at(1), 1, 1},
-                                                          {1});
-        auto add = std::make_shared<ngraph::opset8::Add>(conv, add_const);
+        auto add_const =
+            ov::op::v0::Constant::create(ngraph::element::f32, ngraph::Shape{1, conv_output_shape.at(1), 1, 1}, {1});
+        auto add = std::make_shared<ov::op::v1::Add>(conv, add_const);
         last_node = add;
     }
     if (with_fq) {
@@ -125,11 +124,11 @@ static std::shared_ptr createFunction(const ngraph::Shape& con
         last_node = conv_bias_fq;
     }
     if (with_pool) {
-        auto pool = std::make_shared<ngraph::opset8::MaxPool>(last_node,
-                                                              ngraph::Strides{1, 1},
-                                                              ngraph::Shape{0, 0},
-                                                              ngraph::Shape{0, 0},
-                                                              ngraph::Shape{1, 1});
+        auto pool = std::make_shared<ov::op::v1::MaxPool>(last_node,
+                                                          ngraph::Strides{1, 1},
+                                                          ngraph::Shape{0, 0},
+                                                          ngraph::Shape{0, 0},
+                                                          ngraph::Shape{1, 1});
         last_node = pool;
     }
     if (activation_factory) {
@@ -145,16 +144,16 @@ static std::shared_ptr createFunction(const ngraph::Shape& con
         }
     }
     auto out_shape = single_batch ? ngraph::Shape{1, total_out} : ngraph::Shape{total_out, 1};
-    auto reshape_out_const = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{2}, out_shape);
+    auto reshape_out_const = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{2}, out_shape);
     if (!single_reshape_after) {
         auto transpose_out_const =
-            ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1});
-        auto transpose_out = std::make_shared<ngraph::opset8::Transpose>(last_node, transpose_out_const);
+            ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{4}, ngraph::Shape{0, 2, 3, 1});
+        auto transpose_out = std::make_shared<ov::op::v1::Transpose>(last_node, transpose_out_const);
         last_node = transpose_out;
     }
-    auto reshape_out = std::make_shared<ngraph::opset8::Reshape>(last_node, reshape_out_const, false);
+    auto reshape_out = std::make_shared<ov::op::v1::Reshape>(last_node, reshape_out_const, false);

-    auto result = std::make_shared<ngraph::opset8::Result>(reshape_out);
+    auto result = std::make_shared<ov::op::v0::Result>(reshape_out);
     auto func = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{input});

     return func;
@@ -228,16 +227,15 @@ TEST_P(UnfuseReshapeAndTransposeTestSuiteFixture, CompareFunctions) {
     execute_test(function, reference_function);
 }

-const std::vector<ActivationFactoryPtr> activationFactories = {
-    nullptr,
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(),
-    createActivationFactory(0.1, 0.2)};
+const std::vector<ActivationFactoryPtr> activationFactories = {nullptr,
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(),
+                                                               createActivationFactory(0.1, 0.2)};

 INSTANTIATE_TEST_SUITE_P(
     UnfuseReshapeAndTransposeTestSuite,
diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp
index d4b30edbfd949f..373033fb086a96 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/graph/program.hpp
@@ -229,9 +229,7 @@ struct program {
     // Reverses connection - user becomes dependency.

     void remove_nodes(std::vector<program_node*>& to_remove);
-    void dump_program(const char* stage,
-                      bool with_full_info,
-                      std::function<bool(program_node const&)> const& filter = nullptr) const;
+    void dump_program(const char* stage, bool with_full_info) const;

     const primitives_info& get_primitives_info() const;
     data_types get_inference_precision(const program_node& node) const;
diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/broadcast.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/broadcast.hpp
index 70fcb1ee44cde7..c86c0ed2a7da97 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/primitives/broadcast.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/broadcast.hpp
@@ -4,6 +4,7 @@

 #pragma once

+#include "openvino/core/partial_shape.hpp"
 #include "openvino/op/broadcast.hpp"
 #include "primitive.hpp"

@@ -131,6 +132,8 @@ struct broadcast : public primitive_base<broadcast> {
     /// along which broadcast should happen.
std::vector<uint16_t> broadcast_axes;
 
+    ov::PartialShape output_pshape = ov::PartialShape::dynamic();
+
     size_t hash() const override {
         size_t seed = primitive::hash();
         seed = hash_range(seed, broadcast_axes.begin(), broadcast_axes.end());
@@ -146,7 +149,8 @@ struct broadcast : public primitive_base {
 
         return axes_mapping == rhs_casted.axes_mapping &&
                broadcast_mode == rhs_casted.broadcast_mode &&
-               broadcast_sizes == rhs_casted.broadcast_sizes;
+               broadcast_sizes == rhs_casted.broadcast_sizes &&
+               output_pshape == rhs_casted.output_pshape;
     }
 
     void save(BinaryOutputBuffer& ob) const override {
@@ -156,6 +160,7 @@ struct broadcast : public primitive_base {
         ob << make_data(&broadcast_mode, sizeof(ov::op::BroadcastModeSpec));
         ob << broadcast_sizes;
         ob << broadcast_axes;
+        ob << output_pshape;
     }
 
     void load(BinaryInputBuffer& ib) override {
@@ -165,6 +170,7 @@ struct broadcast : public primitive_base {
         ib >> make_data(&broadcast_mode, sizeof(ov::op::BroadcastModeSpec));
         ib >> broadcast_sizes;
         ib >> broadcast_axes;
+        ib >> output_pshape;
     }
 };
 }  // namespace cldnn
diff --git a/src/plugins/intel_gpu/src/graph/broadcast.cpp b/src/plugins/intel_gpu/src/graph/broadcast.cpp
index e4e5b369dabd0c..7af4434b3c7e06 100644
--- a/src/plugins/intel_gpu/src/graph/broadcast.cpp
+++ b/src/plugins/intel_gpu/src/graph/broadcast.cpp
@@ -90,7 +90,7 @@ std::vector<layout> broadcast_inst::calc_output_layouts(broadcast_node const& /*
         if (input1.is_static()) {
             output_rank = input1.get_dim(0);   // target shape rank is set as second input.
         }
-        output_shapes[0] = ShapeType::dynamic(std::max(static_cast(output_rank), 1));
+        output_shapes[0] = desc->output_pshape.rank().is_static() ? desc->output_pshape : ShapeType::dynamic(std::max(static_cast(output_rank), 1));
     }
 
     format output_format = format::adjust_to_rank(input0_layout.format, output_shapes[0].size());
diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp
index 8abf9200fdbabb..3650fb4d0c4f89 100644
--- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp
+++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp
@@ -174,15 +174,30 @@ kernel_impl_params fully_connected_inst::get_fake_aligned_params(kernel_impl_par
     auto orig_output_layout = orig_impl_param.get_output_layout();
     OPENVINO_ASSERT(orig_input_layout.is_static() && orig_output_layout.is_static(),
                     "in/out layouts should be static for fake alignment!");
-    if (orig_input_layout.format == format::bfyx && orig_output_layout.format == format::bfyx) {
+
+    auto input_shape = orig_input_layout.get_partial_shape().to_shape();
+    auto output_shape = orig_output_layout.get_partial_shape().to_shape();
+
+    // Allow padding only for feature and outermost dimension
+    auto can_apply_fake_alignment = true;
+    if (input_shape.size() == 3)
+        can_apply_fake_alignment &= orig_input_layout.data_padding.lower_size().sizes()[1] == 0 &&
+                                    orig_input_layout.data_padding.upper_size().sizes()[1] == 0;
+
+    if (output_shape.size() == 3)
+        can_apply_fake_alignment &= orig_output_layout.data_padding.lower_size().sizes()[1] == 0 &&
+                                    orig_output_layout.data_padding.upper_size().sizes()[1] == 0;
+
+    if (orig_input_layout.format == format::bfyx && orig_output_layout.format == format::bfyx && can_apply_fake_alignment) {
         auto updated_param = orig_impl_param;
-        auto input_shape = orig_input_layout.get_partial_shape().to_shape();
-        auto input_row_idx = input_shape.size() - 2;
-        auto output_shape = orig_output_layout.get_partial_shape().to_shape();
-        auto output_row_idx = output_shape.size() - 2;
+
+        auto 
batch_size = std::accumulate(input_shape.begin(), + input_shape.end() - 1, + size_t{1}, + std::multiplies()); // Vector by matrix multiplication sometimes works slower if we align it - if (input_shape[input_row_idx] == 1 && output_shape[output_row_idx] == 1 && input_shape[input_shape.size() - 1] >= 1024) { + if (batch_size == 1 && input_shape.back() >= 1024) { return std::move(orig_impl_param); } @@ -190,12 +205,15 @@ kernel_impl_params fully_connected_inst::get_fake_aligned_params(kernel_impl_par if (orig_impl_param.dev_type == cldnn::device_type::integrated_gpu) { auto weights_layout_dt = orig_impl_param.weights_layout.value().data_type; auto is_4bit = weights_layout_dt == data_types::i4 || weights_layout_dt == data_types::u4; - auto is_extra_alignment_needed = output_shape[output_row_idx] >= 256; + auto is_extra_alignment_needed = batch_size >= 256; fake_align_base = is_4bit && is_extra_alignment_needed ? 64 : 16; } - input_shape[input_row_idx] = align_to(input_shape[input_row_idx], fake_align_base); - output_shape[output_row_idx] = align_to(output_shape[output_row_idx], fake_align_base); + std::fill(input_shape.begin(), input_shape.end() - 1, 1); + std::fill(output_shape.begin(), output_shape.end() - 1, 1); + + input_shape[0] = align_to(batch_size, fake_align_base); + output_shape[0] = align_to(batch_size, fake_align_base); updated_param.input_layouts[0] = layout(ov::PartialShape(input_shape), orig_input_layout.data_type, diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp index 4d7e61a7e4eff7..d6d365e0d1f94b 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/mark_shape_of_subgraphs.cpp @@ -3,6 +3,7 @@ // #include "shape_of_inst.h" +#include "read_value_inst.h" #include "reshape_inst.h" #include "eltwise_inst.h" #include "pass_manager.h" @@ -43,6 +44,10 @@ bool mark_shape_of_subgraphs::can_mark_node(const program_node& node) { if (node.has_fused_primitives()) return false; + // read_value may have initializer which is shape_of sub-graph, but read_value itself is not a part of such sub-graph + if (node.is_type()) + return false; + if (node.is_type()) return true; diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 95e6079f48f78c..db67b47cb71d3c 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -14,6 +14,7 @@ #include "resample_inst.h" #include "loop_inst.h" #include "strided_slice_inst.h" +#include "shape_of_inst.h" #include "non_max_suppression_inst.h" #include "experimental_detectron_roi_feature_extractor_inst.hpp" #include "border_inst.h" @@ -407,6 +408,19 @@ static bool can_crop_be_optimized_along_batch(const crop_node& node) { return false; } +static bool can_read_value_be_optimize(const read_value_node& node) { + if (node.get_users().size() == 1) + return true; + + const auto non_shape_of_users_count = std::count_if(node.get_users().begin(), node.get_users().end(), [](const program_node* user) { + return !user->is_type(); + }); + if (non_shape_of_users_count <= 1) + return true; + + return false; +} + static void propagate_padding_to_opt_out_users(program_node& node, cldnn::padding padding_data) { if (padding_data == cldnn::padding()) return; @@ 
-632,10 +646,10 @@ void prepare_buffer_fusing::run(program& p) { // ┌────┴──────┐ // │ Result │ // └───────────┘ - // If read_value here returns virable memory w/o copy, then based on Add-s and Assign execution order we may have different results + // If read_value here returns variable memory w/o copy, then based on Add-s and Assign execution order we may have different results // TODO: Allow optimizations for the case above too. Looks like it can be achieved by more careful // topological sort (i.e. if we ensure that all read_value users are completed before assign is run) - node.can_be_optimized(node.get_users().size() == 1); + node.can_be_optimized(can_read_value_be_optimize(node)); }); } } diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index b9a88e01c4a9e3..d223c2cd55075f 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -470,6 +470,8 @@ void prepare_primitive_fusing::fuse_bias(program &p) { fc_with_bias_prim->compressed_weights = true; fc_with_bias_prim->decompression_scale = desc->decompression_scale; fc_with_bias_prim->decompression_zero_point = desc->decompression_zero_point; + if (desc->decompression_zero_point_scalar.has_value()) + fc_with_bias_prim->decompression_zero_point_scalar = desc->decompression_zero_point_scalar.value(); } auto& new_fc_node = p.get_or_create(fc_with_bias_prim); fuse_bias_f(fc, new_fc_node, bias_node, eltw_node); diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/primitive_onednn_base.h b/src/plugins/intel_gpu/src/graph/impls/onednn/primitive_onednn_base.h index 53657e2ee93bf9..f6d5769b77d218 100644 --- a/src/plugins/intel_gpu/src/graph/impls/onednn/primitive_onednn_base.h +++ b/src/plugins/intel_gpu/src/graph/impls/onednn/primitive_onednn_base.h @@ -95,7 +95,7 @@ struct typed_primitive_onednn_impl : public typed_primitive_impl { } bool is_cpu() const override { return false; } - bool is_onednn() const { return true; } + bool is_onednn() const override { return true; } // Cache blob format: // [ dnnl::primitive_attr ] diff --git a/src/plugins/intel_gpu/src/graph/include/program_dump_graph.h b/src/plugins/intel_gpu/src/graph/include/program_dump_graph.h index 08523f0ad1f976..ca05c678df9718 100644 --- a/src/plugins/intel_gpu/src/graph/include/program_dump_graph.h +++ b/src/plugins/intel_gpu/src/graph/include/program_dump_graph.h @@ -4,6 +4,7 @@ #include "intel_gpu/graph/program.hpp" #include "program_node.h" +#include "primitive_inst.h" #include #include @@ -12,6 +13,7 @@ std::string get_dir_path(const ExecutionConfig& config); void dump_graph_optimized(std::ofstream&, const program&); void dump_graph_processing_order(std::ofstream&, const program&); -void dump_graph_init(std::ofstream&, const program&, std::function const&); -void dump_graph_info(std::ofstream&, const program&, std::function const&); +void dump_graph_init(std::ofstream&, const program&, + std::function(const primitive_id&)> get_primitive_inst = nullptr); +void dump_graph_info(std::ofstream&, const program&); } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/include/read_value_inst.h b/src/plugins/intel_gpu/src/graph/include/read_value_inst.h index a84be19aae2651..7209e8756fbf76 100644 --- a/src/plugins/intel_gpu/src/graph/include/read_value_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/read_value_inst.h @@ -34,11 
+34,8 @@ class typed_primitive_inst : public typed_primitive_inst_base calc_output_layouts(read_value_node const& /*node*/, const kernel_impl_params& impl_param) { auto desc = impl_param.typed_desc(); const auto default_layout = desc->output_layout; - auto out_layout = impl_param.state_layout.value_or(default_layout); - if (out_layout.is_dynamic() && desc->input_size() > 0) { - out_layout = impl_param.get_input_layout(0); - } - return { out_layout }; + + return { impl_param.state_layout.value_or(default_layout) }; } static layout calc_output_layout(const read_value_node& node, kernel_impl_params const& impl_param); diff --git a/src/plugins/intel_gpu/src/graph/loop.cpp b/src/plugins/intel_gpu/src/graph/loop.cpp index ae13b8a08c2de5..c456cff4433306 100644 --- a/src/plugins/intel_gpu/src/graph/loop.cpp +++ b/src/plugins/intel_gpu/src/graph/loop.cpp @@ -803,8 +803,8 @@ void loop_inst::concatenated_memory_mapping::slice_mem(const int64_t num_iterati char* concat_data = reinterpret_cast(concatenated_mem->lock(stream, cldnn::mem_lock_type::read)); auto concate_layout = concatenated_mem->get_layout(); - auto trait = format::traits(concate_layout.format); - if (format::is_blocked(concate_layout.format) || concate_layout.data_padding) { + auto dims = concat_mem_shape.size(); + if (!format::is_default_format(concate_layout.format) || dims == 1 || concate_layout.data_padding) { // BE CAREFUL: ov::reference::split is extremely slow. // If we encounter any case where this code path is executed, we need to optimize it ov::reference::split(concat_data, concat_mem_shape, elem_size, axis, num_iters, pointers_to_data.data()); @@ -819,14 +819,16 @@ void loop_inst::concatenated_memory_mapping::slice_mem(const int64_t num_iterati auto& lb_at_axis = lower_bounds[axis]; auto& ub_at_axis = upper_bounds[axis]; + // Format of concat_layout is invalid here : No mixed order size_t continuous_size = 1; - auto dims_order = trait._order; - auto target_axis = std::find(dims_order.begin(), dims_order.end(), axis); - for (auto iter = target_axis + 1 ; iter != dims_order.end() ; ++iter) { - continuous_size *= ((output_shape.size() > *iter) ? output_shape[*iter] : 1); + size_t inner_axis = axis + 1; + for (auto iter = inner_axis ; iter < dims ; ++iter) { + continuous_size *= ((output_shape.size() > iter) ? 
output_shape[iter] : 1);
         }
+
         auto strides = ov::Strides(lower_bounds.size(), 1);
-        strides[*(target_axis+1)] = continuous_size;
+        if (inner_axis < dims)
+            strides[inner_axis] = continuous_size;
 
         const auto strides_copy_size = elem_size * continuous_size;
         const auto out_last = std::next(out_data, num_iters);
@@ -834,12 +836,9 @@
             auto dst_mem = *out_iter;
             auto slice_ranges = ov::coordinates::slice(concat_mem_shape, lower_bounds, upper_bounds, strides);
             for (const auto& range : slice_ranges) {
-                auto src_index = range.begin_index;
-                for (size_t i = 0; i < range.element_number; src_index += range.step, ++i) {
-                    const auto src_mem = concat_data + src_index * elem_size;
-                    std::memcpy(dst_mem, src_mem, strides_copy_size);
-                    std::advance(dst_mem, strides_copy_size);
-                }
+                const auto src_mem = concat_data + range.begin_index * elem_size;
+                std::memcpy(dst_mem, src_mem, strides_copy_size);
+                std::advance(dst_mem, strides_copy_size);
             }
 
             lb_at_axis += part_length;
diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp
index 75d5bc27d716ac..dc7f797dfd1f1b 100644
--- a/src/plugins/intel_gpu/src/graph/network.cpp
+++ b/src/plugins/intel_gpu/src/graph/network.cpp
@@ -37,6 +37,7 @@
 #include "program_helpers.h"
 #include "to_string_utils.h"
 #include "kernels_cache.hpp"
+#include "program_dump_graph.h"
 
 // TODO: Remove once we have an abstraction for kernels_cache
 #include "kernel_base.h"
@@ -1121,6 +1122,22 @@ void network::execute_impl(const std::vector<event::ptr>& events) {
                                << data_shape_str.str() << std::endl;
     }
 
+    GPU_DEBUG_IF(!debug_config->dump_graphs.empty() && debug_config->is_target_iteration(curr_iter)) {
+        auto get_fixed_str = [](int value, int length = 2) -> std::string {
+            std::ostringstream ss;
+            ss << std::setw(length) << std::setfill('0') << std::to_string(value);
+            return ss.str();
+        };
+        std::string path = get_dir_path(get_config());
+        if (!path.empty()) {
+            std::ofstream ofs(path + "cldnn_program_exec_p" + get_fixed_str(get_program()->get_id()) + "_n" + get_fixed_str(get_id()) +
+                              "_" + get_fixed_str(curr_iter, 5) + ".graph");
+            dump_graph_init(ofs, *get_program(), [&](const primitive_id& id) -> std::shared_ptr<primitive_inst> {
+                return get_primitive(id);
+            });
+        }
+    }
+
     // Store events only in case of OOO queue or enabled Profiling
     auto store_events = is_out_of_order_queue || _enable_profiling;
     if (store_events) {
diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
index 8c3b430e3efe28..e967a5b7f152e9 100644
--- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
+++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
@@ -265,8 +265,30 @@ void primitive_inst::update_shape() {
     }
 
     if (get_node().is_type<read_value>()) {
-        const auto& variable_id = get_node().as<read_value>().get_primitive()->variable_id;
-        auto new_layout = get_network().get_variable(variable_id).get_layout();
+        auto prim = get_node().as<read_value>().get_primitive();
+        const auto& variable_id = prim->variable_id;
+        auto& variable = get_network().get_variable(variable_id);
+        // Initial variable shape is taken from variable itself
+        auto new_layout = variable.get_layout();
+
+        // If the variable is not set and we have an initializer - use its shape as the shape of the variable
+        if (!variable.is_set() && _impl_params->input_layouts.size() == 1) {
+            new_layout = _impl_params->get_input_layout(0);
+        }
+
+        // If we still have a dynamic dimension, which basically means that we don't have an initializer, then 
replace dynamic dims with 0 + if (new_layout.is_dynamic()) { + auto pshape = new_layout.get_partial_shape(); + for (auto& d : pshape) { + if (d.is_dynamic()) { + d = 0; + } + } + new_layout.set_partial_shape(pshape); + } + + variable.set_layout(new_layout); + if (!_impl_params->state_layout.has_value() || _impl_params->state_layout.value() != new_layout) { _impl_params->state_layout = new_layout; input_shape_changed = true; @@ -299,7 +321,7 @@ void primitive_inst::update_shape() { } } if (!subgraph_input_changed) { - GPU_DEBUG_TRACE_DETAIL << id() << ": skip shape_update, because it is in shape_of_subgrap and input shape is not changed\n"; + GPU_DEBUG_TRACE_DETAIL << id() << ": skip shape_update, because it is in shape_of_subgraph and input shape is not changed\n"; reset_shape_change(); return; } @@ -402,20 +424,6 @@ void primitive_inst::update_shape() { get_network().get_variable(desc->variable_id).set_layout(_impl_params->get_output_layout()); _impl_params->state_layout = _impl_params->get_output_layout(); } - - if (get_node().is_type()) { - auto desc = get_node().as().get_primitive(); - if (_impl_params->output_layouts[0].is_dynamic()) { - auto pshape = _impl_params->output_layouts[0].get_partial_shape(); - for (auto& d : pshape) { - if (d.is_dynamic()) { - d = 0; - } - } - _impl_params->output_layouts[0].set_partial_shape(pshape); - } - get_network().get_variable(desc->variable_id).set_layout(_impl_params->get_output_layout()); - } } event::ptr primitive_inst::realloc_if_needed() { @@ -445,10 +453,17 @@ event::ptr primitive_inst::realloc_if_needed() { if (_node->is_type()) return ev; + // read_value/assign nodes are supposed to always use variable memory if (auto stateful_prim = dynamic_cast(this)) { std::string variable_id = stateful_prim->variable_id(); - auto variable = get_network().get_variable(variable_id); + auto& variable = get_network().get_variable(variable_id); variable.set_layout(actual_layout); + GPU_DEBUG_TRACE_DETAIL << id() << ": use variable memory " << variable.get_memory() + << " (size=" << variable.get_memory()->size() << ")" << std::endl; + // For nodes that can be optimized, variable memory is used as output memory + // so there is no need for output memory reallocation + if (can_be_optimized()) + return ev; } // Update output layout with respect to FC's fake alignment diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index dad504cb09e532..78d6b21f39e4f9 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ b/src/plugins/intel_gpu/src/graph/program.cpp @@ -1217,19 +1217,17 @@ void program::remove_nodes(std::vector& to_remove) { // TODO: break this function into number of smaller ones + add per-primitive fields (possibly use // primitive_inst::to_string?) 
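A side note on the read_value shape handling above: when a variable has no initializer, every remaining dynamic dimension of its layout is pinned to 0, so the state starts out as an empty tensor of static rank. A minimal standalone sketch of that replacement loop (the shape values here are illustrative, not taken from the patch):

    #include "openvino/core/partial_shape.hpp"

    ov::PartialShape pshape{-1, 3, -1};  // dynamic dims of an unset variable
    for (auto& d : pshape) {
        if (d.is_dynamic())
            d = 0;  // pin to zero -> empty state tensor of static rank
    }
    // pshape is now {0, 3, 0}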
-void program::dump_program(const char* stage, - bool with_full_info, - std::function const& filter) const { +void program::dump_program(const char* stage, bool with_full_info) const { std::string path = get_dir_path(_config); if (path.empty() || !with_full_info) { return; } std::ofstream graph(path + "cldnn_program_" + std::to_string(prog_id) + "_" + stage + ".graph"); - dump_graph_init(graph, *this, filter); + dump_graph_init(graph, *this); graph.open(path + "cldnn_program_" + std::to_string(prog_id) + "_" + stage + ".info"); - dump_graph_info(graph, *this, filter); + dump_graph_info(graph, *this); graph.open(path + "cldnn_program_" + std::to_string(prog_id) + "_" + stage + ".order"); dump_graph_processing_order(graph, *this); diff --git a/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp b/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp index f8aa9fb1c08c60..0ebbb37ccba31a 100644 --- a/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp +++ b/src/plugins/intel_gpu/src/graph/program_dump_graph.cpp @@ -170,10 +170,10 @@ std::string get_dir_path(const ExecutionConfig& config) { void dump_graph_init(std::ofstream& graph, const program& program, - std::function const& filter) { + std::function(const primitive_id&)> get_primitive_inst) { const std::string invalid_layout_msg = "(invalid layout)"; - const auto dump_mem_info = [&invalid_layout_msg](const program_node* ptr) { + const auto dump_mem_info = [&invalid_layout_msg, &get_primitive_inst](const program_node* ptr) { std::string out = "layout_info: "; if (!ptr->is_valid_output_layout()) { return out + invalid_layout_msg; @@ -185,6 +185,9 @@ void dump_graph_init(std::ofstream& graph, } else { out += " " + out_layout.to_string(); } + if (get_primitive_inst) { + out += "\nshape: " + get_primitive_inst(ptr->id())->get_output_layout().get_partial_shape().to_string(); + } return out; }; @@ -199,7 +202,8 @@ void dump_graph_init(std::ofstream& graph, } auto output_fmts = ptr->get_preferred_output_fmts(); if (!output_fmts.empty()) { - out += "\npreferred_out_fmt"; + out += ((out.empty()) ? 
"" : "\n"); + out += "preferred_out_fmt"; for (auto& fmt : output_fmts) { out += ":" + fmt_to_str(fmt); } @@ -210,9 +214,6 @@ void dump_graph_init(std::ofstream& graph, graph << "digraph cldnn_program {\n"; for (auto& node : program.get_processing_order()) { - if (filter && !filter(*node)) { - continue; - } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpotentially-evaluated-expression" @@ -259,9 +260,6 @@ void dump_graph_init(std::ofstream& graph, graph << "];\n"; for (auto& user : node->get_users()) { - if (filter && !filter(*user)) { - continue; - } bool doubled = true; auto it = user->get_dependencies().begin(); while (it != user->get_dependencies().end()) { @@ -289,10 +287,6 @@ void dump_graph_init(std::ofstream& graph, } for (auto& dep : node->get_dependencies()) { - if (filter && !filter(*dep.first)) { - continue; - } - if (std::find(dep.first->get_users().begin(), dep.first->get_users().end(), node) != dep.first->get_users().end()) { continue; } @@ -318,13 +312,8 @@ void dump_graph_optimized(std::ofstream& graph, const program& program) { close_stream(graph); } -void dump_graph_info(std::ofstream& graph, - const program& program, - std::function const& filter) { +void dump_graph_info(std::ofstream& graph, const program& program) { for (auto& node : program.get_processing_order()) { - if (filter && !filter(*node)) - continue; - dump_full_node(graph, node); graph << std::endl << std::endl; } diff --git a/src/plugins/intel_gpu/src/plugin/ops/broadcast.cpp b/src/plugins/intel_gpu/src/plugin/ops/broadcast.cpp index 3e208bd2c99063..aa764774275e73 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/broadcast.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/broadcast.cpp @@ -90,6 +90,8 @@ static void CreateCommonBroadcastOp(ProgramBuilder& p, const std::shared_ptroutput_pshape = op->get_output_partial_shape(0); + p.add_primitive(*op, broadcast_prim); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index cf6b0e502f92ed..3ea765527accff 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -105,7 +105,6 @@ #include "transformations/op_conversions/convert_prior_box_v8_to_v0.hpp" #include "transformations/op_conversions/convert_shapeof3.hpp" #include "transformations/op_conversions/convert_topk11_downgrade.hpp" -#include "transformations/op_conversions/group_normalization_decomposition.hpp" #include "transformations/op_conversions/eye_decomposition.hpp" #include "transformations/op_conversions/convert_pad12_downgrade.hpp" #include "transformations/convert_precision.hpp" @@ -146,6 +145,35 @@ static bool disable_reduce_decomposition(const std::shared_ptr n } return false; } + +static bool is_non_decompression_multiply(const std::shared_ptr node) { + auto get_single_consumer = [](const std::shared_ptr node) -> std::shared_ptr { + const auto consumers = node->get_output_target_inputs(0); + if (consumers.size() != 1) + return nullptr; + return consumers.begin()->get_node()->shared_from_this(); + }; + + auto consumer = get_single_consumer(node); + if (!consumer) + return true; + + if (ov::is_type(consumer)) { + return false; + } else if (ov::is_type(consumer)) { + consumer = get_single_consumer(consumer); + if (consumer != nullptr && ov::is_type(consumer)) { + return false; + } + } + if (consumer != nullptr && ov::is_type(consumer)) { + consumer = get_single_consumer(consumer); + if 
(consumer != nullptr && ov::is_type(consumer)) {
+            return false;
+        }
+    }
+    return true;
+}
 }  // namespace
 
 namespace ov {
@@ -247,6 +275,8 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
         });
 
         manager.register_pass<ov::pass::MarkDequantizationSubgraph>(ov::element::TypeVector{ov::element::u8, ov::element::u4, ov::element::i4}, true);
+        // Ignore nodes that are not related to FullyConnected and allow ConstantFolding to be applied to them
+        pass_config->set_callback<ov::pass::MarkDequantizationSubgraph>(is_non_decompression_multiply);
 
         const bool keep_precision_sensitive_in_fp32_1 = true;
         const bool convert_input_output_precision = false;
@@ -506,7 +536,6 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
         pass_config->disable();
         pass_config->disable();
         pass_config->disable();
-        pass_config->disable<ov::pass::GroupNormalizationDecomposition>();
 
         pass_config->enable();
diff --git a/src/plugins/intel_gpu/src/plugin/variable_state.cpp b/src/plugins/intel_gpu/src/plugin/variable_state.cpp
index 80a393e506747c..cdd551b5ca82ed 100644
--- a/src/plugins/intel_gpu/src/plugin/variable_state.cpp
+++ b/src/plugins/intel_gpu/src/plugin/variable_state.cpp
@@ -9,6 +9,7 @@
 #include "intel_gpu/plugin/variable_state.hpp"
 #include "intel_gpu/runtime/memory_caps.hpp"
 #include "intel_gpu/runtime/layout.hpp"
+#include "intel_gpu/runtime/debug_configuration.hpp"
 
 #include 
@@ -45,6 +46,7 @@ void VariableState::set() {
 
 void VariableState::set_layout(const cldnn::layout& new_layout) {
     m_layout = new_layout;
+    GPU_DEBUG_TRACE_DETAIL << "Update state layout to " << new_layout.to_short_string() << std::endl;
     update_device_buffer();
 }
diff --git a/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp b/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp
index 55f166c4880015..6866e85220a611 100644
--- a/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp
+++ b/src/plugins/intel_gpu/src/runtime/debug_configuration.cpp
@@ -115,7 +115,8 @@ static void print_help_messages() {
     message_list.emplace_back("OV_GPU_DumpProfilingData", "Enables dump of extended profiling information to specified directory."
                               " Please use OV_GPU_DumpProfilingDataPerIter=1 env variable to collect performance per iteration."
                               " Note: Performance impact may be significant as this option enforces host side sync after each primitive");
-    message_list.emplace_back("OV_GPU_DumpGraphs", "Dump optimized graph");
+    message_list.emplace_back("OV_GPU_DumpGraphs", "1) dump ngraph before and after transformation. 2) dump graph during model compilation. "
+                              "3) dump graph during execution.");
     message_list.emplace_back("OV_GPU_DumpSources", "Dump opencl sources");
     message_list.emplace_back("OV_GPU_DumpLayersPath", "Enable dumping intermediate buffers and set the dest path");
     message_list.emplace_back("OV_GPU_DumpLayers", "Dump intermediate buffers of specified layers only, separated by space."
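For reference, a minimal sketch of the weight pattern that is_non_decompression_multiply() above is meant to leave intact (names and shapes are illustrative, not taken from the patch): a low-precision weight Constant decompressed through Convert and Multiply on its way to MatMul. For such a Multiply the callback returns false, so the node is kept as a runtime decompression step rather than constant-folded:

    #include "openvino/core/model.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/op/convert.hpp"
    #include "openvino/op/matmul.hpp"
    #include "openvino/op/multiply.hpp"
    #include "openvino/op/parameter.hpp"

    std::shared_ptr<ov::Model> make_compressed_weight_matmul() {
        auto act = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
        auto weights = ov::op::v0::Constant::create(ov::element::u8, ov::Shape{4, 4}, {1});
        auto convert = std::make_shared<ov::op::v0::Convert>(weights, ov::element::f32);
        auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{4, 1}, {0.1f});
        // Single-consumer chain Multiply -> MatMul: treated as weight decompression,
        // so the callback excludes this Multiply from ConstantFolding.
        auto mul = std::make_shared<ov::op::v1::Multiply>(convert, scale);
        auto matmul = std::make_shared<ov::op::v0::MatMul>(act, mul);
        return std::make_shared<ov::Model>(ov::NodeVector{matmul}, ov::ParameterVector{act});
    }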
@@ -140,6 +141,7 @@ static void print_help_messages() { message_list.emplace_back("OV_GPU_DisableDynamicImpl", "Disable dynamic implementation"); message_list.emplace_back("OV_GPU_DisableRuntimeBufferFusing", "Disable runtime buffer fusing"); message_list.emplace_back("OV_GPU_DisableMemoryReuse", "Disable memory reuse"); + message_list.emplace_back("OV_GPU_DumpRuntimeMemoryPool", "Dump memory pool contents of each iteration"); message_list.emplace_back("OV_GPU_DisableBuildTimeWeightReorderForDynamicNodes", "Disable build time weight reorder for dynmaic nodes."); message_list.emplace_back("OV_GPU_DisableRuntimeSkipReorder", "Disable runtime skip reorder."); message_list.emplace_back("OV_GPU_DisablePrimitiveFusing", "Disable primitive fusing"); diff --git a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp index 12fc37cac187d6..7696d547ea1c22 100644 --- a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp +++ b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp @@ -7,13 +7,20 @@ #include #include "openvino/core/dimension.hpp" #include "openvino/core/model.hpp" +#include "openvino/core/node_vector.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/read_value.hpp" +#include "openvino/op/shape_of.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/result.hpp" #include "openvino/op/parameter.hpp" #include "openvino/op/matmul.hpp" #include "openvino/op/convert.hpp" #include "openvino/op/concat.hpp" +#include "openvino/op/util/read_value_base.hpp" #include "openvino/pass/make_stateful.hpp" namespace tests { @@ -22,7 +29,9 @@ inline std::shared_ptr make_llm_kv_cache_pattern(ov::Dimension batch ov::Dimension n_heads = ov::Dimension::dynamic(), ov::Dimension n_features = ov::Dimension::dynamic(), ov::element::Type_t element_type = ov::element::f32, - bool stateful = false) { + bool stateful = false, + bool fuse_cache_reorder = false, + bool build_state_initializer = false) { ov::PartialShape kv_cache_size = {batch, n_heads, -1, n_features}; ov::PartialShape new_token_size = {batch, -1, n_heads, n_features}; ov::PartialShape matmul_in_size = {batch, n_heads, -1, -1}; @@ -34,9 +43,37 @@ inline std::shared_ptr make_llm_kv_cache_pattern(ov::Dimension batch auto in_matmul = std::make_shared(element_type, matmul_in_size); in_matmul->set_friendly_name("in_matmul"); + ov::ParameterVector params{in_kv_prev, in_new_token, in_matmul}; + std::shared_ptr concat_input = in_kv_prev; + if (fuse_cache_reorder) { + auto in_beam_idx = std::make_shared(ov::element::i32, ov::PartialShape{batch}); + in_beam_idx->set_friendly_name("beam_idx"); + params.push_back(in_beam_idx); + auto axis = std::make_shared(ov::element::i32, ov::Shape{}, 0); + auto gather = std::make_shared(in_kv_prev, in_beam_idx, axis, 0); + concat_input = gather; + } + + std::shared_ptr state_initializer = nullptr; + if (stateful && build_state_initializer) { + auto shapeof = std::make_shared(in_new_token, ov::element::i32); + + auto indices = std::make_shared(ov::element::i32, ov::Shape{1}, 0); + auto axis = std::make_shared(ov::element::i32, ov::Shape{}, 0); + auto gather = std::make_shared(shapeof, indices, axis, 0); + + auto bcast_value = std::make_shared(element_type, ov::Shape{}, 0.0f); + ov::NodeVector dims = {gather}; + for (size_t i = 1; i < kv_cache_size.size(); i++) { + dims.push_back(std::make_shared(ov::element::i32, 
ov::Shape{1}, static_cast(kv_cache_size[i].get_min_length()))); + } + auto shape = std::make_shared(dims, 0); + state_initializer = std::make_shared(bcast_value, shape); + } + auto transpose_const = ov::op::v0::Constant::create(ov::element::i32, {new_token_size.size()}, {0, 2, 1, 3}); auto transpose = std::make_shared(in_new_token, transpose_const); - auto concat = std::make_shared(ov::OutputVector{in_kv_prev, transpose}, 2); + auto concat = std::make_shared(ov::OutputVector{concat_input, transpose}, 2); auto convert = std::make_shared(concat, element_type); auto kv_present = std::make_shared(convert); kv_present->set_friendly_name("present_key_values"); @@ -44,13 +81,22 @@ inline std::shared_ptr make_llm_kv_cache_pattern(ov::Dimension batch auto matmul_out = std::make_shared(matmul); matmul_out->set_friendly_name("matmul_out"); - ov::ParameterVector params{in_kv_prev, in_new_token, in_matmul}; ov::ResultVector results{kv_present, matmul_out}; auto model = std::make_shared(results, params, "LLM-KV-Cache"); if (stateful) { ov::pass::MakeStateful({{in_kv_prev, kv_present}}).run_on_model(model); } + if (state_initializer) { + for (auto op : model->get_ops()) { + if (auto read_value = std::dynamic_pointer_cast(op)) { + read_value->set_arguments(ov::OutputVector{state_initializer}); + break; + } + } + } + model->validate_nodes_and_infer_types(); + return model; } diff --git a/src/plugins/intel_gpu/tests/functional/behavior/hetero_gpu_query_network.cpp b/src/plugins/intel_gpu/tests/functional/behavior/hetero_gpu_query_network.cpp index fe4ae47541d178..54865d7273a752 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/hetero_gpu_query_network.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/hetero_gpu_query_network.cpp @@ -4,19 +4,16 @@ #include "behavior/plugin/hetero_query_network.hpp" -using namespace HeteroTests; - namespace HeteroTests { - TEST_P(HeteroQueryNetworkTest, HeteroSinglePlugin) { std::string deviceName = GetParam(); RunTest(deviceName); } INSTANTIATE_TEST_CASE_P( - HeteroGpu, - HeteroQueryNetworkTest, - ::testing::Values( - std::string("HETERO:GPU"))); + HeteroGpu, + HeteroQueryNetworkTest, + ::testing::Values( + std::string("HETERO:GPU"))); } // namespace HeteroTests diff --git a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp index 6262c366aec905..c0fcdfbd2e0d9e 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/infer_request.cpp @@ -2,129 +2,100 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include - +#include "common_test_utils/test_common.hpp" +#include "common_test_utils/common_utils.hpp" +#include "common_test_utils/node_builders/activation.hpp" +#include "openvino/core/preprocess/pre_post_process.hpp" #include "openvino/runtime/core.hpp" - -#include #include "ov_models/subgraph_builders.hpp" -#include "functional_test_utils/blob_utils.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" #include "transformations/utils/utils.hpp" -#include "common_test_utils/common_utils.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" - -using namespace ::testing; - -const std::vector inputPrecisions = { - InferenceEngine::Precision::I16, - InferenceEngine::Precision::U16, - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16, - InferenceEngine::Precision::U8, - InferenceEngine::Precision::I8, - InferenceEngine::Precision::I32, - 
InferenceEngine::Precision::U32, - InferenceEngine::Precision::U64, - InferenceEngine::Precision::I64, - // Interpreter backend doesn't implement evaluate method for OP - // InferenceEngine::Precision::FP64, -}; +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/read_concat_split_assign.hpp" +namespace { typedef std::tuple< - InferenceEngine::Precision, // Input/Output Precision - InferenceEngine::Layout, // Input layout - InferenceEngine::Layout, // Output layout - std::vector, // Input Shape + ov::element::Type, // Input/Output type + ov::Shape, // Input Shape std::string> newtworkParams; class InferRequestIOPrecision : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { + virtual public ov::test::SubgraphBaseStaticTest { public: static std::string getTestCaseName(const testing::TestParamInfo &obj); - InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo &info) const override; protected: void SetUp() override; }; std::string InferRequestIOPrecision::getTestCaseName(const testing::TestParamInfo &obj) { - InferenceEngine::Precision netPrecision; - InferenceEngine::Layout inLayout, outLayout; - std::vector shape; + ov::element::Type model_type; + ov::Shape shape; std::string targetDevice; - std::tie(netPrecision, inLayout, outLayout, shape, targetDevice) = obj.param; + std::tie(model_type, shape, targetDevice) = obj.param; std::ostringstream result; const char separator = '_'; - result << "netPRC=" << netPrecision.name() << separator; - result << "inL=" << inLayout << separator; - result << "outL=" << outLayout << separator; + result << "netPRC=" << model_type.get_type_name() << separator; result << "trgDev=" << targetDevice; return result.str(); } void InferRequestIOPrecision::SetUp() { - InferenceEngine::Precision netPrecision; - std::vector shape; - std::tie(netPrecision, inLayout, outLayout, shape, targetDevice) = GetParam(); - inPrc = netPrecision; - outPrc = netPrecision; + ov::element::Type model_type; + ov::Shape shape; + std::tie(model_type, shape, targetDevice) = GetParam(); - float clamp_min = netPrecision.isSigned() ? -5.f : 0.0f; + float clamp_min = model_type.is_signed() ? -5.f : 0.0f; float clamp_max = 5.0f; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(shape))}; + ov::ParameterVector params {std::make_shared(model_type, ov::Shape(shape))}; params[0]->set_friendly_name("Input"); - auto activation = ngraph::builder::makeActivation(params[0], - ngPrc, - ngraph::helpers::ActivationTypes::Clamp, - {}, - {clamp_min, clamp_max}); + auto activation = ov::test::utils::make_activation(params[0], + model_type, + ov::test::utils::ActivationTypes::Clamp, + {}, + {clamp_min, clamp_max}); - function = std::make_shared(ngraph::NodeVector{activation}, params); + function = std::make_shared(ov::NodeVector{activation}, params); } -InferenceEngine::Blob::Ptr InferRequestIOPrecision::GenerateInput(const InferenceEngine::InputInfo &info) const { - bool inPrcSigned = function->get_parameters()[0]->get_element_type().is_signed(); - bool inPrcReal = function->get_parameters()[0]->get_element_type().is_real(); - - int32_t data_start_from = inPrcSigned ? -10 : 0; - uint32_t data_range = 20; - int32_t resolution = inPrcReal ? 
32768 : 1; - - return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), data_range, - data_start_from, - resolution); +TEST_P(InferRequestIOPrecision, Inference) { + run(); } -TEST_P(InferRequestIOPrecision, CompareWithRefs) { - Run(); -} +const std::vector input_types = { + ov::element::i16, + ov::element::u16, + ov::element::f32, + ov::element::f16, + ov::element::u8, + ov::element::i8, + ov::element::i32, + ov::element::u32, + ov::element::u64, + ov::element::i64, + // Interpreter backend doesn't implement evaluate method for OP + // ov::element::f64, +}; INSTANTIATE_TEST_SUITE_P(smoke_GPU_BehaviorTests, InferRequestIOPrecision, ::testing::Combine( - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(InferenceEngine::Layout::ANY), - ::testing::Values(std::vector{1, 50}), + ::testing::ValuesIn(input_types), + ::testing::Values(ov::Shape{1, 50}), ::testing::Values(ov::test::utils::DEVICE_GPU)), InferRequestIOPrecision::getTestCaseName); TEST(TensorTest, smoke_canSetShapeForPreallocatedTensor) { - auto ie = ov::Core(); + auto core = ov::Core(); using namespace ov::preprocess; - auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat()); + auto p = PrePostProcessor(ov::test::utils::make_split_multi_conv_concat()); p.input().tensor().set_element_type(ov::element::i8); p.input().preprocess().convert_element_type(ov::element::f32); auto function = p.build(); - auto exec_net = ie.compile_model(function, ov::test::utils::DEVICE_GPU); + auto exec_net = core.compile_model(function, ov::test::utils::DEVICE_GPU); auto inf_req = exec_net.create_infer_request(); // Check set_shape call for pre-allocated input/output tensors @@ -144,36 +115,36 @@ TEST(TensorTest, smoke_canSetScalarTensor) { params.front()->output(0).get_tensor().set_names({"scalar1"}); std::vector const_shape = {1}; - auto const1 = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{1}, const_shape); + auto const1 = std::make_shared(ov::element::i64, ov::Shape{1}, const_shape); const1->set_friendly_name("Const_1"); const1->output(0).get_tensor().set_names({"const1"}); const1->fill_data(ov::element::i64, 0); - auto unsqueeze1 = std::make_shared(params.front(), const1); + auto unsqueeze1 = std::make_shared(params.front(), const1); - ngraph::ResultVector results{std::make_shared(unsqueeze1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); + ov::ResultVector results{std::make_shared(unsqueeze1)}; + auto model = std::make_shared(results, params); - auto ie = ov::Core(); - auto compiled_model = ie.compile_model(fnPtr, ov::test::utils::DEVICE_GPU); + auto core = ov::Core(); + auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU); auto request = compiled_model.create_infer_request(); double real_data = 1.0; - ov::Tensor input_data(ngraph::element::f64, {}, &real_data); + ov::Tensor input_data(ov::element::f64, {}, &real_data); request.set_tensor("scalar1", input_data); ASSERT_NO_THROW(request.infer()); } TEST(TensorTest, smoke_canSetTensorForDynamicInput) { - auto ie = ov::Core(); + auto core = ov::Core(); using namespace ov::preprocess; - auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat()); + auto p = PrePostProcessor(ov::test::utils::make_split_multi_conv_concat()); p.input().tensor().set_element_type(ov::element::i8); p.input().preprocess().convert_element_type(ov::element::f32); auto function = p.build(); std::map shapes = { {0, ov::PartialShape{-1, -1, -1, -1}} }; 
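    // With all inputs reshaped to fully dynamic PartialShapes, the compiled
    // model below accepts tensors of any concrete 4D shape; the set_tensor
    // and infer calls that follow bind such tensors to the same request.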
function->reshape(shapes); - auto exec_net = ie.compile_model(function, ov::test::utils::DEVICE_GPU); + auto exec_net = core.compile_model(function, ov::test::utils::DEVICE_GPU); auto inf_req = exec_net.create_infer_request(); ov::Tensor t1(ov::element::i8, {1, 4, 20, 20}); @@ -203,7 +174,7 @@ TEST(TensorTest, smoke_canSetTensorForDynamicInput) { TEST(TensorTest, smoke_canReallocateDeviceInputForHostTensor) { auto ov = ov::Core(); using namespace ov::preprocess; - auto p = PrePostProcessor(ngraph::builder::subgraph::makeSplitMultiConvConcat()); + auto p = PrePostProcessor(ov::test::utils::make_split_multi_conv_concat()); p.input().tensor().set_element_type(ov::element::i8); p.input().preprocess().convert_element_type(ov::element::f32); auto function = p.build(); @@ -227,7 +198,7 @@ TEST(VariablesTest, smoke_canSetStateTensor) { const ov::Shape virable_shape = {1, 3, 2, 4}; const ov::Shape input_shape = {1, 3, 2, 4}; const ov::element::Type et = ov::element::f16; - auto model = ngraph::builder::subgraph::makeReadConcatSplitAssign(input_shape, et); + auto model = ov::test::utils::make_read_concat_split_assign(input_shape, et); auto compiled_model = ov.compile_model(model, ov::test::utils::DEVICE_GPU); auto request = compiled_model.create_infer_request(); @@ -243,3 +214,4 @@ TEST(VariablesTest, smoke_canSetStateTensor) { ASSERT_NO_THROW(request.infer()); } +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp b/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp index ac4d17fb7739e2..3d2882924aafda 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp @@ -2,24 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include - -#include "openvino/runtime/core.hpp" - -#include -#include "shared_test_classes/base/layer_test_utils.hpp" #include "base/ov_behavior_test_utils.hpp" -#include "functional_test_utils/ov_plugin_cache.hpp" - -using namespace ::testing; +#include "openvino/runtime/core.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +namespace { using params = std::tuple; -class InferencePrecisionTests : public testing::WithParamInterface, - virtual public LayerTestsUtils::LayerTestsCommon { +class InferencePrecisionTests : public ::testing::TestWithParam { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { ov::element::Type model_precision; @@ -33,7 +23,7 @@ class InferencePrecisionTests : public testing::WithParamInterface, TEST_P(InferencePrecisionTests, smoke_canSetInferencePrecisionAndInfer) { SKIP_IF_CURRENT_TEST_IS_DISABLED() - auto core = ov::test::utils::PluginCache::get().core(); + auto core = ov::test::utils::PluginCache::get().core(); ov::element::Type model_precision; ov::element::Type inference_precision; std::tie(model_precision, inference_precision) = GetParam(); @@ -65,7 +55,7 @@ TEST(ExecutionModeTest, SetCompileGetInferPrecisionAndExecMode) { ov::Core core; core.set_property(ov::test::utils::DEVICE_GPU, ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)); - auto model = ngraph::builder::subgraph::makeConvPoolRelu(); + auto model = ov::test::utils::make_conv_pool_relu(); { auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32)); ASSERT_EQ(ov::hint::ExecutionMode::PERFORMANCE, 
compiled_model.get_property(ov::hint::execution_mode)); @@ -84,3 +74,4 @@ TEST(ExecutionModeTest, SetCompileGetInferPrecisionAndExecMode) { ASSERT_EQ(ov::element::f16, compiled_model.get_property(ov::hint::inference_precision)); } } +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp b/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp index d502850e367027..9572a128e95424 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/memory_dyn_batch.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/test_common.hpp" +#include "common_test_utils/common_utils.hpp" +#include "common_test_utils/test_constants.hpp" +#include "functional_test_utils/skip_tests_config.hpp" +#include "functional_test_utils/ov_plugin_cache.hpp" #include "openvino/core/partial_shape.hpp" -#include "openvino/opsets/opset8.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/runtime/infer_request.hpp" #include "openvino/runtime/core.hpp" -#include "ov_models/subgraph_builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "functional_test_utils/skip_tests_config.hpp" -#include "functional_test_utils/ov_plugin_cache.hpp" -#include "common_test_utils/common_utils.hpp" - -#include - -#include - -using namespace ov::test; +#include "openvino/op/add.hpp" +namespace { using MemoryDynamicBatchParams = std::tuple< ov::PartialShape, // Partial shape for network initialization ov::Shape, // Actual shape to be passed to inference request @@ -57,9 +52,9 @@ class MemoryDynamicBatch : public ::testing::Test, infer_request = compiled_model.create_infer_request(); } - static std::shared_ptr build_model(ElementType precision, const ov::PartialShape& shape) { - auto param = std::make_shared(precision, shape); - const ov::op::util::VariableInfo variable_info { shape, precision, "v0" }; + static std::shared_ptr build_model(ov::element::Type type, const ov::PartialShape& shape) { + auto param = std::make_shared(type, shape); + const ov::op::util::VariableInfo variable_info { shape, type, "v0" }; auto variable = std::make_shared(variable_info); auto read_value = std::make_shared(param, variable); auto add = std::make_shared(read_value, param); @@ -169,3 +164,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_MemoryDynamicBatch, MemoryDynamicBatch, ::testing::ValuesIn(iterations_num), ::testing::Values(ov::test::utils::DEVICE_GPU)), MemoryDynamicBatch::get_test_case_name); +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp index 29b80e9759df2a..ceff31a121913c 100644 --- a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_concurrency_tests.cpp @@ -2,16 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "openvino/runtime/core.hpp" - -#include -#include -#include +#include "common_test_utils/test_common.hpp" +#include "functional_test_utils/plugin_cache.hpp" #include "ov_models/subgraph_builders.hpp" #include "functional_test_utils/blob_utils.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" @@ -20,19 +13,26 @@ #include "openvino/runtime/intel_gpu/properties.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include 
"common_test_utils/data_utils.hpp" - -using namespace ::testing; - +#include "openvino/runtime/core.hpp" +#include "openvino/runtime/infer_request.hpp" +#include "openvino/runtime/compiled_model.hpp" +#include "functional_test_utils/ov_plugin_cache.hpp" +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp" +#include "common_test_utils/subgraph_builders/detection_output.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" + +namespace { using ConcurrencyTestParams = std::tuple; // number of requests class OVConcurrencyTest : public ov::test::TestsCommon, - public testing::WithParamInterface { + public testing::WithParamInterface { void SetUp() override { std::tie(num_streams, num_requests) = this->GetParam(); - fn_ptrs = {ngraph::builder::subgraph::makeSplitMultiConvConcat(), - ngraph::builder::subgraph::makeMultiSingleConv(), - ngraph::builder::subgraph::makeTIwithLSTMcell()}; + fn_ptrs = {ov::test::utils::make_split_multi_conv_concat(), + ov::test::utils::make_multi_single_conv(), + ov::test::utils::make_ti_with_lstm_cell()}; }; public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { @@ -43,7 +43,7 @@ class OVConcurrencyTest : public ov::test::TestsCommon, } void execute(bool is_caching_test = false) { - auto ie = ov::Core(); + auto core = ov::test::utils::PluginCache::get().core(); std::string cacheFolderName; if (is_caching_test) { @@ -53,80 +53,67 @@ class OVConcurrencyTest : public ov::test::TestsCommon, ov::test::utils::removeFilesWithExt(cacheFolderName, "blob"); ov::test::utils::removeFilesWithExt(cacheFolderName, "cl_cache"); ov::test::utils::removeDir(cacheFolderName); - ie.set_property(ov::cache_dir(cacheFolderName)); - ie.set_property(ov::intel_gpu::enable_loop_unrolling(false)); + core->set_property(ov::cache_dir(cacheFolderName)); + core->set_property(ov::test::utils::DEVICE_GPU, ov::intel_gpu::enable_loop_unrolling(false)); } - ov::ResultVector outputs; - std::vector irs; - std::vector> ref; - std::vector outElementsCount; + std::vector, ov::InferRequest>> irs; + std::vector irs_ref; for (size_t i = 0; i < fn_ptrs.size(); ++i) { auto fn = fn_ptrs[i]; - ov::CompiledModel exec_net; - if (is_caching_test) { { - auto _dummy_exec_net = ie.compile_model(fn_ptrs[i], ov::test::utils::DEVICE_GPU, + auto _dummy_exec_net = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::num_streams(ov::streams::Num(num_streams)), ov::hint::inference_precision(ov::element::f32)); } { - exec_net = ie.compile_model(fn_ptrs[i], ov::test::utils::DEVICE_GPU, + exec_net = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::num_streams(ov::streams::Num(num_streams)), ov::hint::inference_precision(ov::element::f32)); } } else { - exec_net = ie.compile_model(fn_ptrs[i], ov::test::utils::DEVICE_GPU, + exec_net = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::num_streams(ov::streams::Num(num_streams)), ov::hint::inference_precision(ov::element::f32)); } - auto output = fn_ptrs[i]->get_results().at(0); - for (size_t j = 0; j < num_streams * num_requests; j++) { - outputs.push_back(output); - auto inf_req = exec_net.create_infer_request(); - irs.push_back(inf_req); + irs.push_back({fn, inf_req}); + + auto compiled_model_ref = core->compile_model(fn, ov::test::utils::DEVICE_TEMPLATE); + auto inf_req_ref = compiled_model_ref.create_infer_request(); + irs_ref.push_back(inf_req_ref); - std::vector> inputs; - for (size_t param_idx = 0; param_idx < 
-                    auto input = fn_ptrs[i]->get_parameters().at(param_idx);
+                std::vector input_tensors;
+                for (size_t param_idx = 0; param_idx < fn->get_parameters().size(); ++param_idx) {
+                    auto input = fn->get_parameters().at(param_idx);
                     auto tensor = ov::test::utils::create_and_fill_tensor(input->get_element_type(), input->get_shape());
                     inf_req.set_tensor(input, tensor);
-
-                    const auto in_tensor = inf_req.get_tensor(input);
-                    const auto tensorSize = in_tensor.get_byte_size();
-                    const auto inBlobBuf = static_cast(in_tensor.data());
-                    std::vector inData(inBlobBuf, inBlobBuf + tensorSize);
-                    inputs.emplace_back(inData);
+                    inf_req_ref.set_tensor(input, tensor);
                 }
-
-                auto reOutData = ngraph::helpers::interpreterFunction(fn_ptrs[i], inputs).front().second;
-                ref.push_back(reOutData);
-                outElementsCount.push_back(ov::shape_size(fn_ptrs[i]->get_output_shape(0)));
+                inf_req_ref.infer();
             }
         }
 
         const int niter = 10;
         for (int i = 0; i < niter; i++) {
             for (auto ir : irs) {
-                ir.start_async();
+                ir.second.start_async();
             }
 
             for (auto ir : irs) {
-                ir.wait();
+                ir.second.wait();
             }
         }
 
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         for (size_t i = 0; i < irs.size(); ++i) {
-            const auto &refBuffer = ref[i].data();
-            ASSERT_EQ(outElementsCount[i], irs[i].get_tensor(outputs[i]).get_size());
-            FuncTestUtils::compareRawBuffers(irs[i].get_tensor(outputs[i]).data(),
-                                             reinterpret_cast(refBuffer), outElementsCount[i],
-                                             outElementsCount[i],
-                                             thr);
+            // TODO now it compares only 1st output
+            // When CVS-126856 is fixed, update to compare all outputs
+            auto output = irs[i].first->get_results().at(0);
+            auto out = irs[i].second.get_tensor(output);
+            auto out_ref = irs_ref[i].get_tensor(output);
+            ov::test::utils::compare(out_ref, out);
         }
 
         if (is_caching_test) {
@@ -139,15 +126,15 @@ class OVConcurrencyTest : public ov::test::TestsCommon,
 protected:
     size_t num_streams;
     size_t num_requests;
-    std::vector> fn_ptrs;
+    std::vector> fn_ptrs;
 };
 
 TEST_P(OVConcurrencyTest, canInferTwoExecNets) {
-    this->execute(false);
+    execute(false);
 }
 
 TEST_P(OVConcurrencyTest, canInferTwoExecNets_cached) {
-    this->execute(true);
+    execute(true);
 }
 
 const std::vector num_streams{ 1, 2 };
@@ -159,12 +146,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_RemoteTensor, OVConcurrencyTest,
     OVConcurrencyTest::getTestCaseName);
 
 TEST(canSwapTensorsBetweenInferRequests, inputs) {
-    std::vector> ref;
+    std::vector ref;
     std::vector input_tensors;
-    auto fn = ngraph::builder::subgraph::makeSplitMultiConvConcat();
+    auto fn = ov::test::utils::make_split_multi_conv_concat();
 
-    auto ie = ov::Core();
-    auto compiled_model = ie.compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
+    auto core = ov::test::utils::PluginCache::get().core();
+    auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
 
     const int infer_requests_num = 2;
     ov::InferRequest infer_request1 = compiled_model.create_infer_request();
@@ -174,20 +161,16 @@ TEST(canSwapTensorsBetweenInferRequests, inputs) {
     input_tensors.push_back(infer_request2.get_input_tensor());
 
     auto calc_ref_results = [&](const ov::Tensor& tensor){
-        const auto tensor_size = tensor.get_byte_size();
-        const auto in_blob_buf = static_cast(tensor.data());
-        std::vector inData(in_blob_buf, in_blob_buf + tensor_size);
-        auto ref_out_data = ngraph::helpers::interpreterFunction(fn, {inData}).front().second;
-        ref.push_back(ref_out_data);
-    };
+        auto compiled_model_ref = core->compile_model(fn, ov::test::utils::DEVICE_TEMPLATE);
+        auto inf_req_ref = compiled_model_ref.create_infer_request();
+
+        auto input = fn->input(0);
+        inf_req_ref.set_tensor(input, tensor);
+        inf_req_ref.infer();
 
-    auto compare_results = [&](ov::Tensor& result, const uint8_t* refResult) {
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
-        ASSERT_EQ(ov::shape_size(fn->get_output_shape(0)), result.get_size());
-        FuncTestUtils::compareRawBuffers(result.data(),
-                                         reinterpret_cast(refResult), ov::shape_size(fn->get_output_shape(0)),
-                                         ov::shape_size(fn->get_output_shape(0)),
-                                         thr);
+        auto output = fn->get_result();
+        auto out_ref = inf_req_ref.get_tensor(output);
+        ref.push_back(out_ref);
     };
 
     for (int32_t i = 0; i < infer_requests_num; i++) {
@@ -207,7 +190,7 @@ TEST(canSwapTensorsBetweenInferRequests, inputs) {
         } else {
             iter1++;
             ov::Tensor output_tensor = infer_request1.get_output_tensor();
-            compare_results(output_tensor, ref[iter1 % 2].data());
+            ov::test::utils::compare(ref[iter1 % 2], output_tensor);
             if (iter1 < niter_limit) {
                 infer_request1.set_input_tensor(input_tensors[(iter1 + 1) % 2]);
                 infer_request1.start_async();
@@ -221,7 +204,7 @@ TEST(canSwapTensorsBetweenInferRequests, inputs) {
         } else {
             iter2++;
             ov::Tensor output_tensor = infer_request2.get_output_tensor();
-            compare_results(output_tensor, ref[(iter2 + 1) % 2].data());
+            ov::test::utils::compare(ref[(iter2 + 1) % 2], output_tensor);
             if (iter2 < niter_limit) {
                 infer_request2.set_input_tensor(input_tensors[iter2 % 2]);
                 infer_request2.start_async();
@@ -239,10 +222,10 @@ TEST(canSwapTensorsBetweenInferRequests, inputs) {
 }
 
 TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) {
-    auto fn = ngraph::builder::subgraph::makeDetectionOutput(ngraph::element::Type_t::f32);
+    auto fn = ov::test::utils::make_detection_output(ov::element::f32);
 
-    auto ie = ov::Core();
-    auto compiled_model = ie.compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
+    auto core = ov::test::utils::PluginCache::get().core();
+    auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
 
     ov::InferRequest infer_request1 = compiled_model.create_infer_request();
     ov::InferRequest infer_request2 = compiled_model.create_infer_request();
@@ -278,10 +261,10 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, usmHostIsNotChanged) {
 }
 
 TEST(smoke_InferRequestDeviceMemoryAllocation, canSetSystemHostTensor) {
-    auto fn = ngraph::builder::subgraph::makeDetectionOutput(ngraph::element::Type_t::f32);
+    auto fn = ov::test::utils::make_detection_output(ov::element::f32);
 
-    auto ie = ov::Core();
-    auto compiled_model = ie.compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
+    auto core = ov::test::utils::PluginCache::get().core();
+    auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
 
     ov::InferRequest infer_request1 = compiled_model.create_infer_request();
     ov::InferRequest infer_request2 = compiled_model.create_infer_request();
@@ -301,13 +284,13 @@ TEST(smoke_InferRequestDeviceMemoryAllocation, canSetSystemHostTensor) {
 }
 
 TEST(canSwapTensorsBetweenInferRequests, outputs) {
-    std::vector> ref;
+    std::vector ref;
     std::vector input_tensors;
     std::vector output_tensors;
-    auto fn = ngraph::builder::subgraph::makeSplitMultiConvConcat();
+    auto fn = ov::test::utils::make_split_multi_conv_concat();
 
-    auto ie = ov::Core();
-    auto compiled_model = ie.compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
+    auto core = ov::test::utils::PluginCache::get().core();
+    auto compiled_model = core->compile_model(fn, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f32));
 
     const int infer_requests_num = 2;
     ov::InferRequest infer_request1 = compiled_model.create_infer_request();
@@ -319,20 +302,16 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
     output_tensors.push_back(infer_request2.get_output_tensor());
 
     auto calc_ref_results = [&](const ov::Tensor& tensor){
-        const auto tensor_size = tensor.get_byte_size();
-        const auto in_blob_buf = static_cast(tensor.data());
-        std::vector inData(in_blob_buf, in_blob_buf + tensor_size);
-        auto ref_out_data = ngraph::helpers::interpreterFunction(fn, {inData}).front().second;
-        ref.push_back(ref_out_data);
-    };
+        auto compiled_model_ref = core->compile_model(fn, ov::test::utils::DEVICE_TEMPLATE);
+        auto inf_req_ref = compiled_model_ref.create_infer_request();
+
+        auto input = fn->input(0);
+        inf_req_ref.set_tensor(input, tensor);
+        inf_req_ref.infer();
 
-    auto compare_results = [&](ov::Tensor& result, const uint8_t* refResult) {
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
-        ASSERT_EQ(ov::shape_size(fn->get_output_shape(0)), result.get_size());
-        FuncTestUtils::compareRawBuffers(result.data(),
-                                         reinterpret_cast(refResult), ov::shape_size(fn->get_output_shape(0)),
-                                         ov::shape_size(fn->get_output_shape(0)),
-                                         thr);
+        auto output = fn->get_result();
+        auto out_ref = inf_req_ref.get_tensor(output);
+        ref.push_back(out_ref);
     };
 
     for (int32_t i = 0; i < infer_requests_num; i++) {
@@ -352,7 +331,7 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
         } else {
             iter1++;
             ov::Tensor output_tensor = infer_request1.get_output_tensor();
-            compare_results(output_tensor, ref[0].data());
+            ov::test::utils::compare(ref[0], output_tensor);
             if (iter1 < niter_limit) {
                 infer_request1.set_output_tensor(output_tensors[(iter1 + 1) % 2]);
                 infer_request1.start_async();
@@ -366,7 +345,7 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
         } else {
             iter2++;
             ov::Tensor output_tensor = infer_request2.get_output_tensor();
-            compare_results(output_tensor, ref[1].data());
+            ov::test::utils::compare(ref[1], output_tensor);
             if (iter2 < niter_limit) {
                 infer_request2.set_output_tensor(output_tensors[iter2 % 2]);
                 infer_request2.start_async();
@@ -382,3 +361,4 @@ TEST(canSwapTensorsBetweenInferRequests, outputs) {
         infer_request2.wait();
     }
 }
+} // namespace
\ No newline at end of file
diff --git a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp
index 9f4c18ef5d2ce4..7e479025dab3da 100644
--- a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp
+++ b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_batch_shape_tests.cpp
@@ -2,42 +2,42 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "openvino/runtime/core.hpp"
-#include
-#include "common_test_utils/common_utils.hpp"
+#include "common_test_utils/test_common.hpp"
 #include "common_test_utils/file_utils.hpp"
 #include "functional_test_utils/skip_tests_config.hpp"
 #include "functional_test_utils/ov_plugin_cache.hpp"
+#include "openvino/runtime/core.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
+#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
 
-using namespace ::testing;
-using namespace ov::test;
+namespace {
+using ov::test::InputShape;
 
 using OVDynamicBatchParams = std::tuple<
-    std::vector,  // dynamic and static case sizes
-    ElementType,  // Network precision
+    std::vector,  // dynamic and static case sizes
+    ov::element::Type,  // Model type
     std::string,  // Device name
     ov::AnyMap  // Config
 >;
 
-class OVDynamicBatchShape_Tests : public WithParamInterface,
-                                  virtual public ov::test::SubgraphBaseTest {
+class OVDynamicBatchShape_Tests : public ::testing::WithParamInterface,
+                                  virtual public ov::test::SubgraphBaseTest {
 public:
-    static std::string getTestCaseName(TestParamInfo obj) {
-        std::vector inputShapes;
-        ElementType netPrecision;
-        std::string targetDevice;
+    static std::string getTestCaseName(::testing::TestParamInfo obj) {
+        std::vector input_shapes;
+        ov::element::Type model_type;
+        std::string target_device;
         ov::AnyMap configuration;
-        std::tie(inputShapes, netPrecision, targetDevice, configuration) = obj.param;
+        std::tie(input_shapes, model_type, target_device, configuration) = obj.param;
 
         std::ostringstream result;
         result << "IS=";
-        for (const auto& shape : inputShapes) {
+        for (const auto& shape : input_shapes) {
             result << ov::test::utils::partialShape2str({ shape.first }) << "_";
         }
         result << "TS=";
-        for (const auto& shape : inputShapes) {
+        for (const auto& shape : input_shapes) {
             result << "(";
             if (!shape.second.empty()) {
                 for (const auto& itr : shape.second) {
@@ -46,8 +46,8 @@ class OVDynamicBatchShape_Tests : public WithParamInterface
     void SetUp() override {
         std::vector input_shape;
-        std::tie(inputShape, netPrecision, targetDevice, configuration) = this->GetParam();
+        std::tie(input_shape, model_type, targetDevice, configuration) = this->GetParam();
 
-        init_input_shapes(inputShape);
+        init_input_shapes(input_shape);
 
         //TODO: think how we can switch between several input topologies in the future
-        // function = ngraph::builder::subgraph::makeSplitConvConcat(inputShape.front().first.get_min_shape(), netPrecision);
-        function = ngraph::builder::subgraph::makeSplitMultiConvConcat(inputShape.front().first.get_min_shape(), netPrecision);
+        // function = ov::test::utils::make_split_conv_concat(input_shape.front().first.get_min_shape(), model_type);
+        function = ov::test::utils::make_split_multi_conv_concat(input_shape.front().first.get_min_shape(), model_type);
 
         // make topology dynamic
         std::map dynShape;
-        dynShape["input_tensor"] = inputShape.front().first;
+        dynShape["input_tensor"] = input_shape.front().first;
         function->reshape(dynShape);
     }
 
-    std::shared_ptr src_func;
-    // std::map configuration;
-    std::vector inputShape;
-    ElementType netPrecision;
+    ov::element::Type model_type;
 };
 
 TEST_P(OVDynamicBatchShape_Tests, InferDynamicBatchBound) {
@@ -94,7 +92,7 @@ TEST_P(OVDynamicBatchShape_Tests, InferDynamicBatchBound_cached) {
     std::string cacheFolderName;
     {
         std::stringstream ss;
-        ss << "InferDynamicBatchBound_cached_" << netPrecision << "_" << targetDevice;
+        ss << "InferDynamicBatchBound_cached_" << model_type << "_" << targetDevice;
         cacheFolderName = ss.str();
 
         ov::test::utils::removeFilesWithExt(cacheFolderName, "blob");
@@ -116,7 +114,6 @@ TEST_P(OVDynamicBatchShape_Tests, InferDynamicBatchBound_cached) {
     }
 }
 
-namespace {
 auto config = []() {
     return ov::AnyMap{};
 };
@@ -125,27 +122,27 @@ auto hetero_config = []() {
     return ov::AnyMap{{"TARGET_FALLBACK", ov::test::utils::DEVICE_GPU}};
 };
 
-const std::vector inputShapes = {
+const std::vector input_shapes = {
     { { {1, 19}, 4, 20, 20}, { {1, 4, 20, 20}, {7, 4, 20, 20}, {17, 4, 20, 20} } }
 };
 
-const std::vector netPrecisions = {
-    ElementType::f16,
-    ElementType::f32
+const std::vector model_types = {
+    ov::element::f16,
+    ov::element::f32
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_GPU_DynBatch, OVDynamicBatchShape_Tests,
     ::testing::Combine(
-        ::testing::Values(inputShapes),
-        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(input_shapes),
+        ::testing::ValuesIn(model_types),
         ::testing::Values(ov::test::utils::DEVICE_GPU),
         ::testing::Values(config())),
     OVDynamicBatchShape_Tests::getTestCaseName);
 
 INSTANTIATE_TEST_SUITE_P(smoke_GPU_DynBatchHetero, OVDynamicBatchShape_Tests,
     ::testing::Combine(
-        ::testing::Values(inputShapes),
-        ::testing::ValuesIn(netPrecisions),
+        ::testing::Values(input_shapes),
+        ::testing::ValuesIn(model_types),
         ::testing::Values(ov::test::utils::DEVICE_HETERO),
         ::testing::Values(hetero_config())),
     OVDynamicBatchShape_Tests::getTestCaseName);
diff --git a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_huge_input_range.cpp b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_huge_input_range.cpp
index 62eb867df971fa..f852a378f26048 100644
--- a/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_huge_input_range.cpp
+++ b/src/plugins/intel_gpu/tests/functional/dynamic_tests/gpu_dyn_huge_input_range.cpp
@@ -1,18 +1,16 @@
 // Copyright (C) 2022 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-
+#include "common_test_utils/test_constants.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/test_enums.hpp"
+#include "ov_models/builders.hpp"
 #include "shared_test_classes/single_layer/strided_slice.hpp"
 #include "shared_test_classes/single_layer/shape_of.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/test_constants.hpp"
-#include "common_test_utils/ov_tensor_utils.hpp"
-
-using namespace InferenceEngine;
-using namespace ov::test;
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 
 struct StridedSliceParams {
     std::vector begin;
@@ -28,22 +26,21 @@ struct StridedSliceParams {
 
 typedef std::tuple<
     InputShape,  // Input shapes
    StridedSliceParams,
-    ElementType,  // Element type
-    std::vector,  // begin/end/stride input type
+    ov::element::Type,  // Element type
+    std::vector,  // begin/end/stride input type
     std::map  // Additional network configuration
 > StridedSliceLayerParamSet;
 
 class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface,
-                                     virtual public SubgraphBaseTest {
+                                     virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo& obj) {
         InputShape shapes;
         StridedSliceParams params;
-        ElementType elementType;
-        std::vector restInputType;
-        TargetDevice targetDevice;
+        ov::element::Type model_type;
+        std::vector restInputType;
         std::map additionalConfig;
-        std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param;
+        std::tie(shapes, params, model_type, restInputType, additionalConfig) = obj.param;
 
         std::ostringstream results;
         results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
@@ -51,7 +48,7 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
-    void generate_inputs(const std::vector& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector& targetInputStaticShapes) override {
         inputs.clear();
         const auto& funcInputs = function->inputs();
         ov::Tensor tensor;
@@ -83,7 +80,7 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
 ();
@@ -94,7 +91,7 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
 ();
@@ -105,7 +102,7 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
 ();
@@ -122,7 +119,7 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
     std::vector begin;
     std::vector end;
     std::vector stride;
-    std::vector restInputType;
+    std::vector restInputType;
     size_t inferRequestNum = 0;
 
     void SetUp() override {
@@ -139,11 +136,11 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
         std::vector inputShapes;
         inputShapes.push_back(shapes);
-        if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER)
+        if (restInputType[0] == ov::test::utils::InputLayerType::PARAMETER)
             inputShapes.push_back(InputShape({static_cast(begin.size())}, std::vector(shapes.second.size(), {begin.size()})));
-        if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER)
+        if (restInputType[1] == ov::test::utils::InputLayerType::PARAMETER)
             inputShapes.push_back(InputShape({static_cast(end.size())}, std::vector(shapes.second.size(), {end.size()})));
-        if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER)
+        if (restInputType[2] == ov::test::utils::InputLayerType::PARAMETER)
             inputShapes.push_back(InputShape({static_cast(stride.size())}, std::vector(shapes.second.size(), {stride.size()})));
 
         init_input_shapes(inputShapes);
@@ -151,67 +148,65 @@ class DynamicShapeHugeRangeGPUTest : public testing::WithParamInterface
         ov::ParameterVector params{std::make_shared(inType, inputDynamicShapes.front())};
 
         std::shared_ptr beginInput, endInput, strideInput;
-        if (restInputType[0] == ngraph::helpers::InputLayerType::PARAMETER) {
-            auto beginNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()});
+        if (restInputType[0] == ov::test::utils::InputLayerType::PARAMETER) {
+            auto beginNode = std::make_shared(ov::element::i64, ov::Shape{begin.size()});
             params.push_back(beginNode);
             beginInput = beginNode;
         } else {
-            beginInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin);
+            beginInput = std::make_shared(ov::element::i64, ov::Shape{begin.size()}, begin);
         }
 
-        if (restInputType[1] == ngraph::helpers::InputLayerType::PARAMETER) {
-            auto endNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()});
+        if (restInputType[1] == ov::test::utils::InputLayerType::PARAMETER) {
+            auto endNode = std::make_shared(ov::element::i64, ov::Shape{end.size()});
             params.push_back(endNode);
             endInput = endNode;
         } else {
-            endInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end);
+            endInput = std::make_shared(ov::element::i64, ov::Shape{end.size()}, end);
         }
 
-        if (restInputType[2] == ngraph::helpers::InputLayerType::PARAMETER) {
-            auto strideNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{stride.size()});
+        if (restInputType[2] == ov::test::utils::InputLayerType::PARAMETER) {
+            auto strideNode = std::make_shared(ov::element::i64, ov::Shape{stride.size()});
             params.push_back(strideNode);
             strideInput = strideNode;
         } else {
-            strideInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{stride.size()}, stride);
+            strideInput = std::make_shared(ov::element::i64, ov::Shape{stride.size()}, stride);
         }
 
-        auto stridedSliceOp = std::make_shared(params[0], beginInput, endInput, strideInput, ssParams.beginMask, ssParams.endMask,
+        auto stridedSliceOp = std::make_shared(params[0], beginInput, endInput, strideInput, ssParams.beginMask, ssParams.endMask,
                                                ssParams.newAxisMask, ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask);
 
-        auto shapeOfOp = std::make_shared(stridedSliceOp, ov::element::Type_t::i32);
+        auto shapeOfOp = std::make_shared(stridedSliceOp, ov::element::i32);
 
-        ngraph::ResultVector results;
+        ov::ResultVector results;
         for (size_t i = 0; i < shapeOfOp->get_output_size(); i++) {
-            results.push_back(std::make_shared(shapeOfOp->output(i)));
+            results.push_back(std::make_shared(shapeOfOp->output(i)));
        }
 
-        function = std::make_shared(results, params, "result");
+        function = std::make_shared(results, params, "result");
     }
 };
 
-TEST_P(DynamicShapeHugeRangeGPUTest, CompareWithRefs) {
+TEST_P(DynamicShapeHugeRangeGPUTest, Inference) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     run();
 }
 
-namespace {
-
 std::map emptyAdditionalConfig;
 
-const std::vector inputPrecisions = {
-    ElementType::f32
+const std::vector model_types = {
+    ov::element::f32
 };
 
-const std::vector> restInputTypes = {
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT},
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT},
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT},
+const std::vector> restInputTypes = {
+    {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT},
+    {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
+    {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
 };
 
 const std::vector inputShapesDynamic2D_excessive_uppper_boundary = {
@@ -227,9 +222,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic_2D_excessive_uppper_bound
     ::testing::Combine(
         ::testing::ValuesIn(inputShapesDynamic2D_excessive_uppper_boundary),
         ::testing::ValuesIn(paramsPlain2D_excessive_uppper_boundary),
-        ::testing::ValuesIn(inputPrecisions),
+        ::testing::ValuesIn(model_types),
         ::testing::Values(restInputTypes[0]),
         ::testing::Values(emptyAdditionalConfig)),
     DynamicShapeHugeRangeGPUTest::getTestCaseName);
 
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/cldnn_remote_blob_tests.cpp
similarity index 98%
rename from src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp
rename to src/plugins/intel_gpu/tests/functional/remote_tensor_tests/cldnn_remote_blob_tests.cpp
index ee3f484927f48b..15efd758849c42 100644
--- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/cldnn_remote_blob_tests.cpp
+++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/cldnn_remote_blob_tests.cpp
@@ -11,13 +11,15 @@
 #include
 #include
-#include
+#include
 #include
 #include
 
 #include "base/ov_behavior_test_utils.hpp"
 #include "ov_models/subgraph_builders.hpp"
 #include "functional_test_utils/blob_utils.hpp"
+#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
+#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
 
 using namespace ::testing;
 using namespace InferenceEngine;
@@ -578,8 +580,8 @@ class TwoNets_Test : public ov::test::TestsCommon,
                      public testing::WithParamInterface {
     void SetUp() override {
         std::tie(num_streams, num_requests) = this->GetParam();
-        fn_ptrs = {ngraph::builder::subgraph::makeSplitMultiConvConcat(),
-                   ngraph::builder::subgraph::makeMultiSingleConv()};
+        fn_ptrs = {ov::test::utils::make_split_multi_conv_concat(),
+                   ov::test::utils::make_multi_single_conv()};
     };
 public:
     static std::string getTestCaseName(const testing::TestParamInfo& obj) {
diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp
similarity index 89%
rename from src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp
rename to src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp
index 00761773bba2d6..eabaf01d86193e 100644
--- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/dx11_remote_ctx_test.cpp
+++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/dx11_remote_ctx_test.cpp
@@ -8,14 +8,13 @@
 #include
 #include
-#include
-
-#include
-#include
-#include
+#include "gpu/gpu_config.hpp"
+#include "common_test_utils/test_common.hpp"
+#include "common_test_utils/test_constants.hpp"
 #include "common_test_utils/file_utils.hpp"
 #include "ov_models/subgraph_builders.hpp"
-#include
+#include "openvino/core/preprocess/pre_post_process.hpp"
+#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
 
 #ifdef _WIN32
 #ifdef ENABLE_DX11
@@ -30,12 +29,13 @@
 #define NOMINMAX_DEFINED_CTX_UT
 #endif
 
-#include
-#include
 #include
 #include
 #include
 
+#include "gpu/gpu_context_api_dx.hpp"
+#include "openvino/runtime/intel_gpu/ocl/dx.hpp"
+
 #ifdef NOMINMAX_DEFINED_CTX_UT
 #undef NOMINMAX
 #undef NOMINMAX_DEFINED_CTX_UT
@@ -148,7 +148,7 @@ struct DX11CachedTexture_Test : DX11RemoteCtx_Test {
         GTEST_SKIP();
 #endif
         // inference using remote blob with batch
-        auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, texture_description.Height, texture_description.Width});
+        auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, texture_description.Height, texture_description.Width});
         ov::Core core;
         ov::intel_gpu::ocl::D3DContext context(core, device_ptr);
@@ -202,29 +202,26 @@ TEST_F(DX11RemoteCtx_Test, smoke_make_shared_context) {
 #if defined(ANDROID)
     GTEST_SKIP();
 #endif
-    using namespace InferenceEngine;
-    using namespace InferenceEngine::gpu;
-    auto ie = InferenceEngine::Core();
+    auto core = ov::Core();
     CComPtr device_ptr;
     CComPtr ctx_ptr;
 
     ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) =
                     create_device_with_ctx(intel_adapters[0]));
-    auto remote_context = make_shared_context(ie,
-                                              ov::test::utils::DEVICE_GPU,
-                                              device_ptr);
-    ASSERT_TRUE(remote_context);
+
+    auto gpu_context = core.get_default_context("GPU").as();
+    auto context_handle = gpu_context.get();
+    ASSERT_TRUE(context_handle);
 
     for (auto adapter : other_adapters) {
         CComPtr device_ptr;
         CComPtr ctx_ptr;
 
         ASSERT_NO_THROW(std::tie(device_ptr, ctx_ptr) =
-                        create_device_with_ctx(adapter));
-        ASSERT_THROW(make_shared_context(ie, ov::test::utils::DEVICE_GPU,
-                                         device_ptr),
-                     std::runtime_error);
+                        create_device_with_ctx(adapter));
+        ASSERT_THROW(ov::intel_gpu::ocl::D3DContext gpu_context(core, device_ptr),
+                     std::runtime_error);
     }
 }
diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp
similarity index 96%
rename from src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp
rename to src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp
index 6b98f98d44a228..4e6a60ca4ab2ce 100644
--- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/gpu_remote_tensor_tests.cpp
+++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/gpu_remote_tensor_tests.cpp
@@ -2,43 +2,31 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
-#include
-#include
-#include
-
-#include "openvino/core/dimension.hpp"
-#include "openvino/core/except.hpp"
-#include "openvino/core/model.hpp"
 #include "openvino/core/preprocess/pre_post_process.hpp"
 #include "openvino/runtime/intel_gpu/ocl/ocl.hpp"
-#include "openvino/runtime/core.hpp"
 #include "openvino/runtime/intel_gpu/properties.hpp"
-#include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/remote_tensor.hpp"
-#include "remote_blob_tests/remote_blob_helpers.hpp"
-#include "common_test_utils/test_assertions.hpp"
+#include "remote_tensor_tests/helpers.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
-#include "common_test_utils/test_common.hpp"
 #include "base/ov_behavior_test_utils.hpp"
 #include "ov_models/subgraph_builders.hpp"
-#include "functional_test_utils/blob_utils.hpp"
-#include "subgraphs_builders.hpp"
-#include "transformations/utils/utils.hpp"
-
-using namespace ::testing;
+#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
+#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
+#include "common_test_utils/subgraph_builders/convert_transpose.hpp"
 
 class OVRemoteTensor_Test : public ov::test::TestsCommon {
 protected:
-    std::shared_ptr fn_ptr;
+    std::shared_ptr fn_ptr;
 
     void SetUp() override {
-        fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
+        fn_ptr = ov::test::utils::make_split_multi_conv_concat();
     }
 };
 
 namespace {
+using ::testing::HasSubstr;
+
 std::vector ov_dynamic {true, false};
 std::vector ov_with_auto_batching {true, false};
 enum class RemoteTensorSharingType {
@@ -71,7 +59,7 @@ using RemoteTensorSharingTestOptionsParams = std::tuple
 {
 protected:
-    std::shared_ptr fn_ptr;
+    std::shared_ptr fn_ptr;
     std::string deviceName;
     ov::AnyMap config;
@@ -340,10 +328,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputRemoteTensor) {
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 }
 
@@ -611,10 +598,9 @@ TEST_P(OVRemoteTensorInputBlob_Test, smoke_canInputOutputRemoteTensor) {
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 }
 
@@ -750,13 +736,13 @@ TEST(OVRemoteTensorTests, smoke_MixedTensorTypes) {
 class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testing::WithParamInterface {
 protected:
-    std::shared_ptr fn_ptr;
+    std::shared_ptr fn_ptr;
     std::string deviceName;
     ov::AnyMap config;
 
 public:
     void SetUp() override {
-        fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
+        fn_ptr = ov::test::utils::make_split_multi_conv_concat();
         deviceName = ov::test::utils::DEVICE_GPU;
         auto with_auto_batching = this->GetParam();
         if (with_auto_batching) {
@@ -819,10 +805,9 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 
     if (is_caching_test) {
@@ -883,10 +868,9 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), output_tensor_shared.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
         ASSERT_NO_THROW(output_tensor_shared.data());
-        ov::test::utils::compare(output_tensor_regular, output_tensor_shared, thr);
+        ov::test::utils::compare(output_tensor_regular, output_tensor_shared);
     }
 
     if (is_caching_test) {
@@ -979,9 +963,8 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
-        ov::test::utils::compare(output_tensor_regular, out_tensor, thr);
+        ov::test::utils::compare(output_tensor_regular, out_tensor);
     }
 
     if (is_caching_test) {
@@ -1070,9 +1053,8 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
-        ov::test::utils::compare(output_tensor_regular, out_tensor, thr);
+        ov::test::utils::compare(output_tensor_regular, out_tensor);
     }
 
     if (is_caching_test) {
@@ -1162,9 +1144,8 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi
     {
         ASSERT_EQ(output->get_element_type(), ov::element::f32);
         ASSERT_EQ(output_tensor_regular.get_size(), out_tensor.get_size());
-        auto thr = FuncTestUtils::GetComparisonThreshold(InferenceEngine::Precision::FP32);
         ASSERT_NO_THROW(output_tensor_regular.data());
-        ov::test::utils::compare(output_tensor_regular, out_tensor, thr);
+        ov::test::utils::compare(output_tensor_regular, out_tensor);
     }
 
     if (is_caching_test) {
@@ -1281,7 +1262,10 @@ TEST_F(OVRemoteTensor_Test, NV12toGray) {
     // ------------------------------------------------------
     // Prepare input data
-    ov::Tensor fake_image = ov::test::utils::create_and_fill_tensor(ov::element::i8, {1, height, width, feature}, 50, 0, 1);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 50;
+    ov::Tensor fake_image = ov::test::utils::create_and_fill_tensor(ov::element::i8, {1, height, width, feature}, in_data);
     ov::Tensor fake_image_regular = ov::test::utils::create_and_fill_tensor(ov::element::f32, {1, height, width, feature });
 
     auto image_ptr = static_cast(fake_image.data());
@@ -1299,7 +1283,7 @@ TEST_F(OVRemoteTensor_Test, NV12toGray) {
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, feature, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, feature, height, width});
 
     using namespace ov::preprocess;
@@ -1347,7 +1331,7 @@ TEST_F(OVRemoteTensor_Test, NV12toGray) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, feature, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, feature, height, width});
 
     auto p_reg = PrePostProcessor(fn_ptr_regular);
     p_reg.input().tensor().set_element_type(ov::element::f32)
@@ -1383,14 +1367,18 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_ConvertTranspose) {
     // ------------------------------------------------------
     // Prepare input data
-    ov::Tensor fake_image_data_y = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, 50, 0, 1);
-    ov::Tensor fake_image_data_uv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, 256, 0, 1);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 50;
+    ov::Tensor fake_image_data_y = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, in_data);
+    in_data.range = 256;
+    ov::Tensor fake_image_data_uv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, in_data);
 
     auto ie = ov::Core();
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvertTranspose({1, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_convert_transpose({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -1457,7 +1445,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_ConvertTranspose) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvertTranspose({1, 3, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_convert_transpose({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p_reg = PrePostProcessor(fn_ptr_regular);
@@ -1494,13 +1482,16 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_single_plane) {
     // ------------------------------------------------------
     // Prepare input data
-    ov::Tensor fake_image_data_yuv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height * 3 / 2, width, 1}, 50);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 50;
+    ov::Tensor fake_image_data_yuv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height * 3 / 2, width, 1}, in_data);
 
     auto ie = ov::Core();
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -1548,7 +1539,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_single_plane) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p_reg = PrePostProcessor(fn_ptr_regular);
@@ -1584,14 +1575,18 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_two_planes) {
     // ------------------------------------------------------
     // Prepare input data
-    ov::Tensor fake_image_data_y = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, 50, 0, 1);
-    ov::Tensor fake_image_data_uv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, 256, 0, 1);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 50;
+    ov::Tensor fake_image_data_y = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, in_data);
+    in_data.range = 256;
+    ov::Tensor fake_image_data_uv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, in_data);
 
     auto ie = ov::Core();
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -1656,7 +1651,7 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_image_two_planes) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p_reg = PrePostProcessor(fn_ptr_regular);
@@ -1693,12 +1688,16 @@ TEST_F(OVRemoteTensor_Test, NV12toBGR_buffer) {
     // ------------------------------------------------------
     // Prepare input data
-    ov::Tensor fake_image_data_y = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, 50, 0, 1);
-    ov::Tensor fake_image_data_uv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, 256, 0, 1);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 50;
+    ov::Tensor fake_image_data_y = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, in_data);
+    in_data.range = 256;
+    ov::Tensor fake_image_data_uv = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, in_data);
 
     auto ie = ov::Core();
 
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -1781,7 +1780,7 @@ class OVRemoteTensorBatched_Test : public ov::test::TestsCommon, public testing:
 protected:
     size_t num_batch;
-    std::vector> fn_ptrs;
+    std::vector> fn_ptrs;
 };
 
 TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) {
@@ -1795,15 +1794,19 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) {
     // Prepare input data
     std::vector fake_image_data_yuv;
     for (size_t i = 0; i < num_batch; i++) {
-        fake_image_data_yuv.push_back(ov::test::utils::create_and_fill_tensor(
-            ov::element::u8, {1, height * 3 / 2, width, 1}, 50, 0, 1, static_cast(i)));
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 50;
+        in_data.resolution = 1;
+        in_data.seed = static_cast(i);
+        fake_image_data_yuv.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height * 3 / 2, width, 1}, in_data));
     }
 
     auto ie = ov::Core();
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -1861,7 +1864,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_single_plane) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p_reg = PrePostProcessor(fn_ptr_regular);
@@ -1902,16 +1905,21 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_two_planes) {
     // Prepare input data
     std::vector fake_image_data_y, fake_image_data_uv;
     for (size_t i = 0; i < num_batch; i++) {
-        fake_image_data_y.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, 50, 0, 1, static_cast(i)));
-        fake_image_data_uv.push_back(ov::test::utils::create_and_fill_tensor(
-            ov::element::u8, {1, height / 2, width / 2, 2}, 256, 0, 1, static_cast(i)));
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 50;
+        in_data.resolution = 1;
+        in_data.seed = static_cast(i);
+        fake_image_data_y.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, in_data));
+        in_data.range = 256;
+        fake_image_data_uv.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, in_data));
     }
 
     auto ie = ov::Core();
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -1985,7 +1993,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_image_two_planes) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p_reg = PrePostProcessor(fn_ptr_regular);
@@ -2030,7 +2038,12 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) {
     std::vector fake_image;
     std::vector fake_image_regular;
     for (size_t i = 0; i < num_batch; i++) {
-        auto tensor_image = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, feature}, 50, 0, 1, static_cast(i));
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 50;
+        in_data.resolution = 1;
+        in_data.seed = static_cast(i);
+        auto tensor_image = ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, feature}, in_data);
         auto tensor_regular = ov::test::utils::create_and_fill_tensor(ov::element::f32, {1, feature, height, width });
         auto image_ptr = static_cast(tensor_image.data());
         auto image_ptr_regular = static_cast(tensor_regular.data());
@@ -2050,7 +2063,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) {
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, feature, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, feature, height, width});
 
     using namespace ov::preprocess;
@@ -2108,7 +2121,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toGray) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 1, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 1, height, width});
 
     auto p_reg = PrePostProcessor(fn_ptr_regular);
     p_reg.input().tensor().set_element_type(ov::element::f32)
@@ -2146,16 +2159,21 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) {
     // Prepare input data
     std::vector fake_image_data_y, fake_image_data_uv;
     for (size_t i = 0; i < num_batch * 2; ++i) {
-        fake_image_data_y.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, 50, 0, 1, static_cast(i)));
-        fake_image_data_uv.push_back(ov::test::utils::create_and_fill_tensor(
-            ov::element::u8, {1, height / 2, width / 2, 2}, 256, 0, 1, static_cast(i)));
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 50;
+        in_data.resolution = 1;
+        in_data.seed = static_cast(i);
+        fake_image_data_y.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height, width, 1}, in_data));
+        in_data.range = 256;
+        fake_image_data_uv.push_back(ov::test::utils::create_and_fill_tensor(ov::element::u8, {1, height / 2, width / 2, 2}, in_data));
     }
 
     auto ie = ov::Core();
 
     // ------------------------------------------------------
     // inference using remote tensor
-    auto fn_ptr_remote = ngraph::builder::subgraph::makeConvPoolRelu({num_batch, 3, height, width});
+    auto fn_ptr_remote = ov::test::utils::make_conv_pool_relu({num_batch, 3, height, width});
 
     using namespace ov::preprocess;
     auto p = PrePostProcessor(fn_ptr_remote);
@@ -2257,7 +2275,7 @@ TEST_P(OVRemoteTensorBatched_Test, NV12toBGR_buffer) {
 
     // ------------------------------------------------------
     // regular inference
-    auto fn_ptr_regular = ngraph::builder::subgraph::makeConvPoolRelu({1, 3, height, width});
+    auto fn_ptr_regular = ov::test::utils::make_conv_pool_relu({1, 3, height, width});
 
     using namespace ov::preprocess;
     auto p_reg = PrePostProcessor(fn_ptr_regular);
@@ -2391,7 +2409,7 @@ TEST(OVRemoteContextGPU, smoke_RemoteContextCaching) {
     const auto gpuDeviceFirst = gpuDevices[0];
     const auto gpuDeviceSecond = gpuDevices[1];
 
-    auto model = ngraph::builder::subgraph::makeConvertTranspose();
+    auto model = ov::test::utils::make_convert_transpose();
 
     auto compiledModelFirst = core.compile_model(model, gpuDeviceFirst);
     auto compiledModelSecond = core.compile_model(model, gpuDeviceSecond);
@@ -2432,7 +2450,7 @@ TEST(OVRemoteContextGPU, smoke_RemoteContextSingleDevice) {
     check_contexts_are_same(default_ctx, core.get_default_context(ov::test::utils::DEVICE_GPU));
 
     // Ensure compiled model uses default context too
-    auto model = ngraph::builder::subgraph::makeConvertTranspose();
+    auto model = ov::test::utils::make_convert_transpose();
     auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU);
     check_contexts_are_same(default_ctx, compiled_model.get_context());
     ASSERT_EQ(2, compiled_model.get_property(ov::streams::num));
diff --git a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/remote_blob_helpers.hpp b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/helpers.hpp
similarity index 98%
rename from src/plugins/intel_gpu/tests/functional/remote_blob_tests/remote_blob_helpers.hpp
rename to src/plugins/intel_gpu/tests/functional/remote_tensor_tests/helpers.hpp
index 0e33e2af69109e..3dc43b93414b1e 100644
--- a/src/plugins/intel_gpu/tests/functional/remote_blob_tests/remote_blob_helpers.hpp
+++ b/src/plugins/intel_gpu/tests/functional/remote_tensor_tests/helpers.hpp
@@ -12,11 +12,11 @@
 #endif
 
 #ifdef _WIN32
-# include <gpu/gpu_context_api_dx.hpp>
+# include "gpu/gpu_context_api_dx.hpp"
 #elif defined ENABLE_LIBVA
-# include <gpu/gpu_context_api_va.hpp>
+# include "gpu/gpu_context_api_va.hpp"
 #endif
-#include <gpu/gpu_context_api_ocl.hpp>
+#include "gpu/gpu_context_api_ocl.hpp"
 
 namespace {
 template
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
index 7f5402bfa5cc8e..07634731309133 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
@@ -23,7 +23,7 @@ std::shared_ptr getFunction1() {
     params.front()->set_friendly_name("Param_1");
     params.front()->get_output_tensor(0).set_names({"input_tensor"});
 
-    auto relu = std::make_shared(params[0]);
+    auto relu = std::make_shared(params[0]);
     relu->get_output_tensor(0).set_names({"relu"});
 
     return std::make_shared(relu, params, "SimpleActivation");
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp
index 3ba2fc4cd4c9e7..b5af69fa6d0f59 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/plugin/core_threading_tests.cpp
@@ -3,7 +3,12 @@
 //
 
 #include
-#include
+#include "remote_tensor_tests/helpers.hpp"
+#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
+#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp"
+#include "common_test_utils/subgraph_builders/single_conv.hpp"
+#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"
+#include "common_test_utils/subgraph_builders/2_input_subtract.hpp"
 
 using namespace InferenceEngine;
 using namespace InferenceEngine::gpu;
@@ -26,11 +31,11 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_RemoteContext) {
     std::atomic counter{0u};
 
     std::vector networks;
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::make2InputSubtract()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeMultiSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSingleConv()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitConvConcat()));
-    networks.emplace_back(InferenceEngine::CNNNetwork(ngraph::builder::subgraph::makeSplitMultiConvConcat()));
+    networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_2_input_subtract()));
+    networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_multi_single_conv()));
+    networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_single_conv()));
+    networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_split_conv_concat()));
+    networks.emplace_back(InferenceEngine::CNNNetwork(ov::test::utils::make_split_multi_conv_concat()));
 
     auto ocl_instance = std::make_shared();
     ie.SetConfig(config, target_device);
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp
index 80e61648cc86b7..ebeeb2a2d6017c 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/concat_transformation.cpp
@@ -77,7 +77,7 @@ const std::vector testValues = {
         {},
         { 256ul, ngraph::Shape({}), {0.f}, {2.55f}, {0.f}, {2.55f} },
         {},
-        std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)),
+        std::make_shared(ov::element::u8, ov::Shape{1, 3, 16, 16}, std::vector(3 * 16 * 16, 1.0)),
         {},
         {
             { ov::element::f16 },
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp
index 8fc71d9ba38215..5449287a2dbb46 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/depth_to_space_transformation.cpp
@@ -9,7 +9,6 @@
 
 using namespace LayerTestsDefinitions;
 using namespace InferenceEngine::details;
-using namespace ngraph::opset1;
 
 namespace {
 const std::vector precisions = {
@@ -17,9 +16,9 @@ const std::vector precisions = {
     ngraph::element::f16
 };
 
-const std::vector modes = {
-    DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST,
-    DepthToSpace::DepthToSpaceMode::DEPTH_FIRST
+const std::vector modes = {
+    ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST,
+    ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST
 };
 
 const std::vector inputShapesBS2 = {
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp
index 8f6f43f4195264..52c847feb354d8 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/pad_transformation.cpp
@@ -27,11 +27,11 @@ const std::vector trasform
 
 namespace commonTestCases {
 
-const std::vector padModes = {
-    ngraph::op::PadMode::CONSTANT,
-    ngraph::op::PadMode::EDGE,
-    ngraph::op::PadMode::REFLECT,
-    ngraph::op::PadMode::SYMMETRIC
+const std::vector padModes = {
+    ov::op::PadMode::CONSTANT,
+    ov::op::PadMode::EDGE,
+    ov::op::PadMode::REFLECT,
+    ov::op::PadMode::SYMMETRIC
 };
 
 const std::vector params = {
@@ -92,7 +92,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, PadTransformation,
     ::testing::Combine(
         ::testing::ValuesIn(netPrecisions),
         ::testing::ValuesIn(inputShapes),
-        ::testing::Values(ngraph::op::PadMode::CONSTANT),
+        ::testing::Values(ov::op::PadMode::CONSTANT),
         ::testing::Values(ov::test::utils::DEVICE_GPU),
         ::testing::ValuesIn(trasformationParamValues),
         ::testing::ValuesIn(params)),
@@ -101,10 +101,10 @@
 
 namespace testCasesForOtherModes {
 
-const std::vector modesWithoutConstant = {
-    ngraph::op::PadMode::EDGE,
-    ngraph::op::PadMode::REFLECT,
-    ngraph::op::PadMode::SYMMETRIC
+const std::vector modesWithoutConstant = {
+    ov::op::PadMode::EDGE,
+    ov::op::PadMode::REFLECT,
+    ov::op::PadMode::SYMMETRIC
 };
 
 const std::vector params = {
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp
index 3387375a0c07c1..5513123a7862cb 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/broadcast.cpp
@@ -96,7 +96,7 @@ INSTANTIATE_TEST_CASE_P(smoke_TestNumpyBroadcast2D, BroadcastLayerTest,
     ::testing::Combine(::testing::ValuesIn(targetShapesNumpy2D),
                        ::testing::Values(ngraph::AxisSet{}),  // not used in numpy mode
-                       ::testing::Values(ngraph::op::BroadcastType::NUMPY),
+                       ::testing::Values(ov::op::BroadcastType::NUMPY),
                        ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(input_shapes_2d_static)),
                        ::testing::ValuesIn(inputPrecisions),
                        ::testing::Values(ov::test::utils::DEVICE_GPU)),
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp
index 2f00b4e38e7090..85012f392611ab 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp
@@ -46,7 +46,7 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
     ::testing::Values(std::vector({0, 0})),
     ::testing::ValuesIn(dilations),
     ::testing::ValuesIn(numOutChannels),
-    ::testing::Values(ngraph::op::PadType::VALID)
+    ::testing::Values(ov::op::PadType::VALID)
 );
 
 INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D_ExplicitPadding, ConvolutionLayerTest,
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp
index 1488f7cbf6358b..8fdfe1f197a9fd 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution_backprop_data.cpp
@@ -137,7 +137,7 @@ const auto conv3DParams_ExplicitPadding = ::testing::Combine(
     ::testing::ValuesIn(padEnds3D),
     ::testing::ValuesIn(dilations3D),
     ::testing::ValuesIn(numOutChannels),
-    ::testing::Values(ngraph::op::PadType::EXPLICIT),
+    ::testing::Values(ov::op::PadType::EXPLICIT),
     ::testing::ValuesIn(emptyOutputPadding)
 );
 const auto conv3DParams_AutoPadValid = ::testing::Combine(
@@ -147,7 +147,7 @@ const auto conv3DParams_AutoPadValid = ::testing::Combine(
     ::testing::Values(std::vector({0, 0, 0})),
     ::testing::ValuesIn(dilations3D),
     ::testing::ValuesIn(numOutChannels),
-    ::testing::Values(ngraph::op::PadType::VALID),
+    ::testing::Values(ov::op::PadType::VALID),
     ::testing::ValuesIn(emptyOutputPadding)
 );
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp
index e643f5716e5696..cf57cdd081b164 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_convolution.cpp
@@ -82,7 +82,7 @@ const auto groupConv2DParams_ExplicitPadding = ::testing::Combine(
     ::testing::ValuesIn(dilations),
     ::testing::ValuesIn(numOutChannels),
     ::testing::ValuesIn(numGroups),
-    ::testing::Values(ngraph::op::PadType::EXPLICIT)
+    ::testing::Values(ov::op::PadType::EXPLICIT)
 );
 const auto groupConv2DParams_AutoPadValid = ::testing::Combine(
     ::testing::ValuesIn(kernels),
@@ -92,7 +92,7 @@ const auto groupConv2DParams_AutoPadValid = ::testing::Combine(
     ::testing::ValuesIn(dilations),
     ::testing::ValuesIn(numOutChannels),
     ::testing::ValuesIn(numGroups),
-    ::testing::Values(ngraph::op::PadType::VALID)
+    ::testing::Values(ov::op::PadType::VALID)
 );
 
 INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution2D_ExplicitPadding, GroupConvolutionLayerTest,
@@ -128,7 +128,7 @@ const auto groupConv3DParams_ExplicitPadding = ::testing::Combine(
     ::testing::ValuesIn(dilations3d),
     ::testing::Values(4),
     ::testing::Values(2),
-    ::testing::Values(ngraph::op::PadType::EXPLICIT)
+    ::testing::Values(ov::op::PadType::EXPLICIT)
 );
 const auto groupConv3DParams_AutoPadValid = ::testing::Combine(
     ::testing::ValuesIn(kernels3d),
@@ -138,7 +138,7 @@ const auto groupConv3DParams_AutoPadValid = ::testing::Combine(
     ::testing::ValuesIn(dilations3d),
     ::testing::Values(4),
     ::testing::Values(2),
-    ::testing::Values(ngraph::op::PadType::VALID)
+    ::testing::Values(ov::op::PadType::VALID)
 );
 
 INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution3D_ExplicitPadding, GroupConvolutionLayerTest,
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp
index 85f94d0b7d6e7c..b40264d2fa1eab 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/gru_sequence.cpp
@@ -27,9 +27,9 @@ namespace {
     std::vector linear_before_reset = {true, false};
     std::vector clip{0.f};
     std::vector clip_non_zeros{0.7f};
-    std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD,
-                             ngraph::op::RecurrentSequenceDirection::REVERSE,
-                             ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL
+    std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD,
+                             ov::op::RecurrentSequenceDirection::REVERSE,
+                             ov::op::RecurrentSequenceDirection::BIDIRECTIONAL
     };
     std::vector netPrecisions = {InferenceEngine::Precision::FP32,
                                  InferenceEngine::Precision::FP16};
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp
index 97d88b32360cd7..2bb3abc7e38ff1 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/interpolate.cpp
@@ -87,43 +87,43 @@ const std::vector> target5dShapes = {
     {1, 1, 4, 4, 4},
 };
 
-const std::vector modesWithoutNearest = {
-    ngraph::op::v4::Interpolate::InterpolateMode::LINEAR,
-    ngraph::op::v4::Interpolate::InterpolateMode::CUBIC,
-    ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX,
+const std::vector modesWithoutNearest = {
+    ov::op::v4::Interpolate::InterpolateMode::LINEAR,
+    ov::op::v4::Interpolate::InterpolateMode::CUBIC,
+    ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX,
 };
 
-const std::vector nearestMode = {
-    ngraph::op::v4::Interpolate::InterpolateMode::NEAREST,
+const std::vector nearestMode = {
+    ov::op::v4::Interpolate::InterpolateMode::NEAREST,
 };
 
-const std::vector linearOnnxMode = {
-    ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX,
+const std::vector linearOnnxMode = {
+    ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX,
 };
 
-const std::vector coordinateTransformModes = {
-    ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
-    ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL,
-    ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
-    ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
-    ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS,
+const std::vector coordinateTransformModes = {
+    ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN,
+    ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL,
+    ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL,
+    ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC,
+    ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS,
 };
 
-const std::vector shapeCalculationMode = {
-    ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES,
-    ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES,
+const std::vector shapeCalculationMode = {
+    ov::op::v4::Interpolate::ShapeCalcMode::SIZES,
+    ov::op::v4::Interpolate::ShapeCalcMode::SCALES,
 };
 
-const std::vector nearestModes = {
-    ngraph::op::v4::Interpolate::NearestMode::SIMPLE,
-    ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR,
-
ngraph::op::v4::Interpolate::NearestMode::FLOOR, - ngraph::op::v4::Interpolate::NearestMode::CEIL, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, +const std::vector nearestModes = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, + ov::op::v4::Interpolate::NearestMode::CEIL, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, }; -const std::vector defaultNearestMode = { - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, +const std::vector defaultNearestMode = { + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, }; const std::vector> pads = { @@ -308,9 +308,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Basic, Interpolate11LayerTest, ::t ::testing::Values(additional_config)), Interpolate11LayerTest::getTestCaseName); -const std::vector modesPillow = { - ngraph::op::v4::Interpolate::InterpolateMode::BILINEAR_PILLOW, - ngraph::op::v4::Interpolate::InterpolateMode::BICUBIC_PILLOW, +const std::vector modesPillow = { + ov::op::v4::Interpolate::InterpolateMode::BILINEAR_PILLOW, + ov::op::v4::Interpolate::InterpolateMode::BICUBIC_PILLOW, }; const std::vector pillowModePrecisions = { @@ -321,9 +321,9 @@ const std::vector pillowModePrecisions = { INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -345,9 +345,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow, Interpolate11LayerTest, :: INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Horizontal, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -369,9 +369,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Horizontal, Interpolate11La INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + 
::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{0, 0, 1, 1}), ::testing::Values(std::vector{0, 0, 1, 1}), @@ -393,9 +393,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical, Interpolate11Laye INSTANTIATE_TEST_SUITE_P(smoke_Interpolate_11_Pillow_Vertical_BF, Interpolate11LayerTest, ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(modesPillow), - ::testing::Values(ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES), - ::testing::Values(ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), - ::testing::Values(ngraph::op::v4::Interpolate::NearestMode::SIMPLE), + ::testing::Values(ov::op::v4::Interpolate::ShapeCalcMode::SCALES), + ::testing::Values(ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN), + ::testing::Values(ov::op::v4::Interpolate::NearestMode::SIMPLE), ::testing::Values(false), ::testing::Values(std::vector{2, 1, 0, 0}), ::testing::Values(std::vector{2, 1, 0, 0}), diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/loop.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/loop.cpp similarity index 100% rename from src/plugins/intel_gpu/tests/functional/single_layer_tests/loop.cpp rename to src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/loop.cpp diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp index bc9278fb203ee8..95021cf61038f4 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/lstm_sequence.cpp @@ -29,9 +29,9 @@ std::vector> activations = {{"relu", "sigmoid", "tanh"} std::vector> activations_smoke = {{"relu", "sigmoid", "tanh"}}; std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; -std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL +std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL }; std::vector netPrecisions = {InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp index 89e855b5062335..7a040365fff045 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/matrix_nms.cpp @@ -18,17 +18,17 @@ const std::vector> inStaticShapeParams = {{{3, 100, 4}, { const auto inputPrecisions = InputPrecisions{ov::element::f32, ov::element::i32, ov::element::f32}; -const std::vector sortResultType = {op::v8::MatrixNms::SortResultType::CLASSID, - op::v8::MatrixNms::SortResultType::SCORE, - op::v8::MatrixNms::SortResultType::NONE}; +const std::vector sortResultType = {ov::op::v8::MatrixNms::SortResultType::CLASSID, + ov::op::v8::MatrixNms::SortResultType::SCORE, + 
ov::op::v8::MatrixNms::SortResultType::NONE};
 const std::vector<element::Type> outType = {element::i32, element::i64};
 const std::vector<TopKParams> topKParams = {TopKParams{-1, 5}, TopKParams{100, -1}};
 const std::vector<ThresholdParams> thresholdParams = {ThresholdParams{0.0f, 2.0f, 0.0f}, ThresholdParams{0.1f, 1.5f, 0.2f}};
 const std::vector<int> backgroudClass = {-1, 1};
 const std::vector<bool> normalized = {true, false};
-const std::vector<op::v8::MatrixNms::DecayFunction> decayFunction = {op::v8::MatrixNms::DecayFunction::GAUSSIAN,
-                                                                     op::v8::MatrixNms::DecayFunction::LINEAR};
+const std::vector<ov::op::v8::MatrixNms::DecayFunction> decayFunction = {ov::op::v8::MatrixNms::DecayFunction::GAUSSIAN,
+                                                                         ov::op::v8::MatrixNms::DecayFunction::LINEAR};
 
 const std::vector<bool> outStaticShape = {true};  // only be true as gpu plugin not support nms with internal dynamic yet.
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp
index e8e90841be99bb..5a5369d3e9d80c 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/normalize_l2.cpp
@@ -18,9 +18,9 @@ const std::vector<std::vector<int64_t>> axes = {
 };
 const std::vector<float> eps = {1e-7f, 1e-6f, 1e-5f, 1e-4f};
 
-const std::vector<ngraph::op::EpsMode> epsMode = {
-    ngraph::op::EpsMode::ADD,
-    ngraph::op::EpsMode::MAX,
+const std::vector<ov::op::EpsMode> epsMode = {
+    ov::op::EpsMode::ADD,
+    ov::op::EpsMode::MAX,
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_NormalizeL2,
diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp
index 5e9614e6c674ff..af4cdeeb0f426a 100644
--- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp
+++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/pooling.cpp
@@ -33,8 +33,8 @@ const std::vector<std::vector<size_t>> padBegins = {{0, 0},
                                                     {0, 2}};
 const std::vector<std::vector<size_t>> padEnds = {{0, 0},
                                                   {0, 2}};
-const std::vector<ngraph::op::RoundingType> roundingTypes = {ngraph::op::RoundingType::CEIL,
-                                                             ngraph::op::RoundingType::FLOOR};
+const std::vector<ov::op::RoundingType> roundingTypes = {ov::op::RoundingType::CEIL,
+                                                         ov::op::RoundingType::FLOOR};
 const std::vector<ngraph::element::Type_t> indexElementTypes = {ngraph::element::Type_t::i32};
 const std::vector<int64_t> axes = {0, 2};
 const std::vector<size_t> inputShapeSmall = {1, 3, 30, 30};
@@ -48,8 +48,8 @@ const auto maxPool_ExplicitPad_FloorRounding_Params = ::testing::Combine(
         ::testing::ValuesIn(strides),
         ::testing::ValuesIn(padBegins),
         ::testing::ValuesIn(padEnds),
-        ::testing::Values(ngraph::op::RoundingType::FLOOR),
-        ::testing::Values(ngraph::op::PadType::EXPLICIT),
+        ::testing::Values(ov::op::RoundingType::FLOOR),
+        ::testing::Values(ov::op::PadType::EXPLICIT),
         ::testing::Values(false)  // placeholder value - exclude pad not applicable for max pooling
 );
 
@@ -73,8 +73,8 @@ const auto maxPool_ExplicitPad_CeilRounding_Params = ::testing::Combine(
         ::testing::Values(std::vector<size_t>({1, 1})),
         ::testing::ValuesIn(padBegins),
         ::testing::ValuesIn(padEnds),
-        ::testing::Values(ngraph::op::RoundingType::CEIL),
-        ::testing::Values(ngraph::op::PadType::EXPLICIT),
+        ::testing::Values(ov::op::RoundingType::CEIL),
+        ::testing::Values(ov::op::PadType::EXPLICIT),
         ::testing::Values(false)  // placeholder value - exclude pad not applicable for max pooling
 );
 
@@ -100,8 +100,8 @@ const auto avgPoolExplicitPadCeilRoundingParams = ::testing::Combine(
         ::testing::Values(std::vector<size_t>({1, 1})),
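// Note: a minimal sketch (not part of the patch) of where these migrated enum values
// end up -- the ov::op::v1::MaxPool constructor consumes RoundingType and PadType
// directly. Shapes below are arbitrary illustration values:

#include <memory>
#include "openvino/op/max_pool.hpp"
#include "openvino/op/parameter.hpp"

std::shared_ptr<ov::op::v1::MaxPool> make_example_pool() {
    auto in = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 30, 30});
    return std::make_shared<ov::op::v1::MaxPool>(in,
                                                 ov::Strides{1, 1},
                                                 ov::Shape{0, 0},              // pads_begin
                                                 ov::Shape{0, 0},              // pads_end
                                                 ov::Shape{3, 3},              // kernel
                                                 ov::op::RoundingType::CEIL,   // was ngraph::op::RoundingType::CEIL
                                                 ov::op::PadType::EXPLICIT);   // was ngraph::op::PadType::EXPLICIT
}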
::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::CEIL), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::RoundingType::CEIL), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::Values(true, false) ); @@ -124,8 +124,8 @@ const auto avgPoolExplicitPadFloorRoundingParams = ::testing::Combine( ::testing::ValuesIn(strides), ::testing::ValuesIn(padBegins), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::FLOOR), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::RoundingType::FLOOR), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::Values(true, false) ); @@ -150,9 +150,9 @@ const auto allPools_ValidPad_Params = ::testing::Combine( ::testing::ValuesIn(strides), ::testing::Values(std::vector({0, 0})), ::testing::ValuesIn(padEnds), - ::testing::Values(ngraph::op::RoundingType::FLOOR), // placeholder value - Rounding Type not applicable for Valid pad type + ::testing::Values(ov::op::RoundingType::FLOOR), // placeholder value - Rounding Type not applicable for Valid pad type // TODO: PadType::VALID seems not to ignore padBegins - ::testing::Values(ngraph::op::PadType::VALID), + ::testing::Values(ov::op::PadType::VALID), ::testing::Values(false) // placeholder value - exclude pad not applicable for max pooling ); @@ -180,8 +180,8 @@ const auto maxPool8_ExplicitPad_FloorRounding_Params = ::testing::Combine( ::testing::ValuesIn(padEnds), ::testing::ValuesIn(indexElementTypes), ::testing::ValuesIn(axes), - ::testing::Values(ngraph::op::RoundingType::FLOOR), - ::testing::Values(ngraph::op::PadType::EXPLICIT) + ::testing::Values(ov::op::RoundingType::FLOOR), + ::testing::Values(ov::op::PadType::EXPLICIT) ); INSTANTIATE_TEST_SUITE_P(smoke_MaxPool8_ExplicitPad_FloorRounding, MaxPoolingV8LayerTest, @@ -205,8 +205,8 @@ const auto maxPool8_ExplicitPad_CeilRounding_Params = ::testing::Combine( ::testing::ValuesIn(padEnds), ::testing::ValuesIn(indexElementTypes), ::testing::ValuesIn(axes), - ::testing::Values(ngraph::op::RoundingType::CEIL), - ::testing::Values(ngraph::op::PadType::EXPLICIT) + ::testing::Values(ov::op::RoundingType::CEIL), + ::testing::Values(ov::op::PadType::EXPLICIT) ); INSTANTIATE_TEST_SUITE_P(smoke_MaxPool8_ExplicitPad_CeilRounding, MaxPoolingV8LayerTest, diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp index d789bf3e57a14f..f1ba2a1eaf2df3 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/rnn_sequence.cpp @@ -25,9 +25,9 @@ std::vector input_size{10}; std::vector> activations = {{"relu"}, {"sigmoid"}, {"tanh"}}; std::vector clip{0.f}; std::vector clip_non_zeros{0.7f}; -std::vector direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - ngraph::op::RecurrentSequenceDirection::REVERSE, - ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL, +std::vector direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE, + ov::op::RecurrentSequenceDirection::BIDIRECTIONAL, }; std::vector netPrecisions = {InferenceEngine::Precision::FP32}; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp index 952cb4c82dc0f6..ba6c4b7e0d773a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/scatter_elements_update.cpp @@ -9,7 +9,6 @@ #include "common_test_utils/test_constants.hpp" using namespace LayerTestsDefinitions; -using namespace ngraph::opset3; namespace { // map> diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index d96723d3f47ca3..24e9c674b7b925 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -86,10 +86,10 @@ std::vector disabledTestPatterns() { // unsupported metrics R"(.*nightly_HeteroAutoBatchOVGetMetricPropsTest.*OVGetMetricPropsTest.*(FULL_DEVICE_NAME_with_DEVICE_ID|AVAILABLE_DEVICES|DEVICE_UUID|OPTIMIZATION_CAPABILITIES|MAX_BATCH_SIZE|DEVICE_GOPS|DEVICE_TYPE|RANGE_FOR_ASYNC_INFER_REQUESTS|RANGE_FOR_STREAMS).*)", // Issue: 111437 - R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.CompareWithRefs.*)", - R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.CompareWithRefs.*)", + R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)", + R"(.*smoke_GroupDeconv_2D_Dynamic_.*FP32/GroupDeconvolutionLayerGPUTest.Inference.*)", // Issue: 111440 - R"(.*smoke_set1/GatherElementsGPUTest.CompareWithRefs.*)", + R"(.*smoke_set1/GatherElementsGPUTest.Inference.*)", // New plugin work with tensors, so it means that blob in old API can have different pointers R"(.*InferRequestIOBBlobTest.*secondCallGetInputDoNotReAllocateData.*)", R"(.*InferRequestIOBBlobTest.*secondCallGetOutputDoNotReAllocateData.*)", diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp index 373e9eabd16934..83ea688c2eb3c8 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_convolution_backprop_data.cpp @@ -34,7 +34,7 @@ const auto quantConvBackpropData2DParams = ::testing::Combine( ::testing::ValuesIn(padEnds2D), ::testing::ValuesIn(dilations2D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); @@ -62,7 +62,7 @@ const auto quantConvBackpropData3DParams = ::testing::Combine( ::testing::ValuesIn(padEnds3D), ::testing::ValuesIn(dilations3D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp index e06ae6690d427d..0e851a36c51430 100644 --- 
a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/subgraph_tests/quantized_group_convolution_backprop_data.cpp @@ -36,7 +36,7 @@ const auto quantGroupConvBackpropData2DParams = ::testing::Combine( ::testing::ValuesIn(dilations2D), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); @@ -65,7 +65,7 @@ const auto quantGroupConvBackpropData3DParams = ::testing::Combine( ::testing::ValuesIn(dilations3D), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::AUTO), + ::testing::Values(ov::op::PadType::AUTO), ::testing::ValuesIn(levels), ::testing::ValuesIn(granularity) ); diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp index ca0c19bd3f3c54..458b5fffd76d50 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp @@ -1,47 +1,40 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/convolution.hpp" -#include "common_test_utils/test_constants.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/file_utils.hpp" +#include "shared_test_classes/single_op/convolution.hpp" +#include "common_test_utils/node_builders/convolution.hpp" -// using namespace LayerTestsDefinitions; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" -namespace GPULayerTestsDefinitions { - -using LayerTestsDefinitions::convSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::convSpecificParams; typedef std::tuple< convSpecificParams, - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shape - LayerTestsUtils::TargetDevice // Device name + ov::element::Type, // Net precision + ov::element::Type, // Input precision + ov::element::Type, // Output precision + InputShape, // Input shape + std::string // Device name > convLayerTestParamsSet; - class ConvolutionLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { convSpecificParams convParams; - ElementType netType; - ElementType inType, outType; + ov::element::Type netType; + ov::element::Type inType, outType; InputShape inputShape; std::string targetDevice; std::tie(convParams, netType, inType, outType, inputShape, targetDevice) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; @@ -73,13 +66,13 @@ class ConvolutionLayerGPUTest : 
public testing::WithParamInterfaceGetParam(); init_input_shapes({inputShape}); - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; @@ -88,40 +81,36 @@ class ConvolutionLayerGPUTest : public testing::WithParamInterface(inType, shape)); - auto convolutionNode = ngraph::builder::makeConvolution(inputParams.front(), netType, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels); + auto convolutionNode = ov::test::utils::make_convolution(inputParams.front(), netType, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < convolutionNode->get_output_size(); i++) - results.push_back(std::make_shared(convolutionNode->output(i))); + results.push_back(std::make_shared(convolutionNode->output(i))); - function = std::make_shared(results, inputParams, "Convolution"); + function = std::make_shared(results, inputParams, "Convolution"); } }; -TEST_P(ConvolutionLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ConvolutionLayerGPUTest, Inference) { run(); } -namespace { -// Check 3D input tensor for convolution is handled properly and its output is correct comparing with ngraph runtime. +// Check 3D input tensor for convolution is handled properly and its output is correct comparing with ov runtime. INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_3D_tensor_basic, ConvolutionLayerGPUTest, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{0}), ::testing::Values(std::vector{0}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(13), - ::testing::Values(ngraph::op::PadType::SAME_UPPER)), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::Values(ov::op::PadType::SAME_UPPER)), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::undefined), ::testing::Values(InputShape{{}, {{1, 13, 30}}}), ::testing::Values(ov::test::utils::DEVICE_GPU)), ConvolutionLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp index 9c1b6c5796aef9..15ada4b92c3832 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/batch_to_space.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/batch_to_space.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" 
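// Note: the rewrites in these test files map 1:1 between the legacy and the current
// test infrastructure, as the surrounding hunks show:
//   InferenceEngine::SizeVector              -> std::vector<size_t>
//   ngraph::helpers::InputLayerType          -> ov::test::utils::InputLayerType
//   ngraph::opset8::Constant                 -> ov::op::v0::Constant
//   ngraph::element::Type_t::i64             -> ov::element::i64
//   TEST_P(..., CompareWithRefs) + SKIP_IF   -> TEST_P(..., Inference), with skipping
//                                               handled inside SubgraphBaseTest::run()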
+#include "openvino/op/result.hpp" +#include "openvino/op/batch_to_space.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct BatchToSpaceParams { std::vector block; @@ -22,22 +23,21 @@ struct BatchToSpaceParams { typedef std::tuple< InputShape, // Input shapes BatchToSpaceParams, - ElementType, // Element type - ngraph::helpers::InputLayerType, // block/begin/end input type + ov::element::Type, // Element type + ov::test::utils::InputLayerType, // block/begin/end input type std::map // Additional network configuration > BatchToSpaceParamsLayerParamSet; class BatchToSpaceLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape shapes; BatchToSpaceParams params; - ElementType elementType; - ngraph::helpers::InputLayerType restInputType; - TargetDevice targetDevice; + ov::element::Type model_type; + ov::test::utils::InputLayerType restInputType; std::map additionalConfig; - std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param; + std::tie(shapes, params, model_type, restInputType, additionalConfig) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -45,7 +45,7 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -100,7 +100,7 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface additionalConfig; std::tie(shapes, ssParams, inType, restInputType, additionalConfig) = this->GetParam(); @@ -112,7 +112,7 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface inputShapes; inputShapes.push_back(shapes); - if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (restInputType == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(block.size())}, std::vector(shapes.second.size(), {block.size()}))); inputShapes.push_back(InputShape({static_cast(begin.size())}, std::vector(shapes.second.size(), {begin.size()}))); inputShapes.push_back(InputShape({static_cast(end.size())}, std::vector(shapes.second.size(), {end.size()}))); @@ -122,10 +122,10 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface(inType, inputDynamicShapes.front())}; std::shared_ptr blockInput, beginInput, endInput; - if (restInputType == ngraph::helpers::InputLayerType::PARAMETER) { - auto blockNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{block.size()}); - auto beginNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}); - auto endNode = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}); + if (restInputType == ov::test::utils::InputLayerType::PARAMETER) { + auto blockNode = std::make_shared(ov::element::i64, ov::Shape{block.size()}); + auto beginNode = std::make_shared(ov::element::i64, ov::Shape{begin.size()}); + auto endNode = std::make_shared(ov::element::i64, ov::Shape{end.size()}); params.push_back(blockNode); params.push_back(beginNode); @@ -135,38 +135,34 @@ class BatchToSpaceLayerGPUTest : public testing::WithParamInterface(ngraph::element::Type_t::i64, ov::Shape{block.size()}, block); - beginInput = 
std::make_shared(ngraph::element::Type_t::i64, ov::Shape{begin.size()}, begin); - endInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{end.size()}, end); + blockInput = std::make_shared(ov::element::i64, ov::Shape{block.size()}, block); + beginInput = std::make_shared(ov::element::i64, ov::Shape{begin.size()}, begin); + endInput = std::make_shared(ov::element::i64, ov::Shape{end.size()}, end); } - auto ss = std::make_shared(params[0], blockInput, beginInput, endInput); + auto ss = std::make_shared(params[0], blockInput, beginInput, endInput); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < ss->get_output_size(); i++) { - results.push_back(std::make_shared(ss->output(i))); + results.push_back(std::make_shared(ss->output(i))); } - function = std::make_shared(results, params, "BatchToSpaceFuncTest"); + function = std::make_shared(results, params, "BatchToSpaceFuncTest"); } }; -TEST_P(BatchToSpaceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(BatchToSpaceLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; -const std::vector inputPrecisions = { - ElementType::f32 +const std::vector inputPrecisions = { + ov::element::f32 }; -const std::vector restInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector restInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; const std::vector inputShapesDynamic3D = { @@ -224,4 +220,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_5D, BatchToSpaceLay BatchToSpaceLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp index 8b7c750756b11f..6249b6f6159073 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp @@ -2,48 +2,46 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/broadcast.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/broadcast.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // Shapes std::vector, // Target shapes std::vector, // Axes mapping ov::op::BroadcastType, // Broadcast mode - ov::element::Type_t, // Network precision + ov::element::Type, // Network precision std::vector, // Const inputs std::string // Device name > BroadcastLayerTestParamsSet; class BroadcastLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - std::vector inputShapes; + std::vector shapes; std::vector targetShapes, axesMapping; ov::op::BroadcastType mode; - ov::element::Type_t netPrecision; + ov::element::Type model_type; std::vector isConstInputs; std::string deviceName; - 
std::tie(inputShapes, targetShapes, axesMapping, mode, netPrecision, isConstInputs, deviceName) = obj.param; + std::tie(shapes, targetShapes, axesMapping, mode, model_type, isConstInputs, deviceName) = obj.param; std::ostringstream result; result << "IS=("; - for (const auto& shape : inputShapes) { + for (const auto& shape : shapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; } result << ")_TS=("; - for (const auto& shape : inputShapes) { + for (const auto& shape : shapes) { for (const auto& item : shape.second) { result << ov::test::utils::vec2str(item) << "_"; } @@ -51,7 +49,7 @@ class BroadcastLayerGPUTest : public testing::WithParamInterface &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "BroadcastLayerGPUTest"); + return std::make_shared(results, params, "BroadcastLayerGPUTest"); }; function = makeFunction(functionParams, broadcastOp); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0lu; i < funcInputs.size(); i++) { @@ -172,8 +170,11 @@ class BroadcastLayerGPUTest : public testing::WithParamInterface inputPrecisionsFloat = { +const std::vector inputPrecisionsFloat = { ov::element::f32, }; -const std::vector inputPrecisionsInt = { +const std::vector inputPrecisionsInt = { ov::element::i32, }; @@ -407,5 +404,3 @@ INSTANTIATE_TEST_CASE_P(smoke_broadcast_6d_numpy_compareWithRefs_dynamic, BroadcastLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp index c957a35d0e203f..f3f97b4d325262 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp @@ -1,49 +1,43 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/node_builders/activation.hpp" +#include "common_test_utils/node_builders/convolution.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/convolution.hpp" -#include "common_test_utils/test_constants.hpp" +#include "shared_test_classes/single_op/convolution.hpp" -// using namespace LayerTestsDefinitions; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convolution.hpp" -namespace GPULayerTestsDefinitions { - -using LayerTestsDefinitions::convSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::convSpecificParams; typedef std::tuple< convSpecificParams, - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shape - 
LayerTestsUtils::TargetDevice, // Device name - bool // activation fusing + ov::element::Type, // Model type + InputShape, // Input shape + std::string, // Device name + bool // activation fusing > convLayerTestParamsSet; class ConvolutionLayerGPUTestDynamic : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { convSpecificParams convParams; - ElementType netType; - ElementType inType, outType; + ov::element::Type model_type; InputShape inputShape; std::string targetDevice; bool activationFusing; - std::tie(convParams, netType, inType, outType, inputShape, targetDevice, activationFusing) = obj.param; + std::tie(convParams, model_type, inputShape, targetDevice, activationFusing) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType) = convParams; @@ -63,9 +57,7 @@ class ConvolutionLayerGPUTestDynamic : public testing::WithParamInterface(results, inputParams, "Convolution"); } else { - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < convolutionNode->get_output_size(); i++) - results.push_back(std::make_shared(convolutionNode->output(i))); + results.push_back(std::make_shared(convolutionNode->output(i))); - function = std::make_shared(results, inputParams, "Convolution"); + function = std::make_shared(results, inputParams, "Convolution"); } } }; -TEST_P(ConvolutionLayerGPUTestDynamic, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ConvolutionLayerGPUTestDynamic, Inference) { run(); } -namespace { - // ======== 1D convolutions const std::vector dynInputShapes1D = { { @@ -130,27 +119,25 @@ const std::vector dynInputShapes1D = { INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic1DSymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{1}), ::testing::Values(std::vector{1}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), ConvolutionLayerGPUTestDynamic::getTestCaseName); -const std::vector kernels1D = { {3}, {1} }; -const std::vector strides1D = { {1} }; +const std::vector> kernels1D = { {3}, {1} }; +const std::vector> strides1D = { {1} }; const std::vector> padBegins1D = { {0}, {1} }; const std::vector> padEnds1D = { {0}, {1} }; -const std::vector dilations1D = { {1} }; -const SizeVector numOutChannels = { 64, 63 }; +const std::vector> dilations1D = { {1} }; +const std::vector numOutChannels = { 64, 63 }; const std::vector inputShapes1D = { {{}, {{ 2, 64, 7 }}}, {{}, {{ 1, 67, 7 }}}, @@ -181,10 +168,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_ExplicitPad1D, 
Convolutio ::testing::ValuesIn(padEnds1D), ::testing::ValuesIn(dilations1D), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT)), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::Values(ov::op::PadType::EXPLICIT)), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(inputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -209,16 +194,14 @@ const std::vector dynInputShapes2D_static_output = { INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{1, 2}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -228,16 +211,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymPad, Convolut INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{0, 0}), ::testing::Values(std::vector{0, 0}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -247,16 +228,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2DSymAutoPad, Conv INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{2, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(10), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -266,16 
+245,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_AsymPad, Convol INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic2D_static_output, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{2, 2}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{2, 2}), ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(256), - ::testing::Values(ngraph::op::PadType::EXPLICIT)), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), + ::testing::Values(ov::op::PadType::EXPLICIT)), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(dynInputShapes2D_static_output), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(true)), @@ -293,16 +270,14 @@ const std::vector dynInputShapes3D = { INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3, 3}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{3, 3, 3}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(std::vector{1, 2, 1}), ::testing::Values(std::vector{1, 2, 1}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(3), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes3D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -312,16 +287,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymPad, Convolut INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3, 3}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{3, 3, 3}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(std::vector{0, 0, 0}), ::testing::Values(std::vector{0, 0, 0}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(3), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes3D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), @@ -331,20 +304,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DSymAutoPad, Conv INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_dynamic3DAsymPad, ConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3, 3}), - ::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{3, 3, 3}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(std::vector{1, 2, 1}), ::testing::Values(std::vector{2, 1, 1}), - 
::testing::Values(SizeVector{1, 1, 1}), + ::testing::Values(std::vector{1, 1, 1}), ::testing::Values(3), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes3D), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(false)), ConvolutionLayerGPUTestDynamic::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp index a36426cd84c373..473935bd799840 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution_backprop_data.cpp @@ -2,54 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/convolution_backprop_data.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" - -using namespace InferenceEngine; -using namespace ov::test; +#include "common_test_utils/test_enums.hpp" +#include "common_test_utils/node_builders/convolution_backprop_data.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/convolution_backprop_data.hpp" -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convolution.hpp" -using DeconvSpecParams = LayerTestsDefinitions::convBackpropDataSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::convBackpropDataSpecificParams; using DeconvInputData = std::tuple>>; // values for 'output_shape' -using DeconvLayerTestParamsSet = std::tuple>; class DeconvolutionLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - DeconvSpecParams basicParamsSet; + convBackpropDataSpecificParams basicParamsSet; DeconvInputData inputData; - ElementType prec; + ov::element::Type model_type; std::string targetDevice; std::map additionalConfig; - std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param; + std::tie(basicParamsSet, inputData, model_type, targetDevice, additionalConfig) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet; InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::vector> outShapeData; std::tie(inputShape, outShapeType, outShapeData) = inputData; @@ -62,7 +58,7 @@ class 
DeconvolutionLayerGPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { if (function->get_parameters().size() != 1) { // WA: output_shape depends on 3rd deconvolution input data // but the reference implementation doesn't implement shape inference - // so we need to build a new ngraph function and replace the 3rd input parameter with a constant + // so we need to build a new ov function and replace the 3rd input parameter with a constant // to get valid output shapes - functionRefs = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT); + functionRefs = createGraph({targetInputStaticShapes[0]}, ov::test::utils::InputLayerType::CONSTANT); } inputs.clear(); const auto& funcInputs = function->inputs(); @@ -105,7 +101,11 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterfaceget_parameters().size() == 2) { - auto pos = std::find_if(inputs.begin(), inputs.end(), - [](const std::pair, ov::Tensor> ¶ms) { - return params.first->get_friendly_name() == "param_1"; - }); - IE_ASSERT(pos != inputs.end()); - inputs.erase(pos); - } - auto expectedOutputs = calculate_refs(); - if (expectedOutputs.empty()) { - return; - } - ASSERT_EQ(actualOutputs.size(), expectedOutputs.size()) - << "nGraph interpreter has " << expectedOutputs.size() << " outputs, while IE " << actualOutputs.size(); - - abs_threshold = 1e-2f; - compare(expectedOutputs, actualOutputs); - } - - void configure_model() override { - ov::preprocess::PrePostProcessor p(function); - { - auto& params = function->get_parameters(); - for (size_t i = 0; i < params.size(); i++) { - if (i > 0) { - continue; - } - if (inType != ov::element::Type_t::undefined) { - p.input(i).tensor().set_element_type(inType); - } - } - } - { - auto results = function->get_results(); - for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { - p.output(i).tensor().set_element_type(outType); - } - } - } - function = p.build(); - } - - std::shared_ptr createGraph(const std::vector& inShapes, ngraph::helpers::InputLayerType outShapeType) { - ov::ParameterVector params{std::make_shared(prec, inShapes.front())}; + std::shared_ptr createGraph(const std::vector& inShapes, ov::test::utils::InputLayerType outShapeType) { + ov::ParameterVector params{std::make_shared(model_type, inShapes.front())}; std::shared_ptr outShapeNode; if (!outShapeData.empty()) { - if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { IE_ASSERT(inputDynamicShapes.size() == 2); - auto outShapeParam = std::make_shared(ngraph::element::i32, inputDynamicShapes.back()); + auto outShapeParam = std::make_shared(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; } else { - outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); + outShapeNode = ov::op::v0::Constant::create(ov::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); } } @@ -179,36 +134,36 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterface deconv; if (!outShapeData.empty()) { IE_ASSERT(outShapeNode != nullptr); - deconv = ngraph::builder::makeConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels); + deconv = 
ov::test::utils::make_convolution_backprop_data(params[0], outShapeNode, model_type, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels); } else { - deconv = ngraph::builder::makeConvolutionBackpropData(params[0], prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, false, outPadding); + deconv = ov::test::utils::make_convolution_backprop_data(params[0], model_type, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, false, outPadding); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < deconv->get_output_size(); i++) - results.push_back(std::make_shared(deconv->output(i))); + results.push_back(std::make_shared(deconv->output(i))); - return std::make_shared(results, params, "Deconv"); + return std::make_shared(results, params, "Deconv"); } protected: void SetUp() override { - DeconvSpecParams basicParamsSet; + convBackpropDataSpecificParams basicParamsSet; DeconvInputData inputData; std::map additionalConfig; - std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam(); + std::tie(basicParamsSet, inputData, model_type, targetDevice, additionalConfig) = this->GetParam(); InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::tie(inputShape, outShapeType, outShapeData) = inputData; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet; std::vector paramsShapes; paramsShapes.push_back(inputShape); - if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) { const auto outShapeDims = ov::Shape{outShapeData.front().size()}; paramsShapes.push_back(InputShape{outShapeDims, std::vector(inputShape.second.size(), outShapeDims)}); } @@ -219,36 +174,32 @@ class DeconvolutionLayerGPUTest : public testing::WithParamInterface kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels; std::vector> outShapeData; size_t inferRequestNum = 0; }; -TEST_P(DeconvolutionLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(DeconvolutionLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; const std::vector> emptyOutputPadding = { {} }; /* ============= Deconvolution params ============= */ -const InferenceEngine::SizeVector numOutChannels = { 6 }; +const std::vector numOutChannels = { 6 }; /* ============= Deconvolution params (2D) ============= */ -const std::vector kernels2d = { {3, 3}, {1, 1} }; -const std::vector strides2d = { {1, 1}, {2, 2} }; +const std::vector> kernels2d = { {3, 3}, {1, 1} }; +const std::vector> strides2d = { {1, 1}, {2, 2} }; const std::vector> padBegins2d = { {0, 0} }; const std::vector> padEnds2d = { {0, 0} }; -const std::vector dilations2d = { {1, 1} }; +const std::vector> dilations2d = { {1, 1} }; /* ============= Deconvolution (2D) ============= */ const auto convParams_ExplicitPadding_2D = ::testing::Combine( @@ -258,29 +209,29 @@ const auto convParams_ExplicitPadding_2D = ::testing::Combine( ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding) ); const std::vector dyn_2D_inputs_smoke = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, 
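Note: each DeconvInputData entry in the suites below bundles a dynamic input shape with how the optional output_shape input is supplied. A minimal sketch of one entry follows; the inner value type is assumed to be size_t, since the template arguments are elided in the diff text (InputShape and InputLayerType come from shared_test_classes/base/ov_subgraph.hpp and common_test_utils/test_enums.hpp, as included above).

// Sketch only; value element type assumed.
using DeconvInputData = std::tuple<ov::test::InputShape,              // input shape
                                   ov::test::utils::InputLayerType,   // how output_shape is fed
                                   std::vector<std::vector<size_t>>>; // output_shape values per request

DeconvInputData sample{
    ov::test::InputShape{{-1, 12, -1, -1},                   // dynamic shape
                         {{1, 12, 7, 7}, {2, 12, 5, 7}}},    // static shapes per inference request
    ov::test::utils::InputLayerType::PARAMETER,              // output_shape arrives as a runtime input
    {{15, 15}, {9, 10}}};                                    // values fed to that input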
{{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, }; @@ -289,7 +240,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest ::testing::Combine( convParams_ExplicitPadding_2D, ::testing::ValuesIn(dyn_2D_inputs_smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), DeconvolutionLayerGPUTest::getTestCaseName); @@ -297,17 +248,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_FP32, DeconvolutionLayerGPUTest const std::vector dyn_2D_inputs_with_output_shape = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {15, 15}} }, DeconvInputData{ InputShape{{-1, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {2, 12, 7, 7}, {3, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} }, }; @@ -315,20 +266,18 @@ const std::vector dyn_2D_inputs_with_output_shape = { INSTANTIATE_TEST_SUITE_P(smoke_Deconv_2D_Dynamic_OutputShape_FP32, DeconvolutionLayerGPUTest, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), + ::testing::Values(std::vector{3, 3}), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding)), ::testing::ValuesIn(dyn_2D_inputs_with_output_shape), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), DeconvolutionLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp index db4bea2f89ee5e..6664c8f5026e2f 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/cum_sum.cpp @@ -2,22 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/cum_sum.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace 
InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/cum_sum.hpp" -using ElementType = ov::element::Type_t; - -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - ElementType, // data precision + ov::element::Type, // data type InputShape, // input shape std::int64_t, // axis bool, // exclusive @@ -25,15 +22,15 @@ typedef std::tuple< > CumSumLayerGPUParamSet; class CumSumLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - ElementType inputPrecision; + ov::element::Type model_type; InputShape shapes; std::int64_t axis; bool exclusive; bool reverse; - std::tie(inputPrecision, shapes, axis, exclusive, reverse) = obj.param; + std::tie(model_type, shapes, axis, exclusive, reverse) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -41,7 +38,7 @@ class CumSumLayerGPUTest : public testing::WithParamInterfaceGetParam(); + std::tie(model_type, shapes, axis, exclusive, reverse) = this->GetParam(); init_input_shapes({shapes}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(inputPrecision, shape)); + params.push_back(std::make_shared(model_type, shape)); } - auto axisNode = ngraph::opset1::Constant::create(ngraph::element::i32, ngraph::Shape{}, std::vector{axis})->output(0); - auto cumSum = std::make_shared(params[0], axisNode, exclusive, reverse); + auto axisNode = std::make_shared(ov::element::i32, ov::Shape{}, std::vector{axis}); + auto cumSum = std::make_shared(params[0], axisNode, exclusive, reverse); - auto makeFunction = [](ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "CumSumLayerGPUTest"); + return std::make_shared(results, params, "CumSumLayerGPUTest"); }; function = makeFunction(params, cumSum); } }; -TEST_P(CumSumLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(CumSumLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputPrecision = { - ngraph::element::f32 +const std::vector model_type = { + ov::element::f32 }; const std::vector axes = { 0, 1, 2, 3, 4, 5 }; @@ -117,7 +110,7 @@ const std::vector inShapes = { }; const auto testCasesAxis_0 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(inShapes), ::testing::Values(axes[0]), ::testing::ValuesIn(exclusive), @@ -125,7 +118,7 @@ const auto testCasesAxis_0 = ::testing::Combine( ); const auto testCasesAxis_1 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 1, inShapes.end())), ::testing::Values(axes[1]), ::testing::ValuesIn(exclusive), @@ -133,7 +126,7 @@ const auto testCasesAxis_1 = ::testing::Combine( ); const auto testCasesAxis_2 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + 
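Note: the cum_sum test now builds its graph directly from ov ops instead of ngraph opset aliases. A minimal standalone sketch of that construction (shape and axis chosen here for illustration; headers as included in the diff: openvino/op/parameter.hpp, constant.hpp, cum_sum.hpp, result.hpp):

// Sketch: Parameter -> CumSum -> Result wrapped into an ov::Model.
auto param  = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{-1, -1, -1});
auto axis   = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, std::vector<int64_t>{1});
auto cumsum = std::make_shared<ov::op::v0::CumSum>(param, axis, /*exclusive=*/false, /*reverse=*/false);
auto model  = std::make_shared<ov::Model>(
    ov::ResultVector{std::make_shared<ov::op::v0::Result>(cumsum)},
    ov::ParameterVector{param},
    "CumSum");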
::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 2, inShapes.end())), ::testing::Values(axes[2]), ::testing::ValuesIn(exclusive), @@ -141,7 +134,7 @@ const auto testCasesAxis_2 = ::testing::Combine( ); const auto testCasesAxis_3 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 3, inShapes.end())), ::testing::Values(axes[3]), ::testing::ValuesIn(exclusive), @@ -149,7 +142,7 @@ const auto testCasesAxis_3 = ::testing::Combine( ); const auto testCasesAxis_4 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 4, inShapes.end())), ::testing::Values(axes[4]), ::testing::ValuesIn(exclusive), @@ -157,7 +150,7 @@ const auto testCasesAxis_4 = ::testing::Combine( ); const auto testCasesAxis_5 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 5, inShapes.end())), ::testing::Values(axes[5]), ::testing::ValuesIn(exclusive), @@ -165,7 +158,7 @@ const auto testCasesAxis_5 = ::testing::Combine( ); const auto testCasesAxis_negative = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(model_type), ::testing::ValuesIn(std::vector(inShapes.begin() + 5, inShapes.end())), ::testing::ValuesIn(negativeAxes), ::testing::ValuesIn(exclusive), @@ -181,5 +174,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_cum_sum_axis_5_CompareWithRefs_dynamic, CumSumLay INSTANTIATE_TEST_SUITE_P(smoke_cum_sum_neg_axes_CompareWithRefs_dynamic, CumSumLayerGPUTest, testCasesAxis_negative, CumSumLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp index 3f0ea75534ebd1..b291aeaff1c57c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/depth_to_space.cpp @@ -2,22 +2,21 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/depth_to_space.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph::opset3; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/depth_to_space.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; +using ov::op::v0::DepthToSpace; typedef std::tuple< InputShape, // Input shape - ElementType, // Input element type + ov::element::Type, // Input element type DepthToSpace::DepthToSpaceMode, // Mode std::size_t // Block size > DepthToSpaceLayerGPUTestParams; @@ -27,7 +26,7 @@ class DepthToSpaceLayerGPUTest : public testing::WithParamInterface obj) { InputShape shapes; - ElementType inType; + ov::element::Type inType; DepthToSpace::DepthToSpaceMode mode; std::size_t blockSize; std::tie(shapes, inType, mode, blockSize) = obj.param; @@ -40,10 +39,10 @@ class DepthToSpaceLayerGPUTest : public testing::WithParamInterfaceGetParam(); @@ -70,30 +69,26 @@ class 
DepthToSpaceLayerGPUTest : public testing::WithParamInterface(params[0], mode, blockSize); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < d2s->get_output_size(); i++) - results.push_back(std::make_shared(d2s->output(i))); - function = std::make_shared(results, params, "DepthToSpace"); + results.push_back(std::make_shared(d2s->output(i))); + function = std::make_shared(results, params, "DepthToSpace"); } }; -TEST_P(DepthToSpaceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(DepthToSpaceLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inputElementType = { - ElementType::f32, - ElementType::f16, - ElementType::i8 +const std::vector input_types = { + ov::element::f32, + ov::element::f16, + ov::element::i8 }; -const std::vector depthToSpaceModes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST +const std::vector depthToSpaceModes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST }; // ======================== Static Shapes Tests ======================== @@ -120,16 +115,16 @@ const std::vector inputShapesBS3_4D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS2_4D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_4D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2)), DepthToSpaceLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_4D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_4D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); @@ -153,16 +148,16 @@ const std::vector inputShapesBS3_5D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS2_5D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS2_5D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2)), DepthToSpaceLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerGPUTest, testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), - testing::ValuesIn(inputElementType), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapesBS3_5D)), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); @@ -171,8 +166,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerGPU //======================== Dynamic Shapes Tests ======================== -namespace dynamic_shapes { - const std::vector inputShapes4D = { {{-1, -1, -1, -1}, // dynamic {{2, 36, 1, 1}, {1, 36, 3, 1}, {2, 36, 1, 1}, {1, 36, 3, 1}}}, // target @@ -198,7 +191,7 @@ const std::vector inputShapes5D = { INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic4D, 
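Note: the static-shape suites go through ov::test::static_shapes_to_test_representation rather than taking InputShape literals. A short sketch of what that wrapping amounts to, under the assumption that the helper turns each static shape into an InputShape with an empty dynamic part and a single target shape:

// Sketch; helper semantics assumed as described above.
std::vector<ov::Shape> static_shapes{{1, 16, 2, 2}, {1, 32, 4, 4}};
std::vector<ov::test::InputShape> input_shapes =
    ov::test::static_shapes_to_test_representation(static_shapes);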
DepthToSpaceLayerGPUTest, testing::Combine( testing::ValuesIn(inputShapes4D), - testing::ValuesIn(inputElementType), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); @@ -206,12 +199,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic4D, DepthToSpaceLayerGPUTes INSTANTIATE_TEST_SUITE_P(smoke_GPUDepthToSpaceDynamic5D, DepthToSpaceLayerGPUTest, testing::Combine( testing::ValuesIn(inputShapes5D), - testing::ValuesIn(inputElementType), + testing::ValuesIn(input_types), testing::ValuesIn(depthToSpaceModes), testing::Values(1, 2, 3)), DepthToSpaceLayerGPUTest::getTestCaseName); -} // namespace dynamic_shapes - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp index ab76700486d423..9df68f3af86b72 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/detection_output.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/detection_output.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; enum { idxLocation, @@ -65,7 +63,7 @@ class DetectionOutputLayerGPUTest : public testing::WithParamInterface obj) { DetectionOutputAttributes commonAttrs; ParamsWhichSizeDependsDynamic specificAttrs; - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; size_t batch; bool replaceDynamicShapesToIntervals; std::string targetDevice; @@ -101,14 +99,30 @@ class DetectionOutputLayerGPUTest : public testing::WithParamInterface(detOut)}; - function = std::make_shared(results, params, "DetectionOutputDynamic"); + ov::ResultVector results{std::make_shared(detOut)}; + function = std::make_shared(results, params, "DetectionOutputDynamic"); } else { std::shared_ptr detOut; if (params.size() == 3) @@ -220,8 +238,8 @@ class DetectionOutputLayerGPUTest : public testing::WithParamInterface(detOut)}; - function = std::make_shared(results, params, "DetectionOutputDynamic"); + ov::ResultVector results{std::make_shared(detOut)}; + function = std::make_shared(results, params, "DetectionOutputDynamic"); } } @@ -253,18 +271,14 @@ class DetectionOutputLayerGPUTest : public testing::WithParamInterface inShapes; }; -TEST_P(DetectionOutputLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(DetectionOutputLayerGPUTest, Inference) { run(); } -namespace { - const std::vector numClasses = {11, -1}; const int backgroundLabelId = 0; const std::vector topK = {75}; @@ -438,4 +452,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPUDetectionOutputV8Dynamic3In, DetectionOutputLa params3InputsDynamic_v8, DetectionOutputLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp index bdda5ccbe2a947..1f32a7356a47aa 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/gather.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather.hpp" + +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { struct GatherShapeParams { InputShape inputShapes; InputShape targetShapes; @@ -23,22 +23,21 @@ struct GatherShapeParams { typedef std::tuple< GatherShapeParams, - ElementType, // Network precision + ov::element::Type, // Network precision bool, // Is const Indices bool // Is const Axis > GatherGPUTestParams; - class GatherGPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { GatherShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isIndicesConstant; bool isAxisConstant; - std::tie(Shapes, netPrecision, isIndicesConstant, isAxisConstant) = obj.param; + std::tie(Shapes, model_type, isIndicesConstant, isAxisConstant) = obj.param; std::ostringstream result; result << "IS=("; @@ -57,7 +56,7 @@ class GatherGPUTest : public testing::WithParamInterface, } result << "axis=" << Shapes.axis << "_"; result << "batchDims=" << Shapes.batch_dims << "_"; - result << "netPrc=" << netPrecision << "_"; + result << "netPrc=" << model_type << "_"; result << "constIdx=" << (isIndicesConstant ? "True" : "False") << "_"; result << "constAx=" << (isAxisConstant ? 
"True" : "False") << "_"; @@ -67,12 +66,12 @@ class GatherGPUTest : public testing::WithParamInterface, protected: void SetUp() override { GatherShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isAxisConstant; bool isIndicesConstant; - const ElementType intInputsPrecision = ElementType::i32; + const auto int_model_type = ov::element::i32; - std::tie(Shapes, netPrecision, isIndicesConstant, isAxisConstant) = this->GetParam(); + std::tie(Shapes, model_type, isIndicesConstant, isAxisConstant) = this->GetParam(); const int axis = Shapes.axis; const int batchDims = Shapes.batch_dims; targetDevice = ov::test::utils::DEVICE_GPU; @@ -86,7 +85,7 @@ class GatherGPUTest : public testing::WithParamInterface, init_input_shapes({Shapes.inputShapes, Shapes.targetShapes}); } - ngraph::ParameterVector params{std::make_shared(netPrecision, inputDynamicShapes[0])}; + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0])}; params.back()->set_friendly_name("data"); if (isIndicesConstant) { @@ -96,26 +95,22 @@ class GatherGPUTest : public testing::WithParamInterface, for (size_t i = 0; i < Shapes.inputShapes.second.size(); ++i) { idx_range = std::min(static_cast(Shapes.inputShapes.second[i][axis_norm]), idx_range); } - indicesNode = ngraph::builder::makeConstant( - ngraph::element::i64, - Shapes.targetShapes.second[0], - {}, - true, - idx_range - 1, - 0); + + auto indices_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, Shapes.targetShapes.second[0], idx_range - 1, 0); + indicesNode = std::make_shared(indices_tensor); } else { - params.push_back(std::make_shared(intInputsPrecision, inputDynamicShapes[1])); + params.push_back(std::make_shared(int_model_type, inputDynamicShapes[1])); params.back()->set_friendly_name("indices"); } if (isAxisConstant) { - axisNode = ngraph::builder::makeConstant(intInputsPrecision, ov::Shape({1}), {axis}); + axisNode = std::make_shared(int_model_type, ov::Shape({1}), std::vector{axis}); } else { inputDynamicShapes.push_back({1}); for (size_t i = 0lu; i < targetStaticShapes.size(); i++) { targetStaticShapes[i].push_back({1}); } - params.push_back(std::make_shared(intInputsPrecision, inputDynamicShapes[2])); + params.push_back(std::make_shared(int_model_type, inputDynamicShapes[2])); params.back()->set_friendly_name("axis"); } @@ -125,20 +120,16 @@ class GatherGPUTest : public testing::WithParamInterface, : isIndicesConstant ? 
params[1] : params[2], batchDims); - ngraph::ResultVector results{std::make_shared(gatherNode)}; - function = std::make_shared(results, params, "Gather"); + ov::ResultVector results{std::make_shared(gatherNode)}; + function = std::make_shared(results, params, "Gather"); } }; -TEST_P(GatherGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GatherGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::i32, ov::element::i64, @@ -201,9 +192,8 @@ const std::vector dynamicInputShapeConstTargetShape = { INSTANTIATE_TEST_SUITE_P(smoke_dynamic_input_shapes_const_target_shapes, GatherGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapeConstTargetShape), // input shapes - ::testing::ValuesIn(netPrecisions), // network precision - ::testing::Values(true), // is const indices - ::testing::Values(true)), // is const axis + ::testing::ValuesIn(model_types), // network precision + ::testing::Values(true), // is const indices + ::testing::Values(true)), // is const axis GatherGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp index 680474953110ca..804f6d42a6db5e 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_elements.cpp @@ -2,36 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ov::test; -using namespace ngraph; -using namespace InferenceEngine; -using namespace ngraph::helpers; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_elements.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using GatherElementsParams = std::tuple< std::vector, // Dynamic shape + Target static shapes int, // Axis - ElementType, // Data precision - ElementType, // Indices precision - TargetDevice // Device name ->; + ov::element::Type, // Data type + ov::element::Type, // Indices type + std::string>; // Device name class GatherElementsGPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector shapes; - ElementType dPrecision, iPrecision; + ov::element::Type data_type, indices_type; int axis; std::string device; - std::tie(shapes, axis, dPrecision, iPrecision, device) = obj.param; + std::tie(shapes, axis, data_type, indices_type, device) = obj.param; std::ostringstream result; result << "IS=("; @@ -45,21 +42,25 @@ class GatherElementsGPUTest : public testing::WithParamInterface(gather)}; + function = std::make_shared(results, params, "GatherElements"); } }; -TEST_P(GatherElementsGPUTest, CompareWithRefs) { +TEST_P(GatherElementsGPUTest, Inference) { run(); } -namespace { - const std::vector> inDynamicShapeParams = { {{{-1, -1, -1, -1}, {{2, 3, 5, 7}, {3, 4, 6, 8}}}, {{-1, -1, -1, -1}, {{2, 3, 9, 7}, {3, 4, 4, 8}}}}, @@ -102,10 +101,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_set1, 
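Note: both gather variants above replace ngraph::builder::makeConstant's random fill with an explicitly filled tensor wrapped in a Constant. A compact sketch of that pattern; the positional overload is the one visible in the diff, create_and_fill_tensor(type, shape, range, start_from), and idx_range stands in for the bound the tests compute from the input dims:

// Sketch: random i64 indices in [0, idx_range - 1], wrapped as a Constant.
const int64_t idx_range = 7;  // placeholder for the per-axis bound computed in the tests
auto indices_tensor = ov::test::utils::create_and_fill_tensor(
    ov::element::i64, ov::Shape{2, 3}, idx_range - 1, 0);
auto indices_node = std::make_shared<ov::op::v0::Constant>(indices_tensor);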
GatherElementsGPUTest, ::testing::Combine( ::testing::ValuesIn(inDynamicShapeParams), // shape ::testing::ValuesIn(std::vector({2, -2})), // Axis - ::testing::ValuesIn(std::vector({ElementType::f16, ElementType::f32})), - ::testing::Values(ElementType::i32), + ::testing::ValuesIn(std::vector({ov::element::f16, ov::element::f32})), + ::testing::Values(ov::element::i32), ::testing::Values(ov::test::utils::DEVICE_GPU)), GatherElementsGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp index f3d98ff82decd8..09fa75d5a5485c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_nd.cpp @@ -3,17 +3,18 @@ // #include "shared_test_classes/single_layer/gather_nd.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_nd.hpp" + +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { struct GatherNDShapeParams { InputShape inputShapes; InputShape targetShapes; @@ -22,20 +23,20 @@ struct GatherNDShapeParams { typedef std::tuple< GatherNDShapeParams, - ElementType, // Network precision - bool // Is const Indices + ov::element::Type, // Model type + bool // Is const Indices > GatherNDGPUTestParams; class GatherNDGPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { GatherNDShapeParams Shapes; - ElementType netPrecision; + ov::element::Type model_type; bool isIndicesConstant; - std::tie(Shapes, netPrecision, isIndicesConstant) = obj.param; + std::tie(Shapes, model_type, isIndicesConstant) = obj.param; std::ostringstream result; result << "IS=("; @@ -53,7 +54,7 @@ class GatherNDGPUTest : public testing::WithParamInterfaceGetParam(); + std::tie(Shapes, model_type, isIndicesConstant) = this->GetParam(); const int batchDims = Shapes.batch_dims; targetDevice = ov::test::utils::DEVICE_GPU; std::shared_ptr indicesNode; @@ -78,7 +79,7 @@ class GatherNDGPUTest : public testing::WithParamInterface(netPrecision, inputDynamicShapes[0])}; + ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes[0])}; params.back()->set_friendly_name("data"); if (isIndicesConstant) { @@ -88,13 +89,8 @@ class GatherNDGPUTest : public testing::WithParamInterface(Shapes.inputShapes.second[i][j]), idx_range); } } - indicesNode = ngraph::builder::makeConstant( - ngraph::element::i64, - Shapes.targetShapes.second[0], - {}, - true, - idx_range - 1, - 0); + auto indices_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, Shapes.targetShapes.second[0], idx_range - 1, 0); + indicesNode = std::make_shared(indices_tensor); } else { params.push_back(std::make_shared(intInputsPrecision, inputDynamicShapes[1])); params.back()->set_friendly_name("indices"); @@ -103,20 +99,16 @@ class 
GatherNDGPUTest : public testing::WithParamInterface(params[0], isIndicesConstant ? indicesNode : params[1], batchDims); - ngraph::ResultVector results{std::make_shared(gather_ndNode)}; - function = std::make_shared(results, params, "GatherND"); + ov::ResultVector results{std::make_shared(gather_ndNode)}; + function = std::make_shared(results, params, "GatherND"); } }; -TEST_P(GatherNDGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GatherNDGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::f16, ov::element::i32 @@ -158,8 +150,7 @@ const std::vector dynamicInputShapeConstTargetShape = { INSTANTIATE_TEST_SUITE_P(smoke_dynamic_input_shapes_const_target_shapes, GatherNDGPUTest, ::testing::Combine( ::testing::ValuesIn(dynamicInputShapeConstTargetShape), // input shapes - ::testing::ValuesIn(netPrecisions), // network precision + ::testing::ValuesIn(model_types), // network precision ::testing::Values(true)), // is const indices GatherNDGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp index 2b48e5f4a5c754..ffedacd6bae4b1 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/gather_tree.cpp @@ -2,36 +2,35 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/gather_tree.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/gather_tree.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< InputShape, // Input tensors shape - ngraph::helpers::InputLayerType, // Secondary input type - ov::element::Type_t, // Network precision + ov::test::utils::InputLayerType, // Secondary input type + ov::element::Type, // Model type std::string // Device name > GatherTreeGPUTestParams; class GatherTreeLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { InputShape inputShape; - ov::element::Type_t netPrecision; - ngraph::helpers::InputLayerType secondaryInputType; + ov::element::Type_t model_type; + ov::test::utils::InputLayerType secondaryInputType; std::string targetName; - std::tie(inputShape, secondaryInputType, netPrecision, targetName) = obj.param; + std::tie(inputShape, secondaryInputType, model_type, targetName) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; @@ -40,7 +39,7 @@ class GatherTreeLayerGPUTest : public testing::WithParamInterface(paramsIn.front(), inp2, inp3, inp4); + auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4); - ngraph::ResultVector 
results{std::make_shared(operationResult)}; - function = std::make_shared(results, paramsIn, "GatherTree"); + ov::ResultVector results{std::make_shared(operationResult)}; + function = std::make_shared(results, paramsIn, "GatherTree"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto maxBeamIndex = targetInputStaticShapes.front().at(2) - 1; const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = (i == 2 || i == 3) ? maxBeamIndex / 2 : 0; + in_data.range = maxBeamIndex; auto tensor = - ov::test::utils::create_and_fill_tensor(funcInputs[i].get_element_type(), - targetInputStaticShapes[i], - maxBeamIndex, - (i == 2 || i == 3) ? maxBeamIndex / 2 : 0); + ov::test::utils::create_and_fill_tensor(funcInputs[i].get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInputs[i].get_node_shared_ptr(), tensor}); } } }; -TEST_P(GatherTreeLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GatherTreeLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { +const std::vector model_types = { ov::element::f32, ov::element::i32 }; @@ -167,19 +165,17 @@ const std::vector inputDynamicShapesConstant = { INSTANTIATE_TEST_SUITE_P(smoke_gathertree_parameter_compareWithRefs_dynamic, GatherTreeLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inputDynamicShapesParameter), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), GatherTreeLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_gathertree_constant_compareWithRefs_dynamic, GatherTreeLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inputDynamicShapesConstant), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU)), GatherTreeLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions - diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp index 11862e3d42cbb2..8c432154f730b3 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/grid_sample.cpp @@ -2,38 +2,37 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/select.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include - -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; -using ov::op::v9::GridSample; -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/grid_sample.hpp" + +namespace { +using ov::test::InputShape; +using ov::op::v9::GridSample; typedef std::tuple< 
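Note: input generation across these files moves from positional arguments to the InputGenerateData struct, using the three fields that appear in the diff (start_from, range, resolution). A minimal sketch with placeholder shape and type; resolution is understood here as controlling the fractional granularity of generated values:

// Sketch: named-field input generation.
ov::test::utils::InputGenerateData in_data;
in_data.start_from = -1;   // lowest generated value
in_data.range = 10;        // width of the value interval
in_data.resolution = 5;    // fractional granularity of generated values
auto tensor = ov::test::utils::create_and_fill_tensor(
    ov::element::f32, ov::Shape{1, 3, 4, 4}, in_data);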
std::vector, // Input shapes GridSample::InterpolationMode, // Interpolation mode GridSample::PaddingMode, // Padding mode bool, // Align corners - ElementType, // Data precision - ElementType // Grid precision + ov::element::Type, // Data precision + ov::element::Type // Grid precision > GridSampleLayerTestGPUParams; class GridSampleLayerTestGPU : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector inputShapes; GridSample::InterpolationMode interpolateMode; GridSample::PaddingMode paddingMode; bool alignCorners; - ElementType dataPrecision, gridPrecision; + ov::element::Type dataPrecision, gridPrecision; std::tie(inputShapes, interpolateMode, paddingMode, alignCorners, dataPrecision, gridPrecision) = obj.param; @@ -69,7 +68,7 @@ class GridSampleLayerTestGPU : public testing::WithParamInterfaceGetParam(); targetDevice = ov::test::utils::DEVICE_GPU; @@ -86,12 +85,12 @@ class GridSampleLayerTestGPU : public testing::WithParamInterface(params[0], params[1], attributes); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < gridSampleNode->get_output_size(); i++) { - results.push_back(std::make_shared(gridSampleNode->output(i))); + results.push_back(std::make_shared(gridSampleNode->output(i))); } - function = std::make_shared(results, params, "GridSampleGPU"); + function = std::make_shared(results, params, "GridSampleGPU"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -101,24 +100,26 @@ class GridSampleLayerTestGPU : public testing::WithParamInterfaceget_friendly_name() == "data") { int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1u, std::multiplies()); - tensor = utils::create_and_fill_tensor( - funcInput.get_element_type(), targetInputStaticShapes[0], range, -range / 2, 1); + in_data.start_from = -range / 2; + in_data.range = range; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[0], in_data); } else if (funcInput.get_node()->get_friendly_name() == "grid") { int32_t range = std::max(targetInputStaticShapes[0][2], targetInputStaticShapes[0][3]) + 2; - int32_t resolution = range / 2; - tensor = utils::create_and_fill_tensor( - funcInput.get_element_type(), targetInputStaticShapes[1], range, -1, resolution == 0 ? 
1 : resolution); + in_data.start_from = -1; + in_data.range = range; + in_data.resolution = range / 2; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[1], in_data); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } } }; -TEST_P(GridSampleLayerTestGPU, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(GridSampleLayerTestGPU, Inference) { run(); } @@ -152,8 +153,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_dynamic, GridSampleLayerTestGPU, ::testing::ValuesIn(interpolateMode), ::testing::ValuesIn(paddingMode), ::testing::ValuesIn(alignCorners), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::f32)), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::f32)), GridSampleLayerTestGPU::getTestCaseName); - -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp index 531c97fa218a16..8a93e4b89b12d2 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp @@ -2,54 +2,50 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/group_convolution_backprop_data.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" - -using namespace InferenceEngine; -using namespace ov::test; +#include "common_test_utils/test_enums.hpp" +#include "common_test_utils/node_builders/group_convolution_backprop_data.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/group_convolution_backprop_data.hpp" -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/group_conv.hpp" -using GroupDeconvSpecParams = LayerTestsDefinitions::groupConvBackpropSpecificParams; +namespace { +using ov::test::InputShape; +using ov::test::groupConvBackpropSpecificParams; using DeconvInputData = std::tuple>>; // values for 'output_shape' -using GroupDeconvLayerTestParamsSet = std::tuple>; class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - GroupDeconvSpecParams basicParamsSet; + groupConvBackpropSpecificParams basicParamsSet; DeconvInputData inputData; - ElementType prec; + ov::element::Type prec; std::string targetDevice; std::map additionalConfig; std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels, groupNum; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet; InputShape inputShape; - 
ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::vector> outShapeData; std::tie(inputShape, outShapeType, outShapeData) = inputData; @@ -89,13 +85,13 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { if (function->get_parameters().size() != 1) { // WA: output_shape depends on 3rd deconvolution input data // but the reference implementation doesn't implement shape inference - // so we need to build a new ngraph function and replace the 3rd input parameter with a constant + // so we need to build a new ov function and replace the 3rd input parameter with a constant // to get valid output shapes - functionRefs = createGraph({targetInputStaticShapes[0]}, ngraph::helpers::InputLayerType::CONSTANT); + functionRefs = createGraph({targetInputStaticShapes[0]}, ov::test::utils::InputLayerType::CONSTANT); } inputs.clear(); const auto& funcInputs = function->inputs(); @@ -106,7 +102,11 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface createGraph(const std::vector& inShapes, ngraph::helpers::InputLayerType outShapeType) { + std::shared_ptr createGraph(const std::vector& inShapes, ov::test::utils::InputLayerType outShapeType) { ov::ParameterVector params{std::make_shared(prec, inShapes.front())}; std::shared_ptr outShapeNode; if (!outShapeData.empty()) { - if (outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (outShapeType == ov::test::utils::InputLayerType::PARAMETER) { IE_ASSERT(inputDynamicShapes.size() == 2); - auto outShapeParam = std::make_shared(ngraph::element::i32, inputDynamicShapes.back()); + auto outShapeParam = std::make_shared(ov::element::i32, inputDynamicShapes.back()); params.push_back(outShapeParam); outShapeNode = outShapeParam; } else { - outShapeNode = ngraph::opset8::Constant::create(ngraph::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); + outShapeNode = ov::op::v0::Constant::create(ov::element::i32, {outShapeData[inferRequestNum].size()}, outShapeData[inferRequestNum]); } } @@ -180,36 +180,36 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface deconv; if (!outShapeData.empty()) { IE_ASSERT(outShapeNode != nullptr); - deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShapeNode, prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, groupNum); + deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], outShapeNode, prec, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, groupNum); } else { - deconv = ngraph::builder::makeGroupConvolutionBackpropData(params[0], prec, kernel, stride, padBegin, - padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding); + deconv = ov::test::utils::make_group_convolution_backprop_data(params[0], prec, kernel, stride, padBegin, + padEnd, dilation, padType, convOutChannels, groupNum, false, outPadding); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < deconv->get_output_size(); i++) - results.push_back(std::make_shared(deconv->output(i))); + results.push_back(std::make_shared(deconv->output(i))); - return std::make_shared(results, params, "GroupDeconv"); + return std::make_shared(results, params, "GroupDeconv"); } protected: void SetUp() override { - GroupDeconvSpecParams basicParamsSet; + groupConvBackpropSpecificParams 
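Note: when output_shape is not a runtime input, both deconvolution tests build it with ov::op::v0::Constant::create, as shown in the diff. A one-liner sketch with assumed values:

// Sketch: the i32 output_shape constant fed to (Group)ConvolutionBackpropData.
std::vector<size_t> out_shape_values{15, 15};
auto out_shape_node = ov::op::v0::Constant::create(
    ov::element::i32, ov::Shape{out_shape_values.size()}, out_shape_values);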
basicParamsSet; DeconvInputData inputData; std::map additionalConfig; std::tie(basicParamsSet, inputData, prec, targetDevice, additionalConfig) = this->GetParam(); InputShape inputShape; - ngraph::helpers::InputLayerType outShapeType; + ov::test::utils::InputLayerType outShapeType; std::tie(inputShape, outShapeType, outShapeData) = inputData; std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, groupNum, padType, outPadding) = basicParamsSet; std::vector paramsShapes; paramsShapes.push_back(inputShape); - if (!outShapeData.empty() && outShapeType == ngraph::helpers::InputLayerType::PARAMETER) { + if (!outShapeData.empty() && outShapeType == ov::test::utils::InputLayerType::PARAMETER) { const auto outShapeDims = ov::Shape{outShapeData.front().size()}; paramsShapes.push_back(InputShape{outShapeDims, std::vector(inputShape.second.size(), outShapeDims)}); } @@ -220,38 +220,34 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface kernel, stride, dilation; std::vector padBegin, padEnd, outPadding; size_t convOutChannels, groupNum; std::vector> outShapeData; size_t inferRequestNum = 0; }; -TEST_P(GroupDeconvolutionLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(GroupDeconvolutionLayerGPUTest, Inference) { run(); } -namespace { - std::map emptyAdditionalConfig; const std::vector> emptyOutputShape = {{}}; const std::vector> emptyOutputPadding = {{}}; /* ============= GroupConvolution params ============= */ -const InferenceEngine::SizeVector numOutChannels = {6}; -const InferenceEngine::SizeVector numGroups = {2, 3}; +const std::vector numOutChannels = {6}; +const std::vector numGroups = {2, 3}; /* ============= GroupConvolution params (2D) ============= */ -const std::vector kernels2d = {{3, 3}, {1, 1}}; -const std::vector strides2d = {{1, 1}, {2, 2}}; +const std::vector> kernels2d = {{3, 3}, {1, 1}}; +const std::vector> strides2d = {{1, 1}, {2, 2}}; const std::vector> padBegins2d = {{0, 0}}; const std::vector> padEnds2d = {{0, 0}}; -const std::vector dilations2d = {{1, 1}}; +const std::vector> dilations2d = {{1, 1}}; /* ============= GroupConvolution (2D) ============= */ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine( @@ -262,29 +258,29 @@ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine( ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding) ); const std::vector dyn_2D_inputs_smoke = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {} } }; @@ -293,7 +289,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLa ::testing::Combine( 
groupConvParams_ExplicitPadding_2D, ::testing::ValuesIn(dyn_2D_inputs_smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), GroupDeconvolutionLayerGPUTest::getTestCaseName); @@ -301,17 +297,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_FP32, GroupDeconvolutionLa const std::vector dyn_2D_inputs_with_output_shape = { DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{1, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{15, 15}, {9, 10}, {15, 15}} }, DeconvInputData{ InputShape{{-1, 12, -1, -1}, {{2, 12, 7, 7}, {2, 12, 5, 7}, {1, 12, 9, 4}, {2, 12, 5, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} }, DeconvInputData{ InputShape{{{1, 10}, 12, 7, 7}, {{1, 12, 7, 7}, {3, 12, 7, 7}, {2, 12, 7, 7}}}, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{15, 15}} } }; @@ -319,21 +315,19 @@ const std::vector dyn_2D_inputs_with_output_shape = { INSTANTIATE_TEST_SUITE_P(smoke_GroupDeconv_2D_Dynamic_OutputShape_FP32, GroupDeconvolutionLayerGPUTest, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), + ::testing::Values(std::vector{3, 3}), ::testing::ValuesIn(strides2d), ::testing::ValuesIn(padBegins2d), ::testing::ValuesIn(padEnds2d), ::testing::ValuesIn(dilations2d), ::testing::ValuesIn(numOutChannels), ::testing::ValuesIn(numGroups), - ::testing::Values(ngraph::op::PadType::EXPLICIT), + ::testing::Values(ov::op::PadType::EXPLICIT), ::testing::ValuesIn(emptyOutputPadding)), ::testing::ValuesIn(dyn_2D_inputs_with_output_shape), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(emptyAdditionalConfig)), GroupDeconvolutionLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp index 01fad8fc9f6817..892be327e07f23 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp @@ -1,46 +1,39 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/node_builders/group_convolution.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/group_convolution.hpp" -#include "common_test_utils/test_constants.hpp" +#include "shared_test_classes/single_op/group_convolution.hpp" -// using namespace LayerTestsDefinitions; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/group_conv.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; +using ov::test::groupConvSpecificParams; -using LayerTestsDefinitions::groupConvSpecificParams; typedef std::tuple< groupConvSpecificParams, - ElementType, // Net 
precision - ElementType, // Input precision - ElementType, // Output precision - InputShape, // Input shape - LayerTestsUtils::TargetDevice // Device name + ov::element::Type, // Model type + InputShape, // Input shape + std::string // Device name > groupConvLayerTestParamsSet; - class GroupConvolutionLayerGPUTestDynamic : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { groupConvSpecificParams groupConvParams; - ElementType netType; - ElementType inType, outType; + ov::element::Type model_type; InputShape inputShape; std::string targetDevice; - std::tie(groupConvParams, netType, inType, outType, inputShape, targetDevice) = obj.param; + std::tie(groupConvParams, model_type, inputShape, targetDevice) = obj.param; - ngraph::op::PadType padType; - InferenceEngine::SizeVector kernel, stride, dilation; + ov::op::PadType padType; + std::vector kernel, stride, dilation; std::vector padBegin, padEnd; size_t convOutChannels; size_t numGroups; @@ -62,9 +55,7 @@ class GroupConvolutionLayerGPUTestDynamic : public testing::WithParamInterface(results, inputParams, "GroupConvolution"); } }; -TEST_P(GroupConvolutionLayerGPUTestDynamic, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(GroupConvolutionLayerGPUTestDynamic, Inference) { run(); } -namespace { const std::vector dynInputShapes1D = { { {1, 12, ov::Dimension::dynamic()}, @@ -116,17 +105,15 @@ const std::vector dynInputShapes1D = { INSTANTIATE_TEST_SUITE_P(smoke_DwGroupConvolutionLayerGPUTest_dynamic1DSymPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{0}), ::testing::Values(std::vector{0}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(12), ::testing::Values(12), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -135,17 +122,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_DwGroupConvolutionLayerGPUTest_dynamic1DSymPad, G INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic1DSymPad_Disabled, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), ::testing::Values(std::vector{0}), ::testing::Values(std::vector{0}), - ::testing::Values(SizeVector{1}), + ::testing::Values(std::vector{1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes1D), ::testing::Values(ov::test::utils::DEVICE_GPU)), 
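Note: the group-convolution test-parameter tuple is trimmed to a single model type; the separate net/input/output precisions (previously pinned to f16/f16/undefined in every suite) are gone. Restated compactly from the diff:

// The simplified parameter set after this change.
typedef std::tuple<groupConvSpecificParams,  // kernel/stride/pads/dilation/channels/groups/pad type
                   ov::element::Type,        // model type (replaces net + in + out precisions)
                   ov::test::InputShape,     // input shape
                   std::string>              // device name
    groupConvLayerTestParamsSet;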
GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -161,17 +146,15 @@ const std::vector dynInputShapes2D = { INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2DSymPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{1, 2}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -179,17 +162,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2DSymPad, Gro INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{2, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::EXPLICIT, ngraph::op::PadType::VALID})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::EXPLICIT, ov::op::PadType::VALID})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -197,17 +178,15 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymPad, G INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_SymAutoPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - ::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{1, 2}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); @@ -215,20 +194,16 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_SymAutoPad INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionLayerGPUTest_dynamic2D_AsymAutoPad, GroupConvolutionLayerGPUTestDynamic, ::testing::Combine( ::testing::Combine( - 
::testing::Values(SizeVector{3, 3}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{3, 3}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(std::vector{1, 2}), ::testing::Values(std::vector{2, 1}), - ::testing::Values(SizeVector{1, 1}), + ::testing::Values(std::vector{1, 1}), ::testing::Values(4), ::testing::Values(4), - ::testing::ValuesIn({ngraph::op::PadType::SAME_LOWER, ngraph::op::PadType::SAME_UPPER})), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), + ::testing::ValuesIn({ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER})), + ::testing::Values(ov::element::f16), ::testing::ValuesIn(dynInputShapes2D), ::testing::Values(ov::test::utils::DEVICE_GPU)), GroupConvolutionLayerGPUTestDynamic::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp index 5bfe9bb5612205..133a515fe58104 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp @@ -2,67 +2,69 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/interpolate.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include -#include "openvino/core/preprocess/pre_post_process.hpp" -using namespace ov::test; -using ngraph::helpers::operator<<; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/interpolate.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; -using InterpolateSpecificParams = std::tuple, // PadBegin std::vector, // PadEnd double>; // Cube coef -using ShapeParams = std::tuple>, // scales or sizes values std::vector>; // axes using InterpolateLayerGPUTestParamsSet = std::tuple; // use Interpolate_v11 class InterpolateLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InterpolateSpecificParams specificParams; ShapeParams shapeParams; - ElementType prec; + ov::element::Type prec; bool useInterpolateV11; std::map additionalConfig; std::tie(specificParams, shapeParams, prec, useInterpolateV11) = obj.param; - ngraph::op::v4::Interpolate::InterpolateMode mode; - ngraph::op::v4::Interpolate::CoordinateTransformMode transfMode; - ngraph::op::v4::Interpolate::NearestMode nearMode; + ov::op::v4::Interpolate::InterpolateMode mode; + ov::op::v4::Interpolate::CoordinateTransformMode transfMode; + ov::op::v4::Interpolate::NearestMode nearMode; bool antiAlias; std::vector padBegin; std::vector padEnd; double cubeCoef; std::tie(mode, transfMode, nearMode, antiAlias, padBegin, padEnd, cubeCoef) = specificParams; - ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; + ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode; InputShape inputShapes; - ngraph::helpers::InputLayerType sizesInputType; - ngraph::helpers::InputLayerType scalesInputType; + ov::test::utils::InputLayerType sizesInputType; + ov::test::utils::InputLayerType scalesInputType; std::vector> 
shapeDataForInput; std::vector axes; std::tie(shapeCalcMode, inputShapes, sizesInputType, scalesInputType, shapeDataForInput, axes) = shapeParams; std::ostringstream result; + using ov::operator<<; result << "ShapeCalcMode=" << shapeCalcMode << "_"; result << "IS="; result << ov::test::utils::partialShape2str({inputShapes.first}) << "_"; @@ -70,7 +72,7 @@ class InterpolateLayerGPUTest : public testing::WithParamInterface interpolate; - bool scalesMode = shapeCalcMode == ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES; + std::shared_ptr interpolate; + bool scalesMode = shapeCalcMode == ov::op::v4::Interpolate::ShapeCalcMode::SCALES; if (useInterpolateV11) { if (axes.size() != dataShape.first.size()) { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], scalesMode ? scalesInput : sizesInput, axesInput, interpAttr); } else { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], scalesMode ? scalesInput : sizesInput, interpAttr); } } else { - interpolate = std::make_shared(params[0], + interpolate = std::make_shared(params[0], sizesInput, scalesInput, axesInput, interpAttr); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < interpolate->get_output_size(); ++i) { - results.push_back(std::make_shared(interpolate->output(i))); + results.push_back(std::make_shared(interpolate->output(i))); } - function = std::make_shared(results, params, "InterpolateGPU"); + function = std::make_shared(results, params, "InterpolateGPU"); } }; -TEST_P(InterpolateLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(InterpolateLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector coordinateTransformModes_Smoke = { - ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, +const std::vector coordinateTransformModes_Smoke = { + ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, }; -const std::vector coordinateTransformModes_Full = { - ngraph::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, - ngraph::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, - ngraph::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, +const std::vector coordinateTransformModes_Full = { + ov::op::v4::Interpolate::CoordinateTransformMode::TF_HALF_PIXEL_FOR_NN, + ov::op::v4::Interpolate::CoordinateTransformMode::PYTORCH_HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::HALF_PIXEL, + ov::op::v4::Interpolate::CoordinateTransformMode::ASYMMETRIC, + ov::op::v4::Interpolate::CoordinateTransformMode::ALIGN_CORNERS, }; -const std::vector nearestModes_Smoke = { - ngraph::op::v4::Interpolate::NearestMode::SIMPLE, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ngraph::op::v4::Interpolate::NearestMode::FLOOR, +const std::vector nearestModes_Smoke = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, }; -const std::vector nearestModes_Full = { - ngraph::op::v4::Interpolate::NearestMode::SIMPLE, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, - ngraph::op::v4::Interpolate::NearestMode::FLOOR, - 
ngraph::op::v4::Interpolate::NearestMode::CEIL, - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, +const std::vector nearestModes_Full = { + ov::op::v4::Interpolate::NearestMode::SIMPLE, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, + ov::op::v4::Interpolate::NearestMode::FLOOR, + ov::op::v4::Interpolate::NearestMode::CEIL, + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_CEIL, }; -const std::vector defNearestModes = { - ngraph::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, +const std::vector defNearestModes = { + ov::op::v4::Interpolate::NearestMode::ROUND_PREFER_FLOOR, }; const std::vector antialias = { @@ -337,50 +340,50 @@ const std::vector> reducedAxes4D = { const std::vector shapeParams4D_Smoke = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f}, {1.f, 1.f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 0.5f, 2.0f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{0.5f, 2.0f}}, reducedAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}, {2, 7, 8, 7}, {1, 11, 5, 6}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 2, 24, 10}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {1, 10}, -1, -1}, {{1, 2, 12, 20}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{24, 10}}, reducedAxes4D.front() } @@ -388,18 +391,18 @@ const std::vector shapeParams4D_Smoke = { const std::vector shapeParams4D_Full = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, 
{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() } @@ -407,41 +410,41 @@ const std::vector shapeParams4D_Full = { const std::vector shapeParams4DReducedAxis_Full = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6}}, defaultAxes4D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}, {2, 7, 6, 5}, {1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.5f}}, reducedAxes4D.back() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1}, {{1, 11, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{6}}, reducedAxes4D.back() } }; const auto interpolateCasesNN_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(nearestModes_Smoke), ::testing::ValuesIn(antialias), @@ -450,7 +453,7 @@ const auto interpolateCasesNN_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesNN_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(nearestModes_Full), ::testing::ValuesIn(antialias), @@ -462,7 +465,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN_Layout_Test, InterpolateLayerGPUTes ::testing::Combine( interpolateCasesNN_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); @@ -470,12 +473,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN_Layout_Test, 
InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesNN_Full, ::testing::ValuesIn(shapeParams4DReducedAxis_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -484,7 +487,7 @@ const auto interpolateCasesLinearOnnx_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesLinearOnnx_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -496,7 +499,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx_Layout_Test, InterpolateLay ::testing::Combine( interpolateCasesLinearOnnx_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -504,12 +507,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx_Layout_Test, InterpolateLayerGPUT ::testing::Combine( interpolateCasesLinearOnnx_Full, ::testing::ValuesIn(shapeParams4D_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesLinear_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -518,7 +521,7 @@ const auto interpolateCasesLinear_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesLinear_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -530,7 +533,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinear_Layout_Test, InterpolateLayerGP ::testing::Combine( interpolateCasesLinear_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -538,12 +541,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinear_Layout_Test, InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesLinear_Full, ::testing::ValuesIn(shapeParams4DReducedAxis_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesCubic_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::CUBIC), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::CUBIC), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), 
::testing::ValuesIn(antialias), @@ -552,7 +555,7 @@ const auto interpolateCasesCubic_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesCubic_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::CUBIC), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::CUBIC), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -564,7 +567,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateCubic_Layout_Test, InterpolateLayerGPU ::testing::Combine( interpolateCasesCubic_Smoke, ::testing::ValuesIn(shapeParams4D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -572,7 +575,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateCubic_Layout_Test, InterpolateLayerGPUTest, ::testing::Combine( interpolateCasesCubic_Full, ::testing::ValuesIn(shapeParams4DReducedAxis_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); @@ -592,42 +595,42 @@ const std::vector> reducedAxes5D = { const std::vector shapeParams5D_Smoke = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}, {1.f, 1.f, 1.25f, 1.25f, 1.25f}, {1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1.f, 1.f, 1.5f, 2.f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 2}, {2, 7, 8, 7, 4}, {1, 11, 5, 6, 2}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{1, 4, 4, 1, 6}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 10}, -1, -1, -1}, {{1, 4, 2, 3, 4}}}, - ngraph::helpers::InputLayerType::PARAMETER, - ngraph::helpers::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, + ov::test::utils::InputLayerType::PARAMETER, {{4, 1, 6}}, reducedAxes5D.front() }, @@ -635,33 +638,33 @@ const std::vector shapeParams5D_Smoke = { const std::vector 
shapeParams5D_Full = { ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SCALES, + ov::op::v4::Interpolate::ShapeCalcMode::SCALES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {2, 7, 6, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1.f, 1.f, 1.25f, 1.5f, 0.5f}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 11, 5, 6, 4}}, defaultAxes5D.front() }, ShapeParams{ - ngraph::op::v4::Interpolate::ShapeCalcMode::SIZES, + ov::op::v4::Interpolate::ShapeCalcMode::SIZES, InputShape{{-1, {2, 20}, -1, -1, -1}, {{1, 11, 4, 4, 4}, {1, 11, 5, 5, 8}, {1, 11, 4, 4, 4}}}, - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::CONSTANT, {{1, 6, 4}}, reducedAxes5D.front() } }; const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -670,7 +673,7 @@ const auto interpolateCasesLinearOnnx5D_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesLinearOnnx5D_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::LINEAR_ONNX), ::testing::ValuesIn(coordinateTransformModes_Full), ::testing::ValuesIn(defNearestModes), ::testing::ValuesIn(antialias), @@ -682,7 +685,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateLinearOnnx5D_Layout_Test, InterpolateL ::testing::Combine( interpolateCasesLinearOnnx5D_Smoke, ::testing::ValuesIn(shapeParams5D_Smoke), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(false)), InterpolateLayerGPUTest::getTestCaseName); @@ -690,12 +693,12 @@ INSTANTIATE_TEST_SUITE_P(InterpolateLinearOnnx5D_Layout_Test, InterpolateLayerGP ::testing::Combine( interpolateCasesLinearOnnx5D_Full, ::testing::ValuesIn(shapeParams5D_Full), - ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::f32), ::testing::Values(true, false)), InterpolateLayerGPUTest::getTestCaseName); const auto interpolateCasesNN5D_Smoke = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), ::testing::ValuesIn(coordinateTransformModes_Smoke), ::testing::ValuesIn(nearestModes_Smoke), ::testing::ValuesIn(antialias), @@ -704,7 +707,7 @@ const auto interpolateCasesNN5D_Smoke = ::testing::Combine( ::testing::ValuesIn(cubeCoefs)); const auto interpolateCasesNN5D_Full = ::testing::Combine( - ::testing::Values(ngraph::op::v4::Interpolate::InterpolateMode::NEAREST), + ::testing::Values(ov::op::v4::Interpolate::InterpolateMode::NEAREST), 
     ::testing::ValuesIn(coordinateTransformModes_Full),
     ::testing::ValuesIn(nearestModes_Full),
     ::testing::ValuesIn(antialias),
@@ -716,7 +719,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_InterpolateNN5D_Layout_Test, InterpolateLayerGPUT
     ::testing::Combine(
         interpolateCasesNN5D_Smoke,
         ::testing::ValuesIn(shapeParams5D_Smoke),
-        ::testing::Values(ElementType::f32),
+        ::testing::Values(ov::element::f32),
         ::testing::Values(true, false)),
     InterpolateLayerGPUTest::getTestCaseName);
@@ -724,10 +727,7 @@ INSTANTIATE_TEST_SUITE_P(InterpolateNN5D_Layout_Test, InterpolateLayerGPUTest,
     ::testing::Combine(
         interpolateCasesNN5D_Full,
         ::testing::ValuesIn(shapeParams5D_Full),
-        ::testing::Values(ElementType::f32),
+        ::testing::Values(ov::element::f32),
         ::testing::Values(true, false)),
     InterpolateLayerGPUTest::getTestCaseName);
-
 } // namespace
-
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp
index 4f0f69ab172d9c..2b3d2dccf2cc77 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp
@@ -2,18 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "shared_test_classes/single_layer/mat_mul.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ie_precision.hpp"
-#include "ov_models/builders.hpp"
-#include
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/test_enums.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
-using namespace ngraph;
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/matmul.hpp"
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 struct ShapeRelatedParams {
     std::vector inputShapes;
@@ -22,36 +21,36 @@ struct ShapeRelatedParams {
 typedef std::tuple<
         ShapeRelatedParams,
-        ElementType,                        // Network precision
-        ElementType,                        // Input precision
-        ElementType,                        // Output precision
-        ngraph::helpers::InputLayerType,    // Secondary input type
-        TargetDevice,                       // Device name
+        ov::element::Type,                  // Network precision
+        ov::element::Type,                  // Input precision
+        ov::element::Type,                  // Output precision
+        ov::test::utils::InputLayerType,    // Secondary input type
+        std::string,                        // Device name
         std::map   // Additional network configuration
 > MatMulLayerTestParamsSet;
 class MatMulLayerGPUTest : public testing::WithParamInterface,
-                           virtual public SubgraphBaseTest {
+                           virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo& obj) {
         MatMulLayerTestParamsSet basicParamsSet = obj.param;
-        ElementType netType;
-        ElementType inType, outType;
-        ShapeRelatedParams shapeRelatedParams;
-        ngraph::helpers::InputLayerType secondaryInputType;
-        TargetDevice targetDevice;
-        std::map additionalConfig;
-        std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) =
+        ov::element::Type model_type;
+        ov::element::Type inType, outType;
+        ShapeRelatedParams shape_related_params;
+        ov::test::utils::InputLayerType secondary_input_type;
+        std::string targetDevice;
+        std::map additional_config;
+        std::tie(shape_related_params, model_type, inType, outType, secondary_input_type, targetDevice, additional_config) =
                 basicParamsSet;
         std::ostringstream result;
         result << "IS=";
-        for (const auto& shape : shapeRelatedParams.inputShapes) {
+        for (const auto& shape : shape_related_params.inputShapes) {
             result << ov::test::utils::partialShape2str({shape.first}) << "_";
         }
         result << "TS=";
-        for (const auto& shape : shapeRelatedParams.inputShapes) {
+        for (const auto& shape : shape_related_params.inputShapes) {
             result << "(";
             if (!shape.second.empty()) {
                 auto itr = shape.second.begin();
@@ -61,15 +60,15 @@ class MatMulLayerGPUTest : public testing::WithParamInterface
GetParam();
-        ShapeRelatedParams shapeRelatedParams;
-        ElementType netType;
-        helpers::InputLayerType secondaryInputType;
-        std::map additionalConfig;
+        ShapeRelatedParams shape_related_params;
+        ov::element::Type model_type;
+        ov::test::utils::InputLayerType secondary_input_type;
+        std::map additional_config;
-        std::tie(shapeRelatedParams, netType, inType, outType, secondaryInputType, targetDevice, additionalConfig) = basicParamsSet;
+        std::tie(shape_related_params, model_type, inType, outType, secondary_input_type, targetDevice, additional_config) = basicParamsSet;
-        init_input_shapes(shapeRelatedParams.inputShapes);
+        init_input_shapes(shape_related_params.inputShapes);
-        bool transpA = shapeRelatedParams.transpose.first;
-        bool transpB = shapeRelatedParams.transpose.second;
+        bool transpA = shape_related_params.transpose.first;
+        bool transpB = shape_related_params.transpose.second;
         if (transpA) {
             transpose(inputDynamicShapes[0]);
@@ -115,69 +114,64 @@ class MatMulLayerGPUTest : public testing::WithParamInterface
(netType, inShapeA)};
+        ov::ParameterVector params{std::make_shared(model_type, inShapeA)};
         std::shared_ptr matrixB;
-        if (secondaryInputType == helpers::InputLayerType::PARAMETER) {
-            auto param = std::make_shared(netType, inShapeB);
+        if (secondary_input_type == ov::test::utils::InputLayerType::PARAMETER) {
+            auto param = std::make_shared(model_type, inShapeB);
             matrixB = param;
             params.push_back(param);
         } else {
             ASSERT_TRUE(inShapeB.is_static());
-            auto tensor = ov::test::utils::create_and_fill_tensor(netType, inShapeB.to_shape());
+            auto tensor = ov::test::utils::create_and_fill_tensor(model_type, inShapeB.to_shape());
             matrixB = std::make_shared(tensor);
         }
         auto matMul = std::make_shared(params[0], matrixB, transpA, transpB);
-        auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector &params, const std::shared_ptr &lastNode) {
-            ngraph::ResultVector results;
+        auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr &lastNode) {
+            ov::ResultVector results;
             for (size_t i = 0; i < lastNode->get_output_size(); i++)
-                results.push_back(std::make_shared(lastNode->output(i)));
+                results.push_back(std::make_shared(lastNode->output(i)));
-            return std::make_shared(results, params, "MatMul");
+            return std::make_shared(results, params, "MatMul");
        };
-        function = makeFunction(netType, params, matMul);
+        function = makeFunction(model_type, params, matMul);
     }
 };
-TEST_P(MatMulLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
+TEST_P(MatMulLayerGPUTest, Inference) {
     run();
 }
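For context, the makeFunction lambda in the SetUp above reduces to a plain Parameter -> MatMul -> Result model. A standalone, hypothetical equivalent is sketched below (shapes borrowed from the {10, 10, 10} entries in IS, where every transpose combination stays shape-compatible; the function name is illustrative):

```cpp
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/matmul.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

// Both inputs are parameters, matching the InputLayerType::PARAMETER cases.
std::shared_ptr<ov::Model> make_matmul_model(bool transpose_a, bool transpose_b) {
    auto a = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 10, 10});
    auto b = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 10, 10});
    auto matmul = std::make_shared<ov::op::v0::MatMul>(a, b, transpose_a, transpose_b);
    auto result = std::make_shared<ov::op::v0::Result>(matmul);
    return std::make_shared<ov::Model>(ov::ResultVector{result},
                                       ov::ParameterVector{a, b}, "MatMul");
}
```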
-namespace {
-
 /* ============= Common params ============= */
 std::map emptyAdditionalConfig;
-std::vector> additionalConfig {
+std::vector> additional_config {
     std::map{/* empty config */},
 };
-const std::vector netPRCs {
-    ElementType::f32,
+const std::vector netPRCs {
+    ov::element::f32,
 };
 /* ============= FullyConnected ============= */
-namespace fullyConnected {
 const std::vector IS2D_smoke = {
-    {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}},
-    {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, true}},
-    {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}},
-    {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, true}},
-    {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}},
-    {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, false}},
-    {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}},
-    {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, true}},
     {
         {
@@ -196,17 +190,17 @@ const std::vector IS2D_smoke = {
 };
 const std::vector IS2D_nightly = {
-    {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}},
-    {static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{59, 1}, {1, 120}}), {true, false}},
-    {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}},
-    {static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{59, 120}, {120, 1}}), {false, true}},
-    {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}},
-    {static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 120}, {120, 59}}), {false, true}},
-    {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}},
-    {static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{71, 128}, {128, 20}}), {false, false}},
     {
         {
@@ -232,31 +226,31 @@ const std::vector IS2D_nightly = {
 };
 const auto testParams2D_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_smoke),
-                                                   ::testing::Values(ElementType::f32),
-                                                   ::testing::Values(ElementType::undefined),
-                                                   ::testing::Values(ElementType::undefined),
-                                                   ::testing::Values(helpers::InputLayerType::CONSTANT),
+                                                   ::testing::Values(ov::element::f32),
+                                                   ::testing::Values(ov::element::undefined),
+                                                   ::testing::Values(ov::element::undefined),
+                                                   ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
                                                    ::testing::Values(ov::test::utils::DEVICE_GPU),
                                                    ::testing::Values(emptyAdditionalConfig));
 INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerGPUTest, testParams2D_smoke, MatMulLayerGPUTest::getTestCaseName);
 const auto testParams2D_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_nightly),
-                                                     ::testing::Values(ElementType::f32),
-                                                     ::testing::Values(ElementType::undefined),
-                                                     ::testing::Values(ElementType::undefined),
-                                                     ::testing::Values(helpers::InputLayerType::CONSTANT),
+                                                     ::testing::Values(ov::element::f32),
+                                                     ::testing::Values(ov::element::undefined),
+                                                     ::testing::Values(ov::element::undefined),
+                                                     ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
                                                      ::testing::Values(ov::test::utils::DEVICE_GPU),
                                                      ::testing::Values(emptyAdditionalConfig));
 INSTANTIATE_TEST_SUITE_P(nightly_FC_2D, MatMulLayerGPUTest, testParams2D_nightly, MatMulLayerGPUTest::getTestCaseName);
 const std::vector IS3D_smoke = {
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}},
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {false, true}},
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}},
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, true}},
     {
         {
@@ -266,7 +260,7 @@ const std::vector IS3D_smoke = {
         {false, true}
     },
-    {static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 429}, {1, 429, 1}}), {true, true}},
     {
         {
             {{-1, -1}, {{1, 129}, {2, 129}, {1, 129}, {2, 129}}},
@@ -285,11 +279,11 @@ const std::vector IS3D_smoke = {
 };
 const std::vector IS3D_nightly = {
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}},
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 5}}), {true, true}},
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}},
-    {static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 32, 120}, {120, 50}}), {true, true}},
     {
         {
@@ -315,20 +309,20 @@ const std::vector IS3D_nightly = {
 };
 const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke),
-                                                             ::testing::Values(ElementType::f32),
-                                                             ::testing::Values(ElementType::undefined),
-                                                             ::testing::Values(ElementType::undefined),
-                                                             ::testing::Values(helpers::InputLayerType::CONSTANT),
+                                                             ::testing::Values(ov::element::f32),
+                                                             ::testing::Values(ov::element::undefined),
+                                                             ::testing::Values(ov::element::undefined),
+                                                             ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
                                                              ::testing::Values(ov::test::utils::DEVICE_GPU),
                                                              ::testing::Values(emptyAdditionalConfig));
 INSTANTIATE_TEST_SUITE_P(smoke_FC_3D, MatMulLayerGPUTest, fullyConnectedParams3D_smoke, MatMulLayerGPUTest::getTestCaseName);
 const auto fullyConnectedParams3D_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly),
-                                                               ::testing::Values(ElementType::f32),
-                                                               ::testing::Values(ElementType::undefined),
-                                                               ::testing::Values(ElementType::undefined),
-                                                               ::testing::Values(helpers::InputLayerType::CONSTANT),
+                                                               ::testing::Values(ov::element::f32),
+                                                               ::testing::Values(ov::element::undefined),
+                                                               ::testing::Values(ov::element::undefined),
+                                                               ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
                                                                ::testing::Values(ov::test::utils::DEVICE_GPU),
                                                                ::testing::Values(emptyAdditionalConfig));
@@ -366,62 +360,59 @@ const std::vector IS4D_smoke = {
 };
 const auto fullyConnectedParams4D_smoke = ::testing::Combine(::testing::ValuesIn(IS4D_smoke),
-                                                             ::testing::Values(ElementType::f32),
-                                                             ::testing::Values(ElementType::undefined),
-                                                             ::testing::Values(ElementType::undefined),
-                                                             ::testing::Values(helpers::InputLayerType::CONSTANT),
+                                                             ::testing::Values(ov::element::f32),
+                                                             ::testing::Values(ov::element::undefined),
+                                                             ::testing::Values(ov::element::undefined),
+                                                             ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
                                                              ::testing::Values(ov::test::utils::DEVICE_GPU),
                                                              ::testing::Values(emptyAdditionalConfig));
 INSTANTIATE_TEST_SUITE_P(smoke_FC_4D, MatMulLayerGPUTest, fullyConnectedParams4D_smoke, MatMulLayerGPUTest::getTestCaseName);
-} // namespace fullyConnected
-
 /* ============= MatMul ============= */
-namespace matmul {
 const std::vector IS = {
-    {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}},
-    {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}},
-    {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}},
-    {static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}},
-
-    {static_shapes_to_test_representation({{1, 2, 100010, 120}, {120, 5}}), {true, true}},
-    {static_shapes_to_test_representation({{1, 2, 200010, 120}, {120, 5}}), {false, true}},
-    {static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, true}},
-    {static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, false}},
-
-    {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}},
-    {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}},
-    {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}},
-    {static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}},
-
-    {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}},
-    {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}},
-    {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}},
-    {static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}},
-
-    {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}},
-    {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}},
-    {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}},
-    {static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}}
+    {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 2, 32, 120}, {120, 5}}), {true, true}},
+
+    {ov::test::static_shapes_to_test_representation({{1, 2, 100010, 120}, {120, 5}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 2, 200010, 120}, {120, 5}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, true}},
+    {ov::test::static_shapes_to_test_representation({{1, 2, 30, 120}, {120, 100010}}), {true, false}},
+
+    {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{7, 32, 120}, {3, 7, 120, 50}}), {true, true}},
+
+    {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{10, 10, 10}, {10, 10, 10}}), {true, true}},
+
+    {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{55, 12}, {12, 55}}), {true, true}}
 };
 const std::vector IS_OneDNN = {
-    {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, false}},
-    {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, false}},
-    {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, true}},
-    {static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, true}},
-
-    {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, false}},
-    {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, false}},
-    {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, true}},
-    {static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, true}},
-
-    {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, false}},
-    {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, false}},
-    {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, true}},
-    {static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, true}}
+    {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{2, 4, 32, 120}, {2, 4, 120, 5}}), {true, true}},
+
+    {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{2, 2, 32, 120}, {1, 1, 120, 5}}), {true, true}},
+
+    {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, false}},
+    {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, false}},
+    {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {false, true}},
+    {ov::test::static_shapes_to_test_representation({{12, 12}, {12, 12}}), {true, true}}
 };
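The IS_Dynamic list that follows exercises genuinely dynamic shapes rather than converted static ones. Based on how this file uses it, an InputShape pairs a possibly-dynamic ov::PartialShape with the concrete shapes each inference iteration should run; a hedged sketch of one such entry (the second target shape is illustrative, not from the patch):

```cpp
// First member: dynamic rank-2 shape; second member: the target shapes that
// init_input_shapes()/run() iterate over at inference time.
ov::test::InputShape lhs{
    {-1, -1},
    {{55, 12}, {33, 7}, {55, 12}}
};
```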
 const std::vector IS_Dynamic = {
@@ -678,44 +669,41 @@ const std::vector IS_Dynamic_nightly = {
 const auto testParams = ::testing::Combine(::testing::ValuesIn(IS),
                                            ::testing::ValuesIn(netPRCs),
-                                           ::testing::Values(ElementType::undefined),
-                                           ::testing::Values(ElementType::undefined),
-                                           ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                           ::testing::Values(ov::element::undefined),
+                                           ::testing::Values(ov::element::undefined),
+                                           ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
                                            ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                           ::testing::ValuesIn(additionalConfig));
+                                           ::testing::ValuesIn(additional_config));
 INSTANTIATE_TEST_SUITE_P(smoke_MM_Static, MatMulLayerGPUTest, testParams, MatMulLayerGPUTest::getTestCaseName);
 const auto testParamsOneDNN = ::testing::Combine(::testing::ValuesIn(IS_OneDNN),
-                                                 ::testing::Values(ElementType::f16),
-                                                 ::testing::Values(ElementType::undefined),
-                                                 ::testing::Values(ElementType::undefined),
-                                                 ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                                 ::testing::Values(ov::element::f16),
+                                                 ::testing::Values(ov::element::undefined),
+                                                 ::testing::Values(ov::element::undefined),
+                                                 ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
                                                  ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                                 ::testing::ValuesIn(additionalConfig));
+                                                 ::testing::ValuesIn(additional_config));
 INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_OneDNN, MatMulLayerGPUTest, testParamsOneDNN, MatMulLayerGPUTest::getTestCaseName);
 const auto testParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynamic),
                                                   ::testing::ValuesIn(netPRCs),
-                                                  ::testing::Values(ElementType::undefined),
-                                                  ::testing::Values(ElementType::undefined),
-                                                  ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                                  ::testing::Values(ov::element::undefined),
+                                                  ::testing::Values(ov::element::undefined),
+                                                  ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
                                                   ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                                  ::testing::ValuesIn(additionalConfig));
+                                                  ::testing::ValuesIn(additional_config));
 INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic, MatMulLayerGPUTest::getTestCaseName);
 const auto testParamsDynamic_nightly = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_nightly),
                                                           ::testing::ValuesIn(netPRCs),
-                                                          ::testing::Values(ElementType::undefined),
-                                                          ::testing::Values(ElementType::undefined),
-                                                          ::testing::Values(helpers::InputLayerType::PARAMETER),
+                                                          ::testing::Values(ov::element::undefined),
+                                                          ::testing::Values(ov::element::undefined),
+                                                          ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
                                                           ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                                          ::testing::ValuesIn(additionalConfig));
+                                                          ::testing::ValuesIn(additional_config));
 INSTANTIATE_TEST_SUITE_P(nightly_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic_nightly, MatMulLayerGPUTest::getTestCaseName);
-
-} // namespace matmul
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp
index d6f438bfa2f1da..1ff9fe2378cf2c 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/mvn.cpp
@@ -2,36 +2,38 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include "ov_models/builders.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/mvn.hpp"
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 using basicGPUMvnParams = std::tuple<
        InputShape,         // Input shapes
-       ElementType,        // Input precision
+       ov::element::Type,  // Input precision
        std::vector,        // Reduction axes
        bool,               // Normalize variance
        double>;            // Epsilon
 using MvnLayerGPUTestParamSet = std::tuple<
        basicGPUMvnParams,
-       ElementType>;        // CNNNetwork input precision
+       ov::element::Type>;  // CNNNetwork input precision
 class MvnLayerGPUTest : public testing::WithParamInterface,
-                        virtual public SubgraphBaseTest {
+                        virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo obj) {
         basicGPUMvnParams basicParamsSet;
-        ElementType inputPrecision;
+        ov::element::Type inputPrecision;
         std::tie(basicParamsSet, inputPrecision) = obj.param;
         InputShape inputShapes;
-        ElementType netPrecision;
+        ov::element::Type netPrecision;
         std::vector axes;
         bool normalizeVariance;
         double eps;
@@ -56,11 +58,11 @@ class MvnLayerGPUTest : public testing::WithParamInterface
GetParam();
         InputShape inputShapes;
-        ElementType netPrecision;
+        ov::element::Type netPrecision;
         std::vector axes;
         bool normalizeVariance;
         double eps;
@@ -75,7 +77,7 @@ class MvnLayerGPUTest : public testing::WithParamInterface
(netPrecision, shape));
-        auto axesNode = ngraph::builder::makeConstant(axesType, ngraph::Shape{axes.size()}, axes);
+        auto axesNode = std::make_shared(axesType, ov::Shape{axes.size()}, axes);
         ov::op::MVNEpsMode nEpsMode = ov::op::MVNEpsMode::INSIDE_SQRT;
         if (eps_mode == "outside_sqrt")
             nEpsMode = ov::op::MVNEpsMode::OUTSIDE_SQRT;
@@ -83,21 +85,18 @@ class MvnLayerGPUTest : public testing::WithParamInterface
get_output_size(); ++i) {
-            results.push_back(std::make_shared(mvn->output(i)));
+            results.push_back(std::make_shared(mvn->output(i)));
         }
-        function = std::make_shared(results, params, "MVN");
+        function = std::make_shared(results, params, "MVN");
     }
 };
-TEST_P(MvnLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(MvnLayerGPUTest, Inference) {
     run();
 }
-namespace {
-
 const std::vector inputShapes_1D = {
     {
         // dynamic
@@ -205,12 +204,12 @@ const std::vector reduction_axes_12 = {1, 2};
 const std::vector reduction_axes_3 = {3};
 const std::vector reduction_axes_2 = {2};
-std::vector inpPrc = {ElementType::i8, ElementType::f16, ElementType::f32};
+std::vector inpPrc = {ov::element::i8, ov::element::f16, ov::element::f32};
 const auto Mvn3D = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(inputShapes_3D),
-        ::testing::Values(ElementType::f32),
+        ::testing::Values(ov::element::f32),
         ::testing::ValuesIn({reduction_axes_12, reduction_axes_2}),
         ::testing::ValuesIn(normalizeVariance),
         ::testing::ValuesIn(epsilon)),
@@ -221,7 +220,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn3D, MvnLayerGPUTest, Mvn3D, Mv
 const auto Mvn4D = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(inputShapes_4D),
-        ::testing::Values(ElementType::f32),
+        ::testing::Values(ov::element::f32),
         ::testing::ValuesIn({reduction_axes_2, reduction_axes_3, reduction_axes_12, reduction_axes_23, reduction_axes_123}),
         ::testing::ValuesIn(normalizeVariance),
         ::testing::ValuesIn(epsilon)),
@@ -232,13 +231,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn4D, MvnLayerGPUTest, Mvn4D, Mv
 const auto Mvn5D = ::testing::Combine(
     ::testing::Combine(
         ::testing::ValuesIn(inputShapes_5D),
-        ::testing::Values(ElementType::f32),
+        ::testing::Values(ov::element::f32),
         ::testing::ValuesIn({reduction_axes_3, reduction_axes_23, reduction_axes_123, reduction_axes_1234}),
         ::testing::ValuesIn(normalizeVariance),
         ::testing::ValuesIn(epsilon)),
     ::testing::ValuesIn(inpPrc));
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Mvn5D, MvnLayerGPUTest, Mvn5D, MvnLayerGPUTest::getTestCaseName);
-
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp
index 4708b2f0ffb5a4..2e798d7639542b 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/non_max_suppression.cpp
@@ -2,22 +2,16 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/non_max_suppression.hpp"
-#include "common_test_utils/test_constants.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/test_enums.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
-using namespace InferenceEngine;
-using namespace ov::test;
-using namespace ngraph;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/non_max_suppression.hpp"
-namespace GPULayerTestsDefinitions {
+namespace {
 enum {
     BATCHES,
@@ -32,9 +26,9 @@ using TargetShapeParams = std::tuple,               // bounds for input dynamic shape
                std::vector>;  // target input dimensions
-using InputPrecisions = std::tuple;  // iou_threshold, score_threshold, soft_nms_sigma precisions
 using ThresholdValues = std::tuple>;  // Additional network configuration
-class NmsLayerGPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest {
+class NmsLayerGPUTest : public testing::WithParamInterface,
+                        virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo& obj) {
         InputShapeParams inShapeParams;
@@ -58,17 +53,17 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
         int32_t maxOutBoxesPerClass;
         ThresholdValues thrValues;
         float iouThr, scoreThr, softNmsSigma;
-        op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
+        ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
         bool sortResDescend;
-        element::Type outType;
-        TargetDevice targetDevice;
+        ov::element::Type outType;
+        std::string targetDevice;
         std::map additionalConfig;
         std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, thrValues, boxEncoding, sortResDescend, outType,
                  targetDevice, additionalConfig) = obj.param;
         std::tie(iouThr, scoreThr, softNmsSigma) = thrValues;
-        ElementType paramsPrec, maxBoxPrec, thrPrec;
+        ov::element::Type paramsPrec, maxBoxPrec, thrPrec;
         std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;
         std::vector bounds;
@@ -88,6 +83,7 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
         result << "paramsPrec=" << paramsPrec << "_maxBoxPrec=" << maxBoxPrec << "_thrPrec=" << thrPrec << "_";
         result << "maxOutBoxesPerClass=" << maxOutBoxesPerClass << "_";
         result << "iouThr=" << iouThr << "_scoreThr=" << scoreThr << "_softNmsSigma=" << softNmsSigma << "_";
+        using ov::operator<<;
         result << "boxEncoding=" << boxEncoding << "_sortResDescend=" << sortResDescend << "_outType=" << outType << "_";
         result << "config=(";
         for (const auto& configEntry : additionalConfig) {
@@ -99,7 +95,7 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
         return result.str();
     }
-    void generate_inputs(const std::vector& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector& targetInputStaticShapes) override {
         SubgraphBaseTest::generate_inputs(targetInputStaticShapes);
         // w/a to fill valid data for port 2
         const auto& funcInputs = function->inputs();
@@ -122,13 +118,13 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
         InputPrecisions inPrecisions;
         ThresholdValues thrValues;
         float iouThr, scoreThr, softNmsSigma;
-        op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
+        ov::op::v9::NonMaxSuppression::BoxEncodingType boxEncoding;
         bool sortResDescend;
-        element::Type outType;
+        ov::element::Type outType;
         std::map additionalConfig;
         std::tie(inShapeParams, inPrecisions, maxOutBoxesPerClass, thrValues, boxEncoding, sortResDescend, outType,
                  targetDevice, additionalConfig) = this->GetParam();
-        element::Type paramsPrec, maxBoxPrec, thrPrec;
+        ov::element::Type paramsPrec, maxBoxPrec, thrPrec;
         std::tie(paramsPrec, maxBoxPrec, thrPrec) = inPrecisions;
         std::tie(iouThr, scoreThr, softNmsSigma) = thrValues;
@@ -137,18 +133,18 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
         std::tie(bounds, targetInDims) = inShapeParams;
         if (!bounds.empty()) {
-            inputDynamicShapes = std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}};
+            inputDynamicShapes = std::vector{{bounds[BATCHES], bounds[BOXES], 4}, {bounds[BATCHES], bounds[CLASSES], bounds[BOXES]}};
         } else {
             size_t batches, boxes, classes;
             std::tie(batches, boxes, classes) = targetInDims.front();
             ov::Dimension numBatches(batches), numBoxes(boxes), numClasses(classes);
-            inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}};
+            inputDynamicShapes = std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}};
         }
         for (const auto &ts : targetInDims) {
             size_t numBatches, numBoxes, numClasses;
             std::tie(numBatches, numBoxes, numClasses) = ts;
-            targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}});
+            targetStaticShapes.push_back(std::vector{{numBatches, numBoxes, 4}, {numBatches, numClasses, numBoxes}});
         }
         ov::ParameterVector params;
@@ -158,17 +154,18 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
         params[0]->set_friendly_name("param_1");
         params[1]->set_friendly_name("param_2");
-        auto maxOutBoxesPerClassNode = builder::makeConstant(maxBoxPrec, ngraph::Shape{}, std::vector{maxOutBoxesPerClass})->output(0);
-        auto iouThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{iouThr})->output(0);
-        auto scoreThrNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{scoreThr})->output(0);
-        auto softNmsSigmaNode = builder::makeConstant(thrPrec, ngraph::Shape{}, std::vector{softNmsSigma})->output(0);
-        auto nms = std::make_shared(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode,
-                                    softNmsSigmaNode, boxEncoding, sortResDescend, outType);
-        ngraph::ResultVector results;
+        auto maxOutBoxesPerClassNode = std::make_shared(maxBoxPrec, ov::Shape{}, std::vector{maxOutBoxesPerClass});
+        auto iouThrNode = std::make_shared(thrPrec, ov::Shape{}, std::vector{iouThr});
+        auto scoreThrNode = std::make_shared(thrPrec, ov::Shape{}, std::vector{scoreThr});
+        auto softNmsSigmaNode = std::make_shared(thrPrec, ov::Shape{}, std::vector{softNmsSigma});
+
+        auto nms = std::make_shared(params[0], params[1], maxOutBoxesPerClassNode, iouThrNode, scoreThrNode,
+                                    softNmsSigmaNode, boxEncoding, sortResDescend, outType);
+        ov::ResultVector results;
         for (size_t i = 0; i < nms->get_output_size(); i++) {
-            results.push_back(std::make_shared(nms->output(i)));
+            results.push_back(std::make_shared(nms->output(i)));
         }
-        function = std::make_shared(results, params, "Nms");
+        function = std::make_shared(results, params, "Nms");
     }
 private:
@@ -397,14 +394,10 @@ class NmsLayerGPUTest : public testing::WithParamInterface,
     int32_t maxOutBoxesPerClass;
 };
-TEST_P(NmsLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
+TEST_P(NmsLayerGPUTest, Inference) {
     run();
 }
-namespace {
-
 std::map emptyAdditionalConfig;
 const std::vector inShapeParams = {
@@ -419,18 +412,20 @@ const std::vector inShapeParams = {
 const std::vector maxOutBoxPerClass = {5, 20};
 const std::vector threshold = {0.3f, 0.7f};
 const std::vector sigmaThreshold = {0.0f, 0.5f};
-const std::vector encodType = {op::v9::NonMaxSuppression::BoxEncodingType::CENTER,
-                               op::v9::NonMaxSuppression::BoxEncodingType::CORNER};
+const std::vector encodType =
+    {ov::op::v9::NonMaxSuppression::BoxEncodingType::CENTER,
+     ov::op::v9::NonMaxSuppression::BoxEncodingType::CORNER};
+
 const std::vector sortResDesc = {true, false};
-const std::vector outType = {element::i32};
+const std::vector outType = {ov::element::i32};
 INSTANTIATE_TEST_SUITE_P(smoke_Nms_dynamic, NmsLayerGPUTest,
     ::testing::Combine(
         ::testing::ValuesIn(inShapeParams),
         ::testing::Combine(
-            ::testing::Values(ElementType::f32),
-            ::testing::Values(ElementType::i32),
-            ::testing::Values(ElementType::f32)),
+            ::testing::Values(ov::element::f32),
+            ::testing::Values(ov::element::i32),
+            ::testing::Values(ov::element::f32)),
         ::testing::ValuesIn(maxOutBoxPerClass),
         ::testing::Combine(
            ::testing::ValuesIn(threshold),
@@ -444,4 +439,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_Nms_dynamic, NmsLayerGPUTest,
     NmsLayerGPUTest::getTestCaseName);
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp
index a7e5f9a8dad781..14dcb81dd04c3f 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/normalize_l2.cpp
@@ -2,30 +2,32 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include "ov_models/builders.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/normalize_l2.hpp"
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 using NormalizeL2LayerGPUTestParams = std::tuple<
        InputShape,           // Input shapes
-       ElementType,          // Input precision
+       ov::element::Type,    // Input precision
        std::vector,          // Reduction axes
-       ngraph::op::EpsMode,  // EpsMode
+       ov::op::EpsMode,      // EpsMode
        float>;               // Epsilon
 class NormalizeL2LayerGPUTest : public testing::WithParamInterface,
-                                virtual public SubgraphBaseTest {
+                                virtual
public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape inputShapes; - ElementType netPrecision; + ov::element::Type netPrecision; std::vector axes; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; float eps; std::tie(inputShapes, netPrecision, axes, epsMode, eps) = obj.param; @@ -47,9 +49,9 @@ class NormalizeL2LayerGPUTest : public testing::WithParamInterface axes; - ngraph::op::EpsMode epsMode; + ov::op::EpsMode epsMode; float eps; std::tie(inputShapes, netPrecision, axes, epsMode, eps) = this->GetParam(); @@ -62,18 +64,15 @@ class NormalizeL2LayerGPUTest : public testing::WithParamInterface(ov::element::i64, ov::Shape{axes.size()}, axes); auto normalize = std::make_shared(params[0], normAxes, eps, epsMode); - ngraph::ResultVector results{std::make_shared(normalize)}; - function = std::make_shared(results, params, "NormalizeL2"); + ov::ResultVector results{std::make_shared(normalize)}; + function = std::make_shared(results, params, "NormalizeL2"); } }; -TEST_P(NormalizeL2LayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(NormalizeL2LayerGPUTest, Inference) { run(); } -namespace { - const std::vector inputShapes_1D = { { // dynamic @@ -163,8 +162,8 @@ const std::vector inputShapes_5D = { } }; -const std::vector epsMode = { - ngraph::op::EpsMode::ADD, ngraph::op::EpsMode::MAX +const std::vector epsMode = { + ov::op::EpsMode::ADD, ov::op::EpsMode::MAX }; const std::vector epsilon = { @@ -179,7 +178,7 @@ const std::vector reduction_axes_12 = {1, 2}; const std::vector reduction_axes_3 = {3}; const std::vector reduction_axes_2 = {2}; -std::vector nrtPrecision = {ElementType::f16, ElementType::f32}; +std::vector nrtPrecision = {ov::element::f16, ov::element::f32}; const auto NormalizeL2_3D = ::testing::Combine( ::testing::ValuesIn(inputShapes_3D), @@ -209,4 +208,3 @@ const auto NormalizeL2_5D = ::testing::Combine( INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NormalizeL2_5D, NormalizeL2LayerGPUTest, NormalizeL2_5D, NormalizeL2LayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp index 4a30f042df0226..c18f3bfb8fa347 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pad.cpp @@ -2,41 +2,38 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/pad.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov; -using namespace test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/pad.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using PadLayerGPUTestParamSet = std::tuple< InputShape, // Input shape - ElementType, // Input element type + ov::element::Type, // Input element type std::vector, // padsBegin std::vector, // padsEnd float, // argPadValue - std::vector, // for {begin, end, padValue} - ov::op::PadMode // padMode ->; + std::vector, // for {begin, end, padValue} + ov::op::PadMode>; // 
padMode class PadLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape shapes; - ElementType elementType; + ov::element::Type model_type; std::vector padsBegin, padsEnd; ov::op::PadMode padMode; float argPadValue; - std::vector inputLayerTypes; - std::tie(shapes, elementType, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = obj.param; + std::vector inputLayerTypes; + std::tie(shapes, model_type, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -44,7 +41,7 @@ class PadLayerGPUTest : public testing::WithParamInterface inputLayerTypes; + std::vector inputLayerTypes; std::tie(shapes, inType, padsBegin, padsEnd, argPadValue, inputLayerTypes, padMode) = this->GetParam(); targetDevice = ov::test::utils::DEVICE_GPU; std::vector inputShapes; inputShapes.push_back(shapes); - if (inputLayerTypes[0] == helpers::InputLayerType::PARAMETER) { + if (inputLayerTypes[0] == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(padsBegin.size())}, std::vector(shapes.second.size(), {padsBegin.size()}))); } - if (inputLayerTypes[1] == helpers::InputLayerType::PARAMETER) { + if (inputLayerTypes[1] == ov::test::utils::InputLayerType::PARAMETER) { inputShapes.push_back(InputShape({static_cast(padsEnd.size())}, std::vector(shapes.second.size(), {padsEnd.size()}))); } init_input_shapes(inputShapes); // Add empty shape for parameter input of scalar 'pad_value' - if (inputLayerTypes[2] == helpers::InputLayerType::PARAMETER) { + if (inputLayerTypes[2] == ov::test::utils::InputLayerType::PARAMETER) { inputDynamicShapes.push_back(ov::PartialShape({})); for (size_t i = 0; i < shapes.second.size(); ++i) { for (size_t k = 0; k < targetStaticShapes.size(); ++k) { @@ -95,43 +92,43 @@ class PadLayerGPUTest : public testing::WithParamInterface pads_begin, pads_end, arg_pad_value; // padsBegin - if (inputLayerTypes[0] == helpers::InputLayerType::PARAMETER) { - functionParams.push_back(std::make_shared(ngraph::element::i64, ov::Shape{padsBegin.size()})); + if (inputLayerTypes[0] == ov::test::utils::InputLayerType::PARAMETER) { + functionParams.push_back(std::make_shared(ov::element::i64, ov::Shape{padsBegin.size()})); functionParams.back()->set_friendly_name("padsBegin"); pads_begin = functionParams.back(); } else { - pads_begin = std::make_shared(ngraph::element::i64, ngraph::Shape{padsBegin.size()}, padsBegin.data()); + pads_begin = std::make_shared(ov::element::i64, ov::Shape{padsBegin.size()}, padsBegin.data()); } // padsEnd - if (inputLayerTypes[1] == helpers::InputLayerType::PARAMETER) { - functionParams.push_back(std::make_shared(ngraph::element::i64, ov::Shape{padsEnd.size()})); + if (inputLayerTypes[1] == ov::test::utils::InputLayerType::PARAMETER) { + functionParams.push_back(std::make_shared(ov::element::i64, ov::Shape{padsEnd.size()})); functionParams.back()->set_friendly_name("padsEnd"); pads_end = functionParams.back(); } else { - pads_end = std::make_shared(ngraph::element::i64, ngraph::Shape{padsEnd.size()}, padsEnd.data()); + pads_end = std::make_shared(ov::element::i64, ov::Shape{padsEnd.size()}, padsEnd.data()); } // argPadValue - if (inputLayerTypes[2] == helpers::InputLayerType::PARAMETER) { - functionParams.push_back(std::make_shared(inType, ov::PartialShape({}))); + if 
(inputLayerTypes[2] == ov::test::utils::InputLayerType::PARAMETER) { + functionParams.push_back(std::make_shared(inType, ov::PartialShape({}))); functionParams.back()->set_friendly_name("padValue"); arg_pad_value = functionParams.back(); } else { - arg_pad_value = std::make_shared(inType, ngraph::Shape{}, &argPadValue); + arg_pad_value = std::make_shared(inType, ov::Shape{}, &argPadValue); } - auto pad = std::make_shared(functionParams[0], pads_begin, pads_end, arg_pad_value, padMode); + auto pad = std::make_shared(functionParams[0], pads_begin, pads_end, arg_pad_value, padMode); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < pad->get_output_size(); ++i) { - results.push_back(std::make_shared(pad->output(i))); + results.push_back(std::make_shared(pad->output(i))); } - function = std::make_shared(results, functionParams, "PadLayerGPUTest"); + function = std::make_shared(results, functionParams, "PadLayerGPUTest"); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0lu; i < funcInputs.size(); i++) { @@ -155,8 +152,11 @@ class PadLayerGPUTest : public testing::WithParamInterface inputPrecisions = { - ElementType::f32 +const std::vector inputPrecisions = { + ov::element::f32 }; const std::vector argPadValue = {0.f, -1.f}; @@ -185,11 +182,11 @@ const std::vector padMode = { ov::op::PadMode::SYMMETRIC }; -const std::vector> isConstantInput = { - {helpers::InputLayerType::CONSTANT, helpers::InputLayerType::CONSTANT, helpers::InputLayerType::CONSTANT}, - {helpers::InputLayerType::CONSTANT, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::CONSTANT}, - {helpers::InputLayerType::CONSTANT, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER}, - {helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER, helpers::InputLayerType::PARAMETER} +const std::vector> isConstantInput = { + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT}, + {ov::test::utils::InputLayerType::CONSTANT, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER}, + {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER} }; //====================== Dynamic Shapes Tests 2D ====================== @@ -210,7 +207,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padsEnd2D_Smoke), ::testing::ValuesIn(argPadValue), ::testing::ValuesIn(isConstantInput), - ::testing::Values(ngraph::helpers::PadMode::CONSTANT)), + ::testing::Values(ov::op::PadMode::CONSTANT)), PadLayerGPUTest::getTestCaseName ); @@ -246,7 +243,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padsEnd4D_Smoke), ::testing::ValuesIn(argPadValue), ::testing::ValuesIn(isConstantInput), - ::testing::Values(ngraph::helpers::PadMode::CONSTANT)), + ::testing::Values(ov::op::PadMode::CONSTANT)), PadLayerGPUTest::getTestCaseName ); @@ -282,7 +279,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padsEnd5D_Smoke), ::testing::ValuesIn(argPadValue), ::testing::ValuesIn(isConstantInput), - ::testing::Values(ngraph::helpers::PadMode::CONSTANT)), + ::testing::Values(ov::op::PadMode::CONSTANT)), 
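
The PARAMETER branches above feed pads_begin, pads_end and the scalar pad value into ov::op::v1::Pad as runtime graph inputs instead of Constants; the same Parameter -> op -> Result -> ov::Model construction replaces the removed ngraph::builder helpers throughout this patch. A minimal self-contained sketch of that pattern (make_dynamic_pad_model is an illustrative name, not part of the patch):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/op/pad.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"

    std::shared_ptr<ov::Model> make_dynamic_pad_model(const ov::PartialShape& data_shape,
                                                      size_t rank) {
        // Data plus three runtime inputs: pads_begin / pads_end (i64 vectors of length
        // rank) and a scalar pad value, mirroring the all-PARAMETER case above.
        auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, data_shape);
        auto pads_begin = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{rank});
        auto pads_end = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{rank});
        auto pad_value = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{});
        auto pad = std::make_shared<ov::op::v1::Pad>(data, pads_begin, pads_end, pad_value,
                                                     ov::op::PadMode::CONSTANT);
        auto result = std::make_shared<ov::op::v0::Result>(pad);
        return std::make_shared<ov::Model>(ov::ResultVector{result},
                                           ov::ParameterVector{data, pads_begin, pads_end, pad_value},
                                           "PadLayerGPUTest");
    }
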
PadLayerGPUTest::getTestCaseName ); @@ -299,6 +296,4 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(padMode)), PadLayerGPUTest::getTestCaseName ); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp index 7b5c7f9ae62330..41db2ac8b28655 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/pooling.cpp @@ -2,33 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" - -#include "shared_test_classes/single_layer/pooling.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/single_op/pooling.hpp" -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; -using poolLayerGpuTestParamsSet = std::tuple; +using poolLayerGpuTestParamsSet = + std::tuple; class PoolingLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - LayerTestsDefinitions::poolSpecificParams basicParamsSet; + ov::test::poolSpecificParams basicParamsSet; InputShape inputShapes; - ElementType inPrc; + ov::element::Type inPrc; std::tie(basicParamsSet, inputShapes, inPrc) = obj.param; - ngraph::helpers::PoolingTypes poolType; + ov::test::utils::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; @@ -41,10 +44,10 @@ class PoolingLayerGPUTest : public testing::WithParamInterfaceGetParam(); - ngraph::helpers::PoolingTypes poolType; + ov::test::utils::PoolingTypes poolType; std::vector kernel, stride; std::vector padBegin, padEnd; - ngraph::op::PadType padType; - ngraph::op::RoundingType roundingType; + ov::op::PadType padType; + ov::op::RoundingType roundingType; bool excludePad; std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = basicParamsSet; @@ -82,7 +85,7 @@ class PoolingLayerGPUTest : public testing::WithParamInterface(inPrc, shape)); } - std::shared_ptr poolInput = params[0]; + std::shared_ptr poolInput = params[0]; std::shared_ptr pooling; if (ov::test::utils::PoolingTypes::MAX == poolType) { @@ -91,27 +94,23 @@ class PoolingLayerGPUTest : public testing::WithParamInterface(poolInput, stride, padBegin, padEnd, kernel, excludePad, roundingType, padType); } - auto makeFunction = [](const ngraph::element::Type &ngPrc, ngraph::ParameterVector ¶ms, const std::shared_ptr &lastNode) { - ngraph::ResultVector results; + auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector ¶ms, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "PoolingGPU"); + return 
std::make_shared(results, params, "PoolingGPU"); }; function = makeFunction(inPrc, params, pooling); } }; -TEST_P(PoolingLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(PoolingLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector inpOutPrecision = { ElementType::f32 }; +const std::vector inpOutPrecision = { ov::element::f32 }; const std::vector inputShapes3D = { { {}, {{3, 4, 64}} }, @@ -220,22 +219,22 @@ const std::vector inputShapes5D = { }; /* ============= Pooling (1D) ============= */ -const std::vector paramsMax3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2}, {1}, {0}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector paramsMax3D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {2}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4}, {2}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {1}, {0}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; -const std::vector paramsAvg3D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3}, {1}, {1}, {0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4}, {4}, {2}, {2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, +const std::vector paramsAvg3D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3}, {1}, {1}, {0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4}, {4}, {2}, {2}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_3D, PoolingLayerGPUTest, @@ -253,30 +252,30 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_3D, PoolingLayerGPUTest, PoolingLayerGPUTest::getTestCaseName); /* ============= Pooling (2D) ============= */ -const std::vector paramsMax4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ 
ngraph::helpers::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector paramsMax4D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {4, 2}, {2, 1}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; -const std::vector paramsAvg4D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true } +const std::vector paramsAvg4D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2}, {2, 2}, {0, 0}, {0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4, 4}, {4, 4}, {2, 2}, {2, 2}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true } }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_4D, PoolingLayerGPUTest, @@ -293,9 +292,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_4D, PoolingLayerGPUTest, ::testing::ValuesIn(inpOutPrecision)), PoolingLayerGPUTest::getTestCaseName); -const std::vector paramsAvg4D_Large = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::FLOOR, ngraph::op::PadType::VALID, true }, +const std::vector paramsAvg4D_Large = { + 
ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {65, 65}, {65, 65}, {0, 0}, {0, 0}, + ov::op::RoundingType::FLOOR, ov::op::PadType::VALID, true }, }; INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest, @@ -306,32 +305,32 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_Large, PoolingLayerGPUTest, PoolingLayerGPUTest::getTestCaseName); /* ============= Pooling (3D) ============= */ -const std::vector paramsMax5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, false }, +const std::vector paramsMax5D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, false }, }; -const std::vector paramsAvg5D = { - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_LOWER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::SAME_UPPER, false }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true }, - LayerTestsDefinitions::poolSpecificParams{ ngraph::helpers::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, - ngraph::op::RoundingType::CEIL, ngraph::op::PadType::EXPLICIT, true } +const std::vector paramsAvg5D = { + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 
2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_LOWER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {1, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::SAME_UPPER, false }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {2, 2, 2}, {2, 2, 2}, {0, 0, 0}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {3, 3, 3}, {3, 3, 3}, {1, 1, 1}, {0, 0, 0}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true }, + ov::test::poolSpecificParams{ ov::test::utils::PoolingTypes::AVG, {4, 4, 4}, {2, 2, 2}, {2, 2, 2}, {2, 2, 2}, + ov::op::RoundingType::CEIL, ov::op::PadType::EXPLICIT, true } }; INSTANTIATE_TEST_SUITE_P(smoke_MaxPool_GPU_5D, PoolingLayerGPUTest, @@ -348,5 +347,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_AvgPool_GPU_5D, PoolingLayerGPUTest, ::testing::ValuesIn(inpOutPrecision)), PoolingLayerGPUTest::getTestCaseName); } // namespace - -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp index 49393f39d71404..f8e73f9f058001 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp @@ -2,23 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/shape_of.hpp" -#include "shared_test_classes/single_layer/strided_slice.hpp" -#include "shared_test_classes/single_layer/prior_box.hpp" -#include "shared_test_classes/single_layer/prior_box_clustered.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include +#include "openvino/core/type/element_type_traits.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/random_uniform.hpp" -using ElementType = ov::element::Type_t; +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { enum class priorbox_type { V0, V8, @@ -27,20 +22,21 @@ enum class priorbox_type { typedef std::tuple< InputShape, InputShape, - ElementType, // Net precision + ov::element::Type, std::vector, priorbox_type > PriorBoxLayerGPUTestParamsSet; + class PriorBoxLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape input1Shape; InputShape input2Shape; - ElementType netPrecision; + ov::element::Type model_type; std::vector max_size; priorbox_type priorboxType; - std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = obj.param; + std::tie(input1Shape, input2Shape, model_type, max_size, priorboxType) = obj.param; 
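
For the pooling.cpp hunks above: each poolSpecificParams tuple maps directly onto the ov::op::v1 pooling constructors. A rough sketch of how one entry becomes a node (build_pool and the f32 input are illustrative assumptions, not part of the patch):

    #include <memory>

    #include "openvino/op/avg_pool.hpp"
    #include "openvino/op/max_pool.hpp"

    // One 2D entry, e.g. {AVG, {2, 2}, {2, 2}, {1, 0}, {0, 0}, CEIL, SAME_LOWER, true}:
    std::shared_ptr<ov::Node> build_pool(const ov::Output<ov::Node>& input, bool max_pool) {
        const ov::Strides strides{2, 2};
        const ov::Shape pads_begin{1, 0}, pads_end{0, 0}, kernel{2, 2};
        if (max_pool) {
            return std::make_shared<ov::op::v1::MaxPool>(input, strides, pads_begin, pads_end,
                                                         kernel, ov::op::RoundingType::CEIL,
                                                         ov::op::PadType::SAME_LOWER);
        }
        // exclude_pad=true mirrors the trailing excludePad flag in the tuples above.
        return std::make_shared<ov::op::v1::AvgPool>(input, strides, pads_begin, pads_end,
                                                     kernel, /*exclude_pad=*/true,
                                                     ov::op::RoundingType::CEIL,
                                                     ov::op::PadType::SAME_LOWER);
    }
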
std::ostringstream result; switch (priorboxType) { @@ -55,7 +51,7 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface max_size; priorbox_type priorboxType; - std::tie(input1Shape, input2Shape, netPrecision, max_size, priorboxType) = this->GetParam(); + std::tie(input1Shape, input2Shape, model_type, max_size, priorboxType) = this->GetParam(); init_input_shapes({input1Shape, input2Shape}); - inType = ov::element::Type(netPrecision); - outType = ElementType::f32; + inType = ov::element::Type(model_type); + outType = ov::element::f32; - auto beginInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {2}); - auto endInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {4}); - auto strideInput = ov::op::v0::Constant::create(ngraph::element::i32, ngraph::Shape{1}, {1}); + auto beginInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {2}); + auto endInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {4}); + auto strideInput = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {1}); ov::ParameterVector functionParams; for (auto&& shape : inputDynamicShapes) functionParams.push_back(std::make_shared(inType, shape)); - auto shapeOfOp1 = std::make_shared(functionParams[0], element::i32); - auto shapeOfOp2 = std::make_shared(functionParams[1], element::i32); + auto shapeOfOp1 = std::make_shared(functionParams[0], ov::element::i32); + auto shapeOfOp2 = std::make_shared(functionParams[1], ov::element::i32); auto stridedSliceOp1 = std::make_shared(shapeOfOp1, beginInput, @@ -123,7 +119,7 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface(stridedSliceOp1, stridedSliceOp2, attributes_clustered); + auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_clustered); - ngraph::ResultVector results{std::make_shared(priorBoxOp)}; - function = std::make_shared (results, functionParams, "PriorBoxV0Function"); + ov::ResultVector results{std::make_shared(priorBoxOp)}; + function = std::make_shared (results, functionParams, "PriorBoxV0Function"); break; } case priorbox_type::V0: { - ngraph::op::v0::PriorBox::Attributes attributes_v0; + ov::op::v0::PriorBox::Attributes attributes_v0; attributes_v0.min_size = {64}; attributes_v0.max_size = max_size; @@ -153,15 +149,15 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface(stridedSliceOp1, stridedSliceOp2, attributes_v0); + auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_v0); - ngraph::ResultVector results{std::make_shared(priorBoxOp)}; - function = std::make_shared (results, functionParams, "PriorBoxV0Function"); + ov::ResultVector results{std::make_shared(priorBoxOp)}; + function = std::make_shared (results, functionParams, "PriorBoxV0Function"); break; } case priorbox_type::V8: default: { - ngraph::op::v8::PriorBox::Attributes attributes_v8; + ov::op::v8::PriorBox::Attributes attributes_v8; attributes_v8.min_size = {64}; attributes_v8.max_size = max_size; @@ -174,25 +170,21 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface(stridedSliceOp1, stridedSliceOp2, attributes_v8); + auto priorBoxOp = std::make_shared(stridedSliceOp1, stridedSliceOp2, attributes_v8); - ngraph::ResultVector results{std::make_shared(priorBoxOp)}; - function = std::make_shared (results, functionParams, "PriorBoxV8Function"); + ov::ResultVector results{std::make_shared(priorBoxOp)}; + function = std::make_shared (results, functionParams, "PriorBoxV8Function"); } } } }; 
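
The PriorBox test above derives the op's two 1-D shape inputs from the data tensors at runtime: v3::ShapeOf takes the full shape and v1::StridedSlice keeps dims [2, 4), the spatial H and W. A minimal sketch of that subgraph (spatial_dims is an illustrative helper; the attributes are trimmed to the fields the test sets):

    #include <memory>
    #include <vector>

    #include "openvino/op/constant.hpp"
    #include "openvino/op/prior_box.hpp"
    #include "openvino/op/shape_of.hpp"
    #include "openvino/op/strided_slice.hpp"

    ov::Output<ov::Node> spatial_dims(const ov::Output<ov::Node>& tensor) {
        // shape[2:4] of an NCHW tensor, i.e. {H, W}, computed at runtime.
        auto shape = std::make_shared<ov::op::v3::ShapeOf>(tensor, ov::element::i32);
        auto begin = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {2});
        auto end = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {4});
        auto stride = ov::op::v0::Constant::create(ov::element::i32, ov::Shape{1}, {1});
        return std::make_shared<ov::op::v1::StridedSlice>(shape, begin, end, stride,
                                                          std::vector<int64_t>{0},
                                                          std::vector<int64_t>{0});
    }

    // Usage, with params[0]/params[1] the data and image Parameters as above:
    //   ov::op::v8::PriorBox::Attributes attrs;
    //   attrs.min_size = {64};
    //   auto prior_box = std::make_shared<ov::op::v8::PriorBox>(
    //       spatial_dims(params[0]), spatial_dims(params[1]), attrs);
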
-TEST_P(PriorBoxLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(PriorBoxLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::f32, +const std::vector model_types = { + ov::element::f32, }; const std::vector mode = { @@ -230,10 +222,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_prior_box_full_dynamic, ::testing::Combine( ::testing::ValuesIn(inShapesDynamic), ::testing::ValuesIn(imgShapesDynamic), - ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(max_size), ::testing::ValuesIn(mode)), PriorBoxLayerGPUTest::getTestCaseName); -} // namespace - -} // namespace GPULayerTestsDefinitions +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp index 755371e1b0a548..d3f324a72ff117 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/random_uniform.cpp @@ -2,26 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" +#include "openvino/core/type/element_type_traits.hpp" -using namespace ngraph; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/random_uniform.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // Input shapes std::pair, // Min value, Max value std::pair, // Global seed, operation seed - ElementType, // Network precision - TargetDevice, // Device name - std::map // Additional network configuration + ov::element::Type, // Network precision + std::string // Device name > RandomUnifromDynamicGPUTestParamsSet; class RandomUnifromDynamicGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { RandomUnifromDynamicGPUTestParamsSet basicParamsSet = obj.param; @@ -29,10 +31,9 @@ class RandomUnifromDynamicGPUTest : public testing::WithParamInterface input_shapes; std::pair min_max_values; std::pair seeds; - ElementType precision; - TargetDevice target_device; - std::map additionalConfig; - std::tie(input_shapes, min_max_values, seeds, precision, target_device, additionalConfig) = basicParamsSet; + ov::element::Type precision; + std::string target_device; + std::tie(input_shapes, min_max_values, seeds, precision, target_device) = basicParamsSet; result << "shape="; for (const auto& shape : input_shapes) { @@ -75,37 +76,37 @@ class RandomUnifromDynamicGPUTest : public testing::WithParamInterface::value_type>(); \ - dataPtr[0] = static_cast::value_type>(scalar); \ + auto *dataPtr = tensor.data::value_type>(); \ + dataPtr[0] = static_cast::value_type>(scalar); \ break; \ } switch (tensor.get_element_type()) { - CASE(ElementType::boolean) - CASE(ElementType::i8) - CASE(ElementType::i16) - CASE(ElementType::i32) - CASE(ElementType::i64) - CASE(ElementType::u8) - CASE(ElementType::u16) - CASE(ElementType::u32) - CASE(ElementType::u64) - CASE(ElementType::bf16) - CASE(ElementType::f16) - 
CASE(ElementType::f32) - CASE(ElementType::f64) - CASE(ElementType::u1) - CASE(ElementType::i4) - CASE(ElementType::u4) + CASE(ov::element::boolean) + CASE(ov::element::i8) + CASE(ov::element::i16) + CASE(ov::element::i32) + CASE(ov::element::i64) + CASE(ov::element::u8) + CASE(ov::element::u16) + CASE(ov::element::u32) + CASE(ov::element::u64) + CASE(ov::element::bf16) + CASE(ov::element::f16) + CASE(ov::element::f32) + CASE(ov::element::f64) + CASE(ov::element::u1) + CASE(ov::element::i4) + CASE(ov::element::u4) default: OPENVINO_THROW("Unsupported element type: ", tensor.get_element_type()); } } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); - auto generate_input = [&](size_t index, ElementType element_type) { + auto generate_input = [&](size_t index, ov::element::Type element_type) { ov::Tensor tensor(element_type, targetInputStaticShapes[index]); if (index != 0) { auto scalar_val = index == 1 ? min_max_values.first : min_max_values.second; @@ -121,11 +122,9 @@ class RandomUnifromDynamicGPUTest : public testing::WithParamInterfaceGetParam(); std::vector shapes; - ElementType netType; - std::map additionalConfig; + ov::element::Type netType; std::pair seeds; - - std::tie(shapes, min_max_values, seeds, netType, targetDevice, additionalConfig) = basicParamsSet; + std::tie(shapes, min_max_values, seeds, netType, targetDevice) = basicParamsSet; init_input_shapes(shapes); @@ -144,14 +143,10 @@ class RandomUnifromDynamicGPUTest : public testing::WithParamInterface min_max_values; }; - -TEST_P(RandomUnifromDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(RandomUnifromDynamicGPUTest, Inference) { run(); } -namespace { -std::map emptyAdditionalConfig; const std::vector> dynInputShapes = { { {{ov::PartialShape::dynamic(4)}, {{1, 2, 3, 4}, {1, 1, 5, 5}, {2, 3, 4, 5}}}, @@ -183,21 +178,18 @@ const std::vector> seeds = { {100, 10}, }; -const std::vector netPrecisions = { - ElementType::i32, - ElementType::f32, - ElementType::f16, +const std::vector netPrecisions = { + ov::element::i32, + ov::element::f32, + ov::element::f16, }; const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(min_max_values), ::testing::ValuesIn(seeds), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_random_uniform, RandomUnifromDynamicGPUTest, testParams_smoke, RandomUnifromDynamicGPUTest::getTestCaseName); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp index f171bfebb4814a..6db87fb65afa91 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp @@ -2,35 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" +#include "openvino/core/type/element_type_traits.hpp" -using namespace ngraph; -using namespace ov::test; +#include 
"openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/range.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // input shapes std::vector, // input values - ElementType, // Network precision - TargetDevice, // Device name - std::map // Additional network configuration + ov::element::Type, // Model type + std::string // Device name > RangeDynamicGPUTestParamsSet; class RangeDynamicGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { RangeDynamicGPUTestParamsSet basicParamsSet = obj.param; std::ostringstream result; std::vector inputShapes; std::vector inputValues; - ElementType netType; - TargetDevice targetDevice; - std::map additionalConfig; - std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet; + ov::element::Type model_type; + std::string targetDevice; + std::tie(inputShapes, inputValues, model_type, targetDevice) = basicParamsSet; result << "IS="; for (const auto& shape : inputShapes) { @@ -43,7 +44,7 @@ class RangeDynamicGPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); - auto generate_input = [&](size_t index, ElementType element_type) { + auto generate_input = [&](size_t index, ov::element::Type element_type) { ov::Tensor tensor(element_type, targetInputStaticShapes[index]); add_scalar_to_tensor(input_values[index], tensor); inputs.insert({funcInputs[index].get_node_shared_ptr(), tensor}); }; // net_type=undifined means mixed type test - if (net_type == ElementType::undefined) { - generate_input(0, ElementType::f32); - generate_input(1, ElementType::i32); - generate_input(2, ElementType::f32); + if (net_type == ov::element::undefined) { + generate_input(0, ov::element::f32); + generate_input(1, ov::element::i32); + generate_input(2, ov::element::f32); } else { for (size_t i = 0; i < funcInputs.size(); ++i) { generate_input(i, funcInputs[i].get_element_type()); @@ -127,47 +128,43 @@ class RangeDynamicGPUTest : public testing::WithParamInterfaceGetParam(); std::vector inputShapes; std::vector inputValues; - ElementType netType; - std::map additionalConfig; + ov::element::Type model_type; ov::ParameterVector params; - std::tie(inputShapes, inputValues, netType, targetDevice, additionalConfig) = basicParamsSet; + std::tie(inputShapes, inputValues, model_type, targetDevice) = basicParamsSet; input_values = inputValues; - net_type = netType; + net_type = model_type; init_input_shapes(inputShapes); - if (netType == ElementType::undefined) { - std::vector types = { ElementType::f32, ElementType::i32, ElementType::f32 }; + if (model_type == ov::element::undefined) { + std::vector types = { ov::element::f32, ov::element::i32, ov::element::f32 }; for (size_t i = 0; i < types.size(); i++) { auto paramNode = std::make_shared(types[i], inputDynamicShapes[i]); params.push_back(paramNode); } - netType = ElementType::f32; + model_type = ov::element::f32; } else { for (auto&& shape : inputDynamicShapes) { - params.push_back(std::make_shared(netType, shape)); + params.push_back(std::make_shared(model_type, shape)); } } - const auto range = std::make_shared(params[0], params[1], params[2], netType); + const 
auto range = std::make_shared(params[0], params[1], params[2], model_type); - ngraph::ResultVector results = {std::make_shared(range)}; - function = std::make_shared(results, params, "shapeof_out"); + ov::ResultVector results = {std::make_shared(range)}; + function = std::make_shared(results, params, "shapeof_out"); } private: std::vector input_values; - ElementType net_type; + ov::element::Type net_type; }; -TEST_P(RangeDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(RangeDynamicGPUTest, Inference) { run(); } -namespace { -std::map emptyAdditionalConfig; const std::vector> dynInputShapes = { { // Inputs for Range @@ -187,17 +184,16 @@ const std::vector> inputValues = { } }; -const std::vector netPrecisions = { - ElementType::i8, - ElementType::i32, - ElementType::i64, +const std::vector netPrecisions = { + ov::element::i8, + ov::element::i32, + ov::element::i64, }; const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputValues), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_01, RangeDynamicGPUTest, testParams_smoke, RangeDynamicGPUTest::getTestCaseName); @@ -211,16 +207,15 @@ const std::vector> inputFloatValues = { } }; -const std::vector netFloatPrecisions = { - ElementType::f16, - ElementType::f32, +const std::vector netFloatPrecisions = { + ov::element::f16, + ov::element::f32, }; const auto testFloatParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputFloatValues), ::testing::ValuesIn(netFloatPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_range_02, RangeDynamicGPUTest, testFloatParams_smoke, RangeDynamicGPUTest::getTestCaseName); @@ -233,19 +228,17 @@ const std::vector> inputMixedValues = { } }; -const std::vector netMixedPrecisions = { +const std::vector netMixedPrecisions = { // Mixed type test(start/step:fp32, end:i32) - ElementType::undefined + ov::element::undefined }; const auto testMixedParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputMixedValues), ::testing::ValuesIn(netMixedPrecisions), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_diff_types, RangeDynamicGPUTest, testMixedParams_smoke, RangeDynamicGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp index e4ae7b23381b00..1e19eb0f1d252d 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reduce.cpp @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/reduce_ops.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/node_builders/reduce.hpp" +#include "common_test_utils/test_enums.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include 
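
In the range.cpp hunks above, ov::element::undefined acts as a sentinel for a mixed-precision model: start and step are f32 parameters while stop is i32, which v4::Range permits because its output_type attribute fixes the result type independently of the inputs. A minimal sketch under those assumptions (make_mixed_range_model is an illustrative name; the model name mirrors the test):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/range.hpp"
    #include "openvino/op/result.hpp"

    std::shared_ptr<ov::Model> make_mixed_range_model() {
        // Scalar inputs: start/step are f32, stop is i32; the output type is set explicitly.
        auto start = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{});
        auto stop = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{});
        auto step = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{});
        auto range = std::make_shared<ov::op::v4::Range>(start, stop, step, ov::element::f32);
        auto result = std::make_shared<ov::op::v0::Result>(range);
        return std::make_shared<ov::Model>(ov::ResultVector{result},
                                           ov::ParameterVector{start, stop, step}, "shapeof_out");
    }
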
-using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef struct { std::vector data_shape; @@ -20,22 +20,22 @@ typedef struct { } ReduceInput; typedef std::tuple< - ReduceInput, // input data (data shape, axes shape, axes values) - ElementType, // presion of inputs - helpers::ReductionType, // reduction type - bool, // keepDims - TargetDevice // device name + ReduceInput, // input data (data shape, axes shape, axes values) + ov::element::Type, // precision of inputs + ov::test::utils::ReductionType, // reduction type + bool, // keepDims + std::string // device name > ReduceLayerTestParamSet; class ReduceLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { ReduceInput input_data; - ElementType netType; - helpers::ReductionType reductionType; + ov::element::Type netType; + ov::test::utils::ReductionType reductionType; bool keepDims; - TargetDevice targetDevice; + std::string targetDevice; std::tie(input_data, netType, reductionType, keepDims, targetDevice) = obj.param; std::vector inshapes = input_data.data_shape; @@ -67,8 +67,8 @@ class ReduceLayerGPUTest : public testing::WithParamInterfaceGetParam(); @@ -84,52 +84,43 @@ class ReduceLayerGPUTest : public testing::WithParamInterface shapeAxes; shapeAxes.push_back(axes.size()); - auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + auto reductionAxesNode = std::make_shared(ov::element::i64, ov::Shape(shapeAxes), axes); - const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType); + const auto reduce = ov::test::utils::make_reduce(params[0], reductionAxesNode, keepDims, reductionType); - auto makeFunction = [](ParameterVector &params, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ReduceLayerGPUTest"); + return std::make_shared(results, params, "ReduceLayerGPUTest"); }; function = makeFunction(params, reduce); } }; -TEST_P(ReduceLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ReduceLayerGPUTest, Inference) { run(); } -namespace { - const std::vector keepDims = { true, false, }; -const std::vector floatPrecisions = { - ElementType::f32, - ElementType::f16, +const std::vector float_types = { + ov::element::f32, + ov::element::f16, }; -const std::vector floatIntPrecisions = { - ElementType::f32, - ElementType::f16, - ElementType::i32, +const std::vector float_int_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32, }; - - -namespace Reduce { - const ReduceInput dyn1d = { { { {-1}, {{4}, {5}} } @@ -176,8 +167,8 @@ const ReduceInput dyn6d = { // ================== Reduction int32/float types (Sum, Min, Max, L1) ================== const auto reduceSum = ::testing::Combine( ::testing::ValuesIn({dyn1d, dyn5d}), - ::testing::ValuesIn(floatIntPrecisions), -
::testing::Values(helpers::ReductionType::Sum), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::Sum), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -185,8 +176,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_sum_compareWithRefs_dynamic, ReduceLayerGP const auto reduceMin = ::testing::Combine( ::testing::ValuesIn({dyn2d, dyn6d}), - ::testing::ValuesIn(floatIntPrecisions), - ::testing::Values(helpers::ReductionType::Min), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::Min), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -194,8 +185,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_min_compareWithRefs_dynamic, ReduceLayerGP const auto reduceMax = ::testing::Combine( ::testing::ValuesIn({dyn3d, dyn5d}), - ::testing::ValuesIn(floatIntPrecisions), - ::testing::Values(helpers::ReductionType::Max), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::Max), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -203,8 +194,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_max_compareWithRefs_dynamic, ReduceLayerGP const auto reduceL1 = ::testing::Combine( ::testing::ValuesIn({dyn4d, dyn6d}), - ::testing::ValuesIn(floatIntPrecisions), - ::testing::Values(helpers::ReductionType::L1), + ::testing::ValuesIn(float_int_types), + ::testing::Values(ov::test::utils::ReductionType::L1), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -214,8 +205,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_l1_compareWithRefs_dynamic, ReduceLayerGPU // ================== Reduction float types (Mean, Prod, L2) ================== const auto reduceMean = ::testing::Combine( ::testing::ValuesIn({dyn1d, dyn6d}), - ::testing::ValuesIn(floatPrecisions), - ::testing::Values(helpers::ReductionType::Mean), + ::testing::ValuesIn(float_types), + ::testing::Values(ov::test::utils::ReductionType::Mean), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -223,8 +214,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_mean_compareWithRefs_dynamic, ReduceLayerG const auto reduceProd = ::testing::Combine( ::testing::ValuesIn({dyn2d, dyn4d}), - ::testing::ValuesIn({ElementType::f32}), - ::testing::Values(helpers::ReductionType::Prod), + ::testing::ValuesIn({ov::element::f32}), + ::testing::Values(ov::test::utils::ReductionType::Prod), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -232,8 +223,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_prod_compareWithRefs_dynamic, ReduceLayerG const auto reduceL2 = ::testing::Combine( ::testing::ValuesIn({dyn4d, dyn5d}), - ::testing::ValuesIn(floatPrecisions), - ::testing::Values(helpers::ReductionType::L2), + ::testing::ValuesIn(float_types), + ::testing::Values(ov::test::utils::ReductionType::L2), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -243,8 +234,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_l2_compareWithRefs_dynamic, ReduceLayerGPU // ================== Reduction logical types (LogicalOr, LogicalAnd) ================== const auto reduceLogicalOr = ::testing::Combine( ::testing::ValuesIn({dyn1d, dyn6d}), - ::testing::Values(ElementType::boolean), - ::testing::Values(helpers::ReductionType::LogicalOr), + ::testing::Values(ov::element::boolean), + ::testing::Values(ov::test::utils::ReductionType::LogicalOr), ::testing::ValuesIn(keepDims), 
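
The ov::test::utils::make_reduce call above is a dispatch helper over the reduction type; for the Sum cases instantiated here, the equivalent direct construction is roughly the following sketch (build_reduce_sum is an illustrative name; the i64 axes Constant matches SetUp above):

    #include <memory>
    #include <vector>

    #include "openvino/op/constant.hpp"
    #include "openvino/op/reduce_sum.hpp"

    std::shared_ptr<ov::Node> build_reduce_sum(const ov::Output<ov::Node>& data,
                                               const std::vector<int64_t>& axes,
                                               bool keep_dims) {
        auto axes_node = std::make_shared<ov::op::v0::Constant>(ov::element::i64,
                                                                ov::Shape{axes.size()}, axes);
        // keep_dims=true preserves the reduced dimensions as size-1 dims.
        return std::make_shared<ov::op::v1::ReduceSum>(data, axes_node, keep_dims);
    }
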
::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -252,8 +243,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reduce_logicalor_compareWithRefs_dynamic, ReduceL const auto reduceLogicalAnd = ::testing::Combine( ::testing::ValuesIn({dyn3d, dyn5d}), - ::testing::Values(ElementType::boolean), - ::testing::Values(helpers::ReductionType::LogicalAnd), + ::testing::Values(ov::element::boolean), + ::testing::Values(ov::test::utils::ReductionType::LogicalAnd), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -347,15 +338,11 @@ const std::vector dynVariousAxisInputs = { const auto reduceMaxWithVariousAxis = ::testing::Combine( ::testing::ValuesIn(dynVariousAxisInputs), - ::testing::Values(ElementType::f32), - ::testing::Values(helpers::ReductionType::Max), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::test::utils::ReductionType::Max), ::testing::ValuesIn(keepDims), ::testing::Values(ov::test::utils::DEVICE_GPU) ); INSTANTIATE_TEST_SUITE_P(smoke_reduce_max_withVariousAxis_compareWithRefs_dynamic, ReduceLayerGPUTest, reduceMaxWithVariousAxis, ReduceLayerGPUTest::getTestCaseName); - - -} // namespace Reduce } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp index 9ced7435d6b77e..eda97b5aacbe31 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/region_yolo.cpp @@ -2,18 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/region_yolo.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/region_yolo.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct regionYoloAttributes { size_t classes; @@ -28,9 +26,7 @@ typedef std::tuple< InputShape, // Input Shape regionYoloAttributes, // Params std::vector, // mask - ov::test::ElementType, // Network input precision - ov::test::ElementType, // Network output precision - std::map, // Additional network configuration + ov::element::Type, // Model type std::string // Device name > RegionYoloGPUTestParam; @@ -38,70 +34,60 @@ class RegionYoloLayerGPUTest : public testing::WithParamInterface obj) { - InputShape inputShape; + InputShape shapes; regionYoloAttributes attributes; std::vector mask; - ov::test::ElementType inpPrecision; - ov::test::ElementType outPrecision; + ov::element::Type model_type; std::string targetName; - std::map additionalConfig; - - std::tie(inputShape, attributes, mask, inpPrecision, outPrecision, additionalConfig, targetName) = obj.param; + std::tie(shapes, attributes, mask, model_type, targetName) = obj.param; std::ostringstream result; - result << "IS=" << inputShape << "_"; + result << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; + for (const auto& item : shapes.second) { + result << ov::test::utils::vec2str(item) << "_"; + } result << "classes=" << attributes.classes << "_"; result << "coords=" << 
attributes.coordinates << "_"; result << "num=" << attributes.num_regions << "_"; result << "doSoftmax=" << attributes.do_softmax << "_"; result << "axis=" << attributes.start_axis << "_"; result << "endAxis=" << attributes.end_axis << "_"; - result << "inpPRC=" << inpPrecision << "_"; - result << "outPRC=" << outPrecision << "_"; + result << "inpPRC=" << model_type << "_"; result << "targetDevice=" << targetName << "_"; return result.str(); } protected: void SetUp() override { - InputShape inputShape; + InputShape shapes; regionYoloAttributes attributes; std::vector mask; - ov::test::ElementType inPrc; - ov::test::ElementType outPrc; - std::map additionalConfig; + ov::element::Type model_type; + std::tie(shapes, attributes, mask, model_type, targetDevice) = this->GetParam(); - std::tie(inputShape, attributes, mask, inPrc, outPrc, additionalConfig, targetDevice) = this->GetParam(); - - init_input_shapes({ inputShape }); + init_input_shapes({ shapes }); ov::ParameterVector paramRegionYolo; for (auto&& shape : inputDynamicShapes) { - paramRegionYolo.push_back(std::make_shared(inPrc, shape)); + paramRegionYolo.push_back(std::make_shared(model_type, shape)); } - const auto region_yolo = std::make_shared(paramRegionYolo[0], + const auto region_yolo = std::make_shared(paramRegionYolo[0], attributes.coordinates, attributes.classes, attributes.num_regions, attributes.do_softmax, mask, attributes.start_axis, attributes.end_axis); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < region_yolo->get_output_size(); i++) - results.push_back(std::make_shared(region_yolo->output(i))); - function = std::make_shared(results, paramRegionYolo, "RegionYolo"); + results.push_back(std::make_shared(region_yolo->output(i))); + function = std::make_shared(results, paramRegionYolo, "RegionYolo"); } }; -TEST_P(RegionYoloLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(RegionYoloLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - -const std::vector inpOutPrc = {ov::test::ElementType::f16, ov::test::ElementType::f32}; +const std::vector model_types = {ov::element::f16, ov::element::f32}; const std::vector inShapes_caffe_dynamic = { {{-1, -1, -1, -1}, {{1, 125, 13, 13}, {1, 125, 26, 26}}}, @@ -134,9 +120,7 @@ const auto testCase_yolov3_dynamic = ::testing::Combine( ::testing::ValuesIn(inShapes_v3_dynamic), ::testing::Values(yoloV3attr), ::testing::Values(masks[2]), - ::testing::ValuesIn(inpOutPrc), - ::testing::ValuesIn(inpOutPrc), - ::testing::Values(emptyAdditionalConfig), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -146,9 +130,7 @@ const auto testCase_yolov3_mxnet_dynamic = ::testing::Combine( ::testing::ValuesIn(inShapes_mxnet_dynamic), ::testing::Values(yoloV3mxnetAttr), ::testing::Values(masks[1]), - ::testing::ValuesIn(inpOutPrc), - ::testing::ValuesIn(inpOutPrc), - ::testing::Values(emptyAdditionalConfig), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -158,9 +140,7 @@ const auto testCase_yolov2_caffe_dynamic = ::testing::Combine( ::testing::ValuesIn(inShapes_caffe_dynamic), ::testing::Values(yoloV2caffeAttr), ::testing::Values(masks[0]), - ::testing::ValuesIn(inpOutPrc), - ::testing::ValuesIn(inpOutPrc), - ::testing::Values(emptyAdditionalConfig), + ::testing::ValuesIn(model_types), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -177,4 +157,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_GPURegionYoloCaffeDynamic, RegionYoloLayerGPUTest 
RegionYoloLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp index b27d9fbf423c86..8f3f4dbd96e75f 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/reorg_yolo.cpp @@ -2,69 +2,65 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/reorg_yolo.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/reorg_yolo.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - InputShape, // Input Shape - size_t, // Stride - ElementType, // Network precision - TargetDevice // Device + InputShape, // Input Shape + size_t, // Stride + ov::element::Type, // Model type + std::string // Device > ReorgYoloGPUTestParams; class ReorgYoloLayerGPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - InputShape inputShape; + InputShape shapes; size_t stride; - ElementType netPrecision; - TargetDevice targetDev; - std::tie(inputShape, stride, netPrecision, targetDev) = obj.param; + ov::element::Type model_type; + std::string targetDev; + std::tie(shapes, stride, model_type, targetDev) = obj.param; std::ostringstream result; - result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; - for (const auto& item : inputShape.second) { + result << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; + for (const auto& item : shapes.second) { result << ov::test::utils::vec2str(item) << "_"; } result << "stride=" << stride << "_"; - result << "netPRC=" << netPrecision << "_"; + result << "modelPRC=" << model_type << "_"; result << "targetDevice=" << targetDev << "_"; return result.str(); } protected: void SetUp() override { - InputShape inputShape; + InputShape shapes; size_t stride; - ElementType netPrecision; - std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam(); + ov::element::Type model_type; + std::tie(shapes, stride, model_type, targetDevice) = this->GetParam(); - init_input_shapes({inputShape}); + init_input_shapes({shapes}); - auto param = std::make_shared(ngraph::element::f32, inputDynamicShapes[0]); - auto reorg_yolo = std::make_shared(param, stride); - function = std::make_shared(std::make_shared(reorg_yolo), - ngraph::ParameterVector{param}, + auto param = std::make_shared(ov::element::f32, inputDynamicShapes[0]); + auto reorg_yolo = std::make_shared(param, stride); + function = std::make_shared(std::make_shared(reorg_yolo), + ov::ParameterVector{param}, "ReorgYolo"); } }; -TEST_P(ReorgYoloLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ReorgYoloLayerGPUTest, Inference) { run(); }; -namespace { - const std::vector inShapesDynamic1 = { {{{1, 2}, -1, -1, -1}, {{1, 4, 4, 4}, {1, 8, 4, 4}, {2, 8, 4, 4}}} }; @@ -94,4 +90,3 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_TestsReorgYolo_stride2_DynamicShape, ReorgYoloLay ReorgYoloLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp index 90d2f1b5f63399..5c92531c41daf3 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/roi_pooling.cpp @@ -2,61 +2,57 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/roi_pooling.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include - -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "common_test_utils/data_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/tile.hpp" +namespace { enum ProposalGenerationMode { RANDOM, ULTIMATE_RIGHT_BORDER }; -using ROIPoolingShapes = std::vector; +using ROIPoolingShapes = std::vector; typedef std::tuple< - ROIPoolingShapes, // Input shapes - std::vector, // Pooled shape {pooled_h, pooled_w} - float, // Spatial scale - ngraph::helpers::ROIPoolingTypes, // ROIPooling method - InferenceEngine::Precision // Net precision + ROIPoolingShapes, // Input shapes + std::vector, // Pooled shape {pooled_h, pooled_w} + float, // Spatial scale + ov::test::utils::ROIPoolingTypes, // ROIPooling method + ov::element::Type // Model type > ROIPoolingParams; typedef std::tuple< ROIPoolingParams, - ProposalGenerationMode, - std::map + ProposalGenerationMode > ROIPoolingGPUTestParams; class ROIPoolingLayerGPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - ROIPoolingParams basicParamsSet; - ProposalGenerationMode propMode; - std::map additionalConfig; + ROIPoolingParams basic_params_set; + ProposalGenerationMode prop_mode; - std::tie(basicParamsSet, propMode, additionalConfig) = obj.param; + std::tie(basic_params_set, prop_mode) = obj.param; - ROIPoolingShapes inputShapes; - std::vector poolShape; + ROIPoolingShapes shapes; + std::vector pool_shape; float spatial_scale; - ngraph::helpers::ROIPoolingTypes pool_method; - InferenceEngine::Precision netPrecision; - std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision) = basicParamsSet; + ov::test::utils::ROIPoolingTypes pool_method; + ov::element::Type model_type; + std::tie(shapes, pool_shape, spatial_scale, pool_method, model_type) = basic_params_set; std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; - for (const auto& shape : inputShapes) { + result << "netPRC=" << model_type << "_"; + for (const auto& shape : shapes) { result << ov::test::utils::partialShape2str({ shape.first }) << "_"; } result << "TS="; - for (const auto& shape : inputShapes) { + for (const auto& shape : shapes) { result << "("; if (!shape.second.empty()) { auto itr = shape.second.begin(); @@ -67,22 +63,17 @@ class ROIPoolingLayerGPUTest : public testing::WithParamInterface& targetInputStaticShapes) 
override { - const ProposalGenerationMode propMode = std::get<1>(this->GetParam()); + void generate_inputs(const std::vector& targetInputStaticShapes) override { + const ProposalGenerationMode prop_mode = std::get<1>(this->GetParam()); const float spatial_scale = std::get<2>(std::get<0>(this->GetParam())); - const ngraph::helpers::ROIPoolingTypes pool_method = std::get<3>(std::get<0>(this->GetParam())); + const ov::test::utils::ROIPoolingTypes pool_method = std::get<3>(std::get<0>(this->GetParam())); inputs.clear(); const auto& funcInputs = function->inputs(); auto feat_map_shape = targetInputStaticShapes[0]; - const auto is_roi_max_mode = (pool_method == ngraph::helpers::ROIPoolingTypes::ROI_MAX); + const auto is_roi_max_mode = (pool_method == ov::test::utils::ROIPoolingTypes::ROI_MAX); const int height = is_roi_max_mode ? feat_map_shape[2] / spatial_scale : 1; const int width = is_roi_max_mode ? feat_map_shape[3] / spatial_scale : 1; @@ -115,13 +106,13 @@ class ROIPoolingLayerGPUTest : public testing::WithParamInterface (end_h - start_h) * (input_h - 1) // and as result excess of right limit for proposal value if the border case (current_h == pooled_h - 1) // will not be handled explicitly switch (funcInput.get_element_type()) { - case ngraph::element::f32: { + case ov::element::f32: { auto* dataPtr = tensor.data(); for (size_t i = 0; i < tensor.get_size(); i += 5) { dataPtr[i] = 0; @@ -132,14 +123,14 @@ class ROIPoolingLayerGPUTest : public testing::WithParamInterface(); for (size_t i = 0; i < tensor.get_size(); i += 5) { - dataPtr[i] = static_cast(ngraph::float16(0.f).to_bits()); - dataPtr[i + 1] = static_cast(ngraph::float16(0.f).to_bits()); - dataPtr[i + 2] = static_cast(ngraph::float16(0.248046786f).to_bits()); - dataPtr[i + 3] = static_cast(ngraph::float16(0.471333951f).to_bits()); - dataPtr[i + 4] = static_cast(ngraph::float16(1.f).to_bits()); + dataPtr[i] = static_cast(ov::float16(0.f).to_bits()); + dataPtr[i + 1] = static_cast(ov::float16(0.f).to_bits()); + dataPtr[i + 2] = static_cast(ov::float16(0.248046786f).to_bits()); + dataPtr[i + 3] = static_cast(ov::float16(0.471333951f).to_bits()); + dataPtr[i + 4] = static_cast(ov::float16(1.f).to_bits()); } break; } @@ -158,7 +149,11 @@ class ROIPoolingLayerGPUTest : public testing::WithParamInterface additionalConfig; + ROIPoolingParams basic_params_set; + ProposalGenerationMode prop_mode; - std::tie(basicParamsSet, propMode, additionalConfig) = this->GetParam(); - ROIPoolingShapes inputShapes; - std::vector poolShape; + std::tie(basic_params_set, prop_mode) = this->GetParam(); + ROIPoolingShapes shapes; + std::vector pool_shape; float spatial_scale; - ngraph::helpers::ROIPoolingTypes pool_method; - InferenceEngine::Precision netPrecision; - std::tie(inputShapes, poolShape, spatial_scale, pool_method, netPrecision) = basicParamsSet; + ov::test::utils::ROIPoolingTypes pool_method; + ov::element::Type model_type; + std::tie(shapes, pool_shape, spatial_scale, pool_method, model_type) = basic_params_set; targetDevice = ov::test::utils::DEVICE_GPU; - init_input_shapes(inputShapes); + init_input_shapes(shapes); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(ngPrc, shape)); + params.push_back(std::make_shared(model_type, shape)); std::shared_ptr roi_pooling; if (ov::test::utils::ROIPoolingTypes::ROI_MAX == pool_method) { - roi_pooling = std::make_shared(params[0], params[1], poolShape, spatial_scale, 
"max"); + roi_pooling = std::make_shared(params[0], params[1], pool_shape, spatial_scale, "max"); } else { - roi_pooling = std::make_shared(params[0], params[1], poolShape, spatial_scale, "bilinear"); + roi_pooling = std::make_shared(params[0], params[1], pool_shape, spatial_scale, "bilinear"); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < roi_pooling->get_output_size(); i++) - results.push_back(std::make_shared(roi_pooling->output(i))); - function = std::make_shared(results, params, "ROIPooling"); - functionRefs = ngraph::clone_function(*function); + results.push_back(std::make_shared(roi_pooling->output(i))); + function = std::make_shared(results, params, "ROIPooling"); + functionRefs = function->clone(); } }; -TEST_P(ROIPoolingLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ROIPoolingLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - const std::vector inShapes = { ROIPoolingShapes{{{}, {{1, 3, 8, 8}}}, {{}, {{1, 5}}}}, ROIPoolingShapes{{{}, {{1, 3, 8, 8}}}, {{}, {{3, 5}}}}, @@ -291,43 +278,39 @@ const std::vector> pooledShapes_bilinear = { {6, 6} }; -const std::vector netPRCs = {InferenceEngine::Precision::FP32}; +const std::vector model_types = {ov::element::f32}; const std::vector spatial_scales = {0.625f, 1.f}; const auto test_ROIPooling_max = ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::ValuesIn(pooledShapes_max), ::testing::ValuesIn(spatial_scales), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_MAX), - ::testing::ValuesIn(netPRCs)); + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_MAX), + ::testing::ValuesIn(model_types)); const auto test_ROIPooling_bilinear = ::testing::Combine(::testing::ValuesIn(inShapes), ::testing::ValuesIn(pooledShapes_bilinear), ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), - ::testing::ValuesIn(netPRCs)); + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR), + ::testing::ValuesIn(model_types)); INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_max, ROIPoolingLayerGPUTest, ::testing::Combine(test_ROIPooling_max, - ::testing::Values(ProposalGenerationMode::RANDOM), - ::testing::Values(emptyAdditionalConfig)), + ::testing::Values(ProposalGenerationMode::RANDOM)), ROIPoolingLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_bilinear, ROIPoolingLayerGPUTest, ::testing::Combine(test_ROIPooling_bilinear, - ::testing::Values(ProposalGenerationMode::RANDOM), - ::testing::Values(emptyAdditionalConfig)), + ::testing::Values(ProposalGenerationMode::RANDOM)), ROIPoolingLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ROIPoolingGPU_bilinear_ultimateRightBorderProposal, ROIPoolingLayerGPUTest, ::testing::Combine(::testing::Combine(::testing::Values(ROIPoolingShapes{{{}, {{1, 1, 50, 50}}}, {{}, {{1, 5}}}}), ::testing::Values(std::vector { 4, 4 }), ::testing::Values(spatial_scales[1]), - ::testing::Values(ngraph::helpers::ROIPoolingTypes::ROI_BILINEAR), - ::testing::Values(InferenceEngine::Precision::FP32)), - ::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER), - ::testing::Values(emptyAdditionalConfig)), + ::testing::Values(ov::test::utils::ROIPoolingTypes::ROI_BILINEAR), + ::testing::Values(ov::element::f32)), + ::testing::Values(ProposalGenerationMode::ULTIMATE_RIGHT_BORDER)), ROIPoolingLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp index b1c010d80f4198..6333dc12258d72 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/scatter_nd_update.cpp @@ -2,19 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/scatter_ND_update.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/scatter_nd_update.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/scatter_elements_update.hpp" -namespace GPULayerTestsDefinitions { -using ScatterUpdateShapes = std::vector; +namespace { +using ScatterUpdateShapes = std::vector; using IndicesValues = std::vector; enum class Scatterupdate_type { @@ -31,24 +30,24 @@ struct ScatterUpdateLayerParams { typedef std::tuple< ScatterUpdateLayerParams, - ElementType, // input precision - ElementType // indices precision + ov::element::Type, // input precision + ov::element::Type // indices precision > ScatterUpdateParams; class ScatterUpdateLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { ScatterUpdateLayerParams scatterParams; - ElementType inputPrecision; - ElementType idxPrecision; - std::tie(scatterParams, inputPrecision, idxPrecision) = obj.param; + ov::element::Type model_type; + ov::element::Type idx_type; + std::tie(scatterParams, model_type, idx_type) = obj.param; const auto inputShapes = scatterParams.inputShapes; const auto indicesValues = scatterParams.indicesValues; const auto scType = scatterParams.scType; std::ostringstream result; - result << inputPrecision << "_IS="; + result << model_type << "_IS="; for (const auto& shape : inputShapes) { result << ov::test::utils::partialShape2str({ shape.first }) << "_"; } @@ -61,7 +60,7 @@ class ScatterUpdateLayerGPUTest : public testing::WithParamInterfaceGetParam(); + ov::element::Type model_type; + ov::element::Type idx_type; + std::tie(scatterParams, model_type, idx_type) = this->GetParam(); const auto inputShapes = scatterParams.inputShapes; const auto scType = scatterParams.scType; init_input_shapes({inputShapes[0], inputShapes[1], inputShapes[2]}); - ov::ParameterVector dataParams{std::make_shared(inputPrecision, inputDynamicShapes[0]), - std::make_shared(inputPrecision, inputDynamicShapes[2])}; + ov::ParameterVector dataParams{std::make_shared(model_type, inputDynamicShapes[0]), + std::make_shared(model_type, inputDynamicShapes[2])}; - auto indicesParam = std::make_shared(idxPrecision, inputDynamicShapes[1]); + auto indicesParam = std::make_shared(idx_type, inputDynamicShapes[1]); dataParams[0]->set_friendly_name("Param_1"); indicesParam->set_friendly_name("Param_2"); dataParams[1]->set_friendly_name("Param_3"); @@ -137,42 +140,39 @@ class ScatterUpdateLayerGPUTest : public testing::WithParamInterface scatter; switch (scType) { case Scatterupdate_type::ND: { - scatter = 
std::make_shared(dataParams[0], indicesParam, dataParams[1]); + scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1]); break; } case Scatterupdate_type::Elements: { auto axis = ov::op::v0::Constant::create(ov::element::i32, inputShapes[3].first.get_shape(), inputShapes[3].second[0]); - scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); + scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); break; } case Scatterupdate_type::Basic: default: { auto axis = ov::op::v0::Constant::create(ov::element::i32, inputShapes[3].first.get_shape(), inputShapes[3].second[0]); - scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); + scatter = std::make_shared(dataParams[0], indicesParam, dataParams[1], axis); } } - ngraph::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] }; + ov::ParameterVector allParams{ dataParams[0], indicesParam, dataParams[1] }; - auto makeFunction = [](ParameterVector &params, const std::shared_ptr &lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ScatterUpdateLayerGPUTest"); + return std::make_shared(results, params, "ScatterUpdateLayerGPUTest"); }; function = makeFunction(allParams, scatter); } }; -TEST_P(ScatterUpdateLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ScatterUpdateLayerGPUTest, Inference) { run(); } -namespace ScatterNDUpdate { - const std::vector scatterNDParams = { ScatterUpdateLayerParams{ ScatterUpdateShapes{ @@ -245,12 +245,12 @@ const std::vector scatterElementsParams = { }, }; -const std::vector inputPrecisions = { - ElementType::f32, +const std::vector model_types = { + ov::element::f32, }; -const std::vector constantPrecisions = { - ElementType::i32, +const std::vector constantPrecisions = { + ov::element::i32, }; const std::vector scatterUpdate_EmptyInput1_2Params = { @@ -294,28 +294,28 @@ const std::vector scatterElementsUpdate_EmptyInput1_2P INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterNDParams), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ScatterElementsUpdate_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterElementsParams), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ScatterUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterUpdate_EmptyInput1_2Params), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterNDUpdate_EmptyInput1_2Params), - ::testing::ValuesIn(inputPrecisions),
::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); @@ -323,8 +323,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ScatterNDUpdate_EmptyInput1_2_CompareWithRefs_dyn INSTANTIATE_TEST_SUITE_P(smoke_ScatterElementsUpdate_EmptyInput1_2_CompareWithRefs_dynamic, ScatterUpdateLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(scatterElementsUpdate_EmptyInput1_2Params), - ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(model_types), ::testing::ValuesIn(constantPrecisions)), ScatterUpdateLayerGPUTest::getTestCaseName); -} // namespace ScatterNDUpdate -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp index c8503052f316ef..73c24ff347f7bc 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/select.cpp @@ -2,34 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/select.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/select.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< - std::vector, // input shapes - ElementType, // presion of 'then' and 'else' of inputs - op::AutoBroadcastSpec, // broadcast spec - TargetDevice // device name + std::vector, // input shapes + ov::element::Type, // precision of 'then' and 'else' of inputs + ov::op::AutoBroadcastSpec, // broadcast spec + std::string // device name > SelectLayerTestParamSet; class SelectLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector inshapes; - ElementType netType; - op::AutoBroadcastSpec broadcast; - TargetDevice targetDevice; - std::tie(inshapes, netType, broadcast, targetDevice) = obj.param; + ov::element::Type model_type; + ov::op::AutoBroadcastSpec broadcast; + std::string targetDevice; + std::tie(inshapes, model_type, broadcast, targetDevice) = obj.param; std::ostringstream result; @@ -43,7 +42,7 @@ class SelectLayerGPUTest : public testing::WithParamInterface(results, params, "SelectLayerGPUTest"); }; function = makeFunction(params, select); } }; -TEST_P(SelectLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(SelectLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::f32, - ElementType::f16, - ElementType::i32, +const std::vector model_types = { + ov::element::f32, + ov::element::f16, + ov::element::i32, }; -namespace Select { - // AutoBroadcastType: NUMPY const std::vector> inShapesDynamicNumpy = { { @@ -131,8 +124,8 @@ const std::vector> inShapesDynamicNumpy = { const auto numpyCases = ::testing::Combine( ::testing::ValuesIn(inShapesDynamicNumpy), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(op::AutoBroadcastType::NUMPY), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::op::AutoBroadcastType::NUMPY),
::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -148,8 +141,8 @@ const std::vector> inShapesDynamicRangeNumpy = { const auto rangeNumpyCases = ::testing::Combine( ::testing::ValuesIn(inShapesDynamicRangeNumpy), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(op::AutoBroadcastType::NUMPY), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::op::AutoBroadcastType::NUMPY), ::testing::Values(ov::test::utils::DEVICE_GPU) ); @@ -171,13 +164,10 @@ const std::vector> inShapesDynamicNone = { const auto noneCases = ::testing::Combine( ::testing::ValuesIn(inShapesDynamicNone), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(op::AutoBroadcastType::NONE), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::op::AutoBroadcastType::NONE), ::testing::Values(ov::test::utils::DEVICE_GPU) ); INSTANTIATE_TEST_SUITE_P(smoke_select_CompareWithRefsNone_dynamic, SelectLayerGPUTest, noneCases, SelectLayerGPUTest::getTestCaseName); - -} // namespace Select } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp index d231567a6a33fc..fd9f2912f6bf81 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/shapeof.cpp @@ -2,36 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/shape_of.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" -#include -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/shape_of.hpp" -using ElementType = ov::element::Type_t; +namespace { +using ov::test::InputShape; -namespace GPULayerTestsDefinitions { typedef std::tuple< InputShape, - ElementType // Net precision + ov::element::Type > ShapeOfLayerGPUTestParamsSet; class ShapeOfLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape inputShape; - ElementType netPrecision; - std::tie(inputShape, netPrecision) = obj.param; + ov::element::Type model_type; + std::tie(inputShape, model_type) = obj.param; std::ostringstream result; result << "ShapeOfTest_"; result << std::to_string(obj.index) << "_"; - result << "netPrec=" << netPrecision << "_"; + result << "netPrec=" << model_type << "_"; result << "IS="; result << ov::test::utils::partialShape2str({inputShape.first}) << "_"; result << "TS=("; @@ -45,43 +43,39 @@ class ShapeOfLayerGPUTest : public testing::WithParamInterfaceGetParam(); + std::tie(inputShape, model_type) = this->GetParam(); init_input_shapes({inputShape}); - outType = ElementType::i32; + outType = ov::element::i32; ov::ParameterVector functionParams; for (auto&& shape : inputDynamicShapes) - functionParams.push_back(std::make_shared(netPrecision, shape)); + functionParams.push_back(std::make_shared(model_type, shape)); - auto shapeOfOp = std::make_shared(functionParams[0], element::i32); + auto shapeOfOp = std::make_shared(functionParams[0], ov::element::i32); - auto makeFunction = [](ParameterVector &params, const std::shared_ptr
&lastNode) { - ResultVector results; + auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ShapeOfLayerGPUTest"); + return std::make_shared(results, params, "ShapeOfLayerGPUTest"); }; function = makeFunction(functionParams, shapeOfOp); } }; -TEST_P(ShapeOfLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ShapeOfLayerGPUTest, Inference) { run(); } -namespace { - -const std::vector netPrecisions = { - ElementType::i32, +const std::vector model_types = { + ov::element::i32, }; // We don't check static case, because of constant folding @@ -110,10 +104,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_3d_compareWithRefs_dynamic, ShapeOfLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inShapesDynamic3d), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -std::vector inShapesStatic3d = { +std::vector inShapesStatic3d = { { 8, 5, 4 }, { 8, 5, 3 }, { 8, 5, 2 }, @@ -124,8 +118,8 @@ std::vector inShapesStatic3d = { INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_3d_compareWithRefs_static, ShapeOfLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic3d)), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic3d)), + ::testing::Values(ov::element::i32)), ShapeOfLayerGPUTest::getTestCaseName); // ============================================================================== @@ -152,10 +146,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_4d_compareWithRefs_dynamic, ShapeOfLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inShapesDynamic4d), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -std::vector inShapesStatic4d = { +std::vector inShapesStatic4d = { { 8, 5, 3, 4 }, { 8, 5, 3, 3 }, { 8, 5, 3, 2 }, @@ -166,8 +160,8 @@ std::vector inShapesStatic4d = { INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_4d_compareWithRefs_static, ShapeOfLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic4d)), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic4d)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); // ============================================================================== @@ -194,10 +188,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_5d_compareWithRefs_dynamic, ShapeOfLayerGPUTest, ::testing::Combine( ::testing::ValuesIn(inShapesDynamic5d), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -std::vector inShapesStatic5d = { +std::vector inShapesStatic5d = { { 8, 5, 3, 2, 4 }, { 8, 5, 3, 2, 3 }, { 8, 5, 3, 2, 2 }, @@ -208,37 +202,35 @@ std::vector inShapesStatic5d = { INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf_5d_compareWithRefs_static, ShapeOfLayerGPUTest, ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapesStatic5d)), - ::testing::ValuesIn(netPrecisions)), + ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapesStatic5d)), + ::testing::ValuesIn(model_types)), ShapeOfLayerGPUTest::getTestCaseName); -} //
namespace - using ShapeOfParams = typename std::tuple< - InputShape, // Shape - InferenceEngine::Precision, // Precision - LayerTestsUtils::TargetDevice // Device name + InputShape, // Shape + ov::element::Type, // Model type + std::string // Device name >; class ShapeOfDynamicInputGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - InputShape inputShapes; - InferenceEngine::Precision dataPrc; + InputShape shapes; + ov::element::Type model_type; std::string targetDevice; - std::tie(inputShapes, dataPrc, targetDevice) = obj.param; + std::tie(shapes, model_type, targetDevice) = obj.param; std::ostringstream result; result << "IS=("; - result << ov::test::utils::partialShape2str({inputShapes.first}) << "_"; - for (size_t i = 0lu; i < inputShapes.second.size(); i++) { + result << ov::test::utils::partialShape2str({shapes.first}) << "_"; + for (size_t i = 0lu; i < shapes.second.size(); i++) { result << "{"; - result << ov::test::utils::vec2str(inputShapes.second[i]) << "_"; + result << ov::test::utils::vec2str(shapes.second[i]) << "_"; result << "}_"; } result << ")_"; - result << "netPRC=" << dataPrc << "_"; + result << "netPRC=" << model_type << "_"; result << "targetDevice=" << targetDevice << "_"; auto res_str = result.str(); std::replace(res_str.begin(), res_str.end(), '-', '_'); @@ -247,56 +239,44 @@ class ShapeOfDynamicInputGPUTest : public testing::WithParamInterface(prc, inputShapes.first); - input->get_output_tensor(0).get_rt_info()["ie_legacy_preproc"] = pre_process_info; + auto input = std::make_shared(model_type, shapes.first); input->set_friendly_name("input_data"); - auto shape_of_01 = std::make_shared(input); + auto shape_of_01 = std::make_shared(input); shape_of_01->set_friendly_name("shape_of_01"); - auto shape_of_02 = std::make_shared(shape_of_01); + auto shape_of_02 = std::make_shared(shape_of_01); shape_of_02->set_friendly_name("shape_of_02"); - auto result = std::make_shared(shape_of_02); + auto result = std::make_shared(shape_of_02); result->set_friendly_name("outer_result"); - function = std::make_shared(ngraph::OutputVector{result}, ngraph::ParameterVector{input}); + function = std::make_shared(ov::OutputVector{result}, ov::ParameterVector{input}); function->set_friendly_name("shape_of_test"); } }; -TEST_P(ShapeOfDynamicInputGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ShapeOfDynamicInputGPUTest, Inference) { run(); } -const std::vector dynamicInputShapes = { +const std::vector dynamicshapes = { ov::test::InputShape(ov::PartialShape({-1, -1, -1, -1, -1}), {{4, 1, 1, 64, 32}, {6, 1, 1, 8, 4}, {8, 1, 1, 24, 16}}), }; -const std::vector dynamicInputPrec = { - InferenceEngine::Precision::FP16, -}; - INSTANTIATE_TEST_SUITE_P(smoke_Check, ShapeOfDynamicInputGPUTest, testing::Combine( - testing::ValuesIn(dynamicInputShapes), // input shapes - testing::ValuesIn(dynamicInputPrec), // network precision + testing::ValuesIn(dynamicshapes), // input shapes + testing::Values(ov::element::f16), // network precision testing::Values(ov::test::utils::DEVICE_GPU)), // device type ShapeOfDynamicInputGPUTest::getTestCaseName); -} // namespace GPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp index 5de070d5fab5dd..abdd317138a966 100644 --- 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/softmax.cpp @@ -2,33 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "shared_test_classes/single_layer/shape_of.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/softmax.hpp" -namespace GPULayerTestsDefinitions { +namespace { -typedef std::tuple - softmaxGPUTestParamsSet; +typedef std::tuple< + ov::element::Type, // model type + ov::test::InputShape, // inputShape + int64_t> // axis +softmaxGPUTestParamsSet; class SoftMaxLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - ElementType inType; + ov::element::Type model_type; ov::test::InputShape inShape; int64_t axis; - std::tie(inType, inShape, axis) = obj.param; + std::tie(model_type, inShape, axis) = obj.param; std::ostringstream result; - result << "netPRC=" << inType << "_"; + result << "netPRC=" << model_type << "_"; result << "IS=" << ov::test::utils::partialShape2str({inShape.first}) << "_"; result << "TS="; for (const auto& shape : inShape.second) { @@ -42,40 +42,39 @@ class SoftMaxLayerGPUTest : public testing::WithParamInterfaceGetParam(); + std::tie(model_type, inShape, axis) = this->GetParam(); - if (inType == element::Type_t::f16) { + if (model_type == ov::element::f16) { abs_threshold = 0.005; } init_input_shapes({inShape}); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) - params.push_back(std::make_shared(inType, shape)); + params.push_back(std::make_shared(model_type, shape)); - const auto softMax = std::make_shared(params.at(0), axis); - auto makeFunction = [](ParameterVector &params, const std::shared_ptr &lastNode) { - ResultVector results; + const auto softMax = std::make_shared(params.at(0), axis); + auto makeFunction = [](ov::ParameterVector &params, const std::shared_ptr &lastNode) { + ov::ResultVector results; for (size_t i = 0; i < lastNode->get_output_size(); i++) - results.push_back(std::make_shared(lastNode->output(i))); + results.push_back(std::make_shared(lastNode->output(i))); - return std::make_shared(results, params, "ShapeOfLayerGPUTest"); + return std::make_shared(results, params, "SoftMaxLayerGPUTest"); }; function = makeFunction(params, softMax); } }; -TEST_P(SoftMaxLayerGPUTest, CompareWithRefs) { +TEST_P(SoftMaxLayerGPUTest, Inference) { run(); } -namespace { -const std::vector netPrecisions = { - ElementType::f32, ElementType::f16 +const std::vector netPrecisions = { + ov::element::f32, ov::element::f16 }; const std::vector axis2D = {0, 1}; @@ -137,6 +136,4 @@ INSTANTIATE_TEST_SUITE_P(softMaxGPUDynamicTest5D, testing::ValuesIn(inputShapes5D), testing::ValuesIn(axis5D)), SoftMaxLayerGPUTest::getTestCaseName); - } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp index e66f099bf3e0bc..5de49d120c0f6c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp +++
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_batch.cpp @@ -2,16 +2,17 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/space_to_batch.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include "common_test_utils/test_constants.hpp" #include "common_test_utils/ov_tensor_utils.hpp" +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/space_to_batch.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; struct SpaceToBatchParams { std::vector block; @@ -22,22 +23,19 @@ struct SpaceToBatchParams { typedef std::tuple< InputShape, // Input shapes SpaceToBatchParams, - ElementType, // Element type - ngraph::helpers::InputLayerType, // block/begin/end input type - std::map // Additional network configuration -> SpaceToBatchParamsLayerParamSet; + ov::element::Type, // Element type + ov::test::utils::InputLayerType> // block/begin/end input type +SpaceToBatchParamsLayerParamSet; class SpaceToBatchLayerGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape shapes; SpaceToBatchParams params; - ElementType elementType; - ngraph::helpers::InputLayerType restInputType; - TargetDevice targetDevice; - std::map additionalConfig; - std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param; + ov::element::Type elementType; + ov::test::utils::InputLayerType restInputType; + std::tie(shapes, params, elementType, restInputType) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -49,17 +47,12 @@ class SpaceToBatchLayerGPUTest : public testing::WithParamInterface(results, params, "SpaceToBatchFuncTest"); } }; -TEST_P(SpaceToBatchLayerGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(SpaceToBatchLayerGPUTest, Inference) { run(); } -namespace { - -std::map emptyAdditionalConfig; - -const std::vector inputPrecisions = { - ElementType::f32 +const std::vector inputPrecisions = { + ov::element::f32 }; -const std::vector restInputTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER +const std::vector restInputTypes = { + ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER }; const std::vector inputShapesDynamic3D = { @@ -183,8 +169,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic3D, SpaceToBatchLayerGPUTe ::testing::ValuesIn(inputShapesDynamic3D), ::testing::ValuesIn(paramsPlain3D), ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(restInputTypes)), SpaceToBatchLayerGPUTest::getTestCaseName); @@ -202,8 +187,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic4D, SpaceToBatchLayerGPUTe ::testing::ValuesIn(inputShapesDynamic4D), ::testing::ValuesIn(paramsPlain4D), ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(restInputTypes)), SpaceToBatchLayerGPUTest::getTestCaseName); const
std::vector inputShapesDynamic5D = { @@ -220,9 +204,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Dynamic5D, SpaceToBatchLayerGPUTe ::testing::ValuesIn(inputShapesDynamic5D), ::testing::ValuesIn(paramsPlain5D), ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(restInputTypes), - ::testing::Values(emptyAdditionalConfig)), + ::testing::ValuesIn(restInputTypes)), SpaceToBatchLayerGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp index 2976a845e2a57a..db63149353e70c 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/space_to_depth.cpp @@ -2,23 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/space_to_depth.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "ie_precision.hpp" -#include "ov_models/builders.hpp" #include "common_test_utils/ov_tensor_utils.hpp" -#include +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ov::op::v0; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/space_to_depth.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; +using ov::op::v0::SpaceToDepth; typedef std::tuple< - InputShape, // Input shape - ElementType, // Input element type - SpaceToDepth::SpaceToDepthMode, // Mode - std::size_t // Block size + InputShape, // Input shape + ov::element::Type, // Input element type + SpaceToDepth::SpaceToDepthMode, // Mode + std::size_t // Block size > SpaceToDepthLayerGPUTestParams; class SpaceToDepthLayerGPUTest : public testing::WithParamInterface, @@ -26,10 +26,10 @@ class SpaceToDepthLayerGPUTest : public testing::WithParamInterface obj) { InputShape shapes; - ElementType inType; + ov::element::Type model_type; SpaceToDepth::SpaceToDepthMode mode; - std::size_t blockSize; - std::tie(shapes, inType, mode, blockSize) = obj.param; + std::size_t block_size; + std::tie(shapes, model_type, mode, block_size) = obj.param; std::ostringstream results; results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_"; @@ -37,7 +37,7 @@ class SpaceToDepthLayerGPUTest : public testing::WithParamInterface +#include "common_test_utils/test_enums.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/variadic_split.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< size_t, // Num splits int64_t, // Axis - ElementType, // Net precision + ov::element::Type, // Model type InputShape, // Input shapes std::vector // Used outputs indices > splitDynamicGPUTestParams; class SplitLayerGPUDynamicTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::ostringstream result; - size_t numSplits; + size_t num_splits; int64_t axis; - ElementType netPrecision; - InputShape inputShape; - std::vector outIndices; 
-        std::tie(numSplits, axis, netPrecision, inputShape, outIndices) = obj.param;
+        ov::element::Type model_type;
+        InputShape input_shape;
+        std::vector<size_t> out_indices;
+        std::tie(num_splits, axis, model_type, input_shape, out_indices) = obj.param;
         result << "IS=";
-        result << ov::test::utils::partialShape2str({inputShape.first}) << "_";
+        result << ov::test::utils::partialShape2str({input_shape.first}) << "_";
         result << "TS=";
-        for (const auto& shape : inputShape.second) {
+        for (const auto& shape : input_shape.second) {
             result << ov::test::utils::vec2str(shape) << "_";
         }
-        result << "numSplits=" << numSplits << "_";
+        result << "num_splits=" << num_splits << "_";
         result << "axis=" << axis << "_";
-        if (!outIndices.empty()) {
-            result << "outIndices" << ov::test::utils::vec2str(outIndices) << "_";
+        if (!out_indices.empty()) {
+            result << "out_indices" << ov::test::utils::vec2str(out_indices) << "_";
         }
-        result << "netPRC=" << netPrecision << "_";
+        result << "netPRC=" << model_type << "_";
         return result.str();
     }
@@ -54,47 +53,46 @@ class SplitLayerGPUDynamicTest : public testing::WithParamInterface<splitDynamicGPUTestParams>,
     void SetUp() override {
         targetDevice = ov::test::utils::DEVICE_GPU;
         int64_t axis;
-        size_t numSplits;
-        InputShape inputShape;
-        std::vector<size_t> outIndices;
-        ElementType netPrecision;
-        std::tie(numSplits, axis, netPrecision, inputShape, outIndices) = this->GetParam();
-        if (outIndices.empty()) {
-            for (size_t i = 0; i < numSplits; ++i) {
-                outIndices.push_back(i);
+        size_t num_splits;
+        InputShape input_shape;
+        std::vector<size_t> out_indices;
+        ov::element::Type model_type;
+        std::tie(num_splits, axis, model_type, input_shape, out_indices) = this->GetParam();
+        if (out_indices.empty()) {
+            for (size_t i = 0; i < num_splits; ++i) {
+                out_indices.push_back(i);
             }
         }
-        init_input_shapes({inputShape});
-        ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
+        init_input_shapes({input_shape});
+        ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};
         auto split_axis_op = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::i64, ov::Shape{}, std::vector<int64_t>{axis});
-        auto split = std::make_shared<ov::op::v1::Split>(dyn_params[0], split_axis_op, numSplits);
+        auto split = std::make_shared<ov::op::v1::Split>(dyn_params[0], split_axis_op, num_splits);

-        ngraph::ResultVector results;
-        for (size_t i = 0; i < outIndices.size(); i++) {
-            results.push_back(std::make_shared<ngraph::opset1::Result>(split->output(outIndices[i])));
+        ov::ResultVector results;
+        for (size_t i = 0; i < out_indices.size(); i++) {
+            results.push_back(std::make_shared<ov::op::v0::Result>(split->output(out_indices[i])));
         }
-        function = std::make_shared<ngraph::Function>(results, dyn_params, "split");
+        function = std::make_shared<ov::Model>(results, dyn_params, "split");
     }
 };

-TEST_P(SplitLayerGPUDynamicTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(SplitLayerGPUDynamicTest, Inference) {
     run();
 }

-const std::vector<InputShape> inputShapes4d = {
+const std::vector<InputShape> input_shapes4d = {
     {
         {-1, -1, -1, -1}, {{1, 4, 5, 7}, {3, 8, 5, 9}, {5, 16, 1, 8}}
     }
 };

-const std::vector<InputShape> inputShapes5d = {
+const std::vector<InputShape> input_shapes5d = {
     {
         {-1, -1, -1, -1, -1}, {{10, 20, 30, 40, 10}, {5, 18, 3, 10, 10}, {3, 10, 6, 2, 4}}
     }
 };

-const std::vector<InputShape> inputShapes6d = {
+const std::vector<InputShape> input_shapes6d = {
     {
         {-1, -1, -1, -1, -1, -1}, {{10, 32, 3, 4, 12, 24}, {5, 2, 3, 1, 32, 12}, {3, 1, 6, 2, 4, 18}}
     }
@@ -104,63 +102,63 @@ INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck4Dr, SplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(2),                              // nSplits
                                 ::testing::Values(1),                              // axes
-                                ::testing::Values(ElementType::f16),               // netPrec
-                                ::testing::ValuesIn(inputShapes4d),                // inShapes
-                                ::testing::Values(std::vector<size_t>({}))),       // outIndices
+                                ::testing::Values(ov::element::f16),               // netPrec
+                                ::testing::ValuesIn(input_shapes4d),               // inShapes
+                                ::testing::Values(std::vector<size_t>({}))),       // out_indices
                         SplitLayerGPUDynamicTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck5D, SplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(3),                              // nSplits
                                 ::testing::Values(2),                              // axes
-                                ::testing::Values(ElementType::f32),               // netPrec
-                                ::testing::ValuesIn(inputShapes5d),                // inShapes
-                                ::testing::Values(std::vector<size_t>({}))),       // outIndices
+                                ::testing::Values(ov::element::f32),               // netPrec
+                                ::testing::ValuesIn(input_shapes5d),               // inShapes
+                                ::testing::Values(std::vector<size_t>({}))),       // out_indices
                         SplitLayerGPUDynamicTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_SplitsCheck6D, SplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(4),                              // nSplits
                                 ::testing::Values(4),                              // axes
-                                ::testing::Values(ElementType::i8),                // netPrec
-                                ::testing::ValuesIn(inputShapes6d),                // inShapes
-                                ::testing::Values(std::vector<size_t>({}))),       // outIndices
+                                ::testing::Values(ov::element::i8),                // netPrec
+                                ::testing::ValuesIn(input_shapes6d),               // inShapes
+                                ::testing::Values(std::vector<size_t>({}))),       // out_indices
                         SplitLayerGPUDynamicTest::getTestCaseName);

 typedef std::tuple<
         int64_t,                            // Axis
         std::vector<int32_t>,               // SplitLength
-        ElementType,                        // Net precision
+        ov::element::Type,                  // Model type
         InputShape,                         // Input shapes
-        ngraph::helpers::InputLayerType     // input type of splitLength
+        ov::test::utils::InputLayerType     // input type of split_length
 > varSplitDynamicGPUTestParams;

 class VariadicSplitLayerGPUDynamicTest : public testing::WithParamInterface<varSplitDynamicGPUTestParams>,
-                                         virtual public SubgraphBaseTest {
+                                         virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<varSplitDynamicGPUTestParams> obj) {
         std::ostringstream result;
         int64_t axis;
-        std::vector<int32_t> splitLength;
-        ElementType netPrecision;
-        InputShape inputShape;
-        ngraph::helpers::InputLayerType inputType;
-        std::tie(axis, splitLength, netPrecision, inputShape, inputType) = obj.param;
+        std::vector<int32_t> split_length;
+        ov::element::Type model_type;
+        InputShape input_shape;
+        ov::test::utils::InputLayerType inputType;
+        std::tie(axis, split_length, model_type, input_shape, inputType) = obj.param;
         result << "IS=";
-        result << ov::test::utils::partialShape2str({inputShape.first}) << "_";
+        result << ov::test::utils::partialShape2str({input_shape.first}) << "_";
         result << "TS=";
-        for (const auto& shape : inputShape.second) {
+        for (const auto& shape : input_shape.second) {
             result << ov::test::utils::vec2str(shape) << "_";
         }
-        result << "SplitLen=" << ov::test::utils::vec2str(splitLength) << "_";
+        result << "SplitLen=" << ov::test::utils::vec2str(split_length) << "_";
         result << "axis=" << axis << "_";
-        result << "netPRC=" << netPrecision << "_";
+        result << "netPRC=" << model_type << "_";
         result << "restInputType=" << inputType << "_";
         return result.str();
     }

-    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
         inputs.clear();
         const auto& funcInputs = function->inputs();
         for (size_t i = 0; i < funcInputs.size(); ++i) {
@@ -169,8 +167,8 @@ class VariadicSplitLayerGPUDynamicTest : public testing::WithParamInterface<varSplitDynamicGPUTestParams>,
                 auto* dataPtr = tensor.data<ov::element_type_traits<ov::element::i64>::value_type>();
-                for (size_t i = 0; i < splitLength_vec.size(); i++) {
-                    dataPtr[i] = splitLength_vec[i];
+                for (size_t i = 0; i < split_length_vec.size(); i++) {
+                    dataPtr[i] = split_length_vec[i];
                 }
             } else {
                 tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
@@ -181,89 +179,88 @@ class VariadicSplitLayerGPUDynamicTest : public testing::WithParamInterface<varSplitDynamicGPUTestParams>,
 protected:
-    std::vector<int32_t> splitLength_vec;
+    std::vector<int32_t> split_length_vec;
     size_t inferRequestNum = 0;
-    ElementType netPrecision;
+    ov::element::Type model_type;

     void SetUp() override {
         targetDevice = ov::test::utils::DEVICE_GPU;
         int64_t axis;
-        InputShape inputShape;
-        std::vector<int32_t> splitLength;
-        ngraph::helpers::InputLayerType inputType;
-        std::tie(axis, splitLength, netPrecision, inputShape, inputType) = this->GetParam();
-
-        splitLength_vec = splitLength;
-
-        std::vector<InputShape> inputShapes;
-        inputShapes.push_back(inputShape);
-        if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
-            inputShapes.push_back(InputShape({static_cast<int64_t>(splitLength.size())},
-                                             std::vector<ov::Shape>(inputShape.second.size(), {splitLength.size()})));
+        InputShape input_shape;
+        std::vector<int32_t> split_length;
+        ov::test::utils::InputLayerType inputType;
+        std::tie(axis, split_length, model_type, input_shape, inputType) = this->GetParam();
+
+        split_length_vec = split_length;
+
+        std::vector<InputShape> input_shapes;
+        input_shapes.push_back(input_shape);
+        if (inputType == ov::test::utils::InputLayerType::PARAMETER) {
+            input_shapes.push_back(InputShape({static_cast<int64_t>(split_length.size())},
+                                              std::vector<ov::Shape>(input_shape.second.size(), {split_length.size()})));
         }
-        init_input_shapes(inputShapes);
+        init_input_shapes(input_shapes);

-        ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
+        ov::ParameterVector dyn_params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};

-        auto splitAxisOp = std::make_shared<ngraph::opset1::Constant>(ngraph::element::i64, ngraph::Shape{}, std::vector<int64_t>{static_cast<int64_t>(axis)});
+        auto splitAxisOp = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{static_cast<int64_t>(axis)});

-        std::shared_ptr<ngraph::Node> splitLengthOp;
-        if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
-            auto splitLengthNode = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, ov::Shape{splitLength.size()});
-            dyn_params.push_back(splitLengthNode);
-            splitLengthOp = splitLengthNode;
+        std::shared_ptr<ov::Node> split_lengthOp;
+        if (inputType == ov::test::utils::InputLayerType::PARAMETER) {
+            auto split_lengthNode = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::Shape{split_length.size()});
+            dyn_params.push_back(split_lengthNode);
+            split_lengthOp = split_lengthNode;
         } else {
-            splitLengthOp = std::make_shared<ngraph::opset1::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{splitLength.size()}, splitLength);
+            split_lengthOp = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{split_length.size()}, split_length);
         }

-        auto varSplit = std::make_shared<ngraph::opset1::VariadicSplit>(dyn_params[0], splitAxisOp, splitLengthOp);
-        ngraph::ResultVector results;
-        for (size_t i = 0; i < splitLength.size(); i++) {
-            results.push_back(std::make_shared<ngraph::opset1::Result>(varSplit->output(i)));
+        auto varSplit = std::make_shared<ov::op::v1::VariadicSplit>(dyn_params[0], splitAxisOp, split_lengthOp);
+        ov::ResultVector results;
+        for (size_t i = 0; i < split_length.size(); i++) {
+            results.push_back(std::make_shared<ov::op::v0::Result>(varSplit->output(i)));
         }
-        function = std::make_shared<ngraph::Function>(results, dyn_params, "varSplit");
+        function = std::make_shared<ov::Model>(results, dyn_params, "varSplit");
     }
 };

-TEST_P(VariadicSplitLayerGPUDynamicTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(VariadicSplitLayerGPUDynamicTest, Inference) {
     run();
 }

-const std::vector<ngraph::helpers::InputLayerType> restInputTypes = {
-    ngraph::helpers::InputLayerType::CONSTANT,
-    ngraph::helpers::InputLayerType::PARAMETER
+const std::vector<ov::test::utils::InputLayerType> restInputTypes = {
+    ov::test::utils::InputLayerType::CONSTANT,
+    ov::test::utils::InputLayerType::PARAMETER
 };

 INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck4D, VariadicSplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(1),                                  // axes
-                                ::testing::Values(std::vector<int32_t>{2, 1, -1}),     // splitLength
-                                ::testing::Values(ElementType::f16),                   // netPrec
-                                ::testing::ValuesIn(inputShapes4d),                    // inShapes
-                                ::testing::ValuesIn(restInputTypes)),                  // input type of splitLength
+                                ::testing::Values(std::vector<int32_t>{2, 1, -1}),     // split_length
+                                ::testing::Values(ov::element::f16),                   // netPrec
+                                ::testing::ValuesIn(input_shapes4d),                   // inShapes
+                                ::testing::ValuesIn(restInputTypes)),                  // input type of split_length
                         VariadicSplitLayerGPUDynamicTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck5D, VariadicSplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(2),                                  // axes
-                                ::testing::Values(std::vector<int32_t>{2, -1}),        // splitLength
-                                ::testing::Values(ElementType::f32),                   // netPrec
-                                ::testing::ValuesIn(inputShapes5d),                    // inShapes
-                                ::testing::ValuesIn(restInputTypes)),                  // input type of splitLength
+                                ::testing::Values(std::vector<int32_t>{2, -1}),        // split_length
+                                ::testing::Values(ov::element::f32),                   // netPrec
+                                ::testing::ValuesIn(input_shapes5d),                   // inShapes
+                                ::testing::ValuesIn(restInputTypes)),                  // input type of split_length
                         VariadicSplitLayerGPUDynamicTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck6D, VariadicSplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(5),                                  // nSplits
-                                ::testing::Values(std::vector<int32_t>{2, 3, 2, -1}),  // splitLength
-                                ::testing::Values(ElementType::i8),                    // netPrec
-                                ::testing::ValuesIn(inputShapes6d),                    // inShapes
-                                ::testing::ValuesIn(restInputTypes)),                  // input type of splitLength
+                                ::testing::Values(std::vector<int32_t>{2, 3, 2, -1}),  // split_length
+                                ::testing::Values(ov::element::i8),                    // netPrec
+                                ::testing::ValuesIn(input_shapes6d),                   // inShapes
+                                ::testing::ValuesIn(restInputTypes)),                  // input type of split_length
                         VariadicSplitLayerGPUDynamicTest::getTestCaseName);

-const std::vector<InputShape> inputShapes4d_static = {
+const std::vector<InputShape> input_shapes4d_static = {
     {
         {5, 16, 10, 8}, {{5, 16, 10, 8}, }
     }
@@ -272,10 +269,10 @@ const std::vector<InputShape> inputShapes4d_static = {
 INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplitsCheck4D_static_input_dyn_output, VariadicSplitLayerGPUDynamicTest,
                         ::testing::Combine(
                                 ::testing::Values(1),                                  // axes
-                                ::testing::Values(std::vector<int32_t>{2, 1, -1}),     // splitLength
-                                ::testing::Values(ElementType::f16),                   // netPrec
-                                ::testing::ValuesIn(inputShapes4d_static),             // inShapes
-                                ::testing::ValuesIn(restInputTypes)),                  // input type of splitLength
+                                ::testing::Values(std::vector<int32_t>{2, 1, -1}),     // split_length
+                                ::testing::Values(ov::element::f16),                   // netPrec
+                                ::testing::ValuesIn(input_shapes4d_static),            // inShapes
+                                ::testing::ValuesIn(restInputTypes)),                  // input type of split_length
                         VariadicSplitLayerGPUDynamicTest::getTestCaseName);

-} // namespace GPULayerTestsDefinitions
+} // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp
index c6cd7e65258f34..954db256c9aad8 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/strided_slice.cpp
@@ -2,16 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "shared_test_classes/single_layer/strided_slice.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
-#include "common_test_utils/test_constants.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/test_enums.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"

-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/strided_slice.hpp"

-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;

 struct StridedSliceParams {
     std::vector<int64_t> begin;
@@ -27,22 +28,20 @@ struct StridedSliceParams {
 typedef std::tuple<
         InputShape,                                      // Input shapes
         StridedSliceParams,
-        ElementType,                                     // Element type
-        std::vector<ngraph::helpers::InputLayerType>,    // begin/end/stride input type
-        std::map<std::string, std::string>               // Additional network configuration
+        ov::element::Type,                               // Element type
+        std::vector<ov::test::utils::InputLayerType>     // begin/end/stride input type
 > StridedSliceLayerParamSet;

 class StridedSliceLayerGPUTest : public testing::WithParamInterface<StridedSliceLayerParamSet>,
-                                 virtual public SubgraphBaseTest {
+                                 virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<StridedSliceLayerParamSet>& obj) {
         InputShape shapes;
         StridedSliceParams params;
-        ElementType elementType;
-        std::vector<ngraph::helpers::InputLayerType> restInputType;
-        TargetDevice targetDevice;
-        std::map<std::string, std::string> additionalConfig;
-        std::tie(shapes, params, elementType, restInputType, additionalConfig) = obj.param;
+        ov::element::Type model_type;
+        std::vector<ov::test::utils::InputLayerType> rest_input_type;
+        std::string targetDevice;
+        std::tie(shapes, params, model_type, rest_input_type) = obj.param;

         std::ostringstream results;
         results << "IS=" << ov::test::utils::partialShape2str({shapes.first}) << "_";
@@ -50,7 +49,7 @@ class StridedSliceLayerGPUTest : public testing::WithParamInterface<StridedSliceLayerParamSet>,
-        function = std::make_shared<ngraph::Function>(results, params, "StridedSlice");
+        function = std::make_shared<ov::Model>(results, params, "StridedSlice");
     }
 };

-TEST_P(StridedSliceLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
-
+TEST_P(StridedSliceLayerGPUTest, Inference) {
     run();
 }

-namespace {
-
-std::map<std::string, std::string> emptyAdditionalConfig;
-
-const std::vector<ElementType> inputPrecisions = {
-    ElementType::f32
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f32
 };

-const std::vector<std::vector<ngraph::helpers::InputLayerType>> restInputTypes = {
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT},
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT},
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT, ngraph::helpers::InputLayerType::PARAMETER},
-    {ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::PARAMETER, ngraph::helpers::InputLayerType::CONSTANT},
+const std::vector<std::vector<ov::test::utils::InputLayerType>> rest_input_types = {
+    {ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::CONSTANT},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::CONSTANT},
+    {ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
+    {ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT,  ov::test::utils::InputLayerType::PARAMETER},
+    {ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::PARAMETER, ov::test::utils::InputLayerType::CONSTANT},
 };

 const std::vector<InputShape> inputShapesDynamic2D = {
@@ -227,20 +214,18 @@ const std::vector<StridedSliceParams> paramsPlain2D = {
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, StridedSliceLayerGPUTest,
                          ::testing::Combine(
-                             ::testing::ValuesIn(static_shapes_to_test_representation({{32, 20}})),
+                             ::testing::ValuesIn(ov::test::static_shapes_to_test_representation({{32, 20}})),
                              ::testing::ValuesIn(paramsPlain2D),
-                             ::testing::ValuesIn(inputPrecisions),
-                             ::testing::Values(restInputTypes[0]),
-                             ::testing::Values(emptyAdditionalConfig)),
+                             ::testing::ValuesIn(model_types),
+                             ::testing::Values(rest_input_types[0])),
                          StridedSliceLayerGPUTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, StridedSliceLayerGPUTest,
                          ::testing::Combine(
                              ::testing::ValuesIn(inputShapesDynamic2D),
                              ::testing::ValuesIn(paramsPlain2D),
-                             ::testing::ValuesIn(inputPrecisions),
-                             ::testing::ValuesIn(restInputTypes),
-                             ::testing::Values(emptyAdditionalConfig)),
+                             ::testing::ValuesIn(model_types),
+                             ::testing::ValuesIn(rest_input_types)),
                          StridedSliceLayerGPUTest::getTestCaseName);

 const std::vector<StridedSliceParams> testCasesCommon4D = {
@@ -266,9 +251,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, StridedSliceLayerGPUTest,
         ::testing::Combine(
             ::testing::ValuesIn(inputShapesDynamic4D),
             ::testing::ValuesIn(testCasesCommon4D),
-            ::testing::ValuesIn(inputPrecisions),
-            ::testing::ValuesIn(restInputTypes),
-            ::testing::Values(emptyAdditionalConfig)),
+            ::testing::ValuesIn(model_types),
+            ::testing::ValuesIn(rest_input_types)),
         StridedSliceLayerGPUTest::getTestCaseName);

@@ -295,9 +279,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, StridedSliceLayerGPUTest,
         ::testing::Combine(
             ::testing::ValuesIn(inputShapesDynamic5D),
             ::testing::ValuesIn(testCasesCommon5D),
-            ::testing::ValuesIn(inputPrecisions),
-            ::testing::ValuesIn(restInputTypes),
-            ::testing::Values(emptyAdditionalConfig)),
+            ::testing::ValuesIn(model_types),
+            ::testing::ValuesIn(rest_input_types)),
         StridedSliceLayerGPUTest::getTestCaseName);

@@ -318,10 +301,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_6D, StridedSliceLayerGPUTest,
         ::testing::Combine(
             ::testing::ValuesIn(inputShapesDynamic6D),
             ::testing::ValuesIn(testCasesCommon6D),
-            ::testing::ValuesIn(inputPrecisions),
-            ::testing::ValuesIn(restInputTypes),
-            ::testing::Values(emptyAdditionalConfig)),
+            ::testing::ValuesIn(model_types),
+            ::testing::ValuesIn(rest_input_types)),
         StridedSliceLayerGPUTest::getTestCaseName);

-
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp
index 2c1268d76db7aa..139ba609c1b6fd 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/tile.cpp
@@ -2,26 +2,19 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/tile.hpp"
-#include "common_test_utils/test_constants.hpp"
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"

-using namespace InferenceEngine;
-using namespace ov::test;
-
-namespace GPULayerTestsDefinitions {
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/tile.hpp"

+namespace {
 using TileLayerTestParamsSet = typename std::tuple<
         std::vector<ov::test::InputShape>,     // Input shapes
         std::vector<int64_t>,                  // Repeats
-        ov::element::Type_t,                   // Network precision
+        ov::element::Type,                     // Model type
         bool,                                  // Is Repeats input constant
         std::string>;                          // Device name

@@ -32,27 +25,27 @@ class TileLayerGPUTest : public testing::WithParamInterface<TileLayerTestParamsSet>,
     static std::string getTestCaseName(testing::TestParamInfo<TileLayerTestParamsSet> obj) {
         TileLayerTestParamsSet basicParamsSet = obj.param;
-        std::vector<ov::test::InputShape> inputShapes;
+        std::vector<ov::test::InputShape> input_shapes;
         std::vector<int64_t> repeats;
-        ov::element::Type_t netPrecision;
-        bool isRepeatsConst;
+        ov::element::Type_t model_type;
+        bool is_repeats_const;
         std::string deviceName;
-        std::tie(inputShapes, repeats, netPrecision, isRepeatsConst, deviceName) = basicParamsSet;
+        std::tie(input_shapes, repeats, model_type, is_repeats_const, deviceName) = basicParamsSet;

         std::ostringstream result;
         result << "IS=(";
-        for (const auto& shape : inputShapes) {
+        for (const auto& shape : input_shapes) {
             result << ov::test::utils::partialShape2str({shape.first}) << "_";
         }
         result << ")_TS=(";
-        for (const auto& shape : inputShapes) {
+        for (const auto& shape : input_shapes) {
             for (const auto& item : shape.second) {
                 result << ov::test::utils::vec2str(item) << "_";
             }
         }
         result << "Repeats=" << ov::test::utils::vec2str(repeats) << "_";
-        result << "netPrec=" << netPrecision << "_";
-        result << "constRepeats=" << (isRepeatsConst ? "True" : "False") << "_";
+        result << "netPrec=" << model_type << "_";
+        result << "constRepeats=" << (is_repeats_const ? "True" : "False") << "_";
         result << "trgDev=" << deviceName;

         return result.str();
@@ -62,31 +55,31 @@ class TileLayerGPUTest : public testing::WithParamInterface<TileLayerTestParamsSet>,
         TileLayerTestParamsSet basicParamsSet = this->GetParam();
-        std::vector<ov::test::InputShape> inputShapes;
-        ov::element::Type_t netPrecision;
-        bool isRepeatsConst;
-        std::tie(inputShapes, repeatsData, netPrecision, isRepeatsConst, targetDevice) = basicParamsSet;
+        std::vector<ov::test::InputShape> input_shapes;
+        ov::element::Type_t model_type;
+        bool is_repeats_const;
+        std::tie(input_shapes, repeatsData, model_type, is_repeats_const, targetDevice) = basicParamsSet;

-        if (inputShapes.front().first.rank() != 0) {
-            inputDynamicShapes.push_back(inputShapes.front().first);
-            if (!isRepeatsConst) {
+        if (input_shapes.front().first.rank() != 0) {
+            inputDynamicShapes.push_back(input_shapes.front().first);
+            if (!is_repeats_const) {
                 inputDynamicShapes.push_back({ static_cast<int64_t>(repeatsData.size()) });
             }
         }
-        const size_t targetStaticShapeSize = inputShapes.front().second.size();
+        const size_t targetStaticShapeSize = input_shapes.front().second.size();
         targetStaticShapes.resize(targetStaticShapeSize);
         for (size_t i = 0lu; i < targetStaticShapeSize; ++i) {
-            targetStaticShapes[i].push_back(inputShapes.front().second[i]);
-            if (!isRepeatsConst)
+            targetStaticShapes[i].push_back(input_shapes.front().second[i]);
+            if (!is_repeats_const)
                 targetStaticShapes[i].push_back({ repeatsData.size() });
         }

         ov::ParameterVector functionParams;
         if (inputDynamicShapes.empty()) {
-            functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, targetStaticShapes.front().front()));
+            functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, targetStaticShapes.front().front()));
         } else {
-            functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes.front()));
-            if (!isRepeatsConst) {
+            functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes.front()));
+            if (!is_repeats_const) {
                 functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1]));
                 functionParams.back()->set_friendly_name("repeats");
             }
@@ -94,22 +87,22 @@ class TileLayerGPUTest : public testing::WithParamInterface<TileLayerTestParamsSet>,
         functionParams.front()->set_friendly_name("data");

         std::shared_ptr<ov::Node> tileNode;
-        if (isRepeatsConst) {
+        if (is_repeats_const) {
             tileNode = std::make_shared<ov::op::v0::Tile>(functionParams[0],
                     ov::op::v0::Constant::create(ov::element::i64, { repeatsData.size() }, repeatsData));
         } else {
             tileNode = std::make_shared<ov::op::v0::Tile>(functionParams[0], functionParams[1]);
         }
-        ngraph::ResultVector results;
+        ov::ResultVector results;
         for (size_t i = 0; i < tileNode->get_output_size(); i++) {
-            results.push_back(std::make_shared<ngraph::opset1::Result>(tileNode->output(i)));
+            results.push_back(std::make_shared<ov::op::v0::Result>(tileNode->output(i)));
         }
-        function = std::make_shared<ngraph::Function>(results, functionParams, "Tile");
+        function = std::make_shared<ov::Model>(results, functionParams, "Tile");
     }

-    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
         inputs.clear();
         const auto& funcInputs = function->inputs();
         for (size_t i = 0lu; i < funcInputs.size(); i++) {
@@ -123,8 +116,11 @@ class TileLayerGPUTest : public testing::WithParamInterface<TileLayerTestParamsSet>,
     std::vector<int64_t> repeatsData;
 };

-TEST_P(TileLayerGPUTest, CompareWithRefs) {
+TEST_P(TileLayerGPUTest, Inference) {
     run();
 }

-namespace {
-
-const std::vector<ov::element::Type_t> netPrecisions = {
+const std::vector<ov::element::Type> model_types = {
         ov::element::f32,
         ov::element::f16,
 };

-const std::vector<std::vector<ov::test::InputShape>> dynamicInputShapes4D = {
+const std::vector<std::vector<ov::test::InputShape>> dynamic_input_shapes4D = {
     {
         { // Origin dynamic shapes
             {ov::Dimension(1, 20), ov::Dimension(10, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)},
@@ -169,7 +163,7 @@ const std::vector<std::vector<ov::test::InputShape>> dynamicInputShapes4D = {
     }
 };

-const std::vector<std::vector<ov::test::InputShape>> dynamicInputShapes5D = {
+const std::vector<std::vector<ov::test::InputShape>> dynamic_input_shapes5D = {
     {
         { // Origin dynamic shapes
             {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 70)},
@@ -212,22 +206,20 @@ const std::vector<std::vector<int64_t>> repeats5D = {
 INSTANTIATE_TEST_CASE_P(DynamicShape4D, TileLayerGPUTest,
         ::testing::Combine(
-               ::testing::ValuesIn(dynamicInputShapes4D),
+               ::testing::ValuesIn(dynamic_input_shapes4D),
                ::testing::ValuesIn(repeats4D),
-               ::testing::ValuesIn(netPrecisions),
+               ::testing::ValuesIn(model_types),
                ::testing::Values(true, false),
                ::testing::Values(ov::test::utils::DEVICE_GPU)),
        TileLayerGPUTest::getTestCaseName);

 INSTANTIATE_TEST_CASE_P(DynamicShape5D, TileLayerGPUTest,
         ::testing::Combine(
-               ::testing::ValuesIn(dynamicInputShapes5D),
+               ::testing::ValuesIn(dynamic_input_shapes5D),
                ::testing::ValuesIn(repeats5D),
-               ::testing::ValuesIn(netPrecisions),
+               ::testing::ValuesIn(model_types),
                ::testing::Values(true, false),
                ::testing::Values(ov::test::utils::DEVICE_GPU)),
        TileLayerGPUTest::getTestCaseName);

 } // namespace
-
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp
index 7ff69b3db00f3e..d260b66331fa51 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp
@@ -2,64 +2,62 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/topk.hpp"
-#include "common_test_utils/test_constants.hpp"
+#include
+
 #include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/test_enums.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"

-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/topk.hpp"

-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;

 typedef std::tuple<
-        int64_t,                            // keepK
-        int64_t,                            // axis
-        ngraph::opset4::TopK::Mode,         // mode
-        ngraph::opset4::TopK::SortType,     // sort
-        ElementType,                        // Net precision
-        ElementType,                        // Input precision
-        ElementType,                        // Output precision
-        InputShape,                         // inputShape
-        TargetDevice,                       // Device name
-        ngraph::helpers::InputLayerType     // Input type
+        int64_t,                            // keepK
+        int64_t,                            // axis
+        ov::op::v1::TopK::Mode,             // mode
+        ov::op::v1::TopK::SortType,         // sort
+        ov::element::Type,                  // Model type
+        ov::element::Type,                  // Input precision
+        ov::element::Type,                  // Output precision
+        InputShape,                         // input_shape
+        std::string,                        // Device name
+        ov::test::utils::InputLayerType     // Input type
 > TopKLayerTestParamsSet;

 class TopKLayerGPUTest : public testing::WithParamInterface<TopKLayerTestParamsSet>,
-                         virtual public SubgraphBaseTest {
+                         virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<TopKLayerTestParamsSet>& obj) {
         TopKLayerTestParamsSet basicParamsSet = obj.param;

         int64_t keepK, axis;
-        ngraph::opset4::TopK::Mode mode;
-        ngraph::opset4::TopK::SortType sort;
-        ElementType netPrecision, inPrc, outPrc;
-        InputShape inputShape;
-        TargetDevice targetDevice;
-        ngraph::helpers::InputLayerType inputType;
-        std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inputShape, targetDevice, inputType) = basicParamsSet;
+        ov::op::v1::TopK::Mode mode;
+        ov::op::v1::TopK::SortType sort;
+        ov::element::Type model_type, inPrc, outPrc;
+        InputShape input_shape;
+        std::string targetDevice;
+        ov::test::utils::InputLayerType input_type;
+        std::tie(keepK, axis, mode, sort, model_type, inPrc, outPrc, input_shape, targetDevice, input_type) = basicParamsSet;

         std::ostringstream result;
         result << "k=" << keepK << "_";
         result << "axis=" << axis << "_";
         result << "mode=" << mode << "_";
         result << "sort=" << sort << "_";
-        result << "netPRC=" << netPrecision << "_";
+        result << "netPRC=" << model_type << "_";
         result << "inPRC=" << inPrc << "_";
         result << "outPRC=" << outPrc << "_";
-        result << "IS=" << ov::test::utils::partialShape2str({inputShape.first}) << "_" << "TS=(";
-        for (const auto& shape : inputShape.second) {
+        result << "IS=" << ov::test::utils::partialShape2str({input_shape.first}) << "_" << "TS=(";
+        for (const auto& shape : input_shape.second) {
             result << ov::test::utils::vec2str(shape) << "_";
         }
         result << ")_";
-        result << "inputType=" << inputType;
+        result << "input_type=" << input_type;
         result << "TargetDevice=" << targetDevice;

         return result.str();
@@ -70,43 +68,43 @@ class TopKLayerGPUTest : public testing::WithParamInterface<TopKLayerTestParamsSet>,
         TopKLayerTestParamsSet basicParamsSet = this->GetParam();

         int64_t keepK;
-        ngraph::opset4::TopK::Mode mode;
-        ngraph::opset4::TopK::SortType sort;
-        ElementType inPrc, outPrc;
-        InputShape inputShape;
-        std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inputShape, targetDevice, inputType) = basicParamsSet;
-
-        if (inputType == ngraph::helpers::InputLayerType::CONSTANT) {
-            init_input_shapes({inputShape});
+        ov::op::v1::TopK::Mode mode;
+        ov::op::v1::TopK::SortType sort;
+        ov::element::Type inPrc, outPrc;
+        InputShape input_shape;
+        std::tie(keepK, axis, mode, sort, model_type, inPrc, outPrc, input_shape, targetDevice, input_type) = basicParamsSet;
+
+        if (input_type == ov::test::utils::InputLayerType::CONSTANT) {
+            init_input_shapes({input_shape});
         } else {
-            inputDynamicShapes = {inputShape.first, {}};
-            for (size_t i = 0; i < inputShape.second.size(); ++i) {
-                targetStaticShapes.push_back({inputShape.second[i], {}});
+            inputDynamicShapes = {input_shape.first, {}};
+            for (size_t i = 0; i < input_shape.second.size(); ++i) {
+                targetStaticShapes.push_back({input_shape.second[i], {}});
             }
         }

-        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
+        ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(model_type, inputDynamicShapes[0])};

-        std::shared_ptr<ngraph::Node> topk;
-        if (inputType == ngraph::helpers::InputLayerType::CONSTANT) {
-            auto k = std::make_shared<ngraph::opset4::Constant>(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK);
-            topk = std::dynamic_pointer_cast<ngraph::opset4::TopK>(std::make_shared<ngraph::opset4::TopK>(params[0], k, axis, mode, sort));
+        std::shared_ptr<ov::Node> topk;
+        if (input_type == ov::test::utils::InputLayerType::CONSTANT) {
+            auto k = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, &keepK);
+            topk = std::dynamic_pointer_cast<ov::op::v1::TopK>(std::make_shared<ov::op::v1::TopK>(params[0], k, axis, mode, sort));
         } else {
-            auto k = std::make_shared<ngraph::opset1::Parameter>(ngraph::element::Type_t::i64, inputDynamicShapes[1]);
+            auto k = std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1]);
             params.push_back(k);
-            topk = std::dynamic_pointer_cast<ngraph::opset4::TopK>(
-                    std::make_shared<ngraph::opset4::TopK>(params[0], k, axis, mode, sort));
+            topk = std::dynamic_pointer_cast<ov::op::v1::TopK>(
+                    std::make_shared<ov::op::v1::TopK>(params[0], k, axis, mode, sort));
         }

-        ngraph::ResultVector results;
+        ov::ResultVector results;
         for (size_t i = 0; i < topk->get_output_size(); i++) {
-            results.push_back(std::make_shared<ngraph::opset1::Result>(topk->output(i)));
+            results.push_back(std::make_shared<ov::op::v0::Result>(topk->output(i)));
         }

-        function = std::make_shared<ngraph::Function>(results, params, "TopK");
+        function = std::make_shared<ov::Model>(results, params, "TopK");
     }

-    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
         inputs.clear();
         const auto& funcInputs = function->inputs();
         auto shape = targetInputStaticShapes.front();
@@ -114,7 +112,7 @@ class TopKLayerGPUTest : public testing::WithParamInterface<TopKLayerTestParamsSet>,
-        if (netPrecision == ElementType::f32) {
+        if (model_type == ov::element::f32) {
             std::vector<int> data(size);
             int start = -static_cast<int>(size / 2);
@@ -127,18 +125,19 @@ class TopKLayerGPUTest : public testing::WithParamInterface<TopKLayerTestParamsSet>,
                 dataPtr[i] = static_cast<float>(data[i]);
             }
         } else {
-            FAIL() << "generate_inputs for " << netPrecision << " precision isn't supported";
+            FAIL() << "generate_inputs for " << model_type << " precision isn't supported";
         }
         inputs.insert({funcInputs[0].get_node_shared_ptr(), tensor});

-        if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
+        if (input_type == ov::test::utils::InputLayerType::PARAMETER) {
             const auto& kPrecision = funcInputs[1].get_element_type();
             const auto& kShape = targetInputStaticShapes[1];

-            const size_t startFrom = 1;
-            const size_t range = targetInputStaticShapes[0][axis];
-            const size_t seed = inferRequestNum++;
-            const auto kTensor = ov::test::utils::create_and_fill_tensor(kPrecision, kShape, range, startFrom, 1, seed);
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = 1;
+            in_data.range = targetInputStaticShapes[0][axis];
+            in_data.seed = inferRequestNum++;
+            const auto kTensor = ov::test::utils::create_and_fill_tensor(kPrecision, kShape, in_data);

             inputs.insert({funcInputs[1].get_node_shared_ptr(), kTensor});
         }
@@ -147,36 +146,32 @@ class TopKLayerGPUTest : public testing::WithParamInterface<TopKLayerTestParamsSet>,
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f32,
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f32,
 };

 const std::vector<int64_t> axes = {0, 3};
 const std::vector<int64_t> k = {3, 5, 7};

-const std::vector<ngraph::opset4::TopK::Mode> modes = {
-    ngraph::opset4::TopK::Mode::MIN,
-    ngraph::opset4::TopK::Mode::MAX
+const std::vector<ov::op::v1::TopK::Mode> modes = {
+    ov::op::v1::TopK::Mode::MIN,
+    ov::op::v1::TopK::Mode::MAX
 };

-const std::vector<ngraph::opset4::TopK::SortType> sortTypes = {
-    ngraph::opset4::TopK::SortType::SORT_VALUES,
-    ngraph::opset4::TopK::SortType::SORT_INDICES,
+const std::vector<ov::op::v1::TopK::SortType> sortTypes = {
+    ov::op::v1::TopK::SortType::SORT_VALUES,
+    ov::op::v1::TopK::SortType::SORT_INDICES,
 };

-std::vector<InputShape> inputShapesDynamic = {
+std::vector<InputShape> input_shapesDynamic = {
     {
         {ov::PartialShape::dynamic(4), {{7, 7, 7, 7}, {7, 8, 7, 9}}},
         {{-1, -1, -1, -1}, {{8, 9, 10, 11}, {11, 7, 8, 9}}}
@@ -189,12 +184,12 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest,
         ::testing::ValuesIn(axes),
         ::testing::ValuesIn(modes),
         ::testing::ValuesIn(sortTypes),
-        ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ElementType::undefined),
-        ::testing::Values(ElementType::undefined),
-        ::testing::ValuesIn(inputShapesDynamic),
+        ::testing::ValuesIn(model_types),
+        ::testing::Values(ov::element::undefined),
+        ::testing::Values(ov::element::undefined),
+        ::testing::ValuesIn(input_shapesDynamic),
         ::testing::Values(ov::test::utils::DEVICE_GPU),
-        ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT)),
+        ::testing::Values(ov::test::utils::InputLayerType::CONSTANT)),
     TopKLayerGPUTest::getTestCaseName);

 INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest,
@@ -203,13 +198,13 @@ INSTANTIATE_TEST_CASE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest,
         ::testing::ValuesIn(axes),
         ::testing::ValuesIn(modes),
         ::testing::ValuesIn(sortTypes),
-        ::testing::ValuesIn(netPrecisions),
-        ::testing::Values(ElementType::undefined),
-        ::testing::Values(ElementType::undefined),
-        ::testing::ValuesIn(inputShapesDynamic),
+        ::testing::ValuesIn(model_types),
+        ::testing::Values(ov::element::undefined),
+        ::testing::Values(ov::element::undefined),
+        ::testing::ValuesIn(input_shapesDynamic),
         ::testing::Values(ov::test::utils::DEVICE_GPU),
-        ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER)),
+        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER)),
     TopKLayerGPUTest::getTestCaseName);

 } // namespace
-} // namespace GPULayerTestsDefinitions
+
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp
index 9e60d64fb7d5f5..db5add50ded52d 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/unique.cpp
@@ -3,82 +3,83 @@
 //

 #include "common_test_utils/ov_tensor_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"

-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/unique.hpp"

-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;

 typedef std::tuple<std::vector<InputShape>,  // Input shapes
                    std::tuple<bool, int>,    // Is flattened and axis
                    bool,                     // Sorted
-                   ElementType               // Data precision
-                   >
+                   ov::element::Type>        // Model type
     UniqueDynamicGPUTestParams;

 class UniqueLayerDynamicGPUTest : public testing::WithParamInterface<UniqueDynamicGPUTestParams>,
-                                  virtual public SubgraphBaseTest {
+                                  virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<UniqueDynamicGPUTestParams>& obj) {
-        std::vector<InputShape> inputShapes;
-        std::tuple<bool, int> flatOrAxis;
+        std::vector<InputShape> input_shapes;
+        std::tuple<bool, int> flat_or_axis;
         bool sorted;
-        ElementType dataPrecision;
-        std::tie(inputShapes, flatOrAxis, sorted, dataPrecision) = obj.param;
+        ov::element::Type model_type;
+        std::tie(input_shapes, flat_or_axis, sorted, model_type) = obj.param;

         std::ostringstream result;
         result << "IS=(";
-        for (size_t i = 0lu; i < inputShapes.size(); i++) {
-            result << ov::test::utils::partialShape2str({inputShapes[i].first})
-                   << (i < inputShapes.size() - 1lu ? "_" : "");
+        for (size_t i = 0lu; i < input_shapes.size(); i++) {
+            result << ov::test::utils::partialShape2str({input_shapes[i].first})
+                   << (i < input_shapes.size() - 1lu ? "_" : "");
         }
         result << ")_TS=";
-        for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) {
+        for (size_t i = 0lu; i < input_shapes.front().second.size(); i++) {
             result << "{";
-            for (size_t j = 0lu; j < inputShapes.size(); j++) {
-                result << ov::test::utils::vec2str(inputShapes[j].second[i])
-                       << (j < inputShapes.size() - 1lu ? "_" : "");
+            for (size_t j = 0lu; j < input_shapes.size(); j++) {
+                result << ov::test::utils::vec2str(input_shapes[j].second[i])
+                       << (j < input_shapes.size() - 1lu ? "_" : "");
             }
             result << "}_";
         }

-        if (!std::get<0>(flatOrAxis)) {
-            result << "axis=" << std::get<1>(flatOrAxis) << "_";
+        if (!std::get<0>(flat_or_axis)) {
+            result << "axis=" << std::get<1>(flat_or_axis) << "_";
         } else {
             result << "flattened" << "_";
         }
         result << "sorted=" << (sorted ? "True" : "False") << "_";
-        result << "dataPrc=" << dataPrecision;
+        result << "dataPrc=" << model_type;

         return result.str();
     }

 protected:
     void SetUp() override {
-        std::vector<InputShape> inputShapes;
-        std::tuple<bool, int> flatOrAxis;
+        std::vector<InputShape> input_shapes;
+        std::tuple<bool, int> flat_or_axis;
         bool sorted, flattened;
         int axis;
-        ElementType dataPrecision;
+        ov::element::Type model_type;

-        std::tie(inputShapes, flatOrAxis, sorted, dataPrecision) = this->GetParam();
+        std::tie(input_shapes, flat_or_axis, sorted, model_type) = this->GetParam();
         targetDevice = ov::test::utils::DEVICE_GPU;
-        init_input_shapes(inputShapes);
-        flattened = std::get<0>(flatOrAxis);
+        init_input_shapes(input_shapes);
+        flattened = std::get<0>(flat_or_axis);

         ov::ParameterVector params;
         for (auto&& shape : inputDynamicShapes) {
-            params.push_back(std::make_shared<ov::op::v0::Parameter>(dataPrecision, shape));
+            params.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
         }
         params[0]->set_friendly_name("data");

         std::shared_ptr<ov::Node> uniqueNode;
         if (flattened) {
             uniqueNode = std::make_shared<ov::op::v10::Unique>(params[0], sorted);
         } else {
-            axis = std::get<1>(flatOrAxis);
+            axis = std::get<1>(flat_or_axis);
             uniqueNode = std::make_shared<ov::op::v10::Unique>(
                 params[0],
                 ov::op::v0::Constant::create(ov::element::i64, ov::Shape({1}), {axis}),
@@ -86,12 +87,12 @@ class UniqueLayerDynamicGPUTest : public testing::WithParamInterface<UniqueDynamicGPUTestParams>,
-        ngraph::ResultVector results;
+        ov::ResultVector results;
         for (size_t i = 0; i < uniqueNode->get_output_size(); ++i) {
-            results.push_back(std::make_shared<ngraph::opset1::Result>(uniqueNode->output(i)));
+            results.push_back(std::make_shared<ov::op::v0::Result>(uniqueNode->output(i)));
         }

-        function = std::make_shared<ngraph::Function>(results, params, "Unique");
+        function = std::make_shared<ov::Model>(results, params, "Unique");
     }

     void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
@@ -107,30 +108,26 @@ class UniqueLayerDynamicGPUTest : public testing::WithParamInterface<UniqueDynamicGPUTestParams>,
                 int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1, std::multiplies<size_t>());
-                tensor = utils::create_and_fill_tensor(funcInput.get_element_type(),
-                                                       targetInputStaticShapes[0],
-                                                       range,
-                                                       -range / 2,
-                                                       1);
+                ov::test::utils::InputGenerateData in_data;
+                in_data.start_from = -range / 2;
+                in_data.range = range;
+                tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[0], in_data);
             }
             inputs.insert({funcInput.get_node_shared_ptr(), tensor});
         }
     }
 };

-TEST_P(UniqueLayerDynamicGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(UniqueLayerDynamicGPUTest, Inference) {
     run();
 }

-namespace {
-
-const std::vector<ElementType> dataPrecision = {
-    ElementType::f16,
-    ElementType::i32,
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f16,
+    ov::element::i32,
 };

-std::vector<std::tuple<bool, int>> flatOrAxis{{true, 0}, {false, 0}, {false, 1}, {false, -1}};
+std::vector<std::tuple<bool, int>> flat_or_axis{{true, 0}, {false, 0}, {false, 1}, {false, -1}};

 std::vector<bool> sorted{true, false};

@@ -145,9 +142,9 @@ std::vector<std::vector<InputShape>> getStaticShapes() {
 INSTANTIATE_TEST_SUITE_P(smoke_static,
                          UniqueLayerDynamicGPUTest,
                          ::testing::Combine(::testing::ValuesIn(getStaticShapes()),
-                                            ::testing::ValuesIn(flatOrAxis),
+                                            ::testing::ValuesIn(flat_or_axis),
                                             ::testing::ValuesIn(sorted),
-                                            ::testing::ValuesIn(dataPrecision)),
+                                            ::testing::ValuesIn(model_types)),
                          UniqueLayerDynamicGPUTest::getTestCaseName);

 std::vector<std::vector<InputShape>> getDynamicShapes() {
@@ -162,10 +159,9 @@ std::vector<std::vector<InputShape>> getDynamicShapes() {
 INSTANTIATE_TEST_SUITE_P(smoke_dynamic,
                          UniqueLayerDynamicGPUTest,
                          ::testing::Combine(::testing::ValuesIn(getDynamicShapes()),
-                                            ::testing::ValuesIn(flatOrAxis),
+                                            ::testing::ValuesIn(flat_or_axis),
                                             ::testing::ValuesIn(sorted),
-                                            ::testing::ValuesIn(dataPrecision)),
+                                            ::testing::ValuesIn(model_types)),
                          UniqueLayerDynamicGPUTest::getTestCaseName);

 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp
index 4bd41f87cd51a6..97b98da4d66633 100644
--- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/tensor_iterator.cpp
@@ -2,26 +2,17 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "common_test_utils/test_constants.hpp"
-#include "ie_api.h"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-
-using namespace InferenceEngine;
-using Config = std::pair<std::string, std::map<std::string, std::string>>;
-
-namespace LayerTestsDefinitions {
+//
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/file_utils.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "common_test_utils/node_builders/convolution.hpp"
+#include "common_test_utils/test_enums.hpp"
+#include "common_test_utils/node_builders/gru_cell.hpp"
+#include "common_test_utils/node_builders/lstm_cell.hpp"
+#include "common_test_utils/node_builders/rnn_cell.hpp"

+namespace {
 using TensorIteratorWithConfigParams = typename std::tuple<
         size_t,                                 // seq_lengths
         size_t,                                 // batch
         size_t,                                 // hidden size
         // size_t,                              // input size
         size_t,                                 // sequence axis
         float,                                  // clip
-        ngraph::helpers::TensorIteratorBody,    // body type
-        ngraph::op::RecurrentSequenceDirection, // direction
-        InferenceEngine::Precision,             // Network precision
-        Config>;                                // Target device name & Configuration
+        ov::test::utils::TensorIteratorBody,    // body type
+        ov::op::RecurrentSequenceDirection,     // direction
+        ov::element::Type,                      // Model type
+        std::string>;                           // Device

 class TensorIteratorWithConfigTest : public testing::WithParamInterface<TensorIteratorWithConfigParams>,
-                                     virtual public LayerTestsUtils::LayerTestsCommon {
+                                     virtual public ov::test::SubgraphBaseStaticTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<TensorIteratorWithConfigParams> &obj) {
         size_t seq_lengths;
@@ -44,29 +35,28 @@ class TensorIteratorWithConfigTest : public testing::WithParamInterface<TensorIteratorWithConfigParams>,
         size_t batch;
         size_t hidden_size;
         size_t sequence_axis;
         float clip;
-        ngraph::helpers::TensorIteratorBody ti_body;
-        ngraph::op::RecurrentSequenceDirection direction;
-        InferenceEngine::Precision netPrecision;
-        std::pair<std::string, std::map<std::string, std::string>> config;
-        std::tie(seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision,
-                 config) = obj.param;
+        ov::test::utils::TensorIteratorBody ti_body;
+        ov::op::RecurrentSequenceDirection direction;
+        ov::element::Type model_type;
+        std::string target_device;
+        std::tie(seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, model_type, target_device) = obj.param;
         std::vector<std::vector<size_t>> inputShapes = {};

         switch (ti_body) {
-            case ngraph::helpers::TensorIteratorBody::LSTM:
+            case ov::test::utils::TensorIteratorBody::LSTM:
                 inputShapes = {
                         {{batch, input_size}, {batch, hidden_size}, {batch, hidden_size}, {4 * hidden_size, input_size},
                                 {4 * hidden_size, hidden_size}, {4 * hidden_size}},
                 };
                 break;
-            case ngraph::helpers::TensorIteratorBody::GRU:
+            case ov::test::utils::TensorIteratorBody::GRU:
                 inputShapes = {
                         {{batch, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size},
                                 {3 * hidden_size, hidden_size}, {3 * hidden_size}},
                 };
                 break;
-            case ngraph::helpers::TensorIteratorBody::RNN:
+            case ov::test::utils::TensorIteratorBody::RNN:
                 inputShapes = {{batch, input_size}, {batch, hidden_size},
                                {hidden_size, input_size}, {hidden_size, hidden_size}, {hidden_size}};
                 break;
@@ -82,17 +72,8 @@ class TensorIteratorWithConfigTest : public testing::WithParamInterface<TensorIteratorWithConfigParams>,
-                auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>(lstm_cell->output(0), axis);
-                ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(unsqueeze),
-                                             std::make_shared<ngraph::opset1::Result>(lstm_cell->output(0)),
-                                             std::make_shared<ngraph::opset1::Result>(lstm_cell->output(1))};
-                auto body = std::make_shared<ngraph::Function>(results, body_params, "lstm_cell");
+                ov::ParameterVector body_params{std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[0])),
+                                                std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[1])),
+                                                std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[2]))};
+                auto squeeze = std::make_shared<ov::op::v0::Squeeze>(body_params[0], axis);
+                std::vector<ov::Shape> WRB = {inputShapes[3], inputShapes[4], inputShapes[5]};
+                ov::OutputVector out_vector = {squeeze, body_params[1], body_params[2]};
+                auto lstm_cell = ov::test::utils::make_lstm(out_vector, WRB, hidden_size, {"sigmoid", "tanh", "tanh"}, {}, {}, clip);
+                auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(lstm_cell->output(0), axis);
+                ov::ResultVector results{std::make_shared<ov::op::v0::Result>(unsqueeze),
+                                         std::make_shared<ov::op::v0::Result>(lstm_cell->output(0)),
+                                         std::make_shared<ov::op::v0::Result>(lstm_cell->output(1))};
+                auto body = std::make_shared<ov::Model>(results, body_params, "lstm_cell");
                 tensor_iterator->set_function(body);

                 // 2. Set PortMap
-                if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) {
+                if (direction == ov::op::RecurrentSequenceDirection::FORWARD) {
                     tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis);
                     tensor_iterator->get_concatenated_slices(results[0], 0, 1, 1, -1, sequence_axis);
-                } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) {
+                } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) {
                     tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis);
                     tensor_iterator->get_concatenated_slices(results[0], -1, -1, 1, 0, sequence_axis);
                 } else {
-                    NGRAPH_CHECK(false, "Bidirectional case is not supported.");
+                    OPENVINO_THROW("Bidirectional case is not supported.");
                 }

                 tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[1]);
@@ -169,11 +146,11 @@ class TensorIteratorWithConfigTest : public testing::WithParamInterface<TensorIteratorWithConfigParams>,
                 tensor_iterator->get_iter_value(results[2]);

                 // 3. Outer function
-                function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1),
+                function = std::make_shared<ov::Model>(ov::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1),
                     tensor_iterator->output(2)}, outer_params);
                 break;
             }
-            case ngraph::helpers::TensorIteratorBody::GRU: {
+            case ov::test::utils::TensorIteratorBody::GRU: {
                 inputShapes = {
                         {{batch, seq_lengths, input_size}, {batch, hidden_size}, {3 * hidden_size, input_size},
                                 {3 * hidden_size, hidden_size}, {3 * hidden_size}},
@@ -182,43 +159,43 @@ class TensorIteratorWithConfigTest : public testing::WithParamInterface<TensorIteratorWithConfigParams>,
-                ov::ParameterVector outer_params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[0])),
-                                                 std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[1]))};
+                ov::ParameterVector outer_params{std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[0])),
+                                                 std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[1]))};

                 // 1. Create TensorIterator body.
                 inputShapes[0][sequence_axis] = 1; // sliced dimension
-                ov::ParameterVector body_params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[0])),
-                                                std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[1]))};
-                std::vector<ngraph::Shape> WRB = {inputShapes[2], inputShapes[3], inputShapes[4]};
-                auto squeeze = std::make_shared<ngraph::opset1::Squeeze>(body_params[0], axis);
-                ngraph::OutputVector out_vector = {squeeze, body_params[1]};
-                auto gru_cell = ngraph::builder::makeGRU(out_vector, WRB, hidden_size, {"sigmoid", "tanh"},
-                                                         {}, {}, clip, false);
-                auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>(gru_cell->output(0), axis);
-                ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(gru_cell->output(0)),
-                                             std::make_shared<ngraph::opset1::Result>(unsqueeze)};
-                auto body = std::make_shared<ngraph::Function>(results, body_params, "gru_cell");
+                ov::ParameterVector body_params{std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[0])),
+                                                std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[1]))};
+                std::vector<ov::Shape> WRB = {inputShapes[2], inputShapes[3], inputShapes[4]};
+                auto squeeze = std::make_shared<ov::op::v0::Squeeze>(body_params[0], axis);
+                ov::OutputVector out_vector = {squeeze, body_params[1]};
+                auto gru_cell = ov::test::utils::make_gru(out_vector, WRB, hidden_size, {"sigmoid", "tanh"},
+                                                          {}, {}, clip, false);
+                auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(gru_cell->output(0), axis);
+                ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gru_cell->output(0)),
+                                         std::make_shared<ov::op::v0::Result>(unsqueeze)};
+                auto body = std::make_shared<ov::Model>(results, body_params, "gru_cell");
                 tensor_iterator->set_function(body);

                 // 2. Set PortMap
-                if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) {
+                if (direction == ov::op::RecurrentSequenceDirection::FORWARD) {
                     tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis);
                     tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis);
-                } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) {
+                } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) {
                     tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis);
                     tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis);
                 } else {
-                    NGRAPH_CHECK(false, "Bidirectional case is not supported.");
+                    OPENVINO_THROW("Bidirectional case is not supported.");
                 }

                 tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[0]);
                 tensor_iterator->get_iter_value(results[0]);

                 // 3. Outer function
-                function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params);
+                function = std::make_shared<ov::Model>(ov::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params);
                 break;
             }
-            case ngraph::helpers::TensorIteratorBody::RNN: {
+            case ov::test::utils::TensorIteratorBody::RNN: {
                 inputShapes = {{batch, seq_lengths, input_size},
                                {batch, hidden_size},
                                {hidden_size, input_size},
@@ -228,77 +205,69 @@ class TensorIteratorWithConfigTest : public testing::WithParamInterface<TensorIteratorWithConfigParams>,
-                ov::ParameterVector outer_params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[0])),
-                                                 std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[1]))};
+                ov::ParameterVector outer_params{std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[0])),
+                                                 std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[1]))};

                 // 1. Create TensorIterator body.
                 inputShapes[0][sequence_axis] = 1; // sliced dimension
-                ov::ParameterVector body_params{std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[0])),
-                                                std::make_shared<ov::op::v0::Parameter>(ngPrc, ov::Shape(inputShapes[1]))};
-                std::vector<ngraph::Shape> WRB = {inputShapes[2], inputShapes[3], inputShapes[4]};
-                auto squeeze = std::make_shared<ngraph::opset1::Squeeze>(body_params[0], axis);
-                ngraph::OutputVector out_vector = {squeeze, body_params[1]};
-                auto rnn_cell = ngraph::builder::makeRNN(out_vector, WRB, hidden_size, {"tanh"}, {}, {}, clip);
-                auto unsqueeze = std::make_shared<ngraph::opset1::Unsqueeze>(rnn_cell->output(0), axis);
-                ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(rnn_cell),
-                                             std::make_shared<ngraph::opset1::Result>(unsqueeze)};
-                auto body = std::make_shared<ngraph::Function>(results, body_params, "rnn_cell");
+                ov::ParameterVector body_params{std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[0])),
+                                                std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape(inputShapes[1]))};
+                std::vector<ov::Shape> WRB = {inputShapes[2], inputShapes[3], inputShapes[4]};
+                auto squeeze = std::make_shared<ov::op::v0::Squeeze>(body_params[0], axis);
+                ov::OutputVector out_vector = {squeeze, body_params[1]};
+                auto rnn_cell = ov::test::utils::make_rnn(out_vector, WRB, hidden_size, {"tanh"}, {}, {}, clip);
+                auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(rnn_cell->output(0), axis);
+                ov::ResultVector results{std::make_shared<ov::op::v0::Result>(rnn_cell),
+                                         std::make_shared<ov::op::v0::Result>(unsqueeze)};
+                auto body = std::make_shared<ov::Model>(results, body_params, "rnn_cell");
                 tensor_iterator->set_function(body);

                 // 2. Set PortMap
-                if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) {
+                if (direction == ov::op::RecurrentSequenceDirection::FORWARD) {
                     tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis);
                     tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis);
-                } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) {
+                } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) {
                     tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis);
                     tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis);
                 } else {
-                    NGRAPH_CHECK(false, "Bidirectional case is not supported.");
+                    OPENVINO_THROW("Bidirectional case is not supported.");
                 }

                 tensor_iterator->set_merged_input(body_params[1], outer_params[1], results[0]);
                 tensor_iterator->get_iter_value(results[0]);

                 // 3. Outer function
-                function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params);
+                function = std::make_shared<ov::Model>(ov::OutputVector{tensor_iterator->output(0), tensor_iterator->output(1)}, outer_params);
                 break;
             }
         }
     }
 };

-TEST_P(TensorIteratorWithConfigTest, CompareWithRefs) {
-    Run();
+TEST_P(TensorIteratorWithConfigTest, Inference) {
+    run();
 };

-} // namespace LayerTestsDefinitions
-
-using namespace LayerTestsDefinitions;
-
-namespace {
-
-INSTANTIATE_TEST_SUITE_P(smoke_TensorIteratorCommon, TensorIteratorWithConfigTest,
-                        ::testing::Combine(
-                                ::testing::ValuesIn(std::vector<size_t> {2, 4}), // seq lengths
-                                ::testing::ValuesIn(std::vector<size_t> {1}), // only single batch supported
-                                ::testing::ValuesIn(std::vector<size_t> {2, 4}), // hidden size
-                                ::testing::ValuesIn(std::vector<size_t> {0, 1}), // seq axis
-                                ::testing::ValuesIn(std::vector<float> {0.f}), // clip - not used
-                                ::testing::ValuesIn(std::vector<ngraph::helpers::TensorIteratorBody> {
-                                        ngraph::helpers::TensorIteratorBody::LSTM,
-                                        ngraph::helpers::TensorIteratorBody::RNN,
-                                        ngraph::helpers::TensorIteratorBody::GRU,
-                                }), // body type
-                                ::testing::ValuesIn(std::vector<ngraph::op::RecurrentSequenceDirection>{
-                                        ngraph::op::RecurrentSequenceDirection::FORWARD,
-                                        ngraph::op::RecurrentSequenceDirection::REVERSE,
-                                }),
-                                ::testing::ValuesIn(std::vector<InferenceEngine::Precision> {
-                                        InferenceEngine::Precision::FP32,
-                                        InferenceEngine::Precision::FP16,
-                                }), // precision
-                                ::testing::ValuesIn(std::vector<Config> {
-                                        {ov::test::utils::DEVICE_GPU, {{GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING, PluginConfigParams::YES}}},
-                                        {ov::test::utils::DEVICE_GPU, {{GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING, PluginConfigParams::NO}}}
-                                })), // configuration
-                        TensorIteratorWithConfigTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_TensorIteratorCommon, TensorIteratorWithConfigTest,
+                         ::testing::Combine(
+                                 ::testing::ValuesIn(std::vector<size_t> {2, 4}), // seq lengths
+                                 ::testing::ValuesIn(std::vector<size_t> {1}), // only single batch supported
+                                 ::testing::ValuesIn(std::vector<size_t> {2, 4}), // hidden size
+                                 ::testing::ValuesIn(std::vector<size_t> {0, 1}), // seq axis
+                                 ::testing::ValuesIn(std::vector<float> {0.f}), // clip - not used
+                                 ::testing::ValuesIn(std::vector<ov::test::utils::TensorIteratorBody> {
+                                         ov::test::utils::TensorIteratorBody::LSTM,
+                                         ov::test::utils::TensorIteratorBody::RNN,
+                                         ov::test::utils::TensorIteratorBody::GRU,
+                                 }), // body type
+                                 ::testing::ValuesIn(std::vector<ov::op::RecurrentSequenceDirection>{
+                                         ov::op::RecurrentSequenceDirection::FORWARD,
+                                         ov::op::RecurrentSequenceDirection::REVERSE,
+                                 }),
+                                 ::testing::ValuesIn(std::vector<ov::element::Type> {
+                                         ov::element::f32,
+                                         ov::element::f16,
+                                 }),
+                                 ::testing::Values(ov::test::utils::DEVICE_GPU)),
+                         TensorIteratorWithConfigTest::getTestCaseName);
 } // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp
index 99359ba07aa6ba..45d5a355d5ee20 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/condition.cpp
@@ -2,26 +2,27 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "common_test_utils/test_constants.hpp"
-#include "shared_test_classes/base/utils/ranges.hpp"
-#include
-#include "shared_test_classes/base/utils/compare_results.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
 #include "openvino/pass/constant_folding.hpp"
-
-
-using namespace InferenceEngine;
-using namespace ov::test;
-
-namespace GPULayerTestsDefinitions {
-
+#include "ov_models/utils/ov_helpers.hpp"
+#include "common_test_utils/node_builders/reduce.hpp"
+
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/greater_eq.hpp"
+#include "openvino/op/if.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/max_pool.hpp"
+#include "openvino/op/avg_pool.hpp"
+
+namespace {
 class InnerBodyGenerator {
 public:
     using ptr = std::shared_ptr<InnerBodyGenerator>;
@@ -65,140 +66,143 @@ enum InnerBodyType {
 public:
     InnerBodyGenerator() { }

-    virtual std::shared_ptr<ngraph::Function> get_function() { return _func; }
-    virtual std::shared_ptr<ngraph::opset9::Parameter> get_input() { return _param; }
-    virtual std::shared_ptr<ngraph::opset9::Result> get_result() { return _result; }
+    virtual std::shared_ptr<ov::Model> get_function() { return _func; }
+    virtual std::shared_ptr<ov::op::v0::Parameter> get_input() { return _param; }
+    virtual std::shared_ptr<ov::op::v0::Result> get_result() { return _result; }

-    // virtual void create_body(ngraph::Shape input_shape, ngraph::element::Type prc) {
-    virtual void create_body(ov::PartialShape& input_shape, ngraph::element::Type prc) {
+    // virtual void create_body(ov::Shape input_shape, ov::element::Type prc) {
+    virtual void create_body(ov::PartialShape& input_shape, ov::element::Type prc) {
         _func = generate(input_shape, prc);
         _param = (_func->get_parameters().size() > 0)? _func->get_parameters().front() : nullptr;
         _result = _func->get_results().front();
     }

 protected:
-    virtual std::shared_ptr<ngraph::Function> generate(ov::PartialShape& input_shape, ngraph::element::Type prc) = 0;
+    virtual std::shared_ptr<ov::Model> generate(ov::PartialShape& input_shape, ov::element::Type prc) = 0;

-    std::shared_ptr<ngraph::Function> _func;
-    std::shared_ptr<ngraph::opset9::Parameter> _param;
-    std::shared_ptr<ngraph::opset9::Result> _result;
+    std::shared_ptr<ov::Model> _func;
+    std::shared_ptr<ov::op::v0::Parameter> _param;
+    std::shared_ptr<ov::op::v0::Result> _result;
 };

 class InnerBodyType01 : public InnerBodyGenerator {
 protected:
-    std::shared_ptr<ngraph::Function> generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override {
-        auto constantA = ngraph::opset9::Constant::create(prc, ov::Shape(input_shape.rank().get_length(), 2), {2.0f});
+    std::shared_ptr<ov::Model> generate(ov::PartialShape& input_shape, ov::element::Type prc) override {
+        auto constantA = ov::op::v0::Constant::create(prc, ov::Shape(input_shape.rank().get_length(), 2), {2.0f});
         constantA->set_friendly_name("body1_constantA");
-        auto constantB = ngraph::opset9::Constant::create(prc, ov::Shape(input_shape.rank().get_length(), 2), {12.0f});
+
+        auto constantB = ov::op::v0::Constant::create(prc, ov::Shape(input_shape.rank().get_length(), 2), {12.0f});
         constantB->set_friendly_name("body1_constantB");
-        auto add = std::make_shared<ngraph::opset9::Add>(constantA, constantB);
+
+        auto add = std::make_shared<ov::op::v1::Add>(constantA, constantB);
         add->set_friendly_name("body1_add");
-        auto result = std::make_shared<ngraph::opset9::Result>(add);
-        auto o_layout = result->get_layout();
+
+        auto result = std::make_shared<ov::op::v0::Result>(add);
         result->set_friendly_name("body1_result");
-        auto body = std::make_shared<ngraph::Function>(
-            ngraph::OutputVector {result},
-            ngraph::ParameterVector{},
-            "constant");
+
+        auto body = std::make_shared<ov::Model>(ov::OutputVector{result}, ov::ParameterVector{}, "constant");
         return body;
     }
 };

 class InnerBodyType02 : public InnerBodyGenerator {
 protected:
-    std::shared_ptr<ngraph::Function> generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override {
std::make_shared(prc, ngraph::Shape{}, 10.0f); + std::shared_ptr generate(ov::PartialShape& input_shape, ov::element::Type prc) override { + auto constant = std::make_shared(prc, ov::Shape{}, 10.0f); constant->set_friendly_name("body2_const"); - auto data = std::make_shared(prc, input_shape); + + auto data = std::make_shared(prc, input_shape); data->set_friendly_name("body2_data"); - auto sum = std::make_shared(data, constant); + + auto sum = std::make_shared(data, constant); sum->set_friendly_name("body2_mul"); - auto result = std::make_shared(sum); + + auto result = std::make_shared(sum); result->set_friendly_name("body2_result"); - auto body = std::make_shared( - ngraph::OutputVector {result}, - ngraph::ParameterVector{data}, - "eltwise_mul"); + + auto body = std::make_shared(ov::OutputVector{result}, ov::ParameterVector{data}, "eltwise_mul"); return body; } }; class InnerBodyType03 : public InnerBodyGenerator { protected: - std::shared_ptr generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override { - auto constant = std::make_shared(prc, ngraph::Shape{}, 2.0f); + std::shared_ptr generate(ov::PartialShape& input_shape, ov::element::Type prc) override { + auto constant = std::make_shared(prc, ov::Shape{}, 2.0f); constant->set_friendly_name("body3_constant"); - auto data = std::make_shared(prc, input_shape); + + auto data = std::make_shared(prc, input_shape); data->set_friendly_name("body3_data"); - auto add = std::make_shared(data, constant); + + auto add = std::make_shared(data, constant); add->set_friendly_name("body3_add"); - auto result = std::make_shared(add); + + auto result = std::make_shared(add); result->set_friendly_name("body3_result"); - auto body = std::make_shared( - ngraph::OutputVector {result}, - ngraph::ParameterVector{data}, - "eltwise_sum"); + + auto body = std::make_shared(ov::OutputVector{result}, ov::ParameterVector{data}, "eltwise_sum"); return body; } }; class InnerBodyType04 : public InnerBodyGenerator { protected: - std::shared_ptr generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override { - auto scale = std::make_shared(prc, ngraph::Shape{}, 2.0f); + std::shared_ptr generate(ov::PartialShape& input_shape, ov::element::Type prc) override { + auto scale = std::make_shared(prc, ov::Shape{}, 2.0f); scale->set_friendly_name("body4_scale"); - auto data = std::make_shared(prc, input_shape); + + auto data = std::make_shared(prc, input_shape); data->set_friendly_name("body4_data"); - auto mul = std::make_shared(data, scale); + + auto mul = std::make_shared(data, scale); mul->set_friendly_name("body4_mul"); - auto pooling = generate_pooling(mul, input_shape); + + auto pooling = generate_pooling(mul, input_shape); pooling->set_friendly_name("body4_pool"); - auto result = std::make_shared(pooling); + + auto result = std::make_shared(pooling); result->set_friendly_name("body4_result"); - auto body = std::make_shared( - ngraph::OutputVector {result}, - ngraph::ParameterVector{data}, - "eltwise_mul_pooling"); + + auto body = std::make_shared(ov::OutputVector{result}, ov::ParameterVector{data}, "eltwise_mul_pooling"); return body; } - struct poolSpecificParams { - ngraph::helpers::PoolingTypes pooling_type; // Pooling type, max or avg - std::vector kernel_size; // Kernel size - std::vector stride; // Stride - std::vector pad_begin; // Pad begin - std::vector pad_end; // Pad end - ngraph::op::RoundingType rounding_type; // Rounding type - ngraph::op::PadType pad_type; // Pad type - bool exclued_pad; // Exclude pad + 
ov::test::utils::PoolingTypes pooling_type; // Pooling type, max or avg + std::vector kernel_size; // Kernel size + std::vector stride; // Stride + std::vector pad_begin; // Pad begin + std::vector pad_end; // Pad end + ov::op::RoundingType rounding_type; // Rounding type + ov::op::PadType pad_type; // Pad type + bool exclued_pad; // Exclude pad }; - std::shared_ptr generate_pooling(const ngraph::Output &in, ov::PartialShape& input_shape) { + std::shared_ptr generate_pooling(const ov::Output &in, ov::PartialShape& input_shape) { poolSpecificParams params; switch (input_shape.rank().get_length()) { case 5: { - params = poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, + params = poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2, 2}, {1, 1, 1}, {0, 0, 0}, {0, 0, 0}, - ngraph::op::RoundingType::CEIL, - ngraph::op::PadType::SAME_LOWER, true }; + ov::op::RoundingType::CEIL, + ov::op::PadType::SAME_LOWER, true }; break; } case 4: { - params = poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, + params = poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2, 2}, {2, 2}, {0, 0}, {0, 0}, - ngraph::op::RoundingType::CEIL, - ngraph::op::PadType::SAME_LOWER, true }; + ov::op::RoundingType::CEIL, + ov::op::PadType::SAME_LOWER, true }; break; } case 3: { - params = poolSpecificParams{ ngraph::helpers::PoolingTypes::MAX, + params = poolSpecificParams{ ov::test::utils::PoolingTypes::MAX, {2}, {2}, {0}, {0}, - ngraph::op::RoundingType::CEIL, - ngraph::op::PadType::SAME_LOWER, true }; + ov::op::RoundingType::CEIL, + ov::op::PadType::SAME_LOWER, true }; break; } default: @@ -229,29 +233,33 @@ class InnerBodyType04 : public InnerBodyGenerator { class InnerBodyType05 : public InnerBodyGenerator { protected: - std::shared_ptr generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override { - auto constant = std::make_shared(prc, ngraph::Shape{}, 2.0f); + std::shared_ptr generate(ov::PartialShape& input_shape, ov::element::Type prc) override { + auto constant = std::make_shared(prc, ov::Shape{}, 2.0f); constant->set_friendly_name("body5_constant"); - auto data = std::make_shared(prc, input_shape); + + auto data = std::make_shared(prc, input_shape); data->set_friendly_name("body5_data"); - auto add = std::make_shared(data, constant); + + auto add = std::make_shared(data, constant); add->set_friendly_name("body5_add"); + std::vector axes; for (int i = 0, r = 0; i < input_shape.rank().get_length(); i++) { axes.push_back(r--); } + std::vector shapeAxes; shapeAxes.push_back(axes.size()); - auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + std::shared_ptr reductionAxesNode = std::make_shared(ov::element::i64, ov::Shape(shapeAxes), axes); - const auto reduce = ngraph::builder::makeReduce(add, reductionAxesNode, false, ngraph::helpers::ReductionType::Min); + const auto reduce = ov::test::utils::make_reduce(add, reductionAxesNode, false, ov::test::utils::ReductionType::Min); reduce->set_friendly_name("body5_reduce"); - auto constant_ref = std::make_shared(prc, ngraph::Shape{}, 10.0f); + + auto constant_ref = std::make_shared(prc, ov::Shape{}, 10.0f); constant_ref->set_friendly_name("body5_ref_constant"); - auto pred = std::make_shared(reduce, constant_ref); + auto pred = std::make_shared(reduce, constant_ref); pred->set_friendly_name("nested_pred"); auto nested_body_then_generator = std::make_shared(); @@ -263,68 +271,62 @@ class InnerBodyType05 : public InnerBodyGenerator { 
         nested_body_then_generator->get_function()->set_friendly_name("nested_then_inner_body");
         nested_body_else_generator->get_function()->set_friendly_name("nested_else_inner_body");
 
-        auto cond_nested = std::make_shared(pred);
+        auto cond_nested = std::make_shared<ov::op::v8::If>(pred);
         cond_nested->set_friendly_name("if_operator_nested");
         cond_nested->set_else_body(nested_body_else_generator->get_function());
         cond_nested->set_then_body(nested_body_then_generator->get_function());
         cond_nested->set_input(add, nested_body_then_generator->get_input(), nested_body_else_generator->get_input());
         cond_nested->set_output(nested_body_then_generator->get_result(), nested_body_else_generator->get_result());
 
-        auto result = std::make_shared(cond_nested);
+        auto result = std::make_shared<ov::op::v0::Result>(cond_nested);
         result->set_friendly_name("body5_result");
-        auto body = std::make_shared(
-            ngraph::OutputVector {result},
-            ngraph::ParameterVector{data},
-            "eltwise_sum");
+
+        auto body = std::make_shared<ov::Model>(ov::OutputVector {result}, ov::ParameterVector{data}, "eltwise_sum");
         return body;
     }
 };
 
 class InnerBodyType06 : public InnerBodyGenerator {
 protected:
-    std::shared_ptr generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override {
-        auto constant = ngraph::opset9::Constant::create(prc, ov::Shape(input_shape.rank().get_length(), 1), {2.0f});
+    std::shared_ptr<ov::Model> generate(ov::PartialShape& input_shape, ov::element::Type prc) override {
+        auto constant = ov::op::v0::Constant::create(prc, ov::Shape(input_shape.rank().get_length(), 1), {2.0f});
         constant->set_friendly_name("body6_constant");
-        auto result = std::make_shared(constant);
-        auto o_layout = result->get_layout();
+
+        auto result = std::make_shared<ov::op::v0::Result>(constant);
         result->set_friendly_name("body6_result");
-        auto body = std::make_shared(
-            ngraph::OutputVector {result},
-            ngraph::ParameterVector{},
-            "constant_only");
+
+        auto body = std::make_shared<ov::Model>(ov::OutputVector{result}, ov::ParameterVector{}, "constant_only");
         return body;
     }
 };
 
 class InnerBodyType07 : public InnerBodyGenerator {
 protected:
-    std::shared_ptr generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override {
-        auto constant = ngraph::opset9::Constant::create(prc, input_shape.to_shape(), {2.0f});
+    std::shared_ptr<ov::Model> generate(ov::PartialShape& input_shape, ov::element::Type prc) override {
+        auto constant = ov::op::v0::Constant::create(prc, input_shape.to_shape(), {2.0f});
         constant->set_friendly_name("body7_constant");
-        auto result = std::make_shared(constant);
-        auto o_layout = result->get_layout();
+
+        auto result = std::make_shared<ov::op::v0::Result>(constant);
         result->set_friendly_name("body7_result");
-        auto body = std::make_shared(
-            ngraph::OutputVector {result},
-            ngraph::ParameterVector{},
-            "constant_to_result");
+
+        auto body = std::make_shared<ov::Model>(ov::OutputVector{result}, ov::ParameterVector{}, "constant_to_result");
         return body;
     }
 };
 
 class InnerBodyType08 : public InnerBodyGenerator {
 protected:
-    std::shared_ptr generate(ov::PartialShape& input_shape, ngraph::element::Type prc) override {
-        auto constant = std::make_shared(prc, ngraph::Shape{}, 10.0f);
+    std::shared_ptr<ov::Model> generate(ov::PartialShape& input_shape, ov::element::Type prc) override {
+        auto constant = std::make_shared<ov::op::v0::Constant>(prc, ov::Shape{}, 10.0f);
         constant->set_friendly_name("body8_const");
-        auto data = std::make_shared(prc, input_shape);
+
+        auto data = std::make_shared<ov::op::v0::Parameter>(prc, input_shape);
         data->set_friendly_name("body8_data");
-        auto result = std::make_shared(data);
+
+        auto result = std::make_shared<ov::op::v0::Result>(data);
         result->set_friendly_name("body8_result");
-        auto body = std::make_shared(
-            ngraph::OutputVector {result},
-            ngraph::ParameterVector{data},
-            "parameter_to_result");
+
+        auto body = std::make_shared<ov::Model>(ov::OutputVector{result}, ov::ParameterVector{data}, "parameter_to_result");
         return body;
     }
 };
@@ -380,66 +382,69 @@ class TestModelGenerator {
 public:
     TestModelGenerator(InnerBodyGenerator::InnerBodyType then_body_type,
-                        InnerBodyGenerator::InnerBodyType else_body_type,
-                        PredicateTypes pred_type,
-                        ngraph::element::Type prc,
-                        ov::PartialShape input_shape,
-                        bool cond_execution_value = false) {
-            body_then_generator = get_inner_body_generator(then_body_type);
-            body_else_generator = get_inner_body_generator(else_body_type);
-
-            body_then_generator->create_body(input_shape, prc);
-            body_else_generator->create_body(input_shape, prc);
-            body_else_generator->get_function()->set_friendly_name("else_inner_body");
-            body_then_generator->get_function()->set_friendly_name("then_inner_body");
-
-            ngraph::ParameterVector params{};
-            auto predicate = create_cond_execution(pred_type, params, ngraph::element::boolean, ngraph::Shape{});
-            predicate->set_friendly_name("if_predicate");
-            auto data = create_condition_input(params, prc, input_shape);
-            data->set_friendly_name("input_data");
-            auto cond = std::make_shared(predicate);
-            cond->set_friendly_name("if_operator");
-            cond->set_else_body(body_else_generator->get_function());
-            cond->set_then_body(body_then_generator->get_function());
-            cond->set_input(data, body_then_generator->get_input(), body_else_generator->get_input());
-            cond->set_output(body_then_generator->get_result(), body_else_generator->get_result());
-            if (then_body_type == InnerBodyGenerator::InnerBodyType::Type06 || else_body_type == InnerBodyGenerator::InnerBodyType::Type06) {
-                auto constant = create_condition_input(params, prc, ngraph::Shape{1}, 0, true);
-                auto addition = std::make_shared(cond, constant);
-                auto shapeof1 = std::make_shared(addition);
-                auto convert = std::make_shared(shapeof1, prc);
-                auto mul = std::make_shared(convert, constant);
-                auto shapePatternsNode = create_condition_input(params, ov::element::Type_t::i64, ngraph::Shape{1}, 0, true);
-                auto reshapeOp = std::make_shared(mul, shapePatternsNode, true);
-                auto result = std::make_shared(reshapeOp);
-                result->set_friendly_name("outer_result");
-                function = std::make_shared(ngraph::OutputVector {result}, params);
-            } else {
-                auto result = std::make_shared(cond);
-                result->set_friendly_name("outer_result");
-                function = std::make_shared(ngraph::OutputVector {result}, params);
-            }
-    }
-    std::shared_ptr get_function() { return function; }
+                       InnerBodyGenerator::InnerBodyType else_body_type,
+                       PredicateTypes pred_type,
+                       ov::element::Type prc,
+                       ov::PartialShape input_shape,
+                       bool cond_execution_value = false) {
+        body_then_generator = get_inner_body_generator(then_body_type);
+        body_else_generator = get_inner_body_generator(else_body_type);
+
+        body_then_generator->create_body(input_shape, prc);
+        body_else_generator->create_body(input_shape, prc);
+        body_else_generator->get_function()->set_friendly_name("else_inner_body");
+        body_then_generator->get_function()->set_friendly_name("then_inner_body");
+
+        ov::ParameterVector params{};
+        auto predicate = create_cond_execution(pred_type, params, ov::element::boolean, ov::Shape{});
+        predicate->set_friendly_name("if_predicate");
+
+        auto data = create_condition_input(params, prc, input_shape);
+        data->set_friendly_name("input_data");
+
+        auto cond = std::make_shared<ov::op::v8::If>(predicate);
+        cond->set_friendly_name("if_operator");
+        cond->set_else_body(body_else_generator->get_function());
+        cond->set_then_body(body_then_generator->get_function());
+        cond->set_input(data, body_then_generator->get_input(), body_else_generator->get_input());
+        cond->set_output(body_then_generator->get_result(), body_else_generator->get_result());
+
+        if (then_body_type == InnerBodyGenerator::InnerBodyType::Type06 || else_body_type == InnerBodyGenerator::InnerBodyType::Type06) {
+            auto constant = create_condition_input(params, prc, ov::Shape{1}, 0, true);
+            auto addition = std::make_shared<ov::op::v1::Add>(cond, constant);
+            auto shapeof1 = std::make_shared<ov::op::v3::ShapeOf>(addition);
+            auto convert = std::make_shared<ov::op::v0::Convert>(shapeof1, prc);
+            auto mul = std::make_shared<ov::op::v1::Multiply>(convert, constant);
+            auto shapePatternsNode = create_condition_input(params, ov::element::i64, ov::Shape{1}, 0, true);
+            auto reshapeOp = std::make_shared<ov::op::v1::Reshape>(mul, shapePatternsNode, true);
+            auto result = std::make_shared<ov::op::v0::Result>(reshapeOp);
+            result->set_friendly_name("outer_result");
+            function = std::make_shared<ov::Model>(ov::OutputVector {result}, params);
+        } else {
+            auto result = std::make_shared<ov::op::v0::Result>(cond);
+            result->set_friendly_name("outer_result");
+            function = std::make_shared<ov::Model>(ov::OutputVector {result}, params);
+        }
+    }
+    std::shared_ptr<ov::Model> get_function() { return function; }
 
 private:
-    std::shared_ptr create_condition_input(ngraph::ParameterVector& params,
-        const ngraph::element::Type prc, const ov::PartialShape& shape,
+    std::shared_ptr<ov::Node> create_condition_input(ov::ParameterVector& params,
+        const ov::element::Type prc, const ov::PartialShape& shape,
         int value = 0, bool is_static = false) {
         if (is_static)
-            return std::make_shared(prc, shape.to_shape(), value);
+            return std::make_shared<ov::op::v0::Constant>(prc, shape.to_shape(), value);
 
-        auto input = std::make_shared(prc, shape);
+        auto input = std::make_shared<ov::op::v0::Parameter>(prc, shape);
         params.push_back(input);
         return input;
     }
 
-    std::shared_ptr create_cond_execution(PredicateTypes pred_type,
-                                            ngraph::ParameterVector& params,
-                                            const ngraph::element::Type prc = ngraph::element::u8,
-                                            const ngraph::Shape shape = ngraph::Shape{}) {
-        std::shared_ptr pred;
+    std::shared_ptr<ov::Node> create_cond_execution(PredicateTypes pred_type,
+                                                    ov::ParameterVector& params,
+                                                    const ov::element::Type prc = ov::element::u8,
+                                                    const ov::Shape shape = ov::Shape{}) {
+        std::shared_ptr<ov::Node> pred;
         switch (pred_type) {
             case PredicateTypes::PARAM:
             {
@@ -450,9 +455,9 @@ class TestModelGenerator {
             {
                 auto param_cond = create_condition_input(params, prc, shape);
                 param_cond->set_friendly_name("param_cond");
-                auto const_cond = create_condition_input(params, prc, ngraph::Shape{}, 1, true);
+                auto const_cond = create_condition_input(params, prc, ov::Shape{}, 1, true);
                 const_cond->set_friendly_name("const_cond");
-                pred = std::make_shared(param_cond, const_cond);
+                pred = std::make_shared<ov::op::v1::GreaterEqual>(param_cond, const_cond);
                 pred->set_friendly_name("pred");
                 break;
             }
@@ -465,7 +470,7 @@ class TestModelGenerator {
     }
 
 private:
-    std::shared_ptr function;
+    std::shared_ptr<ov::Model> function;
     InnerBodyGenerator::ptr body_then_generator;
     InnerBodyGenerator::ptr body_else_generator;
 };
@@ -543,25 +548,25 @@ static std::ostream& operator<<(std::ostream& os, const TestModelGenerator::Pred
 }
 
 using ConditionParams = typename std::tuple<
-    InferenceEngine::SizeVector,        // Shape
-    InferenceEngine::Precision,         // Precision
+    ov::Shape,                          // Shape
+    ov::element::Type,                  // Precision
     TestModelGenerator::PredicateTypes, // if predicate type
-    LayerTestsUtils::TargetDevice       // Device name
->;
+    std::string>;                       // Device name
+
 class StaticConditionLayerGPUTest : public testing::WithParamInterface<ConditionParams>,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
+                                    virtual public ov::test::SubgraphBaseStaticTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<ConditionParams>& obj) {
-        InferenceEngine::SizeVector data_shape;
-        InferenceEngine::Precision data_prc;
+        ov::Shape data_shape;
+        ov::element::Type model_type;
         TestModelGenerator::PredicateTypes pred;
         std::string targetDevice;
-        std::tie(data_shape, data_prc, pred, targetDevice) = obj.param;
+        std::tie(data_shape, model_type, pred, targetDevice) = obj.param;
         std::ostringstream result;
         result << "IS=" << ov::test::utils::vec2str(data_shape) << "_";
-        result << "netPRC=" << std::to_string(data_prc) << "_";
+        result << "netPRC=" << model_type << "_";
         result << "ifCond=" << pred << "_";
         result << "targetDevice=" << targetDevice << "_";
         auto res_str = result.str();
@@ -573,60 +578,53 @@ class StaticConditionLayerGPUTest : public testing::WithParamInterface
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
-        auto tensor_desc = info.getTensorDesc();
-        auto blob = make_blob_with_precision(tensor_desc);
-        blob->allocate();
-
-        if (tensor_desc.getLayout() == InferenceEngine::SCALAR) {
-            auto prc = tensor_desc.getPrecision();
-            auto scalar_1d = ov::test::utils::make_reshape_view(blob, {1});
-            if (prc == InferenceEngine::Precision::BOOL) {
-                auto mem_blob = dynamic_cast(blob.get());
-                auto mem = mem_blob->rwmap();
-                auto data_ptr = mem.as();
-                *data_ptr = false;
+    void generate_inputs(const std::vector<ov::Shape>& target_input_static_shapes) override {
+        inputs.clear();
+        const auto& model_inputs = function->inputs();
+        for (size_t i = 0; i < model_inputs.size(); ++i) {
+            const auto& model_input = model_inputs[i];
+            auto type = model_input.get_element_type();
+            ov::Tensor tensor;
+            if (ov::element::boolean == type) {
+                tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), target_input_static_shapes[i], 0, 0);
             } else {
-                ov::test::utils::fill_data_with_broadcast(scalar_1d, 0, {20.f});
+                tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), target_input_static_shapes[i], 0, 20);
             }
-        } else {
-            ov::test::utils::fill_data_with_broadcast(blob, 0, {20.f});
+            inputs.insert({model_input.get_node_shared_ptr(), tensor});
         }
-        return blob;
     }
 
-    InferenceEngine::SizeVector data_shape;
-    InferenceEngine::Precision data_prc;
+    ov::Shape data_shape;
+    ov::element::Type model_type;
 };
 
 TEST_P(StaticConditionLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+    run();
 }
 
-std::vector<InferenceEngine::Precision> netPrecisions_static = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16,
-    InferenceEngine::Precision::I8
+std::vector<ov::element::Type> netPrecisions_static = {
+    ov::element::f32,
+    ov::element::f16,
+    ov::element::i8
 };
 
-std::vector<InferenceEngine::SizeVector> inputs_shape = {
+std::vector<ov::Shape> inputs_shape = {
     {3, 6}
 };
 
-std::vector<TestModelGenerator::PredicateTypes> if_cond_types = {
-    GPULayerTestsDefinitions::TestModelGenerator::PredicateTypes::PARAM
+std::vector<TestModelGenerator::PredicateTypes> if_cond_types = {
+    TestModelGenerator::PredicateTypes::PARAM
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_ConditionGPUTest_static, StaticConditionLayerGPUTest,
@@ -640,18 +638,18 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConditionGPUTest_static, StaticConditionLayerGPUT
 
 /// Static shape single layer test
 class StaticConditionSingleLayerGPUTest : public testing::WithParamInterface<ConditionParams>,
-                            virtual public LayerTestsUtils::LayerTestsCommon {
+                                          virtual public ov::test::SubgraphBaseStaticTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<ConditionParams>& obj) {
-        InferenceEngine::SizeVector data_shape;
-        InferenceEngine::Precision data_prc;
+        ov::Shape data_shape;
+        ov::element::Type model_type;
         TestModelGenerator::PredicateTypes pred;
         std::string targetDevice;
-        std::tie(data_shape, data_prc, pred, targetDevice) = obj.param;
+        std::tie(data_shape, model_type, pred, targetDevice) = obj.param;
         std::ostringstream result;
         result << "IS=" << ov::test::utils::vec2str(data_shape) << "_";
-        result << "netPRC=" << std::to_string(data_prc) << "_";
+        result << "netPRC=" << model_type << "_";
         result << "ifCond=" << pred << "_";
         result << "targetDevice=" << targetDevice << "_";
         auto res_str = result.str();
@@ -663,62 +661,55 @@ class StaticConditionSingleLayerGPUTest : public testing::WithParamInterface
-    InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
-        auto tensor_desc = info.getTensorDesc();
-        auto blob = make_blob_with_precision(tensor_desc);
-        blob->allocate();
-
-        if (tensor_desc.getLayout() == InferenceEngine::SCALAR) {
-            auto prc = tensor_desc.getPrecision();
-            auto scalar_1d = ov::test::utils::make_reshape_view(blob, {1});
-            if (prc == InferenceEngine::Precision::BOOL) {
-                auto mem_blob = dynamic_cast(blob.get());
-                auto mem = mem_blob->rwmap();
-                auto data_ptr = mem.as();
-                *data_ptr = false;
+    void generate_inputs(const std::vector<ov::Shape>& target_input_static_shapes) override {
+        inputs.clear();
+        const auto& model_inputs = function->inputs();
+        for (size_t i = 0; i < model_inputs.size(); ++i) {
+            const auto& model_input = model_inputs[i];
+            auto type = model_input.get_element_type();
+            ov::Tensor tensor;
+            if (ov::element::boolean == type) {
+                tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), target_input_static_shapes[i], 0, 0);
             } else {
-                ov::test::utils::fill_data_with_broadcast(scalar_1d, 0, {20.f});
+                tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), target_input_static_shapes[i], 0, 20);
             }
-        } else {
-            ov::test::utils::fill_data_with_broadcast(blob, 0, {20.f});
+            inputs.insert({model_input.get_node_shared_ptr(), tensor});
         }
-        return blob;
     }
 
-    InferenceEngine::SizeVector data_shape;
-    InferenceEngine::Precision data_prc;
+    ov::Shape data_shape;
+    ov::element::Type model_type;
 };
 
-TEST_P(StaticConditionSingleLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    Run();
+TEST_P(StaticConditionSingleLayerGPUTest, Inference) {
+    run();
 }
 
-std::vector<InferenceEngine::Precision> netPrecisions_static_single = {
-    InferenceEngine::Precision::FP32,
-    InferenceEngine::Precision::FP16,
-    InferenceEngine::Precision::I8
+std::vector<ov::element::Type> model_types_static_single = {
+    ov::element::f32,
+    ov::element::f16,
+    ov::element::i8
 };
 
-std::vector<InferenceEngine::SizeVector> inputs_shape_single = {
+std::vector<ov::Shape> inputs_shape_single = {
     {64}
 };
 
 INSTANTIATE_TEST_SUITE_P(smoke_ConditionGPUTest_static, StaticConditionSingleLayerGPUTest,
                          testing::Combine(
                              testing::ValuesIn(inputs_shape_single),
-                             testing::ValuesIn(netPrecisions_static_single),
+                             testing::ValuesIn(model_types_static_single),
                              testing::ValuesIn(if_cond_types),
                              testing::Values(ov::test::utils::DEVICE_GPU)),
                          StaticConditionLayerGPUTest::getTestCaseName);
@@ -730,25 +721,26 @@ struct InnerBodyTypeParams {
     InnerBodyGenerator::InnerBodyType else_body_type;
 };
 
+using ov::test::InputShape;
+
 using ConditionGPUParams = typename std::tuple<
     InputShape,                         // Input Shapes
     InnerBodyTypeParams,                // Inner body type
-    InferenceEngine::Precision,         // Precision
+    ov::element::Type,                  // Type
     TestModelGenerator::PredicateTypes, // if predicate type
-    LayerTestsUtils::TargetDevice       // Device name
->;
+    std::string>;                       // Device name
 
 class DynamicConditionLayerGPUTest : public testing::WithParamInterface<ConditionGPUParams>,
-                virtual public SubgraphBaseTest {
+                                     virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<ConditionGPUParams>& obj) {
         InputShape inputShapes;
         InnerBodyTypeParams bodyParams;
-        InferenceEngine::Precision dataPrc;
+        ov::element::Type model_type;
         TestModelGenerator::PredicateTypes condType;
         std::string targetDevice;
-        std::tie(inputShapes, bodyParams, dataPrc, condType, targetDevice) = obj.param;
+        std::tie(inputShapes, bodyParams, model_type, condType, targetDevice) = obj.param;
         std::ostringstream result;
         result << "IS=(";
         result << ov::test::utils::partialShape2str({inputShapes.first}) << "_";
@@ -759,7 +751,7 @@ class DynamicConditionLayerGPUTest : public testing::WithParamInterface
         std::vector<ov::Shape> condSecondVec;
         for (size_t i = 0; i < num_second; i++) {
@@ -782,11 +774,10 @@ class DynamicConditionLayerGPUTest : public testing::WithParamInterface
         cond->set_friendly_name("if_operator_outer");
@@ -821,8 +812,7 @@ class DynamicConditionLayerGPUTest : public testing::WithParamInterface
-            auto tensor = ov::test::utils::create_and_fill_tensor(param->get_element_type(), input_shape, inGenData.range,
-                                                                  inGenData.start_from, inGenData.resolution, inGenData.seed);
+            auto tensor = ov::test::utils::create_and_fill_tensor(param->get_element_type(), input_shape, inGenData);
             inputs.insert({param, tensor});
         }
     }
@@ -832,18 +822,9 @@ class DynamicConditionLayerGPUTest : public testing::WithParamInterface
-const std::vector<InferenceEngine::Precision> netPrecisions_f32 = {
-    InferenceEngine::Precision::FP32
-};
-
-const std::vector<InferenceEngine::Precision> netPrecisions_f16 = {
-    InferenceEngine::Precision::FP16
-};
-
 const std::vector<ov::test::InputShape> dynamicInputShapes_f32 = {
     ov::test::InputShape(ov::PartialShape({-1, -1, -1, -1, -1}), {{4, 1, 1, 64, 32}, {6, 1, 1, 8, 4}, {8, 1, 1, 24, 16}}),
     ov::test::InputShape(ov::PartialShape({1, 1, -1, -1}), {{1, 1, 64, 32}, {1, 1, 8, 4}, {1, 1, 24, 16}})
@@ -900,7 +881,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConditionGPUTest_dynamic_f32, DynamicConditionLay
                          testing::Combine(
                              testing::ValuesIn(dynamicInputShapes_f32),     // input shapes
                              testing::ValuesIn(innerBodyTypes_f32),         // inner body type
-                             testing::ValuesIn(netPrecisions_f32),          // network precision
+                             testing::Values(ov::element::f32),             // network precision
                              testing::ValuesIn(condTypes),                  // cond type
                              testing::Values(ov::test::utils::DEVICE_GPU)), // device type
                          DynamicConditionLayerGPUTest::getTestCaseName);
@@ -910,7 +891,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConditionGPUTest_dynamic_f16, DynamicConditionLay
                          testing::Combine(
                              testing::ValuesIn(dynamicInputShapes_f16),     // input shapes
                              testing::ValuesIn(innerBodyTypes_f16),         // inner body type
-                             testing::ValuesIn(netPrecisions_f16),          // network precision
+                             testing::Values(ov::element::f16),             // network precision
                              testing::ValuesIn(condTypes),                  // cond type
                              testing::Values(ov::test::utils::DEVICE_GPU)), // device type
                          DynamicConditionLayerGPUTest::getTestCaseName);
@@ -919,8 +900,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConditionGPUTest_zero_dims, DynamicConditionLayer
                          testing::Combine(
                              testing::ValuesIn(dynamicInputShapes_zero_dims), // input shapes
                              testing::ValuesIn(innerBodyTypes_zero_dims),     // inner body type
-                             testing::ValuesIn(netPrecisions_f32),            // network precision
+                             testing::Values(ov::element::f32),               // network precision
                              testing::ValuesIn(condTypes_zero_dims),          // cond type
                              testing::Values(ov::test::utils::DEVICE_GPU)),   // device type
                          DynamicConditionLayerGPUTest::getTestCaseName);
-} // namespace GPULayerTestsDefinitions
+} // namespace
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp
index 9030ea1129313d..65373ff1d76ff9 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/broadcast_eltwise.cpp
@@ -2,36 +2,35 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
-#include
-
-#include "ov_models/builders.hpp"
-#include "ov_models/utils/ov_helpers.hpp"
-#include "shared_test_classes/base/layer_test_utils.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "openvino/pass/serialize.hpp"
 
-using namespace ngraph;
-using namespace ov::test;
-using namespace InferenceEngine;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/broadcast.hpp"
+
+namespace {
+using ov::test::InputShape;
 
-namespace GPULayerTestsDefinitions {
 using BroadcastEltwiseParams = std::tuple<
-    ElementType, // input precision
-    InputShape,  // input shape
-    ov::Shape    // target broadcast shape
+    ov::element::Type, // input type
+    InputShape,        // input shape
+    ov::Shape          // target broadcast shape
>;
 
-class BroadcastEltwise : virtual public SubgraphBaseTest, public testing::WithParamInterface<BroadcastEltwiseParams> {
+class BroadcastEltwise : virtual public ov::test::SubgraphBaseTest,
+                         public testing::WithParamInterface<BroadcastEltwiseParams> {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<BroadcastEltwiseParams>& obj) {
-        ElementType input_precision;
+        ov::element::Type model_type;
         InputShape input_shape;
         ov::Shape target_shape;
-        std::tie(input_precision, input_shape, target_shape) = obj.param;
+        std::tie(model_type, input_shape, target_shape) = obj.param;
         std::ostringstream result;
-        result << "precision=" << input_precision << "IS=(" << ov::test::utils::partialShape2str({input_shape.first}) << ")_TS=(";
+        result << "precision=" << model_type << "IS=(" << ov::test::utils::partialShape2str({input_shape.first}) << ")_TS=(";
         for (const auto& item : input_shape.second) {
             result << ov::test::utils::vec2str(item) << "_";
         }
@@ -41,23 +40,23 @@ class BroadcastEltwise : virtual public SubgraphBaseTest, public testing::WithPa
 
 protected:
     void SetUp() override {
-        ElementType input_precision;
+        ov::element::Type model_type;
         InputShape input_shape;
-        std::tie(input_precision, input_shape, target_shape) = GetParam();
+        std::tie(model_type, input_shape, target_shape) = GetParam();
         targetDevice = ov::test::utils::DEVICE_GPU;
 
         std::vector<InputShape> input_shapes{input_shape, {{}, {{target_shape.size()}}}};
         init_input_shapes(input_shapes);
 
-        ov::element::TypeVector input_precisions{input_precision, ov::element::i64};
+        ov::element::TypeVector model_types{model_type, ov::element::i64};
         ov::ParameterVector params;
-        for (size_t i = 0; i < input_precisions.size(); i++) {
-            auto param_node = std::make_shared<ov::op::v0::Parameter>(input_precisions[i], inputDynamicShapes[i]);
+        for (size_t i = 0; i < model_types.size(); i++) {
+            auto param_node = std::make_shared<ov::op::v0::Parameter>(model_types[i], inputDynamicShapes[i]);
             params.push_back(param_node);
         }
-        const auto bcast_data = ov::opset10::Constant::create(input_precision, {}, {1.f});
-        const auto bcast = std::make_shared(bcast_data, params[1]);
-        const auto add = std::make_shared(params[0], bcast);
+        const auto bcast_data = ov::op::v0::Constant::create(model_type, {}, {1.f});
+        const auto bcast = std::make_shared<ov::op::v3::Broadcast>(bcast_data, params[1]);
+        const auto add = std::make_shared<ov::op::v1::Add>(params[0], bcast);
         function = std::make_shared<ov::Model>(add, params);
     }
@@ -95,7 +94,6 @@ TEST_P(BroadcastEltwise, smoke_CompareWithRefs) {
     EXPECT_EQ(last_input_layer_type, "broadcast");
 }
 
-namespace {
 const std::vector<InputShape> input_shapes = {
     {{-1, -1, -1, -1}, {{1, 3, 16, 16}}},
     {{-1, -1}, {{16, 16}}},
@@ -113,4 +111,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_BroadcastEltwise,
                          ::testing::ValuesIn(target_shapes)),
                          BroadcastEltwise::getTestCaseName);
 } // namespace
-} // namespace GPULayerTestsDefinitions
+
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp
index 658102694142a9..e2825017ea8a0d 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_model_static_split_layer.cpp
@@ -1,45 +1,39 @@
 // Copyright (C) 2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/split.hpp"
-#include
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/node_builders/eltwise.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/variadic_split.hpp"
 
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 
 typedef std::tuple<
-    std::vector<InputShape>,            // input shapes
-    ElementType,                        // Network precision
-    TargetDevice,                       // Device name
-    std::map<std::string, std::string>  // Additional network configuration
+    std::vector<InputShape>,  // input shapes
+    ov::element::Type,        // Model type
+    std::string               // Device name
> DynamicModelStaticSplitLayerGPUTestParamsSet;
 
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f16
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f16
};
 
 class DynamicModelStaticSplitLayerGPUTest : public testing::WithParamInterface<DynamicModelStaticSplitLayerGPUTestParamsSet>,
-                        virtual public SubgraphBaseTest {
+                                            virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<DynamicModelStaticSplitLayerGPUTestParamsSet>& obj) {
         DynamicModelStaticSplitLayerGPUTestParamsSet basicParamsSet = obj.param;
         std::ostringstream result;
         std::vector<InputShape> inputShapes;
-        ElementType netType;
-        TargetDevice targetDevice;
-        std::map<std::string, std::string> additionalConfig;
+        ov::element::Type model_type;
+        std::string targetDevice;
 
-        std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet;
+        std::tie(inputShapes, model_type, targetDevice) = basicParamsSet;
         result << "IS=";
         for (const auto& shape : inputShapes) {
             result << ov::test::utils::partialShape2str({shape.first}) << "_";
@@ -47,66 +41,61 @@ class DynamicModelStaticSplitLayerGPUTest : public testing::WithParamInterface
         split_sizes->set_friendly_name("split_sizes");
-        auto variadicSplitOp = std::make_shared(params[0], axis, split_sizes);
+        auto variadicSplitOp = std::make_shared<ov::op::v1::VariadicSplit>(params[0], axis, split_sizes);
         variadicSplitOp->set_friendly_name("variadicSplit");
 
-        auto addOp = ngraph::builder::makeEltwise(params[1], variadicSplitOp->output(1), ngraph::helpers::EltwiseTypes::ADD);
+        auto addOp = ov::test::utils::makeEltwise(params[1], variadicSplitOp->output(1), ov::test::utils::EltwiseTypes::ADD);
         addOp->set_friendly_name("add");
 
-        ngraph::ResultVector results = {std::make_shared(addOp)};
-        function = std::make_shared(results, params, "eltwise_add_out");
+        ov::ResultVector results = {std::make_shared<ov::op::v0::Result>(addOp)};
+        function = std::make_shared<ov::Model>(results, params, "eltwise_add_out");
     }
 };
-
-TEST_P(DynamicModelStaticSplitLayerGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(DynamicModelStaticSplitLayerGPUTest, Inference) {
     run();
 }
 
-namespace {
-std::map<std::string, std::string> emptyAdditionalConfig;
 const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
     {
         // Input for static VariadicSplit
@@ -124,11 +113,9 @@ const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
 
 const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
-                                                 ::testing::ValuesIn(netPrecisions), // netprec
-                                                 ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                                 ::testing::Values(emptyAdditionalConfig));
+                                                 ::testing::ValuesIn(model_types), // netprec
+                                                 ::testing::Values(ov::test::utils::DEVICE_GPU));
 
 INSTANTIATE_TEST_SUITE_P(smoke_dynamic_model_static_split, DynamicModelStaticSplitLayerGPUTest,
                          testParams_smoke, DynamicModelStaticSplitLayerGPUTest::getTestCaseName);
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp
index 66c8ebe1426ef9..f28ecbdbc6b755 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_gen_impl_key.cpp
@@ -1,47 +1,43 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/shape_of.hpp"
-#include "shared_test_classes/single_layer/strided_slice.hpp"
-#include
-#include
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/node_builders/eltwise.hpp"
+#include "common_test_utils/node_builders/reduce.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
 
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 
 typedef std::tuple<
     std::vector<InputShape>,            // input shapes
-    ElementType,                        // Network precision
-    TargetDevice,                       // Device name
+    ov::element::Type,                  // Model type
+    std::string,                        // Device name
     std::map<std::string, std::string>  // Additional network configuration
> genImplKeyDynamicGPUTestParamsSet;
 
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f16,
-    ElementType::f32,
-    ElementType::i32,
-    ElementType::i64,
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f16,
+    ov::element::f32,
+    ov::element::i32,
+    ov::element::i64,
};
 
 class GenlImplKeyDynamicGPUTest : public testing::WithParamInterface<genImplKeyDynamicGPUTestParamsSet>,
-                        virtual public SubgraphBaseTest {
+                                  virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<genImplKeyDynamicGPUTestParamsSet>& obj) {
         genImplKeyDynamicGPUTestParamsSet basicParamsSet = obj.param;
         std::ostringstream result;
         std::vector<InputShape> inputShapes;
-        ElementType netType;
-        TargetDevice targetDevice;
+        ov::element::Type netType;
+        std::string targetDevice;
         std::map<std::string, std::string> additionalConfig;
 
         std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet;
@@ -58,25 +54,25 @@ class GenlImplKeyDynamicGPUTest : public testing::WithParamInterface
-    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
-        inputs.clear();
-        const auto& funcInputs = function->inputs();
-        for (size_t i = 0; i < funcInputs.size(); ++i) {
-            const auto& funcInput = funcInputs[i];
-            ov::Tensor tensor;
-            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(),
-                                                             targetInputStaticShapes[i],
-                                                             80,
-                                                             0,
-                                                             8);
-            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
-        }
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
+        inputs.clear();
+        const auto& funcInputs = function->inputs();
+        for (size_t i = 0; i < funcInputs.size(); ++i) {
+            const auto& funcInput = funcInputs[i];
+            ov::Tensor tensor;
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = 0;
+            in_data.range = 80;
+            in_data.resolution = 8;
+            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data);
+            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
+        }
     }
 
     void SetUp() override {
         genImplKeyDynamicGPUTestParamsSet basicParamsSet = this->GetParam();
         std::vector<InputShape> inputShapes;
-        ElementType netType;
+        ov::element::Type netType;
         std::map<std::string, std::string> additionalConfig;
         std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet;
@@ -87,64 +83,60 @@ class GenlImplKeyDynamicGPUTest : public testing::WithParamInterface
             params.push_back(std::make_shared<ov::op::v0::Parameter>(netType, shape));
 
-        auto addOp1 = ngraph::builder::makeEltwise(params[1], params[1], ngraph::helpers::EltwiseTypes::ADD);
+        auto addOp1 = ov::test::utils::makeEltwise(params[1], params[1], ov::test::utils::EltwiseTypes::ADD);
         addOp1->set_friendly_name("add1");
 
-        auto shapeOfOp1 = std::make_shared(addOp1, ElementType::i64);
+        auto shapeOfOp1 = std::make_shared<ov::op::v3::ShapeOf>(addOp1, ov::element::i64);
         shapeOfOp1->set_friendly_name("shapeof1");
 
         std::vector<int> reduce_axes = {0};
-        auto reduceAxesNode1 = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({1}), reduce_axes));
-        auto reduceOp1 = ngraph::builder::makeReduce(shapeOfOp1, reduceAxesNode1, true, ngraph::helpers::ReductionType::Prod);
+        auto reduceAxesNode1 = std::dynamic_pointer_cast<ov::Node>(
+            std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape({1}), reduce_axes));
+        auto reduceOp1 = ov::test::utils::make_reduce(shapeOfOp1, reduceAxesNode1, true, ov::test::utils::ReductionType::Prod);
         reduceOp1->set_friendly_name("reduce1");
 
         std::vector<int64_t> shapePatternFill = {-1};
-        auto reshapePatternComp1 = std::make_shared(ngraph::element::Type_t::i64,
-                                                    ngraph::Shape{1}, shapePatternFill);
+        auto reshapePatternComp1 = std::make_shared<ov::op::v0::Constant>(ov::element::i64,
+                                                                          ov::Shape{1}, shapePatternFill);
         auto concatOp1 = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{reduceOp1, reshapePatternComp1}, 0);
         concatOp1->set_friendly_name("concat1");
 
-        auto reshapeOp1 = std::make_shared(addOp1, concatOp1, false);
+        auto reshapeOp1 = std::make_shared<ov::op::v1::Reshape>(addOp1, concatOp1, false);
         reshapeOp1->set_friendly_name("reshapeOp1");
 
-        auto addOp2 = ngraph::builder::makeEltwise(params[1], params[1], ngraph::helpers::EltwiseTypes::ADD);
+        auto addOp2 = ov::test::utils::makeEltwise(params[1], params[1], ov::test::utils::EltwiseTypes::ADD);
         addOp2->set_friendly_name("add2");
 
-        auto shapeOfOp2 = std::make_shared(addOp2, ElementType::i64);
+        auto shapeOfOp2 = std::make_shared<ov::op::v3::ShapeOf>(addOp2, ov::element::i64);
         shapeOfOp2->set_friendly_name("shapeof2");
 
-        auto reduceAxesNode2 = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({1}), reduce_axes));
-        auto reduceOp2 = ngraph::builder::makeReduce(shapeOfOp2, reduceAxesNode2, true, ngraph::helpers::ReductionType::Prod);
+        auto reduceAxesNode2 = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape({1}), reduce_axes);
+        auto reduceOp2 = ov::test::utils::make_reduce(shapeOfOp2, reduceAxesNode2, true, ov::test::utils::ReductionType::Prod);
         reduceOp2->set_friendly_name("reduce2");
 
-        auto reshapePatternComp2 = std::make_shared(ngraph::element::Type_t::i64,
-                                                    ngraph::Shape{1}, shapePatternFill);
+        auto reshapePatternComp2 = std::make_shared<ov::op::v0::Constant>(ov::element::i64,
+                                                                          ov::Shape{1}, shapePatternFill);
         auto concatOp2 = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{reduceOp2, reshapePatternComp2}, 0);
         concatOp2->set_friendly_name("concat2");
 
-        auto reshapeOp2 = std::make_shared(addOp2, concatOp2, false);
+        auto reshapeOp2 = std::make_shared<ov::op::v1::Reshape>(addOp2, concatOp2, false);
         reshapeOp2->set_friendly_name("reshapeOp2");
 
-        auto addOp3 = ngraph::builder::makeEltwise(reshapeOp1, reshapeOp2, ngraph::helpers::EltwiseTypes::ADD);
+        auto addOp3 = ov::test::utils::makeEltwise(reshapeOp1, reshapeOp2, ov::test::utils::EltwiseTypes::ADD);
         addOp3->set_friendly_name("add3");
 
-        auto shapeOf3 = std::make_shared(addOp3, ElementType::i64);
+        auto shapeOf3 = std::make_shared<ov::op::v3::ShapeOf>(addOp3, ov::element::i64);
         shapeOf3->set_friendly_name("shapeof3");
 
-        ngraph::ResultVector results = {std::make_shared(shapeOf3)};
-        function = std::make_shared(results, params, "shapeof_out");
+        ov::ResultVector results = {std::make_shared<ov::op::v0::Result>(shapeOf3)};
+        function = std::make_shared<ov::Model>(results, params, "shapeof_out");
     }
 };
 
-
-TEST_P(GenlImplKeyDynamicGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(GenlImplKeyDynamicGPUTest, Inference) {
     run();
 }
 
-namespace {
 std::map<std::string, std::string> emptyAdditionalConfig;
 const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
     // 1D
@@ -177,13 +169,11 @@ const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
     }
 };
 
-
 const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
-                                                 ::testing::ValuesIn(netPrecisions), // netprec
+                                                 ::testing::ValuesIn(model_types),
                                                  ::testing::Values(ov::test::utils::DEVICE_GPU),
                                                  ::testing::Values(emptyAdditionalConfig));
 
 INSTANTIATE_TEST_SUITE_P(smoke_dynamic_impl_key, GenlImplKeyDynamicGPUTest,
                          testParams_smoke, GenlImplKeyDynamicGPUTest::getTestCaseName);
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp
index 8a76b155c1359c..7d8f4e089b236c 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_reduce_deconvolution_concat.cpp
@@ -1,48 +1,42 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/reduce_ops.hpp"
-#include "shared_test_classes/single_layer/convolution_backprop_data.hpp"
-#include
-#include
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/node_builders/reduce.hpp"
+#include "common_test_utils/node_builders/convolution_backprop_data.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/transpose.hpp"
 
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 
 typedef std::tuple<
     std::vector<InputShape>,            // input shapes
-    ElementType,                        // Network precision
-    TargetDevice,                       // Device name
-    std::map<std::string, std::string>  // Additional network configuration
+    ov::element::Type,                  // Network precision
+    std::string                         // Device name
> reduceDeconvConcatDynamicGPUTestParamsSet;
 
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f16,
+const std::vector<ov::element::Type> netPrecisions = {
+    ov::element::f16,
};
 
 // Reduce should have preferred format for output layout
 class ReduceDeconvConcatDynamicGPUTest : public testing::WithParamInterface<reduceDeconvConcatDynamicGPUTestParamsSet>,
-                        virtual public SubgraphBaseTest {
+                                         virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<reduceDeconvConcatDynamicGPUTestParamsSet>& obj) {
         reduceDeconvConcatDynamicGPUTestParamsSet basicParamsSet = obj.param;
         std::ostringstream result;
         std::vector<InputShape> inputShapes;
-        ElementType netType;
-        TargetDevice targetDevice;
-        std::map<std::string, std::string> additionalConfig;
+        ov::element::Type model_type;
+        std::string targetDevice;
 
-        std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet;
+        std::tie(inputShapes, model_type, targetDevice) = basicParamsSet;
         result << "IS=";
         for (const auto& shape : inputShapes) {
             result << ov::test::utils::partialShape2str({shape.first}) << "_";
@@ -50,72 +44,65 @@ class ReduceDeconvConcatDynamicGPUTest : public testing::WithParamInterface
         std::vector<int> reduce_axes = {5};
-        auto reduceAxesNode = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({1}), reduce_axes));
-        auto reduceOp = ngraph::builder::makeReduce(params[1], reduceAxesNode, false, ngraph::helpers::ReductionType::Max);
+        auto reduceAxesNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape({1}), reduce_axes);
+        auto reduceOp = ov::test::utils::make_reduce(params[1], reduceAxesNode, false, ov::test::utils::ReductionType::Max);
         reduceOp->set_friendly_name("reduce");
 
         auto concatOp = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{deconvOp, reduceOp}, 1);
         concatOp->set_friendly_name("concat");
 
         std::vector<int> transpose_order = {0, 1, 2, 4, 3};
-        auto transposeOrderNode = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({5}), transpose_order));
-        auto transposeOp = std::make_shared(concatOp, transposeOrderNode);
+        auto transposeOrderNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape({5}), transpose_order);
+        auto transposeOp = std::make_shared<ov::op::v1::Transpose>(concatOp, transposeOrderNode);
         transposeOp->set_friendly_name("transpose");
 
-        ngraph::ResultVector results = {std::make_shared(transposeOp)};
-        function = std::make_shared(results, params, "transpose_out");
+        ov::ResultVector results = {std::make_shared<ov::op::v0::Result>(transposeOp)};
+        function = std::make_shared<ov::Model>(results, params, "transpose_out");
     }
 };
 
-TEST_P(ReduceDeconvConcatDynamicGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(ReduceDeconvConcatDynamicGPUTest, Inference) {
     run();
 }
 
-namespace {
-std::map<std::string, std::string> emptyAdditionalConfig;
 const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
     {
         // Input for Deconv
@@ -128,10 +115,8 @@ const std::vector<std::vector<ov::test::InputShape>> dynInputShapes = {
 
 const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes),
                                                  ::testing::ValuesIn(netPrecisions), // netprec
-                                                 ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                                 ::testing::Values(emptyAdditionalConfig));
+                                                 ::testing::Values(ov::test::utils::DEVICE_GPU));
 
 INSTANTIATE_TEST_SUITE_P(smoke_dynamic_reduce_deconv_concat, ReduceDeconvConcatDynamicGPUTest,
                          testParams_smoke, ReduceDeconvConcatDynamicGPUTest::getTestCaseName);
 } // namespace
-} // namespace GPULayerTestsDefinitions
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp
index 1a611f1f3060e2..3afe0ccdb3b4c4 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_activation.cpp
@@ -1,55 +1,47 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/shape_of.hpp"
-#include "shared_test_classes/single_layer/reshape.hpp"
-#include "shared_test_classes/single_layer/gather.hpp"
-#include "shared_test_classes/single_layer/activation.hpp"
-#include
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/node_builders/activation.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
 
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 
 typedef std::tuple<
     InputShape,                         // input shapes
-    ElementType,                        // Network precision
-    TargetDevice,                       // Device name
-    ngraph::helpers::ActivationTypes,   // Activation type
+    ov::element::Type,                  // Network precision
+    std::string,                        // Device name
+    ov::test::utils::ActivationTypes,   // Activation type
     std::vector<size_t>,                //inShape
-    std::vector<float>,                 //constantValue
-    std::map<std::string, std::string>  // Additional network configuration
+    std::vector<float>                  //constantValue
> shapeofActivationDynamicGPUTestParamsSet;
 
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f16,
-    ElementType::f32,
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f16,
+    ov::element::f32,
};
 
 class shapeofActivationDynamicGPUTest : public testing::WithParamInterface<shapeofActivationDynamicGPUTestParamsSet>,
-                        virtual public SubgraphBaseTest {
+                                        virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<shapeofActivationDynamicGPUTestParamsSet>& obj) {
         shapeofActivationDynamicGPUTestParamsSet basicParamsSet = obj.param;
         std::ostringstream result;
         InputShape inputShape;
-        ElementType netType;
-        TargetDevice targetDevice;
-        ngraph::helpers::ActivationTypes activationType;
+        ov::element::Type netType;
+        std::string targetDevice;
+        ov::test::utils::ActivationTypes activationType;
         std::vector<size_t> inShape;
         std::vector<float> constantValue;
-        std::map<std::string, std::string> additionalConfig;
-        std::tie(inputShape, netType, targetDevice, activationType, inShape, constantValue, additionalConfig) = basicParamsSet;
+        std::tie(inputShape, netType, targetDevice, activationType, inShape, constantValue) = basicParamsSet;
         result << "IS=";
         result << ov::test::utils::partialShape2str({inputShape.first}) << "_";
         for (const auto& actual_shape : inputShape.second) {
@@ -64,30 +56,29 @@ class shapeofActivationDynamicGPUTest : public testing::WithParamInterface
-    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
-        inputs.clear();
-        const auto& funcInputs = function->inputs();
-        for (size_t i = 0; i < funcInputs.size(); ++i) {
-            const auto& funcInput = funcInputs[i];
-            ov::Tensor tensor;
-            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(),
-                                                             targetInputStaticShapes[i],
-                                                             80,
-                                                             0,
-                                                             8);
-            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
-        }
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
+        inputs.clear();
+        const auto& funcInputs = function->inputs();
+        for (size_t i = 0; i < funcInputs.size(); ++i) {
+            const auto& funcInput = funcInputs[i];
+            ov::Tensor tensor;
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = 0;
+            in_data.range = 80;
+            in_data.resolution = 8;
+            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data);
+            inputs.insert({funcInput.get_node_shared_ptr(), tensor});
+        }
     }
 
     void SetUp() override {
         shapeofActivationDynamicGPUTestParamsSet basicParamsSet = this->GetParam();
         InputShape inputShape;
-        ElementType netType;
-        ngraph::helpers::ActivationTypes activationType;
+        ov::element::Type netType;
+        ov::test::utils::ActivationTypes activationType;
         std::vector<size_t> inShape;
         std::vector<float> constantValue;
-        std::map<std::string, std::string> additionalConfig;
-        std::tie(inputShape, netType, targetDevice, activationType, inShape, constantValue, additionalConfig) = basicParamsSet;
+        std::tie(inputShape, netType, targetDevice, activationType, inShape, constantValue) = basicParamsSet;
 
         init_input_shapes({inputShape});
@@ -96,46 +87,40 @@ class shapeofActivationDynamicGPUTest : public testing::WithParamInterface
             params.push_back(std::make_shared<ov::op::v0::Parameter>(netType, shape));
 
         std::vector<int> shape_pattern = {0, 1, -1, 0};
-        auto shapePatternsNode = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({4}), shape_pattern));
-        auto reshapeOp = std::make_shared(params[0], shapePatternsNode, true);
+        auto shapePatternsNode = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape({4}), shape_pattern);
+        auto reshapeOp = std::make_shared<ov::op::v1::Reshape>(params[0], shapePatternsNode, true);
         reshapeOp->set_friendly_name("reshape");
 
-        auto shapeOfOp = std::make_shared(reshapeOp, ElementType::i32);
+        auto shapeOfOp = std::make_shared<ov::op::v3::ShapeOf>(reshapeOp, ov::element::i32);
         shapeOfOp->set_friendly_name("shapeof");
 
         std::vector<int> indices = {0};
-        auto indicesNode = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i32, ngraph::Shape({1}), indices));
+        auto indicesNode = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape({1}), indices);
         std::vector<int> axis = {-1};
-        auto axisNode = std::dynamic_pointer_cast(
-            std::make_shared(ngraph::element::Type_t::i32, ngraph::Shape({1}), axis));
+        auto axisNode = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape({1}), axis);
         auto gatherOp = std::make_shared<ov::op::v8::Gather>(shapeOfOp, indicesNode, axisNode, 0);
         gatherOp->set_friendly_name("gather");
 
-        auto convertOp = std::make_shared(gatherOp, ElementType::f32);
+        auto convertOp = std::make_shared<ov::op::v0::Convert>(gatherOp, ov::element::f32);
         convertOp->set_friendly_name("convert");
 
-        auto activationOp = ngraph::builder::makeActivation(convertOp,
-                                                            netType,
-                                                            activationType,
-                                                            inShape,
-                                                            constantValue);
+        auto activationOp = ov::test::utils::make_activation(convertOp,
+                                                             netType,
+                                                             activationType,
+                                                             inShape,
+                                                             constantValue);
         activationOp->set_friendly_name("sqrt");
 
-        ngraph::ResultVector results = {std::make_shared(activationOp)};
-        function = std::make_shared(results, params, "result");
+        ov::ResultVector results = {std::make_shared<ov::op::v0::Result>(activationOp)};
+        function = std::make_shared<ov::Model>(results, params, "result");
     }
};
 
-TEST_P(shapeofActivationDynamicGPUTest, CompareWithRefs) {
-    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+TEST_P(shapeofActivationDynamicGPUTest, Inference) {
     run();
 }
 
-namespace {
-std::map<std::string, std::string> emptyAdditionalConfig;
 std::vector<ov::test::InputShape> inShapesDynamic4d = {
     {
         {-1, -1, 1, 64},
@@ -159,15 +144,13 @@
 std::vector<size_t> inShape_sqrt = {};
 std::vector<float> constantValue_sqrt = {};
 const auto testParams_sqrt = ::testing::Combine(::testing::ValuesIn(inShapesDynamic4d),
-                                                ::testing::ValuesIn(netPrecisions), // netprec
+                                                ::testing::ValuesIn(model_types), // netprec
                                                 ::testing::Values(ov::test::utils::DEVICE_GPU),
-                                                ::testing::Values(ngraph::helpers::ActivationTypes::Sqrt),
+                                                ::testing::Values(ov::test::utils::ActivationTypes::Sqrt),
                                                 ::testing::Values(inShape_sqrt),
-                                                ::testing::Values(constantValue_sqrt),
-                                                ::testing::Values(emptyAdditionalConfig));
+                                                ::testing::Values(constantValue_sqrt));
 
 // Activation type Sqrt should be supported in activation cpu_impl which is selected after shapeOf
 INSTANTIATE_TEST_SUITE_P(smoke_dynamic_shapeof_activation_sqrt, shapeofActivationDynamicGPUTest,
                          testParams_sqrt, shapeofActivationDynamicGPUTest::getTestCaseName);
-} // namespace
-} // namespace GPULayerTestsDefinitions
+} // namespace
\ No newline at end of file
diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp
index 456c627c38facb..4a3433660e1ac6 100644
--- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp
+++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_shape_of_reduce_reshape.cpp
@@ -1,50 +1,44 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
-#include "ov_models/utils/ov_helpers.hpp"
-#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "shared_test_classes/single_layer/shape_of.hpp"
-#include "shared_test_classes/single_layer/strided_slice.hpp"
-#include
-#include
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "common_test_utils/node_builders/eltwise.hpp"
+#include "common_test_utils/node_builders/reduce.hpp"
 
-using namespace ngraph;
-using namespace InferenceEngine;
-using namespace ov::test;
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/result.hpp"
+#include "openvino/op/reshape.hpp"
+#include "openvino/op/shape_of.hpp"
 
-namespace GPULayerTestsDefinitions {
+namespace {
+using ov::test::InputShape;
 
 typedef std::tuple<
     std::vector<InputShape>,            // input shapes
-    ElementType,                        // Network precision
-    TargetDevice,                       // Device name
-    std::map<std::string, std::string>  // Additional network configuration
+    ov::element::Type,                  // Network precision
+    std::string                         // Device name
> shapeOfReshapeReduceDynamicGPUTestParamsSet;
 
-const std::vector<ElementType> netPrecisions = {
-    ElementType::f16,
-    ElementType::f32,
-    ElementType::i32,
-    ElementType::i64,
+const std::vector<ov::element::Type> model_types = {
+    ov::element::f16,
+    ov::element::f32,
+    ov::element::i32,
+    ov::element::i64,
};
 
 class ShapeOfReshapeReduceDynamicGPUTest : public testing::WithParamInterface<shapeOfReshapeReduceDynamicGPUTestParamsSet>,
-                        virtual public SubgraphBaseTest {
+                                           virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<shapeOfReshapeReduceDynamicGPUTestParamsSet>& obj) {
         shapeOfReshapeReduceDynamicGPUTestParamsSet basicParamsSet = obj.param;
         std::ostringstream result;
         std::vector<InputShape> inputShapes;
-        ElementType netType;
-        TargetDevice targetDevice;
-        std::map<std::string, std::string> additionalConfig;
+        ov::element::Type model_type;
+        std::string targetDevice;
 
-        std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet;
+        std::tie(inputShapes, model_type, targetDevice) = basicParamsSet;
result << "IS="; for (const auto& shape : inputShapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; @@ -52,75 +46,68 @@ class ShapeOfReshapeReduceDynamicGPUTest : public testing::WithParamInterface(params[0], ElementType::i64); + auto shapeOfOp1 = std::make_shared(params[0], ov::element::i64); shapeOfOp1->set_friendly_name("shapeof1"); std::vector reduce_axes = {0}; - auto reduceAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape({1}), reduce_axes)); - auto reduceOp = ngraph::builder::makeReduce(shapeOfOp1, reduceAxesNode, true, ngraph::helpers::ReductionType::Prod); + auto reduceAxesNode = std::make_shared(ov::element::i64, ov::Shape({1}), reduce_axes); + auto reduceOp = ov::test::utils::make_reduce(shapeOfOp1, reduceAxesNode, true, ov::test::utils::ReductionType::Prod); reduceOp->set_friendly_name("reduce"); std::vector shapePatternFill = {-1}; - auto reshapePatternComp = std::make_shared(ngraph::element::Type_t::i64, - ngraph::Shape{1}, shapePatternFill); + auto reshapePatternComp = std::make_shared(ov::element::i64, ov::Shape{1}, shapePatternFill); auto concatOp = std::make_shared(ov::NodeVector{reduceOp, reshapePatternComp}, 0); concatOp->set_friendly_name("concat"); - auto reshapeOp = std::make_shared(addOp, concatOp, false); + auto reshapeOp = std::make_shared(addOp, concatOp, false); - auto shapeOf2 = std::make_shared(reshapeOp, ElementType::i64); + auto shapeOf2 = std::make_shared(reshapeOp, ov::element::i64); shapeOf2->set_friendly_name("shapeof2"); - ngraph::ResultVector results = {std::make_shared(shapeOf2)}; - function = std::make_shared(results, params, "shapeof_out"); + ov::ResultVector results = {std::make_shared(shapeOf2)}; + function = std::make_shared(results, params, "shapeof_out"); } }; - -TEST_P(ShapeOfReshapeReduceDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ShapeOfReshapeReduceDynamicGPUTest, Inference) { run(); } -namespace { -std::map emptyAdditionalConfig; const std::vector> dynInputShapes = { // 1D { @@ -152,13 +139,10 @@ const std::vector> dynInputShapes = { } }; - const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), - ::testing::ValuesIn(netPrecisions), // netprec - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::ValuesIn(model_types), // netprec + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_dynamic_shapeof_reshape, ShapeOfReshapeReduceDynamicGPUTest, testParams_smoke, ShapeOfReshapeReduceDynamicGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp index a85c6089461430..88ceb3ea6db14b 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/dynamic_smoke_test_with_empty_tensor.cpp @@ -1,48 +1,41 @@ // Copyright (C) 2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/single_layer/shape_of.hpp" -#include 
"shared_test_classes/single_layer/strided_slice.hpp" -#include -#include "shared_test_classes/single_layer/gather.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/non_zero.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/gather.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; typedef std::tuple< std::vector, // input shapes - ElementType, // Network precision - TargetDevice, // Device name - std::map // Additional network configuration + ov::element::Type, // Network precision + std::string // Device name > emptyTensorTestParamsSet; -const std::vector netPrecisions = { - ElementType::i32, +const std::vector netPrecisions = { + ov::element::i32, }; class EmptyTensorDynamicGPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { emptyTensorTestParamsSet basicParamsSet = obj.param; std::ostringstream result; std::vector inputShapes; - ElementType netType; - TargetDevice targetDevice; - std::map additionalConfig; + ov::element::Type netType; + std::string targetDevice; - std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet; + std::tie(inputShapes, netType, targetDevice) = basicParamsSet; result << "IS="; for (const auto& shape : inputShapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; @@ -56,36 +49,35 @@ class EmptyTensorDynamicGPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { - inputs.clear(); - const auto& funcInputs = function->inputs(); - for (size_t i = 0; i < funcInputs.size(); ++i) { - auto node = funcInputs[i].get_node_shared_ptr(); - auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[i]); - if (i == 0) { - // All zero inputs for non_zero op - auto tensor_ptr = static_cast(tensor.data()); - for (size_t j = 0; j < ov::shape_size(targetInputStaticShapes[i]); ++j) { - tensor_ptr[j] = 0; - } - } else { - // Random inputs for concat - tensor = ov::test::utils::create_and_fill_tensor(funcInputs[i].get_element_type(), - targetInputStaticShapes[i], - 80, - 0, - 8); + void generate_inputs(const std::vector& targetInputStaticShapes) override { + inputs.clear(); + const auto& funcInputs = function->inputs(); + for (size_t i = 0; i < funcInputs.size(); ++i) { + auto node = funcInputs[i].get_node_shared_ptr(); + auto tensor = ov::runtime::Tensor(node->get_element_type(), targetInputStaticShapes[i]); + if (i == 0) { + // All zero inputs for non_zero op + auto tensor_ptr = static_cast(tensor.data()); + for (size_t j = 0; j < ov::shape_size(targetInputStaticShapes[i]); ++j) { + tensor_ptr[j] = 0; } - inputs.insert({funcInputs[i].get_node_shared_ptr(), tensor}); - } + } else { + // Random inputs for concat + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = 80; + in_data.resolution = 8; + tensor = ov::test::utils::create_and_fill_tensor(funcInputs[i].get_element_type(), targetInputStaticShapes[i], in_data); + } + inputs.insert({funcInputs[i].get_node_shared_ptr(), tensor}); + } } void SetUp() override { emptyTensorTestParamsSet basicParamsSet = this->GetParam(); std::vector 
inputShapes; - ElementType netType; - std::map additionalConfig; - std::tie(inputShapes, netType, targetDevice, additionalConfig) = basicParamsSet; + ov::element::Type netType; + std::tie(inputShapes, netType, targetDevice) = basicParamsSet; init_input_shapes(inputShapes); const auto AllZeroData = inputDynamicShapes[0]; @@ -94,10 +86,9 @@ class EmptyTensorDynamicGPUTest : public testing::WithParamInterface(netType, shape)); - const ElementType intInputsPrecision = ElementType::i32; - auto nonzeroEmptyResultOp = std::make_shared(params[0]); + auto nonzeroEmptyResultOp = std::make_shared(params[0]); - auto convertEmptyInputOp = std::make_shared(nonzeroEmptyResultOp, ElementType::i32); + auto convertEmptyInputOp = std::make_shared(nonzeroEmptyResultOp, ov::element::i32); auto concatPartialInputEmptyOp = std::make_shared(ov::NodeVector{convertEmptyInputOp, params[1], convertEmptyInputOp}, 1); // partially empty input / non empty output @@ -106,32 +97,28 @@ class EmptyTensorDynamicGPUTest : public testing::WithParamInterface squeezeDims = {0}; - auto squeezeDimsConst = - std::make_shared(ngraph::element::Type_t::i32, ngraph::Shape{1}, squeezeDims); + auto squeezeDimsConst = std::make_shared(ov::element::i32, ov::Shape{1}, squeezeDims); + + auto squeezeEmptyInputOp = std::make_shared(nonzeroEmptyResultOp, squeezeDimsConst); - auto squeezeEmptyInputOp = std::make_shared(nonzeroEmptyResultOp, squeezeDimsConst); + auto axisNode = std::make_shared(ov::element::i32, ov::Shape({1}), std::vector{0}); + auto gatherEmptyIndicesOp = std::make_shared(params[0], squeezeEmptyInputOp, axisNode, 0); - auto axisNode = ngraph::builder::makeConstant(intInputsPrecision, ov::Shape({1}), {0}); - auto gatherEmptyIndicesOp = - std::make_shared(params[0], squeezeEmptyInputOp, axisNode, 0); - auto shapeofEmptyInputOp = std::make_shared(gatherEmptyIndicesOp, ElementType::i32); - ngraph::ResultVector results = {std::make_shared(shapeofEmptyInputOp), - std::make_shared(concatPartialInputEmptyOp), - std::make_shared(concatEmptyInputEmptyOutputOp)}; - function = std::make_shared(results, params, "result"); + auto shapeofEmptyInputOp = std::make_shared(gatherEmptyIndicesOp, ov::element::i32); - auto nonzero = std::make_shared(params[0]); + ov::ResultVector results = {std::make_shared(shapeofEmptyInputOp), + std::make_shared(concatPartialInputEmptyOp), + std::make_shared(concatEmptyInputEmptyOutputOp)}; + function = std::make_shared(results, params, "result"); + + auto nonzero = std::make_shared(params[0]); } }; - -TEST_P(EmptyTensorDynamicGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(EmptyTensorDynamicGPUTest, Inference) { run(); } -namespace { -std::map emptyAdditionalConfig; const std::vector> dynInputShapes = { { // Input for NonZero @@ -141,13 +128,10 @@ const std::vector> dynInputShapes = { }, }; - const auto testParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(netPrecisions), // netprec - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::test::utils::DEVICE_GPU)); INSTANTIATE_TEST_SUITE_P(smoke_empty_tensor, EmptyTensorDynamicGPUTest, testParams_smoke, EmptyTensorDynamicGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp index d3fea8828b2bd0..2d949cbebcd677 100644 --- 
a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache.cpp @@ -2,37 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "shared_test_classes/base/ov_subgraph.hpp" #include "common_test_utils/ov_tensor_utils.hpp" #include "common_test_utils/file_utils.hpp" -#include "openvino/core/node_vector.hpp" -#include "openvino/core/partial_shape.hpp" -#include "openvino/core/preprocess/pre_post_process.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/op/matmul.hpp" -#include "openvino/op/parameter.hpp" -#include "openvino/op/transpose.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "shared_test_classes/base/utils/compare_results.hpp" -#include "transformations/rt_info/decompression.hpp" #include "subgraphs_builders.hpp" +#include "ov_models/utils/ov_helpers.hpp" +#include "shared_test_classes/base/utils/compare_results.hpp" -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convert.hpp" -namespace SubgraphTestsDefinitions { +namespace { +using ov::test::InputShape; using KVCacheTestParams = std::tuple, // input shapes - ov::element::Type, // in/out precision - std::map>; // additional config + ov::element::Type>; // in/out type -class KVCacheTest : public testing::WithParamInterface, public SubgraphBaseTest { +class KVCacheTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: static std::string get_test_case_name(testing::TestParamInfo obj) { std::vector input_shapes; ov::element::Type element_type; - std::map additional_config; - std::tie(input_shapes, element_type, additional_config) = obj.param; + std::tie(input_shapes, element_type) = obj.param; std::ostringstream result; for (const auto& shape : input_shapes) { @@ -49,13 +43,7 @@ class KVCacheTest : public testing::WithParamInterface, publi } result << ")_"; } - result << "precision=" << element_type << "_"; - result << "config=("; - for (const auto& configEntry : additional_config) { - result << configEntry.first << ", " << configEntry.second << ":"; - } - result << ")"; - + result << "precision=" << element_type; return result.str(); } @@ -65,11 +53,9 @@ class KVCacheTest : public testing::WithParamInterface, publi std::vector input_shapes; ov::element::Type element_type; - std::map additional_config; - std::tie(input_shapes, element_type, additional_config) = GetParam(); + std::tie(input_shapes, element_type) = GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); init_input_shapes(input_shapes); inType = outType = element_type; @@ -78,14 +64,11 @@ class KVCacheTest : public testing::WithParamInterface, publi } }; -TEST_P(KVCacheTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(KVCacheTest, Inference) { run(); } -TEST_P(KVCacheTest, CompareWithRefs_cached) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(KVCacheTest, Inference_cached) { std::stringstream ss; ss << "gpu_model_cache_" << std::hash{}( std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) + @@ -95,7 +78,7 @@ TEST_P(KVCacheTest, CompareWithRefs_cached) { ov::test::utils::removeFilesWithExt(cacheDirName, "blob"); ov::test::utils::removeFilesWithExt(cacheDirName, "cl_cache"); ov::test::utils::removeDir(cacheDirName); - core->set_property(ov::cache_dir(cacheDirName)); + 
configuration.insert(ov::cache_dir(cacheDirName)); compile_model(); } { @@ -106,8 +89,6 @@ TEST_P(KVCacheTest, CompareWithRefs_cached) { } } -namespace { - const std::vector precisions = {ov::element::f32, ov::element::f16}; const std::vector> input_shapes_basic = { @@ -121,10 +102,8 @@ const std::vector> input_shapes_basic = { INSTANTIATE_TEST_SUITE_P(smoke_GPU_Dynamic, KVCacheTest, ::testing::Combine(::testing::ValuesIn(input_shapes_basic), - ::testing::ValuesIn(precisions), - ::testing::Values(std::map())), + ::testing::ValuesIn(precisions)), KVCacheTest::get_test_case_name); -} // namespace class KVCacheTests: public ::testing::Test { public: @@ -132,8 +111,10 @@ class KVCacheTests: public ::testing::Test { #if defined(ANDROID) GTEST_SKIP(); #endif - auto core = ov::Core(); - + auto core = ov::test::utils::PluginCache::get().core(); + ov::AnyMap properties = { + ov::hint::inference_precision(ov::element::f16) + }; std::string cacheDirName; if (is_caching_test) { std::stringstream ss; @@ -144,7 +125,7 @@ class KVCacheTests: public ::testing::Test { ov::test::utils::removeFilesWithExt(cacheDirName, "blob"); ov::test::utils::removeFilesWithExt(cacheDirName, "cl_cache"); ov::test::utils::removeDir(cacheDirName); - core.set_property(ov::cache_dir(cacheDirName)); + properties.insert(ov::cache_dir(cacheDirName)); } const size_t batch = 1; @@ -157,9 +138,9 @@ class KVCacheTests: public ::testing::Test { auto model = tests::make_llm_kv_cache_pattern(batch, n_heads, n_features, element_type); if (is_caching_test) { - core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f16)); + core->compile_model(model, ov::test::utils::DEVICE_GPU, properties); } - auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f16)); + auto compiled_model = core->compile_model(model, ov::test::utils::DEVICE_GPU, properties); auto input0 = model->get_parameters().at(0); auto input1 = model->get_parameters().at(1); @@ -167,13 +148,24 @@ class KVCacheTests: public ::testing::Test { auto output0 = model->get_results().at(0); auto output1 = model->get_results().at(1); - auto get_ref_results = [&model, &input0, &input1, &input2](const ov::Tensor& kv_cache, const ov::Tensor& new_token_data, + auto get_ref_results = [&](const ov::Tensor& kv_cache, const ov::Tensor& new_token_data, const ov::Tensor& matmul_data) { auto ref_model = model->clone(); ov::Tensor kv_cache_copy(kv_cache.get_element_type(), kv_cache.get_shape()); kv_cache.copy_to(kv_cache_copy); ngraph::helpers::resize_function(ref_model, {kv_cache_copy.get_shape(), new_token_data.get_shape(), matmul_data.get_shape()}); - return ngraph::helpers::interpretFunction(ref_model, {{input0, kv_cache_copy}, {input1, new_token_data}, {input2, matmul_data}}); + + auto compiled_model_ref = core->compile_model(ref_model, ov::test::utils::DEVICE_TEMPLATE); + auto inf_req_ref = compiled_model_ref.create_infer_request(); + inf_req_ref.set_tensor(input0, kv_cache_copy); + inf_req_ref.set_tensor(input1, new_token_data); + inf_req_ref.set_tensor(input2, matmul_data); + inf_req_ref.infer(); + std::vector results_ref; + for (auto&& output : ref_model->get_results()) { + results_ref.push_back(inf_req_ref.get_tensor(output)); + } + return results_ref; }; auto compare_tensors = [&model](const std::vector expected, const std::vector& actual) { @@ -187,7 +179,7 @@ class KVCacheTests: public ::testing::Test { std::shared_ptr inputNode = result->get_input_node_shared_ptr(i); if 
(std::dynamic_pointer_cast(inputNode)) { std::shared_ptr nextNodePtr = inputNode->get_input_node_shared_ptr(0); - if (!ngraph::is_type(nextNodePtr)) { + if (!ov::is_type(nextNodePtr)) { inputNode = nextNodePtr; } } @@ -259,11 +251,15 @@ class KVCacheTests: public ::testing::Test { } } - void test_smoke_multipleIterations_stateful(bool is_caching_test) { + void test_smoke_multipleIterations_stateful(bool is_caching_test, bool fuse_cache_reorder, bool build_state_initializer) { #if defined(ANDROID) GTEST_SKIP(); #endif - auto core = ov::Core(); + auto core = ov::test::utils::PluginCache::get().core(); + + ov::AnyMap properties = { + ov::hint::inference_precision(ov::element::f16) + }; std::string cacheDirName; if (is_caching_test) { @@ -275,7 +271,7 @@ class KVCacheTests: public ::testing::Test { ov::test::utils::removeFilesWithExt(cacheDirName, "blob"); ov::test::utils::removeFilesWithExt(cacheDirName, "cl_cache"); ov::test::utils::removeDir(cacheDirName); - core.set_property(ov::cache_dir(cacheDirName)); + properties.insert(ov::cache_dir(cacheDirName)); } const size_t batch = 1; @@ -286,23 +282,58 @@ class KVCacheTests: public ::testing::Test { ov::element::Type element_type = ov::element::f16; - auto model = tests::make_llm_kv_cache_pattern(batch, n_heads, n_features, element_type, true); - auto ref_model = tests::make_llm_kv_cache_pattern(batch, n_heads, n_features, element_type, false); + const bool stateful = true; + + auto model = tests::make_llm_kv_cache_pattern(build_state_initializer ? ov::Dimension::dynamic() : batch, + n_heads, + n_features, + element_type, + stateful, + fuse_cache_reorder, + build_state_initializer && stateful); + auto ref_model = tests::make_llm_kv_cache_pattern(build_state_initializer ? ov::Dimension::dynamic() : batch, + n_heads, + n_features, + element_type, + !stateful, + fuse_cache_reorder, + build_state_initializer && !stateful); if (is_caching_test) { - core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f16)); + core->compile_model(model, ov::test::utils::DEVICE_GPU, properties); } - auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::f16)); + auto compiled_model = core->compile_model(model, ov::test::utils::DEVICE_GPU, properties); auto input0 = model->get_parameters().at(0); auto input1 = model->get_parameters().at(1); + auto input2 = fuse_cache_reorder ? model->get_parameters().at(2) : nullptr; auto output0 = model->get_results().at(0); - auto get_ref_results = [&ref_model](const ov::Tensor& kv_cache, const ov::Tensor& new_token_data, const ov::Tensor& matmul_data) { + auto beam_idx_shape = ov::Shape{batch}; + auto beam_idx_data = ov::Tensor(ov::element::i32, beam_idx_shape); + for (size_t i = 0; i < batch; i++) { + beam_idx_data.data()[i] = i; + } + + auto get_ref_results = [&ref_model, fuse_cache_reorder, &beam_idx_shape, &beam_idx_data](const ov::Tensor& kv_cache, + const ov::Tensor& new_token_data, + const ov::Tensor& matmul_data) { auto input0 = ref_model->get_parameters().at(0); auto input1 = ref_model->get_parameters().at(1); auto input2 = ref_model->get_parameters().at(2); - ngraph::helpers::resize_function(ref_model, {kv_cache.get_shape(), new_token_data.get_shape(), matmul_data.get_shape()}); - return ngraph::helpers::interpretFunction(ref_model, {{input0, kv_cache}, {input1, new_token_data}, {input2, matmul_data}}); + auto input3 = fuse_cache_reorder ? 
ref_model->get_parameters().at(3) : nullptr; + std::vector input_shapes = {kv_cache.get_shape(), new_token_data.get_shape(), matmul_data.get_shape()}; + std::map, ov::Tensor> inputs = { + {input0, kv_cache}, + {input1, new_token_data}, + {input2, matmul_data} + }; + if (fuse_cache_reorder) { + input_shapes.push_back(beam_idx_shape); + inputs.emplace(input3, beam_idx_data); + } + + ngraph::helpers::resize_function(ref_model, input_shapes); + return ngraph::helpers::interpretFunction(ref_model, inputs); }; auto compare_tensors = [&model](const std::vector expected, const std::vector& actual) { @@ -316,7 +347,7 @@ class KVCacheTests: public ::testing::Test { std::shared_ptr inputNode = result->get_input_node_shared_ptr(i); if (std::dynamic_pointer_cast(inputNode)) { std::shared_ptr nextNodePtr = inputNode->get_input_node_shared_ptr(0); - if (!ngraph::is_type(nextNodePtr)) { + if (!ov::is_type(nextNodePtr)) { inputNode = nextNodePtr; } } @@ -334,7 +365,9 @@ class KVCacheTests: public ::testing::Test { infer_request.set_tensor(input0, new_token_input); infer_request.set_tensor(input1, matmul_input); - + if (fuse_cache_reorder) { + infer_request.set_tensor(input2, beam_idx_data); + } ov::Tensor ref_kv_cache; { @@ -400,11 +433,19 @@ TEST_F(KVCacheTests, smoke_multipleIterations_cached) { this->test_smoke_multipleIterations(true); } -TEST_F(KVCacheTests, smoke_multipleIterations_stateful) { - this->test_smoke_multipleIterations_stateful(false); +TEST_F(KVCacheTests, smoke_multipleIterations_stateful_no_gather_no_initializer) { + this->test_smoke_multipleIterations_stateful(false, false, false); +} + +TEST_F(KVCacheTests, smoke_multipleIterations_stateful_no_gather_no_initializer_cached) { + this->test_smoke_multipleIterations_stateful(true, false, false); } -TEST_F(KVCacheTests, smoke_multipleIterations_stateful_cached) { - this->test_smoke_multipleIterations_stateful(true); +TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer) { + this->test_smoke_multipleIterations_stateful(false, true, true); } -} // namespace SubgraphTestsDefinitions + +TEST_F(KVCacheTests, smoke_multipleIterations_stateful_gather_with_initializer_cached) { + this->test_smoke_multipleIterations_stateful(true, true, true); +} +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp index 1830e07cda9a8a..9a439132af0bae 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/matmul_weights_decompression.cpp @@ -3,16 +3,21 @@ // #include "common_test_utils/ov_tensor_utils.hpp" -#include "openvino/op/constant.hpp" -#include "openvino/op/matmul.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "transformations/rt_info/decompression.hpp" -using namespace ov; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/subtract.hpp" +#include "openvino/op/transpose.hpp" + +namespace { +using ov::test::InputShape; -namespace SubgraphTestsDefinitions { /* * Subtract_const(U8/NF4/U4/I4) * / @@ -44,26 +49,26 @@ struct ShapeParams { // Decompression group 
size. If the value is equal to -1, ordinary decompression is used int weights_group_size; }; + using MatmulWeightsDecompressionParams = std::tuple>; // additional config + bool>; // per-tensor zero-point -class MatmulWeightsDecompression : public testing::WithParamInterface, public SubgraphBaseTest { +class MatmulWeightsDecompression : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: static std::string get_test_case_name(testing::TestParamInfo obj) { ShapeParams shape_params; - ov::test::ElementType weights_precision; - ov::test::ElementType activations_precision; + ov::element::Type weights_precision; + ov::element::Type activations_precision; bool transpose; bool decompression_sub; bool reshape_on_decompression; bool per_tensor_zp; - std::map additional_config; std::tie(shape_params, weights_precision, @@ -71,25 +76,22 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(ov::NodeVector{mat_mul}, params, "MatmulWeightsDecompression"); } std::shared_ptr init_compressed_weights_subgraph(const ov::Shape& weights_shape, @@ -155,7 +157,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_tensor); weights->set_friendly_name("Compressed_weights"); - auto weights_convert = std::make_shared(weights, data_precision); + auto weights_convert = std::make_shared(weights, data_precision); std::shared_ptr mul_parent = weights_convert; auto output_channels = *weights_shape.rbegin(); @@ -181,16 +183,20 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(shift_tensor.data())[0] = 0x88; } auto shift_const = std::make_shared(shift_tensor); - std::shared_ptr shift_convert = std::make_shared(shift_const, data_precision); + std::shared_ptr shift_convert = std::make_shared(shift_const, data_precision); if (reshape_on_decompression_constant && !per_tensor_zp) { - auto shift_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); - auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); + auto shift_reshape_const = ov::op::v0::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); + auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); shift_convert = shift_reshape; } - mul_parent = std::make_shared(weights_convert, shift_convert); + mul_parent = std::make_shared(weights_convert, shift_convert); } - auto scale_tensor = ov::test::utils::create_and_fill_tensor(data_precision, scaleshift_const_shape, 1, -0.5, 30000); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -0.5; + in_data.range = 1; + in_data.resolution = 30000; + auto scale_tensor = ov::test::utils::create_and_fill_tensor(data_precision, scaleshift_const_shape, in_data); for (size_t i = 0; i < scale_tensor.get_size(); i++) { if (data_precision == ov::element::f16) scale_tensor.data()[i] /= ov::float16(16.f); @@ -199,25 +205,25 @@ class MatmulWeightsDecompression : public testing::WithParamInterface scale_const = std::make_shared(scale_tensor); if (reshape_on_decompression_constant) { - auto scale_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); - auto scale_reshape = std::make_shared(scale_const, scale_reshape_const, false); + auto scale_reshape_const = ov::op::v0::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); + auto scale_reshape = 
std::make_shared(scale_const, scale_reshape_const, false); scale_const = scale_reshape; } - std::shared_ptr last_node = std::make_shared(mul_parent, scale_const); + std::shared_ptr last_node = std::make_shared(mul_parent, scale_const); if (group_decompression) { auto reshape_target_shape = transpose_weights ? std::vector{-1, static_cast(weights_shape[0])} : std::vector{static_cast(weights_shape[0]), -1}; - auto target_shape_node = ov::opset10::Constant::create(ov::element::i32, {reshape_target_shape.size()}, reshape_target_shape); - last_node = std::make_shared(last_node, target_shape_node, false); + auto target_shape_node = ov::op::v0::Constant::create(ov::element::i32, {reshape_target_shape.size()}, reshape_target_shape); + last_node = std::make_shared(last_node, target_shape_node, false); } if (transpose_weights) { const size_t rank = last_node->get_output_partial_shape(0).size(); std::vector order(rank); std::iota(order.begin(), order.end(), 0); std::swap(*order.rbegin(), *(order.rbegin() + 1)); - auto transpose_constant = ov::opset10::Constant::create(ov::element::i32, {rank}, order); - last_node = std::make_shared(last_node, transpose_constant); + auto transpose_constant = ov::op::v0::Constant::create(ov::element::i32, {rank}, order); + last_node = std::make_shared(last_node, transpose_constant); } return last_node; } @@ -226,13 +232,12 @@ class MatmulWeightsDecompression : public testing::WithParamInterface additional_config; std::tie(shape_params, weights_precision, @@ -240,10 +245,8 @@ class MatmulWeightsDecompression : public testing::WithParamInterface& target_input_static_shapes) override { + void generate_inputs(const std::vector& target_input_static_shapes) override { inputs.clear(); const auto& model_inputs = function->inputs(); for (size_t i = 0; i < model_inputs.size(); ++i) { const auto& model_input = model_inputs[i]; - ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), - target_input_static_shapes[i], - 2, - -1, - 10000); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 2; + in_data.resolution = 10000; + ov::Tensor tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), target_input_static_shapes[i], in_data); inputs.insert({model_input.get_node_shared_ptr(), tensor}); } } void check_results() { const auto& test_param = GetParam(); - ov::test::ElementType weights_precision = std::get<1>(test_param); + ov::element::Type weights_precision = std::get<1>(test_param); for (const auto& n : compiledModel.get_runtime_model()->get_ordered_ops()) { if (n->get_friendly_name() == "Compressed_weights") { ASSERT_EQ(n->get_output_element_type(0), weights_precision); @@ -291,16 +294,13 @@ class MatmulWeightsDecompression : public testing::WithParamInterface activations_precisions = {ov::element::f32, ov::element::f16}; -const std::vector weights_precisions = {ov::element::u8, ov::element::u4, ov::element::i4}; +const std::vector activations_precisions = {ov::element::f32, ov::element::f16}; +const std::vector weights_precisions = {ov::element::u8, ov::element::u4, ov::element::i4}; const std::vector transpose_weights = {true, false}; const std::vector input_shapes_basic = { {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {16, 32}}, @@ -318,8 +318,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_basic, ::testing::ValuesIn(transpose_weights), ::testing::Values(true), ::testing::Values(true), - ::testing::Values(false), - ::testing::Values(std::map())), + 
::testing::Values(false)), MatmulWeightsDecompression::get_test_case_name); const std::vector input_shapes_corner_cases_basic = { @@ -347,8 +346,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_basic, ::testing::ValuesIn(transpose_weights), ::testing::ValuesIn(add_decompression_sub), ::testing::ValuesIn(reshape_on_decompression), - ::testing::ValuesIn(per_tensor_zp), - ::testing::Values(std::map{})), + ::testing::ValuesIn(per_tensor_zp)), MatmulWeightsDecompression::get_test_case_name); INSTANTIATE_TEST_SUITE_P(MatMulCompressedWeights_corner_cases_big, @@ -359,9 +357,6 @@ INSTANTIATE_TEST_SUITE_P(MatMulCompressedWeights_corner_cases_big, ::testing::ValuesIn(transpose_weights), ::testing::ValuesIn(add_decompression_sub), ::testing::ValuesIn(reshape_on_decompression), - ::testing::ValuesIn(per_tensor_zp), - ::testing::Values(std::map{})), + ::testing::ValuesIn(per_tensor_zp)), MatmulWeightsDecompression::get_test_case_name); } // namespace - -} // namespace SubgraphTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp index 25a6b3c23c1412..f45009a5365167 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/read_value_assign.cpp @@ -2,30 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "openvino/opsets/opset1.hpp" -#include "common_test_utils/ov_tensor_utils.hpp" #include "common_test_utils/file_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace ov::test; -using namespace InferenceEngine; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/read_value.hpp" +#include "openvino/op/assign.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using ReadValueAssignParams = std::tuple< - InputShape, // input shapes - ElementType // input precision + InputShape, // input shapes + ov::element::Type // input precision >; -class ReadValueAssignGPUTest : virtual public SubgraphBaseTest, public testing::WithParamInterface { +class ReadValueAssignGPUTest : virtual public ov::test::SubgraphBaseTest, + public testing::WithParamInterface { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { InputShape input_shapes; - ElementType input_precision; + ov::element::Type input_precision; std::tie(input_shapes, input_precision) = obj.param; std::ostringstream result; @@ -41,7 +41,7 @@ class ReadValueAssignGPUTest : virtual public SubgraphBaseTest, public testing:: protected: void SetUp() override { InputShape input_shapes; - ElementType input_precision; + ov::element::Type input_precision; std::tie(input_shapes, input_precision) = GetParam(); targetDevice = ov::test::utils::DEVICE_GPU; @@ -55,7 +55,7 @@ class ReadValueAssignGPUTest : virtual public SubgraphBaseTest, public testing:: auto add = std::make_shared(read_value, params.at(0)); auto assign = std::make_shared(add, "v0"); auto res = std::make_shared(add); - function = std::make_shared(ResultVector { res }, SinkVector { assign }, params); + function = std::make_shared(ov::ResultVector { res }, ov::SinkVector { assign 
}, params); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -71,14 +71,11 @@ class ReadValueAssignGPUTest : virtual public SubgraphBaseTest, public testing:: } }; -TEST_P(ReadValueAssignGPUTest, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() +TEST_P(ReadValueAssignGPUTest, Inference) { run(); } -TEST_P(ReadValueAssignGPUTest, CompareWithRefs_cached) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - +TEST_P(ReadValueAssignGPUTest, Inference_cached) { std::stringstream ss; ss << "gpu_model_cache_" << std::hash{}( std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) + @@ -99,13 +96,12 @@ TEST_P(ReadValueAssignGPUTest, CompareWithRefs_cached) { } } -namespace { const std::vector input_shapes_dyn = { {{-1, -1, -1, -1}, {{7, 4, 20, 20}, {19, 4, 20, 20}}} }; INSTANTIATE_TEST_SUITE_P(smoke_ReadValueAssign_Static, ReadValueAssignGPUTest, - ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation({{7, 4, 20, 20}})), + ::testing::Combine(::testing::ValuesIn(ov::test::static_shapes_to_test_representation({{7, 4, 20, 20}})), ::testing::Values(ov::element::i32)), ReadValueAssignGPUTest::getTestCaseName); @@ -114,4 +110,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_ReadValueAssign_Dynamic, ReadValueAssignGPUTest, ::testing::Values(ov::element::i32)), ReadValueAssignGPUTest::getTestCaseName); } // namespace -} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/rms_norm_decomposition.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/rms_norm_decomposition.cpp index 60cc72020732f8..bf515ed78fec5a 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/rms_norm_decomposition.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/rms_norm_decomposition.cpp @@ -2,16 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "common_test_utils/file_utils.hpp" -#include "random_generator.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ngraph; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/power.hpp" +#include "openvino/op/reduce_mean.hpp" + +namespace { +using ov::test::InputShape; -namespace SubgraphTestsDefinitions { /* * Input(F32) Const(F32) * | \ / @@ -33,17 +41,16 @@ namespace SubgraphTestsDefinitions { * Convert(F16) */ using RMSNormDecompositionParams = std::tuple, // input shapes - ov::test::ElementType, // input precision - std::map>; // additional config + ov::element::Type>; // input precision -class RMSNormDecomposition : public testing::WithParamInterface, public SubgraphBaseTest { +class RMSNormDecomposition : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector input_shapes; - ElementType input_precision; - std::map additional_config; + ov::element::Type input_precision; - std::tie(input_shapes, input_precision, additional_config) = obj.param; + std::tie(input_shapes, input_precision) = obj.param; std::ostringstream result; result << "IS=("; @@ 
-61,14 +68,7 @@ class RMSNormDecomposition : public testing::WithParamInterface(mul2, ov::element::f16); + + return std::make_shared(ov::NodeVector{comp}, params, "RMSNormDecomposition"); } void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; std::vector input_shapes; - ElementType input_precision; - std::map additional_config; + ov::element::Type input_precision; - std::tie(input_shapes, input_precision, additional_config) = GetParam(); + std::tie(input_shapes, input_precision) = GetParam(); - configuration.insert(additional_config.begin(), additional_config.end()); init_input_shapes(input_shapes); inType = outType = input_precision; @@ -128,14 +128,11 @@ class RMSNormDecomposition : public testing::WithParamInterface{}( std::string(::testing::UnitTest::GetInstance()->current_test_info()->test_suite_name()) + @@ -156,9 +153,7 @@ TEST_P(RMSNormDecomposition, CompareWithRefs_cached) { } } -namespace { - -const std::vector input_precisions = {ov::element::f32, ov::element::f16}; +const std::vector input_precisions = {ov::element::f32, ov::element::f16}; const std::vector> input_shapes_basic = { {{{-1, -1, 96}, {{1, 4, 96}}}}, @@ -170,9 +165,6 @@ const std::vector> input_shapes_basic = { INSTANTIATE_TEST_SUITE_P(smoke_RMSNormDecomposition_basic, RMSNormDecomposition, ::testing::Combine(::testing::ValuesIn(input_shapes_basic), - ::testing::ValuesIn(input_precisions), - ::testing::Values(std::map())), + ::testing::ValuesIn(input_precisions)), RMSNormDecomposition::getTestCaseName); } // namespace - -} // namespace SubgraphTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp index 1ca60efa2ff1e6..8200700e0bd902 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/loop.cpp @@ -2,25 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "common_test_utils/test_constants.hpp" -#include "shared_test_classes/base/utils/ranges.hpp" -#include -#include "shared_test_classes/base/utils/compare_results.hpp" -#include "openvino/pass/constant_folding.hpp" -#include -using namespace InferenceEngine; -using namespace ov::test; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/loop.hpp" +#include "openvino/op/less.hpp" -namespace GPULayerTestsDefinitions { +namespace { +using ov::test::InputShape; using DynamicShapeLoopParams = typename std::tuple< bool, @@ -32,17 +26,15 @@ using DynamicShapeLoopParams = typename std::tuple< >, int64_t, InputShape, - InferenceEngine::Precision, - std::string, - ov::AnyMap - >; + ov::element::Type, + std::string>; /** * Test case with Dynamic SHAPE version of loop operation. * Total iteration count is dynamic. 
*/ class DynamicShapeLoopTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest { + virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { bool static_iter_num; @@ -52,18 +44,16 @@ class DynamicShapeLoopTest : public testing::WithParamInterface std::shared_ptr { + ov::ParameterVector params{}; + auto cond_input_create = [¶ms] (ov::element::Type model_type, + const ov::PartialShape &shape, + int value = 0, + bool is_static = false) -> std::shared_ptr { if (is_static) - return std::make_shared(prc, shape.to_shape(), value); + return std::make_shared(model_type, shape.to_shape(), value); - auto input = std::make_shared(prc, shape); + auto input = std::make_shared(model_type, shape); params.push_back(input); return input; }; - auto start_add = cond_input_create(prc, inputShape, start_value); + auto start_add = cond_input_create(model_type, inputShape, start_value); start_add->set_friendly_name("start_add"); - auto start_mul = cond_input_create(prc, inputShape, 1); + auto start_mul = cond_input_create(model_type, inputShape, 1); start_mul->set_friendly_name("start_mul"); - auto count = cond_input_create(ngraph::element::i64, scalarShape, max_iter_num, static_iter_num); + auto count = cond_input_create(ov::element::i64, scalarShape, max_iter_num, static_iter_num); count->set_friendly_name("count"); - auto skip = cond_input_create(ngraph::element::boolean, scalarShape, true, static_continue_cond); + auto skip = cond_input_create(ov::element::boolean, scalarShape, true, static_continue_cond); skip->set_friendly_name("skip"); - auto b_indx = std::make_shared(ngraph::element::i64, ngraph::Shape{}); + auto b_indx = std::make_shared(ov::element::i64, ov::Shape{}); b_indx->set_friendly_name("body_index"); - auto b_data_add = std::make_shared(prc, inputShape); + auto b_data_add = std::make_shared(model_type, inputShape); b_data_add->set_friendly_name("b_data_add"); - auto b_data_mul = std::make_shared(prc, inputShape); + auto b_data_mul = std::make_shared(model_type, inputShape); b_data_mul->set_friendly_name("b_data_mul"); - auto b_indx_cast = std::make_shared(b_indx, prc); + auto b_indx_cast = std::make_shared(b_indx, model_type); b_indx_cast->set_friendly_name("body_index_cast"); - auto b_add = std::make_shared(b_data_add, b_indx_cast); + auto b_add = std::make_shared(b_data_add, b_indx_cast); b_add->set_friendly_name("body_add"); - auto b_mul = std::make_shared(b_data_mul, b_indx_cast); + auto b_mul = std::make_shared(b_data_mul, b_indx_cast); b_mul->set_friendly_name("body_mul"); - std::shared_ptr b_cond; + std::shared_ptr b_cond; if (dynamic_exit == -1) { - b_cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + b_cond = std::make_shared(ov::element::boolean, ov::Shape{}, true); b_cond->set_friendly_name("body_condition"); } else { - auto b_exit_value = std::make_shared(ngraph::element::i64, scalarShape, dynamic_exit); + auto b_exit_value = std::make_shared(ov::element::i64, scalarShape, dynamic_exit); b_exit_value->set_friendly_name("body_exit_value"); - b_cond = std::make_shared(b_indx, b_exit_value); + b_cond = std::make_shared(b_indx, b_exit_value); b_cond->set_friendly_name("body_condition_with_exit_value"); } - auto body = std::make_shared( - ngraph::OutputVector {b_cond, b_add, b_mul}, // TODO: check with reverse - ngraph::ParameterVector {b_indx, b_data_add, b_data_mul}); // TODO: check with reverse + auto body = std::make_shared( + ov::OutputVector {b_cond, b_add, b_mul}, // 
TODO: check with reverse + ov::ParameterVector {b_indx, b_data_add, b_data_mul}); // TODO: check with reverse body->set_friendly_name("body_network"); - auto loop = std::make_shared(count, skip); + auto loop = std::make_shared(count, skip); loop->set_friendly_name("loop"); loop->set_function(body); loop->set_special_body_ports({0, 0}); @@ -180,13 +169,13 @@ class DynamicShapeLoopTest : public testing::WithParamInterfaceget_concatenated_slices(b_mul, 0, 1, 1, -1, axis); } - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < loop->get_output_size(); i++) { - auto res = std::make_shared(loop->output(i)); + auto res = std::make_shared(loop->output(i)); res->set_friendly_name("loop_output_" + std::to_string(i)); results.push_back(res); } - function = std::make_shared( + function = std::make_shared( results, params); function->set_friendly_name("outer_body_network"); @@ -194,18 +183,13 @@ class DynamicShapeLoopTest : public testing::WithParamInterface netPrecisions = { - InferenceEngine::Precision::FP32, - InferenceEngine::Precision::I32 -}; - -ov::AnyMap netConfigurations = { - {GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING, PluginConfigParams::NO} +std::vector model_types = { + ov::element::f32, + ov::element::i32 }; static const std::vector> dynamic_loop_types_axis_0 { @@ -224,9 +208,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_axis_0, DynamicShapeLoopTest, /* args_pack */ testing::ValuesIn(dynamic_loop_types_axis_0), /* start_value */ testing::Values(0), /* data_shape */ testing::ValuesIn(inputs_0), - /* data_prc */ testing::ValuesIn(netPrecisions), - /* device */ testing::Values(ov::test::utils::DEVICE_GPU), - /* configuration */ testing::Values(netConfigurations)), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), DynamicShapeLoopTest::getTestCaseName); static const std::vector> dynamic_loop_types_1 { @@ -245,9 +228,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_axis_1, DynamicShapeLoopTest, /* args_pack */ testing::ValuesIn(dynamic_loop_types_1), /* start_value */ testing::Values(0), /* data_shape */ testing::ValuesIn(inputs_1), - /* data_prc */ testing::ValuesIn(netPrecisions), - /* device */ testing::Values(ov::test::utils::DEVICE_GPU), - /* configuration */ testing::Values(netConfigurations)), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), DynamicShapeLoopTest::getTestCaseName); static const std::vector> dynamic_loop_types_2 { @@ -266,9 +248,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_axis_2, DynamicShapeLoopTest, /* args_pack */ testing::ValuesIn(dynamic_loop_types_2), /* start_value */ testing::Values(0), /* data_shape */ testing::ValuesIn(inputs_2), - /* data_prc */ testing::ValuesIn(netPrecisions), - /* device */ testing::Values(ov::test::utils::DEVICE_GPU), - /* configuration */ testing::Values(netConfigurations)), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), DynamicShapeLoopTest::getTestCaseName); static const std::vector> dynamic_loop_types_no_auto_concat { @@ -287,9 +268,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_no_auto_concat, DynamicShapeLoop /* args_pack */ testing::ValuesIn(dynamic_loop_types_no_auto_concat), /* start_value */ testing::Values(0), /* data_shape */ testing::ValuesIn(inputs_no_auto_concat), - /* data_prc */ testing::ValuesIn(netPrecisions), - /* device */ testing::Values(ov::test::utils::DEVICE_GPU), - 
/* configuration */ testing::Values(netConfigurations)), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), DynamicShapeLoopTest::getTestCaseName); static const std::vector> dynamic_loop_types_dynamic_exit { @@ -310,9 +290,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicShapeLoop_dynamic_exit, DynamicShapeLoopTe /* args_pack */ testing::ValuesIn(dynamic_loop_types_dynamic_exit), /* start_value */ testing::Values(0), /* data_shape */ testing::ValuesIn(inputs_dynamic_exit), - /* data_prc */ testing::ValuesIn(netPrecisions), - /* device */ testing::Values(ov::test::utils::DEVICE_GPU), - /* configuration */ testing::Values(netConfigurations)), + /* model_type */ testing::ValuesIn(model_types), + /* device */ testing::Values(ov::test::utils::DEVICE_GPU)), DynamicShapeLoopTest::getTestCaseName); -} // namespace GPULayerTestsDefinitions \ No newline at end of file +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/shared_constant.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/shared_constant.cpp index 55e69e6d32ff4d..aed8fe5672987e 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/shared_constant.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/shared_constant.cpp @@ -2,47 +2,52 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "openvino/core/coordinate_diff.hpp" +#include "openvino/core/strides.hpp" -namespace { - -using namespace ngraph; +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/convolution.hpp" +#include "openvino/op/group_conv.hpp" +#include "openvino/op/multiply.hpp" +namespace { // Validate scenario where a single Constant has multiple users (like one constant is used for Convolution, ConvolutionBackpropData, Multiply, etc.) 
-class SharedConstant : virtual public LayerTestsUtils::LayerTestsCommon { +class SharedConstant : virtual public ov::test::SubgraphBaseStaticTest { protected: void SetUp() override { targetDevice = ov::test::utils::DEVICE_GPU; - auto type = element::f32; - Shape constShape{4, 1, 3, 3}; - Shape convInputShape{1, 1, 5, 5}; - Shape convBackpropInputShape{1, 4, 5, 5}; - Shape constGroupConvBackpropShape{2, 2, 3, 3, 3}; - auto constant = opset8::Constant::create(type, constShape, {1}); - auto input1 = std::make_shared(type, convInputShape); - auto conv = std::make_shared(input1, constant, Strides{1, 1}, CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); - auto input2 = std::make_shared(type, convBackpropInputShape); - auto convBprop = std::make_shared(input2, constant, Strides{1, 1}, - CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); - auto input3 = std::make_shared(type, convBackpropInputShape); - auto constantGroupConv = opset8::Constant::create(type, constGroupConvBackpropShape, {1}); - auto groupConvBprop = std::make_shared(input3, constantGroupConv, Strides{1, 1}, - CoordinateDiff{0, 0}, CoordinateDiff{0, 0}, Strides{1, 1}); - auto input4 = std::make_shared(type, constShape); - auto mul = std::make_shared(input4, constant); - auto input5 = std::make_shared(type, constGroupConvBackpropShape); - auto mul2 = std::make_shared(input5, constantGroupConv); + auto type = ov::element::f32; + ov::Shape constShape{4, 1, 3, 3}; + ov::Shape convInputShape{1, 1, 5, 5}; + ov::Shape convBackpropInputShape{1, 4, 5, 5}; + ov::Shape constGroupConvBackpropShape{2, 2, 3, 3, 3}; + auto constant = ov::op::v0::Constant::create(type, constShape, {1}); + auto input1 = std::make_shared(type, convInputShape); + auto conv = std::make_shared( + input1, constant, ov::Strides{1, 1}, ov::CoordinateDiff{0, 0}, ov::CoordinateDiff{0, 0}, ov::Strides{1, 1}); + auto input2 = std::make_shared(type, convBackpropInputShape); + auto convBprop = std::make_shared(input2, constant, ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, ov::CoordinateDiff{0, 0}, ov::Strides{1, 1}); + auto input3 = std::make_shared(type, convBackpropInputShape); + auto constantGroupConv = ov::op::v0::Constant::create(type, constGroupConvBackpropShape, {1}); + auto groupConvBprop = std::make_shared(input3, constantGroupConv, ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, ov::CoordinateDiff{0, 0}, ov::Strides{1, 1}); + auto input4 = std::make_shared(type, constShape); + auto mul = std::make_shared(input4, constant); + auto input5 = std::make_shared(type, constGroupConvBackpropShape); + auto mul2 = std::make_shared(input5, constantGroupConv); // explicitly set the output name, to avoid global conflict mul2->set_friendly_name("Multiply_0"); mul->set_friendly_name("Multiply_1"); - function = std::make_shared(NodeVector{convBprop, conv, groupConvBprop, mul2, mul}, - ParameterVector{input1, input2, input3, input4, input5}); + function = std::make_shared(ov::NodeVector{convBprop, conv, groupConvBprop, mul2, mul}, + ov::ParameterVector{input1, input2, input3, input4, input5}); } }; -TEST_F(SharedConstant, smoke_SharedConstant) { - Run(); +TEST_F(SharedConstant, Inference) { + run(); } - } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/tensor_iterator.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/tensor_iterator.cpp index 628e26320f4087..dff05c614bb4fb 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/tensor_iterator.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/subgraph_tests/tensor_iterator.cpp @@ -2,66 +2,55 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include "ov_models/utils/ov_helpers.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "common_test_utils/test_constants.hpp" -#include "shared_test_classes/base/utils/ranges.hpp" -#include -#include "shared_test_classes/base/utils/compare_results.hpp" -#include "openvino/pass/constant_folding.hpp" -#include #include "shared_test_classes/base/utils/generate_inputs.hpp" -using namespace InferenceEngine; -using namespace ov::test; - -namespace GPULayerTestsDefinitions { +#include "openvino/op/parameter.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/tensor_iterator.hpp" +namespace { +using ov::test::InputShape; /* * Generate TensorIterator with LSTMCell -* @param ngPrc precision of model +* @param model_type precision of model * @param initShape initial shape {N, L(sequence length), I} * @param N batch size * @param I input size * @param H hidden layer */ -static std::shared_ptr makeTIwithLSTMcell(ov::element::Type_t ngPRC, ov::PartialShape initShape, +static std::shared_ptr makeTIwithLSTMcell(ov::element::Type_t model_type, ov::PartialShape initShape, size_t N, size_t I, size_t H, size_t sequence_axis, - ngraph::op::RecurrentSequenceDirection seq_direction) { - auto SENT = std::make_shared(ngPRC, initShape); + ov::op::RecurrentSequenceDirection seq_direction) { + auto SENT = std::make_shared(model_type, initShape); SENT->set_friendly_name("SENT"); // initial_hidden_state - auto H_init = std::make_shared(ngPRC, ov::Shape{N, 1, H}); + auto H_init = std::make_shared(model_type, ov::Shape{N, 1, H}); H_init->set_friendly_name("H_init"); // initial_cell_state - auto C_init = std::make_shared(ngPRC, ov::Shape{N, 1, H}); + auto C_init = std::make_shared(model_type, ov::Shape{N, 1, H}); C_init->set_friendly_name("C_init"); - auto H_t = std::make_shared(ngPRC, ov::Shape{N, 1, H}); + auto H_t = std::make_shared(model_type, ov::Shape{N, 1, H}); H_t->set_friendly_name("H_t"); - auto C_t = std::make_shared(ngPRC, ov::Shape{N, 1, H}); + auto C_t = std::make_shared(model_type, ov::Shape{N, 1, H}); C_t->set_friendly_name("C_t"); // Body // input data - auto X = std::make_shared(ngPRC, ov::Shape{N, 1, I}); + auto X = std::make_shared(model_type, ov::Shape{N, 1, I}); X->set_friendly_name("X"); // the weights for matrix multiplication, gate order: fico std::vector dataW(4 * H * I, 0); - auto W_body = std::make_shared(ngPRC, ov::Shape{4 * H, I}, dataW); + auto W_body = std::make_shared(model_type, ov::Shape{4 * H, I}, dataW); W_body->set_friendly_name("W_body"); // the recurrence weights for matrix multiplication, gate order: fico std::vector dataR(4 * H * H, 0); - auto R_body = std::make_shared(ngPRC, ov::Shape{4 * H, H}, dataR); + auto R_body = std::make_shared(model_type, ov::Shape{4 * H, H}, dataR); R_body->set_friendly_name("R_body"); std::vector inShape = {N, H}; @@ -100,9 +89,9 @@ static std::shared_ptr makeTIwithLSTMcell(ov::element::Type_t ngPRC, tensor_iterator->set_merged_input(C_t, C_init, C_o); // Set PortMap - if (seq_direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (seq_direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, sequence_axis); - } else if (seq_direction == 
 ngraph::op::RecurrentSequenceDirection::REVERSE) {
+    } else if (seq_direction == ov::op::RecurrentSequenceDirection::REVERSE) {
         tensor_iterator->set_sliced_input(X, SENT, -1, -1, 1, 0, sequence_axis);
     } else {
         OPENVINO_THROW("Bidirectional case is not supported.");
@@ -115,25 +104,25 @@ static std::shared_ptr<ov::Model> makeTIwithLSTMcell(ov::element::Type_t ngPRC,
     auto results = ov::ResultVector{std::make_shared<ov::op::v0::Result>(out0),
                                     std::make_shared<ov::op::v0::Result>(out1)};

-    auto fn_ptr = std::make_shared<ov::Model>(results, ov::ParameterVector{SENT, H_init, C_init});
-    fn_ptr->set_friendly_name("TIwithLSTMcell");
-    return fn_ptr;
+    auto model = std::make_shared<ov::Model>(results, ov::ParameterVector{SENT, H_init, C_init});
+    model->set_friendly_name("TIwithLSTMcell");
+    return model;
 }

 /*
 * Generate LSTMSequence
-* @param ngPrc precision of model
+* @param model_type precision of model
 * @param initShape initial shape {N, L(sequence length), I}
 * @param N batch size
 * @param I input size
 * @param H hidden layer
 */
-static std::shared_ptr<ov::Model> makeLSTMSequence(ov::element::Type_t ngPRC, ov::PartialShape initShape,
+static std::shared_ptr<ov::Model> makeLSTMSequence(ov::element::Type_t model_type, ov::PartialShape initShape,
                                                    size_t N, size_t I, size_t H, size_t sequence_axis,
-                                                   ngraph::op::RecurrentSequenceDirection seq_direction) {
-    auto X = std::make_shared<ov::op::v0::Parameter>(ngPRC, initShape);
-    auto Y = std::make_shared<ov::op::v0::Parameter>(ngPRC, ov::Shape{N, 1, H});
-    auto Z = std::make_shared<ov::op::v0::Parameter>(ngPRC, ov::Shape{N, 1, H});
+    ov::op::RecurrentSequenceDirection seq_direction) {
+    auto X = std::make_shared<ov::op::v0::Parameter>(model_type, initShape);
+    auto Y = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape{N, 1, H});
+    auto Z = std::make_shared<ov::op::v0::Parameter>(model_type, ov::Shape{N, 1, H});
     auto shape_of = std::make_shared<ov::op::v3::ShapeOf>(X);
     auto indices = ov::op::v0::Constant::create(ov::element::i32, {1}, {1});
     auto axis = ov::op::v0::Constant::create(ov::element::i32, {}, {0});
@@ -142,9 +131,9 @@ static std::shared_ptr<ov::Model> makeLSTMSequence(ov::element::Type_t ngPRC, ov
     auto w_val = std::vector<float>(4 * H * I, 0);
     auto r_val = std::vector<float>(4 * H * H, 0);
     auto b_val = std::vector<float>(4 * H, 0);
-    auto W = ov::op::v0::Constant::create(ngPRC, ov::Shape{N, 4 * H, I}, w_val);
-    auto R = ov::op::v0::Constant::create(ngPRC, ov::Shape{N, 4 * H, H}, r_val);
-    auto B = ov::op::v0::Constant::create(ngPRC, ov::Shape{N, 4 * H}, b_val);
+    auto W = ov::op::v0::Constant::create(model_type, ov::Shape{N, 4 * H, I}, w_val);
+    auto R = ov::op::v0::Constant::create(model_type, ov::Shape{N, 4 * H, H}, r_val);
+    auto B = ov::op::v0::Constant::create(model_type, ov::Shape{N, 4 * H}, b_val);

     auto rnn_sequence = std::make_shared<ov::op::v5::LSTMSequence>(X,
                                                                    Y,
@@ -176,33 +165,29 @@
 using DynamicTensorIteratorParams = typename std::tuple<
         LSTMType,                                // LSTM type (LSTMCell, LSTMSequence)
         InputShape,                              // input shapes (N[batch], L[seq_length], I[input_size])
         int32_t,                                 // hidden size
-        ngraph::op::RecurrentSequenceDirection,  // sequence direction
+        ov::op::RecurrentSequenceDirection,      // sequence direction
         std::string,                             // device name
-        InferenceEngine::Precision,              // precision
-        ov::AnyMap                               // configuration
-        >;
+        ov::element::Type>;                      // type
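A note on the port-map hunk above: `FORWARD` slices the sequence axis with `start=0, stride=1, end=-1`, while `REVERSE` mirrors it with `start=-1, stride=-1, end=0`. A minimal standalone sketch of the same wiring, using a trivial one-step ReLU body instead of an LSTM cell (illustrative only, assumes the v0 `TensorIterator` builder API):

```cpp
#include "openvino/openvino.hpp"

// TensorIterator over a [N=1, L=5, C=4] input, one time step per iteration.
std::shared_ptr<ov::Model> make_ti_forward() {
    auto sent = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 5, 4});
    auto x = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 1, 4});
    auto relu = std::make_shared<ov::op::v0::Relu>(x);
    auto body = std::make_shared<ov::Model>(ov::OutputVector{relu}, ov::ParameterVector{x});

    auto ti = std::make_shared<ov::op::v0::TensorIterator>();
    ti->set_body(body);
    const int64_t seq_axis = 1;
    ti->set_sliced_input(x, sent, 0, 1, 1, -1, seq_axis);   // forward: start=0, stride=1, end=-1
    // The reverse direction would instead be:
    // ti->set_sliced_input(x, sent, -1, -1, 1, 0, seq_axis);
    auto out = ti->get_concatenated_slices(relu, 0, 1, 1, -1, seq_axis);
    return std::make_shared<ov::Model>(ov::OutputVector{out}, ov::ParameterVector{sent});
}
```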
 /**
  * Test case with Dynamic SHAPE version of loop operation.
  * Total iteration count is dynamic.
  */
 class DynamicTensorIteratorTest : public testing::WithParamInterface<DynamicTensorIteratorParams>,
-                                  virtual public SubgraphBaseTest {
+                                  virtual public ov::test::SubgraphBaseTest {
 public:
     static std::string getTestCaseName(const testing::TestParamInfo<DynamicTensorIteratorParams> &obj) {
         LSTMType type;
         InputShape data_shapes;
         int32_t hidden_size;
-        ngraph::op::RecurrentSequenceDirection seq_direction;
+        ov::op::RecurrentSequenceDirection seq_direction;
         std::string target_device;
-        InferenceEngine::Precision data_precision;
-        ov::Any configuration;
+        ov::element::Type model_type;
         std::tie(type, data_shapes, hidden_size,
                  seq_direction, target_device,
-                 data_precision,
-                 configuration) = obj.param;
+                 model_type) = obj.param;
         std::ostringstream result;
         result << "TestType=" << (type == LSTMType::LSTMCell? "LSTMCell" : "LSTMSequence") << "_";
         result << "IS=(";
@@ -211,15 +196,15 @@ class DynamicTensorIteratorTest : public testing::WithParamInterface<DynamicTensorIteratorParams,
         batch_size = static_cast<size_t>(init_shape[0].get_length());
         input_size = static_cast<size_t>(init_shape[init_shape.size()-1].get_length());
         if (type == LSTMType::LSTMCell)
-            function = makeTIwithLSTMcell(ngPrc, init_shape, batch_size, input_size, hidden_size, sequence_axis, seq_direction);
+            function = makeTIwithLSTMcell(model_type, init_shape, batch_size, input_size, hidden_size, sequence_axis, seq_direction);
         else
-            function = makeLSTMSequence(ngPrc, init_shape, batch_size, input_size, hidden_size, sequence_axis, seq_direction);
+            function = makeLSTMSequence(model_type, init_shape, batch_size, input_size, hidden_size, sequence_axis, seq_direction);
     }

-    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
         inputs.clear();
         ov::Shape default_shape{batch_size, 1, hidden_size};
         auto inputMap = ov::test::utils::getInputMap();
@@ -283,8 +260,7 @@ class DynamicTensorIteratorTest : public testing::WithParamInterface<DynamicTensorIteratorParams,
 std::vector<int32_t> hidden_sizes = { 128 };

-ov::AnyMap net_configuration = {
-    {GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING, PluginConfigParams::NO}
-};
-
-std::vector<InferenceEngine::Precision> net_precision = {
-    InferenceEngine::Precision::FP32,
+std::vector<ov::element::Type> model_types = {
+    ov::element::f32,
 };

-std::vector<ngraph::op::RecurrentSequenceDirection> reccurent_sequence_direction = {
-    ngraph::op::RecurrentSequenceDirection::FORWARD,
-    ngraph::op::RecurrentSequenceDirection::REVERSE,
+std::vector<ov::op::RecurrentSequenceDirection> reccurent_sequence_direction = {
+    ov::op::RecurrentSequenceDirection::FORWARD,
+    ov::op::RecurrentSequenceDirection::REVERSE,
 };

 INSTANTIATE_TEST_SUITE_P(smoke_DynamicTensorIterator_LSTMCell, DynamicTensorIteratorTest,
@@ -320,8 +292,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicTensorIterator_LSTMCell, DynamicTensorIter
             /* hidden_size */ testing::ValuesIn(hidden_sizes),
             /* direction */ testing::ValuesIn(reccurent_sequence_direction),
             /* device */ testing::Values(ov::test::utils::DEVICE_GPU),
-            /* data_prc */ testing::ValuesIn(net_precision),
-            /* configuration */ testing::Values(net_configuration)),
+            /* model_type */ testing::ValuesIn(model_types)),
         DynamicTensorIteratorTest::getTestCaseName);

 INSTANTIATE_TEST_SUITE_P(smoke_DynamicTensorIterator_LSTMSequence, DynamicTensorIteratorTest,
@@ -331,7 +302,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_DynamicTensorIterator_LSTMSequence, DynamicTensor
             /* hidden_size */ testing::ValuesIn(hidden_sizes),
             /* direction */ testing::ValuesIn(reccurent_sequence_direction),
             /* device */ testing::Values(ov::test::utils::DEVICE_GPU),
-            /* data_prc */ testing::ValuesIn(net_precision),
-            /* configuration */ testing::Values(net_configuration)),
+            /* model_type */ testing::ValuesIn(model_types)),
         DynamicTensorIteratorTest::getTestCaseName);

-} // namespace GPULayerTestsDefinitions
+} // namespace
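The removed `net_configuration` disabled GPU loop unrolling through the legacy `KEY_GPU_ENABLE_LOOP_UNROLLING` switch. If a test still needs that behavior under the 2.0 properties API, the analogous call would look like the sketch below (assumption: `ov::intel_gpu::enable_loop_unrolling` is the current counterpart of the legacy key):

```cpp
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"

// Assumption: ov::intel_gpu::enable_loop_unrolling replaces the removed
// GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING plugin key.
void compile_without_loop_unrolling(const std::shared_ptr<ov::Model>& model) {
    ov::Core core;
    auto compiled = core.compile_model(model, "GPU", ov::intel_gpu::enable_loop_unrolling(false));
    (void)compiled;
}
```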
diff --git a/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp b/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp
index 5fbcea60b3da49..56d5e59076f99a 100644
--- a/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/fake_alignment/fc_fake_alignment_test.cpp
@@ -38,9 +38,10 @@ TEST_P(fully_connected_fake_align_test, fake_alignment) {
     auto& engine = get_test_engine();

+    auto input_size = p.input_layout.get_partial_shape().size();
     auto input_layout_prim = std::make_shared<input_layout>("input", p.input_layout);
     auto weight_layout_prim = std::make_shared<input_layout>("weight", p.weight_layout);
-    auto fully_connected_prim = std::make_shared<fully_connected>("output", input_info("input"), "weight", "", p.data_type);
+    auto fully_connected_prim = std::make_shared<fully_connected>("output", input_info("input"), "weight", "", p.data_type, padding(), input_size);

     cldnn::program prog(engine);
@@ -106,7 +107,51 @@ INSTANTIATE_TEST_SUITE_P(smoke, fully_connected_fake_align_test,
             layout{ov::PartialShape{-1, -1}, data_types::i8, format::bfyx},   // fake_aligned input layout_dgpu  // dummy
             layout{ov::PartialShape{-1, -1}, data_types::f16, format::bfyx}  // fake_aligned output layout_dgpu // dummy
         },
-
+        {
+            layout{ov::PartialShape{1, 55, 511}, data_types::f16, format::bfyx},  // input_layout
+            layout{ov::PartialShape{800, 511}, data_types::f16, format::bfyx},    // weight layout
+            data_types::f16,
+            layout{ov::PartialShape{64, 1, 511}, data_types::f16, format::bfyx},  // fake_aligned input layout_igpu
+            layout{ov::PartialShape{64, 1, 800}, data_types::f16, format::bfyx},  // fake_aligned output layout_igpu
+            layout{ov::PartialShape{56, 1, 511}, data_types::f16, format::bfyx},  // fake_aligned input layout_dgpu
+            layout{ov::PartialShape{56, 1, 800}, data_types::f16, format::bfyx}   // fake_aligned output layout_dgpu
+        },
+        {
+            layout{ov::PartialShape{2, 55, 511}, data_types::f16, format::bfyx},   // input_layout
+            layout{ov::PartialShape{800, 511}, data_types::f16, format::bfyx},     // weight layout
+            data_types::f16,
+            layout{ov::PartialShape{112, 1, 511}, data_types::f16, format::bfyx},  // fake_aligned input layout_igpu
+            layout{ov::PartialShape{112, 1, 800}, data_types::f16, format::bfyx},  // fake_aligned output layout_igpu
+            layout{ov::PartialShape{112, 1, 511}, data_types::f16, format::bfyx},  // fake_aligned input layout_dgpu
+            layout{ov::PartialShape{112, 1, 800}, data_types::f16, format::bfyx}   // fake_aligned output layout_dgpu
+        },
+        {
+            layout{ov::PartialShape{55, 1, 511}, data_types::f16, format::bfyx},  // input_layout
+            layout{ov::PartialShape{800, 511}, data_types::f16, format::bfyx},    // weight layout
+            data_types::f16,
+            layout{ov::PartialShape{64, 1, 511}, data_types::f16, format::bfyx},  // fake_aligned input layout_igpu
+            layout{ov::PartialShape{64, 1, 800}, data_types::f16, format::bfyx},  // fake_aligned output layout_igpu
+            layout{ov::PartialShape{56, 1, 511}, data_types::f16, format::bfyx},  // fake_aligned input layout_dgpu
+            layout{ov::PartialShape{56, 1, 800}, data_types::f16, format::bfyx}   // fake_aligned output layout_dgpu
+        },
+        {
+            layout{ov::PartialShape{55, 1, 511}, data_types::f16, format::bfyx, padding{{2,0,1,0}, 0}},  // input_layout
+            layout{ov::PartialShape{800, 511}, data_types::f16, format::bfyx},                           // weight layout
+            data_types::f16,
+            layout{ov::PartialShape{64, 1, 511}, data_types::f16, format::bfyx, padding{{2,0,1,0}, 0}},  // fake_aligned 
input layout_igpu + layout{ov::PartialShape{64, 1, 800}, data_types::f16, format::bfyx}, // fake_aligned output layout_igpu + layout{ov::PartialShape{56, 1, 511}, data_types::f16, format::bfyx, padding{{2,0,1,0}, 0}}, // fake_aligned input layout_dgpu + layout{ov::PartialShape{56, 1, 800}, data_types::f16, format::bfyx} // fake_aligned output layout_dgpu + }, + { + layout{ov::PartialShape{55, 1, 511}, data_types::f16, format::bfyx, padding{{0,1,1,0}, 0}}, // input_layout + layout{ov::PartialShape{800, 511}, data_types::f16, format::bfyx}, // weight layout + data_types::f16, + layout{ov::PartialShape{55, 1, 511}, data_types::f16, format::bfyx, padding{{0,1,1,0}, 0}}, // fake_aligned input layout_igpu + layout{ov::PartialShape{55, 1, 800}, data_types::f16, format::bfyx}, // fake_aligned output layout_igpu + layout{ov::PartialShape{55, 1, 511}, data_types::f16, format::bfyx, padding{{0,1,1,0}, 0}}, // fake_aligned input layout_dgpu + layout{ov::PartialShape{55, 1, 800}, data_types::f16, format::bfyx} // fake_aligned output layout_dgpu + }, })); } // fake_alignment_tests diff --git a/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp index bcd5b096951def..76ce7077e76565 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/fully_connected_fusion_test.cpp @@ -71,6 +71,17 @@ class FullyConnectedFusingTest : public ::BaseFusingTest{ + fully_connected_test_params{ CASE_FC_FP16_INT4_COMP_1, 2, 3 }, +})); + class fc_int8_eltwise : public FullyConnectedFusingTest {}; TEST_P(fc_int8_eltwise, basic) { auto p = GetParam(); diff --git a/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp b/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp index 06c6f7aac1c6a1..50eace0e091bf3 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp @@ -117,6 +117,9 @@ class BaseFusingTest : public ::testing::TestWithParam { if (l.data_type == data_types::i8 || l.data_type == data_types::u8) { VF rnd_vec = rg.generate_random_1d(s.count(), min_random, max_random); set_values(prim, rnd_vec); + } else if (l.data_type == data_types::i4 || l.data_type == data_types::u4) { + VF rnd_vec = rg.generate_random_1d(l.bytes_count(), min_random, max_random); + set_values(prim, rnd_vec); } else if (l.data_type == data_types::f16) { VF rnd_vec = rg.generate_random_1d(s.count(), -1, 1); set_values(prim, rnd_vec); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp index 17447be4266141..b2871745b0bab3 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp @@ -50,11 +50,11 @@ class check_hash_value: public ::testing::Test { void test_fc_basic(bool is_caching_test) { auto& engine = get_test_engine(); - const int32_t b = 1, in_f = 128, in_x = 1, in_y = 1, out_f = 65; + const int32_t b = 1, in_f = 128, out_f = 65; - auto input_prim = engine.allocate_memory({ { b, in_f, in_y, in_x }, data_types::f32, format::bfyx }); - auto weights_prim = engine.allocate_memory({ { out_f, in_f, in_y, in_x }, data_types::f32, format::bfyx }); - auto bias_prim = engine.allocate_memory({ { 1, 1, out_f, 1 }, data_types::f32, format::bfyx }); + auto input_prim = engine.allocate_memory({ { b, 
in_f }, data_types::f32, format::bfyx });
+        auto weights_prim = engine.allocate_memory({ { out_f, in_f }, data_types::f32, format::bfyx });
+        auto bias_prim = engine.allocate_memory({ { out_f }, data_types::f32, format::bfyx });

         const auto key_prim_id = "fc";
         topology topology(
@@ -72,10 +72,10 @@ class check_hash_value: public ::testing::Test {
         const auto params_hash = primitve->type->get_fake_aligned_params(*prim_inst->get_impl_params()).hash();
         if (!engine.get_device_info().supports_immad) {
             ASSERT_EQ(primitive_hash, 14259723886449306729UL);
-            ASSERT_EQ(params_hash, 1637150664489130388UL);
+            ASSERT_EQ(params_hash, 3365957578641948513UL);
         } else {
             ASSERT_EQ(primitive_hash, 14259723886449306729UL);
-            ASSERT_EQ(params_hash, 6343702278017463925UL);
+            ASSERT_EQ(params_hash, 9831190959346679696UL);
         }
     }
diff --git a/src/plugins/proxy/src/plugin.cpp b/src/plugins/proxy/src/plugin.cpp
index 3a7ce0170b008a..180bb4d1dba26e 100644
--- a/src/plugins/proxy/src/plugin.cpp
+++ b/src/plugins/proxy/src/plugin.cpp
@@ -394,6 +394,17 @@ std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(const std::
     return std::make_shared<ov::proxy::CompiledModel>(device_model, plugin, remote_context);
 }

+std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(const std::string& model_path,
+                                                                     const ov::AnyMap& properties) const {
+    auto dev_name = get_fallback_device(get_device_from_config(properties));
+    auto device_config = construct_device_config(dev_name, m_configs, properties);
+    std::shared_ptr<const ov::IPlugin> plugin = shared_from_this();
+
+    auto device_model = get_core()->compile_model(model_path, dev_name, device_config);
+    auto remote_context = create_proxy_context(device_model, properties);
+    return std::make_shared<ov::proxy::CompiledModel>(device_model, plugin, remote_context);
+}
+
 std::shared_ptr<ov::ICompiledModel> ov::proxy::Plugin::compile_model(
     const std::shared_ptr<const ov::Model>& model,
     const ov::AnyMap& properties,
diff --git a/src/plugins/proxy/src/plugin.hpp b/src/plugins/proxy/src/plugin.hpp
index 10abf1e00a81da..b9429f94d35974 100644
--- a/src/plugins/proxy/src/plugin.hpp
+++ b/src/plugins/proxy/src/plugin.hpp
@@ -25,6 +25,9 @@ class Plugin : public ov::IPlugin {
     std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                       const ov::AnyMap& properties) const override;

+    std::shared_ptr<ov::ICompiledModel> compile_model(const std::string& model_path,
+                                                      const ov::AnyMap& properties) const override;
+
     std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                       const ov::AnyMap& properties,
                                                       const ov::SoPtr<ov::IRemoteContext>& context) const override;
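The new overload lets the proxy forward a model path straight to the fallback device instead of first materializing an `ov::Model` itself. A sketch of the call path from user code (device name is illustrative; any device routed through the proxy behaves the same):

```cpp
#include "openvino/openvino.hpp"

int main() {
    ov::Core core;
    // Compiling by path now reaches ov::proxy::Plugin::compile_model(model_path, ...)
    // directly, so reading the file is delegated to the fallback device plugin.
    auto compiled = core.compile_model("model.xml", "MOCK_DEVICE");  // "MOCK_DEVICE" is a placeholder
    auto request = compiled.create_infer_request();
    return 0;
}
```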
diff --git a/src/plugins/proxy/tests/batch_compliance_test.cpp b/src/plugins/proxy/tests/batch_compliance_test.cpp
index 73cf894291f6cd..f613bde1559de1 100644
--- a/src/plugins/proxy/tests/batch_compliance_test.cpp
+++ b/src/plugins/proxy/tests/batch_compliance_test.cpp
@@ -9,7 +9,7 @@ using namespace ov::proxy::tests;

 TEST_F(ProxyTests, can_parse_and_inherit_batch_property) {
-    register_plugin_support_reshape(core, "MOCK_DEVICE", {{ov::proxy::configuration::alias.name(), "ALIAS_MOCK"}});
+    register_plugin_support_reshape(core, "MOCK_DEVICE", {{ov::proxy::configuration::alias.name(), "MOCK_DEVICE"}});
     auto available_devices = core.get_available_devices();
     auto model = create_model_with_add();
     auto compiled_model_default = core.compile_model(model, "MOCK_DEVICE", ov::hint::performance_mode("THROUGHPUT"));
@@ -31,4 +31,29 @@ TEST_F(ProxyTests, can_parse_and_inherit_batch_property) {
                                                     ov::hint::performance_mode("THROUGHPUT"),
                                                     ov::hint::allow_auto_batching(false));
     EXPECT_ANY_THROW(compiled_model_no_batch.get_property(ov::auto_batch_timeout));
+}
+
+TEST_F(ProxyTests, can_parse_and_inherit_batch_property_for_device_name_with_id) {
+    register_plugin_support_reshape(core, "MOCK_DEVICE", {{ov::proxy::configuration::alias.name(), "MOCK_DEVICE"}});
+    auto available_devices = core.get_available_devices();
+    auto model = create_model_with_add();
+    auto compiled_model_default = core.compile_model(model, "MOCK_DEVICE.1", ov::hint::performance_mode("THROUGHPUT"));
+#ifdef ENABLE_AUTO_BATCH
+    EXPECT_NO_THROW(compiled_model_default.get_property(ov::auto_batch_timeout));  // batch enabled by default
+    EXPECT_EQ(compiled_model_default.get_property(ov::auto_batch_timeout), 1000);  // default value
+#endif
+    auto compiled_model_with_batch = core.compile_model(model,
+                                                        "MOCK_DEVICE.1",
+                                                        ov::hint::performance_mode("THROUGHPUT"),
+                                                        ov::hint::allow_auto_batching(true),
+                                                        ov::auto_batch_timeout(8));
+#ifdef ENABLE_AUTO_BATCH
+    EXPECT_NO_THROW(compiled_model_with_batch.get_property(ov::auto_batch_timeout));
+    EXPECT_EQ(compiled_model_with_batch.get_property(ov::auto_batch_timeout), 8);
+#endif
+    auto compiled_model_no_batch = core.compile_model(model,
+                                                      "MOCK_DEVICE.2",
+                                                      ov::hint::performance_mode("THROUGHPUT"),
+                                                      ov::hint::allow_auto_batching(false));
+    EXPECT_ANY_THROW(compiled_model_no_batch.get_property(ov::auto_batch_timeout));
 }
\ No newline at end of file
diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
index 1ae1cb74130ec4..318ca494d1b850 100644
--- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
+++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp
@@ -6,6 +6,8 @@

 #include

+#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
+
 using namespace ov::test::behavior;

 namespace {
@@ -17,7 +19,7 @@ const std::vector<ov::AnyMap> HeteroConfigs = {{ov::device::priorities(ov::test:
 INSTANTIATE_TEST_SUITE_P(
     smoke_BehaviorTests,
     OVInferRequestDynamicTests,
-    ::testing::Combine(::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
+    ::testing::Combine(::testing::Values(ov::test::utils::make_split_conv_concat()),
                        ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
                            {{1, 4, 20, 20}, {1, 10, 18, 18}},
                            {{2, 4, 20, 20}, {2, 10, 18, 18}}}),
@@ -28,7 +30,7 @@ INSTANTIATE_TEST_SUITE_P(
     smoke_Hetero_BehaviorTests,
     OVInferRequestDynamicTests,
-    ::testing::Combine(::testing::Values(ngraph::builder::subgraph::makeSplitConvConcat()),
+    ::testing::Combine(::testing::Values(ov::test::utils::make_split_conv_concat()),
                        ::testing::Values(std::vector<std::pair<std::vector<size_t>, std::vector<size_t>>>{
                            {{1, 4, 20, 20}, {1, 10, 18, 18}},
                            {{2, 4, 20, 20}, {2, 10, 18, 18}}}),
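The shape pairs above map a dynamic-batch input `{N, 4, 20, 20}` of the SplitConvConcat subgraph to its output `{N, 10, 18, 18}`. A condensed sketch of what these dynamic infer-request tests exercise (assumes the `common_test_utils` subgraph builders are linked; device name matches the template plugin used here):

```cpp
#include "openvino/openvino.hpp"
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"

int main() {
    auto model = ov::test::utils::make_split_conv_concat();
    model->reshape(ov::PartialShape{-1, 4, 20, 20});  // relax the batch dimension
    ov::Core core;
    auto compiled = core.compile_model(model, "TEMPLATE");
    auto request = compiled.create_infer_request();
    ov::Tensor input(ov::element::f32, ov::Shape{2, 4, 20, 20});
    request.set_input_tensor(input);
    request.infer();  // output shape resolves to {2, 10, 18, 18} for this subgraph
    return 0;
}
```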
"ov_models/subgraph_builders.hpp" namespace { using namespace HeteroTests; @@ -21,7 +22,7 @@ INSTANTIATE_TEST_SUITE_P( {"TEMPLATE1", "openvino_template_plugin"}}), ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::withMajorNodesFunctions( [] { - return ngraph::builder::subgraph::makeConvPool2Relu2(); + return ov::test::utils::make_conv_pool2_relu2(); }, {"Conv_1"}, true))), @@ -45,7 +46,7 @@ INSTANTIATE_TEST_SUITE_P( static std::vector()>> dynamicBuilders = { [] { - return ngraph::builder::subgraph::makeConvPoolReluNonZero(); + return ov::test::utils::make_conv_pool_relu_non_zero(); }, }; diff --git a/src/tests/functional/plugin/conformance/test_runner/README.md b/src/tests/functional/plugin/conformance/test_runner/README.md index 12701707469341..f3b770a1f742ec 100644 --- a/src/tests/functional/plugin/conformance/test_runner/README.md +++ b/src/tests/functional/plugin/conformance/test_runner/README.md @@ -86,6 +86,8 @@ The script has the following optional arguments: NOTE: Applicable only for Opset Conformance. * `sm SPECIAL_MODE, --special_mode SPECIAL_MODE` Specify shape mode (`static`, `dynamic` or ``) for Opset conformance or API scope type (`mandatory` or ``). Default value is `` +* `-e ENTITY, --entity ENTITY` + Specify validation entity: `Inference`, `ImportExport` or `QueryModel` for `OP` or `ov`. Default value is `ov_compiled_model`, `ov_infer_request` or `ov_plugin` for `API`. Default value is ``(all) * `p PARALLEL_DEVICES, --parallel_devices PARALLEL_DEVICES` Parallel over HW devices. For example run tests over `GPU.0` and `GPU.1` in case when device are the same * `f EXPECTED_FAILURES, --expected_failures EXPECTED_FAILURES` diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp index 95b29fb8779938..420ea648aa7056 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/main.cpp @@ -22,7 +22,7 @@ void RegisterTestCustomQueries(void) { std::map& extTestQueries = *::PostgreSQLLink::get_ext_test_queries(); std::map& extTestNames = *::PostgreSQLLink::get_ext_test_names(); - std::string testName("checkPluginImplementationCompileModel"); + std::string testName("checkPluginImplementation"); extTestQueries[testName + "_ON_START"] = "OpImplCheck_CheckPluginImpl($__test_id, '$opName', '$opSet', " "'$targetDevice', '$targetDeviceArch', '$targetDeviceName', '$config', $__is_temp)"; diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/models.hpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/models.hpp index 7c259c8960c191..a33c9bafdf6c2d 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/models.hpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/models.hpp @@ -55,6 +55,12 @@ get_model_paths(const std::vector& conformance_ir_paths, //Save it in a list, first value - path, second - amout of tests with this path for (auto& val : tmp_buf) { bool is_op = false; +#ifdef _WIN32 + for (auto it = val.begin(); it != val.end(); ++it) { + if (*it == '/') + val.replace(it, it + 1, ov::test::utils::FileSeparator); + } +#endif for (const auto& path_item : ov::test::utils::splitStringByDelimiter(val, ov::test::utils::FileSeparator)) { auto tmp_path_item = path_item; auto pos = 
tmp_path_item.find('-'); diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp index a90e4393b45d5d..5704638abe3094 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp @@ -74,9 +74,9 @@ std::string ReadIRTest::getTestCaseName(const testing::TestParamInfoget_default_output().get_node_shared_ptr(); auto next_node = param->get_default_output().get_target_inputs().begin()->get_node()->shared_from_this(); auto it = inputMap.find(next_node->get_type_info()); @@ -148,7 +148,7 @@ void ReadIRTest::SetUp() { const_node->set_friendly_name(param->get_friendly_name()); ov::replace_node(param, const_node); parameter_to_remove.push_back(param); - utils::ConstRanges::reset(); + ov::test::utils::reset_const_ranges(); } for (const auto& param : parameter_to_remove) { function->remove_parameter(param); @@ -192,20 +192,23 @@ void ReadIRTest::SetUp() { // Try to resolve missing info if (splittedFilename.size() > 2) { auto pos = splittedFilename[2].find('-'); - std::string op_name = "", op_version = "opset"; + std::string op_name = "", op_version = ""; if (pos != std::string::npos) { op_name = splittedFilename[2].substr(0, pos); - op_version += splittedFilename[2].substr(pos + 1); - if (ov::test::op_conformance::unique_ops.find(op_name) != ov::test::op_conformance::unique_ops.end() && - std::find(ov::test::op_conformance::unique_ops[op_name].begin(), - ov::test::op_conformance::unique_ops[op_name].end(), - op_version) != ov::test::op_conformance::unique_ops[op_name].end()) { + op_version = splittedFilename[2].substr(pos + 1); + if (unique_ops.find(op_name) != unique_ops.end() && + std::find(unique_ops[op_name].begin(), + unique_ops[op_name].end(), + op_version) != unique_ops[op_name].end()) { pgLink->set_custom_field("opName", op_name, true); pgLink->set_custom_field("opSet", op_version, true); } + } else if (splittedFilename.size() > 3 && splittedFilename[3] == "subgraph") { + pgLink->set_custom_field("opName", splittedFilename[1], true); + pgLink->set_custom_field("opSet", "subgraph", true); } else { for (const auto& path_part : splittedFilename) { - if (ov::test::op_conformance::unique_ops.find(path_part) != ov::test::op_conformance::unique_ops.end()) { + if (unique_ops.find(path_part) != unique_ops.end()) { op_name = path_part; break; } diff --git a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp index 6fb1ca5e3d0a55..95311b492f3f59 100644 --- a/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/behavior_test_utils.hpp @@ -10,6 +10,7 @@ #include "common_test_utils/file_utils.hpp" #include "openvino/util/file_util.hpp" #include "functional_test_utils/summary/api_summary.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" namespace BehaviorTestsUtils { @@ -174,7 +175,7 @@ class BehaviorTestsBasic : public BehaviorTestsBasicBase, std::tie(netPrecision, target_device, configuration) = this->GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); + function = ov::test::utils::make_conv_pool_relu(); } void TearDown() override { if 
(!configuration.empty()) { diff --git a/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp b/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp index 91d9398aa018ac..5bd3b7ffb7e6b9 100644 --- a/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp +++ b/src/tests/functional/plugin/shared/include/base/multi/multi_helpers.hpp @@ -10,6 +10,7 @@ #include "common_test_utils/test_constants.hpp" #include "ov_models/subgraph_builders.hpp" #include "openvino/util/common_util.hpp" +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" using namespace ::testing; @@ -31,7 +32,7 @@ class MultiDevice_Test : public ov::test::TestsCommon, public testing::WithParam std::vector deviceNameList; std::tie(deviceNameList, _properties) = this->GetParam(); device_names = getDeviceStringWithMulti(deviceNameList); - fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat(); + fn_ptr = ov::test::utils::make_split_multi_conv_concat(); } public: @@ -61,7 +62,7 @@ class MultiDevice_SupportTest : public ov::test::TestsCommon, public testing::Wi std::vector deviceNameList; std::tie(deviceNameList, expected_status, _properties) = this->GetParam(); device_names = getDeviceStringWithMulti(deviceNameList); - fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat(); + fn_ptr = ov::test::utils::make_split_multi_conv_concat(); } public: diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index c2af9af227ac01..6d311ac379b30c 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -27,6 +27,11 @@ #include "functional_test_utils/blob_utils.hpp" #include "functional_test_utils/summary/api_summary.hpp" #include "openvino/util/file_util.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/kso_func.hpp" +#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp" +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" +#include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { namespace test { @@ -34,7 +39,7 @@ namespace behavior { inline std::shared_ptr getDefaultNGraphFunctionForTheDevice(std::vector inputShape = {1, 2, 32, 32}, ov::element::Type_t ngPrc = ov::element::Type_t::f32) { - return ngraph::builder::subgraph::makeSplitConcat(inputShape, ngPrc); + return ov::test::utils::make_split_concat(inputShape, ngPrc); } inline bool sw_plugin_in_target_device(std::string targetDevice) { @@ -175,13 +180,13 @@ class OVClassNetworkTest { void SetUp() { SKIP_IF_CURRENT_TEST_IS_DISABLED(); // Generic network - actualNetwork = ngraph::builder::subgraph::makeSplitConcat(); + actualNetwork = ov::test::utils::make_split_concat(); // Quite simple network - simpleNetwork = ngraph::builder::subgraph::makeSingleConcatWithConstant(); + simpleNetwork = ov::test::utils::make_single_concat_with_constant(); // Multinput to substruct network - multinputNetwork = ngraph::builder::subgraph::makeConcatWithParams(); + multinputNetwork = ov::test::utils::make_concat_with_params(); // Network with KSO - ksoNetwork = ngraph::builder::subgraph::makeKSOFunction(); + ksoNetwork = ov::test::utils::make_kso_function(); } virtual void setHeteroNetworkAffinity(const std::string &targetDevice) { @@ -240,7 +245,7 @@ class 
OVClassSetDevicePriorityConfigPropsTest : public OVPluginTestBase, std::tie(target_device, configuration) = GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); APIBaseTest::SetUp(); - actualNetwork = ngraph::builder::subgraph::makeSplitConvConcat(); + actualNetwork = ov::test::utils::make_split_conv_concat(); } }; diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp index 866b908c6734e5..5ea0f5425274da 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/compiled_model_base.hpp @@ -14,6 +14,12 @@ #include "functional_test_utils/plugin_cache.hpp" #include "openvino/op/concat.hpp" #include "openvino/runtime/tensor.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" +#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp" +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" +#include "common_test_utils/subgraph_builders/single_split.hpp" +#include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { namespace test { @@ -241,7 +247,7 @@ TEST_P(OVCompiledModelBaseTest, canCompileModelwithBrace) { TEST(OVCompiledModelBaseTest, canCompileModelToDefaultDevice) { std::shared_ptr core = utils::PluginCache::get().core(); - std::shared_ptr function = ngraph::builder::subgraph::makeSingleConcatWithConstant(); + std::shared_ptr function = ov::test::utils::make_single_concat_with_constant(); EXPECT_NO_THROW(auto execNet = core->compile_model(function)); } @@ -280,12 +286,12 @@ TEST_P(OVCompiledModelBaseTest, CanCreateTwoCompiledModelsAndCheckRuntimeModel) TEST_P(OVCompiledModelBaseTest, pluginDoesNotChangeOriginalNetwork) { // compare 2 networks - auto referenceNetwork = ngraph::builder::subgraph::makeConvPoolRelu(); + auto referenceNetwork = ov::test::utils::make_conv_pool_relu(); compare_functions(function, referenceNetwork); } TEST_P(OVCompiledModelBaseTest, CanSetInputPrecisionForNetwork) { - std::shared_ptr model = ngraph::builder::subgraph::makeSingleConcatWithConstant(); + std::shared_ptr model = ov::test::utils::make_single_concat_with_constant(); ov::Core core = createCoreWithTemplate(); auto ppp = ov::preprocess::PrePostProcessor(model); ov::preprocess::InputInfo& input = ppp.input(); @@ -296,7 +302,7 @@ TEST_P(OVCompiledModelBaseTest, CanSetInputPrecisionForNetwork) { } TEST_P(OVCompiledModelBaseTest, CanSetOutputPrecisionForNetwork) { - std::shared_ptr model = ngraph::builder::subgraph::makeSingleConcatWithConstant(); + std::shared_ptr model = ov::test::utils::make_single_concat_with_constant(); ov::Core core = createCoreWithTemplate(); auto ppp = ov::preprocess::PrePostProcessor(model); ov::preprocess::OutputInfo& output = ppp.output(); @@ -451,7 +457,7 @@ TEST_P(OVCompiledModelBaseTestOptional, CheckExecGraphInfoAfterExecution) { TEST_P(OVCompiledModelBaseTest, getInputFromFunctionWithSingleInput) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->inputs().size(), 1); @@ -465,7 +471,7 @@ TEST_P(OVCompiledModelBaseTest, getInputFromFunctionWithSingleInput) { TEST_P(OVCompiledModelBaseTest, 
getOutputFromFunctionWithSingleInput) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->outputs().size(), 1); @@ -479,7 +485,7 @@ TEST_P(OVCompiledModelBaseTest, getOutputFromFunctionWithSingleInput) { TEST_P(OVCompiledModelBaseTest, getInputsFromFunctionWithSeveralInputs) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeConcatWithParams(); + function = ov::test::utils::make_concat_with_params(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->inputs().size(), 2); @@ -500,7 +506,7 @@ TEST_P(OVCompiledModelBaseTest, getInputsFromFunctionWithSeveralInputs) { TEST_P(OVCompiledModelBaseTest, getOutputsFromFunctionWithSeveralOutputs) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat(); + function = ov::test::utils::make_multiple_input_output_double_concat(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->outputs().size(), 2); @@ -521,7 +527,7 @@ TEST_P(OVCompiledModelBaseTest, getOutputsFromFunctionWithSeveralOutputs) { TEST_P(OVCompiledModelBaseTest, getOutputsFromSplitFunctionWithSeveralOutputs) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeSingleSplit(); + function = ov::test::utils::make_single_split(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->outputs().size(), 2); diff --git a/src/tests/functional/plugin/shared/include/behavior/compiled_model/properties.hpp b/src/tests/functional/plugin/shared/include/behavior/compiled_model/properties.hpp index 7ae07a6ebcee6d..586195cf25a866 100644 --- a/src/tests/functional/plugin/shared/include/behavior/compiled_model/properties.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/compiled_model/properties.hpp @@ -11,6 +11,7 @@ #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/file_utils.hpp" #include "common_test_utils/unicode_utils.hpp" +#include "common_test_utils/subgraph_builders/single_conv.hpp" namespace ov { namespace test { @@ -118,7 +119,7 @@ class OVClassCompiledModelGetPropertyTest_Priority : public ::testing::WithParam std::tie(target_device, configuration) = GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); APIBaseTest::SetUp(); - simpleNetwork = ngraph::builder::subgraph::makeSingleConv(); + simpleNetwork = ov::test::utils::make_single_conv(); } }; diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp index 3f9ff55f8fa6af..db7b2ba376a3db 100644 --- a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp @@ -8,6 +8,7 @@ #include "common_test_utils/file_utils.hpp" #include "openvino/core/model.hpp" #include "openvino/op/relu.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" namespace BehaviorTestsDefinitions { class ExecutableNetworkBaseTest : public BehaviorTestsUtils::IEExecutableNetworkTestBase, @@ -292,7 +293,7 @@ TEST_P(ExecutableNetworkBaseTest, canExport) { TEST_P(ExecutableNetworkBaseTest, pluginDoesNotChangeOriginalNetwork) { // compare 2 networks - auto referenceNetwork = 
ngraph::builder::subgraph::makeConvPoolRelu(); + auto referenceNetwork = ov::test::utils::make_conv_pool_relu(); compare_functions(cnnNet.getFunction(), referenceNetwork); } @@ -303,7 +304,7 @@ class ExecNetSetPrecision : public BehaviorTestsUtils::BehaviorTestsBasicBase, std::tie(netPrecision, target_device, configuration) = this->GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); + function = ov::test::utils::make_conv_pool_relu(); } void TearDown() override { if (!configuration.empty()) { diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp index 99c30679b3a798..ed664576d410c7 100644 --- a/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/infer_request/io_blob.hpp @@ -9,6 +9,7 @@ #include "base/behavior_test_utils.hpp" #include "shared_test_classes/subgraph/basic_lstm.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" namespace BehaviorTestsDefinitions { using InferRequestIOBBlobTest = BehaviorTestsUtils::InferRequestTests; @@ -344,11 +345,11 @@ TEST_P(InferRequestIOBBlobTest, canReallocateExternalBlobViaGet) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -455,7 +456,7 @@ class InferRequestIOBBlobSetLayoutTest : public testing::WithParamInterfaceGetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED() - function = ngraph::builder::subgraph::makeConvPoolRelu(); + function = ov::test::utils::make_conv_pool_relu(); cnnNet = InferenceEngine::CNNNetwork(function); execNet = ie->LoadNetwork(cnnNet, target_device, configuration); } diff --git a/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp b/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp index 439f2ee0ecff0a..92282dc7ecff7d 100644 --- a/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/infer_request/set_blob_by_type.hpp @@ -8,6 +8,7 @@ #include "base/behavior_test_utils.hpp" #include "common_test_utils/common_utils.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" namespace BehaviorTestsDefinitions { @@ -42,8 +43,7 @@ class InferRequestSetBlobByType : public testing::WithParamInterface function = ngraph::builder::subgraph::makeConvPoolRelu( - {4, 3, 6, 8}, ngraph::element::Type_t::u8); + std::shared_ptr function = ov::test::utils::make_conv_pool_relu({4, 3, 6, 8}, ov::element::u8); InferenceEngine::CNNNetwork cnnNetwork(function); executableNetwork = ie->LoadNetwork(cnnNetwork, target_device, config); } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index d946a652dbdeaf..010704bedd30b1 100644 --- 
a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -13,6 +13,7 @@ #include "common_test_utils/file_utils.hpp" #include "functional_test_utils/plugin_cache.hpp" +#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" namespace ov { namespace test { @@ -76,7 +77,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedFunction) { ov::CompiledModel execNet; // Create simple function - function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 2, 24, 24}, elementType); + function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); execNet = core->compile_model(function, target_device, configuration); std::stringstream strm; @@ -337,7 +338,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) { InferenceEngine::ExecutableNetwork execNet; // Create simple function - function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 2, 24, 24}, elementType); + function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), target_device, any_copy(configuration)); @@ -471,7 +472,7 @@ TEST_P(OVExecGraphImportExportTest, ieImportExportedFunction) { ov::CompiledModel execNet; // Create simple function - function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 2, 24, 24}, elementType); + function = ov::test::utils::make_multiple_input_output_double_concat({1, 2, 24, 24}, elementType); execNet = core->compile_model(function, target_device, configuration); std::stringstream strm; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp index 592b156d3b038a..372a62a504540c 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp @@ -12,6 +12,12 @@ #include "functional_test_utils/plugin_cache.hpp" #include "openvino/op/concat.hpp" #include "openvino/runtime/tensor.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" +#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp" +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" +#include "common_test_utils/subgraph_builders/single_split.hpp" +#include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { namespace test { @@ -142,7 +148,7 @@ TEST_P(OVExecutableNetworkBaseTest, canLoadNetworkFromMemory) { TEST(OVExecutableNetworkBaseTest, smoke_LoadNetworkToDefaultDeviceNoThrow) { std::shared_ptr core = utils::PluginCache::get().core(); - std::shared_ptr function = ngraph::builder::subgraph::makeSingleConcatWithConstant(); + std::shared_ptr function = ov::test::utils::make_single_concat_with_constant(); EXPECT_NO_THROW(auto execNet = core->compile_model(function)); } @@ -412,14 +418,14 @@ TEST_P(OVExecutableNetworkBaseTest, canExport) { TEST_P(OVExecutableNetworkBaseTest, pluginDoesNotChangeOriginalNetwork) { // compare 2 networks - auto referenceNetwork = ngraph::builder::subgraph::makeConvPoolRelu(); + auto referenceNetwork = 
ov::test::utils::make_conv_pool_relu(); compare_functions(function, referenceNetwork); } TEST_P(OVExecutableNetworkBaseTest, getInputFromFunctionWithSingleInput) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->inputs().size(), 1); @@ -433,7 +439,7 @@ TEST_P(OVExecutableNetworkBaseTest, getInputFromFunctionWithSingleInput) { TEST_P(OVExecutableNetworkBaseTest, getOutputFromFunctionWithSingleInput) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->outputs().size(), 1); @@ -447,7 +453,7 @@ TEST_P(OVExecutableNetworkBaseTest, getOutputFromFunctionWithSingleInput) { TEST_P(OVExecutableNetworkBaseTest, getInputsFromFunctionWithSeveralInputs) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeConcatWithParams(); + function = ov::test::utils::make_concat_with_params(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->inputs().size(), 2); @@ -468,7 +474,7 @@ TEST_P(OVExecutableNetworkBaseTest, getInputsFromFunctionWithSeveralInputs) { TEST_P(OVExecutableNetworkBaseTest, getOutputsFromFunctionWithSeveralOutputs) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat(); + function = ov::test::utils::make_multiple_input_output_double_concat(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->outputs().size(), 2); @@ -489,7 +495,7 @@ TEST_P(OVExecutableNetworkBaseTest, getOutputsFromFunctionWithSeveralOutputs) { TEST_P(OVExecutableNetworkBaseTest, getOutputsFromSplitFunctionWithSeveralOutputs) { ov::CompiledModel execNet; - function = ngraph::builder::subgraph::makeSingleSplit(); + function = ov::test::utils::make_single_split(); execNet = core->compile_model(function, target_device, configuration); EXPECT_EQ(function->outputs().size(), 2); diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp index 52e949df773301..d9a64036e8c668 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp @@ -7,6 +7,7 @@ #include #include +#include "common_test_utils/subgraph_builders/single_conv.hpp" #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT # include @@ -43,7 +44,7 @@ class OVClassExecutableNetworkGetMetricTest_Priority : public ::testing::WithPar std::tie(target_device, configuration) = GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); APIBaseTest::SetUp(); - simpleNetwork = ngraph::builder::subgraph::makeSingleConv(); + simpleNetwork = ov::test::utils::make_single_conv(); } }; using OVClassExecutableNetworkGetMetricTest_DEVICE_PRIORITY = OVClassExecutableNetworkGetMetricTest_Priority; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp index 1387710d6b624d..a679ec6c7a3726 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp +++ 
b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/wait.hpp
@@ -11,6 +11,7 @@ namespace test {
 namespace behavior {
 struct OVInferRequestWaitTests : public OVInferRequestTests {
     void SetUp() override;
+    static std::string getTestCaseName(testing::TestParamInfo<InferRequestParams> obj);
     void TearDown() override;
     ov::InferRequest req;
     ov::Output<const ov::Node> input;
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp
index a6c0f73751bff7..edb49cbbc7e6cd 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp
@@ -21,6 +21,8 @@
 #include "openvino/op/result.hpp"
 #include "openvino/op/shape_of.hpp"
 #include "openvino/util/file_util.hpp"
+#include "common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp"
+#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"

 #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
 #    include
@@ -103,7 +105,7 @@ class OVClassSetDevicePriorityConfigTest : public OVPluginTestBase,
         std::tie(target_device, configuration) = GetParam();
         SKIP_IF_CURRENT_TEST_IS_DISABLED();
         APIBaseTest::SetUp();
-        actualNetwork = ngraph::builder::subgraph::makeSplitConvConcat();
+        actualNetwork = ov::test::utils::make_split_conv_concat();
     }
 };
@@ -221,7 +223,7 @@ TEST(OVClassBasicTest, smoke_createMockEngineConfigThrows) {
 inline void generateModelFile() {
     ov::pass::Manager manager;
     manager.register_pass<ov::pass::Serialize>("test_model.xml", "test_model.bin");
-    auto function = ngraph::builder::subgraph::makeConvPoolReluNoReshapes({1, 3, 227, 227});
+    auto function = ov::test::utils::make_conv_pool_relu_no_reshapes({1, 3, 227, 227});
     manager.run_passes(function);
 }
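For reference, the serialization step that `generateModelFile()` performs is just an `ov::pass::Serialize` pass over the model; a standalone sketch (paths illustrative):

```cpp
#include "openvino/pass/manager.hpp"
#include "openvino/pass/serialize.hpp"

void save_model_to_ir(const std::shared_ptr<ov::Model>& model) {
    ov::pass::Manager manager;
    // Serialize writes the IR xml and the weights bin file side by side.
    manager.register_pass<ov::pass::Serialize>("test_model.xml", "test_model.bin");
    manager.run_passes(model);
}
```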
diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp
index 8ecb1440571873..e2d754cc155849 100644
--- a/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/plugin/auto_batching_tests.hpp
@@ -14,6 +14,9 @@
 #include "ov_models/subgraph_builders.hpp"
 #include "functional_test_utils/blob_utils.hpp"
 #include "base/behavior_test_utils.hpp"
+#include "common_test_utils/subgraph_builders/single_conv.hpp"
+#include "common_test_utils/subgraph_builders/detection_output.hpp"
+#include "common_test_utils/subgraph_builders/multi_single_conv.hpp"

 using namespace ::testing;
 using namespace InferenceEngine;
@@ -30,8 +33,8 @@ class AutoBatching_Test : public BehaviorTestsUtils::IEPluginTestBase,
                           public testing::WithParamInterface {
     void SetUp() override {
         std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam();
-        fn_ptrs = {ngraph::builder::subgraph::makeSingleConv(),
-                   ngraph::builder::subgraph::makeMultiSingleConv()};
+        fn_ptrs = {ov::test::utils::make_single_conv(),
+                   ov::test::utils::make_multi_single_conv()};
     };
 public:
     static std::string getTestCaseName(const testing::TestParamInfo &obj) {
@@ -148,8 +151,8 @@ class AutoBatching_Test_DetectionOutput : public AutoBatching_Test {
 public:
     void SetUp() override {
         std::tie(target_device, use_get_blob, num_streams, num_requests, num_batch) = this->GetParam();
-        fn_ptrs = {ngraph::builder::subgraph::makeDetectionOutput(),
-                   ngraph::builder::subgraph::makeDetectionOutput()};
+        fn_ptrs = {ov::test::utils::make_detection_output(),
+                   ov::test::utils::make_detection_output()};
     };

     static std::string getTestCaseName(const testing::TestParamInfo &obj) {
diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp
index 20d923061467e0..360b676a09caf1 100644
--- a/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/plugin/caching_tests.hpp
@@ -14,6 +14,7 @@
 #include "common_test_utils/unicode_utils.hpp"
 #include "openvino/util/common_util.hpp"
 #include "base/behavior_test_utils.hpp"
+#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"

 #include
 #include
@@ -80,7 +81,7 @@ class LoadNetworkCompiledKernelsCacheTest : virtual public LayerTestsUtils::Laye
         std::tie(targetDevice, userConfig) = GetParam();
         target_device = targetDevice;
         APIBaseTest::SetUp();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::utils::make_conv_pool_relu();
         configuration = userConfig.first;
         std::string ext = userConfig.second;
         std::string::size_type pos = 0;
diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp
index 1524ab7bc6d855..6f52af2995661d 100644
--- a/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/plugin/configuration_tests.hpp
@@ -19,6 +19,7 @@
 #include "common_test_utils/file_utils.hpp"
 #include "functional_test_utils/plugin_cache.hpp"
 #include "base/behavior_test_utils.hpp"
+#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"

 namespace BehaviorTestsDefinitions {
@@ -74,7 +75,7 @@ class BehaviorTestsEmptyConfig : public testing::WithParamInterface
         target_device = this->GetParam();
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
         APIBaseTest::SetUp();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::utils::make_conv_pool_relu();
         cnnNet = InferenceEngine::CNNNetwork(function);
     }
 };
@@ -139,7 +140,7 @@ class CorrectConfigTests : public testing::WithParamInterface
         GetParam();
         SKIP_IF_CURRENT_TEST_IS_DISABLED();
         APIBaseTest::SetUp();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::utils::make_conv_pool_relu();
         cnnNet = InferenceEngine::CNNNetwork(function);
     }
@@ -164,7 +165,7 @@ class BehaviorTestsSingleOptionCustom : public testing::WithParamInterface
         entry;
         std::tie(target_device, entry) = this->GetParam();
         std::tie(key, value, reference) = entry;
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::utils::make_conv_pool_relu();
         cnnNet = InferenceEngine::CNNNetwork(function);
     }
@@ -185,7 +186,7 @@ class BehaviorTestsSingleOption : public testing::WithParamInterface
         GetParam();
         SKIP_IF_CURRENT_TEST_IS_DISABLED();
         APIBaseTest::SetUp();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::utils::make_conv_pool_relu();
         cnnNet = InferenceEngine::CNNNetwork(function);
     }
@@ -231,7 +232,7 @@ class SetPropLoadNetWorkGetPropTests : public testing::WithParamInterface
         GetParam();
         SKIP_IF_CURRENT_TEST_IS_DISABLED();
         APIBaseTest::SetUp();
-        function = ngraph::builder::subgraph::makeConvPoolRelu();
+        function = ov::test::utils::make_conv_pool_relu();
         cnnNet = InferenceEngine::CNNNetwork(function);
     }
diff --git 
a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index d49e0dfd17c2bd..192dc5da09491a 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -13,6 +13,7 @@ #include "common_test_utils/file_utils.hpp" #include "common_test_utils/unicode_utils.hpp" #include "openvino/util/file_util.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT #include @@ -449,23 +450,23 @@ TEST_P(IEClassNetworkTestP, SetAffinityWithConstantBranches) { { ngraph::PartialShape shape({1, 84}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); auto matMulWeights = - ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {10, 84}, {1}); - auto shapeOf = std::make_shared(matMulWeights); - auto gConst1 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i32, {1}, {1}); - auto gConst2 = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {}, {0}); - auto gather = std::make_shared(shapeOf, gConst1, gConst2); - auto concatConst = ngraph::opset6::Constant::create(ngraph::element::Type_t::i64, {1}, {1}); + ov::op::v0::Constant::create(ngraph::element::Type_t::f32, {10, 84}, {1}); + auto shapeOf = std::make_shared(matMulWeights); + auto gConst1 = ov::op::v0::Constant::create(ngraph::element::Type_t::i32, {1}, {1}); + auto gConst2 = ov::op::v0::Constant::create(ngraph::element::Type_t::i64, {}, {0}); + auto gather = std::make_shared(shapeOf, gConst1, gConst2); + auto concatConst = ov::op::v0::Constant::create(ngraph::element::Type_t::i64, {1}, {1}); auto concat = - std::make_shared(ngraph::NodeVector{concatConst, gather}, 0); - auto relu = std::make_shared(param); - auto reshape = std::make_shared(relu, concat, false); - auto matMul = std::make_shared(reshape, matMulWeights, false, true); + std::make_shared(ngraph::NodeVector{concatConst, gather}, 0); + auto relu = std::make_shared(param); + auto reshape = std::make_shared(relu, concat, false); + auto matMul = std::make_shared(reshape, matMulWeights, false, true); auto matMulBias = - ngraph::opset6::Constant::create(ngraph::element::Type_t::f32, {1, 10}, {1}); - auto addBias = std::make_shared(matMul, matMulBias); - auto result = std::make_shared(addBias); + ov::op::v0::Constant::create(ngraph::element::Type_t::f32, {1, 10}, {1}); + auto addBias = std::make_shared(matMul, matMulBias); + auto result = std::make_shared(addBias); ngraph::ParameterVector params = {param}; ngraph::ResultVector results = {result}; @@ -907,7 +908,7 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) { TEST(IEClassBasicTest, smoke_LoadNetworkToDefaultDeviceNoThrow) { SKIP_IF_CURRENT_TEST_IS_DISABLED() InferenceEngine::CNNNetwork actualCnnNetwork; - std::shared_ptr actualNetwork = ngraph::builder::subgraph::makeSplitConvConcat(); + std::shared_ptr actualNetwork = ov::test::utils::make_split_conv_concat(); ASSERT_NO_THROW(actualCnnNetwork = InferenceEngine::CNNNetwork(actualNetwork)); InferenceEngine::Core ie = BehaviorTestsUtils::createIECoreWithTemplate(); ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork)); diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp 
b/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp index 34bc6b593e307e..d4dee3c250a83f 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_threading.hpp @@ -16,6 +16,7 @@ #include #include #include "base/behavior_test_utils.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include #include @@ -25,6 +26,11 @@ #include #include #include "base/ov_behavior_test_utils.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/single_conv.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" +#include "common_test_utils/subgraph_builders/2_input_subtract.hpp" using Device = std::string; using Config = std::map; @@ -150,7 +156,7 @@ TEST_P(CoreThreadingTests, smoke_GetMetric) { // tested function: QueryNetwork TEST_P(CoreThreadingTests, smoke_QueryNetwork) { InferenceEngine::Core ie; - InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::make2InputSubtract()); + InferenceEngine::CNNNetwork network(ov::test::utils::make_2_input_subtract()); ie.SetConfig(config, target_device); InferenceEngine::QueryNetworkResult refResult = ie.QueryNetwork(network, target_device); @@ -225,14 +231,14 @@ class CoreThreadingTestsWithIterations : public testing::WithParamInterface(in_prec, ov::Shape(inputShape))}; - auto toF32 = std::make_shared(paramsIn[0], ngraph::element::Type_t::f32); + auto toF32 = std::make_shared(paramsIn[0], ngraph::element::Type_t::f32); - auto constNode = std::make_shared( + auto constNode = std::make_shared( ngraph::element::Type_t::i64, ngraph::Shape{inputShape.size()}, inputShape); std::shared_ptr reshape_input = with_extra_conv ? 
toF32->shared_from_this() : paramsIn[0]; - auto reshape = std::dynamic_pointer_cast( - std::make_shared(reshape_input, constNode, specialZero)); - ngraph::ResultVector results{std::make_shared(reshape)}; + auto reshape = std::dynamic_pointer_cast( + std::make_shared(reshape_input, constNode, specialZero)); + ngraph::ResultVector results{std::make_shared(reshape)}; return std::make_shared(results, paramsIn, "Reshape"); }; diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp index e2e7b12bb3eba9..92d9c67231c762 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/set_preprocess.hpp @@ -64,11 +64,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanImagePreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -130,11 +130,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanImagePreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -199,11 +199,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanValuePreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -259,11 +259,11 @@ TEST_P(InferRequestPreprocessTest, SetMeanValuePreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -321,11 +321,11 @@ TEST_P(InferRequestPreprocessTest, ReverseInputChannelsPreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = 
std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -382,11 +382,11 @@ TEST_P(InferRequestPreprocessTest, ReverseInputChannelsPreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -446,11 +446,11 @@ TEST_P(InferRequestPreprocessTest, SetScalePreProcessGetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -506,11 +506,11 @@ TEST_P(InferRequestPreprocessTest, SetScalePreProcessSetBlob) { { ngraph::PartialShape shape({1, 3, 10, 10}); ngraph::element::Type type(ngraph::element::Type_t::f32); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -656,11 +656,11 @@ TEST_P(InferRequestPreprocessConversionTest, Infer) { { ngraph::PartialShape shape({batch, channels, shape_size, shape_size}); ngraph::element::Type type(InferenceEngine::details::convertPrecision(netPrecision)); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -850,11 +850,11 @@ TEST_P(InferRequestPreprocessDynamicallyInSetBlobTest, Infer) { { ngraph::PartialShape shape({batch, channels, shape_size, shape_size}); ngraph::element::Type type(InferenceEngine::details::convertPrecision(netPrecision)); - auto param = std::make_shared(type, shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; @@ -982,11 +982,11 @@ TEST_P(InferRequestPreprocessTest, InferWithRGB2BGRConversion) { { ngraph::PartialShape shape({batch, channels, shape_size, shape_size}); ngraph::element::Type type(InferenceEngine::details::convertPrecision(netPrecision)); - auto param = std::make_shared(type, 
shape); + auto param = std::make_shared(type, shape); param->set_friendly_name("param"); - auto relu = std::make_shared(param); + auto relu = std::make_shared(param); relu->set_friendly_name("relu"); - auto result = std::make_shared(relu); + auto result = std::make_shared(relu); result->set_friendly_name("result"); ngraph::ParameterVector params = {param}; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp index 48c3ba7d2b8353..da93245b4fe0a4 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/concat_transformation.hpp @@ -15,10 +15,10 @@ namespace LayerTestsDefinitions { class ConcatTransformationTestValues { public: - std::shared_ptr input_constant1; + std::shared_ptr input_constant1; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData1; ngraph::builder::subgraph::DequantizationOperations dequantization1; - std::shared_ptr input_constant2; + std::shared_ptr input_constant2; ngraph::builder::subgraph::FakeQuantizeOnData fqOnData2; ngraph::builder::subgraph::DequantizationOperations dequantization2; }; diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp index 2b96b2e57206d3..45b1947bb37c57 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/depth_to_space_transformation.hpp @@ -15,7 +15,7 @@ typedef std::tuple< ngraph::element::Type, ngraph::PartialShape, std::string, - ngraph::opset1::DepthToSpace::DepthToSpaceMode, + ov::op::v0::DepthToSpace::DepthToSpaceMode, size_t> DepthToSpaceTransformationParams; class DepthToSpaceTransformation : diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp index bfdf719f47224b..17fb3a7663a385 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/pad_transformation.hpp @@ -21,7 +21,7 @@ class PadTransformationParam { typedef std::tuple< ngraph::element::Type, ngraph::PartialShape, - ngraph::op::PadMode, + ov::op::PadMode, std::string, ov::pass::low_precision::LayerTransformation::Params, PadTransformationParam diff --git a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp b/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp index 04639ab3ea134d..3b2f7e40aacab9 100644 --- a/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp +++ b/src/tests/functional/plugin/shared/include/multi/multi_remote_blob_multidevice_test.hpp @@ -8,7 +8,7 @@ #include "openvino/runtime/core.hpp" #include "openvino/runtime/properties.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" -#include +#include #include "common_test_utils/ov_tensor_utils.hpp" TEST_P(MultiDeviceMultipleGPU_Test, canCreateRemoteTensorThenInferWithAffinity) { diff --git 
a/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp index fab554125229db..a7d05848121acd 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/loop.hpp @@ -47,19 +47,19 @@ TEST_P(TrivialLoopTest, PassThroughBody) { const auto shape = ngraph::Shape{ieShape}; const auto scalarShape = ngraph::Shape{}; - auto start = std::make_shared(prc, shape); - auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); - auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto start = std::make_shared(prc, shape); + auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); + auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape); + auto b_data = std::make_shared(prc, shape); + auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape); auto body = std::make_shared( ngraph::OutputVector {b_cond, b_data}, // | passthrough body, no data changes ngraph::ParameterVector {b_cond, b_data}); // | input -> output - auto loop = std::make_shared(count, icond); + auto loop = std::make_shared(count, icond); loop->set_function(body); loop->set_special_body_ports({-1, 0}); loop->set_invariant_input(b_cond, icond); @@ -91,20 +91,20 @@ TEST_P(TrivialLoopTest, UnusedInputBody) { const auto shape = ngraph::Shape{ieShape}; const auto scalarShape = ngraph::Shape{}; - auto start = std::make_shared(prc, shape); - auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); - auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto start = std::make_shared(prc, shape); + auto count = std::make_shared(ngraph::element::i64, scalarShape, 5); + auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true); // Loop body - auto b_data = std::make_shared(prc, shape); - auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true); - auto b_iter = std::make_shared(ngraph::element::i64, scalarShape); + auto b_data = std::make_shared(prc, shape); + auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true); + auto b_iter = std::make_shared(ngraph::element::i64, scalarShape); auto body = std::make_shared( ngraph::OutputVector {b_cond, b_data}, ngraph::ParameterVector {b_data, b_iter}); - auto loop = std::make_shared(count, icond); + auto loop = std::make_shared(count, icond); loop->set_function(body); loop->set_special_body_ports({1, 0}); loop->set_invariant_input(b_data, start); diff --git a/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp b/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp index cabf234085ee79..f75cb259fced23 100644 --- a/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp +++ b/src/tests/functional/plugin/shared/include/subgraph_tests/basic_lstm.hpp @@ -32,7 +32,7 @@ TEST_P(Basic_LSTM_S, CompareWithRefImpl_LowLatencyTransformation) { // todo: it is better to modify the model -> use ShapeOf() and Gather() std::vector outFormShapes1 = { 1, 1, third_dim }; - auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{3}, outFormShapes1); + auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{3}, outFormShapes1); auto param_target_inputs = 
function->get_parameters().at(0)->output(0).get_target_inputs();
 // replace hardcoded shape
diff --git a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp
index 6fed553fef2e33..eaac5f85f317f4 100644
--- a/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/compiled_model/properties.cpp
@@ -7,6 +7,7 @@
 #include
 #include "openvino/runtime/properties.hpp"
+#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp"
 #include
@@ -74,7 +75,7 @@ void OVCompileModelGetExecutionDeviceTests::SetUp() {
 std::tie(target_device, userConfig) = GetParam();
 compileModelProperties = userConfig.first;
 expectedDeviceName = userConfig.second;
- model = ngraph::builder::subgraph::makeConvPoolRelu();
+ model = ov::test::utils::make_conv_pool_relu();
 }
 TEST_P(OVClassCompiledModelPropertiesTests, CanUseCache) {
diff --git a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp
index f4466c80ea9208..38072636f3d370 100644
--- a/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/executable_network/exec_graph_info.cpp
@@ -466,9 +466,9 @@ void ExecGraphUniqueNodeNames::SetUp() {
 auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
 auto split = std::make_shared(params[0], split_axis_op, 2);
- auto concat = std::make_shared(split->outputs(), 1);
+ auto concat = std::make_shared(split->outputs(), 1);
- ngraph::ResultVector results{std::make_shared(concat)};
+ ngraph::ResultVector results{std::make_shared(concat)};
 fnPtr = std::make_shared(results, params, "SplitConvConcat");
 }
diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp
index 413c125f82dece..ad1bac678f81de 100644
--- a/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/memory_states.cpp
@@ -39,18 +39,18 @@ InferenceEngine::CNNNetwork InferRequestVariableStateTest::getNetwork() {
 ngraph::Shape shape = {1, 200};
 ngraph::element::Type type = ngraph::element::f32;
- auto input = std::make_shared(type, shape);
- auto mem_i1 = std::make_shared(type, shape, 0);
- auto mem_r1 = std::make_shared(mem_i1, "r_1-3");
- auto mul1 = std::make_shared(mem_r1, input);
-
- auto mem_i2 = std::make_shared(type, shape, 0);
- auto mem_r2 = std::make_shared(mem_i2, "c_1-3");
- auto mul2 = std::make_shared(mem_r2, mul1);
- auto mem_w2 = std::make_shared(mul2, "c_1-3");
-
- auto mem_w1 = std::make_shared(mul2, "r_1-3");
- auto sigm = std::make_shared(mul2);
+ auto input = std::make_shared(type, shape);
+ auto mem_i1 = std::make_shared(type, shape, 0);
+ auto mem_r1 = std::make_shared(mem_i1, "r_1-3");
+ auto mul1 = std::make_shared(mem_r1, input);
+
+ auto mem_i2 = std::make_shared(type, shape, 0);
+ auto mem_r2 = std::make_shared(mem_i2, "c_1-3");
+ auto mul2 = std::make_shared(mem_r2, mul1);
+ auto mem_w2 = std::make_shared(mul2, "c_1-3");
+
+ auto mem_w1 = std::make_shared(mul2, "r_1-3");
+ auto sigm = std::make_shared(mul2);
 sigm->set_friendly_name("sigmod_state");
 mem_r1->set_friendly_name("Memory_1");
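// [Editor's note, illustrative sketch - not part of the patch] Extraction has
// stripped every angle-bracketed template argument from this diff, which is
// why the removed and added lines above look identical. The hunk presumably
// swaps ngraph::opsetN types for their versioned ov::op::vN equivalents; below
// is a condensed, self-contained sketch of that pattern with a single state
// variable (the exact opset numbers on the removed lines are an assumption):

#include "openvino/core/model.hpp"
#include "openvino/op/assign.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/read_value.hpp"
#include "openvino/op/result.hpp"
#include "openvino/op/sigmoid.hpp"

std::shared_ptr<ov::Model> make_state_subgraph() {
    ov::Shape shape = {1, 200};
    ov::element::Type type = ov::element::f32;
    // before: auto mem_r1 = std::make_shared<ngraph::opset3::ReadValue>(mem_i1, "r_1-3");
    auto input = std::make_shared<ov::op::v0::Parameter>(type, shape);
    auto mem_i1 = std::make_shared<ov::op::v0::Constant>(type, shape, 0);
    auto mem_r1 = std::make_shared<ov::op::v3::ReadValue>(mem_i1, "r_1-3");  // read the variable
    auto mul1 = std::make_shared<ov::op::v1::Multiply>(mem_r1, input);
    auto mem_w1 = std::make_shared<ov::op::v3::Assign>(mul1, "r_1-3");       // write it back
    auto sigm = std::make_shared<ov::op::v0::Sigmoid>(mul1);
    mem_w1->add_control_dependency(mem_r1);  // keep the Assign node alive, as in the hunk above
    sigm->add_control_dependency(mem_w1);
    return std::make_shared<ov::Model>(ov::ResultVector{std::make_shared<ov::op::v0::Result>(sigm)},
                                       ov::ParameterVector{input});
}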
mem_w1->add_control_dependency(mem_r1); diff --git a/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp b/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp index f73267628dc8d9..494056d5c21af2 100644 --- a/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/infer_request/set_io_blob_precision.cpp @@ -105,9 +105,9 @@ void SetBlobTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(precNg); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(IS))}; - auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{-1})->output(0); + auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{-1})->output(0); auto cumSum = std::make_shared(params[0], axisNode, false, false); - ngraph::ResultVector results{std::make_shared(cumSum)}; + ngraph::ResultVector results{std::make_shared(cumSum)}; function = std::make_shared(results, params, "InferSetBlob"); } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index 7a6e30368bca8e..f252aa8792f327 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -52,14 +52,14 @@ std::shared_ptr OVInferRequestBatchedTests::create_n_inputs(size_t n, ele ParameterVector params; for (size_t i = 0; i < n; i++) { auto index_str = std::to_string(i); - auto data1 = std::make_shared(type, shape); + auto data1 = std::make_shared(type, shape); data1->set_friendly_name("input" + index_str); data1->get_output_tensor(0).set_names({"tensor_input" + index_str}); data1->set_layout(layout); auto constant = opset8::Constant::create(type, {1}, {1}); - auto op1 = std::make_shared(data1, constant); + auto op1 = std::make_shared(data1, constant); op1->set_friendly_name("Add" + index_str); - auto res1 = std::make_shared(op1); + auto res1 = std::make_shared(op1); res1->set_friendly_name("Result" + index_str); res1->get_output_tensor(0).set_names({"tensor_output" + index_str}); params.push_back(data1); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp index 029c903aa3a1f6..7390c0726747fb 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -93,7 +93,10 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetwork) { ov::InferRequest req; const std::string outputname = function->outputs().back().get_any_name(); for (auto& shape : vectorShapes) { - ov::runtime::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, 100, -50); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -50; + in_data.range = 100; + ov::runtime::Tensor inTensor = ov::test::utils::create_and_fill_tensor(element::f32, shape, in_data); OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", inTensor)); OV_ASSERT_NO_THROW(req.infer()); @@ -115,11 +118,15 @@ TEST_P(OVInferRequestDynamicTests, 
InferDynamicNetworkSetUnexpectedOutputTensorB
 ov::runtime::Tensor tensor, otensor;
 const std::string outputname = function->outputs().back().get_any_name();
 OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
- tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+ ov::test::utils::InputGenerateData in_data;
+ in_data.start_from = -50;
+ in_data.range = 100;
+ tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, in_data);
 OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
 auto outShape = refOutShape;
 outShape[0] += 1;
- otensor = ov::test::utils::create_and_fill_tensor(element::f32, outShape, 100, 50);
+ in_data.start_from = 50;
+ otensor = ov::test::utils::create_and_fill_tensor(element::f32, outShape, in_data);
 OV_ASSERT_NO_THROW(req.set_tensor(outputname, otensor));
 OV_ASSERT_NO_THROW(req.infer());
 ASSERT_EQ(otensor.get_shape(), refOutShape);
@@ -140,7 +147,10 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputTensorPreAllocate
 ov::runtime::Tensor tensor;
 const std::string outputname = function->outputs().back().get_any_name();
 OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
- tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+ ov::test::utils::InputGenerateData in_data;
+ in_data.start_from = -50;
+ in_data.range = 100;
+ tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, in_data);
 OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
 float ptr[5000];
 ov::runtime::Tensor otensor(element::f32, refOutShape, ptr);
@@ -165,7 +175,10 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkSetOutputShapeBeforeInfer)
 ov::runtime::Tensor tensor, otensor;
 const std::string outputname = function->outputs().back().get_any_name();
 OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
- tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+ ov::test::utils::InputGenerateData in_data;
+ in_data.start_from = -50;
+ in_data.range = 100;
+ tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, in_data);
 OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
 OV_ASSERT_NO_THROW(otensor = req.get_tensor(outputname));
 OV_ASSERT_NO_THROW(otensor.set_shape(refOutShape));
@@ -189,7 +202,10 @@ TEST_P(OVInferRequestDynamicTests, InferDynamicNetworkGetOutputThenSetOutputTens
 ov::runtime::Tensor tensor;
 const std::string outputname = function->outputs().back().get_any_name();
 OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
- tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, 100, -50);
+ ov::test::utils::InputGenerateData in_data;
+ in_data.start_from = -50;
+ in_data.range = 100;
+ tensor = ov::test::utils::create_and_fill_tensor(element::f32, refShape, in_data);
 OV_ASSERT_NO_THROW(req.set_tensor("input_tensor", tensor));
 // first, get output tensor
 OV_ASSERT_NO_THROW(req.infer());
diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp
index 71c4629bbb7def..ac4197bec8af25 100644
--- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp
@@ -30,13 +30,13 @@ std::shared_ptr OVInferRequestInferenceTests::create_n_inputs(size_t n,
 ParameterVector params;
 for (size_t i = 0; i < n; i++) {
 auto index_str = std::to_string(i);
- auto data1 =
std::make_shared(type, shape); + auto data1 = std::make_shared(type, shape); data1->set_friendly_name("input" + index_str); data1->get_output_tensor(0).set_names({"tensor_input" + index_str}); auto constant = opset8::Constant::create(type, {1}, {1}); - auto op1 = std::make_shared(data1, constant); + auto op1 = std::make_shared(data1, constant); op1->set_friendly_name("Add" + index_str); - auto res1 = std::make_shared(op1); + auto res1 = std::make_shared(op1); res1->set_friendly_name("Result" + index_str); res1->get_output_tensor(0).set_names({"tensor_output" + index_str}); params.push_back(data1); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp index c205ec0c1f4e26..c1bfefc2eb2e43 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp @@ -11,6 +11,9 @@ #include "openvino/op/parameter.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/result.hpp" +#include "common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp" +#include "common_test_utils/subgraph_builders/single_split.hpp" +#include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { namespace test { @@ -282,7 +285,7 @@ void OVInferRequestIOTensorSetPrecisionTest::SetUp() { std::tie(element_type, target_device, config) = this->GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); execNet = core->compile_model(function, target_device, config); req = execNet.create_infer_request(); } @@ -384,7 +387,7 @@ void OVInferRequestCheckTensorPrecision::TearDown() { } TEST_P(OVInferRequestCheckTensorPrecision, getInputFromFunctionWithSingleInput) { - model = ngraph::builder::subgraph::makeSplitConcat({1, 4, 24, 24}, element_type); + model = ov::test::utils::make_split_concat({1, 4, 24, 24}, element_type); createInferRequest(); ov::Tensor tensor1, tensor2; @@ -400,7 +403,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getInputFromFunctionWithSingleInput) } TEST_P(OVInferRequestCheckTensorPrecision, getOutputFromFunctionWithSingleInput) { - model = ngraph::builder::subgraph::makeSplitConcat({1, 4, 24, 24}, element_type); + model = ov::test::utils::make_split_concat({1, 4, 24, 24}, element_type); createInferRequest(); ov::Tensor tensor1, tensor2; @@ -416,7 +419,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getOutputFromFunctionWithSingleInput) } TEST_P(OVInferRequestCheckTensorPrecision, getInputsFromFunctionWithSeveralInputs) { - model = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 1, 32, 32}, element_type); + model = ov::test::utils::make_multiple_input_output_double_concat({1, 1, 32, 32}, element_type); createInferRequest(); ov::Tensor tensor1, tensor2; @@ -447,7 +450,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getInputsFromFunctionWithSeveralInput } TEST_P(OVInferRequestCheckTensorPrecision, getOutputsFromFunctionWithSeveralOutputs) { - model = ngraph::builder::subgraph::makeMultipleInputOutputDoubleConcat({1, 1, 32, 32}, element_type); + model = ov::test::utils::make_multiple_input_output_double_concat({1, 1, 32, 32}, element_type); createInferRequest(); ov::Tensor tensor1, tensor2; @@ -478,7 +481,7 @@ TEST_P(OVInferRequestCheckTensorPrecision, getOutputsFromFunctionWithSeveralOutp } 
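// [Editor's note, illustrative sketch - not part of the patch] The
// infer_request_dynamic.cpp hunks earlier in this diff replace the positional
// (range, start_from) arguments of ov::test::utils::create_and_fill_tensor()
// with an InputGenerateData struct. A self-contained sketch of the new call
// pattern; the field semantics are inferred from the values the hunks swap in,
// and the shape here is arbitrary:

#include "common_test_utils/ov_tensor_utils.hpp"

ov::Tensor make_example_input() {
    ov::test::utils::InputGenerateData in_data;
    in_data.start_from = -50;  // previously the trailing positional argument
    in_data.range = 100;       // previously the leading positional argument
    return ov::test::utils::create_and_fill_tensor(ov::element::f32, ov::Shape{1, 4, 20, 20}, in_data);
}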
TEST_P(OVInferRequestCheckTensorPrecision, getOutputsFromSplitFunctionWithSeveralOutputs) { - model = ngraph::builder::subgraph::makeSingleSplit({1, 4, 24, 24}, element_type); + model = ov::test::utils::make_single_split({1, 4, 24, 24}, element_type); createInferRequest(); ov::Tensor tensor1, tensor2; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp index e5023c6e2f588a..d32c0667cfd7e1 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/perf_counters.cpp @@ -5,6 +5,7 @@ #include "behavior/ov_infer_request/perf_counters.hpp" #include "openvino/runtime/profiling_info.hpp" +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" namespace ov { namespace test { @@ -13,7 +14,7 @@ void OVInferRequestPerfCountersTest::SetUp() { std::tie(target_device, configuration) = this->GetParam(); SKIP_IF_CURRENT_TEST_IS_DISABLED() APIBaseTest::SetUp(); - function = ngraph::builder::subgraph::makeConcatWithParams(); + function = ov::test::utils::make_concat_with_params(); configuration.insert(ov::enable_profiling(true)); execNet = core->compile_model(function, target_device, configuration); req = execNet.create_infer_request(); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp index 898fd9c94ff717..8ff25a6b3771e8 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp @@ -17,6 +17,10 @@ void OVInferRequestWaitTests::SetUp() { output = execNet.output(); } +std::string OVInferRequestWaitTests::getTestCaseName(testing::TestParamInfo obj) { + return OVInferRequestTests::getTestCaseName(obj); +} + void OVInferRequestWaitTests::TearDown() { req = {}; input = {}; @@ -76,6 +80,12 @@ TEST_P(OVInferRequestWaitTests, throwExceptionOnGetTensorAfterAsyncInfer) { OV_ASSERT_NO_THROW(req.wait()); } +TEST_P(OVInferRequestWaitTests, FailedAsyncInferWithNegativeTimeForWait) { + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + ASSERT_THROW(req.wait_for(std::chrono::milliseconds{-1}), ov::Exception); +} + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp index ab8b5602224ca0..7496cc9d5fdc13 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp @@ -13,12 +13,22 @@ #include "common_test_utils/file_utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" #include "functional_test_utils/summary/api_summary.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "ov_models/builders.hpp" #include "ov_models/subgraph_builders.hpp" #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include "openvino/core/node_vector.hpp" #include "openvino/op/parameter.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/kso_func.hpp" +#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp" +#include 
"common_test_utils/subgraph_builders/single_conv.hpp" +#include "common_test_utils/subgraph_builders/2_input_subtract.hpp" +#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/conv_bias.hpp" +#include "common_test_utils/subgraph_builders/read_concat_split_assign.hpp" +#include "common_test_utils/subgraph_builders/matmul_bias.hpp" #define GTEST_COUT std::cout << "[ ] [ INFO ] " @@ -77,37 +87,37 @@ std::vector CompileModelCacheTestBase::getNumericTypeOnlyFuncti res.push_back(ovModelWithName { simple_function_multiply, "SimpleFunctionMultiply"}); res.push_back(ovModelWithName { simple_function_relu, "SimpleFunctionRelu"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeConvPoolRelu, {1, 1, 32, 32}), + inputShapeWrapper(ov::test::utils::make_conv_pool_relu, {1, 1, 32, 32}), "ConvPoolRelu"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcat, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_split_conv_concat, {1, 4, 20, 20}), "SplitConvConcat"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeKSOFunction, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_kso_function, {1, 4, 20, 20}), "KSOFunction"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSingleConv, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_single_conv, {1, 3, 24, 24}), "SingleConv"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::make2InputSubtract, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_2_input_subtract, {1, 3, 24, 24}), "2InputSubtract"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeNestedSplitConvConcat, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_nested_split_conv_concat, {1, 4, 20, 20}), "NestedSplitConvConcat"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatInputInBranch, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_cplit_conv_concat_input_in_branch, {1, 4, 20, 20}), "SplitConvConcatInputInBranch"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_cplit_conv_concat_nested_in_branch, {1, 4, 20, 20}), "SplitConvConcatNestedInBranch"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranchNestedOut, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_cplit_conv_concat_nested_in_branch_nested_out, {1, 4, 20, 20}), "SplitConvConcatNestedInBranchNestedOut"}); res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeConvBias, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_conv_bias, {1, 3, 24, 24}), "ConvBias"}); res.push_back(ovModelWithName{ - inputShapeWrapper(ngraph::builder::subgraph::makeMatMulBias, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_matmul_bias, {1, 3, 24, 24}), "MatMulBias" }); return res; } @@ -115,7 +125,7 @@ std::vector CompileModelCacheTestBase::getNumericTypeOnlyFuncti std::vector CompileModelCacheTestBase::getAnyTypeOnlyFunctions() { std::vector res; res.push_back(ovModelWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeReadConcatSplitAssign, {1, 1, 2, 4}), + inputShapeWrapper(ov::test::utils::make_read_concat_split_assign, {1, 1, 
2, 4}), "ReadConcatSplitAssign"}); return res; } @@ -123,7 +133,7 @@ std::vector CompileModelCacheTestBase::getAnyTypeOnlyFunctions( std::vector CompileModelCacheTestBase::getFloatingPointOnlyFunctions() { std::vector res; res.push_back(ovModelWithName { [](ngraph::element::Type type, size_t batchSize) { - return ngraph::builder::subgraph::makeTIwithLSTMcell(type, batchSize); + return ov::test::utils::make_ti_with_lstm_cell(type, batchSize); }, "TIwithLSTMcell1"}); return res; } @@ -293,8 +303,7 @@ void CompileModelLoadFromFileTestBase::SetUp() { core->set_property(ov::cache_dir()); ov::pass::Manager manager; manager.register_pass(m_modelName, m_weightsName); - manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu( - {1, 3, 227, 227}, InferenceEngine::details::convertPrecision(InferenceEngine::Precision::FP32))); + manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::f32)); } void CompileModelLoadFromFileTestBase::TearDown() { @@ -376,9 +385,7 @@ void CompileModelLoadFromMemoryTestBase::SetUp() { core->set_property(ov::cache_dir()); ov::pass::Manager manager; manager.register_pass(m_modelName, m_weightsName); - manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu( - {1, 3, 227, 227}, - InferenceEngine::details::convertPrecision(InferenceEngine::Precision::FP32))); + manager.run_passes(ov::test::utils::make_conv_pool_relu({1, 3, 227, 227}, ov::element::f32)); try { std::ifstream model_file(m_modelName, std::ios::binary); @@ -500,7 +507,7 @@ std::string CompiledKernelsCacheTest::getTestCaseName(testing::TestParamInfo userConfig; std::tie(targetDevice, userConfig) = GetParam(); target_device = targetDevice; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp index 9e2c472e30a1e7..690acee8afdf42 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp @@ -6,6 +6,7 @@ #include #include "behavior/ov_plugin/life_time.hpp" +#include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { namespace test { @@ -24,7 +25,7 @@ void OVHoldersTest::SetUp() { if (deathTestStyle == "fast") { ::testing::GTEST_FLAG(death_test_style) = "threadsafe"; } - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); } void OVHoldersTest::TearDown() { @@ -139,7 +140,7 @@ void OVHoldersTestOnImportedNetwork::SetUp() { if (deathTestStyle == "fast") { ::testing::GTEST_FLAG(death_test_style) = "threadsafe"; } - function = ngraph::builder::subgraph::makeSplitConcat(); + function = ov::test::utils::make_split_concat(); } void OVHoldersTestOnImportedNetwork::TearDown() { diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index 34cdd1757b2f8e..cf1566dc5e4809 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -2,9 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "behavior/ov_plugin/properties_tests.hpp" #include "openvino/runtime/properties.hpp" -#include +#include "common_test_utils/subgraph_builders/split_concat.hpp" namespace ov { namespace test { @@ -27,7 +29,7 @@ void OVPropertiesTests::SetUp() { std::tie(target_device, 
properties) = this->GetParam(); APIBaseTest::SetUp(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); - model = ngraph::builder::subgraph::makeSplitConcat(); + model = ov::test::utils::make_split_concat(); } void OVPropertiesTests::TearDown() { @@ -57,7 +59,7 @@ std::string OVSetPropComplieModleGetPropTests::getTestCaseName(testing::TestPara void OVSetPropComplieModleGetPropTests::SetUp() { SKIP_IF_CURRENT_TEST_IS_DISABLED(); std::tie(target_device, properties, compileModelProperties) = this->GetParam(); - model = ngraph::builder::subgraph::makeSplitConcat(); + model = ov::test::utils::make_split_concat(); } std::string OVPropertiesTestsWithCompileModelProps::getTestCaseName(testing::TestParamInfo obj) { @@ -96,7 +98,7 @@ void OVPropertiesTestsWithCompileModelProps::SetUp() { compileModelProperties = {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , hw_device}}; } - model = ngraph::builder::subgraph::makeSplitConcat(); + model = ov::test::utils::make_split_concat(); APIBaseTest::SetUp(); } @@ -607,6 +609,16 @@ TEST_P(OVGetMetricPropsTest, GetMetricAndPrintNoThrow_AVAILABLE_DEVICES) { OV_ASSERT_PROPERTY_SUPPORTED(ov::available_devices); } +TEST_P(OVGetMetricPropsTest, GetMetriDeviceFullNameWithoutAdditionalTerminatorChars) { + ov::Core core = createCoreWithTemplate(); + auto supported_properties = core.get_property(target_device, ov::supported_properties); + if (util::contains(supported_properties, ov::device::full_name)) { + std::string full_name; + OV_ASSERT_NO_THROW(full_name = core.get_property(target_device, ov::device::full_name)); + EXPECT_EQ(full_name.size(), strlen(full_name.c_str())); + } +} + TEST_P(OVGetMetricPropsTest, GetMetricAndPrintNoThrow_OPTIMIZATION_CAPABILITIES) { ov::Core ie = createCoreWithTemplate(); std::vector capabilities; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp index 932a8163e825a2..48abe578a6dd3a 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp @@ -5,7 +5,7 @@ #include "behavior/ov_plugin/remote.hpp" #include "transformations/utils/utils.hpp" #include "functional_test_utils/skip_tests_config.hpp" -#include "ov_models/subgraph_builders.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" namespace ov { namespace test { @@ -45,7 +45,7 @@ void OVRemoteTest::SetUp() { std::pair param_pair; std::tie(element_type, target_device, config, param_pair) = GetParam(); std::tie(context_parameters, tensor_parameters) = param_pair; - function = ngraph::builder::subgraph::makeConvPoolRelu({1, 1, 32, 32}, element_type); + function = ov::test::utils::make_conv_pool_relu({1, 1, 32, 32}, element_type); exec_network = core.compile_model(function, target_device, config); infer_request = exec_network.create_infer_request(); input = function->get_parameters().front(); diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp index 3ca54eb5c80566..4b8f27afd780f1 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/caching_tests.cpp @@ -10,6 +10,14 @@ #include "common_test_utils/file_utils.hpp" #include "ov_models/builders.hpp" #include "ov_models/subgraph_builders.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" +#include 
"common_test_utils/subgraph_builders/kso_func.hpp" +#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp" +#include "common_test_utils/subgraph_builders/single_conv.hpp" +#include "common_test_utils/subgraph_builders/2_input_subtract.hpp" +#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/conv_bias.hpp" +#include "common_test_utils/subgraph_builders/matmul_bias.hpp" using namespace InferenceEngine::details; using namespace InferenceEngine; @@ -20,16 +28,16 @@ namespace LayerTestsDefinitions { static std::shared_ptr simple_function_multiply(ngraph::element::Type type, size_t batchSize) { // Create Parameter operation with static shape - auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); + auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); data->set_friendly_name("Parameter"); - auto constant = ngraph::opset6::Constant::create(type, ngraph::Shape{1}, {2}); + auto constant = ov::op::v0::Constant::create(type, ngraph::Shape{1}, {2}); constant->set_friendly_name("constant"); - auto mul = std::make_shared(data, constant); + auto mul = std::make_shared(data, constant); mul->set_friendly_name("mul"); // Create Result operation - auto res = std::make_shared(mul); + auto res = std::make_shared(mul); res->set_friendly_name("res"); // Create nGraph function @@ -40,14 +48,14 @@ static std::shared_ptr simple_function_multiply(ngraph::elemen static std::shared_ptr simple_function_relu(ngraph::element::Type type, size_t batchSize) { // Create Parameter operation with static shape - auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); + auto data = std::make_shared(type, ngraph::Shape{batchSize, 2}); data->set_friendly_name("Parameter"); - auto relu = std::make_shared(data); + auto relu = std::make_shared(data); relu->set_friendly_name("relu"); // Create Result operation - auto res = std::make_shared(relu); + auto res = std::make_shared(relu); res->set_friendly_name("res"); // Create nGraph function @@ -72,34 +80,34 @@ std::vector LoadNetworkCacheTestBase::getNumericTypeOnly inputShapeWrapper(ngraph::builder::subgraph::makeConvPoolRelu, {1, 1, 32, 32}), "ConvPoolRelu"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcat, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_split_conv_concat, {1, 4, 20, 20}), "SplitConvConcat"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeKSOFunction, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_kso_function, {1, 4, 20, 20}), "KSOFunction"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSingleConv, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_single_conv, {1, 3, 24, 24}), "SingleConv"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::make2InputSubtract, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_2_input_subtract, {1, 3, 24, 24}), "2InputSubtract"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeNestedSplitConvConcat, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_nested_split_conv_concat, {1, 4, 20, 20}), "NestedSplitConvConcat"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatInputInBranch, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_cplit_conv_concat_input_in_branch, {1, 4, 20, 20}), 
"SplitConvConcatInputInBranch"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_cplit_conv_concat_nested_in_branch, {1, 4, 20, 20}), "SplitConvConcatNestedInBranch"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeSplitConvConcatNestedInBranchNestedOut, {1, 4, 20, 20}), + inputShapeWrapper(ov::test::utils::make_cplit_conv_concat_nested_in_branch_nested_out, {1, 4, 20, 20}), "SplitConvConcatNestedInBranchNestedOut"}); res.push_back(nGraphFunctionWithName { - inputShapeWrapper(ngraph::builder::subgraph::makeConvBias, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_conv_bias, {1, 3, 24, 24}), "ConvBias"}); res.push_back(nGraphFunctionWithName{ - inputShapeWrapper(ngraph::builder::subgraph::makeMatMulBias, {1, 3, 24, 24}), + inputShapeWrapper(ov::test::utils::make_matmul_bias, {1, 3, 24, 24}), "MatMulBias" }); return res; } @@ -113,7 +121,7 @@ std::vector LoadNetworkCacheTestBase::getAnyTypeOnlyFunc std::vector LoadNetworkCacheTestBase::getFloatingPointOnlyFunctions() { std::vector res; res.push_back(nGraphFunctionWithName { [](ngraph::element::Type type, size_t batchSize) { - return ngraph::builder::subgraph::makeTIwithLSTMcell(type, batchSize); + return ov::test::utils::make_ti_with_lstm_cell(type, batchSize); }, "TIwithLSTMcell1"}); return res; } diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp index bd165465edc894..5e257bee4903db 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/hetero_synthetic.cpp @@ -10,15 +10,19 @@ #include "openvino/util/file_util.hpp" #include #include "ie_algorithm.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp" +#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp" namespace HeteroTests { static std::vector()>> builders = { - [] {return ngraph::builder::subgraph::makeSplitMultiConvConcat();}, - [] {return ngraph::builder::subgraph::makeNestedSplitConvConcat();}, - [] {return ngraph::builder::subgraph::makeSplitConvConcatNestedInBranch();}, - [] {return ngraph::builder::subgraph::makeSplitConvConcatNestedInBranchNestedOut();}, - [] {return ngraph::builder::subgraph::makeNestedBranchConvConcat();}, + [] {return ov::test::utils::make_split_multi_conv_concat();}, + [] {return ov::test::utils::make_nested_split_conv_concat();}, + [] {return ov::test::utils::make_cplit_conv_concat_nested_in_branch();}, + [] {return ov::test::utils::make_cplit_conv_concat_nested_in_branch_nested_out();}, + [] {return ov::test::utils::make_nested_branch_conv_concat();}, }; std::vector HeteroSyntheticTest::withMajorNodesFunctions( diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp index af13b3bfe5107f..646ee9598e21d6 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/life_time.cpp @@ -4,7 +4,7 @@ #include -#include +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include #include 
"behavior/plugin/life_time.hpp" @@ -31,7 +31,7 @@ namespace BehaviorTestsDefinitions { std::tie(target_device, order) = this->GetParam(); APIBaseTest::SetUp(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); + function = ov::test::utils::make_conv_pool_relu(); } void release_order_test(std::vector order, const std::string &target_device, @@ -109,7 +109,7 @@ namespace BehaviorTestsDefinitions { void HoldersTestOnImportedNetwork::SetUp() { target_device = this->GetParam(); APIBaseTest::SetUp(); - function = ngraph::builder::subgraph::makeConvPoolRelu(); + function = ov::test::utils::make_conv_pool_relu(); SKIP_IF_CURRENT_TEST_IS_DISABLED(); } diff --git a/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp index 94a017ae58ecdc..3be75d71952ec3 100644 --- a/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/plugin/stress_tests.cpp @@ -4,6 +4,7 @@ #include "behavior/plugin/stress_tests.hpp" #include "ov_models/subgraph_builders.hpp" +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" namespace LayerTestsDefinitions { @@ -21,7 +22,7 @@ std::string MultipleAllocations::getTestCaseName(const testing::TestParamInfoGetParam(); - function = ngraph::builder::subgraph::makeSplitConvConcat(); + function = ov::test::utils::make_split_conv_concat(); } TEST_P(MultipleAllocations, InferWorksCorrectAfterAllocations) { diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp index 049bacd5a14787..379ce5943b4b0b 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/keep_assign.cpp @@ -30,15 +30,14 @@ TEST_P(ExecGraphKeepAssignNode, KeepAssignNode) { ngraph::element::Type type = ngraph::element::f32; using std::make_shared; - using namespace ngraph::opset5; // Some simple graph with Memory(Assign) node // in read // - auto input = make_shared(type, shape); // | \ / // - auto mem_i = make_shared(type, shape, 0); // | mul // - auto mem_r = make_shared(mem_i, "id"); // | / \ // - auto mul = make_shared(mem_r, input); // sum assign // - auto mem_w = make_shared(mul, "id"); // | // - auto sum = make_shared(mul, input); // out // + auto input = make_shared(type, shape); // | \ / // + auto mem_i = make_shared(type, shape, 0); // | mul // + auto mem_r = make_shared(mem_i, "id"); // | / \ // + auto mul = make_shared(mem_r, input); // sum assign // + auto mem_w = make_shared(mul, "id"); // | // + auto sum = make_shared(mul, input); // out // mem_w->add_control_dependency(mem_r); sum->add_control_dependency(mem_w); diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp index 78052b1f7e8fe0..8f4e535455043e 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/nms_transformation_for_last_node.cpp @@ -44,8 +44,8 @@ TEST_P(ExecGraphNmsTransformLastNode, CheckIfCanBeInfered) { float in_boxes[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; float in_scores[8] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; - auto 
boxes = std::make_shared(element::f32, boxes_shape); - auto scores = std::make_shared(element::f32, scores_shape); + auto boxes = std::make_shared(element::f32, boxes_shape); + auto scores = std::make_shared(element::f32, scores_shape); auto max_output_boxes_per_class = opset5::Constant::create(element::i64, Shape{}, {10}); auto iou_threshold = opset5::Constant::create(element::f32, Shape{}, {0.75}); auto score_threshold = opset5::Constant::create(element::f32, Shape{}, {0.7}); @@ -53,7 +53,7 @@ TEST_P(ExecGraphNmsTransformLastNode, CheckIfCanBeInfered) { iou_threshold, score_threshold, opset5::NonMaxSuppression::BoxEncodingType::CORNER, true, element::i64); ngraph::ResultVector results { - std::make_shared(nms->output(0)), + std::make_shared(nms->output(0)), }; auto f = std::make_shared(results, ParameterVector{boxes, scores}, "NMS"); diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp index e8e35d3c872dfa..474d6b7869954c 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp @@ -28,7 +28,7 @@ void ExecGraphInputsFusingBinConv::SetUp() { const size_t numOutChannels = 16, numGroups = 16; const std::vector strides = {1, 1}, dilations = {1, 1}; const std::vector padsBegin = {1, 1}, padsEnd = {0, 0}; - const ngraph::op::PadType paddingType = ngraph::op::PadType::EXPLICIT; + const ov::op::PadType paddingType = ov::op::PadType::EXPLICIT; const float padValue = 1.0; targetDevice = this->GetParam(); @@ -38,9 +38,9 @@ void ExecGraphInputsFusingBinConv::SetUp() { auto conv = ngraph::builder::makeGroupConvolution(binConv, ngraph::element::f32, convKernelSize, strides, padsBegin, padsEnd, dilations, paddingType, numOutChannels, numGroups); - auto biasNode = std::make_shared(ngraph::element::f32, std::vector{16, 1, 1}); - auto add = std::make_shared(conv, biasNode); - ngraph::ResultVector results{std::make_shared(add)}; + auto biasNode = std::make_shared(ngraph::element::f32, std::vector{16, 1, 1}); + auto add = std::make_shared(conv, biasNode); + ngraph::ResultVector results{std::make_shared(add)}; fnPtr = std::make_shared(results, params, "BinConvFuseConv"); } diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp index dd29dd66c55b27..66ca2aa3243eb3 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp +++ b/src/tests/functional/plugin/shared/src/execution_graph_tests/remove_parameter.cpp @@ -42,8 +42,8 @@ TEST_P(ExecGraphRemoveParameterNode, RemoveParameterNode) { // out // auto input = make_shared(type, shape); auto input2 = make_shared(type, shape); - auto mul = make_shared(input2, input); - auto sum = make_shared(mul, input); + auto mul = make_shared(input2, input); + auto sum = make_shared(mul, input); auto function = std::make_shared( ngraph::NodeVector{sum}, ngraph::ParameterVector{input2, input}, diff --git a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp index 97071f12363855..16fcd73411f726 100644 --- a/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp +++ 
b/src/tests/functional/plugin/shared/src/execution_graph_tests/runtime_precision.cpp @@ -47,10 +47,10 @@ std::shared_ptr makeFakeQuantizeReluFunction(const std::vector auto inputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {255}); auto outputLowNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {0}); auto outputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {255}); - auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 256); + auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 256); fakeQuantize->set_friendly_name("FakeQuantize"); - auto relu = std::make_shared(fakeQuantize); + auto relu = std::make_shared(fakeQuantize); relu->set_friendly_name("Relu"); auto function = std::make_shared(relu, inputs, "FakeQuantizeRelu"); @@ -66,10 +66,10 @@ std::shared_ptr makeFakeQuantizeBinaryConvolutionFunction(cons auto inputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {1}); auto outputLowNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {0}); auto outputHighNode = ngraph::builder::makeConstant(ngraph::element::f32, {1, 1, 1, 1}, {1}); - auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 2); + auto fakeQuantize = std::make_shared(inputs[0], inputLowNode, inputHighNode, outputLowNode, outputHighNode, 2); fakeQuantize->set_friendly_name("FakeQuantize"); - auto binConv = ngraph::builder::makeBinaryConvolution(fakeQuantize, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, ngraph::op::PadType::EXPLICIT, 32, 0); + auto binConv = ngraph::builder::makeBinaryConvolution(fakeQuantize, {3, 3}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, ov::op::PadType::EXPLICIT, 32, 0); binConv->set_friendly_name("BinaryConvolution"); auto function = std::make_shared(binConv, inputs, "FakeQuantizeBinaryConvolution"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp index d7336b247a3026..84a35de492b7f6 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/depth_to_space_transformation.cpp @@ -28,20 +28,18 @@ #include "ov_lpt_models/depth_to_space.hpp" -using namespace ngraph::opset1; - namespace LayerTestsDefinitions { std::string DepthToSpaceTransformation::getTestCaseName(const testing::TestParamInfo& obj) { - static std::map names = { - {DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, - {DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, + static std::map names = { + {ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, + {ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"}, }; ngraph::element::Type precision; ngraph::PartialShape inputShape; std::string targetDevice; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; size_t blockSize; auto params = LayerTestsUtils::LayerTransformationParamsNGraphFactory::createParamsU8I8(); std::tie(precision, inputShape, targetDevice, mode, blockSize) = obj.param; @@ -55,7 +53,7 @@ std::string DepthToSpaceTransformation::getTestCaseName(const testing::TestParam void DepthToSpaceTransformation::SetUp() { 
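    // A minimal sketch of the renamed enum in use, assuming the usual opset
    // mapping (ngraph::opset1::DepthToSpace and ov::op::v0::DepthToSpace name
    // the same operation); the alias D2SMode and the map name are illustrative:
    //   using D2SMode = ov::op::v0::DepthToSpace::DepthToSpaceMode;
    //   static const std::map<D2SMode, std::string> d2s_mode_names = {
    //       {D2SMode::BLOCKS_FIRST, "BLOCKS_FIRST"},
    //       {D2SMode::DEPTH_FIRST, "DEPTH_FIRST"},
    //   };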
ngraph::element::Type precision; ngraph::PartialShape inputShape; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; size_t blockSize; std::tie(precision, inputShape, targetDevice, mode, blockSize) = this->GetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp index 56012b6d91289f..c3ca18b5bf485c 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/interpolate_transformation.cpp @@ -55,7 +55,7 @@ void InterpolateTransformation::SetUp() { interpAttributes attributes; std::tie(precision, shapes, targetDevice, attributes) = this->GetParam(); - ngraph::op::InterpolateAttrs interpAttrs; + ov::op::v0::Interpolate::Attributes interpAttrs; interpAttrs.axes = attributes.axes; interpAttrs.mode = attributes.mode; interpAttrs.align_corners = attributes.align_corners; diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp index f91f934de24c99..9339d4c7fab388 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat.cpp @@ -70,7 +70,7 @@ void OutputLayersConcat::SetUp() { auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); + const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); input1->set_friendly_name("input1"); const auto fakeQuantize1 = ngraph::builder::makeFakeQuantize( @@ -80,7 +80,7 @@ void OutputLayersConcat::SetUp() { ASSERT_EQ(4ul, inputShape1.size()) << "unexpected input layout"; const InferenceEngine::SizeVector inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] }; - const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); + const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); input2->set_friendly_name("input2"); const auto fakeQuantize2 = ngraph::builder::makeFakeQuantize( @@ -88,12 +88,12 @@ void OutputLayersConcat::SetUp() { { 0.f }, { 255.f / 2.f }, { 0.f }, { 255.f / 2.f }); fakeQuantize2->set_friendly_name("fakeQuantize2"); - const std::shared_ptr concat = std::make_shared( + const std::shared_ptr concat = std::make_shared( ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1); concat->set_friendly_name("concat"); const float k = 1.f; - const auto weights = ngraph::opset1::Constant::create( + const auto weights = ov::op::v0::Constant::create( ngPrecision, ngraph::Shape{ inputShape1[1ul] + inputShape2[1ul], inputShape1[1ul] + inputShape2[1ul], 1ul, 1ul }, std::vector((inputShape1[1ul] + inputShape2[1ul]) * (inputShape1[1ul] + inputShape2[1ul]), 1ul)); @@ -103,7 +103,7 @@ void OutputLayersConcat::SetUp() { { -128.f / k }, { 127.f / k }, { -128.f / k }, { 127.f / k }); fakeQuantizeOnWeights->set_friendly_name("fakeQuantizeOnWeights"); - const std::shared_ptr convolution = std::make_shared( + const std::shared_ptr convolution = std::make_shared( concat->output(0), fakeQuantizeOnWeights, ngraph::Strides{ 1ul, 1ul }, @@ 
-113,9 +113,9 @@ void OutputLayersConcat::SetUp() { convolution->set_friendly_name("convolution"); ngraph::ResultVector results { - std::make_shared(concat), - std::make_shared(convolution), - std::make_shared(fakeQuantize2) + std::make_shared(concat), + std::make_shared(convolution), + std::make_shared(fakeQuantize2) }; function = std::make_shared(results, ngraph::ParameterVector { input1, input2 }, "OutputLayersHandling"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp index bb82dbdaf77459..eb095e6ab5ecd1 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_concat_multi_channel.cpp @@ -82,7 +82,7 @@ void OutputLayersConcatMultiChannel::SetUp() { auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); + const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); input1->set_friendly_name("input1"); const auto fakeQuantize1 = ngraph::builder::makeFakeQuantize(input1->output(0), ngPrecision, 256ul, { 1ul }); @@ -90,24 +90,24 @@ void OutputLayersConcatMultiChannel::SetUp() { ASSERT_EQ(4ul, inputShape1.size()) << "unexpected input layout"; const InferenceEngine::SizeVector inputShape2 = { inputShape1[0], inputShape1[1] * 2ul, inputShape1[2], inputShape1[3] }; - const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); + const auto input2 = std::make_shared(ngPrecision, ngraph::Shape(inputShape2)); input2->set_friendly_name("input2"); const auto fakeQuantize2 = ngraph::builder::makeFakeQuantize(input2->output(0), ngPrecision, 256ul, { 1ul }); fakeQuantize2->set_friendly_name("fakeQuantize2"); - const std::shared_ptr concat = std::make_shared( + const std::shared_ptr concat = std::make_shared( ngraph::OutputVector{ fakeQuantize1->output(0), fakeQuantize2->output(0)}, 1); concat->set_friendly_name("concat"); - auto const1 = ngraph::opset1::Constant::create(ngPrecision, ngraph::Shape{ 1, 1, 1, 1 }, { 1 }); - std::shared_ptr convolution = std::make_shared(concat, const1); + auto const1 = ov::op::v0::Constant::create(ngPrecision, ngraph::Shape{ 1, 1, 1, 1 }, { 1 }); + std::shared_ptr convolution = std::make_shared(concat, const1); convolution->set_friendly_name("convolution"); ngraph::ResultVector results { - std::make_shared(concat), - std::make_shared(convolution), - std::make_shared(fakeQuantize2) + std::make_shared(concat), + std::make_shared(convolution), + std::make_shared(fakeQuantize2) }; function = std::make_shared(results, ngraph::ParameterVector { input1, input2 }, "OutputLayersHandling"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index 67ca66f65a63c3..ac78e3246fb7e5 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -53,7 +53,7 @@ void OutputLayers::SetUp() { std::tie(netPrecision, inputShape, targetDevice, 
params) = this->GetParam(); auto ngPrecision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - const auto input = std::make_shared(ngPrecision, ngraph::Shape(inputShape)); + const auto input = std::make_shared(ngPrecision, ngraph::Shape(inputShape)); input->set_friendly_name("input"); const float k = 1.f; @@ -62,7 +62,7 @@ void OutputLayers::SetUp() { { 0.f }, { 255.f / k }, { 0.f }, { 255.f / k }); fakeQuantizeOnActivations->set_friendly_name("fakeQuantizeOnActivations"); - const auto weights = ngraph::opset1::Constant::create( + const auto weights = ov::op::v0::Constant::create( ngPrecision, ngraph::Shape{ inputShape[1ul], inputShape[1ul], 1ul, 1ul }, std::vector(inputShape[1ul] * inputShape[1ul], 1ul)); @@ -72,7 +72,7 @@ void OutputLayers::SetUp() { { -128.f / k }, { 127.f / k }, { -128.f / k }, { 127.f / k }); fakeQuantizeOnWeights->set_friendly_name("fakeQuantizeOnWeights"); - std::shared_ptr convolution = std::make_shared( + std::shared_ptr convolution = std::make_shared( fakeQuantizeOnActivations, fakeQuantizeOnWeights, ngraph::Strides{ 1ul, 1ul }, @@ -82,8 +82,8 @@ void OutputLayers::SetUp() { convolution->set_friendly_name("convolution"); ngraph::ResultVector results { - std::make_shared(convolution), - std::make_shared(fakeQuantizeOnActivations) + std::make_shared(convolution), + std::make_shared(fakeQuantizeOnActivations) }; function = std::make_shared(results, ngraph::ParameterVector { input }, "OutputLayersHandling"); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp index 57a5dde6799bee..da6e3ba2af3666 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/pad_transformation.cpp @@ -15,7 +15,7 @@ namespace LayerTestsDefinitions { std::string PadTransformation::getTestCaseName(const testing::TestParamInfo& obj) { ngraph::element::Type netPrecision; ngraph::PartialShape inputShape; - ngraph::op::PadMode padMode; + ov::op::PadMode padMode; std::string targetDevice; ov::pass::low_precision::LayerTransformation::Params params; PadTransformationParam param; @@ -25,14 +25,14 @@ std::string PadTransformation::getTestCaseName(const testing::TestParamInfoGetParam(); diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp index 25c7ec5bee2de7..9980d12ac8199b 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_max_transformation.cpp @@ -41,7 +41,7 @@ void ReduceMaxTransformation::SetUp() { ngraph::builder::subgraph::DequantizationOperations dequantizationBefore; ngraph::builder::subgraph::DequantizationOperations dequantizationAfter; - function = ngraph::builder::subgraph::ReduceFunction::get( + function = ngraph::builder::subgraph::ReduceFunction::get( netPrecision, inputShape, param.fakeQuantize, diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp index b5139f40821269..35f54b7cc8a128 100644 --- 
a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_mean_transformation.cpp
@@ -49,7 +49,7 @@ void ReduceMeanTransformation::SetUp() {
     ReduceMeanTransformationParam param;
     std::tie(netPrecision, inputShape, targetDevice, params, param) = GetParam();
-    function = ngraph::builder::subgraph::ReduceFunction::get<ngraph::opset1::ReduceMean>(
+    function = ngraph::builder::subgraph::ReduceFunction::get<ov::op::v1::ReduceMean>(
         netPrecision,
         inputShape,
         param.fakeQuantize,
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
index 4e4448dac1e1cf..a1b3b9cb24fb28 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_min_transformation.cpp
@@ -41,7 +41,7 @@ void ReduceMinTransformation::SetUp() {
     ngraph::builder::subgraph::DequantizationOperations dequantizationBefore;
     ngraph::builder::subgraph::DequantizationOperations dequantizationAfter;
-    function = ngraph::builder::subgraph::ReduceFunction::get<ngraph::opset1::ReduceMin>(
+    function = ngraph::builder::subgraph::ReduceFunction::get<ov::op::v1::ReduceMin>(
         netPrecision,
         inputShape,
         param.fakeQuantize,
diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp
index f3536cdb67f5af..14d9e3cc97f0a6 100644
--- a/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp
+++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/reduce_sum_transformation.cpp
@@ -41,7 +41,7 @@ void ReduceSumTransformation::SetUp() {
     ngraph::builder::subgraph::DequantizationOperations dequantizationBefore;
     ngraph::builder::subgraph::DequantizationOperations dequantizationAfter;
-    function = ngraph::builder::subgraph::ReduceFunction::get<ngraph::opset1::ReduceSum>(
+    function = ngraph::builder::subgraph::ReduceFunction::get<ov::op::v1::ReduceSum>(
         netPrecision,
         inputShape,
         param.fakeQuantize,
diff --git a/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp b/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp
index fa0d79e691fe63..418d184ecd2132 100644
--- a/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp
+++ b/src/tests/functional/plugin/shared/src/snippets/codegen_bert.cpp
@@ -45,25 +45,25 @@ namespace snippets {
     std::tie(netPrecision, inputShape0, inputShape1, targetDevice) = this->GetParam();
     auto shape = ngraph::Shape{inputShape0};
-    auto input1 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, shape);
-    auto input2 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, shape);
+    auto input1 = std::make_shared<ov::op::v0::Parameter>(netPrecision, shape);
+    auto input2 = std::make_shared<ov::op::v0::Parameter>(netPrecision, shape);
     auto shapeMM = ngraph::Shape{inputShape1};
-    auto input3 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, shapeMM);
+    auto input3 = std::make_shared<ov::op::v0::Parameter>(netPrecision, shapeMM);
-    auto add = std::make_shared<ngraph::opset1::Add>(input1, input2);
-    auto mm = std::make_shared<ngraph::opset1::MatMul>(add, input3);
+    auto add = std::make_shared<ov::op::v1::Add>(input1, input2);
+    auto mm = std::make_shared<ov::op::v0::MatMul>(add, input3);
     std::vector<float> vals(ngraph::shape_size(shape));
     for (int i = 0; i < vals.size(); i++) {
         vals[i] = static_cast<float>(i)*vals.size();
     }
-    auto c0 = std::make_shared<ngraph::opset1::Parameter>(netPrecision, shape);
-    auto add2 = std::make_shared<ngraph::opset1::Add>(mm, c0);
+    auto c0 = std::make_shared<ov::op::v0::Parameter>(netPrecision, shape);
+
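    // The snippet hunks above and below all apply one mechanical rename; a
    // minimal before/after sketch, assuming the standard opset mapping
    // (Parameter and MatMul are v0, Add is v1):
    //   before: auto add = std::make_shared<ngraph::opset1::Add>(input1, input2);
    //   after:  auto add = std::make_shared<ov::op::v1::Add>(input1, input2);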
auto add2 = std::make_shared(mm, c0); - auto add3 = std::make_shared(add, add2); - auto result = std::make_shared(add3); + auto add3 = std::make_shared(add, add2); + auto result = std::make_shared(add3); function = std::make_shared( ngraph::ResultVector{result}, diff --git a/src/tests/functional/plugin/shared/src/snippets/convert.cpp b/src/tests/functional/plugin/shared/src/snippets/convert.cpp index af37875d50f345..0112c18c16245e 100644 --- a/src/tests/functional/plugin/shared/src/snippets/convert.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/convert.cpp @@ -86,8 +86,11 @@ void Convert::generate_inputs(const std::vector& targetInputStaticSha ov::Tensor tensor; int32_t startFrom, range, resolution; std::tie(startFrom, range, resolution) = params[i]; - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], - range, startFrom, resolution); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = startFrom; + in_data.range = range; + in_data.resolution = resolution; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } } diff --git a/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp b/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp index e5b3c55391ed44..8d5d6c51bf69f6 100644 --- a/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/fake_quantize_decomposition_test.cpp @@ -25,7 +25,7 @@ std::string FakeQuantizeDecompositionTest::getTestCaseName(testing::TestParamInf const auto targetDevice = std::get<3>(obj.param); const auto type_info = operation.first->get_type_info(); - const auto operationString = ngraph::is_type(operation.first) ? + const auto operationString = ngraph::is_type(operation.first) ? "nullptr" : (std::string(type_info.name) + "_" + std::string(type_info.version_id)); @@ -55,7 +55,7 @@ void FakeQuantizeDecompositionTest::SetUp() { init_input_shapes({{values.inputShape, {values.inputShape}}}); - std::shared_ptr op = ngraph::is_type(operation.first) ? nullptr : operation.first; + std::shared_ptr op = ngraph::is_type(operation.first) ? nullptr : operation.first; function = ov::test::snippets::FakeQuantizeFunction::getOperationAndFakeQuantize( {values.inputShape}, values.inputType, diff --git a/src/tests/functional/plugin/shared/src/snippets/mha.cpp b/src/tests/functional/plugin/shared/src/snippets/mha.cpp index f98241202da98a..6603a1d8183e6e 100644 --- a/src/tests/functional/plugin/shared/src/snippets/mha.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/mha.cpp @@ -81,9 +81,12 @@ void MHA::generate_inputs(const std::vector& targetInputStaticSha for (int i = 0; i < model_inputs.size(); ++i) { const auto& model_input = model_inputs[i]; ov::Tensor tensor; + ov::test::utils::InputGenerateData in_data; // To avoid big relative errors in the vicinity of zero, only positive values are generated for bf16 precision - int start_from = model_input.get_element_type() == ov::element::bf16 ? 0 : -1; - tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), model_input.get_shape(), 2, start_from, 256); + in_data.start_from = model_input.get_element_type() == ov::element::bf16 ? 
0 : -1; + in_data.range = 2; + in_data.resolution = 256; + tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), model_input.get_shape(), in_data); inputs.insert({model_input.get_node_shared_ptr(), tensor}); } } @@ -101,10 +104,19 @@ void MHASelect::generate_inputs(const std::vector& targetInputSta ov::Tensor tensor; int seed = 0; if (name.find("less") != std::string::npos) { - tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), model_input.get_shape(), 5 + seed, -2, 10, seed); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -2; + in_data.range = 5 + seed; + in_data.resolution = 10; + in_data.seed = seed; + tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), model_input.get_shape(), in_data); seed++; } else { - tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), model_input.get_shape(), 2, -1, 256); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 2; + in_data.resolution = 256; + tensor = ov::test::utils::create_and_fill_tensor(model_input.get_element_type(), model_input.get_shape(), in_data); } inputs.insert({node_input, tensor}); } diff --git a/src/tests/functional/plugin/shared/src/snippets/select.cpp b/src/tests/functional/plugin/shared/src/snippets/select.cpp index 37911036a85bd9..ea5ab8e1f0a30a 100644 --- a/src/tests/functional/plugin/shared/src/snippets/select.cpp +++ b/src/tests/functional/plugin/shared/src/snippets/select.cpp @@ -16,9 +16,21 @@ namespace { void generate_data(std::map, ov::Tensor>& data_inputs, const std::vector>& model_inputs, const std::vector& targetInputStaticShapes) { data_inputs.clear(); - auto tensor_bool = ov::test::utils::create_and_fill_tensor(model_inputs[0].get_element_type(), targetInputStaticShapes[0], 3, -1, 2); - auto tensor0 = ov::test::utils::create_and_fill_tensor(model_inputs[1].get_element_type(), targetInputStaticShapes[1], 10, -10, 2); - auto tensor1 = ov::test::utils::create_and_fill_tensor(model_inputs[2].get_element_type(), targetInputStaticShapes[2], 10, 0, 2); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -1; + in_data.range = 3; + in_data.resolution = 2; + auto tensor_bool = ov::test::utils::create_and_fill_tensor(model_inputs[0].get_element_type(), targetInputStaticShapes[0], in_data); + + in_data.start_from = -10; + in_data.range = 10; + in_data.resolution = 2; + auto tensor0 = ov::test::utils::create_and_fill_tensor(model_inputs[1].get_element_type(), targetInputStaticShapes[1], in_data); + + in_data.start_from = 0; + in_data.range = 10; + in_data.resolution = 2; + auto tensor1 = ov::test::utils::create_and_fill_tensor(model_inputs[2].get_element_type(), targetInputStaticShapes[2], in_data); data_inputs.insert({model_inputs[0].get_node_shared_ptr(), tensor_bool}); data_inputs.insert({model_inputs[1].get_node_shared_ptr(), tensor0}); data_inputs.insert({model_inputs[2].get_node_shared_ptr(), tensor1}); diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp index 0ea5ec20e78972..980483e2317689 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp @@ -13,6 +13,9 @@ namespace ov { namespace test { namespace utils { +void 
set_const_ranges(double _min, double _max); +void reset_const_ranges(); + using InputsMap = std::map& node, size_t port, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index e134bd7f018fd9..bdcdd94fd27dd8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -7,22 +7,25 @@ #include #include -#include "ngraph/node.hpp" -#include "ngraph/op/proposal.hpp" -#include "ngraph/op/power.hpp" -#include "ngraph/op/mod.hpp" -#include "ngraph/op/floor_mod.hpp" -#include "ngraph/op/divide.hpp" -#include "ngraph/op/erf.hpp" -#include "ngraph/op/non_max_suppression.hpp" -#include "ngraph/op/reduce_l1.hpp" -#include "ngraph/op/reduce_l2.hpp" -#include "ngraph/op/reduce_sum.hpp" -#include "ngraph/op/reduce_prod.hpp" -#include "ngraph/op/reduce_mean.hpp" -#include "ngraph/op/max.hpp" -#include "ngraph/op/min.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/core/node.hpp" +#include "openvino/op/proposal.hpp" +#include "openvino/op/power.hpp" +#include "openvino/op/mod.hpp" +#include "openvino/op/floor_mod.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/erf.hpp" +#include "openvino/op/non_max_suppression.hpp" +#include "openvino/op/reduce_l1.hpp" +#include "openvino/op/reduce_l2.hpp" +#include "openvino/op/reduce_sum.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/reduce_mean.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/reduce_max.hpp" +#include "openvino/op/reduce_min.hpp" #include "openvino/op/dft.hpp" #include "openvino/op/idft.hpp" #include "openvino/op/logical_and.hpp" @@ -72,45 +75,7 @@ namespace ov { namespace test { namespace utils { -// todo: remove w/a to generate correct constant data (replace parameter to const) in conformance with defined range -struct ConstRanges { - static double max, min; - static bool is_defined; - - static void set(double _min, double _max) { - min = _min; - max = _max; - is_defined = true; - } - - static void reset() { - min = std::numeric_limits::max(); - max = std::numeric_limits::min(); - is_defined = false; - } -}; - -struct InputGenerateData { - double_t start_from; - uint32_t range; - int32_t resolution; - int seed; - - InputGenerateData(double_t _start_from = 0, uint32_t _range = 10, int32_t _resolution = 1, int _seed = 1) - : start_from(_start_from), range(_range), resolution(_resolution), seed(_seed) { - if (ConstRanges::is_defined) { - auto min_orig = start_from; - auto max_orig = start_from + range * resolution; - auto min_ref = ConstRanges::min; - auto max_ref = ConstRanges::max; - if (min_orig < min_ref || min_orig == 0) - start_from = min_ref; - range = (max_orig > max_ref || max_orig == 10 ? 
max_ref : max_orig - start_from) - start_from; - } - } -}; - -static std::map>> inputRanges = { +static std::map>> inputRanges = { // NodeTypeInfo: {IntRanges{}, RealRanges{}} (Ranges are used by generate) { ov::op::v0::Erf::get_type_info_static(), {{{-3, 6}}, {{-3, 6, 10}}} }, { ov::op::v1::Divide::get_type_info_static(), {{{101, 100}}, {{2, 2, 128}}} }, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp index 48562853f67f73..b57cee5745ba25 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/binary_convolution.hpp @@ -22,7 +22,7 @@ using binConvSpecificParams = std::tuple< std::vector, // Pads end InferenceEngine::SizeVector, // Dilations size_t, // Num Output channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type float>; // Padding value using binaryConvolutionTestParamsSet = std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp index 9b0e293a8d52a7..38cf1bbe94ed2e 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/broadcast.hpp @@ -16,7 +16,7 @@ namespace LayerTestsDefinitions { using BroadcastParamsTuple = typename std::tuple< InferenceEngine::SizeVector, // target shape ngraph::AxisSet, // axes mapping - ngraph::op::BroadcastType, // broadcast mode + ov::op::BroadcastType, // broadcast mode InferenceEngine::SizeVector, // Input shape InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp index 6cdc9b5e323988..95b0a68c5914b3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution.hpp @@ -23,7 +23,7 @@ typedef std::tuple< std::vector, // Pad end InferenceEngine::SizeVector, // Dilation size_t, // Num out channels - ngraph::op::PadType // Padding type + ov::op::PadType // Padding type > convSpecificParams; typedef std::tuple< convSpecificParams, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp index ef896760a42fe8..61503f7797b7b9 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop.hpp @@ -22,7 +22,7 @@ typedef std::tuple< std::vector, // Pad end InferenceEngine::SizeVector, // Dilation size_t, // Num out channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type std::vector // Output padding > convBackpropSpecificParams; typedef std::tuple< diff --git 
a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp index 4ce4dc1decb687..5b28cca7187b98 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/convolution_backprop_data.hpp @@ -24,7 +24,7 @@ typedef std::tuple< std::vector, // Pad end InferenceEngine::SizeVector, // Dilation size_t, // Num out channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type std::vector // Output padding > convBackpropDataSpecificParams; typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp index ad164b7e6d07ba..c93764b6dde36c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/deformable_convolution.hpp @@ -25,7 +25,7 @@ typedef std::tuple< size_t, // Groups size_t, // Deformable groups size_t, // Num out channels - ngraph::op::PadType, // Padding type + ov::op::PadType, // Padding type bool, // Bilinear interpolation pad bool // Modulation > deformableConvSpecificParams; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp index 486a0ae7684ce5..26d972933f8851 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/depth_to_space.hpp @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { using depthToSpaceParamsTuple = typename std::tuple< std::vector, // Input shape InferenceEngine::Precision, // Input precision - ngraph::opset3::DepthToSpace::DepthToSpaceMode, // Mode + ov::op::v0::DepthToSpace::DepthToSpaceMode, // Mode std::size_t, // Block size std::string>; // Device name> diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp index af1f75c09b027c..e5d8a854ffa087 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/detection_output.hpp @@ -14,7 +14,7 @@ namespace LayerTestsDefinitions { -std::ostream& operator <<(std::ostream& os, const ngraph::op::DetectionOutputAttrs& inputShape); +std::ostream& operator <<(std::ostream& os, const ov::op::v0::DetectionOutput::Attributes& inputShape); enum { idxLocation, @@ -62,7 +62,7 @@ using DetectionOutputParams = std::tuple< class DetectionOutputLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; std::vector inShapes; void GenerateInputs() 
override; void Compare(const std::vector>> &expectedOutputs, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp index 4d1502cb7366ea..8240652b6182fb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/extract_image_patches.hpp @@ -18,7 +18,7 @@ using extractImagePatchesTuple = typename std::tuple< std::vector, // kernel size std::vector, // strides std::vector, // rates - ngraph::op::PadType, // pad type + ov::op::PadType, // pad type InferenceEngine::Precision, // Network precision InferenceEngine::Precision, // Input precision InferenceEngine::Precision, // Output precision diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp index 241b9492e38868..66e5d3b0ef485f 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/fake_quantize.hpp @@ -32,7 +32,7 @@ typedef std::tuple< std::vector, // fake quantize inputs shape std::vector, // fake quantize (inputLow, inputHigh, outputLow, outputHigh) or empty for random std::vector, // input generator data (low, high, resolution) or empty for default - ngraph::op::AutoBroadcastSpec // fake quantize broadcast mode + ov::op::AutoBroadcastSpec // fake quantize broadcast mode > fqSpecificParams; typedef std::tuple< fqSpecificParams, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp index 24b54c1a3b1dd9..19e06b49d5f443 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/grid_sample.hpp @@ -13,9 +13,9 @@ namespace LayerTestsDefinitions { using GridSampleParams = std::tuple; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp index add3010c44bac6..de90196085c575 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution.hpp @@ -22,7 +22,7 @@ typedef std::tuple< InferenceEngine::SizeVector, size_t, size_t, - ngraph::op::PadType> groupConvSpecificParams; + ov::op::PadType> groupConvSpecificParams; typedef std::tuple< groupConvSpecificParams, InferenceEngine::Precision, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp index bb694c120e8e87..03017430c08572 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp 
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_convolution_backprop_data.hpp @@ -24,7 +24,7 @@ using groupConvBackpropDataSpecificParams = std::tuple< InferenceEngine::SizeVector, // dilations size_t, // num output channels size_t, // num groups - ngraph::op::PadType>; // padding type + ov::op::PadType>; // padding type using groupConvBackpropDataLayerTestParamsSet = std::tuple< groupConvBackpropDataSpecificParams, InferenceEngine::Precision, // Network precision @@ -52,7 +52,7 @@ using groupConvBackpropSpecificParams = std::tuple< InferenceEngine::SizeVector, // dilations size_t, // num output channels size_t, // num groups - ngraph::op::PadType, // padding type + ov::op::PadType, // padding type std::vector>; // output padding using groupConvBackpropLayerTestParamsSet = std::tuple< groupConvBackpropSpecificParams, diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp index 27873d36c80098..28fe5588ae9b24 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/group_normalization.hpp @@ -68,7 +68,7 @@ class GroupNormalizationTest : public testing::WithParamInterface(groupNormalization)}; + const ngraph::ResultVector results{std::make_shared(groupNormalization)}; // TODO: This workaround is needed as there is no full support for f16 type in the reference implementation if (ngPrc == element::Type_t::f16) { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp index 225b5d6c2f616a..8a3d91f3969fc8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/gru_sequence.hpp @@ -26,7 +26,7 @@ using GRUSequenceParams = typename std::tuple< std::vector, // activations float, // clip bool, // linear_before_reset - ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp index 0f520392f88efd..e22e30bac0b20b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/lstm_sequence.hpp @@ -23,7 +23,7 @@ using LSTMSequenceParams = typename std::tuple< size_t, // input size std::vector, // activations float, // clip - ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp index 586d39dfa3c1e7..d051d1ad37e1e2 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/matrix_nms.hpp @@ -29,13 +29,13 @@ using ThresholdParams = std::tuple, // Params using to create 1st and 2nd inputs InputPrecisions, // Input precisions - ngraph::op::v8::MatrixNms::SortResultType, // Order of output elements + ov::op::v8::MatrixNms::SortResultType, // Order of output elements ngraph::element::Type, // Output type TopKParams, // Maximum number of boxes topk params ThresholdParams, // Thresholds: score_threshold, gaussian_sigma, post_threshold int, // Background class id bool, // If boxes are normalized - ngraph::op::v8::MatrixNms::DecayFunction, // Decay function + ov::op::v8::MatrixNms::DecayFunction, // Decay function bool, // make output shape static std::string>; // Device name @@ -51,7 +51,7 @@ class MatrixNmsLayerTest : public testing::WithParamInterface, private: void GetOutputParams(size_t& numBatches, size_t& maxOutputBoxesPerBatch); - ngraph::op::v8::MatrixNms::Attributes m_attrs; + ov::op::v8::MatrixNms::Attributes m_attrs; bool m_outStaticShape; }; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp index 5ac8c35e078c17..d39931e79366a8 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/memory.hpp @@ -35,11 +35,11 @@ class MemoryTest : public testing::WithParamInterface, virtual void Infer() override; virtual std::shared_ptr CreateReadValueOp( const ov::Output& value, const std::shared_ptr& variable) const { - return std::make_shared(value, variable); + return std::make_shared(value, variable); } virtual std::shared_ptr CreateAssignOp( const ov::Output& value, const std::shared_ptr& variable) const { - return std::make_shared(value, variable); + return std::make_shared(value, variable); } virtual void CreateCommonFunc(); @@ -62,12 +62,12 @@ class MemoryTestV3 : public MemoryTest { protected: std::shared_ptr CreateReadValueOp( const ov::Output& value, const std::shared_ptr& variable) const override { - return std::make_shared(value, variable->get_info().variable_id); + return std::make_shared(value, variable->get_info().variable_id); } std::shared_ptr CreateAssignOp( const ov::Output& value, const std::shared_ptr& variable) const override { - return std::make_shared(value, variable->get_info().variable_id); + return std::make_shared(value, variable->get_info().variable_id); } }; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp index 4c936209d72783..c467cfe52f75c6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/multiclass_nms.hpp @@ -36,7 +36,7 @@ using MulticlassNmsParams = std::tuple, int32_t, // background_class int32_t, // keep_top_k ngraph::element::Type, // Output type - ngraph::op::util::MulticlassNmsBase::SortResultType, // 
SortResultType + ov::op::util::MulticlassNmsBase::SortResultType, // SortResultType InputboolVar, // Sort result across batch, normalized bool, // make output shape static std::string>; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp index 54d2ea05f831f3..a8081ed3747240 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/non_max_suppression.hpp @@ -14,7 +14,7 @@ namespace testing { namespace internal { template <> -inline void PrintTo(const ::ngraph::op::v5::NonMaxSuppression::BoxEncodingType& value, ::std::ostream* os) {} +inline void PrintTo(const ::ov::op::v5::NonMaxSuppression::BoxEncodingType& value, ::std::ostream* os) {} } // namespace internal } // namespace testing @@ -36,7 +36,7 @@ using NmsParams = std::tuple; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp index 3adc1ddc9dac77..3c46d1c5cf3522 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/normalize_l2.hpp @@ -16,7 +16,7 @@ namespace LayerTestsDefinitions { using NormalizeL2LayerTestParams = std::tuple< std::vector, // axes float, // eps - ngraph::op::EpsMode, // eps_mode + ov::op::EpsMode, // eps_mode InferenceEngine::SizeVector, // inputShape InferenceEngine::Precision, // netPrecision std::string // targetDevice diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp index 82c28c09a9ed2b..8eb712eb6b57c3 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/pooling.hpp @@ -22,8 +22,8 @@ typedef std::tuple< std::vector, // Stride std::vector, // Pad begin std::vector, // Pad end - ngraph::op::RoundingType, // Rounding type - ngraph::op::PadType, // Pad type + ov::op::RoundingType, // Rounding type + ov::op::PadType, // Pad type bool // Exclude pad > poolSpecificParams; typedef std::tuple< @@ -56,8 +56,8 @@ typedef std::tuple< std::vector, // Pad end ngraph::element::Type_t, // Index element type int64_t, // Axis - ngraph::op::RoundingType, // Rounding type - ngraph::op::PadType // Pad type + ov::op::RoundingType, // Rounding type + ov::op::PadType // Pad type > maxPoolV8SpecificParams; typedef std::tuple< diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp index a3788d9b1cb1da..d9e2b0138d9e4a 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/rnn_sequence.hpp @@ -23,7 +23,7 @@ using RNNSequenceParams = typename std::tuple< size_t, // input size std::vector, // activations float, // clip 
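    // The direction field below changes namespace only; its values survive the
    // move. A minimal sketch (FORWARD is one of FORWARD/REVERSE/BIDIRECTIONAL):
    //   ov::op::RecurrentSequenceDirection dir = ov::op::RecurrentSequenceDirection::FORWARD;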
- ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction ngraph::helpers::InputLayerType, // WRB input type (Constant or Parameter) InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp index e27f2853fc6186..7426dc04a0ca03 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/select.hpp @@ -15,7 +15,7 @@ namespace LayerTestsDefinitions { typedef std::tuple< std::vector>, // mask, then, else shapes InferenceEngine::Precision, // then, else precision - ngraph::op::AutoBroadcastSpec, // broadcast + ov::op::AutoBroadcastSpec, // broadcast std::string> selectTestParams; // device name class SelectLayerTest : public testing::WithParamInterface, virtual public LayerTestsUtils::LayerTestsCommon { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp index 665baa01dc7265..361ff9fa41015c 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/space_to_depth.hpp @@ -19,7 +19,7 @@ namespace LayerTestsDefinitions { using spaceToDepthParamsTuple = typename std::tuple< std::vector, // Input shape InferenceEngine::Precision, // Input precision - ngraph::opset3::SpaceToDepth::SpaceToDepthMode, // Mode + ov::op::v0::SpaceToDepth::SpaceToDepthMode, // Mode std::size_t, // Block size std::string>; // Device name> diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp index f7dbd4322aa3ea..157c4fca461320 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/tensor_iterator.hpp @@ -25,7 +25,7 @@ using TensorIteratorParams = typename std::tuple< size_t, // sequence axis float, // clip ngraph::helpers::TensorIteratorBody, // body type - ngraph::op::RecurrentSequenceDirection, // direction + ov::op::RecurrentSequenceDirection, // direction InferenceEngine::Precision, // Network precision std::string>; // Device name diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp index 1892ad7f92bccd..3759c0bef6d569 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/topk.hpp @@ -14,8 +14,8 @@ namespace LayerTestsDefinitions { typedef std::tuple< int64_t, // keepK int64_t, // axis - ngraph::opset4::TopK::Mode, // mode - ngraph::opset4::TopK::SortType, // sort + ov::op::v3::TopK::Mode, // mode + ov::op::v3::TopK::SortType, // sort InferenceEngine::Precision, // Net precision InferenceEngine::Precision, // Input precision 
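    // The mode/sort rewrite above relies on ngraph::opset4::TopK resolving to
    // the versioned type ov::op::v3::TopK; a minimal sketch with hypothetical
    // data/k outputs feeding the op:
    //   auto topk = std::make_shared<ov::op::v3::TopK>(data, k, /*axis=*/1,
    //       ov::op::v3::TopK::Mode::MAX, ov::op::v3::TopK::SortType::SORT_VALUES);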
    InferenceEngine::Precision,        // Output precision
diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/invalid_cases/proposal.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/invalid_cases/proposal.hpp
similarity index 100%
rename from src/tests/functional/plugin/shared/include/single_layer_tests/invalid_cases/proposal.hpp
rename to src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/invalid_cases/proposal.hpp
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp
index ed15880da7bdcc..4bcecd86963e6b 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/permute_concat_concat_permute.hpp
@@ -28,7 +28,7 @@ class PermuteConcatConcatPermute : public testing::WithParamInterface
-    CreateConst(const std::vector& input_shape,
+    static std::shared_ptr CreateConst(const std::vector& input_shape,
                                        const ::ngraph::element::Type& precision,
                                        bool use_1_as_first_dimension);
     template
diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stateful_model.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stateful_model.hpp
index ebdd0ba5d95d14..ee7659655f1f03 100644
--- a/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stateful_model.hpp
+++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/subgraph/stateful_model.hpp
@@ -27,9 +27,7 @@ class StatefulModelTest : public SubgraphBaseTest, public testing::WithParamInte
     }
     void reset_state() {
-        for (auto&& state : inferRequest.query_state()) {
-            state.reset();
-        }
+        inferRequest.reset_state();
     }
     static void float_compare(const float* expected_res, const float* actual_res, size_t size) {
diff --git a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
index b1c5eb9e9832ac..c33642d48d76f9 100644
--- a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
+++ b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp
@@ -217,13 +217,14 @@ void SubgraphBaseTest::import_export() {
     std::stringstream strm;
     compiledModel.export_model(strm);
     ov::CompiledModel importedModel = core->import_model(strm, targetDevice, configuration);
-    auto importedFunction = importedModel.get_runtime_model()->clone();
+    const auto importedFunction = importedModel.get_runtime_model()->clone();
+    const auto runtimeModel = compiledModel.get_runtime_model()->clone();
     auto comparator = FunctionsComparator::with_default()
                           .enable(FunctionsComparator::ATTRIBUTES)
                           .enable(FunctionsComparator::NAMES)
                           .enable(FunctionsComparator::CONST_VALUES);
-    auto res = comparator.compare(importedFunction, function);
+    auto res = comparator.compare(importedFunction, runtimeModel);
     if (!res.valid) {
         throw std::runtime_error(res.message);
     }
diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp
index 885c9daaf1f51f..c61d9d5ee39578 100644
--- a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp
+++
b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -20,9 +20,23 @@ namespace ov { namespace test { namespace utils { -double ConstRanges::max = std::numeric_limits::min(); -double ConstRanges::min = std::numeric_limits::max(); -bool ConstRanges::is_defined = false; +namespace { +struct { + double max = 0; + double min = 0; + bool is_defined = false; +} const_range; +} // namespace + +void set_const_ranges(double _min, double _max) { + const_range.max = _max; + const_range.min = _min; + const_range.is_defined = true; +} + +void reset_const_ranges() { + const_range.is_defined = false; +} namespace { @@ -43,6 +57,10 @@ namespace { * * All the generated numbers completely fit into the data type without truncation */ + +using ov::test::utils::InputGenerateData; + + static inline void set_real_number_generation_data(InputGenerateData& inGenData) { inGenData.range = 8; inGenData.resolution = 32; @@ -53,6 +71,17 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, const ov::element::Type& elemType, const ov::Shape& targetShape) { InputGenerateData inGenData; + + if (const_range.is_defined) { + auto min_orig = inGenData.start_from; + auto max_orig = inGenData.start_from + inGenData.range * inGenData.resolution; + auto min_ref = const_range.min; + auto max_ref = const_range.max; + if (min_orig < min_ref || min_orig == 0) + inGenData.start_from = min_ref; + inGenData.range = (max_orig > max_ref || max_orig == 10 ? max_ref : max_orig - inGenData.start_from) - inGenData.start_from; + } + if (elemType.is_real()) { set_real_number_generation_data(inGenData); } @@ -67,8 +96,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node, const auto& range = ranges.at(elemType.is_real()); inGenData = range.size() < inNodeCnt ? range.front() : range.at(port); } - return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, - inGenData.start_from, inGenData.resolution, inGenData.seed); + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData); } namespace Activation { @@ -79,20 +107,26 @@ ov::runtime::Tensor generate(const ov::element::Type& elemType, inGenData.range = 15; inGenData.start_from = 0; } - return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed); + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData); } } // namespace Activation -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const ov::element::Type& elemType, const ov::Shape& targetShape) { switch (port) { case 1: { - return ov::test::utils::create_and_fill_tensor(elemType, targetShape, 0, 0.2f); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0.2; + in_data.range = 0; + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data); } case 2: { - return ov::test::utils::create_and_fill_tensor(elemType, targetShape, 0, 0.5f); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0.5; + in_data.range = 0; + return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data); } default: { return Activation::generate(elemType, targetShape); @@ -102,7 +136,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& return Activation::generate(elemType, targetShape); } -ov::runtime::Tensor generate(const std::shared_ptr& node, +ov::runtime::Tensor generate(const std::shared_ptr& node, size_t port, const 
@@ -110,9 +144,15 @@ ov::runtime::Tensor generate(const std::shared_ptr& node,
     case 1: {
         auto name = node->input(1).get_node()->get_friendly_name();
         if (0 == name.compare("leakySlope")) {
-            return ov::test::utils::create_and_fill_tensor(elemType, targetShape, 0, 0.01f, 100);
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = 0.01;
+            in_data.range = 0;
+            return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data);
         } else if (0 == name.compare("negativeSlope")) {
-            return ov::test::utils::create_and_fill_tensor(elemType, targetShape, 0, -0.01f, 100);
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = -0.01;
+            in_data.range = 0;
+            return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data);
         } else {
             return Activation::generate(elemType, targetShape);
         }
@@ -123,7 +163,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node,
     }
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -142,7 +182,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node,
     }
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -166,10 +206,10 @@ ov::runtime::Tensor generate(const std::shared_ptr
-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -225,7 +265,7 @@ ov::runtime::Tensor generate(const std::shared_ptr
         inGenData.resolution = 1.0f;
         inGenData.seed = seed;
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
         }
     }
 }
@@ -294,7 +334,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -307,65 +347,68 @@ ov::runtime::Tensor generate(const std::shared_ptr&
         InputGenerateData inGenData;
         inGenData.start_from = maxBeamIndx / 2;
         inGenData.range = maxBeamIndx;
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
     }
     default:
         InputGenerateData inGenData;
         inGenData.range = maxBeamIndx;
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
     }
 }

 namespace LogicalOp {
 ov::runtime::Tensor generate(const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
-    return create_and_fill_tensor(elemType, targetShape, 2, 0);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 2;
+    return create_and_fill_tensor(elemType, targetShape, in_data);
 }
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return LogicalOp::generate(elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return LogicalOp::generate(elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return LogicalOp::generate(elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return LogicalOp::generate(elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return LogicalOp::generate(elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return LogicalOp::generate(elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -373,7 +416,12 @@ ov::runtime::Tensor generate(const std::shared_ptr& n
     switch (port) {
         case 0: {
             auto data_size = shape_size(targetShape);
-            return create_and_fill_tensor(elemType, targetShape, data_size * 5, 0, 10, 7235346);
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = 0;
+            in_data.range = data_size * 5;
+            in_data.resolution = 10;
+            in_data.seed = 7235346;
+            return create_and_fill_tensor(elemType, targetShape, in_data);
         }
         case 1: {
             return create_and_fill_tensor_unique_sequence(elemType, targetShape, 0, 10, 8234231);
@@ -414,7 +462,7 @@ ov::runtime::Tensor generate(const std::shared_ptr& node,
     }
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -434,20 +482,22 @@ ov::runtime::Tensor generate(const std::shared_ptr& no
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     return ov::test::utils::create_and_fill_tensor_consistently(elemType, targetShape, 3, 0, 1);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     if (port == 2) {
-        unsigned int m_max_seq_len = 10;
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, m_max_seq_len, 0);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 10; // max_seq_len
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data);
     }
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }
@@ -457,17 +507,21 @@ ov::runtime::Tensor generate(const std::shared_ptr& no
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     if (port == 2) {
-        unsigned int m_max_seq_len = 10;
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, m_max_seq_len, 0);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 10; // max_seq_len
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data);
     }
     if (port == 3 && node->input(0).get_partial_shape().is_static()) {
-        auto seq_len = node->input(0).get_shape()[1];
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, seq_len);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = node->input(0).get_shape()[1]; // seq_len
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data);
     }
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -571,7 +625,7 @@ ov::runtime::Tensor generate_unique_possibilities(const ov::Shape &targetShape)
     return tensor;
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -588,18 +642,20 @@ ov::runtime::Tensor generate(const std::shared_ptr
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
     if (port == 2) {
-        unsigned int m_max_seq_len = 10;
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, m_max_seq_len, 0);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = 10; // max_seq_len
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_data);
     }
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }

-ov::runtime::Tensor generate(const std::shared_ptr& node,
+ov::runtime::Tensor generate(const std::shared_ptr& node,
                              size_t port,
                              const ov::element::Type& elemType,
                              const ov::Shape& targetShape) {
@@ -637,7 +693,7 @@ ov::runtime::Tensor generate(const
 }

 ov::runtime::Tensor generate(const
-    std::shared_ptr& node,
+    std::shared_ptr& node,
     size_t port,
     const ov::element::Type& elemType,
     const ov::Shape& targetShape) {
@@ -769,8 +825,7 @@ ov::runtime::Tensor generate(const
         in_gen_data.start_from = 0;
         in_gen_data.resolution = 20;
     }
-    return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_gen_data.range,
-                                                   in_gen_data.start_from, in_gen_data.resolution, in_gen_data.seed);
+    return ov::test::utils::create_and_fill_tensor(elemType, targetShape, in_gen_data);
 }

 namespace comparison {
@@ -972,7 +1027,7 @@ ov::runtime::Tensor generate(const
     const ov::Shape& targetShape) {
     if (port == 0) {
         InputGenerateData inGenData(-5, 10, 7, 222);
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
     }
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }
@@ -983,7 +1038,7 @@ ov::runtime::Tensor generate(const
     const ov::element::Type& elemType,
     const ov::Shape& targetShape) {
     InputGenerateData inGenData(1, 0, 1, 1);
-    auto tensor = ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+    auto tensor = ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);

     if (0 == port || 1 == port) {
 #define CASE(X) case X: { \
@@ -1017,7 +1072,7 @@ ov::runtime::Tensor generate(const
     const ov::element::Type& elemType,
     const ov::Shape& targetShape) {
     InputGenerateData inGenData(1, 0, 1, 1);
-    return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+    return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
 }

 ov::runtime::Tensor generate(const
@@ -1031,7 +1086,7 @@ ov::runtime::Tensor generate(const
         inGenData.range = 200;
         inGenData.resolution = 2;
     }
-    return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+    return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
 }

 ov::runtime::Tensor generate(const
@@ -1041,7 +1096,7 @@ ov::runtime::Tensor generate(const
     const ov::Shape& targetShape) {
     if (port == 1) {
         InputGenerateData inGenData(0, 1, 1000, 1);
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
     }
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }
@@ -1053,7 +1108,7 @@ ov::runtime::Tensor generate(const
     const ov::Shape& targetShape) {
     if (port == 1) {
         InputGenerateData inGenData(0, 1, 1000, 1);
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
     }
     return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }
@@ -1065,7 +1120,7 @@ ov::runtime::Tensor generate(const
     const ov::Shape& targetShape) {
     if (port == 1) {
         InputGenerateData inGenData(0, 1, 1000, 1);
-        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData.range, inGenData.start_from, inGenData.resolution, inGenData.seed);
+        return ov::test::utils::create_and_fill_tensor(elemType, targetShape, inGenData);
    }
    return generate(std::dynamic_pointer_cast(node), port, elemType, targetShape);
 }
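// Editorial note: the static ConstRanges members are replaced above by a
// file-local const_range struct plus two free functions in ov::test::utils,
// so a test can pin constant generation to a range and release it afterwards.
// A minimal usage sketch, assuming the declarations are exported by the
// generator's header (the exact header location is an assumption):
//
//     ov::test::utils::set_const_ranges(-1.0, 1.0);  // clamp generated data to [-1, 1]
//     // ... build the model and run the test body here ...
//     ov::test::utils::reset_const_ranges();         // restore default generation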
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp
index 56d48726f82bbd..0840dce4a91176 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/activation.cpp
@@ -218,7 +218,7 @@ void ActivationParamLayerTest::SetUp() {
     params.insert(params.end(), activationParams.begin(), activationParams.end());

     auto activation = ngraph::builder::makeActivation(params, ngPrc, activationType);
-    ngraph::ResultVector results{std::make_shared(activation)};
+    ngraph::ResultVector results{std::make_shared(activation)};
     function = std::make_shared(results, params);
 }

diff --git a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp
index d9e6d3c7b23ade..27f914487b4c93 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/adaptive_pooling.cpp
@@ -45,8 +45,8 @@ void AdaPoolLayerTest::SetUp() {
     auto pooledParam = ngraph::builder::makeConstant(ngraph::element::i32, pooledShape, pooledSpatialShape);

     // we cannot create abstract Op to use polymorphism
-    auto adapoolMax = std::make_shared(params[0], pooledParam, ngraph::element::i32);
-    auto adapoolAvg = std::make_shared(params[0], pooledParam);
+    auto adapoolMax = std::make_shared(params[0], pooledParam, ngraph::element::i32);
+    auto adapoolAvg = std::make_shared(params[0], pooledParam);

     function = (poolingMode == "max" ? std::make_shared(adapoolMax->outputs(), params, "AdaPoolMax")
                                      : std::make_shared(adapoolAvg->outputs(), params, "AdaPoolAvg"));
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp
index 10549f356cbe14..7a012d4721fd14 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/batch_to_space.cpp
@@ -39,7 +39,7 @@ void BatchToSpaceLayerTest::SetUp() {
     OPENVINO_SUPPRESS_DEPRECATED_START
     auto b2s = ngraph::builder::makeBatchToSpace(params[0], ngPrc, blockShape, cropsBegin, cropsEnd);
     OPENVINO_SUPPRESS_DEPRECATED_END
-    ngraph::ResultVector results{std::make_shared(b2s)};
+    ngraph::ResultVector results{std::make_shared(b2s)};
     function = std::make_shared(results, params, "BatchToSpace");
 }

diff --git a/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp
index 4ffe6c5a32ced9..62ceb9b62a70b7 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/binary_convolution.cpp
@@ -16,7 +16,7 @@ std::string BinaryConvolutionLayerTest::getTestCaseName(const testing::TestParam
     std::tie(binConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) =
         obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels;
@@ -61,7 +61,7 @@ void BinaryConvolutionLayerTest::SetUp() {
     std::tie(binConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) =
         this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernelSize, strides, dilations;
     std::vector padsBegin, padsEnd;
     size_t numOutChannels;
@@ -74,7 +74,7 @@ void BinaryConvolutionLayerTest::SetUp() {
     // TODO: refactor build BinaryConvolution op to accept filters input as Parameter
     auto binConv = ngraph::builder::makeBinaryConvolution(params[0], kernelSize, strides, padsBegin, padsEnd,
                                                           dilations, padType, numOutChannels, padValue);
-    ngraph::ResultVector results{std::make_shared(binConv)};
+    ngraph::ResultVector results{std::make_shared(binConv)};
     function = std::make_shared(results, params, "BinaryConvolution");
 }

diff --git a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp
index 381a2c9f55fcf7..4d0bd144bccf85 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/bucketize.cpp
@@ -56,11 +56,11 @@ namespace LayerTestsDefinitions {
     auto ngInDataPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inDataPrc);
     auto ngInBucketsPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inBucketsPrc);
     auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc);
-    auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape));
+    auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape));
     data->set_friendly_name("a_data");
-    auto buckets = std::make_shared(ngInBucketsPrc, ngraph::Shape(bucketsShape));
+    auto buckets = std::make_shared(ngInBucketsPrc, ngraph::Shape(bucketsShape));
     buckets->set_friendly_name("b_buckets");
-    auto bucketize = std::make_shared(data, buckets, ngNetPrc, with_right_bound);
-    function = std::make_shared(std::make_shared(bucketize), ngraph::ParameterVector{data, buckets}, "Bucketize");
+    auto bucketize = std::make_shared(data, buckets, ngNetPrc, with_right_bound);
+    function = std::make_shared(std::make_shared(bucketize), ngraph::ParameterVector{data, buckets}, "Bucketize");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp b/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp
index d1adf624faa3f7..df0aa4541caff6 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/clamp.cpp
@@ -32,9 +32,9 @@ void ClampLayerTest::SetUp() {
     std::tie(inShape, interval, netPrc, targetDevice) = this->GetParam();

     auto ngNetPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrc);
-    auto input = std::make_shared(ngNetPrc, ngraph::Shape(inShape));
-    auto clamp = std::make_shared(input, interval.first, interval.second);
-    function = std::make_shared(std::make_shared(clamp), ngraph::ParameterVector{input});
+    auto input = std::make_shared(ngNetPrc, ngraph::Shape(inShape));
+    auto clamp = std::make_shared(input, interval.first, interval.second);
+    function = std::make_shared(std::make_shared(clamp), ngraph::ParameterVector{input});
 }

 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp b/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp
index d57b4c66c8908c..4d6a570eefe5c5 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/concat.cpp
@@ -39,8 +39,8 @@ void ConcatLayerTest::SetUp() {
         params.push_back(param);
         paramsOuts.push_back(param);
     }
-    auto concat = std::make_shared(paramsOuts, axis);
-    ngraph::ResultVector results{std::make_shared(concat)};
+    auto concat = std::make_shared(paramsOuts, axis);
+    ngraph::ResultVector results{std::make_shared(concat)};
     function = std::make_shared(results, params, "concat");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp b/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp
index dffa6ab742c623..b758f8ff435a27 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/constant.cpp
@@ -37,8 +37,8 @@ void ConstantLayerTest::SetUp() {
     std::tie(data_shape, data_precision, data_elements, targetDevice) = this->GetParam();

     const auto precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(data_precision);
-    auto constant = ngraph::op::Constant::create(precision, data_shape, data_elements);
-    ngraph::ResultVector results{std::make_shared(constant)};
+    auto constant = ov::op::v0::Constant::create(precision, data_shape, data_elements);
+    ngraph::ResultVector results{std::make_shared(constant)};

     function = std::make_shared(results, ngraph::ParameterVector{}, "constant");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp
index 6dd2b56dce0210..000699a711ab67 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/convolution.cpp
@@ -15,7 +15,7 @@ std::string ConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels;
@@ -45,7 +45,7 @@ void ConvolutionLayerTest::SetUp() {
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
     std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) =
         this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels;
@@ -58,10 +58,10 @@ void ConvolutionLayerTest::SetUp() {
         filter_weights = ov::test::utils::generate_float_numbers(convOutChannels * inputShape[1] * filter_size,
                                                                  -0.1f, 0.1f);
     }
-    auto conv = std::dynamic_pointer_cast(
+    auto conv = std::dynamic_pointer_cast(
             ngraph::builder::makeConvolution(params[0], ngPrc, kernel, stride, padBegin,
                                              padEnd, dilation, padType, convOutChannels, false, filter_weights));
-    ngraph::ResultVector results{std::make_shared(conv)};
+    ngraph::ResultVector results{std::make_shared(conv)};
     function = std::make_shared(results, params, "convolution");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp
index f7f1597e0ea300..4979cf3e8eae60 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop.cpp
@@ -15,7 +15,7 @@ std::string ConvolutionBackpropLayerTest::getTestCaseName(const testing::TestPar
     InferenceEngine::SizeVector outputShapes;
     std::string targetDevice;
     std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd, outPadding;
     size_t convOutChannels;
@@ -47,23 +47,23 @@ void ConvolutionBackpropLayerTest::SetUp() {
     std::vector outputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
     std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd, outPadding;
     size_t convOutChannels;
     std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams;
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto convBackpropData = std::dynamic_pointer_cast(
+    auto convBackpropData = std::dynamic_pointer_cast(
             ngraph::builder::makeConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin,
                                                          padEnd, dilation, padType, convOutChannels, false, outPadding));
     if (!outputShape.empty()) {
-        auto outShape = ngraph::opset3::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape);
-        convBackpropData = std::dynamic_pointer_cast(
+        auto outShape = ov::op::v0::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape);
+        convBackpropData = std::dynamic_pointer_cast(
         ngraph::builder::makeConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin,
                                                      padEnd, dilation, padType, convOutChannels));
     }
-    ngraph::ResultVector results{std::make_shared(convBackpropData)};
+    ngraph::ResultVector results{std::make_shared(convBackpropData)};
     function = std::make_shared(results, params, "convolutionBackpropData");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp
index ca86a0333b19b0..d02ee1cf6b4257 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/convolution_backprop_data.cpp
@@ -17,7 +17,7 @@ std::string ConvolutionBackpropDataLayerTest::getTestCaseName(const testing::Tes
     InferenceEngine::SizeVector outputShapes;
     std::string targetDevice;
     std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, outputShapes, targetDevice) = obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd, outPadding;
     size_t convOutChannels;
@@ -49,23 +49,23 @@ void ConvolutionBackpropDataLayerTest::SetUp() {
     std::vector outputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
     std::tie(convBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd, outPadding;
     size_t convOutChannels;
     std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = convBackpropDataParams;
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto convBackpropData = std::dynamic_pointer_cast(
+    auto convBackpropData = std::dynamic_pointer_cast(
             ngraph::builder::makeConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin,
                                                          padEnd, dilation, padType, convOutChannels, false, outPadding));
     if (!outputShape.empty()) {
-        auto outShape = ngraph::opset3::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape);
-        convBackpropData = std::dynamic_pointer_cast(
+        auto outShape = ov::op::v0::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape);
+        convBackpropData = std::dynamic_pointer_cast(
         ngraph::builder::makeConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin,
                                                      padEnd, dilation, padType, convOutChannels));
     }
-    ngraph::ResultVector results{std::make_shared(convBackpropData)};
+    ngraph::ResultVector results{std::make_shared(convBackpropData)};
    function = std::make_shared(results, params, "convolutionBackpropData");
 }
 } // namespace LayerTestsDefinitions
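// Editorial note: the recurring edit in the test sources above is mechanical:
// ngraph::op / ngraph::opset* spellings are replaced by the versioned
// ov::op::vN types (e.g. ngraph::opset3::Constant -> ov::op::v0::Constant,
// ngraph::op::PadType -> ov::op::PadType). A minimal sketch of the new
// spelling; the shape and values are illustrative, not taken from the diff:
//
//     #include <openvino/op/constant.hpp>
//     #include <openvino/op/result.hpp>
//
//     auto shape_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {3, 4});
//     auto result = std::make_shared<ov::op::v0::Result>(shape_const);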
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp
index 6850a3e6f74eb7..5b48032bb98699 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder.cpp
@@ -47,11 +47,11 @@ void CTCGreedyDecoderLayerTest::SetUp() {
     ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))};

     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto ctcGreedyDecoder = std::dynamic_pointer_cast(
+    auto ctcGreedyDecoder = std::dynamic_pointer_cast(
             ngraph::builder::makeCTCGreedyDecoder(paramsIn[0], mergeRepeated));
     OPENVINO_SUPPRESS_DEPRECATED_END

-    ngraph::ResultVector results{ std::make_shared(ctcGreedyDecoder) };
+    ngraph::ResultVector results{ std::make_shared(ctcGreedyDecoder) };
     function = std::make_shared(results, paramsIn, "CTCGreedyDecoder");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp
index 8c674b72aa831d..66d71ff16e6bd8 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_greedy_decoder_seq_len.cpp
@@ -81,14 +81,14 @@ void CTCGreedyDecoderSeqLenLayerTest::SetUp() {
     blankIndex = std::min(blankIndex, C - 1);

     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto ctcGreedyDecoderSeqLen = std::dynamic_pointer_cast(
+    auto ctcGreedyDecoderSeqLen = std::dynamic_pointer_cast(
             ngraph::builder::makeCTCGreedyDecoderSeqLen(paramsIn[0], sequenceLenNode,
                                                         blankIndex, mergeRepeated, ngIdxPrc));
     OPENVINO_SUPPRESS_DEPRECATED_END

     ngraph::ResultVector results;
     for (int i = 0; i < ctcGreedyDecoderSeqLen->get_output_size(); i++) {
-        results.push_back(std::make_shared(ctcGreedyDecoderSeqLen->output(i)));
+        results.push_back(std::make_shared(ctcGreedyDecoderSeqLen->output(i)));
     }
     function = std::make_shared(results, paramsIn, "CTCGreedyDecoderSeqLen");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp
index 268d25ff19f320..c7462c9d9ce48a 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/ctc_loss.cpp
@@ -52,12 +52,12 @@ void CTCLossLayerTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngFpPrc, ov::Shape(logitsShapes))};

     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto ctcLoss = std::dynamic_pointer_cast(
+    auto ctcLoss = std::dynamic_pointer_cast(
             ngraph::builder::makeCTCLoss(params[0], logitsLength, labels, labelsLength, blankIndex,
                                          ngFpPrc, ngIntPrc, preprocessCollapseRepeated, ctcMergeRepeated, unique));
     OPENVINO_SUPPRESS_DEPRECATED_END

-    ngraph::ResultVector results{std::make_shared(ctcLoss)};
+    ngraph::ResultVector results{std::make_shared(ctcLoss)};
     function = std::make_shared(results, params, "CTCLoss");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp
index 79fdc2cd3ba134..64f298fa36d47b 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/cum_sum.cpp
@@ -31,11 +31,11 @@ void CumSumLayerTest::SetUp() {
     int64_t axis;
     std::tie(inputShapes, inputPrecision, axis, exclusive, reverse, targetDevice) = this->GetParam();
     const auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    const auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes));
-    const auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{axis})->output(0);
-    const auto cumSum = std::make_shared(paramData, axisNode, exclusive, reverse);
+    const auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes));
+    const auto axisNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, std::vector{axis})->output(0);
+    const auto cumSum = std::make_shared(paramData, axisNode, exclusive, reverse);

-    ngraph::ResultVector results{std::make_shared(cumSum)};
+    ngraph::ResultVector results{std::make_shared(cumSum)};
    function = std::make_shared(results, ngraph::ParameterVector{paramData}, "cumsum");
 }
 } // namespace LayerTestsDefinitions
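// Editorial note: the CumSum test above wires a Parameter, a scalar i64 axis
// Constant, and the exclusive/reverse flags into ov::op::v0::CumSum. A minimal
// sketch of that construction with illustrative shape and axis values:
//
//     #include <openvino/op/constant.hpp>
//     #include <openvino/op/cum_sum.hpp>
//     #include <openvino/op/parameter.hpp>
//
//     auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
//     auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
//     bool exclusive = false, reverse = false;
//     auto cum_sum = std::make_shared<ov::op::v0::CumSum>(data, axis, exclusive, reverse);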
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp
index ae68e2f3d713e5..3f928e191284f8 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/deformable_convolution.cpp
@@ -13,7 +13,7 @@ std::string DeformableConvolutionLayerTest::getTestCaseName(const testing::TestP
     std::string targetDevice;
     std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) =
         obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector offsets, filter, stride, dilation;
     std::vector padBegin, padEnd;
     size_t groups, deformable_groups, convOutChannels;
@@ -64,7 +64,7 @@ void DeformableConvolutionLayerTest::SetUp() {
     InferenceEngine::Precision netPrecision;
     std::tie(convParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) =
         this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector offsets, filter, stride, dilation;
     std::vector padBegin, padEnd;
     size_t groups, deformable_groups, convOutChannels;
@@ -76,30 +76,30 @@ void DeformableConvolutionLayerTest::SetUp() {
     for (auto&& shape : {inputShape, offsets, filter}) {
         params.push_back(std::make_shared(ngPrc, ov::Shape(shape)));
     }
-    auto data = std::make_shared(ngPrc, ngraph::Shape(inputShape));
+    auto data = std::make_shared(ngPrc, ngraph::Shape(inputShape));
     data->set_friendly_name("a_data");
-    auto offset_vals = std::make_shared(ngPrc, ngraph::Shape(offsets));
+    auto offset_vals = std::make_shared(ngPrc, ngraph::Shape(offsets));
     offset_vals->set_friendly_name("b_offset_vals");
-    auto filter_vals = std::make_shared(ngPrc, ngraph::Shape(filter));
+    auto filter_vals = std::make_shared(ngPrc, ngraph::Shape(filter));
     filter_vals->set_friendly_name("c_filter_vals");
     ngraph::ParameterVector parameters{data, offset_vals, filter_vals};
     std::shared_ptr deformable_conv;
     if (with_modulation) {
         auto modulation_shape = ngraph::Shape(offsets);
         modulation_shape[1] = offsets[1] / 2;
-        auto modulation_scalars = std::make_shared(ngPrc, modulation_shape);
+        auto modulation_scalars = std::make_shared(ngPrc, modulation_shape);
         modulation_scalars->set_friendly_name("c_modulation_scalars");

-        deformable_conv = std::make_shared(data, offset_vals, filter_vals, modulation_scalars, stride, padBegin,
+        deformable_conv = std::make_shared(data, offset_vals, filter_vals, modulation_scalars, stride, padBegin,
                                            padEnd, dilation, padType, groups, deformable_groups, with_bilinear_interpolation_pad);
         parameters.push_back(modulation_scalars);
     } else {
-        deformable_conv = std::make_shared(data, offset_vals, filter_vals, stride, padBegin, padEnd, dilation,
+        deformable_conv = std::make_shared(data, offset_vals, filter_vals, stride, padBegin, padEnd, dilation,
                                            padType, groups, deformable_groups, with_bilinear_interpolation_pad);
     }

-    ngraph::ResultVector results{std::make_shared(deformable_conv)};
+    ngraph::ResultVector results{std::make_shared(deformable_conv)};
     function = std::make_shared(results, parameters, "deformable_convolution");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp
index 3349348e5115c4..38a038ca64ab4e 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/deformable_psroi_pooling.cpp
@@ -97,7 +97,7 @@ namespace LayerTestsDefinitions {
         if (offsetsShape.empty()) { // Test without optional third input (offsets)
             params = ov::ParameterVector{std::make_shared(ngPrc, ov::Shape(dataShape)),
                                          std::make_shared(ngPrc, ov::Shape(roisShape))};
-            defomablePSROIPooling = std::make_shared(params[0],
+            defomablePSROIPooling = std::make_shared(params[0],
                                                      params[1],
                                                      outputDim,
                                                      spatialScale_,
@@ -111,7 +111,7 @@ namespace LayerTestsDefinitions {
             params = ov::ParameterVector{std::make_shared(ngPrc, ov::Shape(dataShape)),
                                          std::make_shared(ngPrc, ov::Shape(roisShape)),
                                          std::make_shared(ngPrc, ov::Shape(offsetsShape))};
-            defomablePSROIPooling = std::make_shared(params[0],
+            defomablePSROIPooling = std::make_shared(params[0],
                                                      params[1],
                                                      params[2],
                                                      outputDim,
@@ -124,7 +124,7 @@ namespace LayerTestsDefinitions {
                                                      part_size);
         }

-        ngraph::ResultVector results{std::make_shared(defomablePSROIPooling)};
+        ngraph::ResultVector results{std::make_shared(defomablePSROIPooling)};
         function = std::make_shared(results, params, "deformable_psroi_pooling");
     }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp
index ce7f04d7935c8e..894c7bda8f9cdc 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/depth_to_space.cpp
@@ -7,12 +7,10 @@

 namespace LayerTestsDefinitions {

-using namespace ngraph::opset3;
-
-static inline std::string DepthToSpaceModeToString(const DepthToSpace::DepthToSpaceMode& mode) {
-    static std::map names = {
-        {DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"},
-        {DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"},
+static inline std::string DepthToSpaceModeToString(const ov::op::v0::DepthToSpace::DepthToSpaceMode& mode) {
+    static std::map names = {
+        {ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, "BLOCKS_FIRST"},
+        {ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, "DEPTH_FIRST"},
     };

     auto i = names.find(mode);
@@ -24,7 +22,7 @@ static inline std::string DepthToSpaceModeToString(const DepthToSp
 std::string DepthToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo &obj) {
     std::vector inShape;
-    DepthToSpace::DepthToSpaceMode mode;
+    ov::op::v0::DepthToSpace::DepthToSpaceMode mode;
     std::size_t blockSize;
     InferenceEngine::Precision inputPrecision;
     std::string targetName;
@@ -40,14 +38,14 @@ std::string DepthToSpaceLayerTest::getTestCaseName(const testing::TestParamInfo<
 void DepthToSpaceLayerTest::SetUp() {
     std::vector inShape;
-    DepthToSpace::DepthToSpaceMode mode;
+    ov::op::v0::DepthToSpace::DepthToSpaceMode mode;
     std::size_t blockSize;
     InferenceEngine::Precision inputPrecision;
     std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam();
     auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     ov::ParameterVector params {std::make_shared(inPrc, ov::Shape(inShape))};
     auto d2s = std::make_shared(params[0], mode, blockSize);
-    ngraph::ResultVector results{std::make_shared(d2s)};
+    ngraph::ResultVector results{std::make_shared(d2s)};
     function = std::make_shared(results, params, "DepthToSpace");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp
index fafaa6fe6eeb1e..9ab980acc3a687 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/detection_output.cpp
@@ -7,7 +7,7 @@

 namespace LayerTestsDefinitions {

-std::ostream& operator <<(std::ostream& result, const ngraph::op::DetectionOutputAttrs& attrs) {
+std::ostream& operator <<(std::ostream& result, const ov::op::v0::DetectionOutput::Attributes& attrs) {
     result << "Classes=" << attrs.num_classes << "_";
     result << "backgrId=" << attrs.background_label_id << "_";
     result << "topK="  << attrs.top_k << "_";
@@ -30,7 +30,7 @@ std::ostream& operator <<(std::ostream& result, const ngraph::op::DetectionOutpu
 std::string DetectionOutputLayerTest::getTestCaseName(const testing::TestParamInfo& obj) {
     DetectionOutputAttributes commonAttrs;
     ParamsWhichSizeDepends specificAttrs;
-    ngraph::op::DetectionOutputAttrs attrs;
+    ov::op::v0::DetectionOutput::Attributes attrs;
     size_t batch;
     std::string targetDevice;
     std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, targetDevice) = obj.param;
@@ -164,7 +164,7 @@ void DetectionOutputLayerTest::SetUp() {
     else
         OPENVINO_THROW("DetectionOutput layer supports only 3 or 5 inputs");

-    ngraph::ResultVector results{std::make_shared(detOut)};
+    ngraph::ResultVector results{std::make_shared(detOut)};
     function = std::make_shared(results, params, "DetectionOutput");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp
index 9e4e872befa35d..7cf196ffb37c65 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/dft.cpp
@@ -34,13 +34,13 @@ void DFTLayerTest::SetUp() {
     std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = this->GetParam();
     auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     ngraph::ParameterVector paramVector;
-    auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes));
+    auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes));
     paramVector.push_back(paramData);

     auto dft = ngraph::builder::makeDFT(paramVector[0], axes, signalSize, opType);

-    ngraph::ResultVector results{std::make_shared(dft)};
+    ngraph::ResultVector results{std::make_shared(dft)};
     function = std::make_shared(results, paramVector, "DFT");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp
index 4fdaee9ba47e12..2bf206027bce10 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/einsum.cpp
@@ -42,7 +42,7 @@ void EinsumLayerTest::SetUp() {
     }

     const auto einsum = std::make_shared(paramsOuts, equation);
-    const ngraph::ResultVector results{std::make_shared(einsum)};
+    const ngraph::ResultVector results{std::make_shared(einsum)};
     function = std::make_shared(results, params, "einsum");
 }

diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp
index 574ee1e2b859f4..426d16885e4694 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_offsets_sum.cpp
@@ -42,13 +42,13 @@ void EmbeddingBagOffsetsSumLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision);

-    auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape));
+    auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape));
     ngraph::ParameterVector params = {emb_table_node};

-    auto embBag = std::dynamic_pointer_cast(
+    auto embBag = std::dynamic_pointer_cast(
             ngraph::builder::makeEmbeddingBagOffsetsSum(
                 ngPrc, ngIdxPrc, emb_table_node, indices, offsets, defaultIndex, withWeights, withDefIndex));
-    ngraph::ResultVector results{std::make_shared(embBag)};
+    ngraph::ResultVector results{std::make_shared(embBag)};
     function = std::make_shared(results, params, "embeddingBagOffsetsSum");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp
index 4f8deac2409064..bad040c2e74beb 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_bag_packed_sum.cpp
@@ -39,11 +39,11 @@ void EmbeddingBagPackedSumLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision);

-    auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape));
+    auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape));
     ngraph::ParameterVector params = {emb_table_node};

     auto embBag = ov::test::utils::make_embedding_bag_packed_sum(ngPrc, ngIdxPrc, emb_table_node, indices, withWeights);
-    ngraph::ResultVector results{std::make_shared(embBag)};
+    ngraph::ResultVector results{std::make_shared(embBag)};
    function = std::make_shared(results, params, "embeddingBagPackedSum");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp
index 59686188962528..87491598e75503 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/embedding_segments_sum.cpp
@@ -44,13 +44,13 @@ void EmbeddingSegmentsSumLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     auto ngIdxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indPrecision);

-    auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape));
+    auto emb_table_node = std::make_shared(ngPrc, ngraph::Shape(embTableShape));
     ngraph::ParameterVector params = {emb_table_node};

-    auto embBag = std::dynamic_pointer_cast(
+    auto embBag = std::dynamic_pointer_cast(
             ngraph::builder::makeEmbeddingSegmentsSum(
                 ngPrc, ngIdxPrc, emb_table_node, indices, segmentIds, numSegments, defaultIndex, withWeights, withDefIndex));
-    ngraph::ResultVector results{std::make_shared(embBag)};
+    ngraph::ResultVector results{std::make_shared(embBag)};
     function = std::make_shared(results, params, "embeddingSegmentsSum");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp
index 63e95b4c33bb94..160bace40ce018 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/experimental_detectron_detection_output.cpp
@@ -12,7 +12,7 @@ namespace test {
 namespace subgraph {

 namespace {
-    std::ostream& operator <<(std::ostream& ss, const ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes& attributes) {
+    std::ostream& operator <<(std::ostream& ss, const ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes& attributes) {
         ss << "score_threshold=" << attributes.score_threshold << "_";
         ss << "nms_threshold=" << attributes.nms_threshold << "_";
         ss << "max_delta_log_wh=" << attributes.max_delta_log_wh << "_";
@@ -28,7 +28,7 @@ namespace {
 std::string ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName(
         const testing::TestParamInfo& obj) {
     std::vector inputShapes;
-    ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes attributes;
+    ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes attributes;
     ElementType netPrecision;
     std::string targetName;
     std::tie(
@@ -61,7 +61,7 @@ std::string ExperimentalDetectronDetectionOutputLayerTest::getTestCaseName(

 void ExperimentalDetectronDetectionOutputLayerTest::SetUp() {
     std::vector inputShapes;
-    ngraph::opset6::ExperimentalDetectronDetectionOutput::Attributes attributes;
+    ov::op::v6::ExperimentalDetectronDetectionOutput::Attributes attributes;
     ElementType netPrecision;
     std::string targetName;

@@ -90,7 +90,7 @@ void ExperimentalDetectronDetectionOutputLayerTest::SetUp() {
     for (auto&& shape : inputDynamicShapes)
         params.push_back(std::make_shared(netPrecision, shape));

-    auto experimentalDetectron = std::make_shared(
+    auto experimentalDetectron = std::make_shared(
         params[0],  // input_rois
         params[1],  // input_deltas
         params[2],  // input_scores
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp
index 436eaf0477c39c..8dece91ed85f29 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp
@@ -10,7 +10,7 @@ namespace LayerTestsDefinitions {

 std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInfo &obj) {
     std::vector inputShape, kernel, strides, rates;
-    ngraph::op::PadType pad_type;
+    ov::op::PadType pad_type;
     InferenceEngine::Precision netPrc;
     InferenceEngine::Precision inPrc, outPrc;
     InferenceEngine::Layout inLayout;
@@ -32,17 +32,17 @@ std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInf

 void ExtractImagePatchesTest::SetUp() {
     std::vector inputShape, kernel, strides, rates;
-    ngraph::op::PadType pad_type;
+    ov::op::PadType pad_type;
     InferenceEngine::Precision netPrecision;
     std::tie(inputShape, kernel, strides, rates, pad_type, netPrecision, inPrc, outPrc, inLayout, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);

-    auto inputNode = std::make_shared(ngPrc, ngraph::Shape(inputShape));
+    auto inputNode = std::make_shared(ngPrc, ngraph::Shape(inputShape));
     ngraph::ParameterVector params = {inputNode};

-    auto extImgPatches = std::make_shared(
+    auto extImgPatches = std::make_shared(
         inputNode, ngraph::Shape(kernel), ngraph::Strides(strides), ngraph::Shape(rates), pad_type);
-    ngraph::ResultVector results{std::make_shared(extImgPatches)};
+    ngraph::ResultVector results{std::make_shared(extImgPatches)};
     function = std::make_shared(results, params, "ExtractImagePatches");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp
index e6c560c2aa6739..484a010da483f3 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/eye.cpp
@@ -51,7 +51,7 @@ void EyeLayerTest::SetUp() {
     col_num = eye_par[1];
     shift = eye_par[2];

-    std::shared_ptr eye_operation;
+    std::shared_ptr eye_operation;

     auto rows_const = std::make_shared(ngraph::element::i32, input_shapes[0], &row_num);
     rows_const->set_friendly_name("rows");
@@ -66,13 +66,13 @@ void EyeLayerTest::SetUp() {
                                                            out_batch_shape.data());
         batch_shape_par->set_friendly_name("batchShape");
         eye_operation =
-            std::make_shared(rows_const, cols_const, diag_const, batch_shape_par, net_precision);
+            std::make_shared(rows_const, cols_const, diag_const, batch_shape_par, net_precision);
     } else {
-        eye_operation = std::make_shared(rows_const, cols_const, diag_const, net_precision);
+        eye_operation = std::make_shared(rows_const, cols_const, diag_const, net_precision);
     }

     // Without this call the eye operation will be calculated by CPU and substituted by Constant operator
     ov::pass::disable_constant_folding(eye_operation);
-    ngraph::ResultVector results{std::make_shared(eye_operation)};
+    ngraph::ResultVector results{std::make_shared(eye_operation)};
     function = std::make_shared(results, ngraph::ParameterVector{}, "eye");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp b/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp
index 04dfe2540390ae..feb4a6875c81b3 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/fake_quantize.cpp
@@ -20,7 +20,7 @@ std::string FakeQuantizeLayerTest::getTestCaseName(const testing::TestParamInfo<
     std::vector constShape;
     std::vector fqDirectArgs;
     std::vector inputArg;
-    ngraph::op::AutoBroadcastSpec broadcast;
+    ov::op::AutoBroadcastSpec broadcast;
     std::tie(levels, constShape, fqDirectArgs, inputArg, broadcast) = fqParams;

     std::ostringstream result;
@@ -57,7 +57,7 @@ void FakeQuantizeLayerTest::SetUp() {
     std::vector constShape;
     std::vector fqDirectArg;
     std::vector inputArg;
-    ngraph::op::AutoBroadcastSpec broadcast;
+    ov::op::AutoBroadcastSpec broadcast;
     std::tie(levels, constShape, fqDirectArg, inputArg, broadcast) = fqParams;
     if (inputArg.size() == 3) {
         inputDataMin = inputArg[0];
@@ -92,9 +92,9 @@ void FakeQuantizeLayerTest::SetUp() {
                                                  {fqDirectArg[2]},
                                                  {fqDirectArg[3]});
     }
-    auto fq = std::dynamic_pointer_cast(fakeQNode);
+    auto fq = std::dynamic_pointer_cast(fakeQNode);

-    ngraph::ResultVector results{std::make_shared(fq)};
+    ngraph::ResultVector results{std::make_shared(fq)};
     function = std::make_shared(results, params, "fakeQuantize");
     configuration = config.second;
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp
index bd7f75e20b48d8..50fe5f1aa5bff0 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gather.cpp
@@ -16,10 +16,10 @@ void GatherLayerTestBase::SetUp(const gatherParamsTuple& params) {
     ASSERT_EQ(ngraph::shape_size(indicesShape), indices.size()) << "Indices vector size and provided indices shape doesn't fit each other";
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto indicesNode = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape(indicesShape), indices);
-    auto axisNode = ngraph::opset3::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis});
-    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode);
-    ngraph::ResultVector results{std::make_shared(gather)};
+    auto indicesNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape(indicesShape), indices);
+    auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), {axis});
+    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode);
+    ngraph::ResultVector results{std::make_shared(gather)};
     function = std::make_shared(results, functionParams, "gather");
 }

@@ -85,9 +85,9 @@ void Gather7LayerTest::SetUp() {
     ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))};
     auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, {}, true,
                                                      inputShape[axis < 0 ? axis + inputShape.size() : axis] - 1, 0);
-    auto axisNode = ngraph::opset7::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
-    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
-    ngraph::ResultVector results{ std::make_shared(gather) };
+    auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
+    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
+    ngraph::ResultVector results{ std::make_shared(gather) };
     function = std::make_shared(results, functionParams, "gather");
 }

@@ -127,9 +127,9 @@ void Gather8LayerTest::SetUp() {
     auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, {}, true,
                                                      inputShape[axis < 0 ? axis + inputShape.size() : axis] - 1,
                                                      -static_cast(inputShape[axis < 0 ? axis + inputShape.size() : axis]));
-    auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
-    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
-    ngraph::ResultVector results{ std::make_shared(gather) };
+    auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
+    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
+    ngraph::ResultVector results{ std::make_shared(gather) };
     function = std::make_shared(results, functionParams, "gather");
 }

@@ -166,11 +166,11 @@ void Gather8IndiceScalarLayerTest::SetUp() {
     int batchIdx = std::get<1>(axis_batchIdx);
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto indicesNode = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{}, {inputShape[axis] - 1})->output(0);
+    auto indicesNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{}, {inputShape[axis] - 1})->output(0);

-    auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
-    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
-    ngraph::ResultVector results{ std::make_shared(gather) };
+    auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
+    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
+    ngraph::ResultVector results{ std::make_shared(gather) };
     function = std::make_shared(results, functionParams, "gather");
 }

@@ -219,9 +219,9 @@ void Gather8withIndicesDataLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector functionParams {std::make_shared(ngPrc, ov::Shape(inputShape))};
     auto indicesNode = ngraph::builder::makeConstant(ngraph::element::i64, indicesShape, indicesData);
-    auto axisNode = ngraph::opset8::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
-    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
-    ngraph::ResultVector results{ std::make_shared(gather) };
+    auto axisNode = ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape({}), { axis });
+    auto gather = std::make_shared(functionParams[0], indicesNode, axisNode, batchIdx);
+    ngraph::ResultVector results{ std::make_shared(gather) };
    function = std::make_shared(results, functionParams, "gather");
 }
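// Editorial note: the gather tests above all follow one construction pattern:
// data Parameter, indices node, scalar i64 axis Constant, and a batch_dims
// argument for the v7/v8 variants. A minimal sketch with illustrative shapes
// and values (only the types and call shapes are taken from the diff):
//
//     #include <openvino/op/constant.hpp>
//     #include <openvino/op/gather.hpp>
//     #include <openvino/op/parameter.hpp>
//
//     auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4, 3});
//     auto indices = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {0, 2});
//     auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
//     auto gather = std::make_shared<ov::op::v8::Gather>(data, indices, axis, /*batch_dims=*/0);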
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp
index 09ee2fb120e70a..e0512e2337a6a1 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gather_elements.cpp
@@ -48,12 +48,15 @@ void GatherElementsLayerTest::SetUp() {
         posAxis += dataShape.size();
     const auto axisDim = dataShape[posAxis];
-    auto indicesValues = ov::test::utils::create_and_fill_tensor(ov::element::i32, indicesShape, axisDim - 1, 0);
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = axisDim - 1;
+    auto indicesValues = ov::test::utils::create_and_fill_tensor(ov::element::i32, indicesShape, in_data);
     auto indicesNode = std::make_shared(indicesValues);
     auto gather = std::make_shared(params[0], indicesNode, axis);
-    ngraph::ResultVector results{std::make_shared(gather)};
+    ngraph::ResultVector results{std::make_shared(gather)};
     function = std::make_shared(results, params, "gatherEl");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp
index a0d9d40705ab3d..df4ed4c90efb67 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gather_nd.cpp
@@ -46,9 +46,9 @@ void GatherNDLayerTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))};
     auto dataNode = params[0];
-    auto gather = std::dynamic_pointer_cast(
-        ngraph::builder::makeGatherND(dataNode, indicesShape, ngIPrc, batchDims));
-    ngraph::ResultVector results{std::make_shared(gather)};
+    auto gather = std::dynamic_pointer_cast(
+        ngraph::builder::makeGatherND(dataNode, indicesShape, ngIPrc, batchDims));
+    ngraph::ResultVector results{std::make_shared(gather)};
     function = std::make_shared(results, params, "gatherND");
 }
@@ -70,9 +70,9 @@ void GatherND8LayerTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngDPrc, ov::Shape(dataShape))};
     auto dataNode = params[0];
-    auto gather = std::dynamic_pointer_cast(
-        ngraph::builder::makeGatherND8(dataNode, indicesShape, ngIPrc, batchDims));
-    ngraph::ResultVector results{ std::make_shared(gather) };
+    auto gather = std::dynamic_pointer_cast(
+        ngraph::builder::makeGatherND8(dataNode, indicesShape, ngIPrc, batchDims));
+    ngraph::ResultVector results{ std::make_shared(gather) };
     function = std::make_shared(results, params, "gatherND");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp
index fc98359d8e3907..11e69d99b695e1 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gather_tree.cpp
@@ -60,9 +60,9 @@ void GatherTreeLayerTest::SetUp() {
             throw std::runtime_error("Unsupported inputType");
     }
-    auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4);
+    auto operationResult = std::make_shared(paramsIn.front(), inp2, inp3, inp4);
-    ngraph::ResultVector results{std::make_shared(operationResult)};
+    ngraph::ResultVector results{std::make_shared(operationResult)};
     function = std::make_shared(results, paramsIn, "GatherTree");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp b/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp
index c2a55b66ba00d7..1a68ff8f332625 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/grid_sample.cpp
@@ -9,9 +9,9 @@ namespace LayerTestsDefinitions {
 std::string GridSampleLayerTest::getTestCaseName(const testing::TestParamInfo& obj) {
     InferenceEngine::SizeVector dataShape;
     InferenceEngine::SizeVector gridShape;
-    decltype(ngraph::op::v9::GridSample::Attributes::align_corners) alignCorners;
-    decltype(ngraph::op::v9::GridSample::Attributes::mode) mode;
-    decltype(ngraph::op::v9::GridSample::Attributes::padding_mode) paddingMode;
+    decltype(ov::op::v9::GridSample::Attributes::align_corners) alignCorners;
+    decltype(ov::op::v9::GridSample::Attributes::mode) mode;
+    decltype(ov::op::v9::GridSample::Attributes::padding_mode) paddingMode;
     InferenceEngine::Precision inDataPrc;
     InferenceEngine::Precision inGridPrc;
     std::string targetDevice;
@@ -33,9 +33,9 @@ std::string GridSampleLayerTest::getTestCaseName(const testing::TestParamInfo
-    auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape));
-    auto grid = std::make_shared(ngInGridPrc, ngraph::Shape(gridShape));
-    auto gridSample = std::make_shared(
+    auto data = std::make_shared(ngInDataPrc, ngraph::Shape(dataShape));
+    auto grid = std::make_shared(ngInGridPrc, ngraph::Shape(gridShape));
+    auto gridSample = std::make_shared(
         data,
         grid,
-        ngraph::op::v9::GridSample::Attributes(alignCorners, mode, paddingMode));
-    function = std::make_shared(std::make_shared(gridSample),
+        ov::op::v9::GridSample::Attributes(alignCorners, mode, paddingMode));
+    function = std::make_shared(std::make_shared(gridSample),
                                 ngraph::ParameterVector{data, grid},
                                 "GridSample");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp
index 54fe36b363170b..f1e135f06c47d2 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/grn.cpp
@@ -37,8 +37,8 @@ void GrnLayerTest::SetUp() {
     std::tie(netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, bias, targetDevice) = GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))};
-    auto grn = std::make_shared(paramsIn[0], bias);
-    ngraph::ResultVector results{ std::make_shared(grn) };
+    auto grn = std::make_shared(paramsIn[0], bias);
+    ngraph::ResultVector results{ std::make_shared(grn) };
     function = std::make_shared(results, paramsIn, "Grn");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp
index 14408094c21dff..696f74a2ff7c32 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution.cpp
@@ -14,7 +14,7 @@ std::string GroupConvolutionLayerTest::getTestCaseName(const testing::TestParamI
     InferenceEngine::SizeVector inputShapes;
     std::string targetDevice;
     std::tie(groupConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels, numGroups;
@@ -44,17 +44,17 @@ void GroupConvolutionLayerTest::SetUp() {
     std::vector inputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
     std::tie(groupConvParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels, numGroups;
     std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvParams;
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto groupConv = std::dynamic_pointer_cast(
-        ngraph::builder::makeGroupConvolution(params[0], ngPrc, kernel, stride, padBegin,
-                                              padEnd, dilation, padType, convOutChannels, numGroups));
-    ngraph::ResultVector results{std::make_shared(groupConv)};
+    auto groupConv = std::dynamic_pointer_cast(
+        ngraph::builder::makeGroupConvolution(params[0], ngPrc, kernel, stride, padBegin,
+                                              padEnd, dilation, padType, convOutChannels, numGroups));
+    ngraph::ResultVector results{std::make_shared(groupConv)};
     function = std::make_shared(results, params, "groupConvolution");
 }
 } // namespace LayerTestsDefinitions
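Beyond the namespace rename, the gather_elements.cpp hunk above also switches ov::test::utils::create_and_fill_tensor from positional range/start arguments to an ov::test::utils::InputGenerateData struct. A hedged sketch of the new call shape, assuming the helpers live in the common test utilities (the header path and wrapper function are assumptions):

// Fill an indices tensor with values in [start_from, start_from + range),
// using the struct-based API shown in the gather_elements hunk.
#include "common_test_utils/ov_tensor_utils.hpp"  // assumed header for the helpers

ov::Tensor make_indices(const ov::Shape& indices_shape, int64_t axis_dim) {
    ov::test::utils::InputGenerateData in_data;
    in_data.start_from = 0;           // lowest generated value
    in_data.range = axis_dim - 1;     // keep indices below the gathered axis size
    return ov::test::utils::create_and_fill_tensor(ov::element::i32, indices_shape, in_data);
}

Collecting the generation parameters in one struct keeps the call sites readable as more knobs (resolution, seed, and so on) are added, instead of growing an ever-longer positional argument list.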
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp
index de4f784b4c6ec4..fc56ef8a87b72f 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/group_convolution_backprop_data.cpp
@@ -16,7 +16,7 @@ std::string GroupConvBackpropDataLayerTest::getTestCaseName(const testing::TestP
     InferenceEngine::SizeVector inputShapes;
     std::string targetDevice;
     std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels, numGroups;
@@ -46,17 +46,17 @@ void GroupConvBackpropDataLayerTest::SetUp() {
     std::vector inputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
     std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels, numGroups;
     std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType) = groupConvBackpropDataParams;
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto groupConvBackpropData = std::dynamic_pointer_cast(
-        ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin,
-                                                          padEnd, dilation, padType, convOutChannels, numGroups));
-    ngraph::ResultVector results{std::make_shared(groupConvBackpropData)};
+    auto groupConvBackpropData = std::dynamic_pointer_cast(
+        ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin,
+                                                          padEnd, dilation, padType, convOutChannels, numGroups));
+    ngraph::ResultVector results{std::make_shared(groupConvBackpropData)};
     function = std::make_shared(results, params, "GroupConvolutionBackpropData");
 }
@@ -68,7 +68,7 @@ std::string GroupConvBackpropLayerTest::getTestCaseName(testing::TestParamInfo
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd, outPadding;
     size_t convOutChannels, numGroups;
@@ -100,25 +100,25 @@ void GroupConvBackpropLayerTest::SetUp() {
     std::vector inputShape, outputShape;
     auto netPrecision = InferenceEngine::Precision::UNSPECIFIED;
     std::tie(groupConvBackpropDataParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, outputShape, targetDevice) = this->GetParam();
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector padBegin, padEnd, outPadding;
     size_t convOutChannels, numGroups;
     std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, numGroups, padType, outPadding) = groupConvBackpropDataParams;
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    std::shared_ptr groupConvBackpropData;
+    std::shared_ptr groupConvBackpropData;
     if (!outputShape.empty()) {
-        auto outShape = ngraph::opset3::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape);
-        groupConvBackpropData = std::dynamic_pointer_cast(
+        auto outShape = ov::op::v0::Constant::create(ngraph::element::i64, {outputShape.size()}, outputShape);
+        groupConvBackpropData = std::dynamic_pointer_cast(
             ngraph::builder::makeGroupConvolutionBackpropData(params[0], outShape, ngPrc, kernel, stride, padBegin,
                                                               padEnd, dilation, padType, convOutChannels, numGroups, false, outPadding));
     } else {
-        groupConvBackpropData = std::dynamic_pointer_cast(
+        groupConvBackpropData = std::dynamic_pointer_cast(
             ngraph::builder::makeGroupConvolutionBackpropData(params[0], ngPrc, kernel, stride, padBegin,
                                                               padEnd, dilation, padType, convOutChannels, numGroups, false, outPadding));
     }
-    ngraph::ResultVector results{std::make_shared(groupConvBackpropData)};
+    ngraph::ResultVector results{std::make_shared(groupConvBackpropData)};
     function = std::make_shared(results, params, "GroupConvolutionBackpropData");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp
index 7343ad51749b0b..f1fff2faee7463 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gru_cell.cpp
@@ -105,7 +105,7 @@ void GRUCellTest::SetUp() {
     auto gru_cell = std::make_shared(params[0], params[1], W, R, B, hidden_size, activations,
                                      activations_alpha, activations_beta, clip, linear_before_reset);
-    ngraph::ResultVector results{std::make_shared(gru_cell->output(0))};
+    ngraph::ResultVector results{std::make_shared(gru_cell->output(0))};
     function = std::make_shared(results, params, "gru_cell");
     if (should_decompose) {
         ngraph::pass::Manager m;
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp
index a741b6263fde15..2052edfd863ba8 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/gru_sequence.cpp
@@ -23,7 +23,7 @@ namespace LayerTestsDefinitions {
     std::vector activations_beta;
     float clip;
     bool linear_before_reset;
-    ngraph::op::RecurrentSequenceDirection direction;
+    ov::op::RecurrentSequenceDirection direction;
     InputLayerType WRBType;
     InferenceEngine::Precision netPrecision;
     std::string targetDevice;
@@ -59,12 +59,12 @@ namespace LayerTestsDefinitions {
     std::vector activations_beta;
     float clip;
     bool linear_before_reset;
-    ngraph::op::RecurrentSequenceDirection direction;
+    ov::op::RecurrentSequenceDirection direction;
     InputLayerType WRBType;
     InferenceEngine::Precision netPrecision;
     std::tie(m_mode, seq_lengths, batch, hidden_size, activations, clip, linear_before_reset, direction, WRBType,
              netPrecision, targetDevice) = this->GetParam();
-    size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
+    size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
     std::vector inputShapes = {
         {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch},
          {num_directions, 3 * hidden_size, input_size}, {num_directions, 3 * hidden_size, hidden_size},
@@ -115,15 +115,15 @@ namespace LayerTestsDefinitions {
     auto gru_sequence = std::make_shared(params[0], params[1], seq_lengths_node, W, R, B, hidden_size, direction,
                                          activations, activations_alpha, activations_beta, clip, linear_before_reset);
-    ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)),
-                                 std::make_shared(gru_sequence->output(1))};
+    ngraph::ResultVector results{std::make_shared(gru_sequence->output(0)),
+                                 std::make_shared(gru_sequence->output(1))};
     function = std::make_shared(results, params, "gru_sequence");
     bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ ||
                              m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM ||
                              m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST);
     if (!is_pure_sequence) {
         ngraph::pass::Manager manager;
-        if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL)
+        if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL)
             manager.register_pass();
         manager.register_pass();
         manager.run_passes(function);
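The sequence-test hunks only rename the direction enum, but they also make the shape contract of the tests visible. A sketch of how those GRU sequence shapes are derived (the bias shape and the gate order noted in comments are assumptions based on the shapes visible in the diff):

// Canonical input shapes for a GRU sequence test case.
#include <cstddef>
#include <vector>

#include "openvino/op/util/attr_types.hpp"  // ov::op::RecurrentSequenceDirection

std::vector<std::vector<size_t>> gru_sequence_shapes(size_t batch, size_t seq_lengths,
                                                     size_t input_size, size_t hidden_size,
                                                     ov::op::RecurrentSequenceDirection direction) {
    // BIDIRECTIONAL runs a forward and a backward pass, so weights double up.
    const size_t num_directions =
        direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
    return {
        {batch, seq_lengths, input_size},               // X
        {batch, num_directions, hidden_size},           // initial hidden state
        {batch},                                        // per-batch sequence lengths
        {num_directions, 3 * hidden_size, input_size},  // W (3 gates per GRU cell)
        {num_directions, 3 * hidden_size, hidden_size}, // R
        {num_directions, 3 * hidden_size},              // B (assumed, not visible in the hunk)
    };
}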
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp
index 0382268acb8cc7..9a07facc117300 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/interpolate.cpp
@@ -24,10 +24,10 @@ std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo
     std::vector axes;
     std::vector scales;
     bool antialias;
-    ngraph::op::v4::Interpolate::InterpolateMode mode;
-    ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
-    ngraph::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode;
-    ngraph::op::v4::Interpolate::NearestMode nearestMode;
+    ov::op::v4::Interpolate::InterpolateMode mode;
+    ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
+    ov::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode;
+    ov::op::v4::Interpolate::NearestMode nearestMode;
     double cubeCoef;
     std::tie(mode, shapeCalcMode, coordinateTransformMode, nearestMode, antialias, padBegin, padEnd, cubeCoef, axes, scales) = interpolateParams;
     std::ostringstream result;
@@ -62,10 +62,10 @@ void InterpolateLayerTest::SetUp() {
     std::vector axes;
     std::vector scales;
     bool antialias;
-    ngraph::op::v4::Interpolate::InterpolateMode mode;
-    ngraph::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
-    ngraph::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode;
-    ngraph::op::v4::Interpolate::NearestMode nearestMode;
+    ov::op::v4::Interpolate::InterpolateMode mode;
+    ov::op::v4::Interpolate::ShapeCalcMode shapeCalcMode;
+    ov::op::v4::Interpolate::CoordinateTransformMode coordinateTransformMode;
+    ov::op::v4::Interpolate::NearestMode nearestMode;
     configuration.insert(additional_config.begin(), additional_config.end());
@@ -75,32 +75,32 @@ void InterpolateLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto sizesConst = ngraph::opset3::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape);
-    auto sizesInput = std::make_shared(sizesConst);
+    auto sizesConst = ov::op::v0::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape);
+    auto sizesInput = std::make_shared(sizesConst);
-    auto scales_const = ngraph::opset3::Constant(ngraph::element::Type_t::f32, {scales.size()}, scales);
-    auto scalesInput = std::make_shared(scales_const);
+    auto scales_const = ov::op::v0::Constant(ngraph::element::Type_t::f32, {scales.size()}, scales);
+    auto scalesInput = std::make_shared(scales_const);
-    ngraph::op::v4::Interpolate::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin,
+    ov::op::v4::Interpolate::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin,
         padEnd, coordinateTransformMode, nearestMode, antialias, cubeCoef};
-    std::shared_ptr interpolate;
+    std::shared_ptr interpolate;
     if (axes.empty()) {
-        interpolate = std::make_shared(params[0],
+        interpolate = std::make_shared(params[0],
                                        sizesInput,
                                        scalesInput,
                                        interpolateAttributes);
     } else {
-        auto axesConst = ngraph::opset3::Constant(ngraph::element::Type_t::i64, {axes.size()}, axes);
-        auto axesInput = std::make_shared(axesConst);
+        auto axesConst = ov::op::v0::Constant(ngraph::element::Type_t::i64, {axes.size()}, axes);
+        auto axesInput = std::make_shared(axesConst);
-        interpolate = std::make_shared(params[0],
+        interpolate = std::make_shared(params[0],
                                        sizesInput,
                                        scalesInput,
                                        axesInput,
                                        interpolateAttributes);
     }
-    const ngraph::ResultVector results{std::make_shared(interpolate)};
+    const ngraph::ResultVector results{std::make_shared(interpolate)};
     function = std::make_shared(results, params, "interpolate");
 }
@@ -149,12 +149,12 @@ void Interpolate1LayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto sizesConst = ngraph::opset3::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape);
-    auto sizesInput = std::make_shared(sizesConst);
+    auto sizesConst = ov::op::v0::Constant(ngraph::element::Type_t::i64, {targetShape.size()}, targetShape);
+    auto sizesInput = std::make_shared(sizesConst);
     bool align_corners = true;
-    ngraph::op::v0::InterpolateAttrs interpolateAttributes;
+    ov::op::v0::Interpolate::Attributes interpolateAttributes;
     interpolateAttributes.axes = axes;
     interpolateAttributes.mode = mode;
     interpolateAttributes.align_corners = align_corners;
@@ -162,9 +162,9 @@ void Interpolate1LayerTest::SetUp() {
     interpolateAttributes.pads_begin = pads;
     interpolateAttributes.pads_end = pads;
-    auto interpolate = std::make_shared(params[0], sizesInput, interpolateAttributes);
+    auto interpolate = std::make_shared(params[0], sizesInput, interpolateAttributes);
-    const ngraph::ResultVector results{std::make_shared(interpolate)};
+    const ngraph::ResultVector results{std::make_shared(interpolate)};
     function = std::make_shared(results, params, "interpolate");
 }
@@ -211,13 +211,13 @@ std::string InterpolateLayerTest::getTestCaseName(const testing::TestParamInfo
-static std::shared_ptr makeScalesOrSizesInput(ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode,
+static std::shared_ptr makeScalesOrSizesInput(ov::op::util::InterpolateBase::ShapeCalcMode shapeCalcMode,
                                               const std::vector& sizes,
                                               const std::vector& scales) {
     if (shapeCalcMode == ov::op::util::InterpolateBase::ShapeCalcMode::SIZES)
-        return std::make_shared(ngraph::element::Type_t::i64, ov::Shape{sizes.size()}, sizes);
+        return std::make_shared(ngraph::element::Type_t::i64, ov::Shape{sizes.size()}, sizes);
     else
-        return std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.size()}, scales);
+        return std::make_shared(ngraph::element::Type_t::f32, ov::Shape{scales.size()}, scales);
 }
 void InterpolateLayerTest::SetUp() {
@@ -248,20 +248,20 @@ void InterpolateLayerTest::SetUp() {
     ov::op::util::InterpolateBase::InterpolateAttrs interpolateAttributes{mode, shapeCalcMode, padBegin,
         padEnd, coordinateTransformMode, nearestMode, antialias, cubeCoef};
-    std::shared_ptr interpolate{};
+    std::shared_ptr interpolate{};
     if (axes.empty()) {
-        interpolate = std::make_shared(params[0],
+        interpolate = std::make_shared(params[0],
                                        scalesOrSizesInput,
                                        interpolateAttributes);
     } else {
-        auto axesInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{axes.size()}, axes);
+        auto axesInput = std::make_shared(ngraph::element::Type_t::i64, ov::Shape{axes.size()}, axes);
-        interpolate = std::make_shared(params[0],
+        interpolate = std::make_shared(params[0],
                                        scalesOrSizesInput,
                                        axesInput,
                                        interpolateAttributes);
     }
-    const ngraph::ResultVector results{std::make_shared(interpolate)};
+    const ngraph::ResultVector results{std::make_shared(interpolate)};
     function = std::make_shared(results, params, "interpolate");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp b/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp
index 843c8945aab6ac..ab0859022090df 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/log_softmax.cpp
@@ -41,9 +41,9 @@ void LogSoftmaxLayerTest::SetUp() {
     const ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    const auto logSoftmax = std::make_shared(params.at(0), axis);
+    const auto logSoftmax = std::make_shared(params.at(0), axis);
-    const ngraph::ResultVector results {std::make_shared(logSoftmax)};
+    const ngraph::ResultVector results {std::make_shared(logSoftmax)};
     function = std::make_shared(results, params, "logSoftmax");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp
index c692ad4525e903..1136600a88f973 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/logical.cpp
@@ -72,7 +72,7 @@ void LogicalLayerTest::SetUp() {
         auto secondInput = ngraph::builder::makeInputLayer(ngInputsPrc, secondInputType, inputShapes.second);
         OPENVINO_SUPPRESS_DEPRECATED_END
         if (secondInputType == ngraph::helpers::InputLayerType::PARAMETER) {
-            inputs.push_back(std::dynamic_pointer_cast(secondInput));
+            inputs.push_back(std::dynamic_pointer_cast(secondInput));
         }
         logicalNode = ngraph::builder::makeLogical(inputs[0], secondInput, logicalOpType);
     } else {
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp b/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp
index 25f4bfbd90f6dc..844d73cba73292 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/loop.cpp
@@ -59,9 +59,9 @@ namespace LayerTestsDefinitions {
         types_separate.push_back(el.second);
     }
     // Example:
-    /* auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});
-       auto Y = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});
-       auto M = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});*/
+    /* auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});
+       auto Y = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});
+       auto M = std::make_shared(ngraph::element::f32, ngraph::Shape{32, 1, 10});*/
     ov::ParameterVector params;
     for (auto&& shape : inputs_separate) {
         params.push_back(std::make_shared(ngPrc, ov::Shape(shape)));
@@ -70,55 +70,55 @@ namespace LayerTestsDefinitions {
     // Set up the cell body, a function from (Xi, Yi) -> (Zo)
     // Body parameters
     const std::vector body_params_shapes(inputs_separate.size(), ngraph::PartialShape::dynamic());
-    auto current_iteration = std::make_shared(ngraph::element::i64, ngraph::Shape{1});
+    auto current_iteration = std::make_shared(ngraph::element::i64, ngraph::Shape{1});
     //Example:
-/*  auto Xi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());
-    auto Yi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());
-    auto M_body = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());*/
+/*  auto Xi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());
+    auto Yi = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());
+    auto M_body = std::make_shared(ngraph::element::f32, ngraph::PartialShape::dynamic());*/
     ngraph::ParameterVector body_params;
     for (const auto &pshape : body_params_shapes) {
-        auto paramNode = std::make_shared(ngPrc, pshape);
+        auto paramNode = std::make_shared(ngPrc, pshape);
         body_params.push_back(paramNode);
     }
     std::shared_ptr body_condition_const;
     if (is_body_condition_const) {
         if (body_condition) {
-            body_condition_const = std::make_shared(
+            body_condition_const = std::make_shared(
                 ngraph::element::boolean, ngraph::Shape{1}, true);
         } else {
-            body_condition_const = std::make_shared(
+            body_condition_const = std::make_shared(
                 ngraph::element::boolean, ngraph::Shape{1}, false);
         }
     }
     auto trip_count_const =
-        std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count);
+        std::make_shared(ngraph::element::i64, ngraph::Shape{1}, trip_count);
     std::shared_ptr exec_condition;
     if (execute_first_iteration) {
-        exec_condition = std::make_shared(
+        exec_condition = std::make_shared(
             ngraph::element::boolean, ngraph::Shape{1}, true);
     } else {
-        exec_condition = std::make_shared(
+        exec_condition = std::make_shared(
            ngraph::element::boolean, ngraph::Shape{1}, false);
     }
     // Body
     std::shared_ptr Zo = body_params[0];
     for (int i = 1; i < body_params.size(); ++i) {
-        Zo = std::make_shared(body_params[i], Zo);
+        Zo = std::make_shared(body_params[i], Zo);
     }
     // body_params.insert(body_params.begin(), current_iteration);
     auto body = std::make_shared(ngraph::OutputVector{body_condition_const, Zo},
                                  body_params);
-    auto loop = std::make_shared(trip_count_const, exec_condition);
+    auto loop = std::make_shared(trip_count_const, exec_condition);
     loop->set_function(body);
-    loop->set_special_body_ports(ngraph::opset5::Loop::SpecialBodyPorts{-1, 0});
+    loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0});
     for (int i = 0; i < body_params.size(); ++i) {
         if (types_separate[i] == LOOP_IN_TYPE::INVARIANT) {
@@ -137,9 +137,9 @@ namespace LayerTestsDefinitions {
     // start=0, stride=1, part_size=1, end=-1, axis=1
     auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1);
-    auto result0 = std::make_shared(out0);
-    auto result1 = std::make_shared(out1);
-    auto result2 = std::make_shared(out2);
+    auto result0 = std::make_shared(out0);
+    auto result1 = std::make_shared(out1);
+    auto result2 = std::make_shared(out2);
     function = std::make_shared(ngraph::ResultVector{result0, result1, result2}, params, "loop");
 }
@@ -205,9 +205,9 @@ namespace LayerTestsDefinitions {
     auto cond_input_create = [&params] (ngraph::element::Type prc, const ngraph::Shape &shape, int value = 0, bool is_static = false)
         -> std::shared_ptr {
         if (is_static)
-            return std::make_shared(prc, shape, value);
+            return std::make_shared(prc, shape, value);
-        auto input = std::make_shared(prc, shape);
+        auto input = std::make_shared(prc, shape);
         params.push_back(input);
         return input;
     };
@@ -230,24 +230,24 @@ namespace LayerTestsDefinitions {
     //                 Full loop                    Dynamic exit loop
     //                 n_iter = count               n_iter = ex_val
     //
-    auto b_indx = std::make_shared(ngraph::element::i64, ngraph::Shape{});
-    auto b_data = std::make_shared(prc, ngShape);
-    auto b_indx_cast = std::make_shared(b_indx, prc);
-    auto b_add  = std::make_shared(b_data, b_indx_cast);
+    auto b_indx = std::make_shared(ngraph::element::i64, ngraph::Shape{});
+    auto b_data = std::make_shared(prc, ngShape);
+    auto b_indx_cast = std::make_shared(b_indx, prc);
+    auto b_add  = std::make_shared(b_data, b_indx_cast);
     std::shared_ptr b_cond;
     if (dynamic_exit == -1) {
-        b_cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true);
+        b_cond = std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true);
     } else {
-        auto b_exit_value = std::make_shared(ngraph::element::i64, scalarShape, dynamic_exit);
-        b_cond = std::make_shared(b_indx, b_exit_value);
+        auto b_exit_value = std::make_shared(ngraph::element::i64, scalarShape, dynamic_exit);
+        b_cond = std::make_shared(b_indx, b_exit_value);
     }
     auto body = std::make_shared(
             ngraph::OutputVector    {b_cond, b_add},    // TODO: check with reverse
             ngraph::ParameterVector {b_indx, b_data});  // TODO: check with reverse
-    auto loop = std::make_shared(count, skip);
+    auto loop = std::make_shared(count, skip);
     loop->set_function(body);
     loop->set_special_body_ports({0, 0});
     loop->set_merged_input(b_data, start, b_add);
@@ -330,22 +330,22 @@ namespace LayerTestsDefinitions {
     auto to_slice_shape = ngraph::Shape{ieShape};
     to_slice_shape[0] = batch_size;
-    auto to_slice = std::make_shared(prc, to_slice_shape);
-    auto start = std::make_shared(prc, shape, 0);
-    auto count = std::make_shared(ngraph::element::i64, scalarShape, num_iteration);
-    auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true);
+    auto to_slice = std::make_shared(prc, to_slice_shape);
+    auto start = std::make_shared(prc, shape, 0);
+    auto count = std::make_shared(ngraph::element::i64, scalarShape, num_iteration);
+    auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true);
     // Loop body
-    auto b_data = std::make_shared(prc, shape);
-    auto b_recu = std::make_shared(prc, shape);
-    auto b_add  = std::make_shared(b_data, b_recu);
-    auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true);
+    auto b_data = std::make_shared(prc, shape);
+    auto b_recu = std::make_shared(prc, shape);
+    auto b_add  = std::make_shared(b_data, b_recu);
+    auto b_cond = std::make_shared(ngraph::element::boolean, scalarShape, true);
     auto body = std::make_shared(
             ngraph::OutputVector    {b_cond, b_add},
             ngraph::ParameterVector {b_data, b_recu});
-    auto loop = std::make_shared(count, icond);
+    auto loop = std::make_shared(count, icond);
     loop->set_function(body);
     loop->set_special_body_ports({-1, 0});
     loop->set_sliced_input(b_data, to_slice, 0, 1, 1, -1, 0);
@@ -366,25 +366,25 @@ namespace LayerTestsDefinitions {
     const auto prc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(iePrc);
     const auto scalarShape = ngraph::Shape{};
-    auto to_slice = std::make_shared(prc, to_slice_shape);
-    auto start = std::make_shared(prc, shape, 0);
-    auto exit_on = std::make_shared(ngraph::element::i64, scalarShape, num_iteration);
-    auto count = std::make_shared(ngraph::element::i64, scalarShape, trip_count);
-    auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true);
+    auto to_slice = std::make_shared(prc, to_slice_shape);
+    auto start = std::make_shared(prc, shape, 0);
+    auto exit_on = std::make_shared(ngraph::element::i64, scalarShape, num_iteration);
+    auto count = std::make_shared(ngraph::element::i64, scalarShape, trip_count);
+    auto icond = std::make_shared(ngraph::element::boolean, scalarShape, true);
     // Loop body
-    auto b_data = std::make_shared(prc, shape);
-    auto b_recu = std::make_shared(prc, shape);
-    auto b_add  = std::make_shared(b_data, b_recu);
-    auto b_iter = std::make_shared(ngraph::element::i64, scalarShape);
-    auto b_exit_on = std::make_shared(ngraph::element::i64, scalarShape);
-    auto b_cond = std::make_shared(b_iter, b_exit_on);
+    auto b_data = std::make_shared(prc, shape);
+    auto b_recu = std::make_shared(prc, shape);
+    auto b_add  = std::make_shared(b_data, b_recu);
+    auto b_iter = std::make_shared(ngraph::element::i64, scalarShape);
+    auto b_exit_on = std::make_shared(ngraph::element::i64, scalarShape);
+    auto b_cond = std::make_shared(b_iter, b_exit_on);
     auto body = std::make_shared(
             ngraph::OutputVector    {b_cond, b_add},
             ngraph::ParameterVector {b_data, b_recu, b_iter, b_exit_on});
-    auto loop = std::make_shared(count, icond);
+    auto loop = std::make_shared(count, icond);
     loop->set_function(body);
     loop->set_special_body_ports({2, 0});
     loop->set_sliced_input(b_data, to_slice, 0, 1, 1, -1, 0);
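loop.cpp migrates every ngraph::opset5::Loop construction to ov::op::v5::Loop while keeping the same wiring: a trip count, an execution condition, special body ports, and merged or sliced inputs. A minimal sketch of that wiring under the new namespace (shapes and the trivial Add body are illustrative, not from the test suite):

// A v5 Loop with one merged input: the body increments a value each iteration.
#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/loop.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/result.hpp"

std::shared_ptr<ov::Model> make_loop_model() {
    // Outer-graph inputs: trip count, initial "keep going" condition, start value.
    auto trip_count = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {10});
    auto exec_cond = ov::op::v0::Constant::create(ov::element::boolean, ov::Shape{1}, {true});
    auto start = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});

    // Body: output 0 is the continue-condition, output 1 is the payload.
    auto b_data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4});
    auto b_one = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {1.0f});
    auto b_add = std::make_shared<ov::op::v1::Add>(b_data, b_one);
    auto b_cond = ov::op::v0::Constant::create(ov::element::boolean, ov::Shape{1}, {true});
    auto body = std::make_shared<ov::Model>(ov::OutputVector{b_cond, b_add},
                                            ov::ParameterVector{b_data});

    auto loop = std::make_shared<ov::op::v5::Loop>(trip_count, exec_cond);
    loop->set_function(body);
    // {-1, 0}: no current-iteration input into the body; body output 0 is the condition.
    loop->set_special_body_ports(ov::op::v5::Loop::SpecialBodyPorts{-1, 0});
    loop->set_merged_input(b_data, start, b_add);
    auto out = loop->get_iter_value(b_add, -1);  // value after the last iteration

    auto result = std::make_shared<ov::op::v0::Result>(out);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{start}, "loop");
}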
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp
index a68b292df93b43..7c1ad191f9a0d4 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/low_precision.cpp
@@ -32,7 +32,7 @@ void LowPrecisionTest::SetUp() {
     auto weights2Shape = ngraph::Shape{ 128, 32 };
     // fully connected 1
-    auto input = std::make_shared(ngPrc, inputShape);
+    auto input = std::make_shared(ngPrc, inputShape);
     std::vector weights1Data(ngraph::shape_size(weights1Shape), 0.0f);
     for (size_t i = 0; i < 16; i++) {
@@ -40,38 +40,38 @@ void LowPrecisionTest::SetUp() {
     }
     auto weights1 = ngraph::builder::makeConstant(ngPrc, weights1Shape, weights1Data);
-    auto fc1 = std::make_shared(input, weights1);
+    auto fc1 = std::make_shared(input, weights1);
     fc1->set_friendly_name("FullyConnected_1");
     // bias 1
     std::vector bias1Data(ngraph::shape_size(inputShape), 0.0f);
     auto bias1 = ngraph::builder::makeConstant(ngPrc, inputShape, bias1Data);
-    auto add1 = std::make_shared(fc1, bias1);
+    auto add1 = std::make_shared(fc1, bias1);
     add1->set_friendly_name("Add_1");
 #if 0
     // ReLU 1
-    auto relu1 = std::make_shared(add1);
+    auto relu1 = std::make_shared(add1);
     relu1->set_friendly_name("Relu_1");
     //// fully connected 2
     std::vector weights2Data(ngraph::shape_size(weights2Shape), 0.0f);
     std::fill(weights2Data.begin(), weights2Data.end(), 0.0001f);
     auto weights2 = ngraph::builder::makeConstant(ngPrc, weights2Shape, weights2Data);
-    auto fc2 = std::make_shared(relu1, weights2);
+    auto fc2 = std::make_shared(relu1, weights2);
     fc2->set_friendly_name("FullyConnected_2");
     //// bias 2
     std::vector bias2Data(ngraph::shape_size(weights2Shape), 0.0f);
     auto bias2 = ngraph::builder::makeConstant(ngPrc, weights2Shape, bias2Data);
-    auto add2 = std::make_shared(fc2, bias2);
+    auto add2 = std::make_shared(fc2, bias2);
     add2->set_friendly_name("Add_2");
     //// ReLU 2
-    auto relu2 = std::make_shared(add2);
+    auto relu2 = std::make_shared(add2);
     relu2->set_friendly_name("Relu_2");
 #endif
     configuration = config.second;
-    function = std::make_shared(ngraph::ResultVector{std::make_shared(add1)},
+    function = std::make_shared(ngraph::ResultVector{std::make_shared(add1)},
                                 ngraph::ParameterVector{input},
                                 "LowPrecisionTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp
index b594de81572777..d38edf9dce59ee 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/lrn.cpp
@@ -43,9 +43,9 @@ void LrnLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes))};
-    auto axes_node = std::make_shared(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data());
-    auto lrn = std::make_shared(params[0], axes_node, alpha, beta, bias, size);
-    ngraph::ResultVector results {std::make_shared(lrn)};
+    auto axes_node = std::make_shared(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data());
+    auto lrn = std::make_shared(params[0], axes_node, alpha, beta, bias, size);
+    ngraph::ResultVector results {std::make_shared(lrn)};
     function = std::make_shared(results, params, "lrn");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp
index f028f5f3681f75..67c81880006fab 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell.cpp
@@ -100,8 +100,8 @@ void LSTMCellTest::SetUp() {
     auto lstm_cell = std::make_shared(params[0], params[1], params[2], W, R, B, hidden_size, activations,
                                       activations_alpha, activations_beta, clip);
-    ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)),
-                                 std::make_shared(lstm_cell->output(1))};
+    ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)),
+                                 std::make_shared(lstm_cell->output(1))};
     function = std::make_shared(results, params, "lstm_cell");
     if (should_decompose) {
         ngraph::pass::Manager m;
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp
index 795e3b8ef9228c..88a36406acb948 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/lstm_cell_basic.cpp
@@ -70,8 +70,8 @@ void LSTMCellBasicTest::SetUp() {
     std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]};
     auto lstm_cell = ngraph::builder::makeLSTM(paramsOuts, WRB, hidden_size, activations, {}, {}, clip);
-    ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)),
-                                 std::make_shared(lstm_cell->output(1))};
+    ngraph::ResultVector results{std::make_shared(lstm_cell->output(0)),
+                                 std::make_shared(lstm_cell->output(1))};
     function = std::make_shared(results, params, "lstm_cell");
     if (should_decompose) {
         ngraph::pass::Manager m;
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp
index 4946f738367e2a..086b514dc65cfe 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/lstm_sequence.cpp
@@ -21,7 +21,7 @@ namespace LayerTestsDefinitions {
     std::vector activations_alpha;
     std::vector activations_beta;
     float clip;
-    ngraph::op::RecurrentSequenceDirection direction;
+    ov::op::RecurrentSequenceDirection direction;
     InputLayerType WRBType;
     InferenceEngine::Precision netPrecision;
     std::string targetDevice;
@@ -59,12 +59,12 @@ namespace LayerTestsDefinitions {
     std::vector activations_alpha;
     std::vector activations_beta;
     float clip;
-    ngraph::op::RecurrentSequenceDirection direction;
+    ov::op::RecurrentSequenceDirection direction;
     InputLayerType WRBType;
     InferenceEngine::Precision netPrecision;
     std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, WRBType, netPrecision,
              targetDevice) = this->GetParam();
-    size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
+    size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1;
     m_max_seq_len = seq_lengths;
     std::vector inputShapes = {
         {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch, num_directions, hidden_size},
@@ -116,16 +116,16 @@ namespace LayerTestsDefinitions {
     auto lstm_sequence = std::make_shared(params[0], params[1], params[2], seq_lengths_node, W, R, B, hidden_size, direction,
                                           std::vector{}, std::vector{}, activations, clip);
-    ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)),
-                                 std::make_shared(lstm_sequence->output(1)),
-                                 std::make_shared(lstm_sequence->output(2))};
+    ngraph::ResultVector results{std::make_shared(lstm_sequence->output(0)),
+                                 std::make_shared(lstm_sequence->output(1)),
+                                 std::make_shared(lstm_sequence->output(2))};
     function = std::make_shared(results, params, "lstm_sequence");
     bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ ||
                              m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM ||
                              m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST);
     if (!is_pure_sequence) {
         ngraph::pass::Manager manager;
-        if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL)
+        if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL)
             manager.register_pass();
         manager.register_pass();
         manager.run_passes(function);
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp
index 5148815930508d..75543ba36c31a8 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/mat_mul.cpp
@@ -67,10 +67,10 @@ void MatMulTest::SetUp() {
     auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, secondaryInputType, shapeRelatedParams.input2.first);
     OPENVINO_SUPPRESS_DEPRECATED_END
     if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) {
-        params.push_back(std::dynamic_pointer_cast(secondaryInput));
+        params.push_back(std::dynamic_pointer_cast(secondaryInput));
     }
     auto MatMul = std::make_shared(params[0], secondaryInput, shapeRelatedParams.input1.second, shapeRelatedParams.input2.second);
-    ngraph::ResultVector results{std::make_shared(MatMul)};
+    ngraph::ResultVector results{std::make_shared(MatMul)};
     function = std::make_shared(results, params, "MatMul");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp
index c4677606a469e5..4ed7e20fd79916 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/matrix_nms.cpp
@@ -319,7 +319,7 @@ void MatrixNmsLayerTest::SetUp() {
     for (auto&& shape : inputDynamicShapes) {
         params.push_back(std::make_shared(paramsPrec, shape));
     }
-    auto nms = std::make_shared(params[0], params[1], m_attrs);
+    auto nms = std::make_shared(params[0], params[1], m_attrs);
     function = std::make_shared(nms, params, "MatrixNMS");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp b/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp
index 00432a511f7f3b..bd46c5e412d1c7 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/minimum_maximum.cpp
@@ -43,7 +43,7 @@ namespace LayerTestsDefinitions {
     auto secondaryInput = ngraph::builder::makeInputLayer(ngPrc, inputType, {inputShapes[1]});
     OPENVINO_SUPPRESS_DEPRECATED_END
     if (inputType == ngraph::helpers::InputLayerType::PARAMETER) {
-        input.push_back(std::dynamic_pointer_cast(secondaryInput));
+        input.push_back(std::dynamic_pointer_cast(secondaryInput));
     }
     OPENVINO_SUPPRESS_DEPRECATED_START
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp b/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp
index 60a172271987de..42c631194bcf87 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/multinomial.cpp
@@ -54,10 +54,10 @@ void MultinomialTest::SetUp() {
         params.push_back(std::make_shared(ngPrc, shape));
     }
-    auto numSamplesConstant = std::make_shared(
+    auto numSamplesConstant = std::make_shared(
         ngraph::element::Type_t::i64, ov::Shape{1}, numSamples);
     const auto paramOuts =
-        ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params));
+        ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params));
     const auto multinomial = std::make_shared(
         paramOuts.at(0),
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp
index 720dfd811b24d2..cf0ec9e587390c 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/mvn.cpp
@@ -39,12 +39,12 @@ void Mvn1LayerTest::SetUp() {
     auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))};
     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], acrossChanels, normalizeVariance, eps));
+    auto mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], acrossChanels, normalizeVariance, eps));
     if (!axes.empty()) {
-        mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], axes, normalizeVariance, eps));
+        mvn = std::dynamic_pointer_cast(ngraph::builder::makeMVN(param[0], axes, normalizeVariance, eps));
     }
     OPENVINO_SUPPRESS_DEPRECATED_END
-    ngraph::ResultVector results{std::make_shared(mvn)};
+    ngraph::ResultVector results{std::make_shared(mvn)};
     function = std::make_shared(results, param, "MVN1");
 }
@@ -87,7 +87,7 @@ void Mvn6LayerTest::SetUp() {
     OPENVINO_SUPPRESS_DEPRECATED_START
     auto mvn = ngraph::builder::makeMVN6(param[0], axesNode, normalizeVariance, eps, epsMode);
     OPENVINO_SUPPRESS_DEPRECATED_END
-    ngraph::ResultVector results{std::make_shared(mvn)};
+    ngraph::ResultVector results{std::make_shared(mvn)};
     function = std::make_shared(results, param, "MVN6");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp b/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp
index 2dbdeb4234eef7..12114e53b71c04 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/non_max_suppression.cpp
@@ -17,7 +17,7 @@ std::string NmsLayerTest::getTestCaseName(const testing::TestParamInfo
     auto nms_0_identity =
-        std::make_shared(nms->output(0), opset5::Constant::create(outType, Shape{1}, {1}));
+        std::make_shared(nms->output(0), ov::op::v0::Constant::create(outType, Shape{1}, {1}));
     auto nms_1_identity =
-        std::make_shared(nms->output(1), opset5::Constant::create(ngPrc, Shape{1}, {1}));
+        std::make_shared(nms->output(1), ov::op::v0::Constant::create(ngPrc, Shape{1}, {1}));
     auto nms_2_identity =
-        std::make_shared(nms->output(2), opset5::Constant::create(outType, Shape{1}, {1}));
+        std::make_shared(nms->output(2), ov::op::v0::Constant::create(outType, Shape{1}, {1}));
     nms_0_identity->set_friendly_name("Multiply_0");
     nms_1_identity->set_friendly_name("Multiply_1");
     nms_2_identity->set_friendly_name("Multiply_2");
@@ -349,7 +350,7 @@ void Nms9LayerTest::SetUp() {
     InputPrecisions inPrecisions;
     size_t maxOutBoxesPerClass;
     float iouThr, scoreThr, softNmsSigma;
-    op::v5::NonMaxSuppression::BoxEncodingType boxEncoding;
+    ov::op::v5::NonMaxSuppression::BoxEncodingType boxEncoding;
     bool sortResDescend;
     element::Type outType;
     std::tie(inShapeParams,
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp b/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp
index 2c57450a1265b6..7d1a279a228978 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/nonzero.cpp
@@ -29,11 +29,11 @@ void NonZeroLayerTest::SetUp() {
     configuration.insert(additionalConfig.cbegin(), additionalConfig.cend());
     const auto& precision = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
-    const auto& paramNode = std::make_shared(precision, ngraph::Shape(inputShape));
+    const auto& paramNode = std::make_shared(precision, ngraph::Shape(inputShape));
-    auto nonZeroOp = std::make_shared(paramNode->output(0));
+    auto nonZeroOp = std::make_shared(paramNode->output(0));
-    ngraph::ResultVector results{std::make_shared(nonZeroOp)};
+    ngraph::ResultVector results{std::make_shared(nonZeroOp)};
     function = std::make_shared(results, ngraph::ParameterVector{paramNode}, "non_zero");
 }
 } // namespace LayerTestsDefinitions
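The non_max_suppression.cpp hunk keeps an existing trick worth noting: each NonMaxSuppression output is multiplied by a constant one, so every output survives as a distinct, nameable node in the test model (the "Multiply_0/1/2" friendly names above). A hedged sketch of that pattern (the helper function and naming scheme are illustrative):

// Wrap a node output in an identity-like Multiply-by-one.
#include <memory>

#include "openvino/core/node.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"

std::shared_ptr<ov::Node> keep_output_alive(const ov::Output<ov::Node>& out,
                                            const ov::element::Type& type) {
    // Multiplying by 1 leaves the values unchanged but gives the output its
    // own node, which survives graph transformations and can be named.
    auto one = ov::op::v0::Constant::create(type, ov::Shape{1}, {1});
    auto identity = std::make_shared<ov::op::v1::Multiply>(out, one);
    identity->set_friendly_name(out.get_node()->get_friendly_name() + "_identity");
    return identity;
}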
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp b/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp
index 771fe5c2d0f965..8ffbed114d4182 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/normalize_l2.cpp
@@ -9,7 +9,7 @@ namespace LayerTestsDefinitions {
 std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) {
     std::vector axes;
     float eps;
-    ngraph::op::EpsMode epsMode;
+    ov::op::EpsMode epsMode;
     InferenceEngine::SizeVector inputShape;
     InferenceEngine::Precision netPrecision;
     std::string targetDevice;
@@ -40,7 +40,7 @@ void NormalizeL2LayerTest::SetUp() {
     InferenceEngine::SizeVector inputShape;
     std::vector axes;
     float eps;
-    ngraph::op::EpsMode epsMode;
+    ov::op::EpsMode epsMode;
     InferenceEngine::Precision netPrecision;
     std::tie(axes, eps, epsMode, inputShape, netPrecision, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
@@ -51,7 +51,7 @@ void NormalizeL2LayerTest::SetUp() {
     auto normAxes = std::make_shared(ov::element::i64, ov::Shape{axes.size()}, axes);
     auto norm = std::make_shared(data_input, normAxes, eps, epsMode);
-    ngraph::ResultVector results{std::make_shared(norm)};
+    ngraph::ResultVector results{std::make_shared(norm)};
     function = std::make_shared(results, params, "NormalizeL2");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp b/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp
index 10c1a3b5f92274..1ad2b718a532c6 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/one_hot.cpp
@@ -48,7 +48,7 @@ void OneHotLayerTest::SetUp() {
     auto off_value_const = std::make_shared(set_type, ov::Shape{}, off_val);
     auto onehot = std::make_shared(params[0], depth_const, on_value_const, off_value_const, axis);
-    ngraph::ResultVector results{std::make_shared(onehot)};
+    ngraph::ResultVector results{std::make_shared(onehot)};
     function = std::make_shared(results, params, "OneHot");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp b/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp
index 09f98d4f157b79..3c6db7a66fc8ad 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/pad.cpp
@@ -45,7 +45,7 @@ void PadLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
     auto pad = CreatePadOp(params[0], padsBegin, padsEnd, argPadValue, padMode);
-    ngraph::ResultVector results{std::make_shared(pad)};
+    ngraph::ResultVector results{std::make_shared(pad)};
     function = std::make_shared(results, params, "pad");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp
index f7a1a93f25fb84..86c57ea0765358 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/pooling.cpp
@@ -17,8 +17,8 @@ std::string PoolingLayerTest::getTestCaseName(const testing::TestParamInfo
     std::vector kernel, stride;
     std::vector padBegin, padEnd;
-    ngraph::op::PadType padType;
-    ngraph::op::RoundingType roundingType;
+    ov::op::PadType padType;
+    ov::op::RoundingType roundingType;
     bool excludePad;
     std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams;
@@ -59,8 +59,8 @@ std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo
     ngraph::helpers::PoolingTypes poolType;
     std::vector kernel, stride;
     std::vector padBegin, padEnd;
-    ngraph::op::PadType padType;
-    ngraph::op::RoundingType roundingType;
+    ov::op::PadType padType;
+    ov::op::RoundingType roundingType;
     bool excludePad;
     std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams;
@@ -81,7 +81,7 @@ std::string GlobalPoolingLayerTest::getTestCaseName(const testing::TestParamInfo
     result << "S" << ov::test::utils::vec2str(stride) << "_";
     result << "PB" << ov::test::utils::vec2str(padBegin) << "_";
     result << "PE" << ov::test::utils::vec2str(padEnd) << "_";
-    if (padType == ngraph::op::PadType::EXPLICIT) {
+    if (padType == ov::op::PadType::EXPLICIT) {
         result << "Rounding=" << roundingType << "_";
     }
     result << "AutoPad=" << padType << "_";
@@ -104,8 +104,8 @@ std::string MaxPoolingV8LayerTest::getTestCaseName(const testing::TestParamInfo<
     std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShapes, targetDevice) = obj.param;
     std::vector kernel, stride, dilation;
     std::vector padBegin, padEnd;
-    ngraph::op::PadType padType;
-    ngraph::op::RoundingType roundingType;
+    ov::op::PadType padType;
+    ov::op::RoundingType roundingType;
     ngraph::element::Type indexElementType;
     int64_t axis;
     std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = poolParams;
@@ -138,8 +138,8 @@ void PoolingLayerTest::SetUp() {
     ngraph::helpers::PoolingTypes poolType;
     std::vector kernel, stride;
     std::vector padBegin, padEnd;
-    ngraph::op::PadType padType;
-    ngraph::op::RoundingType roundingType;
+    ov::op::PadType padType;
+    ov::op::RoundingType roundingType;
     bool excludePad;
     std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams;
@@ -158,7 +158,7 @@ void PoolingLayerTest::SetUp() {
                                  poolType);
     OPENVINO_SUPPRESS_DEPRECATED_END
-    ngraph::ResultVector results{std::make_shared(pooling)};
+    ngraph::ResultVector results{std::make_shared(pooling)};
     function = std::make_shared(results, params, "pooling");
 }
@@ -170,8 +170,8 @@ void GlobalPoolingLayerTest::SetUp() {
     ngraph::helpers::PoolingTypes poolType;
     std::vector kernel, stride;
     std::vector padBegin, padEnd;
-    ngraph::op::PadType padType;
-    ngraph::op::RoundingType roundingType;
+    ov::op::PadType padType;
+    ov::op::RoundingType roundingType;
     bool excludePad;
     std::tie(poolType, kernel, stride, padBegin, padEnd, roundingType, padType, excludePad) = poolParams;
@@ -192,7 +192,7 @@ void GlobalPoolingLayerTest::SetUp() {
                                  poolType);
     OPENVINO_SUPPRESS_DEPRECATED_END
-    ngraph::ResultVector results{std::make_shared(pooling)};
+    ngraph::ResultVector results{std::make_shared(pooling)};
     function = std::make_shared(results, params, "pooling");
 }
@@ -203,8 +203,8 @@ void MaxPoolingV8LayerTest::SetUp() {
     std::tie(poolParams, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam();
     std::vector kernel, stride, dilation;
     std::vector padBegin, padEnd;
-    ngraph::op::PadType padType;
-    ngraph::op::RoundingType roundingType;
+    ov::op::PadType padType;
+    ov::op::RoundingType roundingType;
     ngraph::element::Type indexElementType;
     int64_t axis;
     std::tie(kernel, stride, dilation, padBegin, padEnd, indexElementType, axis, roundingType, padType) = poolParams;
@@ -219,10 +219,10 @@ void MaxPoolingV8LayerTest::SetUp() {
     const auto maxPoolV8_second_output_is_supported = targetDevice == ov::test::utils::DEVICE_GPU;
     ngraph::ResultVector results;
     if (maxPoolV8_second_output_is_supported) {
-        results = {std::make_shared(maxPool->output(0)),
-                   std::make_shared(maxPool->output(1))};
+        results = {std::make_shared(maxPool->output(0)),
+                   std::make_shared(maxPool->output(1))};
     } else {
-        results = { std::make_shared(maxPool->output(0)) };
+        results = { std::make_shared(maxPool->output(0)) };
     }
     function = std::make_shared(results, params, "MaxPoolV8");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/power.cpp b/src/tests/functional/shared_test_classes/src/single_layer/power.cpp
index 6cb0251c00b872..e61b7f1f1cf8d8 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/power.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/power.cpp
@@ -36,8 +36,8 @@ namespace LayerTestsDefinitions {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector paramsIn{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))};
-    auto power_const = std::make_shared(ngPrc, ngraph::Shape{ 1 }, power);
-    auto pow = std::make_shared(paramsIn[0], power_const);
+    auto power_const = std::make_shared(ngPrc, ngraph::Shape{ 1 }, power);
+    auto pow = std::make_shared(paramsIn[0], power_const);
     function = std::make_shared(pow, paramsIn, "power");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp b/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp
index 4cd4b12ab28ac8..db3ef84aa0c944 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/prior_box.cpp
@@ -69,7 +69,7 @@ void PriorBoxLayerTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShapes)),
                                 std::make_shared(ngPrc, ov::Shape(imageShapes))};
-    ngraph::op::v8::PriorBox::Attributes attributes;
+    ov::op::v8::PriorBox::Attributes attributes;
     attributes.min_size = min_size;
     attributes.max_size = max_size;
     attributes.aspect_ratio = aspect_ratio;
@@ -84,16 +84,16 @@ void PriorBoxLayerTest::SetUp() {
     attributes.scale_all_sizes = scale_all_sizes;
     attributes.min_max_aspect_ratios_order = min_max_aspect_ratios_order;
-    auto shape_of_1 = std::make_shared(params[0]);
-    auto shape_of_2 = std::make_shared(params[1]);
-    auto priorBox = std::make_shared(
+    auto shape_of_1 = std::make_shared(params[0]);
+    auto shape_of_2 = std::make_shared(params[1]);
+    auto priorBox = std::make_shared(
         shape_of_1,
         shape_of_2,
         attributes);
     ov::pass::disable_constant_folding(priorBox);
-    ngraph::ResultVector results{std::make_shared(priorBox)};
+    ngraph::ResultVector results{std::make_shared(priorBox)};
     function = std::make_shared (results, params, "PriorBoxFunction");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp b/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp
index aaf00952633cf4..c9453be82b6ce4 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/prior_box_clustered.cpp
@@ -76,7 +76,7 @@ void PriorBoxClusteredLayerTest::SetUp() {
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShapes)),
                                std::make_shared(ngPrc, ov::Shape(inputShapes))};
-    ngraph::op::PriorBoxClusteredAttrs attributes;
+    ov::op::v0::PriorBoxClustered::Attributes attributes;
     attributes.widths = widths;
     attributes.heights = heights;
     attributes.clip = clip;
@@ -86,14 +86,14 @@ void PriorBoxClusteredLayerTest::SetUp() {
     attributes.offset = offset;
     attributes.variances = variances;
-    auto shape_of_1 = std::make_shared(params[0]);
-    auto shape_of_2 = std::make_shared(params[1]);
-    auto priorBoxClustered = std::make_shared(
+    auto shape_of_1 = std::make_shared(params[0]);
+    auto shape_of_2 = std::make_shared(params[1]);
+    auto priorBoxClustered = std::make_shared(
         shape_of_1,
         shape_of_2,
         attributes);
-    ngraph::ResultVector results{ std::make_shared(priorBoxClustered) };
+    ngraph::ResultVector results{ std::make_shared(priorBoxClustered) };
     function = std::make_shared(results, params, "PB_Clustered");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp b/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp
index dddc9271909748..8eeef77d076a46 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/proposal.cpp
@@ -153,7 +153,7 @@ void ProposalLayerTest::SetUp() {
     params[1]->set_friendly_name("b_boxes");
     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto proposal = std::dynamic_pointer_cast(
+    auto proposal = std::dynamic_pointer_cast(
         ngraph::builder::makeProposal(params[0], params[1], img_info, ngPrc,
                                       base_size,
                                       pre_nms_topn,
@@ -172,8 +172,8 @@ void ProposalLayerTest::SetUp() {
     OPENVINO_SUPPRESS_DEPRECATED_END
     ngraph::ResultVector results{
-        std::make_shared(proposal->output(0)),
-        std::make_shared(proposal->output(1))};
+        std::make_shared(proposal->output(0)),
+        std::make_shared(proposal->output(1))};
     function = std::make_shared(results, params, "proposal");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp
index 9f1cf2313cf60a..c258ce10941952 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/psroi_pooling.cpp
@@ -109,7 +109,7 @@ void PSROIPoolingLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape)),
                                 std::make_shared(ngPrc, ov::Shape(coordsShape))};
-    std::shared_ptr psroiPooling = std::make_shared(params[0],
+    std::shared_ptr psroiPooling = std::make_shared(params[0],
                                                     params[1],
                                                     outputDim,
                                                     groupSize_,
@@ -117,7 +117,7 @@ void PSROIPoolingLayerTest::SetUp() {
                                                     spatialBinsX_,
                                                     spatialBinsY_,
                                                     mode_);
-    ngraph::ResultVector results{std::make_shared(psroiPooling)};
+    ngraph::ResultVector results{std::make_shared(psroiPooling)};
     function = std::make_shared(results, params, "psroi_pooling");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp
index d94885cb2ebd6a..27026e7b02dd84 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/random_uniform.cpp
@@ -71,13 +71,13 @@ void RandomUniformLayerTest::SetUp() {
     auto min_value = createConstant(randomUniformParams.precision, randomUniformParams.min_value);
     auto max_value = createConstant(randomUniformParams.precision,
randomUniformParams.max_value); - auto random_uniform = std::make_shared(shape_of, + auto random_uniform = std::make_shared(shape_of, min_value, max_value, precision, global_seed, op_seed); - ngraph::ResultVector results{std::make_shared(random_uniform)}; + ngraph::ResultVector results{std::make_shared(random_uniform)}; function = std::make_shared(results, ngraph::ParameterVector{input}, "random_uniform"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/range.cpp b/src/tests/functional/shared_test_classes/src/single_layer/range.cpp index 5ab0fad424e925..910c7f5bee42c0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/range.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/range.cpp @@ -57,10 +57,10 @@ void RangeLayerTest::SetUp() { param->set_friendly_name(shape.first); params.push_back(param); } - auto range = std::make_shared(params[0], params[1], params[2], ngPrc); + auto range = std::make_shared(params[0], params[1], params[2], ngPrc); function = std::make_shared( - std::make_shared(range), + std::make_shared(range), params, "Range"); } @@ -117,8 +117,8 @@ void RangeNumpyLayerTest::SetUp() { params[1]->set_friendly_name("stop"); params[2]->set_friendly_name("step"); - auto range = std::make_shared(params[0], params[1], params[2], ngNetPrc); - const ngraph::ResultVector results{std::make_shared(range)}; + auto range = std::make_shared(params[0], params[1], params[2], ngNetPrc); + const ngraph::ResultVector results{std::make_shared(range)}; function = std::make_shared(results, params, "Range"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp index a953a84086dccd..5a858f86d30736 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/rdft.cpp @@ -34,12 +34,12 @@ void RDFTLayerTest::SetUp() { std::tie(inputShapes, inputPrecision, axes, signalSize, opType, targetDevice) = this->GetParam(); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ngraph::ParameterVector paramVector; - auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); paramVector.push_back(paramData); auto rdft = ngraph::builder::makeRDFT(paramVector[0], axes, signalSize, opType); - ngraph::ResultVector results{std::make_shared(rdft)}; + ngraph::ResultVector results{std::make_shared(rdft)}; function = std::make_shared(results, paramVector, "RDFT"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp index f9c40c7b60fe2a..069de325e3db87 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reduce_ops.cpp @@ -58,10 +58,10 @@ void ReduceOpsLayerTest::SetUp() { FAIL() << "Reduce op doesn't support operation type: " << opType; } auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType); - const ngraph::ResultVector 
results{std::make_shared(reduce)}; + const ngraph::ResultVector results{std::make_shared(reduce)}; function = std::make_shared(results, params, "Reduce"); } InferenceEngine::Blob::Ptr ReduceOpsLayerTest::GenerateInput(const InferenceEngine::InputInfo &info) const { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp b/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp index c4f7e13d5c8030..adaaaaca2d5927 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/region_yolo.cpp @@ -43,9 +43,9 @@ void RegionYoloLayerTest::SetUp() { InferenceEngine::Precision netPrecision; std::tie(inputShape, classes, coords, num_regions, do_softmax, mask, start_axis, end_axis, netPrecision, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto param = std::make_shared(ngPrc, inputShape); - auto region_yolo = std::make_shared(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis); - function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); + auto param = std::make_shared(ngPrc, inputShape); + auto region_yolo = std::make_shared(param, coords, classes, num_regions, do_softmax, mask, start_axis, end_axis); + function = std::make_shared(std::make_shared(region_yolo), ngraph::ParameterVector{param}, "RegionYolo"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp index 6deda839ec4fc4..0f918de70bc9a5 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp @@ -25,9 +25,9 @@ void ReorgYoloLayerTest::SetUp() { size_t stride; InferenceEngine::Precision netPrecision; std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam(); - auto param = std::make_shared(ngraph::element::f32, inputShape); - auto reorg_yolo = std::make_shared(param, stride); - function = std::make_shared(std::make_shared(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo"); + auto param = std::make_shared(ngraph::element::f32, inputShape); + auto reorg_yolo = std::make_shared(param, stride); + function = std::make_shared(std::make_shared(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp index 31578cd9379062..69e1ef64100b11 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reshape.cpp @@ -37,11 +37,11 @@ void ReshapeLayerTest::SetUp() { this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector paramsIn {std::make_shared(ngPrc, ov::Shape(inputShapes))}; - auto constNode = std::make_shared( + auto constNode = std::make_shared( ngraph::element::Type_t::i64, ngraph::Shape{outFormShapes.size()}, outFormShapes); - auto reshape = std::dynamic_pointer_cast( - std::make_shared(paramsIn[0], constNode, specialZero)); - ngraph::ResultVector results{std::make_shared(reshape)}; + auto reshape = std::dynamic_pointer_cast( + std::make_shared(paramsIn[0], 
constNode, specialZero)); + ngraph::ResultVector results{std::make_shared(reshape)}; function = std::make_shared(results, paramsIn, "Reshape"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/result.cpp b/src/tests/functional/shared_test_classes/src/single_layer/result.cpp index 71eb0dfe193143..cca0500eee4e9e 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/result.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/result.cpp @@ -29,7 +29,7 @@ void ResultLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - const ngraph::ResultVector results{std::make_shared(params[0])}; + const ngraph::ResultVector results{std::make_shared(params[0])}; function = std::make_shared(results, params, "result"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp index 9ab223e16c2b14..b6f506092b16c0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse.cpp @@ -52,7 +52,7 @@ void ReverseLayerTest::SetUp() { axes_constant = std::make_shared(ov::element::boolean, ov::Shape{axesMask.size()}, axesMask); } - const auto reverse = std::make_shared(params[0], axes_constant, mode); + const auto reverse = std::make_shared(params[0], axes_constant, mode); function = std::make_shared(reverse->outputs(), params, "reverse"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp index 05af8ca3377c84..199f0cd111da4c 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/reverse_sequence.cpp @@ -45,11 +45,11 @@ void ReverseSequenceLayerTest::SetUp() { auto secondaryInput = ngraph::builder::makeInputLayer(secondPrc, secondaryInputType, secondInputShape); OPENVINO_SUPPRESS_DEPRECATED_END if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { - paramsIn.push_back(std::dynamic_pointer_cast(secondaryInput)); + paramsIn.push_back(std::dynamic_pointer_cast(secondaryInput)); } - auto reverse = std::make_shared(paramsIn[0], secondaryInput, batchAxisIndx, seqAxisIndx); - ngraph::ResultVector results{std::make_shared(reverse)}; + auto reverse = std::make_shared(paramsIn[0], secondaryInput, batchAxisIndx, seqAxisIndx); + ngraph::ResultVector results{std::make_shared(reverse)}; function = std::make_shared(results, paramsIn, "ReverseSequence"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp index 5325d57f69e0d9..7643c51aff7145 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/rnn_cell.cpp @@ -92,7 +92,7 @@ void RNNCellTest::SetUp() { auto rnn_cell = std::make_shared(params[0], params[1], W, R, B, hidden_size, activations, activations_alpha, activations_beta, clip); - ngraph::ResultVector results{std::make_shared(rnn_cell)}; + ngraph::ResultVector results{std::make_shared(rnn_cell)}; function = std::make_shared(results, params, "rnn_cell"); 
if (should_decompose) { ngraph::pass::Manager m; diff --git a/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp index de1af3726c9026..7427345fa59445 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/rnn_sequence.cpp @@ -20,7 +20,7 @@ namespace LayerTestsDefinitions { std::vector activations_alpha; std::vector activations_beta; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; InputLayerType WRBType; std::string targetDevice; @@ -55,12 +55,12 @@ namespace LayerTestsDefinitions { std::vector activations_alpha; std::vector activations_beta; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InputLayerType WRBType; InferenceEngine::Precision netPrecision; std::tie(m_mode, seq_lengths, batch, hidden_size, input_size, activations, clip, direction, WRBType, netPrecision, targetDevice) = this->GetParam(); - size_t num_directions = direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; + size_t num_directions = direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL ? 2 : 1; std::vector inputShapes = { {{batch, seq_lengths, input_size}, {batch, num_directions, hidden_size}, {batch}, {num_directions, hidden_size, input_size}, {num_directions, hidden_size, hidden_size}, @@ -110,15 +110,15 @@ namespace LayerTestsDefinitions { auto rnn_sequence = std::make_shared(params[0], params[1], seq_lengths_node, W, R, B, hidden_size, direction, activations, activations_alpha, activations_beta, clip); - ngraph::ResultVector results{std::make_shared(rnn_sequence->output(0)), - std::make_shared(rnn_sequence->output(1))}; + ngraph::ResultVector results{std::make_shared(rnn_sequence->output(0)), + std::make_shared(rnn_sequence->output(1))}; function = std::make_shared(results, params, "rnn_sequence"); bool is_pure_sequence = (m_mode == SequenceTestsMode::PURE_SEQ || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM || m_mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST); if (!is_pure_sequence) { ngraph::pass::Manager manager; - if (direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL) + if (direction == ov::op::RecurrentSequenceDirection::BIDIRECTIONAL) manager.register_pass(); manager.register_pass(); manager.run_passes(function); diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp index 97d39c93f0b548..683246fc841970 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_align.cpp @@ -101,10 +101,10 @@ void ROIAlignLayerTest::SetUp() { fillIdxTensor(roiIdxVector, inputShape[0]); ngraph::Shape idxShape = {coordsShape[0]}; - auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); - auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); + auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); + auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); - std::shared_ptr roiAlign = std::make_shared(params[0], + std::shared_ptr roiAlign = std::make_shared(params[0], coords, roisIdx, pooledH, @@ -112,7 +112,7 
@@ void ROIAlignLayerTest::SetUp() { poolingRatio, spatialScale, poolingMode); - ngraph::ResultVector results{std::make_shared(roiAlign)}; + ngraph::ResultVector results{std::make_shared(roiAlign)}; function = std::make_shared(results, params, "roi_align"); } @@ -186,10 +186,10 @@ void ROIAlignV9LayerTest::SetUp() { ROIAlignLayerTest::fillIdxTensor(roiIdxVector, inputShape[0]); ngraph::Shape idxShape = {coordsShape[0]}; - auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); - auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); + auto coords = std::make_shared(ngPrc, coordsShape, proposalVector.data()); + auto roisIdx = std::make_shared(ngraph::element::i32, idxShape, roiIdxVector.data()); - std::shared_ptr roiAlign = std::make_shared( + std::shared_ptr roiAlign = std::make_shared( params[0], coords, roisIdx, @@ -197,10 +197,10 @@ void ROIAlignV9LayerTest::SetUp() { pooledW, poolingRatio, spatialScale, - ov::EnumNames::as_enum(poolingMode), - ov::EnumNames::as_enum(roiAlignedMode)); + ov::EnumNames::as_enum(poolingMode), + ov::EnumNames::as_enum(roiAlignedMode)); - ngraph::ResultVector results{std::make_shared(roiAlign)}; + ngraph::ResultVector results{std::make_shared(roiAlign)}; function = std::make_shared(results, params, "roi_align"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp index 8a1c38f60962ef..494cb435bf12c9 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roi_pooling.cpp @@ -80,7 +80,7 @@ namespace LayerTestsDefinitions { } else { roi_pooling = std::make_shared(params[0], params[1], poolShape, spatial_scale, "bilinear"); } - ngraph::ResultVector results{std::make_shared(roi_pooling)}; + ngraph::ResultVector results{std::make_shared(roi_pooling)}; function = std::make_shared(results, params, "roi_pooling"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp b/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp index 10771dbb86b3ab..67f1d091e5103f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/roll.cpp @@ -31,15 +31,15 @@ void RollLayerTest::SetUp() { std::tie(inputShapes, inputPrecision, shift, axes, targetDevice) = this->GetParam(); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); ngraph::ParameterVector paramVector; - auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); + auto paramData = std::make_shared(inType, ngraph::Shape(inputShapes)); paramVector.push_back(paramData); - auto shiftNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0); - auto axesNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0); + auto shiftNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{shift.size()}, shift)->output(0); + auto axesNode = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{axes.size()}, axes)->output(0); - auto roll = std::make_shared(paramVector[0], shiftNode, axesNode); + auto roll = std::make_shared(paramVector[0], shiftNode, axesNode); - ngraph::ResultVector results{std::make_shared(roll)}; + ngraph::ResultVector 
results{std::make_shared(roll)}; function = std::make_shared(results, paramVector, "roll"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp index 55e0e453ea0d55..2e2d481530f0ed 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_ND_update.cpp @@ -62,15 +62,15 @@ void ScatterNDUpdateLayerTest::SetUp() { auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); ngraph::ParameterVector paramVector; - auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); + auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); paramVector.push_back(inputParams); - auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); + auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); paramVector.push_back(updateParams); auto indicesNode = std::make_shared(idxPrc, ov::Shape(indicesShape), indicesValue); auto s2d = std::make_shared(paramVector[0], indicesNode, paramVector[1]); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterNDUpdate"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp index 9b3a4ca7bd421e..21369540823b8f 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_elements_update.cpp @@ -53,16 +53,16 @@ void ScatterElementsUpdateLayerTest::SetUp() { auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto idxPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); ngraph::ParameterVector paramVector; - auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); + auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); paramVector.push_back(inputParams); - auto updateParams = std::make_shared(inPrc, ngraph::Shape(indicesShape)); + auto updateParams = std::make_shared(inPrc, ngraph::Shape(indicesShape)); paramVector.push_back(updateParams); auto indicesNode = std::make_shared(idxPrc, ov::Shape(indicesShape), indicesValue); auto axis_node = std::make_shared(ov::element::i32, ov::Shape{}, std::vector{axis}); auto s2d = std::make_shared(paramVector[0], indicesNode, paramVector[1], axis_node); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterElementsUpdate"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp index d250ddbefbd2dd..2ce083343f3f97 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/scatter_update.cpp @@ -72,16 +72,16 @@ void ScatterUpdateLayerTest::SetUp() { auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto idxPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(indicesPrecision); ngraph::ParameterVector paramVector; - auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); + auto inputParams = std::make_shared(inPrc, ngraph::Shape(inShape)); paramVector.push_back(inputParams); - auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); + auto updateParams = std::make_shared(inPrc, ngraph::Shape(updateShape)); paramVector.push_back(updateParams); auto indicesNode = std::make_shared(idxPrc, ov::Shape(indicesShape), indicesValue); auto axis_node = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); auto s2d = std::make_shared(paramVector[0], indicesNode, paramVector[1], axis_node); - ngraph::ResultVector results{std::make_shared(s2d)}; + ngraph::ResultVector results{std::make_shared(s2d)}; function = std::make_shared(results, paramVector, "ScatterUpdate"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/select.cpp b/src/tests/functional/shared_test_classes/src/single_layer/select.cpp index 58e1bd3b46ebcc..f25ac5f6cf98e8 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/select.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/select.cpp @@ -10,7 +10,7 @@ namespace LayerTestsDefinitions { std::string SelectLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector> dataShapes(3); InferenceEngine::Precision dataType; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; std::string targetDevice; std::tie(dataShapes, dataType, broadcast, targetDevice) = obj.param; std::ostringstream result; @@ -25,19 +25,19 @@ namespace LayerTestsDefinitions { void SelectLayerTest::SetUp() { std::vector> inputShapes(numOfInputs); InferenceEngine::Precision inputPrecision; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; std::tie(inputShapes, inputPrecision, broadcast, targetDevice) = this->GetParam(); ngraph::ParameterVector paramNodesVector; - auto paramNode = std::make_shared(ngraph::element::Type_t::boolean, ngraph::Shape(inputShapes[CONDITION])); + auto paramNode = std::make_shared(ngraph::element::Type_t::boolean, ngraph::Shape(inputShapes[CONDITION])); paramNodesVector.push_back(paramNode); auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); for (size_t i = 1; i < inputShapes.size(); i++) { - paramNode = std::make_shared(inType, ngraph::Shape(inputShapes[i])); + paramNode = std::make_shared(inType, ngraph::Shape(inputShapes[i])); paramNodesVector.push_back(paramNode); } auto select = std::make_shared(paramNodesVector[0], paramNodesVector[1], paramNodesVector[2], broadcast); - ngraph::ResultVector results{std::make_shared(select)}; + ngraph::ResultVector results{std::make_shared(select)}; function = std::make_shared(results, paramNodesVector, "select"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp index 8d92da3a114f0d..d036f95bfe6467 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/shape_of.cpp @@ -27,8 +27,8 @@ namespace LayerTestsDefinitions { auto inType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision); auto outType = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(outPrc); 
ov::ParameterVector param {std::make_shared(inType, ov::Shape(inputShapes))}; - auto shapeOf = std::make_shared(param[0], outType); - ngraph::ResultVector results{std::make_shared(shapeOf)}; + auto shapeOf = std::make_shared(param[0], outType); + ngraph::ResultVector results{std::make_shared(shapeOf)}; function = std::make_shared(results, param, "shapeOf"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp index 16538163fd18d4..cb0b951746d5e0 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/shuffle_channels.cpp @@ -41,7 +41,7 @@ void ShuffleChannelsLayerTest::SetUp() { auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; auto shuffleChannels = std::make_shared(params[0], axis, group); - ngraph::ResultVector results{std::make_shared(shuffleChannels)}; + ngraph::ResultVector results{std::make_shared(shuffleChannels)}; function = std::make_shared(results, params, "shuffleChannels"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp index 63d66585ef1d02..db436f71b103cd 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp @@ -40,7 +40,7 @@ void SpaceToBatchLayerTest::SetUp() { OPENVINO_SUPPRESS_DEPRECATED_START auto s2b = ngraph::builder::makeSpaceToBatch(params[0], ngPrc, blockShape, padsBegin, padsEnd); OPENVINO_SUPPRESS_DEPRECATED_END - ngraph::ResultVector results{std::make_shared(s2b)}; + ngraph::ResultVector results{std::make_shared(s2b)}; function = std::make_shared(results, params, "SpaceToBatch"); } } // namespace LayerTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp index bdc4b6eba59673..68058208ddc092 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/space_to_depth.cpp @@ -7,12 +7,10 @@ namespace LayerTestsDefinitions { -using namespace ngraph::opset3; - -static inline std::string SpaceToDepthModeToString(const SpaceToDepth::SpaceToDepthMode& mode) { - static std::map names = { - {SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, - {SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, "DEPTH_FIRST"}, +static inline std::string SpaceToDepthModeToString(const ov::op::v0::SpaceToDepth::SpaceToDepthMode& mode) { + static std::map names = { + {ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, "BLOCKS_FIRST"}, + {ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, "DEPTH_FIRST"}, }; auto i = names.find(mode); @@ -24,7 +22,7 @@ static inline std::string SpaceToDepthModeToString(const SpaceToDepth::SpaceToDe std::string SpaceToDepthLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { std::vector inShape; - SpaceToDepth::SpaceToDepthMode mode; + ov::op::v0::SpaceToDepth::SpaceToDepthMode mode; std::size_t blockSize; InferenceEngine::Precision inputPrecision; std::string targetName; @@ -40,14 +38,14 @@ std::string 
SpaceToDepthLayerTest::getTestCaseName(const testing::TestParamInfo<
 void SpaceToDepthLayerTest::SetUp() {
     std::vector inShape;
-    SpaceToDepth::SpaceToDepthMode mode;
+    ov::op::v0::SpaceToDepth::SpaceToDepthMode mode;
     std::size_t blockSize;
     InferenceEngine::Precision inputPrecision;
     std::tie(inShape, inputPrecision, mode, blockSize, targetDevice) = this->GetParam();
     auto inPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(inputPrecision);
     ov::ParameterVector params {std::make_shared(inPrc, ov::Shape(inShape))};
     auto s2d = std::make_shared(params[0], mode, blockSize);
-    ngraph::ResultVector results{std::make_shared(s2d)};
+    ngraph::ResultVector results{std::make_shared(s2d)};
     function = std::make_shared(results, params, "SpaceToDepth");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/split.cpp b/src/tests/functional/shared_test_classes/src/single_layer/split.cpp
index 6c09bf04b55b76..aad1da53dc228a 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/split.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/split.cpp
@@ -45,12 +45,12 @@ void SplitLayerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
     OPENVINO_SUPPRESS_DEPRECATED_START
-    auto split = std::dynamic_pointer_cast(ngraph::builder::makeSplit(params[0],
+    auto split = std::dynamic_pointer_cast(ngraph::builder::makeSplit(params[0],
                                            ngPrc, numSplits, axis));
     OPENVINO_SUPPRESS_DEPRECATED_END
     ngraph::ResultVector results;
     for (int i = 0; i < outIndices.size(); i++) {
-        results.push_back(std::make_shared(split->output(outIndices[i])));
+        results.push_back(std::make_shared(split->output(outIndices[i])));
     }
     function = std::make_shared(results, params, "split");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp b/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp
index 1f229118a6b920..14c196ba2089a7 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/squeeze_unsqueeze.cpp
@@ -42,14 +42,14 @@ void SqueezeUnsqueezeLayerTest::SetUp() {
     std::shared_ptr op;
     if (axesVector.empty() && opType == ngraph::helpers::SqueezeOpType::SQUEEZE) {
-        op = std::make_shared(params.front());
+        op = std::make_shared(params.front());
     } else {
         OPENVINO_SUPPRESS_DEPRECATED_START
         op = ngraph::builder::makeSqueezeUnsqueeze(params.front(), ngraph::element::i64, axesVector, opType);
         OPENVINO_SUPPRESS_DEPRECATED_END
     }
-    const ngraph::ResultVector results{std::make_shared(op)};
+    const ngraph::ResultVector results{std::make_shared(op)};
     function = std::make_shared(results, params, "Squeeze");
 }
 } // namespace LayerTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp
index 5f8a4ffe9fb199..1ae8b3110f8d3e 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/strided_slice.cpp
@@ -59,7 +59,7 @@ void StridedSliceLayerTest::SetUp() {
             ssParams.shrinkAxisMask,
             ssParams.ellipsisAxisMask);
-    ngraph::ResultVector results{std::make_shared(ss)};
+    ngraph::ResultVector results{std::make_shared(ss)};
     function = std::make_shared(results, params, "StridedSlice");
 }
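Note: every hunk above applies the same mechanical rewrite — test graphs are built from explicitly versioned ov::op::vN types instead of the ngraph opset aliases. The sketch below shows the target form for one representative test graph; it is illustrative only (the helper name make_space_to_depth_test_model is not part of this patch, and the concrete op and opset version differ from file to file):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/result.hpp"
    #include "openvino/op/space_to_depth.hpp"

    // Builds the kind of minimal test graph these suites use, with every node
    // constructed from an explicitly versioned ov::op type.
    std::shared_ptr<ov::Model> make_space_to_depth_test_model(const ov::Shape& in_shape,
                                                              ov::element::Type prc,
                                                              size_t block_size) {
        auto param = std::make_shared<ov::op::v0::Parameter>(prc, in_shape);
        auto s2d = std::make_shared<ov::op::v0::SpaceToDepth>(
            param, ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, block_size);
        ov::ResultVector results{std::make_shared<ov::op::v0::Result>(s2d)};
        return std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "SpaceToDepth");
    }

Pinning the opset version in the type name (v0, v3, v8, ...) ties each test to a fixed operator definition rather than to whatever the current opset alias happens to resolve to.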
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp index 0df383f00bf3dd..3a67c205ddec24 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/tensor_iterator.cpp @@ -16,7 +16,7 @@ namespace LayerTestsDefinitions { size_t sequence_axis; ngraph::helpers::TensorIteratorBody ti_body; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::string targetDevice; std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, @@ -67,19 +67,19 @@ namespace LayerTestsDefinitions { size_t sequence_axis; ngraph::helpers::TensorIteratorBody ti_body; float clip; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; InferenceEngine::Precision netPrecision; std::tie(should_decompose, seq_lengths, batch, hidden_size, sequence_axis, clip, ti_body, direction, netPrecision, targetDevice) = this->GetParam(); std::vector> inputShapes; auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); // Each case consist of 3 steps: // 1. Create TensorIterator body. // 2. Set PortMap // 3. Create outer function - auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, + auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, std::vector{static_cast(sequence_axis)}); switch (ti_body) { case ngraph::helpers::TensorIteratorBody::LSTM: { @@ -101,22 +101,22 @@ namespace LayerTestsDefinitions { std::make_shared(ngPrc, ov::Shape(inputShapes[1])), std::make_shared(ngPrc, ov::Shape(inputShapes[2]))}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); std::vector WRB = {inputShapes[3], inputShapes[4], inputShapes[5]}; ngraph::OutputVector out_vector = {squeeze, body_params[1], body_params[2]}; auto lstm_cell = ngraph::builder::makeLSTM(out_vector, WRB, hidden_size, {"sigmoid", "tanh", "tanh"}, {}, {}, clip); - auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); - ngraph::ResultVector results{std::make_shared(unsqueeze), - std::make_shared(lstm_cell->output(0)), - std::make_shared(lstm_cell->output(1))}; + auto unsqueeze = std::make_shared(lstm_cell->output(0), axis); + ngraph::ResultVector results{std::make_shared(unsqueeze), + std::make_shared(lstm_cell->output(0)), + std::make_shared(lstm_cell->output(1))}; auto body = std::make_shared(results, body_params, "lstm_cell"); tensor_iterator->set_function(body); // 2. 
Set PortMap - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(results[0], 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[0], -1, -1, 1, 0, sequence_axis); } else { @@ -151,21 +151,21 @@ namespace LayerTestsDefinitions { std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ngraph::OutputVector out_vector = {squeeze, body_params[1]}; auto gru_cell = ngraph::builder::makeGRU(out_vector, WRB, hidden_size, {"sigmoid", "tanh"}, {}, {}, clip, false); - auto unsqueeze = std::make_shared(gru_cell->output(0), axis); - ngraph::ResultVector results{std::make_shared(gru_cell->output(0)), - std::make_shared(unsqueeze)}; + auto unsqueeze = std::make_shared(gru_cell->output(0), axis); + ngraph::ResultVector results{std::make_shared(gru_cell->output(0)), + std::make_shared(unsqueeze)}; auto body = std::make_shared(results, body_params, "gru_cell"); tensor_iterator->set_function(body); // 2. Set PortMap - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); } else { @@ -197,20 +197,20 @@ namespace LayerTestsDefinitions { ov::ParameterVector body_params{std::make_shared(ngPrc, ov::Shape(inputShapes[0])), std::make_shared(ngPrc, ov::Shape(inputShapes[1]))}; std::vector WRB = {inputShapes[2], inputShapes[3], inputShapes[4]}; - auto squeeze = std::make_shared(body_params[0], axis); + auto squeeze = std::make_shared(body_params[0], axis); ngraph::OutputVector out_vector = {squeeze, body_params[1]}; auto rnn_cell = ngraph::builder::makeRNN(out_vector, WRB, hidden_size, {"tanh"}, {}, {}, clip); - auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); - ngraph::ResultVector results{std::make_shared(rnn_cell), - std::make_shared(unsqueeze)}; + auto unsqueeze = std::make_shared(rnn_cell->output(0), axis); + ngraph::ResultVector results{std::make_shared(rnn_cell), + std::make_shared(unsqueeze)}; auto body = std::make_shared(results, body_params, "rnn_cell"); tensor_iterator->set_function(body); // 2. 
Set PortMap - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], outer_params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(results[1], -1, -1, 1, 0, sequence_axis); } else { diff --git a/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp b/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp index 9c5afdd310bf38..1acd10bbda24e1 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/tile.cpp @@ -38,7 +38,7 @@ void TileLayerTest::SetUp() { auto repeatsNode = std::make_shared(ov::element::i64, std::vector{tileParams.size()}, tileParams); auto tile = std::make_shared(params[0], repeatsNode); - ngraph::ResultVector results{std::make_shared(tile)}; + ngraph::ResultVector results{std::make_shared(tile)}; function = std::make_shared(results, params, "tile"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp b/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp index b4897117cbfb37..7f1b1ef5bfe679 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/topk.cpp @@ -12,8 +12,8 @@ namespace LayerTestsDefinitions { InferenceEngine::SizeVector inputShape; std::string targetDevice; int64_t keepK, axis; - ngraph::opset4::TopK::Mode mode; - ngraph::opset4::TopK::SortType sort; + ov::op::v3::TopK::Mode mode; + ov::op::v3::TopK::SortType sort; std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = obj.param; std::ostringstream result; result << "IS=" << ov::test::utils::vec2str(inputShape) << "_"; @@ -33,20 +33,20 @@ void TopKLayerTest::SetUp() { InferenceEngine::SizeVector inputShape; InferenceEngine::Precision netPrecision; int64_t keepK, axis; - ngraph::opset4::TopK::Mode mode; - ngraph::opset4::TopK::SortType sort; + ov::op::v3::TopK::Mode mode; + ov::op::v3::TopK::SortType sort; std::tie(keepK, axis, mode, sort, netPrecision, inPrc, outPrc, inLayout, inputShape, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto k = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK); - auto topk = std::dynamic_pointer_cast( - std::make_shared(params[0], k, axis, mode, sort)); + auto k = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{}, &keepK); + auto topk = std::dynamic_pointer_cast( + std::make_shared(params[0], k, axis, mode, sort)); ngraph::ResultVector results; for (size_t i = 0; i < topk->get_output_size(); i++) { - results.push_back(std::make_shared(topk->output(i))); + results.push_back(std::make_shared(topk->output(i))); } function = std::make_shared(results, params, "TopK"); } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp b/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp index 
2fc4131e834f76..16226bcaf9a88f 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/transpose.cpp
@@ -34,11 +34,11 @@ void TransposeLayerTest::SetUp() {
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
     const auto inOrderShape = inputOrder.empty() ? ngraph::Shape({0}) : ngraph::Shape({inputShape.size()});
-    const auto inputOrderOp = std::make_shared(ngraph::element::i64,
+    const auto inputOrderOp = std::make_shared(ngraph::element::i64,
                                                inOrderShape,
                                                inputOrder);
-    const auto transpose = std::make_shared(params.at(0), inputOrderOp);
-    const ngraph::ResultVector results{std::make_shared(transpose)};
+    const auto transpose = std::make_shared(params.at(0), inputOrderOp);
+    const ngraph::ResultVector results{std::make_shared(transpose)};
     function = std::make_shared(results, params, "Transpose");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp b/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp
index c424b57bc99a73..2a4ef3f757bea5 100644
--- a/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_layer/variadic_split.cpp
@@ -36,11 +36,11 @@ namespace LayerTestsDefinitions {
     std::tie(numSplits, axis, netPrecision, inPrc, outPrc, inLayout, outLayout, inputShape, targetDevice) = this->GetParam();
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto VariadicSplit = std::dynamic_pointer_cast(ngraph::builder::makeVariadicSplit(params[0], numSplits,
+    auto VariadicSplit = std::dynamic_pointer_cast(ngraph::builder::makeVariadicSplit(params[0], numSplits,
                                                    axis));
     ngraph::ResultVector results;
     for (int i = 0; i < numSplits.size(); i++) {
-        results.push_back(std::make_shared(VariadicSplit->output(i)));
+        results.push_back(std::make_shared(VariadicSplit->output(i)));
     }
     function = std::make_shared(results, params, "VariadicSplit");
 }
diff --git a/src/tests/functional/shared_test_classes/src/single_op/batch_norm.cpp b/src/tests/functional/shared_test_classes/src/single_op/batch_norm.cpp
index f2df32ba5048f4..1305d115fc634c 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/batch_norm.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/batch_norm.cpp
@@ -40,18 +40,23 @@ void BatchNormLayerTest::SetUp() {
     init_input_shapes(shapes);
     ov::ParameterVector params {std::make_shared(model_type, inputDynamicShapes.front())};
+    ov::test::utils::InputGenerateData in_data;
+    in_data.start_from = 0;
+    in_data.range = 1;
+
     auto constant_shape = ov::Shape{params[0]->get_shape().at(1)};
-    auto gamma_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, 1, 0);
+    auto gamma_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, in_data);
     auto gamma = std::make_shared(gamma_tensor);
-    auto beta_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, 1, 0);
+    auto beta_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, in_data);
     auto beta = std::make_shared(beta_tensor);
-    auto mean_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, 1, 0);
+    auto mean_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, in_data);
     auto mean = std::make_shared(mean_tensor);
     // Fill the vector for variance with positive values
-    auto variance_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, 10, 0);
+    in_data.range = 10;
+    auto variance_tensor = ov::test::utils::create_and_fill_tensor(model_type, constant_shape, in_data);
     auto variance = std::make_shared(variance_tensor);
     auto batch_norm = std::make_shared(params[0], gamma, beta, mean, variance, epsilon);
diff --git a/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp b/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp
index 084eaf1fd0be6d..d89fac28335797 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/broadcast.cpp
@@ -51,7 +51,7 @@ void BroadcastLayerTest::SetUp() {
     ov::ParameterVector params{std::make_shared(model_type, inputDynamicShapes.front())};
     std::shared_ptr broadcast;
-    if (mode == ngraph::op::BroadcastType::NONE) {
+    if (mode == ov::op::BroadcastType::NONE) {
         auto axis_set_const = ov::op::v0::Constant::create(ngraph::element::i64, {axes_mapping.size()}, axes_mapping.to_vector());
         broadcast = std::make_shared(params[0],
                                      target_shape_const,
diff --git a/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp b/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp
index 0e44df42406b60..bfb521871548b7 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/convolution.cpp
@@ -19,7 +19,7 @@ std::string ConvolutionLayerTest::getTestCaseName(const testing::TestParamInfo
     shapes;
     std::string targetDevice;
     std::tie(conv_params, model_type, shapes, targetDevice) = obj.param;
-    ngraph::op::PadType pad_type;
+    ov::op::PadType pad_type;
     InferenceEngine::SizeVector kernel, stride, dilation;
     std::vector pad_begin, pad_end;
     size_t conv_out_channels;
diff --git a/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp b/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp
index f4790c91d473bf..1f9c837995e19e 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/eltwise.cpp
@@ -105,21 +105,28 @@ void EltwiseLayerTest::SetUp() {
             parameters.push_back(param);
         } else {
             ov::Shape shape = inputDynamicShapes.back().get_max_shape();
+            ov::test::utils::InputGenerateData in_data;
             switch (eltwise_type) {
                 case EltwiseTypes::DIVIDE:
                 case EltwiseTypes::MOD:
                 case EltwiseTypes::FLOOR_MOD: {
-                    auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, 8, 2);
+                    in_data.start_from = 2;
+                    in_data.range = 8;
+                    auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, in_data);
                     secondary_input = std::make_shared(tensor);
                     break;
                 }
                 case EltwiseTypes::POWER: {
-                    auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, 2, 1);
+                    in_data.start_from = 1;
+                    in_data.range = 2;
+                    auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, in_data);
                     secondary_input = std::make_shared(tensor);
                     break;
                 }
                 default: {
-                    auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, 9, 1);
+                    in_data.start_from = 1;
+                    in_data.range = 9;
+                    auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, in_data);
                     secondary_input = std::make_shared(tensor);
                 }
             }
diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather.cpp
index
f4c91752190e19..d320aa16806b23 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gather.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gather.cpp @@ -104,7 +104,10 @@ void Gather7LayerTest::SetUp() { auto param = std::make_shared(model_type, inputDynamicShapes.front()); int axis_dim = targetStaticShapes[0][0][axis < 0 ? axis + targetStaticShapes[0][0].size() : axis]; - auto indices_node_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, indices_shape, axis_dim - 1); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = axis_dim - 1; + auto indices_node_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, indices_shape, in_data); auto indices_node = std::make_shared(indices_node_tensor); auto axis_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape(), {axis}); @@ -132,7 +135,10 @@ void Gather8LayerTest::SetUp() { auto param = std::make_shared(model_type, inputDynamicShapes.front()); int axis_dim = targetStaticShapes[0][0][axis < 0 ? axis + targetStaticShapes[0][0].size() : axis]; - auto indices_node_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, indices_shape, 2 * axis_dim, -axis_dim); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -axis_dim; + in_data.range = 2 * axis_dim; + auto indices_node_tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, indices_shape, in_data); auto indices_node = std::make_shared(indices_node_tensor); auto axis_node = ov::op::v0::Constant::create(ov::element::i64, ov::Shape(), {axis}); diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather_elements.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather_elements.cpp index 1366ff6505bc6f..5d617bde3a42ca 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gather_elements.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gather_elements.cpp @@ -52,7 +52,10 @@ void GatherElementsLayerTest::SetUp() { auto param = std::make_shared(model_type, inputDynamicShapes.front()); auto axis_dim = targetStaticShapes[0][0][axis < 0 ? 
axis + targetStaticShapes[0][0].size() : axis]; - auto indices_node_tensor = ov::test::utils::create_and_fill_tensor(indices_type, indices_shape, axis_dim - 1); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = axis_dim - 1; + auto indices_node_tensor = ov::test::utils::create_and_fill_tensor(indices_type, indices_shape, in_data); auto indices_node = std::make_shared(indices_node_tensor); auto gather_el = std::make_shared(param, indices_node, axis); diff --git a/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp b/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp index 3847b1f6bae3d3..d98ca8a62b0a30 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gather_tree.cpp @@ -59,7 +59,10 @@ void GatherTreeLayerTest::SetUp() { } for (const auto& shape : constant_shapes_static) { - auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, input_shape.at(2) - 2, 1); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 1; + in_data.range = input_shape.at(2) - 2; + auto tensor = ov::test::utils::create_and_fill_tensor(model_type, shape, in_data); auto constant = std::make_shared(tensor); inputs.push_back(constant); } diff --git a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp index 81ae8ceb1758d9..e7037267cdbed3 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/gru_sequence.cpp @@ -97,7 +97,10 @@ void GRUSequenceTest::SetUp() { seq_lengths_node = param; } else if (mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST || mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) { - auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, targetStaticShapes[0][2], seq_lengths, 0); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = seq_lengths; + auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, targetStaticShapes[0][2], in_data); seq_lengths_node = std::make_shared(tensor); } else { std::vector lengths(batch, seq_lengths); diff --git a/src/tests/functional/plugin/shared/src/single_layer_tests/invalid_cases/proposal.cpp b/src/tests/functional/shared_test_classes/src/single_op/invalid_cases/proposal.cpp similarity index 98% rename from src/tests/functional/plugin/shared/src/single_layer_tests/invalid_cases/proposal.cpp rename to src/tests/functional/shared_test_classes/src/single_op/invalid_cases/proposal.cpp index d3924f5480965a..24815e005d1f45 100644 --- a/src/tests/functional/plugin/shared/src/single_layer_tests/invalid_cases/proposal.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/invalid_cases/proposal.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "single_layer_tests/invalid_cases/proposal.hpp" +#include "shared_test_classes/single_op/invalid_cases/proposal.hpp" namespace ov { namespace test { diff --git a/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp index 514557ead7a72d..2f680cacfea305 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp @@ -116,7 +116,10 @@ void 
diff --git a/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp
index 514557ead7a72d..2f680cacfea305 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/lstm_sequence.cpp
@@ -116,7 +116,10 @@ void LSTMSequenceTest::SetUp() {
         params.push_back(param);
     } else if (mode == SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST ||
                mode == SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) {
-        auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, inputShapes[3], seq_lengths);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = seq_lengths;
+        auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, inputShapes[3], in_data);
         seq_lengths_node = std::make_shared(tensor);
     } else {
         std::vector lengths(inputShapes[3][0], seq_lengths);
diff --git a/src/tests/functional/shared_test_classes/src/single_op/multiclass_nms.cpp b/src/tests/functional/shared_test_classes/src/single_op/multiclass_nms.cpp
index 3aa95a616911b0..7a5829fd570955 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/multiclass_nms.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/multiclass_nms.cpp
@@ -66,7 +66,8 @@ void MulticlassNmsLayerTest::generate_inputs(const std::vector& targe
             const size_t start_from = 0;
             const size_t k = 1000;
             const int seed = 1;
-            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], range, start_from, k, seed);
+            tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i],
+                                                             ov::test::utils::InputGenerateData(start_from, range, k, seed));
         } else if (i == 0) {  // bboxes
             tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]);
         } else {  // roisnum
diff --git a/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp b/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp
index b3db915a59a035..d53d0b7f846375 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/normalize_l2.cpp
@@ -14,7 +14,7 @@ namespace test {
 std::string NormalizeL2LayerTest::getTestCaseName(const testing::TestParamInfo& obj) {
     std::vector axes;
     float eps;
-    ngraph::op::EpsMode eps_mode;
+    ov::op::EpsMode eps_mode;
     std::vector shapes;
     ov::element::Type model_type;
     std::string targetDevice;
@@ -45,7 +45,7 @@ void NormalizeL2LayerTest::SetUp() {
     std::vector shapes;
     std::vector axes;
     float eps;
-    ngraph::op::EpsMode eps_mode;
+    ov::op::EpsMode eps_mode;
     ov::element::Type model_type;
     std::tie(axes, eps, eps_mode, shapes, model_type, targetDevice) = this->GetParam();
     init_input_shapes(shapes);
diff --git a/src/tests/functional/shared_test_classes/src/single_op/rnn_sequence.cpp b/src/tests/functional/shared_test_classes/src/single_op/rnn_sequence.cpp
index 109aacdacbc277..e79cd38ab65c5c 100644
--- a/src/tests/functional/shared_test_classes/src/single_op/rnn_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/single_op/rnn_sequence.cpp
@@ -85,7 +85,10 @@ void RNNSequenceTest::SetUp() {
         seq_lengths_node = param;
     } else if (mode == ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST ||
                mode == ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST) {
-        auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, input_shapes[2], static_cast(seq_lengths), 0.f);
+        ov::test::utils::InputGenerateData in_data;
+        in_data.start_from = 0;
+        in_data.range = seq_lengths;
+        auto tensor = ov::test::utils::create_and_fill_tensor(ov::element::i64, input_shapes[2], in_data);
         seq_lengths_node = std::make_shared(tensor);
     } else {
         std::vector lengths(batch, seq_lengths);
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp
index 0eef9adde8fe92..81313bca9fb32f 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/activation_concats_eltwise.cpp
@@ -52,8 +52,8 @@ void ActivationConcatsEltwise::SetUp() {
     auto eltw = ngraph::builder::makeEltwise(concat_1, concat_2, ngraph::helpers::EltwiseTypes::ADD);

-    auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({1, inputSize + concatSize}));
-    auto final_reshape = std::make_shared(eltw, reshape_pattern, false);
+    auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({1, inputSize + concatSize}));
+    auto final_reshape = std::make_shared(eltw, reshape_pattern, false);

     function = std::make_shared(final_reshape, input, "ActivationConcatsEltwise");
 }
 }  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp
index eda78cc0525999..d9df9b10f5356d 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/activation_fq.cpp
@@ -69,9 +69,9 @@ namespace SubgraphTestsDefinitions {
         auto FQNode = ngraph::builder::makeFakeQuantize(act, ngraph::element::f32, levels[0], constShape[0],
             { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax });

-        auto FQ = std::dynamic_pointer_cast(FQNode);
+        auto FQ = std::dynamic_pointer_cast(FQNode);

-        ngraph::ResultVector results{std::make_shared(FQ)};
+        ngraph::ResultVector results{std::make_shared(FQ)};
         function = std::make_shared(results, params, "ActivationFakeQuantizeSubgraph");
     }
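The multiclass_nms.cpp hunk above uses a constructor form instead of field-by-field assignment. A sketch, assuming the parameter order InputGenerateData(start_from, range, resolution, seed) implied by the four values passed there (the parameter names, and the reading of k as a resolution divisor, are assumptions):

    const size_t start_from = 0;
    const size_t range = 1;   // hypothetical; the test derives it from the scores layout
    const size_t k = 1000;    // finer granularity: generated values step in increments of 1/k
    const int seed = 1;
    auto tensor = ov::test::utils::create_and_fill_tensor(
        ov::element::f32, ov::Shape{3, 2, 6},  // hypothetical element type and shape
        ov::test::utils::InputGenerateData(start_from, range, k, seed));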
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp b/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp
index d9814b2869dc82..fc04b12b3dc556 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp
@@ -68,32 +68,32 @@ std::shared_ptr Basic_LSTM_S::GetNetwork(size_t thirdDimOut,
     //Reshape_1 [1,thirdDimOut*num_cells] -> [1, num_cells, thirdDimOut]
     std::vector outFormShapes1 = { batch_size, num_cells, thirdDimOut };
-    auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1);
-    auto reshape1 = std::make_shared(params[0], pattern1, false);
+    auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1);
+    auto reshape1 = std::make_shared(params[0], pattern1, false);
     auto reshape1_shape = reshape1->output(0).get_shape();

     auto H_init = ngraph::builder::makeConstant(ngPrc, { batch_size, hiddenSize }, {}, true, weights_range.second, weights_range.first);
     auto C_init = ngraph::builder::makeConstant(ngPrc, { batch_size, hiddenSize }, {}, true, weights_range.second, weights_range.first);
     if (hidden_memory_init_out != nullptr) {
-        *hidden_memory_init_out = std::static_pointer_cast(H_init)->cast_vector();
+        *hidden_memory_init_out = std::static_pointer_cast(H_init)->cast_vector();
     }
     if (cell_memory_init_out != nullptr) {
-        *cell_memory_init_out = std::static_pointer_cast(C_init)->cast_vector();
+        *cell_memory_init_out = std::static_pointer_cast(C_init)->cast_vector();
     }
-    auto H_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize });
-    auto C_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize });
+    auto H_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize });
+    auto C_t = std::make_shared(ngPrc, ngraph::Shape{ batch_size, hiddenSize });
     H_t->set_friendly_name("hidden_state_1");
     C_t->set_friendly_name("cell_state_1");

     //Body
-    auto X = std::make_shared(ngPrc, ngraph::Shape{ batch_size, 1, reshape1_shape[2] });
+    auto X = std::make_shared(ngPrc, ngraph::Shape{ batch_size, 1, reshape1_shape[2] });
     auto weightsNode = ngraph::builder::makeConstant(ngPrc, { 4 * hiddenSize, reshape1_shape[2] }, {}, true, weights_range.second, weights_range.first);
     auto reccurrenceWeightsNode = ngraph::builder::makeConstant(ngPrc, { 4 * hiddenSize, hiddenSize }, {}, true, weights_range.second, weights_range.first);

     //lstm [1, 10], [1, 118], [1, 118] -> [1, 118], [1, 118]
     outFormShapes1 = { batch_size, reshape1_shape[2] };
-    auto constantX = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, outFormShapes1);
-    auto lstm1 = std::make_shared(std::make_shared(X, constantX, false),
+    auto constantX = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, outFormShapes1);
+    auto lstm1 = std::make_shared(std::make_shared(X, constantX, false),
         H_t, C_t,
         weightsNode, reccurrenceWeightsNode, hiddenSize);
@@ -104,7 +104,7 @@ std::shared_ptr Basic_LSTM_S::GetNetwork(size_t thirdDimOut,
     auto body = std::make_shared(
         ngraph::OutputVector{ H_o, C_o }, ngraph::ParameterVector{ X, H_t, C_t });

-    auto tensor_iterator = std::make_shared();
+    auto tensor_iterator = std::make_shared();
     tensor_iterator->set_body(body);

     //input tensor shape: [1, num_cells, thirdDimOut] chunk shape: [1, 1, thirdDimOut]
@@ -117,7 +117,7 @@ std::shared_ptr Basic_LSTM_S::GetNetwork(size_t thirdDimOut,
     const size_t output_size = 12;
     auto fc1 = ngraph::builder::makeFullyConnected(out0, ngPrc, output_size, true, { hiddenSize, output_size }, { weights_range.second }, { 0.f });

-    ngraph::ResultVector results{ std::make_shared(fc1) };
+    ngraph::ResultVector results{ std::make_shared(fc1) };
     return std::make_shared(results, params, "Basic_LSTM_S");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp b/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp
index 3a57f3fd6b7543..80008290c98945 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/broadcast_power.cpp
@@ -29,16 +29,16 @@ void BroadcastPowerTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputs_shapes[0]))};

-    auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[1].size()},
+    auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[1].size()},
                                             inputs_shapes[1]);
-    auto reshape = std::make_shared(params[0], reshape_pattern, false);
+    auto reshape = std::make_shared(params[0], reshape_pattern, false);

     auto const_mult2 = ngraph::builder::makeConstant(ngPrc, {}, {-1.0f});
     auto sum = ngraph::builder::makeEltwise(reshape, const_mult2, ngraph::helpers::EltwiseTypes::MULTIPLY);

-    auto reshape_pattern_2 = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[0].size()},
+    auto reshape_pattern_2 = std::make_shared(ngraph::element::i64, ngraph::Shape{inputs_shapes[0].size()},
                                               inputs_shapes[0]);
-    auto reshape_2 = std::make_shared(sum, reshape_pattern_2, false);
+    auto reshape_2 = std::make_shared(sum, reshape_pattern_2, false);
     function = std::make_shared(reshape_2, params, "BroadcastPowerPass");
 }
 }  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp
index 33a101d1e0c8b0..76a1f0147b3145 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/cascade_concat.cpp
@@ -36,10 +36,10 @@ void CascadeConcat::SetUp() {
                               std::make_shared(ngPrc, ov::Shape(input2[0])),
                               std::make_shared(ngPrc, ov::Shape(input3[0]))};

-    auto relu1 = std::make_shared(input[0]);
-    auto relu2 = std::make_shared(input[1]);
-    auto relu3 = std::make_shared(input[2]);
-    auto concat = std::make_shared(ov::OutputVector{relu1->output(0),
+    auto relu1 = std::make_shared(input[0]);
+    auto relu2 = std::make_shared(input[1]);
+    auto relu3 = std::make_shared(input[2]);
+    auto concat = std::make_shared(ov::OutputVector{relu1->output(0),
                                                     relu2->output(0)},
                                    1);
@@ -48,18 +48,18 @@ void CascadeConcat::SetUp() {
     auto reshape2_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0});
     auto reshape2 = std::make_shared(reshape, reshape2_constant);

-    auto concat2 = std::make_shared(ov::OutputVector{reshape2->output(0),
+    auto concat2 = std::make_shared(ov::OutputVector{reshape2->output(0),
                                                      relu3->output(0)},
                                     1);

     ngraph::ResultVector results;
     if (multioutput) {
         auto const_mult = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1, input1[0][1]+input2[0][1]},
                                                         std::vector{1.01f});
-        auto mult = std::make_shared(concat, const_mult);
-        results = ngraph::ResultVector{std::make_shared(concat2),
-                                       std::make_shared(mult)};
+        auto mult = std::make_shared(concat, const_mult);
+        results = ngraph::ResultVector{std::make_shared(concat2),
+                                       std::make_shared(mult)};
     } else {
-        results = ngraph::ResultVector{std::make_shared(concat2)};
+        results = ngraph::ResultVector{std::make_shared(concat2)};
     }
     function = std::make_shared(results, input, "concat_reshape_reshape_concat_mul");
 }
@@ -108,14 +108,14 @@ void CascadeConcatWithMultiConnReshape::SetUp() {
     auto inputShapeSqueezed = inputShape;
     inputShapeSqueezed.insert(std::begin(inputShapeSqueezed), 1);
     ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(inputShapeSqueezed))};
-    auto relu = std::make_shared(input[0]);
+    auto relu = std::make_shared(input[0]);
     auto const1 = ngraph::builder::makeConstant(ngPrc, inputShapeSqueezed, std::vector{}, true);
     auto concat1 = std::make_shared(ov::NodeVector{relu, const1}, inputShapeSqueezed.size() - 1);

     auto squeeze_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0});
     auto squeeze = std::make_shared(concat1, squeeze_constant);

-    auto relu1 = std::make_shared(squeeze);
+    auto relu1 = std::make_shared(squeeze);

     auto unsqueeze1_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0});
     auto unsqueeze1 = std::make_shared(relu1, unsqueeze1_constant);
@@ -125,13 +125,13 @@ void CascadeConcatWithMultiConnReshape::SetUp() {
     // Change concat name to make it the second connection in the map of squeeze output connections
     concat2->set_friendly_name("XConcat");

-    auto relu2 = std::make_shared(concat2);
+    auto relu2 = std::make_shared(concat2);

     auto unsqueeze2_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0});
     auto unsqueeze2 = std::make_shared(relu2, unsqueeze2_constant);

-    ngraph::ResultVector results = {std::make_shared(unsqueeze1),
-                                    std::make_shared(unsqueeze2)};
+    ngraph::ResultVector results = {std::make_shared(unsqueeze1),
+                                    std::make_shared(unsqueeze2)};

     function = std::make_shared(results, input, "CascadeConcatWithMultiConnReshapeTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp
index b8dfc25f12f1fe..6d533c76d95b44 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/clamp_fq.cpp
@@ -63,16 +63,16 @@ namespace SubgraphTestsDefinitions {
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
         ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};

-        auto clamp = std::make_shared(params[0], clamp_min_max[0], clamp_min_max[1]);
+        auto clamp = std::make_shared(params[0], clamp_min_max[0], clamp_min_max[1]);

         auto FQNode = ngraph::builder::makeFakeQuantize(clamp, ngraph::element::f32, levels[0], constShape[0],
             { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax });

-        auto FQ = std::dynamic_pointer_cast(FQNode);
-        auto sigmoid = std::make_shared(FQ);
+        auto FQ = std::dynamic_pointer_cast(FQNode);
+        auto sigmoid = std::make_shared(FQ);

-        ngraph::ResultVector results{std::make_shared(sigmoid)};
+        ngraph::ResultVector results{std::make_shared(sigmoid)};
         function = std::make_shared(results, params, "fakeQuantizeSubgraph");
         configuration = config.second;
     }
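The subgraph hunks above and below all follow the same migration: ngraph::opsetN aliases and ngraph::op enums are replaced with explicitly versioned ov::op types (visible here in the surviving ngraph::op::EpsMode -> ov::op::EpsMode and ngraph::op::PadType -> ov::op::PadType renames; the template arguments themselves were lost in this copy of the diff). A sketch of the target style using ordinary versioned ops (the op versions shown are illustrative, not taken from this diff):

    #include "openvino/openvino.hpp"

    // Parameter -> Relu -> Result, spelled with versioned ov::op types.
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16});
    auto relu = std::make_shared<ov::op::v0::Relu>(param);
    auto result = std::make_shared<ov::op::v0::Result>(relu);
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});

Versioned names pin each test to one concrete operator definition instead of whatever the latest opset alias happens to resolve to.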
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp
index 215a33f47e6484..f42edf1e3b7926 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_conv.cpp
@@ -62,15 +62,15 @@ void ConcatConvTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))};

-    auto relu1 = std::make_shared(params[0]);
+    auto relu1 = std::make_shared(params[0]);

     auto const_values = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f);
     auto constant = ngraph::builder::makeConstant(ngPrc, inputShape, const_values);
     auto concat = std::make_shared(ov::NodeVector{constant, relu1}, 1);

     std::vector convInputShape = {1, inputChannels, 1, 2 * inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(concat, reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(concat, reshapePattern1, false);

     auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1],
                                                                  -0.2f, 0.2f);
@@ -79,13 +79,13 @@ void ConcatConvTest::SetUp() {
                                                  {kernelShape[0], kernelShape[1]},
                                                  {kernelShape[0] > 1 ? stride : 1, stride},
                                                  {0, 0},
-                                                 { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
+                                                 { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights);

     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = {1, outputChannels * widthAfterConv };

-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(conv, reshapePattern2, false);

     function = std::make_shared(reshape2, params, "ConcatConvTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp
index b6808f06c3c255..410b6d789ba5a6 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp
@@ -39,33 +39,33 @@ void ConcatMultiInput::SetUp() {

 void ConcatMultiInput::GenerateStridedSliceModel() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(paramSize))};
-    auto stride = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector{ 1, 1 });
+    auto stride = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, std::vector{ 1, 1 });

     std::vector newAxis = { 0, 0 };
     std::vector begin_mask = { 0, 0 };
     std::vector end_mask = { 0, 0 };
-    std::vector> ssArray;
+    std::vector> ssArray;
     ngraph::OutputVector concatInput;

-    auto relu = std::make_shared(params[0]);
+    auto relu = std::make_shared(params[0]);
     std::vector startOffset = { 0, 0 };
     for (size_t i = 0; i < inputShapes.size(); ++i) {
         std::vector shape = { static_cast(inputShapes[i][0]), static_cast(inputShapes[i][1]) };
         std::vector endoffset = { static_cast(inputShapes[i][0]) + startOffset[0], static_cast(inputShapes[i][1]) + startOffset[1]};
-        auto begin = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, startOffset);
-        auto end = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, endoffset);
-        auto ss = std::make_shared(relu, begin, end, stride, begin_mask, end_mask, newAxis);
+        auto begin = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, startOffset);
+        auto end = std::make_shared(ngraph::element::i64, ngraph::Shape{ 2 }, endoffset);
+        auto ss = std::make_shared(relu, begin, end, stride, begin_mask, end_mask, newAxis);
         ssArray.push_back(ss);
         concatInput.push_back(ssArray[i]);
         startOffset[1] += shape[1];
     }
-    auto concat = std::make_shared(concatInput, 1);
+    auto concat = std::make_shared(concatInput, 1);

-    ngraph::ResultVector results{ std::make_shared(concat) };
+    ngraph::ResultVector results{ std::make_shared(concat) };
     function = std::make_shared(results, params, "ConcatMultiInput");
 }
@@ -104,7 +104,7 @@ void ConcatMultiInput::GenerateConstOnlyModel() {

     auto concat = std::make_shared(concatInputs, 1);

-    ngraph::ResultVector results{ std::make_shared(concat) };
+    ngraph::ResultVector results{ std::make_shared(concat) };
     function = std::make_shared(results, input_vector, "ConcatConstOnly");
 }
@@ -114,17 +114,17 @@ void ConcatMultiInput::GenerateMemoryModel() {
     auto variable = std::make_shared(ngraph::VariableInfo{ov::Shape(inputShapes[0]), ngraph::element::dynamic, "concat_input_memory"});
-    auto mem_i = std::make_shared(ngPrc, inputShapes[0]);
-    auto mem_r = std::make_shared(mem_i, variable);
+    auto mem_i = std::make_shared(ngPrc, inputShapes[0]);
+    auto mem_r = std::make_shared(mem_i, variable);

     ngraph::OutputVector concat_input;
     concat_input.push_back(mem_r);
     concat_input.push_back(input.at(0));
-    auto concat = std::make_shared(concat_input, axis);
+    auto concat = std::make_shared(concat_input, axis);

-    auto mem_w = std::make_shared(input.at(0), variable);
+    auto mem_w = std::make_shared(input.at(0), variable);

-    auto res = std::make_shared(concat);
+    auto res = std::make_shared(concat);
     function = std::make_shared(ngraph::ResultVector{res}, ngraph::SinkVector{mem_w}, input, "ConcatMemory");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp
index 7678ee5db24e41..fae76e845a6e3d 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_quantization_during_memory_requantization.cpp
@@ -38,30 +38,30 @@ namespace SubgraphTestsDefinitions {
         ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})};

-        auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init);
-        auto mem_1_read = std::make_shared(mem_1_const, "memory_1");
+        auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init);
+        auto mem_1_read = std::make_shared(mem_1_const, "memory_1");

-        auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_read, input[0] }, 1);
+        auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_read, input[0] }, 1);
         // Revert concat names to set the needed order of scale factors calculation
         concat_1->set_friendly_name("concat2");
         auto split_1 = ngraph::builder::makeVariadicSplit(concat_1, { inputSize, hiddenSize }, 1);

-        auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize },
+        auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize },
                                           ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f));
         auto mul = ngraph::builder::makeEltwise(split_1->output(1), mul_const, ngraph::helpers::EltwiseTypes::MULTIPLY);
-        auto mem_1_write = std::make_shared(mul, "memory_1");
+        auto mem_1_write = std::make_shared(mul, "memory_1");

-        auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init);
-        auto mem_2_read = std::make_shared(mem_2_const, "memory_2");
+        auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init);
+        auto mem_2_read = std::make_shared(mem_2_const, "memory_2");

-        auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_read, mul }, 1);
+        auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_read, mul }, 1);
         // Revert concat names to set the needed order of scale factors calculation
         concat_2->set_friendly_name("concat1");
         auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
         auto split_2 = std::make_shared(concat_2, split_axis_op, 2);

-        auto mem_2_write = std::make_shared(split_2->output(0), "memory_2");
-        auto sigm = std::make_shared(split_2->output(1));
+        auto mem_2_write = std::make_shared(split_2->output(0), "memory_2");
+        auto sigm = std::make_shared(split_2->output(1));

         mem_1_write->add_control_dependency(mem_1_read);
         sigm->add_control_dependency(mem_1_write);
@@ -85,20 +85,20 @@ namespace SubgraphTestsDefinitions {
         ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})};

-        auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init);
-        auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_const, input[0] }, 1);
+        auto mem_1_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_1_init);
+        auto concat_1 = std::make_shared(ngraph::OutputVector{ mem_1_const, input[0] }, 1);
         auto split_1 = ngraph::builder::makeVariadicSplit(concat_1, { inputSize, hiddenSize }, 1);

-        auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize },
+        auto mul_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize },
                                           ov::test::utils::generate_float_numbers(hiddenSize, -0.2f, 0.0f));
         auto mul = ngraph::builder::makeEltwise(split_1->output(1), mul_const, ngraph::helpers::EltwiseTypes::MULTIPLY);

-        auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init);
-        auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_const, mul }, 1);
+        auto mem_2_const = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_2_init);
+        auto concat_2 = std::make_shared(ngraph::OutputVector{ mem_2_const, mul }, 1);
         auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
         auto split_2 = std::make_shared(concat_2, split_axis_op, 2);

-        auto sigm = std::make_shared(split_2->output(1));
+        auto sigm = std::make_shared(split_2->output(1));

         function = std::make_shared(sigm, input, "concat_quant_during_memory_requant_nomemory");
     }
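concat_multi_input.cpp and concat_quantization_during_memory_requantization.cpp above both build stateful graphs from paired memory read/write nodes plus explicit control dependencies. A compressed sketch of that wiring, assuming the variable-based v6 ReadValue/Assign pair (the hunks appear to use both the variable-based and the string-id constructors; only the former is shown):

    #include "openvino/openvino.hpp"
    #include "openvino/op/util/variable.hpp"

    auto variable = std::make_shared<ov::op::util::Variable>(
        ov::op::util::VariableInfo{ov::PartialShape{1, 16}, ov::element::f32, "memory_1"});
    auto init = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 16});
    auto read = std::make_shared<ov::op::v6::ReadValue>(init, variable);  // yields last stored state
    auto update = std::make_shared<ov::op::v6::Assign>(read, variable);   // stores state for next run
    auto result = std::make_shared<ov::op::v0::Result>(read);
    // Assign is a sink: it has no data consumers, so it is handed to the Model explicitly.
    auto model = std::make_shared<ov::Model>(ov::ResultVector{result},
                                             ov::SinkVector{update},
                                             ov::ParameterVector{init});

The add_control_dependency calls in the hunks serve the same role as the SinkVector here: they keep the write node alive and ordered relative to the read.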
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp b/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp
index 31e212e0d9fb83..74598aabc0cd6f 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/concat_qunatization.cpp
@@ -32,29 +32,29 @@ void ConcatQuantization::SetUp() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 160})};

     std::vector outFormShapes1 = { 1, 5, 32 };
-    auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1);
-    auto reshape1 = std::make_shared(params[0], pattern1, false);
+    auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 3 }, outFormShapes1);
+    auto reshape1 = std::make_shared(params[0], pattern1, false);

-    auto tanh = std::make_shared(reshape1);
+    auto tanh = std::make_shared(reshape1);

     std::vector outFormShapes2 = { 1, 160 };
-    auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
-    auto reshape2 = std::make_shared(tanh, pattern2, false);
+    auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
+    auto reshape2 = std::make_shared(tanh, pattern2, false);
     auto scale = ngraph::builder::makeConstant(ngPrc, outFormShapes2, {}, true);
-    //For ngraph::op::ScaleShift: Cannot cast ngraph node ScaleShift to CNNLayer!
-    auto scale_shift = std::make_shared(reshape2, scale);
+    //For ov::op::v0::ScaleShift: Cannot cast ngraph node ScaleShift to CNNLayer!
+    auto scale_shift = std::make_shared(reshape2, scale);

     std::vector outFormShapes3 = { 5, 32 };
-    auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
-    auto reshape3 = std::make_shared(scale_shift, pattern3, false);
+    auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
+    auto reshape3 = std::make_shared(scale_shift, pattern3, false);

-    auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
-    auto reshape4 = std::make_shared(tanh, pattern4, false);
+    auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
+    auto reshape4 = std::make_shared(tanh, pattern4, false);

-    auto concat = std::make_shared(ngraph::OutputVector{ reshape3, reshape4 }, 0);
+    auto concat = std::make_shared(ngraph::OutputVector{ reshape3, reshape4 }, 0);
     concat->set_friendly_name("concat");

-    ngraph::ResultVector results{std::make_shared(concat)};
+    ngraph::ResultVector results{std::make_shared(concat)};
     function = std::make_shared(results, params, "ConcatQuantization");
 }
 }  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp
index 420cb9fbcf51bd..745665c51c5876 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/connect_split_concat_concat.cpp
@@ -26,18 +26,18 @@ void SplitConcatConcatTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 256})};

-    auto relu_start = std::make_shared(params[0]);
+    auto relu_start = std::make_shared(params[0]);
     auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
     auto split = std::make_shared(relu_start, split_axis_op, 2);

     auto const_concat = ngraph::builder::makeConstant(ngPrc, {1, 96}, std::vector{0});
     auto const_concat_2 = ngraph::builder::makeConstant(ngPrc, {1, 96}, std::vector{0});
-    auto concat = std::make_shared(ngraph::OutputVector{split->output(0), const_concat}, 1);
-    auto concat_2 = std::make_shared(ngraph::OutputVector{concat, const_concat_2},
+    auto concat = std::make_shared(ngraph::OutputVector{split->output(0), const_concat}, 1);
+    auto concat_2 = std::make_shared(ngraph::OutputVector{concat, const_concat_2},
                                     1);
-    auto relu = std::make_shared(concat_2);
+    auto relu = std::make_shared(concat_2);
     ngraph::ResultVector resultVector{
-        std::make_shared(relu)
+        std::make_shared(relu)
     };
     function = std::make_shared(resultVector, params, "Multiple_connection_split_concat");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp
index 7dabd62e98fca4..c8cb41a79327ef 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/const_conv_concat.cpp
@@ -64,8 +64,8 @@ void ConstConvConcatTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))};

     std::vector convInputShape = {inputShape[0], inputChannels, 1, inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);

     auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1],
                                                                  0.0f, 0.1f);
@@ -74,7 +74,7 @@ void ConstConvConcatTest::SetUp() {
                                                  {kernelShape[0], kernelShape[1]},
                                                  {kernelShape[0] > 1 ? stride : 1, stride},
                                                  {0, 0},
-                                                 { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
+                                                 { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights);

     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = {1, outputChannels * widthAfterConv };
@@ -83,9 +83,9 @@ void ConstConvConcatTest::SetUp() {
     auto constant = ngraph::builder::makeConstant(ngPrc, {1, outputChannels, 1, widthAfterConv}, const_values);
     auto concat = std::make_shared(ov::NodeVector{constant, conv}, 3);

-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 },
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 },
                                             std::vector{1, 2 * outputChannels * widthAfterConv });
-    auto reshape2 = std::make_shared(concat, reshapePattern2, false);
+    auto reshape2 = std::make_shared(concat, reshapePattern2, false);

     function = std::make_shared(reshape2, params, "ConstConvConcatTest");
     functionRefs = ngraph::clone_function(*function);
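concat_conv.cpp and const_conv_concat.cpp above (and the eltwise/fc variants further down) all switch ngraph::op::PadType to ov::op::PadType in their convolution builders. For reference, the enum lands in the auto_pad slot of the standard convolution constructor; a sketch with illustrative shapes (the values are not taken from the tests):

    #include "openvino/openvino.hpp"

    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8, 1, 32});
    auto weights = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{4, 8, 1, 3},
                                                std::vector<float>(4 * 8 * 3, 0.1f));
    auto conv = std::make_shared<ov::op::v1::Convolution>(data, weights,
                                                          ov::Strides{1, 1},         // strides
                                                          ov::CoordinateDiff{0, 0},  // pads_begin
                                                          ov::CoordinateDiff{0, 0},  // pads_end
                                                          ov::Strides{1, 1},         // dilations
                                                          ov::op::PadType::VALID);   // no implicit padding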
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp
index 625c187dffc06e..593e2efaa12c9b 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/conv_eltwise_fusion.cpp
@@ -6,7 +6,6 @@

 #include "common_test_utils/graph_comparator.hpp"
 #include "openvino/core/node.hpp"
-#include "openvino/opsets/opset11.hpp"
 #include "openvino/pass/constant_folding.hpp"
 #include "openvino/pass/manager.hpp"
 #include "ov_models/builders.hpp"
@@ -54,7 +53,7 @@ void ConvEltwiseFusion::SetUp() {

     pass::Manager manager;
     {
-        auto param = std::make_shared(precision, input_shape);
+        auto param = std::make_shared(precision, input_shape);
         auto spatial_dims = input_shape.size() - 2;

         Shape strides(spatial_dims, 1);
@@ -66,17 +65,17 @@ void ConvEltwiseFusion::SetUp() {
                                                 const_shape,
                                                 std::vector(shape_size(const_shape), 3));
         std::shared_ptr conv;
-        if (conv_type == opset11::Convolution::get_type_info_static()) {
-            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
-        } else if (conv_type == opset11::GroupConvolution::get_type_info_static()) {
-            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
-        } else if (conv_type == opset11::ConvolutionBackpropData::get_type_info_static()) {
+        if (conv_type == ov::op::v1::Convolution::get_type_info_static()) {
+            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) {
+            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ov::op::v1::ConvolutionBackpropData::get_type_info_static()) {
             if (num_inputs == 3) {
-                auto output_shape = std::make_shared(
+                auto output_shape = std::make_shared(
                     element::u64,
                     Shape{spatial_dims},
                     std::vector(input_shape.begin() + 2, input_shape.end()));
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         output_shape,
                                         strides,
@@ -84,20 +83,20 @@ void ConvEltwiseFusion::SetUp() {
                                         pad_end,
                                         strides);
             } else {
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         strides,
                                         pad_begin,
                                         pad_end,
                                         strides);
             }
-        } else if (conv_type == opset11::GroupConvolutionBackpropData::get_type_info_static()) {
+        } else if (conv_type == ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()) {
             if (num_inputs == 3) {
-                auto output_shape = std::make_shared(
+                auto output_shape = std::make_shared(
                     element::u64,
                     Shape{spatial_dims},
                     std::vector(input_shape.begin() + 2, input_shape.end()));
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         output_shape,
                                         strides,
@@ -105,7 +104,7 @@ void ConvEltwiseFusion::SetUp() {
                                         pad_end,
                                         strides);
             } else {
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         strides,
                                         pad_begin,
@@ -117,14 +116,14 @@ void ConvEltwiseFusion::SetUp() {
         }

         std::shared_ptr eltwise;
-        if (eltwise_type == opset11::Multiply::get_type_info_static()) {
-            eltwise = std::make_shared(conv, eltwise_const);
+        if (eltwise_type == ov::op::v1::Multiply::get_type_info_static()) {
+            eltwise = std::make_shared(conv, eltwise_const);
             manager.register_pass();
             manager.register_pass();
             manager.register_pass();
             manager.register_pass();
-        } else if (eltwise_type == opset11::Add::get_type_info_static()) {
-            eltwise = std::make_shared(conv, eltwise_const);
+        } else if (eltwise_type == ov::op::v1::Add::get_type_info_static()) {
+            eltwise = std::make_shared(conv, eltwise_const);
             // manager.register_pass();
             // manager.register_pass();
         } else {
@@ -139,7 +138,7 @@ void ConvEltwiseFusion::SetUp() {

     std::shared_ptr function_ref;
     if (!negative) {
-        auto param = std::make_shared(precision, input_shape);
+        auto param = std::make_shared(precision, input_shape);
         auto spatial_dims = input_shape.size() - 2;

         Shape strides(spatial_dims, 1);
@@ -148,17 +147,17 @@ void ConvEltwiseFusion::SetUp() {
                                                 weights_shape,
                                                 std::vector(shape_size(weights_shape), 6));
         std::shared_ptr conv;
-        if (conv_type == opset11::Convolution::get_type_info_static()) {
-            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
-        } else if (conv_type == opset11::GroupConvolution::get_type_info_static()) {
-            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
-        } else if (conv_type == opset11::ConvolutionBackpropData::get_type_info_static()) {
+        if (conv_type == ov::op::v1::Convolution::get_type_info_static()) {
+            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) {
+            conv = std::make_shared(param, weights, strides, pad_begin, pad_end, strides);
+        } else if (conv_type == ov::op::v1::ConvolutionBackpropData::get_type_info_static()) {
             if (num_inputs == 3) {
-                auto output_shape = std::make_shared(
+                auto output_shape = std::make_shared(
                     element::u64,
                     Shape{spatial_dims},
                     std::vector(input_shape.begin() + 2, input_shape.end()));
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         output_shape,
                                         strides,
@@ -166,20 +165,20 @@ void ConvEltwiseFusion::SetUp() {
                                         pad_end,
                                         strides);
             } else {
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         strides,
                                         pad_begin,
                                         pad_end,
                                         strides);
             }
-        } else if (conv_type == opset11::GroupConvolutionBackpropData::get_type_info_static()) {
+        } else if (conv_type == ov::op::v1::GroupConvolutionBackpropData::get_type_info_static()) {
             if (num_inputs == 3) {
-                auto output_shape = std::make_shared(
+                auto output_shape = std::make_shared(
                     element::u64,
                     Shape{spatial_dims},
                     std::vector(input_shape.begin() + 2, input_shape.end()));
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         output_shape,
                                         strides,
@@ -187,7 +186,7 @@ void ConvEltwiseFusion::SetUp() {
                                         pad_end,
                                         strides);
             } else {
-                conv = std::make_shared(param,
+                conv = std::make_shared(param,
                                         weights,
                                         strides,
                                         pad_begin,
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp
index a2519f47629deb..dc4d937842b53f 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_eltwise.cpp
@@ -77,8 +77,8 @@ void ConvFqEltwiseTest::SetUp() {
     std::mt19937 gen(seed);

     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);

     float weightVal = 0.2;
     auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]},
@@ -87,14 +87,14 @@ void ConvFqEltwiseTest::SetUp() {
         ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{-convFQValue});
     auto convHighNode =
         ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{convFQValue});
-    auto convWeightsFQNode = std::make_shared(filterWeightsNode,
+    auto convWeightsFQNode = std::make_shared(filterWeightsNode,
         convLowNode, convHighNode, convLowNode, convHighNode, levels);
-    auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode);
-    auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 },
+    auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode);
+    auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 },
                                  std::vector{ 0, 0 }, std::vector{ 1, 1 },
-                                 ngraph::op::PadType::VALID);
+                                 ov::op::PadType::VALID);
     auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.0f });
-    auto add_1 = std::make_shared(conv, biasesWeightsNode);
+    auto add_1 = std::make_shared(conv, biasesWeightsNode);

     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1;
     auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1;
@@ -104,13 +104,13 @@ void ConvFqEltwiseTest::SetUp() {
         std::vector{inputDataMin * weightVal * kernelShape[1] * 1.5f});
     auto highNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 },
         std::vector{inputDataMax * weightVal * kernelShape[1] * 1.5f});
-    auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels);
+    auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels);

     auto constNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.5f });
-    auto add_2 = std::make_shared(fq, constNode);
+    auto add_2 = std::make_shared(fq, constNode);

-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(add_2, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(add_2, reshapePattern2, false);

     function = std::make_shared(reshape2, params, "convFqEltwise");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp
index ad2a8ad441199b..d108635c790794 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/conv_fq_relu.cpp
@@ -77,8 +77,8 @@ void ConvFqReluTest::SetUp() {
     std::mt19937 gen(seed);

     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);

     float weightVal = 0.2;
     auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]},
@@ -87,14 +87,14 @@ void ConvFqReluTest::SetUp() {
         ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{-convFQValue});
     auto convHighNode =
         ngraph::builder::makeConstant(ngraph::element::f32, std::vector{1}, std::vector{convFQValue});
-    auto convWeightsFQNode = std::make_shared(filterWeightsNode,
+    auto convWeightsFQNode = std::make_shared(filterWeightsNode,
         convLowNode, convHighNode, convLowNode, convHighNode, levels);
-    auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode);
-    auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 },
+    auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode);
+    auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 },
                                  std::vector{ 0, 0 }, std::vector{ 1, 1 },
-                                 ngraph::op::PadType::VALID);
+                                 ov::op::PadType::VALID);
     auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.0f });
-    auto add_1 = std::make_shared(conv, biasesWeightsNode);
+    auto add_1 = std::make_shared(conv, biasesWeightsNode);

     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1;
     auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1;
@@ -104,12 +104,12 @@ void ConvFqReluTest::SetUp() {
         std::vector{inputDataMin * weightVal * kernelShape[1] * 1.5f});
     auto highNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 },
         std::vector{inputDataMax * weightVal * kernelShape[1] * 1.5f});
-    auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels);
+    auto fq = std::make_shared(add_1, lowNode, highNode, lowNode, highNode, levels);

-    auto relu = std::make_shared(fq);
+    auto relu = std::make_shared(fq);

-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(relu, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(relu, reshapePattern2, false);

     function = std::make_shared(reshape2, params, "convFqEltwise");
 }
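conv_eltwise_fusion.cpp above selects the convolution and eltwise variant under test by comparing DiscreteTypeInfo values from get_type_info_static(). The mechanism in isolation, with the branch set reduced to two (a sketch; the real test covers four convolution types):

    #include "openvino/openvino.hpp"

    std::shared_ptr<ov::Node> make_conv(const ov::DiscreteTypeInfo& conv_type,
                                        const ov::Output<ov::Node>& data,
                                        const ov::Output<ov::Node>& weights) {
        ov::Strides strides{1, 1};
        ov::CoordinateDiff pads{0, 0};
        if (conv_type == ov::op::v1::Convolution::get_type_info_static()) {
            return std::make_shared<ov::op::v1::Convolution>(data, weights, strides, pads, pads, strides);
        } else if (conv_type == ov::op::v1::GroupConvolution::get_type_info_static()) {
            return std::make_shared<ov::op::v1::GroupConvolution>(data, weights, strides, pads, pads, strides);
        }
        throw std::runtime_error("unexpected convolution type");
    }

Comparing type infos lets the test be parameterized by operator type without instantiating a node first, which is exactly what the opset11 -> ov::op::v1 rename above preserves.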
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp b/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp
index b3f0c42713c717..3a83e80c427533 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/convolution_relu_sequence.cpp
@@ -73,12 +73,12 @@ void ConvolutionReluSequenceTest::SetUp() {
             ngraph::builder::makeConvolution(
                 lastOutputs, ngPrc, single.kernelSize, single.strides, single.padBegin, single.padEnd,
-                dilation, ngraph::op::PadType::EXPLICIT, single.numOutChannels, addBiases, filter_weights, biases));
-        lastOutputs = std::make_shared(conv);
+                dilation, ov::op::PadType::EXPLICIT, single.numOutChannels, addBiases, filter_weights, biases));
+        lastOutputs = std::make_shared(conv);
         if (single.poolingWindow.size() == 2 &&
             (single.poolingWindow[0] != 1 ||
              single.poolingWindow[1] != 1)) {
-            lastOutputs = std::make_shared(lastOutputs, single.poolingStride,
+            lastOutputs = std::make_shared(lastOutputs, single.poolingStride,
                 ngraph::Shape{ 0, 0 },
                 ngraph::Shape{ 0, 0 },
                 single.poolingWindow);
@@ -86,7 +86,7 @@ void ConvolutionReluSequenceTest::SetUp() {
         inputChannels = single.numOutChannels;
     }

-    ngraph::ResultVector results{std::make_shared(lastOutputs)};
+    ngraph::ResultVector results{std::make_shared(lastOutputs)};
     function = std::make_shared(results, params, "convolution_relu_sequence");
 }
 }  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp b/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp
index 83866052440052..1362295cd1dbd2 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/copy_before_squeeze.cpp
@@ -27,22 +27,22 @@ namespace SubgraphTestsDefinitions {
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
         ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(inputShape))};

-        auto reshape_0_pattern = std::make_shared(ngraph::element::i64,
+        auto reshape_0_pattern = std::make_shared(ngraph::element::i64,
                                                   ngraph::Shape{3},
                                                   std::vector{1, inputShape[1] / 64, 64});
-        auto reshape_0 = std::make_shared(input[0], reshape_0_pattern, false);
-        auto relu = std::make_shared(reshape_0);
+        auto reshape_0 = std::make_shared(input[0], reshape_0_pattern, false);
+        auto relu = std::make_shared(reshape_0);

-        auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0});
-        auto reshape_pattern = std::make_shared(ngraph::element::i64,
+        auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0});
+        auto reshape_pattern = std::make_shared(ngraph::element::i64,
                                                 ngraph::Shape{2},
                                                 std::vector{1, inputShape[1]});
-        auto squeeze_1 = std::make_shared(relu, constant_squeeze);
-        auto reshape_1 = std::make_shared(squeeze_1, reshape_pattern, false);
-        auto squeeze_2 = std::make_shared(relu, constant_squeeze);
-        auto reshape_2 = std::make_shared(squeeze_2, reshape_pattern, false);
+        auto squeeze_1 = std::make_shared(relu, constant_squeeze);
+        auto reshape_1 = std::make_shared(squeeze_1, reshape_pattern, false);
+        auto squeeze_2 = std::make_shared(relu, constant_squeeze);
+        auto reshape_2 = std::make_shared(squeeze_2, reshape_pattern, false);

-        auto concat = std::make_shared(ngraph::OutputVector{reshape_1, reshape_2}, 1);
+        auto concat = std::make_shared(ngraph::OutputVector{reshape_1, reshape_2}, 1);
         function = std::make_shared(concat, input, "copy_before_squeeze");
     }
 }  // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp b/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp
index bd2aa5bc46856a..ddff4c2837ea8d 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/delayed_copy_layer.cpp
@@ -72,16 +72,16 @@ namespace SubgraphTestsDefinitions {
         memory_init = ov::test::utils::generate_float_numbers(memory_size, -0.2f, 0.2f);

-        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init);
+        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init);

-        auto mem_r = std::make_shared(mem_c, "id");
+        auto mem_r = std::make_shared(mem_c, "id");

-        auto concat = std::make_shared(ngraph::OutputVector{mem_r, input[0]}, 1);
+        auto concat = std::make_shared(ngraph::OutputVector{mem_r, input[0]}, 1);
         auto split = ngraph::builder::makeVariadicSplit(concat, {3 * memory_size, memory_size}, 1);
-        auto mem_w = std::make_shared(split->output(1), "id");
+        auto mem_w = std::make_shared(split->output(1), "id");

         auto VariadicSplit = ngraph::builder::makeVariadicSplit(concat, {memory_size / 2, 3 * memory_size + memory_size / 2}, 1);
-        auto relu2 = std::make_shared(VariadicSplit->output(1));
+        auto relu2 = std::make_shared(VariadicSplit->output(1));

         mem_w->add_control_dependency(mem_r);
         relu2->add_control_dependency(mem_w);
@@ -101,12 +101,12 @@ namespace SubgraphTestsDefinitions {
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
         ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape{1, 3 * memory_size})};

-        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init);
-        auto concat = std::make_shared(ngraph::OutputVector{mem_c, input[0]}, 1);
+        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{1, memory_size}, memory_init);
+        auto concat = std::make_shared(ngraph::OutputVector{mem_c, input[0]}, 1);
         auto split = ngraph::builder::makeVariadicSplit(concat, {3 * memory_size, memory_size}, 1);

         auto VariadicSplit = ngraph::builder::makeVariadicSplit(concat, {memory_size / 2, 3 * memory_size + memory_size / 2}, 1);
-        auto relu2 = std::make_shared(VariadicSplit->output(1));
+        auto relu2 = std::make_shared(VariadicSplit->output(1));

         function = std::make_shared(relu2, input, "delayed_copy_layer_nonmemory");
     }
@@ -126,21 +126,21 @@ namespace SubgraphTestsDefinitions {
         memory_init = ov::test::utils::generate_float_numbers(memory_size, -0.2f, 0.2f);

         auto mem_c = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{8, memory_size / 8}, memory_init);
-        auto mem_r = std::make_shared(mem_c, "id");
+        auto mem_r = std::make_shared(mem_c, "id");
         auto reshape_pattern1 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size});
-        auto reshape1 = std::make_shared(mem_r, reshape_pattern1, false);
+        auto reshape1 = std::make_shared(mem_r, reshape_pattern1, false);
         auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
         auto split = std::make_shared(reshape1, split_axis_op, 2);

-        auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1);
+        auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1);
         auto reshape_pattern2 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{8, memory_size / 8});
-        auto reshape2 = std::make_shared(concat, reshape_pattern2, false);
+        auto reshape2 = std::make_shared(concat, reshape_pattern2, false);

-        auto mem_w = std::make_shared(reshape2, "id");
+        auto mem_w = std::make_shared(reshape2, "id");

-        auto relu = std::make_shared(reshape2);
+        auto relu = std::make_shared(reshape2);
         auto reshape_pattern3 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size});
-        auto reshape3 = std::make_shared(relu, reshape_pattern3, false);
+        auto reshape3 = std::make_shared(relu, reshape_pattern3, false);

         mem_w->add_control_dependency(mem_r);
         reshape3->add_control_dependency(mem_w);
@@ -162,17 +162,17 @@ namespace SubgraphTestsDefinitions {
         auto mem_c = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1, memory_size}, memory_init);

         auto reshape_pattern1 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size});
-        auto reshape1 = std::make_shared(mem_c, reshape_pattern1, false);
+        auto reshape1 = std::make_shared(mem_c, reshape_pattern1, false);
         auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
         auto split = std::make_shared(reshape1, split_axis_op, 2);

-        auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1);
+        auto concat = std::make_shared(ngraph::OutputVector{split->output(0), input[0]}, 1);
         auto reshape_pattern2 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{8, memory_size / 8});
-        auto reshape2 = std::make_shared(concat, reshape_pattern2, false);
+        auto reshape2 = std::make_shared(concat, reshape_pattern2, false);

-        auto relu = std::make_shared(reshape2);
+        auto relu = std::make_shared(reshape2);
         auto reshape_pattern3 = ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{2}, ngraph::Shape{1, memory_size});
-        auto reshape3 = std::make_shared(relu, reshape_pattern3, false);
+        auto reshape3 = std::make_shared(relu, reshape_pattern3, false);

         function = std::make_shared(reshape3, input, "delayed_copy_layer_reshape_nonmemory");
     }
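copy_before_squeeze.cpp and delayed_copy_layer.cpp above lean on the axes-as-constant form of Squeeze/Unsqueeze to move between 2D and 3D layouts. The shape round trip in isolation (a sketch; the shapes are illustrative):

    #include "openvino/openvino.hpp"

    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4, 64});
    auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0});
    auto squeezed = std::make_shared<ov::op::v0::Squeeze>(data, axes);       // {1,4,64} -> {4,64}
    auto restored = std::make_shared<ov::op::v0::Unsqueeze>(squeezed, axes); // back to {1,4,64}

Keeping the axes in an i64 Constant (rather than relying on rank inference) is what lets both tests reuse one constant for every squeeze/unsqueeze pair.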
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); auto scale = ov::test::utils::generate_float_numbers(outFormShapes[1], -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(outFormShapes[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); - auto mul = std::make_shared(reshape2, mul_const); - auto add_const = std::make_shared(ngPrc, outFormShapes, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, outFormShapes, scale); + auto mul = std::make_shared(reshape2, mul_const); + auto add_const = std::make_shared(ngPrc, outFormShapes, shift); + auto add = std::make_shared(mul, add_const); function = std::make_shared(mul, params, "EltwiseAfterConvTest"); } @@ -150,14 +150,14 @@ void EltwiseBeforeConvTest::SetUp() { auto scale = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, inputShape, scale); - auto mul = std::make_shared(params[0], mul_const); - auto add_const = std::make_shared(ngPrc, inputShape, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, inputShape, scale); + auto mul = std::make_shared(params[0], mul_const); + auto add_const = std::make_shared(ngPrc, inputShape, shift); + auto add = std::make_shared(mul, add_const); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(mul, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(mul, reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -166,12 +166,12 @@ void EltwiseBeforeConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
@@ -150,14 +150,14 @@ void EltwiseBeforeConvTest::SetUp() {
     auto scale = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f);
     auto shift = ov::test::utils::generate_float_numbers(inputShape[1], -2.0f, 2.0f);
-    auto mul_const = std::make_shared(ngPrc, inputShape, scale);
-    auto mul = std::make_shared(params[0], mul_const);
-    auto add_const = std::make_shared(ngPrc, inputShape, shift);
-    auto add = std::make_shared(mul, add_const);
+    auto mul_const = std::make_shared(ngPrc, inputShape, scale);
+    auto mul = std::make_shared(params[0], mul_const);
+    auto add_const = std::make_shared(ngPrc, inputShape, shift);
+    auto add = std::make_shared(mul, add_const);
     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(mul, reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(mul, reshapePattern1, false);
     auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f);
@@ -166,12 +166,12 @@ void EltwiseBeforeConvTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {kernelShape[0] > 1 ? stride : 1, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights);
     auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = {1, outputChannels * widthAfterReshape };
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
     function = std::make_shared(reshape2, params, "EltwiseBeforeConvTest");
 }
@@ -234,8 +234,8 @@ void EltwiseWithTwoConvsAsInputsTest::SetUp() {
                              std::make_shared(ngPrc, ov::Shape(inputShape))};
     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
     auto filterWeights1 = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f);
@@ -245,15 +245,15 @@ void EltwiseWithTwoConvsAsInputsTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {stride_h, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights1);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights1);
     auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = {1, outputChannels * widthAfterReshape };
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(conv1, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(conv1, reshapePattern2, false);
-    auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape3 = std::make_shared(params[1], reshapePattern3, false);
+    auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape3 = std::make_shared(params[1], reshapePattern3, false);
     auto filterWeights2 = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f);
@@ -262,12 +262,12 @@ void EltwiseWithTwoConvsAsInputsTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {stride_h, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights2);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights2);
-    auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape4 = std::make_shared(conv2, reshapePattern4, false);
+    auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape4 = std::make_shared(conv2, reshapePattern4, false);
-    auto add = std::make_shared(reshape2, reshape4);
+    auto add = std::make_shared(reshape2, reshape4);
     function = std::make_shared(add, params, "EltwiseWithTwoConvsAsInputsTest");
 }
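Editor's sketch, not part of the patch: the recurring PadType change above only swaps the enum's namespace; the convolution itself is the v1 op. A minimal direct construction with ov::op::PadType::VALID; the strides and dilations shown are placeholders:

#include <memory>
#include "openvino/op/ops.hpp"

// data: NCHW activations, filters: OIHW weights.
ov::Output<ov::Node> valid_conv(const ov::Output<ov::Node>& data, const ov::Output<ov::Node>& filters) {
    return std::make_shared<ov::op::v1::Convolution>(data, filters,
                                                     ov::Strides{1, 1},         // strides
                                                     ov::CoordinateDiff{0, 0},  // pads_begin
                                                     ov::CoordinateDiff{0, 0},  // pads_end
                                                     ov::Strides{1, 1},         // dilations
                                                     ov::op::PadType::VALID);   // auto_pad
}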
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp
index 7e6e6f0147397f..9ee715f8409bab 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/eltwise_reshape_activation.cpp
@@ -39,13 +39,13 @@ void EltwiseReshapeActivation::SetUp() {
                              std::make_shared(ngPrc, ov::Shape(shapes[0]))};
     auto eltw = ngraph::builder::makeEltwise(input[0], input[1], ngraph::helpers::EltwiseTypes::ADD);
-    auto reshape_pattern1 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[1].size()}, shapes[1]);
-    auto reshape1 = std::make_shared(eltw, reshape_pattern1, false);
+    auto reshape_pattern1 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[1].size()}, shapes[1]);
+    auto reshape1 = std::make_shared(eltw, reshape_pattern1, false);
     auto relu = ngraph::builder::makeActivation(reshape1, ngPrc, ngraph::helpers::ActivationTypes::Relu);
-    auto reshape_pattern2 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[0].size()}, shapes[0]);
-    auto reshape2 = std::make_shared(relu, reshape_pattern2, false);
+    auto reshape_pattern2 = std::make_shared(ngraph::element::i64, ngraph::Shape{shapes[0].size()}, shapes[0]);
+    auto reshape2 = std::make_shared(relu, reshape_pattern2, false);
     function = std::make_shared(reshape2, input, "EltwiseReshapeActivation");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp
index a6b5491ea28197..d4ff136b32d262 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/fc_conv_fc.cpp
@@ -64,8 +64,8 @@ void FcAfterConvTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))};
     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
     auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.1f, 0.1f);
@@ -74,13 +74,13 @@ void FcAfterConvTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {kernelShape[0] > 1 ? stride : 1, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights);
     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = {1, outputChannels * widthAfterConv };
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
-    auto relu1 = std::make_shared(reshape2);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
+    auto relu1 = std::make_shared(reshape2);
     std::vector fc3_weights = ov::test::utils::generate_float_numbers(outFormShapes[1] * outFormShapes[1], -0.1f, 0.1f);
     auto fc3 = ngraph::builder::makeFullyConnected(relu1, ngPrc, outFormShapes[1], false, {}, fc3_weights);
@@ -154,8 +154,8 @@ void FcBeforeConvTest::SetUp() {
     auto fc2 = ngraph::builder::makeFullyConnected(fc1, ngPrc, inputShape[1], false, {}, fc2_weights);
     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(fc2, reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(fc2, reshapePattern1, false);
     auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.1f, 0.1f);
@@ -164,12 +164,12 @@ void FcBeforeConvTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {kernelShape[0] > 1 ? stride : 1, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights);
     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = {1, outputChannels * widthAfterConv };
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
     function = std::make_shared(reshape2, params, "FcBeforeConvTest");
 }
@@ -231,8 +231,8 @@ void FcBetweenConvsTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))};
     std::vector conv1InputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv1InputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv1InputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
     auto filter1Weights = ov::test::utils::generate_float_numbers(outputChannels * conv1InputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f);
@@ -241,30 +241,30 @@ void FcBetweenConvsTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {kernelShape[0] > 1 ? stride : 1, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filter1Weights);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filter1Weights);
     auto widthAfterConv1 = (conv1InputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes1 = {1, outputChannels * widthAfterConv1 };
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes1);
-    auto reshape2 = std::make_shared(conv1, reshapePattern2, false);
-    auto relu = std::make_shared(reshape2);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes1);
+    auto reshape2 = std::make_shared(conv1, reshapePattern2, false);
+    auto relu = std::make_shared(reshape2);
     auto fc_weights = ov::test::utils::generate_float_numbers(outFormShapes1[1] * outFormShapes1[1], -0.2f, 0.2f);
     auto fc = ngraph::builder::makeFullyConnected(relu, ngPrc, outFormShapes1[1], false, {}, fc_weights);
     std::vector conv2InputShape = {1, outputChannels, 1, widthAfterConv1};
-    auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv2InputShape);
-    auto reshape3 = std::make_shared(fc, reshapePattern3, false);
+    auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, conv2InputShape);
+    auto reshape3 = std::make_shared(fc, reshapePattern3, false);
     auto filter2Weights = ov::test::utils::generate_float_numbers(outputChannels * conv2InputShape[1], -0.2f, 0.2f);
     auto conv2 = ngraph::builder::makeConvolution(reshape3, ngPrc, { 1, 1 }, { 1, 1 }, { 0, 0 },
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filter2Weights);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filter2Weights);
     std::vector outFormShapes2 = {1, outputChannels * conv2InputShape[3]};
-    auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
-    auto reshape4 = std::make_shared(conv2, reshapePattern4, false);
+    auto reshapePattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes2);
+    auto reshape4 = std::make_shared(conv2, reshapePattern4, false);
     function = std::make_shared(reshape4, params, "FcBetweenConvsTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp
index a819f39d3410fe..6416f82ce5f8fe 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/first_connect_input_concat.cpp
@@ -35,10 +35,10 @@ void ConcatFirstInputTest::SetUp() {
         params.push_back(std::make_shared(ngPrc, ov::Shape(shape)));
     }
     auto const_second_param = ngraph::builder::makeConstant(ngPrc, {1, 8}, std::vector{-1.0f});
-    auto concat = std::make_shared(ngraph::OutputVector{params[0], const_second_param}, 1);
-    auto relu = std::make_shared(concat);
+    auto concat = std::make_shared(ngraph::OutputVector{params[0], const_second_param}, 1);
+    auto relu = std::make_shared(concat);
-    ngraph::ResultVector results{std::make_shared(relu)};
+    ngraph::ResultVector results{std::make_shared(relu)};
     function = std::make_shared(results, params, "ConcatMultiInput");
 }
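Editor's sketch, not part of the patch: ConcatFirstInputTest above joins a parameter with a constant row along axis 1. The same construction in isolation; the {1, 8} shape and the -1.0f fill value mirror the test but any matching shapes would do:

#include <memory>
#include "openvino/op/ops.hpp"

// 'input' is expected to be a {1, N} f32 tensor so ranks and non-axis dims match.
ov::Output<ov::Node> concat_with_const(const ov::Output<ov::Node>& input) {
    auto pad = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1, 8}, std::vector<float>(8, -1.0f));
    return std::make_shared<ov::op::v0::Concat>(ov::OutputVector{input, pad}, 1 /* axis */);
}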
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp
index ac480f6b567c4a..082e7b382914c6 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/fq_conv_fq_affine.cpp
@@ -78,26 +78,26 @@ void FqConvFqAffineTest::SetUp() {
     auto inputFQNode = ngraph::builder::makeFakeQuantize(params[0], ngraph::element::f32, levels[0], std::vector{},
                                                          { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax });
-    auto inputFQ = std::dynamic_pointer_cast(inputFQNode);
+    auto inputFQ = std::dynamic_pointer_cast(inputFQNode);
     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(inputFQ, reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(inputFQ, reshapePattern1, false);
     auto filterWeightsNode = ngraph::builder::makeConstant(ngPrc, {outputChannels, inputChannels, kernelShape[0], kernelShape[1]}, { 1.0f });
     auto convLowNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMin});
     auto convHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMax});
-    auto convWeightsFQNode = std::make_shared(filterWeightsNode,
+    auto convWeightsFQNode = std::make_shared(filterWeightsNode,
                                               convLowNode, convHighNode, convLowNode, convHighNode, levels[1]);
-    auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode);
+    auto convWeightsFQ = std::dynamic_pointer_cast(convWeightsFQNode);
-    auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 },
+    auto conv = std::make_shared(reshape1, convWeightsFQ, strides, std::vector{ 0, 0 },
                                  std::vector{ 0, 0 }, std::vector{ 1, 1 },
-                                 ngraph::op::PadType::VALID);
+                                 ov::op::PadType::VALID);
     auto biasesWeightsNode = ngraph::builder::makeConstant(ngPrc, {}, std::vector{ 0.0f });
-    auto add = std::make_shared(conv, biasesWeightsNode);
+    auto add = std::make_shared(conv, biasesWeightsNode);
     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / strides[1] + 1;
     auto heightAfterConv = (convInputShape[2] - kernelShape[0]) / strides[0] + 1;
@@ -105,26 +105,26 @@ void FqConvFqAffineTest::SetUp() {
     ngraph::Output nodeBeforeReshape;
     if (permute) {
-        auto permuteOrder = std::make_shared(ngraph::element::i64,
+        auto permuteOrder = std::make_shared(ngraph::element::i64,
                                              ngraph::Shape{4}, ngraph::Shape{{0, 3, 2, 1}});
-        auto transpose = std::make_shared(add, permuteOrder);
+        auto transpose = std::make_shared(add, permuteOrder);
         nodeBeforeReshape = transpose;
     } else {
         nodeBeforeReshape = add;
     }
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape2 = std::make_shared(nodeBeforeReshape, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape2 = std::make_shared(nodeBeforeReshape, reshapePattern2, false);
     auto matMulWeightsNode = ngraph::builder::makeConstant(ngPrc, {outFormShapes[1], outFormShapes[1]}, { 1.0f });
     auto matMulLowNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMin});
    auto matMulHighNode = ngraph::builder::makeConstant(ngraph::element::f32, std::vector{ 1 }, std::vector{inputDataMax});
-    auto matMulWeightsFQNode = std::make_shared(matMulWeightsNode,
+    auto matMulWeightsFQNode = std::make_shared(matMulWeightsNode,
                                                 matMulLowNode, matMulHighNode, matMulLowNode, matMulHighNode, levels[1]);
-    auto matMulWeightsFQ = std::dynamic_pointer_cast(matMulWeightsFQNode);
+    auto matMulWeightsFQ = std::dynamic_pointer_cast(matMulWeightsFQNode);
-    auto matmul = std::make_shared(reshape2, matMulWeightsFQ, false, true);
+    auto matmul = std::make_shared(reshape2, matMulWeightsFQ, false, true);
     function = std::make_shared(matmul, params, "fqConvfqAffine");
 }
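Editor's sketch, not part of the patch: the convWeightsFQNode/matMulWeightsFQNode constructions above wrap weights in a FakeQuantize bounded by low/high constants. A minimal standalone version; the [-1, 1] range is a placeholder, only the argument order mirrors the v0 op:

#include <cstddef>
#include <memory>
#include "openvino/op/ops.hpp"

// Quantizes 'data' to 'levels' steps over [-1, 1].
ov::Output<ov::Node> fq(const ov::Output<ov::Node>& data, std::size_t levels) {
    auto lo = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {-1.f});
    auto hi = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {1.f});
    // input_low/input_high clamp the input; output_low/output_high set the dequantized range
    return std::make_shared<ov::op::v0::FakeQuantize>(data, lo, hi, lo, hi, levels);
}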
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp
index 8599967c535104..02ea5d6fe24d66 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/fq_with_mixed_levels.cpp
@@ -35,10 +35,10 @@ void FqWithMixedLevelsTest::SetUp() {
                       size_t level1, const std::vector>& data1,
                       size_t level2, const std::vector>& data2,
                       size_t level3, const std::vector>& data3) {
-        auto sigmoid = std::make_shared(input);
+        auto sigmoid = std::make_shared(input);
         auto fake1 = ngraph::builder::makeFakeQuantize(sigmoid, ngPrc, level1, { 1 }, data1[0], data1[1], data1[2], data1[3]);
         std::vector weights = ov::test::utils::generate_float_numbers(shapes[1][0] * shapes[1][1], weights_min, weights_max);
-        auto constant = std::make_shared(ngPrc, ngraph::Shape{shapes[1][0], shapes[1][1]}, weights);
+        auto constant = std::make_shared(ngPrc, ngraph::Shape{shapes[1][0], shapes[1][1]}, weights);
         auto fake2 = ngraph::builder::makeFakeQuantize(constant, ngPrc, level2, { 1 }, data2[0], data2[1], data2[2], data2[3]);
         auto matmul = std::make_shared(fake1, fake2, false, true);
         auto bias = ngraph::builder::makeConstant(ngPrc, std::vector{shapes[0][0], shapes[1][0]}, std::vector{ 1.0 });
@@ -67,7 +67,7 @@ void FqWithMixedLevelsTest::SetUp() {
         std::numeric_limits::max(), {{ -1.0 }, { 1.0 }, { -1.0 }, { 1.0 }},
         std::numeric_limits::max(), {{ -2.5 }, { 2.5 }, { -2.5 }, { 2.5 }},
         std::numeric_limits::max(), {{ -5. } , { 5. }, { -5. }, { 5. }});
-    auto result = std::make_shared(input);
+    auto result = std::make_shared(input);
     function = std::make_shared(ngraph::ResultVector{result}, params, "FqWithMixedLevelsTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp
index a8d791fe931ed4..0d3ecb60be5025 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/handling_orientation_conv.cpp
@@ -26,32 +26,32 @@ namespace SubgraphTestsDefinitions {
                                  std::make_shared(ngPrc, ov::Shape{1, 336})};
     std::vector outFormShapes1 = { 1, 1, 168, 2 };
     std::vector outFormShapes2 = { 1, 336, 1, 1 };
-    auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
-    auto reshape1 = std::make_shared(params[0], pattern1, false);
+    auto pattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes1);
+    auto reshape1 = std::make_shared(params[0], pattern1, false);
-    auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes2);
-    auto reshape2 = std::make_shared(params[1], pattern2, false);
+    auto pattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes2);
+    auto reshape2 = std::make_shared(params[1], pattern2, false);
-    auto permute1 = std::make_shared(reshape1,
-        ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
+    auto permute1 = std::make_shared(reshape1,
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 3, 1, 2 }));
     auto conv1 = ngraph::builder::makeConvolution(permute1, ngPrc, { 1, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
-                                                  ngraph::op::PadType::VALID, 12);
+                                                  ov::op::PadType::VALID, 12);
-    auto permute2 = std::make_shared(conv1,
-        ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
+    auto permute2 = std::make_shared(conv1,
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{ 4 }, { 0, 2, 3, 1 }));
     auto conv2 = ngraph::builder::makeConvolution(reshape2, ngPrc, { 1, 1 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { 1, 1 },
-                                                  ngraph::op::PadType::VALID, 336);
+                                                  ov::op::PadType::VALID, 336);
     std::vector outFormShapes3 = { 1, 1932 };
     std::vector outFormShapes4 = { 1, 336 };
-    auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
-    auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes4);
-    auto reshape3 = std::make_shared(permute2, pattern3, false);
-    auto reshape4 = std::make_shared(conv2, pattern4, false);
-    ngraph::ResultVector results{ std::make_shared(reshape3),
-                                  std::make_shared(reshape4)};
+    auto pattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes3);
+    auto pattern4 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes4);
+    auto reshape3 = std::make_shared(permute2, pattern3, false);
+    auto reshape4 = std::make_shared(conv2, pattern4, false);
+    ngraph::ResultVector results{ std::make_shared(reshape3),
+                                  std::make_shared(reshape4)};
     function = std::make_shared(results, params, "RemovePermutationPass");
 }
} // namespace SubgraphTestsDefinitions
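Editor's sketch, not part of the patch: the permute1/permute2 nodes above are v1::Transpose ops driven by an i64 order constant, exactly the pattern Constant::create migrates to ov::op::v0::Constant. In isolation, with an NCHW-to-NHWC order as a placeholder:

#include <memory>
#include "openvino/op/ops.hpp"

// Reorders NCHW data to NHWC; only the order vector changes per use.
ov::Output<ov::Node> to_nhwc(const ov::Output<ov::Node>& data) {
    auto order = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, std::vector<int64_t>{0, 2, 3, 1});
    return std::make_shared<ov::op::v1::Transpose>(data, order);
}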
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp
index 6ec9cb30bc7791..58381184679c25 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/input_conv.cpp
@@ -86,7 +86,7 @@ void InputConvTest::SetUp() {
                                                   {0, 0},
                                                   {0, 0},
                                                   {1, 1},
-                                                  ngraph::op::PadType::VALID,
+                                                  ov::op::PadType::VALID,
                                                   outputChannels,
                                                   true,
                                                   generateWeights(outputChannels, kernelShape[1]));
@@ -94,13 +94,13 @@ void InputConvTest::SetUp() {
     if (addReshape) {
         size_t numOutputWidth = (((inputShape[1] * inputShape[2] * inputShape[3] - kernelShape[1] * kernelShape[0]) / (inputShape[1] * stride)) + 1);
         std::vector outFormShapes0 = { 1, outputChannels * numOutputWidth };
-        auto pattern0 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes0);
-        auto reshape0 = std::make_shared(conv0, pattern0, false);
+        auto pattern0 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes0);
+        auto reshape0 = std::make_shared(conv0, pattern0, false);
-        ngraph::ResultVector results{ std::make_shared(reshape0) };
+        ngraph::ResultVector results{ std::make_shared(reshape0) };
         function = std::make_shared(results, params, "InputConvTest");
     } else {
-        ngraph::ResultVector results{ std::make_shared(conv0) };
+        ngraph::ResultVector results{ std::make_shared(conv0) };
         function = std::make_shared(results, params, "InputConvTest");
     }
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp
index 08202b7a982d95..ffc2dc218da338 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/input_split_concat.cpp
@@ -36,14 +36,14 @@ void InputSplitConcatTest::SetUp() {
     auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
     auto split = std::make_shared(params[0], split_axis_op, 2);
-    auto relu1 = std::make_shared(split->output(0));
+    auto relu1 = std::make_shared(split->output(0));
     auto const_vals = ov::test::utils::generate_float_numbers(inputShape[1], -5.0f, 5.0f);
     auto constant = ngraph::builder::makeConstant(ngPrc, inputShape, const_vals);
-    auto concat = std::make_shared(ngraph::OutputVector{constant, split->output(1)}, 1);
-    auto relu2 = std::make_shared(concat);
+    auto concat = std::make_shared(ngraph::OutputVector{constant, split->output(1)}, 1);
+    auto relu2 = std::make_shared(concat);
-    ngraph::ResultVector results{ std::make_shared(relu1), std::make_shared(relu2) };
+    ngraph::ResultVector results{ std::make_shared(relu1), std::make_shared(relu2) };
     function = std::make_shared(results, params, "InputSplitConcatTest");
 }
} // namespace SubgraphTestsDefinitions
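Editor's sketch, not part of the patch: InputSplitConcatTest above splits its input in two along axis 1 and consumes each branch separately. The same v1::Split construction in isolation; the axis and split count mirror the test, the function name is hypothetical:

#include <memory>
#include "openvino/op/ops.hpp"

// Splits 'data' into two equal halves along axis 1 and returns both outputs.
ov::OutputVector split_in_two(const ov::Output<ov::Node>& data) {
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
    auto split = std::make_shared<ov::op::v1::Split>(data, axis, 2 /* num_splits */);
    return {split->output(0), split->output(1)};
}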
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp
index 4b33f924169e59..adc4de27a10e49 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_act_add.cpp
@@ -37,11 +37,11 @@ void MatMulActAddTest::SetUp() {
     auto mul_const = ngraph::builder::makeConstant(ngPrc, { outFormShapes[1], inputSize },
                                                    ov::test::utils::generate_float_numbers(outFormShapes[1] * inputSize, -0.5f, 0.5f), false);
-    auto matmul = std::make_shared(params[0], mul_const, false, true);
+    auto matmul = std::make_shared(params[0], mul_const, false, true);
-    auto tanh = std::make_shared(matmul);
-    auto eltw = std::make_shared(matmul, tanh);
-    auto res = std::make_shared(eltw);
+    auto tanh = std::make_shared(matmul);
+    auto eltw = std::make_shared(matmul, tanh);
+    auto res = std::make_shared(eltw);
     function = std::make_shared(res, params, "MatMul_Act_Add");
 }
} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp
index 01b628d63cf8fd..098d33a76226df 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/matmul_squeeze_add.cpp
@@ -45,24 +45,24 @@ void MatmulSqueezeAddTest::SetUp() {
                                                     {outputSize, inputShape[1]},
                                                     ov::test::utils::generate_float_numbers(outputSize * inputShape[1], 0, 1, seed),
                                                     false);
-    auto matmul_0 = std::make_shared(params[0], constant_0, false, true);
+    auto matmul_0 = std::make_shared(params[0], constant_0, false, true);
     auto constant_1 =
-        std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0});
-    auto unsqueeze_0 = std::make_shared(matmul_0, constant_1);
+        std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0});
+    auto unsqueeze_0 = std::make_shared(matmul_0, constant_1);
     auto constant_2 = ngraph::builder::makeConstant(
         element_type,
         {1, inputShape[0], outputSize},
         ov::test::utils::generate_float_numbers(inputShape[0] * outputSize, 0, 1, seed),
         false);
-    auto add_0 = std::make_shared(unsqueeze_0, constant_2);
+    auto add_0 = std::make_shared(unsqueeze_0, constant_2);
     auto constant_3 =
-        std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0});
-    auto squeeze_0 = std::make_shared(add_0, constant_3);
+        std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0});
+    auto squeeze_0 = std::make_shared(add_0, constant_3);
-    ngraph::ResultVector results{std::make_shared(squeeze_0)};
+    ngraph::ResultVector results{std::make_shared(squeeze_0)};
     function = std::make_shared(results, params, "MatmulSqueezeAddTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp
index 65d50b50aaaab3..28a8880fd4dfff 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_LSTMCell.cpp
@@ -13,7 +13,6 @@
 #include "ov_models/builders.hpp"
 using namespace ngraph;
-using namespace opset7;
 namespace ov {
 namespace test {
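Editor's sketch, not part of the patch: MatmulSqueezeAddTest above sandwiches a broadcast Add between an Unsqueeze and a Squeeze on a MatMul output. The same chain in isolation; shapes in the comments are placeholders, transpose_b = true multiplies by the transposed weights as in the test:

#include <memory>
#include "openvino/op/ops.hpp"

// x: {N, K}, w: {M, K}; the result comes back as {N, M}.
ov::Output<ov::Node> matmul_squeeze(const ov::Output<ov::Node>& x, const ov::Output<ov::Node>& w) {
    auto matmul = std::make_shared<ov::op::v0::MatMul>(x, w, false, true);
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, std::vector<int64_t>{0});
    auto lifted = std::make_shared<ov::op::v0::Unsqueeze>(matmul, axis);  // {1, N, M}
    return std::make_shared<ov::op::v0::Squeeze>(lifted, axis);           // back to {N, M}
}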
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp
index 314fc9bac749d1..bd2a0177ef20e8 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp
@@ -55,18 +55,18 @@ void MemoryEltwiseReshapeConcatTest::initTestModel() {
     auto memory_constant = ngraph::builder::makeConstant(ngPrc, input_dims, memory_init);
     memory_constant->set_friendly_name("memory_constant");
-    auto memory_read = std::make_shared(memory_constant, "memory");
+    auto memory_read = std::make_shared(memory_constant, "memory");
     memory_read->set_friendly_name("memory_read");
     auto mul = ngraph::builder::makeEltwise(input_parameter[0], memory_read, ngraph::helpers::EltwiseTypes::MULTIPLY);
     mul->set_friendly_name("multiplication");
-    auto memory_write = std::make_shared(mul, "memory");
+    auto memory_write = std::make_shared(mul, "memory");
     memory_write->set_friendly_name("memory_write");
-    auto reshape_1_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({inputSize, concatSize}));
+    auto reshape_1_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({inputSize, concatSize}));
     reshape_1_pattern->set_friendly_name("reshape_pattern");
-    auto reshape_1 = std::make_shared(mul, reshape_1_pattern, false);
+    auto reshape_1 = std::make_shared(mul, reshape_1_pattern, false);
     reshape_1->set_friendly_name("reshape");
     auto concat_constant = ngraph::builder::makeConstant(ngPrc, {1, concatSize}, concat_vals);
@@ -77,9 +77,9 @@ void MemoryEltwiseReshapeConcatTest::initTestModel() {
     memory_write->add_control_dependency(memory_read);
     concat->add_control_dependency(memory_write);
-    auto final_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{4},
+    auto final_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{4},
                                                   std::vector({1, 1, inputSize + 1, concatSize}));
-    auto final_reshape = std::make_shared(concat, final_reshape_pattern, false);
+    auto final_reshape = std::make_shared(concat, final_reshape_pattern, false);
     function = std::make_shared(final_reshape, input_parameter, "memory_multiply_reshape_concat");
 }
@@ -94,14 +94,14 @@ void MemoryEltwiseReshapeConcatTest::initNgraphFriendlyModel() {
     auto mul = ngraph::builder::makeEltwise(input_parameter[0], memory_constant, ngraph::helpers::EltwiseTypes::MULTIPLY);
     mul->set_friendly_name("multiplication");
-    auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector({1, inputSize, concatSize}));
+    auto reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{3}, std::vector({1, inputSize, concatSize}));
     reshape_pattern->set_friendly_name("reshape_pattern");
-    auto reshape = std::make_shared(mul, reshape_pattern, false);
+    auto reshape = std::make_shared(mul, reshape_pattern, false);
     reshape->set_friendly_name("reshape");
-    auto squeeze_const = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0);
+    auto squeeze_const = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0);
     squeeze_const->set_friendly_name("squeeze_const");
-    auto squeeze = std::make_shared(reshape, squeeze_const);
+    auto squeeze = std::make_shared(reshape, squeeze_const);
     squeeze->set_friendly_name("squeeze");
     auto concat_constant = ngraph::builder::makeConstant(ngPrc, {1, concatSize}, concat_vals);
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp
index 4382efcd8491c6..f23a1fca68166c 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/memory_fq_concat_prelu.cpp
@@ -98,7 +98,7 @@ void MemoryFqConcatPrelu::SetUp() {
         input.push_back(std::make_shared(ngPrc, ov::Shape(shape)));
     }
     auto memory_read = ngraph::builder::makeConstant(ngPrc, {inputs[0]}, {0});
-    auto read = std::make_shared(memory_read, "variable1");
+    auto read = std::make_shared(memory_read, "variable1");
     auto fake_constatnt = ngraph::builder::makeConstant(ngPrc, {inputs[0]}, {0});
     auto fake = ngraph::builder::makeFakeQuantize(fake_constatnt, ngPrc, std::get<0>(fake_quantize_params),
         std::get<4>(fake_quantize_params), std::get<5>(fake_quantize_params));
     auto concat = std::make_shared(ov::OutputVector{read, fake, input[0]}, 1);
-    auto prelu_constant = ngraph::op::Constant::create(ngPrc, {1}, {-2});
-    auto prelu = std::make_shared(concat, prelu_constant);
+    auto prelu_constant = ov::op::v0::Constant::create(ngPrc, {1}, {-2});
+    auto prelu = std::make_shared(concat, prelu_constant);
     auto begin = std::get<0>(strided_slice_params);
     auto end = std::get<1>(strided_slice_params);
@@ -130,8 +130,8 @@ void MemoryFqConcatPrelu::SetUp() {
         std::vector{},
         std::vector{});
-    auto assign = std::make_shared(slice, "variable1");
-    auto result = std::make_shared(prelu);
+    auto assign = std::make_shared(slice, "variable1");
+    auto result = std::make_shared(prelu);
     assign->add_control_dependency(read);
     result->add_control_dependency(assign);
     function = std::make_shared(ngraph::ResultVector{result}, input, "memory_fq_concat_prelu");
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp
index e77ec44724b992..05daf1dfcf54ab 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/multi_crops_to_concat.cpp
@@ -68,7 +68,7 @@ void MultiCropsToConcatTest::SetUp() {
         std::vector{ 0, 0 },
         std::vector{ 0, 0 });
-    auto concat1 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1);
+    auto concat1 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1);
     std::shared_ptr result;
     // Case with 3 crops
@@ -87,10 +87,10 @@ void MultiCropsToConcatTest::SetUp() {
             std::vector{ 0, 0 },
             std::vector{ 0, 0 });
-        auto concat2 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1);
-        result = std::make_shared(concat2);
+        auto concat2 = std::make_shared(ngraph::OutputVector{crop1, crop2}, 1);
+        result = std::make_shared(concat2);
     } else {
-        result = std::make_shared(concat1);
+        result = std::make_shared(concat1);
     }
     function = std::make_shared(result, params, "InputSplitConcatTest");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp
index e66bb94703f230..094f487ca738b9 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/multi_input_scale.cpp
@@ -46,7 +46,7 @@ void MultipleInputScaleTest::SetUp() {
     auto add = ngraph::builder::makeEltwise(fc1, fc2, ngraph::helpers::EltwiseTypes::ADD);
-    auto result = std::make_shared(add);
+    auto result = std::make_shared(add);
     function = std::make_shared(result, input, "multiple_input_scale");
     functionRefs = ngraph::clone_function(*function);
 }
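Editor's sketch, not part of the patch: the prelu_constant/prelu pair in MemoryFqConcatPrelu above is a v0::PRelu with a single broadcast slope. In isolation; the -2.0f slope mirrors the test's constant, the f32 type is an assumption:

#include <memory>
#include "openvino/op/ops.hpp"

// A single negative slope broadcast over the whole tensor.
ov::Output<ov::Node> prelu(const ov::Output<ov::Node>& data) {
    auto slope = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{1}, {-2.0f});
    return std::make_shared<ov::op::v0::PRelu>(data, slope);
}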
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp
index 3404f0a4597d22..0e6c6fd03e6037 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/multioutput_eltwise_squeeze_eltwise.cpp
@@ -36,7 +36,7 @@ namespace SubgraphTestsDefinitions {
     auto eltwise_const = ngraph::builder::makeConstant(ngPrc,
                                                        ngraph::Shape{input[0]->get_shape()},
                                                        std::vector{-1.0f});
-    auto eltwise = std::make_shared(input[0], eltwise_const);
+    auto eltwise = std::make_shared(input[0], eltwise_const);
     auto squeeze_constant = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0});
     auto squeeze = std::make_shared(eltwise, squeeze_constant);
@@ -45,10 +45,10 @@ namespace SubgraphTestsDefinitions {
     auto eltwise_const2 = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector{1.01f});
     auto eltwise_const3 = ngraph::builder::makeConstant(ngPrc, ngraph::Shape{1}, std::vector{1.01f});
-    auto eltwise2 = std::make_shared(eltwise, eltwise_const2);
-    auto eltwise3 = std::make_shared(unsqueeze, eltwise_const3);
-    ngraph::ResultVector results{std::make_shared(eltwise2),
-                                 std::make_shared(eltwise3)};
+    auto eltwise2 = std::make_shared(eltwise, eltwise_const2);
+    auto eltwise3 = std::make_shared(unsqueeze, eltwise_const3);
+    ngraph::ResultVector results{std::make_shared(eltwise2),
+                                 std::make_shared(eltwise3)};
     function = std::make_shared(results, input, "eltwise_reshape_eltwise_multioutput");
 }
} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp
index d416ee03fdc5aa..01794994ce83d2 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_connect_split_concat.cpp
@@ -26,18 +26,18 @@ void MultipleConnectSplitConcatTest::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape{1, 256})};
-    auto relu_start = std::make_shared(params[0]);
+    auto relu_start = std::make_shared(params[0]);
     auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1});
     auto split = std::make_shared(relu_start, split_axis_op, 1);
-    auto concat = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)}, 1);
-    auto concat_2 = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)},
+    auto concat = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)}, 1);
+    auto concat_2 = std::make_shared(ngraph::OutputVector{split->output(0), split->output(0)},
                                      1);
-    auto relu = std::make_shared(concat);
-    auto relu_2 = std::make_shared(concat_2);
+    auto relu = std::make_shared(concat);
+    auto relu_2 = std::make_shared(concat_2);
     ngraph::ResultVector resultVector{
-        std::make_shared(relu),
-        std::make_shared(relu_2)
+        std::make_shared(relu),
+        std::make_shared(relu_2)
     };
     function = std::make_shared(resultVector, params, "Multiple_connection_split_concat");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp
index cf1d06993e46f0..cfee87e744a466 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/multiple_input_fq.cpp
@@ -58,7 +58,7 @@ void MultipleInputTest::SetUp() {
     auto fake_add4 = ngraph::builder::makeFakeQuantize(add4, ngPrc, std::numeric_limits::max(), { 1 },
                                                        { 5 * minInput }, { 5 * maxInput }, { 5 * minInput }, { 5 * maxInput });
-    auto result = std::make_shared(fake_add4);
+    auto result = std::make_shared(fake_add4);
     function = std::make_shared(ngraph::ResultVector{result}, input, "multiple_input");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp b/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp
index bc276d28dbd5d3..7ed5349556274c 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/negative_memory_layer_offset.cpp
@@ -36,14 +36,14 @@ namespace SubgraphTestsDefinitions {
             memory_init.emplace_back(static_cast(dist(gen)));
         ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})};
-        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init);
-        auto mem_r = std::make_shared(mem_c, "memory");
+        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init);
+        auto mem_r = std::make_shared(mem_c, "memory");
         // Use memory layer as the second input of 'concat' to get negative offset
-        auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_r }, 1);
+        auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_r }, 1);
         auto split = ngraph::builder::makeVariadicSplit(concat, { hiddenSize, inputSize }, 1);
-        auto mem_w = std::make_shared(split->output(0), "memory");
-        auto sigm = std::make_shared(split->output(1));
+        auto mem_w = std::make_shared(split->output(0), "memory");
+        auto sigm = std::make_shared(split->output(1));
         mem_w->add_control_dependency(mem_r);
         sigm->add_control_dependency(mem_w);
@@ -58,10 +58,10 @@ namespace SubgraphTestsDefinitions {
         std::tie(netPrecision, targetDevice, inputSize, hiddenSize, std::ignore) = this->GetParam();
         auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
         ov::ParameterVector input{std::make_shared(ngPrc, ov::Shape{1, inputSize})};
-        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init);
-        auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_c }, 1);
+        auto mem_c = std::make_shared(ngPrc, ngraph::Shape{ 1, hiddenSize }, memory_init);
+        auto concat = std::make_shared(ngraph::OutputVector{ input[0], mem_c }, 1);
         auto split = ngraph::builder::makeVariadicSplit(concat, { hiddenSize, inputSize }, 1);
-        auto sigm = std::make_shared(split->output(1));
+        auto sigm = std::make_shared(split->output(1));
         function = std::make_shared(sigm, input, "negative_memory_layer_offset_nonmemory");
     }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp
index ce18ea90f050c9..7504596faf75a4 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_reshape_result.cpp
@@ -35,9 +35,9 @@ void ParamReshapeResult::SetUp() {
     auto shape = inputShape;
     shape[shape.size() - 2] *= 2;
     shape[shape.size() - 1] /= 2;
-    auto reshape_const = std::make_shared(ngraph::element::Type_t::i64,
+    auto reshape_const = std::make_shared(ngraph::element::Type_t::i64,
                                           ngraph::Shape{shape.size()}, shape);
-    auto reshape = std::make_shared(params[0], reshape_const, false);
+    auto reshape = std::make_shared(params[0], reshape_const, false);
     function = std::make_shared(reshape, params, "ParamReshapeResult");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp
index e11bc877cd4605..1bf29f54c76b1a 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp
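Editor's sketch, not part of the patch: the makeVariadicSplit helper used in negative_memory_layer_offset above wraps a v1::VariadicSplit that cuts a {1, hidden + input} concat back into its two unequal parts. The underlying op in isolation; the axis value mirrors the test:

#include <memory>
#include "openvino/op/ops.hpp"

// Splits 'data' along axis 1 into pieces of length 'hidden' and 'input'.
ov::OutputVector split_state(const ov::Output<ov::Node>& data, int64_t hidden, int64_t input) {
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
    auto lengths = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, std::vector<int64_t>{hidden, input});
    auto split = std::make_shared<ov::op::v1::VariadicSplit>(data, axis, lengths);
    return {split->output(0), split->output(1)};
}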
+++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp
@@ -23,8 +23,8 @@ std::string ParameterResultSubgraphTestBase::getTestCaseName(const testing::Test
 }
 std::shared_ptr ParameterResultSubgraphTestBase::createModel(const ov::PartialShape& shape) {
-    auto parameter = std::make_shared(ov::element::f32, shape);
-    const ngraph::ResultVector results{std::make_shared(parameter)};
+    auto parameter = std::make_shared(ov::element::f32, shape);
+    const ngraph::ResultVector results{std::make_shared(parameter)};
     ngraph::ParameterVector params = {parameter};
     auto model = std::make_shared(results, params, "ParameterResult");
     return model;
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp
index 3a4b591b4f447b..c68222efcf659b 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/parameter_shapeof_result.cpp
@@ -24,9 +24,9 @@ void ParameterShapeOfResultSubgraphTest::SetUp() {
     std::tie(inType, targetDevice) = this->GetParam();
     inPrc = InferenceEngine::details::convertPrecision(inType);
-    const auto parameter = std::make_shared(inType, ngraph::Shape{1, 3, 10, 10});
-    const auto shapeOf = std::make_shared(parameter);
-    const ngraph::ResultVector results{std::make_shared(shapeOf)};
+    const auto parameter = std::make_shared(inType, ngraph::Shape{1, 3, 10, 10});
+    const auto shapeOf = std::make_shared(parameter);
+    const ngraph::ResultVector results{std::make_shared(shapeOf)};
     ngraph::ParameterVector params = {parameter};
     function = std::make_shared(results, params, "ParameterShapeOfResult");
 }
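Editor's sketch, not part of the patch: the ParameterShapeOfResult model above is a three-node graph. The stripped template arguments make the ShapeOf version unrecoverable here; the v3 op is shown as one plausible reading, and the input shape mirrors the test:

#include <memory>
#include "openvino/core/model.hpp"
#include "openvino/op/ops.hpp"

std::shared_ptr<ov::Model> shapeof_model() {
    auto parameter = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3, 10, 10});
    auto shape_of = std::make_shared<ov::op::v3::ShapeOf>(parameter);  // emits {1, 3, 10, 10} as i64
    auto result = std::make_shared<ov::op::v0::Result>(shape_of);
    return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{parameter}, "ParameterShapeOfResult");
}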
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp
index 8f0c34ec088df8..1c2f28ea17618b 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_concat_permute.cpp
@@ -33,19 +33,19 @@ void PermuteConcatConcatPermute::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(net_precision);
-    auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape});
+    auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape});
     std::vector permute_param = {1, 0};
     auto permute_params =
-        ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{permute_param.size()}, permute_param);
-    auto permute_1 = std::make_shared(input_param, permute_params);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_param.size()}, permute_param);
+    auto permute_1 = std::make_shared(input_param, permute_params);
     auto const_input_1 = CreateConst(input_shape, ngPrc, false);
-    auto concat_1 = std::make_shared(ngraph::OutputVector{const_input_1, permute_1}, 0);
+    auto concat_1 = std::make_shared(ngraph::OutputVector{const_input_1, permute_1}, 0);
     auto const_input_2 = CreateConst(input_shape, ngPrc, true);
-    auto concat_2 = std::make_shared(ngraph::OutputVector{concat_1, const_input_2}, 0);
+    auto concat_2 = std::make_shared(ngraph::OutputVector{concat_1, const_input_2}, 0);
-    auto permute_2 = std::make_shared(concat_2, permute_params);
+    auto permute_2 = std::make_shared(concat_2, permute_params);
     function = std::make_shared(permute_2,
                                 ngraph::ParameterVector{input_param},
@@ -53,7 +53,7 @@ void PermuteConcatConcatPermute::SetUp() {
     range_ = InferenceEngine::details::product(input_shape);
 }
-std::shared_ptr PermuteConcatConcatPermute::CreateConst(
+std::shared_ptr PermuteConcatConcatPermute::CreateConst(
     const std::vector& input_shape,
     const ::ngraph::element::Type& precision,
     bool use_1_as_first_dimension) {
@@ -75,7 +75,7 @@ std::shared_ptr PermuteConcatConcatPermute::CreateCons
     const auto const_input_shape = ngraph::Shape{const_input_shape_vec};
     auto const_input_values_size = InferenceEngine::details::product(const_input_shape_vec);
     auto const_input_values = std::vector(const_input_values_size, 0);
-    return ngraph::opset9::Constant::create(precision, const_input_shape, const_input_values);
+    return ov::op::v0::Constant::create(precision, const_input_shape, const_input_values);
 }
 void PermuteConcatConcatPermute::Validate() {
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp
index a469c997608a0f..1066e19bba4763 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/permute_concat_permute.cpp
@@ -38,11 +38,11 @@ void PermuteConcatPermute::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
-    auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape});
+    auto input_param = std::make_shared(ngPrc, ngraph::Shape{input_shape});
     auto permute_params_1 =
-        ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{permute_1_param.size()}, permute_1_param);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_1_param.size()}, permute_1_param);
-    auto permute_1 = std::make_shared(input_param, permute_params_1);
+    auto permute_1 = std::make_shared(input_param, permute_params_1);
     auto const_input_shape_vec = std::vector{1};
     const_input_shape_vec.insert(const_input_shape_vec.end(), input_shape.begin(), std::prev(input_shape.end()));
@@ -50,16 +50,16 @@ void PermuteConcatPermute::SetUp() {
     auto const_input_values_size = InferenceEngine::details::product(const_input_shape_vec);
     auto const_input_values = std::vector(const_input_values_size, 0);
-    auto const_input_1 = ngraph::opset9::Constant::create(ngPrc, const_input_shape, const_input_values);
-    auto const_input_2 = ngraph::opset9::Constant::create(ngPrc, const_input_shape, const_input_values);
-    auto const_input_3 = ngraph::opset9::Constant::create(ngPrc, const_input_shape, const_input_values);
+    auto const_input_1 = ov::op::v0::Constant::create(ngPrc, const_input_shape, const_input_values);
+    auto const_input_2 = ov::op::v0::Constant::create(ngPrc, const_input_shape, const_input_values);
+    auto const_input_3 = ov::op::v0::Constant::create(ngPrc, const_input_shape, const_input_values);
-    auto concat = std::make_shared(
+    auto concat = std::make_shared(
         ngraph::OutputVector{const_input_1, const_input_2, permute_1, const_input_3}, 0);
     auto permute_params_2 =
-        ngraph::opset9::Constant::create(ngraph::element::i64, ngraph::Shape{permute_2_param.size()}, permute_2_param);
-    auto permute_2 = std::make_shared(concat, permute_params_2);
+        ov::op::v0::Constant::create(ngraph::element::i64, ngraph::Shape{permute_2_param.size()}, permute_2_param);
+    auto permute_2 = std::make_shared(concat, permute_params_2);
     function = std::make_shared(permute_2, ngraph::ParameterVector{input_param}, "permute_concat_permute");
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp
index 0e48dba65c233d..a210c2091e7652 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp
@@ -15,7 +15,7 @@ std::string QuantConvBackpropDataLayerTest::getTestCaseName(const testing::TestP
     ov::Shape inputShapes;
     std::string targetDevice;
     std::tie(groupConvBackpropDataParams, element_type, inputShapes, targetDevice) = obj.param;
-    ngraph::op::PadType padType;
+    ov::op::PadType padType;
     ov::Shape kernel, stride, dilation;
     std::vector padBegin, padEnd;
     size_t convOutChannels;
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp
index a97952389171a2..3c5134b0dbdd6d 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp
@@ -76,7 +76,7 @@ void QuantGroupConvBackpropDataLayerTest::SetUp() {
     auto weightsFq = ngraph::builder::makeFakeQuantize(weightsNode, element_type, quantLevels, weightsFqConstShapes);
-    auto groupConvBackpropData = std::dynamic_pointer_cast(
+    auto groupConvBackpropData = std::dynamic_pointer_cast(
             ngraph::builder::makeGroupConvolutionBackpropData(dataFq, weightsFq, element_type, stride, padBegin, padEnd, dilation, padType));
     ov::ResultVector results{std::make_shared(groupConvBackpropData)};
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp
index 4c8dbd44e041b8..ca4bec6cacdd64 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/reduce_eltwise.cpp
@@ -50,16 +50,16 @@ void ReduceEltwiseTest::SetUp() {
             FAIL() << "Reduce op doesn't support operation type: " << opType;
     }
     auto reductionAxesNode = std::dynamic_pointer_cast(
-        std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes));
+        std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes));
-    auto reduce = std::make_shared(params[0], reductionAxesNode, keepDims);
+    auto reduce = std::make_shared(params[0], reductionAxesNode, keepDims);
     std::vector constShape(reduce.get()->get_output_partial_shape(0).rank().get_length(), 1);
     ASSERT_GT(constShape.size(), 2);
     constShape[2] = inputShape.back();
     auto constant = ngraph::builder::makeConstant(ngPrc, constShape, {}, true);
     auto eltw = ngraph::builder::makeEltwise(reduce, constant, ngraph::helpers::EltwiseTypes::MULTIPLY);
-    ngraph::ResultVector results{std::make_shared(eltw)};
+    ngraph::ResultVector results{std::make_shared(eltw)};
     function = std::make_shared(results, params, "ReduceEltwise");
 }
} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp
index c14250547f4de8..1779f62ec1ff44 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/relu_split_reshape.cpp
@@ -35,7 +35,7 @@ void ReluSplitReshape::SetUp() {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))};
-    auto relu = std::make_shared(params[0]);
+    auto relu = std::make_shared(params[0]);
     auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{static_cast(splitAxis)});
     auto split = std::make_shared(relu, split_axis_op, splitNum);
@@ -43,9 +43,9 @@ void ReluSplitReshape::SetUp() {
     auto shape = split->get_output_shape(0);
     shape[shape.size() - 2] *= 2;
     shape[shape.size() - 1] /= 2;
-    auto reshape_const = std::make_shared(ngraph::element::Type_t::i64,
+    auto reshape_const = std::make_shared(ngraph::element::Type_t::i64,
                                           ngraph::Shape{shape.size()}, shape);
-    auto reshape = std::make_shared(split->output(0), reshape_const, false);
+    auto reshape = std::make_shared(split->output(0), reshape_const, false);
     function = std::make_shared(reshape, params, "ReluSplitReshape");
 }
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp
index 541bbcbd7ba08d..36ea849035635c 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/reshape_permute_reshape.cpp
@@ -27,18 +27,18 @@ namespace SubgraphTestsDefinitions {
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     std::vector shape_input{1, input_dim};
     ov::ParameterVector input {std::make_shared(ngPrc, ov::Shape(shape_input))};
-    auto reshape1_pattern = std::make_shared(ngraph::element::i64,
+    auto reshape1_pattern = std::make_shared(ngraph::element::i64,
                                              ngraph::Shape{inputs[0].size()}, inputs[0]);
-    auto reshape1 = std::make_shared(input[0], reshape1_pattern, false);
-    auto permute_params = std::make_shared(ngraph::element::i64,
+    auto reshape1 = std::make_shared(input[0], reshape1_pattern, false);
+    auto permute_params = std::make_shared(ngraph::element::i64,
                                            ngraph::Shape{inputs[1].size()}, inputs[1]);
-    auto permute = std::make_shared(reshape1, permute_params);
-    auto reshape2_pattern = std::make_shared(ngraph::element::i64,
+    auto permute = std::make_shared(reshape1, permute_params);
+    auto reshape2_pattern = std::make_shared(ngraph::element::i64,
                                              ngraph::Shape{2}, std::vector{1, input_dim});
-    auto reshape2 = std::make_shared(permute, reshape2_pattern, false);
+    auto reshape2 = std::make_shared(permute, reshape2_pattern, false);
     function = std::make_shared(reshape2, input, "reshape_permute_reshape");
 }
} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp
index 45109a87d2349c..87c3ec6a3c53f5 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/scale_shift.cpp
@@ -32,10 +32,10 @@ namespace SubgraphTestsDefinitions {
     paramsShape = ngraph::Shape(inputShapes[1]);
     auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision);
     ov::ParameterVector paramsIn{std::make_shared(ngPrc, ov::Shape(inputShapes[0]))};
-    auto mul_const = std::make_shared(ngPrc, paramsShape, scale);
-    auto mul = std::make_shared(paramsIn[0], mul_const);
-    auto add_const = std::make_shared(ngPrc, paramsShape, shift);
-    auto add = std::make_shared(mul, add_const);
+    auto mul_const = std::make_shared(ngPrc, paramsShape, scale);
+    auto mul = std::make_shared(paramsIn[0], mul_const);
+    auto add_const = std::make_shared(ngPrc, paramsShape, shift);
+    auto add = std::make_shared(mul, add_const);
     function = std::make_shared(add, paramsIn, "scale_shift");
 }
} // namespace SubgraphTestsDefinitions
diff --git a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp
index a03023d57cfbe5..b92caa7bfb06f6 100644
--- a/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp
+++ b/src/tests/functional/shared_test_classes/src/subgraph/scaleshift_conv_scaleshift.cpp
@@ -64,8 +64,8 @@ void ScaleShiftAfterConvTest::SetUp() {
     ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))};
     std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels};
-    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
-    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
+    auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape);
+    auto reshape1 = std::make_shared(params[0], reshapePattern1, false);
     auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f);
@@ -74,24 +74,24 @@ void ScaleShiftAfterConvTest::SetUp() {
                                      {kernelShape[0], kernelShape[1]},
                                      {kernelShape[0] > 1 ? stride : 1, stride},
                                      {0, 0},
-                                     { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights);
+                                     { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights);
     auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1;
     std::vector outFormShapes = { 1, outputChannels * widthAfterConv, 1, 1 };
-    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes);
-    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
+    auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, outFormShapes);
+    auto reshape2 = std::make_shared(conv, reshapePattern2, false);
     auto scale = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -2.0f, 2.0f);
     auto shift = ov::test::utils::generate_float_numbers(outputChannels * widthAfterConv, -2.0f, 2.0f);
-    auto mul_const = std::make_shared(ngPrc, outFormShapes, scale);
-    auto mul = std::make_shared(reshape2, mul_const);
-    auto add_const = std::make_shared(ngPrc, outFormShapes, shift);
-    auto add = std::make_shared(mul, add_const);
+    auto mul_const = std::make_shared(ngPrc, outFormShapes, scale);
+    auto mul = std::make_shared(reshape2, mul_const);
+    auto add_const = std::make_shared(ngPrc, outFormShapes, shift);
+    auto add = std::make_shared(mul, add_const);
     outFormShapes = {1, outputChannels * widthAfterConv };
-    auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
-    auto reshape3 = std::make_shared(add, reshapePattern3, false);
+    auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes);
+    auto reshape3 = std::make_shared(add, reshapePattern3, false);
     function = std::make_shared(mul, params, "ScaleShiftAfterConvTest");
 }
ScaleShiftBeforeConvTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; std::vector convInputShape = {1, inputShape[1], 1, 1}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(params[0], reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(params[0], reshapePattern1, false); auto scale = ov::test::utils::generate_float_numbers(convInputShape[1], -2.0f, 2.0f); auto shift = ov::test::utils::generate_float_numbers(convInputShape[1], -2.0f, 2.0f); - auto mul_const = std::make_shared(ngPrc, convInputShape, scale); - auto mul = std::make_shared(reshape1, mul_const); - auto add_const = std::make_shared(ngPrc, convInputShape, shift); - auto add = std::make_shared(mul, add_const); + auto mul_const = std::make_shared(ngPrc, convInputShape, scale); + auto mul = std::make_shared(reshape1, mul_const); + auto add_const = std::make_shared(ngPrc, convInputShape, shift); + auto add = std::make_shared(mul, add_const); convInputShape = {1, inputChannels, 1, inputShape[1] / inputChannels}; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape2 = std::make_shared(mul, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape2 = std::make_shared(mul, reshapePattern2, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.1f, 0.1f); @@ -174,12 +174,12 @@ void ScaleShiftBeforeConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterReshape = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterReshape }; - auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape3 = std::make_shared(conv, reshapePattern3, false); + auto reshapePattern3 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape3 = std::make_shared(conv, reshapePattern3, false); function = std::make_shared(reshape3, params, "ScaleShiftBeforeConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp b/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp index 7d2fc5a1d9c180..20c51a963ac754 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/simple_if.cpp @@ -175,10 +175,10 @@ void SimpleIfNotConstConditionTest::generate_inputs(const std::vector auto* dataPtr = tensor.data(); dataPtr[0] = condition; } else { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), - targetInputStaticShapes[i], - 10, - -5); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = -5; + in_data.range = 10; + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], in_data); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp index c24296d44dd017..efd77ea28a09f1 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/softsign.cpp @@ -36,16 +36,16 @@ void SoftsignTest::SetUp() { ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto abs = std::make_shared(params[0]); + auto abs = std::make_shared(params[0]); - auto const_1 = ngraph::opset1::Constant::create(ngPrc, ngraph::Shape{}, {1}); - auto const_neg_1 = ngraph::opset1::Constant::create(ngPrc, ngraph::Shape{}, {-1}); + auto const_1 = ov::op::v0::Constant::create(ngPrc, ngraph::Shape{}, {1}); + auto const_neg_1 = ov::op::v0::Constant::create(ngPrc, ngraph::Shape{}, {-1}); - auto add = std::make_shared(abs, const_1); - auto power = std::make_shared(add, const_neg_1); + auto add = std::make_shared(abs, const_1); + auto power = std::make_shared(add, const_neg_1); - auto mul = std::make_shared(power, params[0]); - ngraph::ResultVector results{ std::make_shared(mul) }; + auto mul = std::make_shared(power, params[0]); + ngraph::ResultVector results{ std::make_shared(mul) }; function = std::make_shared(results, params, "SoftSignTest"); } @@ -66,14 +66,14 @@ std::shared_ptr SoftsignTest::GenerateNgraphFriendlySoftSign() auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto abs = std::make_shared(params[0]); + auto abs = std::make_shared(params[0]); auto constant_0 = ngraph::builder::makeConstant(ngPrc, inputShape, { 1 }); - auto add = std::make_shared(abs, constant_0); + auto add = std::make_shared(abs, constant_0); auto constant_1 = 
ngraph::builder::makeConstant(ngPrc, inputShape, { -1 }); - auto power = std::make_shared(add, constant_1); - auto mul = std::make_shared(power, params[0]); + auto power = std::make_shared(add, constant_1); + auto mul = std::make_shared(power, params[0]); - ngraph::ResultVector results{ std::make_shared(mul) }; + ngraph::ResultVector results{ std::make_shared(mul) }; return std::make_shared(results, params, "SoftSignTest"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp index bbe77145aeabc4..0c520da17e8d09 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_concat_multi_inputs.cpp @@ -45,12 +45,12 @@ void SplitConcatMultiInputsTest::SetUp() { ngraph::OutputVector concatInputs = split->outputs(); - auto concat = std::make_shared(concatInputs, 1); + auto concat = std::make_shared(concatInputs, 1); if (withFC) { auto mul_const = ngraph::builder::makeConstant(ngPrc, { 10, inputShape[1] }, ov::test::utils::generate_float_numbers(10 * inputShape[1], -0.2f, 0.2f), false); - auto matmul = std::make_shared(concat, mul_const, false, true); + auto matmul = std::make_shared(concat, mul_const, false, true); function = std::make_shared(matmul, params, "SplitConcatMultiInputs"); } else { function = std::make_shared(concat, params, "SplitConcatMultiInputs"); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp index ac082acad8b5f3..2085fd03403ae2 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_conv.cpp @@ -67,12 +67,12 @@ void SplitConvTest::SetUp() { auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{splitAxis}); auto split = std::make_shared(params[0], split_axis_op, splitsNum); - auto relu1 = std::make_shared(split->output(0)); + auto relu1 = std::make_shared(split->output(0)); - auto relu2 = std::make_shared(split->output(1)); + auto relu2 = std::make_shared(split->output(1)); std::vector convInputShape = {1, inputChannels, 1, inputShape[0] * inputShape[1] / inputChannels / 2}; - auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); - auto reshape1 = std::make_shared(relu2, reshapePattern1, false); + auto reshapePattern1 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 4 }, convInputShape); + auto reshape1 = std::make_shared(relu2, reshapePattern1, false); auto filterWeights = ov::test::utils::generate_float_numbers(outputChannels * convInputShape[1] * kernelShape[0] * kernelShape[1], -0.2f, 0.2f); @@ -81,16 +81,16 @@ void SplitConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); auto widthAfterConv = (convInputShape[3] - kernelShape[1]) / stride + 1; std::vector outFormShapes = {1, outputChannels * widthAfterConv }; - auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); - auto reshape2 = std::make_shared(conv, reshapePattern2, false); + auto reshapePattern2 = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{ 2 }, outFormShapes); + auto reshape2 = std::make_shared(conv, reshapePattern2, false); - ngraph::ResultVector results{std::make_shared(relu1), - std::make_shared(reshape2)}; + ngraph::ResultVector results{std::make_shared(relu1), + std::make_shared(reshape2)}; function = std::make_shared(results, params, "SplitConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp index cb9e32b049170a..b885ceb2343c2a 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_relu.cpp @@ -38,8 +38,8 @@ namespace SubgraphTestsDefinitions { ngraph::ResultVector results; for (size_t i : connect_index) { - auto relu = std::make_shared(split->output(i)); - results.push_back(std::make_shared(relu)); + auto relu = std::make_shared(split->output(i)); + results.push_back(std::make_shared(relu)); } function = std::make_shared(results, input, "split_relu"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp index b03a01d175f763..d371c48b4c83ad 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/split_trivial_permute_concat.cpp @@ -38,13 +38,13 @@ namespace SubgraphTestsDefinitions { std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{static_cast(splitAxis)}); auto split = std::make_shared(input[0], split_axis_op, 2); - auto permute_in_params = std::make_shared(ngraph::element::i64, + auto permute_in_params = std::make_shared(ngraph::element::i64, ngraph::Shape{ 4 }, ngraph::Shape{ {0, 3, 2, 1} }); - auto permute_0 = std::make_shared(split->output(0), permute_in_params); - auto permute_1 = std::make_shared(split->output(1), permute_in_params); + auto permute_0 = std::make_shared(split->output(0), permute_in_params); + auto permute_1 = std::make_shared(split->output(1), permute_in_params); - auto concat = std::make_shared(ngraph::OutputVector{ permute_0, permute_1 }, concatAxis); + auto concat = std::make_shared(ngraph::OutputVector{ permute_0, permute_1 }, concatAxis); auto act = ngraph::builder::makeActivation(concat, ngPrc, ngraph::helpers::ActivationTypes::Relu); function = std::make_shared(act, input, "split_trivial_permute_concat"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp index c7515c058eab99..bcb66ebd598bee 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/strided_slice.cpp @@ -47,7 +47,7 @@ void StridedSliceTest::SetUp() { auto ngPrc = 
FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector params {std::make_shared(ngPrc, ov::Shape(ssParams.inputShape))}; - auto relu = std::make_shared(params[0]); + auto relu = std::make_shared(params[0]); ov::Shape constShape = {ssParams.begin.size()}; auto beginNode = std::make_shared(ov::element::i64, constShape, ssParams.begin.data()); @@ -64,7 +64,7 @@ void StridedSliceTest::SetUp() { ssParams.shrinkAxisMask, ssParams.ellipsisAxisMask); - ngraph::ResultVector results{std::make_shared(ss)}; + ngraph::ResultVector results{std::make_shared(ss)}; function = std::make_shared(results, params, "strided_slice"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp index dcd42366206b0e..e369bf8d9629c8 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_concat.cpp @@ -46,7 +46,7 @@ void SliceConcatTest::SetUp() { ngraph::Output input = params[0]; if (inputShape[0] != 1 || inputShape.size() != 2) { - input = std::make_shared(params[0], + input = std::make_shared(params[0], ngraph::builder::makeConstant(ngraph::element::i64, ngraph::Shape{inputShape.size()}, inputShape), false); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp index b5fbec7eb83143..af8fffa45d2588 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/stridedslice_conv.cpp @@ -81,7 +81,7 @@ void SliceConvTest::SetUp() { {kernelShape[0], kernelShape[1]}, {kernelShape[0] > 1 ? 
stride : 1, stride}, {0, 0}, - { 0, 0 }, { 1, 1 }, ngraph::op::PadType::VALID, outputChannels, false, filterWeights); + { 0, 0 }, { 1, 1 }, ov::op::PadType::VALID, outputChannels, false, filterWeights); function = std::make_shared(conv, params, "StridedSliceConvTest"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp b/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp index eaacac921bcb30..4506e49464e4bd 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/tensor_names.cpp @@ -17,16 +17,16 @@ std::string TensorNamesTest::getTestCaseName(const testing::TestParamInfoGetParam(); - auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); + auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); parameter->set_friendly_name("parameter"); parameter->get_output_tensor(0).set_names({"input"}); - auto relu_prev = std::make_shared(parameter); + auto relu_prev = std::make_shared(parameter); relu_prev->set_friendly_name("relu_prev"); relu_prev->get_output_tensor(0).set_names({"relu,prev_t", "identity_prev_t"}); - auto relu = std::make_shared(relu_prev); + auto relu = std::make_shared(relu_prev); relu->set_friendly_name("relu"); relu->get_output_tensor(0).set_names({"relu,t", "identity"}); - const ngraph::ResultVector results{std::make_shared(relu)}; + const ngraph::ResultVector results{std::make_shared(relu)}; results[0]->set_friendly_name("out"); ngraph::ParameterVector params{parameter}; function = std::make_shared(results, params, "TensorNames"); diff --git a/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp b/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp index 64ee23719dce53..e5391c666116b4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/transpose_add.cpp @@ -36,12 +36,12 @@ void TransposeAdd::SetUp() { ngraph::Shape permute_order(input_shape.size()); std::iota(std::begin(permute_order), std::end(permute_order), 0); std::iter_swap(std::end(permute_order) - 2, std::end(permute_order) - 1); - auto transpose_in_params = std::make_shared(ngraph::element::i64, + auto transpose_in_params = std::make_shared(ngraph::element::i64, ngraph::Shape{permute_order.size()}, permute_order); - auto transpose_in = std::make_shared(params[0], transpose_in_params); + auto transpose_in = std::make_shared(params[0], transpose_in_params); auto add_const = ngraph::builder::makeConstant(ngPrc, transpose_in->get_output_shape(0), {}, true); - auto add = std::make_shared(transpose_in, add_const); + auto add = std::make_shared(transpose_in, add_const); function = std::make_shared(add, params, "transpose_add"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp b/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp index 67a9401dea826a..716ffaba3aa2f4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/transpose_conv_transpose_squeeze.cpp @@ -52,29 +52,29 @@ void TransposeConvTest::SetUp() { std::vector nchw_order = { 0, 3, 1, 2 }; std::vector nhwc_order = { 0, 2, 3, 1 }; std::vector conv_input_shape = {1, 1, input_shape[0] * input_shape[1] / input_channels, 
input_channels}; - auto reshape_pattern = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{conv_input_shape.size()}, conv_input_shape); - auto reshape = std::make_shared(params[0], reshape_pattern, false); + auto reshape_pattern = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{conv_input_shape.size()}, conv_input_shape); + auto reshape = std::make_shared(params[0], reshape_pattern, false); - const auto input_order1 = std::make_shared(ngraph::element::i64, + const auto input_order1 = std::make_shared(ngraph::element::i64, ngraph::Shape({conv_input_shape.size()}), nchw_order); - auto transpose1 = std::make_shared(reshape, input_order1); + auto transpose1 = std::make_shared(reshape, input_order1); float weight_val = 0.02; auto filter_weights_node = ngraph::builder::makeConstant(ng_prc, {output_channels, input_channels, kernel_shape[0], kernel_shape[1]}, { weight_val }); - auto conv = std::make_shared(transpose1, filter_weights_node, strides, std::vector{ 0, 0 }, + auto conv = std::make_shared(transpose1, filter_weights_node, strides, std::vector{ 0, 0 }, std::vector{ 0, 0 }, std::vector{ 1, 1 }, - ngraph::op::PadType::VALID); + ov::op::PadType::VALID); - const auto input_order2 = std::make_shared(ngraph::element::i64, + const auto input_order2 = std::make_shared(ngraph::element::i64, ngraph::Shape({conv_input_shape.size()}), nhwc_order); - auto transpose2 = std::make_shared(conv, input_order2); + auto transpose2 = std::make_shared(conv, input_order2); - auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); - auto squeeze = std::make_shared(transpose2, constant_squeeze); + auto constant_squeeze = std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape{1}, std::vector{0}); + auto squeeze = std::make_shared(transpose2, constant_squeeze); function = std::make_shared(squeeze, params, "transposeConv"); } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp b/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp index 47fcbcccf7ecf8..a43946decdf7cd 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp @@ -32,24 +32,24 @@ void TrivialConcatLayerTest::SetUp() { auto input_relu = ngraph::builder::makeActivation(params[0], ngPrc, ngraph::helpers::ActivationTypes::Relu); - auto input_reshape_pattern = std::make_shared(ngraph::element::i64, + auto input_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{inputShape.size()}, std::vector(inputShape)); - auto input = std::make_shared(input_relu, input_reshape_pattern, false); + auto input = std::make_shared(input_relu, input_reshape_pattern, false); auto constant_values = ov::test::utils::generate_float_numbers(total_size, 15.5f, 16.1f); auto constant = ngraph::builder::makeConstant(ngPrc, std::vector({1, total_size}), constant_values); - auto first_reshape = std::make_shared(constant, input_reshape_pattern, false); + auto first_reshape = std::make_shared(constant, input_reshape_pattern, false); - auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input}), axis); + auto concat = std::make_shared(ngraph::OutputVector({first_reshape, input}), axis); - auto final_reshape_pattern = std::make_shared(ngraph::element::i64, + auto final_reshape_pattern = std::make_shared(ngraph::element::i64, ngraph::Shape{2}, std::vector({1, 2 * total_size})); - auto final_reshape = 
std::make_shared(concat, final_reshape_pattern, false); + auto final_reshape = std::make_shared(concat, final_reshape_pattern, false); auto act = ngraph::builder::makeActivation(final_reshape, ngPrc, ngraph::helpers::ActivationTypes::Relu); - ngraph::ResultVector results{std::make_shared(act)}; + ngraph::ResultVector results{std::make_shared(act)}; function = std::make_shared(results, params, "trivial_concat"); } } // namespace SubgraphTestsDefinitions diff --git a/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp b/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp index 7fa90952ec1dd5..a02956f2b8f609 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp @@ -116,25 +116,25 @@ void FakeQuantizeSubgraphTest::SetUp() { auto inputFQNode = ngraph::builder::makeFakeQuantize(params[0], ngraph::element::f32, levels[0], constShape[0], { inputDataMin }, { inputDataMax }, { inputDataMin }, { inputDataMax }); - auto weightsFQNode = std::make_shared(const_param, + auto weightsFQNode = std::make_shared(const_param, lowNode, highNode, lowNode, highNode, levels[1]); - auto inputFQ = std::dynamic_pointer_cast(inputFQNode); - auto weightsFQ = std::dynamic_pointer_cast(weightsFQNode); - auto matmul = std::make_shared(inputFQ, weightsFQ, false, true); + auto inputFQ = std::dynamic_pointer_cast(inputFQNode); + auto weightsFQ = std::dynamic_pointer_cast(weightsFQNode); + auto matmul = std::make_shared(inputFQ, weightsFQ, false, true); std::shared_ptr biases_node; if (biases) { auto const_bias = ngraph::builder::makeConstant(ngPrc, {1, constShape[1][0]}, std::vector{ -1.0f }); - biases_node = std::make_shared(matmul, const_bias); + biases_node = std::make_shared(matmul, const_bias); } else { biases_node = matmul; } - auto sigmoid = std::make_shared(biases_node); - ngraph::ResultVector results{std::make_shared(sigmoid)}; + auto sigmoid = std::make_shared(biases_node); + ngraph::ResultVector results{std::make_shared(sigmoid)}; if (biases) { - auto sigmoid_2 = std::make_shared(inputFQ); - results.push_back(std::make_shared(sigmoid_2)); + auto sigmoid_2 = std::make_shared(inputFQ); + results.push_back(std::make_shared(sigmoid_2)); } function = std::make_shared(results, params, "fakeQuantizeSubgraph"); configuration = config.second; diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp index 7fdee03ef0fa8e..399dde070c22f3 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/markup_bias.hpp @@ -5,7 +5,7 @@ #pragma once #include -#include +#include "openvino/core/model.hpp" #include "common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp b/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp index a50223294c9cfb..b6ab0d86ca5d54 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/align_concat_quantization_parameters.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/align_concat_quantization_parameters.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "low_precision/network_helper.hpp" diff --git 
a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp index 13b4ce29715630..52b53745eaa27b 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/assign_and_read_value.cpp @@ -6,12 +6,12 @@ #include -#include -#include -#include +#include "openvino/opsets/opset1.hpp" +#include "openvino/opsets/opset3.hpp" +#include "openvino/opsets/opset6.hpp" #include "ov_models/subgraph_builders.hpp" #include "openvino/op/util/variable.hpp" -#include +#include "openvino/op/util/assign_base.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/assign_and_read_value.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp b/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp index c3f409a4acd883..b8a8d468550e62 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/avg_pool.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp b/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp index aa6fa431276b98..b0817bd996207e 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/batch_to_space.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/batch_to_space.hpp" -#include +#include "openvino/opsets/opset2.hpp" #include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp b/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp index c48804a97c0973..be204b15bfb88f 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/clamp.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/clamp.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp index a14ef51efefbce..d008502387c3d1 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp index 249e97041a402c..bd93064a6a7ebd 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp @@ -3,7 +3,7 @@ // #include "ov_lpt_models/common/dequantization_operations.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp index 498e81d229886e..a6c67623bc2c68 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp @@ -3,7 +3,7 @@ // #include 
"ov_lpt_models/common/fake_quantize_on_data.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp index 049e3fd9f90457..9fe23b69f308b6 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_weights.cpp @@ -3,7 +3,7 @@ // #include "ov_lpt_models/common/fake_quantize_on_weights.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp index 2dabc7c52d725b..d2987bd0255dfc 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp @@ -3,7 +3,7 @@ // #include "ov_lpt_models/common/multiply.hpp" -#include +#include "openvino/opsets/opset1.hpp" namespace ngraph { namespace builder { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp index 8346192eee8460..7e3ddc5a8247b9 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/compose_fake_quantize.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/compose_fake_quantize.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp index e27e92fca9d05a..8c924f9f638fe1 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/concat.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/rt_info/precision_preserved_attribute.hpp" @@ -758,7 +758,7 @@ std::shared_ptr ConcatFunction::getOriginalWithIntermediateWithConsta attributes.antialias = false; attributes.pads_begin = { 0 }; attributes.pads_end = { 0 }; - const auto outputShape = op::Constant::create( + const auto outputShape = ov::opset1::Constant::create( ov::element::i64, ov::Shape{ 2 }, ov::Shape{ inputShape[2].is_dynamic() ? 9ul : static_cast(inputShape[2].get_length()), @@ -1870,7 +1870,7 @@ std::shared_ptr ConcatFunction::getReferenceWithIntermediateWithConst attributes.pads_begin = { 0 }; attributes.pads_end = { 0 }; - const auto outputShape = op::Constant::create( + const auto outputShape = ov::opset1::Constant::create( ov::element::i64, ov::Shape{ 2 }, ov::Shape{ inputShape[2].is_dynamic() ? 
9ul : static_cast(inputShape[2].get_length()), diff --git a/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp index c00354a25b8773..aef4d1b951c7c5 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/convolution.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/convolution.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" @@ -52,7 +52,7 @@ std::shared_ptr ConvolutionFunction::getOriginal( if (weights->cast_vector().size() == 1ul) { auto targetShape = ov::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 }; weights = ov::as_type_ptr(fold( - weights, op::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); + weights, ov::opset1::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); } std::shared_ptr convertedWeights; @@ -266,7 +266,7 @@ std::shared_ptr ConvolutionFunction::getReference( if (weights->cast_vector().size() == 1ul) { auto targetShape = ov::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 }; weights = ov::as_type_ptr(fold( - weights, op::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); + weights, ov::opset1::Constant::create(ov::element::i64, Shape{ targetShape.size() }, targetShape))); } const auto convertOnWeights = std::make_shared(weights, netPrecision); diff --git a/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp b/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp index e748f31872e27e..758f3cfea8eae1 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/convolution_backprop_data.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/convolution_backprop_data.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp b/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp index eee4982142f526..53c95244acfb83 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/elementwise_with_multi_parent_dequantization.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/elementwise_with_multi_parent_dequantization.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/builders.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp index e86a5c71582321..b9d54f2b549eb8 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fake_quantize.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp index 8ad42a8e9d5ec4..3275b3f517809d 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp 
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fake_quantize_and_convolution.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" @@ -209,7 +209,7 @@ std::shared_ptr FakeQuantizeAndConvolutionFunction::get( if (multiplyAfter) { const auto& O = lastOperation->get_shape()[1]; std::vector weights_val(O, 1); - auto constant = op::Constant::create(element::f32, Shape{O, 1, 1}, weights_val); + auto constant = ov::opset1::Constant::create(element::f32, Shape{O, 1, 1}, weights_val); lastOperation = std::make_shared(lastOperation, constant); } } else { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp index b158c55fc01964..58a6abf775faca 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_two_output_branches_with_convolution.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/fake_quantize_and_two_output_branches_with_convolution.hpp" #include "ov_lpt_models/common/fake_quantize_on_weights.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp index f601957f5b3568..b8d0e6fd17ca05 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_on_weights_and_unsupported_child.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_lpt_models/fake_quantize_on_weights_and_unsupported_child.hpp" #include "ov_lpt_models/common/fake_quantize_on_weights.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp index 4308283f073d54..5c422352c88a83 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_precision_selection.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fake_quantize_precision_selection.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp index 5fbf63e2eada48..e199e7f0b207f2 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fold_fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fold_fake_quantize.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp index 9efef8ad4c158f..edadc20e8f5f7e 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp +++ 
b/src/tests/ov_helpers/ov_lpt_models/src/fuse_convert.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fuse_convert.hpp" -#include <ngraph/opsets/opset1.hpp> +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp index 05274aa9568ab7..47eb5fc2dc77b9 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fuse_fake_quantize.hpp" -#include <ngraph/opsets/opset1.hpp> +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp index 94fb132962a273..4bdddc2eef0803 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize_and_scale_shift.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fuse_fake_quantize_and_scale_shift.hpp" -#include <ngraph/opsets/opset1.hpp> +#include "openvino/opsets/opset1.hpp" #include "ov_models/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp index 5164d9ca1a51e3..c5f7caa6413ee2 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_multiply_to_fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fuse_multiply_to_fake_quantize.hpp" -#include <ngraph/opsets/opset1.hpp> +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp index f39b90130c1d68..9618c5f2827f37 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_subtract_to_fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/fuse_subtract_to_fake_quantize.hpp" -#include <ngraph/opsets/opset1.hpp> +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp index 4ae1f31278806d..4c745d2d726e24 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp @@ -4,9 +4,9 @@ #include "ov_lpt_models/gather.hpp" -#include <ngraph/opsets/opset1.hpp> -#include <ngraph/opsets/opset7.hpp> -#include <ngraph/opsets/opset8.hpp> +#include "openvino/opsets/opset1.hpp" +#include "openvino/opsets/opset7.hpp" +#include "openvino/opsets/opset8.hpp" #include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp index cbb08e1fc6c1ed..076cc2a12a106e 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/group_convolution.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/group_convolution.hpp" -#include <ngraph/opsets/opset1.hpp> +#include 
"openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp b/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp index 5b429ac22054bf..2d60507b328297 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/markup_avg_pool_precisions.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp b/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp index 185ff9683a6f61..2df17e9ac806a4 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/markup_bias.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/markup_bias.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "ov_models/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp index 285a5f7f6de649..1a52e287fe1f52 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul.cpp @@ -7,7 +7,7 @@ #include #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp index b9b3471a3ffa60..a5ce7b0790c5f3 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mat_mul_with_optimized_constant_fake_quantize.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/mat_mul_with_optimized_constant_fake_quantize.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp b/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp index 4bc4dbd44e2281..919706a5b8f637 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/max_pool.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/max_pool.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include #include "low_precision/network_helper.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp index 0548636f09bbc9..708475fe9e6a37 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/move_dequantization_after.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/move_dequantization_after.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp index e76a42e551fa07..de07b0eed9fb57 100644 --- 
a/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/move_fake_quantize.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/move_fake_quantize.hpp" #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp index b36629e0b66920..6dbdd101aef262 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp index af7f1718f263eb..1c2614f060216d 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_partial_function.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include #include "ov_models/subgraph_builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp b/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp index da0b6957f31b82..ef3ef183c3da6c 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/multiply_with_one_parent.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/multiply_with_one_parent.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp b/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp index 02be02a1384b18..d3082b0a4d6f18 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/mvn.cpp @@ -33,7 +33,7 @@ std::shared_ptr MVNFunction::getOriginal( std::make_shared(element::i64, Shape{reductionAxes.size()}, reductionAxes.to_vector()), normalizeVariance, 1e-9, - op::MVNEpsMode::INSIDE_SQRT); + ov::op::MVNEpsMode::INSIDE_SQRT); } mvn->set_friendly_name("output"); auto& rtInfo = mvn->get_rt_info(); @@ -78,7 +78,7 @@ std::shared_ptr MVNFunction::getReference( std::shared_ptr mvn; if (opset_version == 2) { mvn = std::make_shared>( - op::MVN(dequantizationOpBefore, reductionAxes, normalizeVariance), + ov::op::v0::MVN(dequantizationOpBefore, reductionAxes, normalizeVariance), dequantizationAfter.empty() ? precision : element::f32); } else if (opset_version == 6) { mvn = std::make_shared>( @@ -86,7 +86,7 @@ std::shared_ptr MVNFunction::getReference( std::make_shared(element::i64, Shape{reductionAxes.size()}, reductionAxes.to_vector()), normalizeVariance, 1e-9, - op::MVNEpsMode::INSIDE_SQRT), + ov::op::MVNEpsMode::INSIDE_SQRT), dequantizationAfter.empty() ? 
precision : element::f32); } auto& rtInfo = mvn->get_rt_info(); diff --git a/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp b/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp index 54b8e000d1be43..a3b94a49567152 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/normalize_l2.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/normalize_l2.hpp" #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp b/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp index ebd318d7bf8742..4ae654fe774cca 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/precision_propagation.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/precision_propagation.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "low_precision/network_helper.hpp" #include "low_precision/rt_info/precision_preserved_attribute.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp b/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp index de698a6d2f9588..c41df1a99372ba 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/precomp.hpp @@ -4,8 +4,8 @@ #pragma once -#include -#include +#include "openvino/openvino.hpp" +#include "openvino/op/ops.hpp" #include #include diff --git a/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp b/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp index b6e733714bdd06..876a810151fc39 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/prelu.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_models/subgraph_builders.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp b/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp index c5a2a241aac42c..c5328cc9abfcb4 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/relu.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_ops/type_relaxed.hpp" #include "ov_lpt_models/common/builders.hpp" #include "low_precision/network_helper.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp b/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp index 02cd23c503640e..ccab249a9e18a4 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/reshape.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/reshape.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/round.cpp b/src/tests/ov_helpers/ov_lpt_models/src/round.cpp index 5eff5046f78e04..c8858744424a4c 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/round.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/round.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/round.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp b/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp index 
48847c1f33a7b0..6ef40243193b2b 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/shuffle_channels.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/opsets/opset1.hpp" #include "low_precision/network_helper.hpp" #include "ov_lpt_models/common/builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp b/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp index b6468f45249fde..ffe47dc3b75067 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/space_to_batch.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/space_to_batch.hpp" -#include +#include "openvino/opsets/opset2.hpp" #include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp b/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp index ed4298207e3c25..6812c7463f9219 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/squeeze.cpp @@ -63,7 +63,7 @@ std::shared_ptr SqueezeFunction::getReference( const std::shared_ptr dequantizationOpBefore = makeDequantization(input, dequantizationBefore); const auto squeeze = std::make_shared>( - op::Squeeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)), + ov::opset1::Squeeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)), precisionAfterOperation); const std::shared_ptr dequantizationOpAfter = makeDequantization(squeeze, dequantizationAfter); dequantizationOpAfter->set_friendly_name("output"); diff --git a/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp b/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp index 4b058335d620d9..f4eaf38b1ef0fa 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/subtract.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/subtract.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp b/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp index 99e49c247eda13..6cf23a0a875acb 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/subtract_multiply_to_multiply_add.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/subtract_multiply_to_multiply_add.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" using namespace ov::pass::low_precision; diff --git a/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp index 0895968b5b3ab5..43003eab6aa914 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/transformations_after_split.cpp @@ -6,7 +6,7 @@ #include -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/fake_quantize_on_data.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp index aaf1093e6c52e9..7b67a00a71ccd7 100644 --- 
a/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/transpose.cpp @@ -4,7 +4,7 @@ #include "ov_lpt_models/transpose.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp b/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp index 5724e21251edf5..8abff7dc5eef9b 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/transpose_after_mat_mul.cpp @@ -5,7 +5,7 @@ #include "ov_lpt_models/transpose_after_mat_mul.hpp" #include "low_precision/network_helper.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "ov_lpt_models/common/builders.hpp" #include "ov_models/subgraph_builders.hpp" diff --git a/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp b/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp index 5341019edd5ffc..2e7f888fe3725b 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/unsqueeze.cpp @@ -63,7 +63,7 @@ std::shared_ptr UnsqueezeFunction::getReference( const std::shared_ptr dequantizationOpBefore = makeDequantization(input, dequantizationBefore); const auto unsqueeze = std::make_shared>( - op::v0::Unsqueeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)), + ov::op::v0::Unsqueeze(dequantizationOpBefore, std::make_shared(element::i64, Shape{ axes.size() }, axes)), precisionAfterOperation); const std::shared_ptr dequantizationOpAfter = makeDequantization(unsqueeze, dequantizationAfter); dequantizationOpAfter->set_friendly_name("output"); diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp index 6c3731e125c031..d6f7eb167a3fe7 100644 --- a/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/builders.hpp @@ -5,20 +5,8 @@ #pragma once #include -#include - -// TODO: Temporary solution to fix compilation of plugin tests -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -// TODO: Temporary solution to fix compilation of plugin tests +#include #include "common_test_utils/test_enums.hpp" #include "openvino/core/node.hpp" diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp index 65e424a77fcf46..fe1c8d539503ac 100644 --- a/src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/subgraph_builders.hpp @@ -16,9 +16,6 @@ namespace subgraph { std::shared_ptr makeConvPoolRelu(std::vector inputShape = {1, 1, 32, 32}, ov::element::Type_t ngPrc = ov::element::Type_t::f32); -std::shared_ptr makeConvPoolReluNoReshapes(std::vector inputShape = {1, 1, 32, 32}, - ov::element::Type_t ngPrc = ov::element::Type_t::f32); - std::shared_ptr makeConvPool2Relu2(std::vector inputShape = {1, 1, 32, 32}, ov::element::Type_t ngPrc = ov::element::Type_t::f32); @@ -31,73 +28,11 @@ std::shared_ptr makeSplitConvConcat(std::vector inputShape = std::shared_ptr makeKSOFunction(std::vector inputShape = {1, 4, 20, 20}, ov::element::Type_t ngPrc = ov::element::Type_t::f32); -std::shared_ptr makeSplitMultiConvConcat(std::vector inputShape = {1, 4, 20, 20}, - ov::element::Type_t ngPrc = 
ov::element::Type_t::f32); - -std::shared_ptr makeTIwithLSTMcell(ov::element::Type_t ngPRC = ov::element::Type_t::f32, - size_t N = 32, // Batch size - size_t L = 10, // Sequence length - size_t I = 8, // Input size - size_t H = 32); // Hidden size - -std::shared_ptr makeSingleConv(std::vector inputShape = {1, 3, 24, 24}, - ov::element::Type_t type = ov::element::Type_t::f32); - -std::shared_ptr makeDetectionOutput(ov::element::Type_t type = ov::element::Type_t::f32); - -std::shared_ptr makeMultiSingleConv(std::vector inputShape = {1, 3, 24, 24}, - ov::element::Type type = ov::element::Type_t::f32); - -std::shared_ptr make2InputSubtract(std::vector inputShape = {1, 3, 24, 24}, - ov::element::Type_t type = ov::element::Type_t::f32); - -std::shared_ptr makeNestedBranchConvConcat(std::vector inputShape = {1, 4, 20, 20}, - ov::element::Type ngPrc = ov::element::Type_t::f32); - -std::shared_ptr makeNestedSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, - ov::element::Type ngPrc = ov::element::Type_t::f32); - -std::shared_ptr makeSplitConvConcatInputInBranch(std::vector inputShape = {1, 4, 20, 20}, - ov::element::Type ngPrc = ov::element::Type_t::f32); - -std::shared_ptr makeSplitConvConcatNestedInBranch(std::vector inputShape = {1, 4, 20, 20}, - ov::element::Type ngPrc = ov::element::Type_t::f32); - -std::shared_ptr makeSplitConvConcatNestedInBranchNestedOut( - std::vector inputShape = {1, 4, 20, 20}, - ov::element::Type ngPrc = ov::element::Type_t::f32); - -std::shared_ptr makeConvBias(std::vector inputShape = {1, 3, 24, 24}, - ov::element::Type type = ov::element::Type_t::f32); - -std::shared_ptr makeReadConcatSplitAssign(std::vector inputShape = {1, 1, 2, 4}, - ov::element::Type type = ov::element::Type_t::f32); - -std::shared_ptr makeMatMulBias(std::vector inputShape = {1, 3, 24, 24}, - ov::element::Type type = ov::element::Type_t::f32); - -std::shared_ptr makeConvertTranspose(std::vector inputShape = {1, 3, 24, 24}, - std::vector inputOrder = {0, 1, 2, 3}, - ov::element::Type type = ov::element::Type_t::f32); - -std::shared_ptr makeMultipleInputOutputReLU(std::vector inputShape = {1, 1, 32, 32}, - ov::element::Type_t type = ov::element::Type_t::f32); - -std::shared_ptr makeMultipleInputOutputDoubleConcat(std::vector inputShape = {1, 1, 32, 32}, - ov::element::Type_t type = ov::element::Type_t::f32); - std::shared_ptr makeSingleConcatWithConstant(std::vector inputShape = {1, 1, 2, 4}, ov::element::Type type = ov::element::Type_t::f32); std::shared_ptr makeConcatWithParams(std::vector inputShape = {1, 1, 32, 32}, ov::element::Type_t type = ov::element::Type_t::f32); - -std::shared_ptr makeSingleSplit(std::vector inputShape = {1, 4, 32, 32}, - ov::element::Type_t type = ov::element::Type_t::f32); - -std::shared_ptr makeSplitConcat(std::vector inputShape = {1, 4, 24, 24}, - ov::element::Type_t type = ov::element::Type_t::f32); - } // namespace subgraph } // namespace builder } // namespace ngraph diff --git a/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp b/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp index 668c11afd258e4..d140301b2367ae 100644 --- a/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp +++ b/src/tests/ov_helpers/ov_models/include/ov_models/utils/ov_helpers.hpp @@ -10,11 +10,11 @@ #endif #include -#include -#include #include #include "common_test_utils/test_enums.hpp" +#include "openvino/opsets/opset1.hpp" +#include "openvino/runtime/tensor.hpp" namespace ngraph { namespace helpers { diff --git 
a/src/tests/ov_helpers/ov_models/src/eltwise.cpp b/src/tests/ov_helpers/ov_models/src/eltwise.cpp index 17838f6a5d702a..223f9a9f46ca00 100644 --- a/src/tests/ov_helpers/ov_models/src/eltwise.cpp +++ b/src/tests/ov_helpers/ov_models/src/eltwise.cpp @@ -3,9 +3,9 @@ // #include -#include #include "common_test_utils/test_enums.hpp" +#include "openvino/opsets/opset13.hpp" #include "ov_models/utils/ov_helpers.hpp" namespace ngraph { diff --git a/src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp b/src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp index bde40117899751..f8cd46cdffe613 100644 --- a/src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp +++ b/src/tests/ov_helpers/ov_models/src/subgraph_builders.cpp @@ -78,40 +78,6 @@ std::shared_ptr makeConvPoolRelu(std::vector inputShape, ov:: return fnPtr; } -std::shared_ptr makeConvPoolReluNoReshapes(std::vector inputShape, ov::element::Type_t ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - params.front()->set_friendly_name("Param_1"); - params.front()->output(0).get_tensor().set_names({"data"}); - auto conv1 = ngraph::builder::makeConvolution(params.front(), - ngPrc, - {1, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 4); - conv1->set_friendly_name("Conv_1"); - conv1->output(0).get_tensor().set_names({"conv"}); - std::vector stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2}; - auto pool1 = std::make_shared(conv1, - stride, - padB, - padE, - kernel, - ov::op::RoundingType::FLOOR, - ov::op::PadType::EXPLICIT); - pool1->output(0).get_tensor().set_names({"pool"}); - pool1->set_friendly_name("Pool_1"); - auto relu1 = std::make_shared(pool1); - relu1->set_friendly_name("Relu_1"); - relu1->output(0).get_tensor().set_names({"relu"}); - ov::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape(); - ov::ResultVector results{std::make_shared(relu1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - return fnPtr; -} - std::shared_ptr makeConvPool2Relu2(std::vector inputShape, ov::element::Type_t ngPrc) { ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; params.front()->set_friendly_name("Param_1"); @@ -297,1058 +263,6 @@ std::shared_ptr makeKSOFunction(std::vector inputShape, ov::e return fnPtr; } -std::shared_ptr makeSplitMultiConvConcat(std::vector inputShape, ov::element::Type_t ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - params.front()->set_friendly_name("Param_1"); - params.front()->get_output_tensor(0).set_names({"input_tensor"}); - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[0], split_axis_op, 2); - - auto conv1_0 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1_0 = std::make_shared(conv1_0); - auto conv1_1 = ngraph::builder::makeConvolution(relu1_0, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1_1 = std::make_shared(conv1_1); - auto conv1_2 = ngraph::builder::makeConvolution(relu1_1, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1_2 = std::make_shared(conv1_2); - auto conv1_3 = ngraph::builder::makeConvolution(relu1_2, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1_3 = std::make_shared(conv1_3); - 
auto conv1_4 = ngraph::builder::makeConvolution(relu1_2, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1_4 = std::make_shared(conv1_4); - - auto conv2_0 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu2_0 = std::make_shared(conv2_0); - auto conv2_1 = ngraph::builder::makeConvolution(relu2_0, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu2_1 = std::make_shared(conv2_1); - auto conv2_2 = ngraph::builder::makeConvolution(relu2_1, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu2_2 = std::make_shared(conv2_2); - auto conv2_3 = ngraph::builder::makeConvolution(relu2_2, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu2_3 = std::make_shared(conv2_3); - auto conv2_4 = ngraph::builder::makeConvolution(relu2_2, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu2_4 = std::make_shared(conv2_4); - - auto concat = std::make_shared(ov::OutputVector{relu1_4->output(0), relu2_4->output(0)}, 1); - ov::ResultVector results{std::make_shared(concat)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - fnPtr->set_friendly_name("SplitMultiConvConcat"); - return fnPtr; -} - -std::shared_ptr makeTIwithLSTMcell(ov::element::Type_t ngPRC, size_t N, size_t L, size_t I, size_t H) { - auto SENT = std::make_shared(ngPRC, ov::Shape{N, L, I}); - - auto H_init = std::make_shared(ngPRC, ov::Shape{N, 1, H}); - auto C_init = std::make_shared(ngPRC, ov::Shape{N, 1, H}); - - auto H_t = std::make_shared(ngPRC, ov::Shape{N, 1, H}); - auto C_t = std::make_shared(ngPRC, ov::Shape{N, 1, H}); - - // Body - auto X = std::make_shared(ngPRC, ov::Shape{N, 1, I}); - std::vector dataW(4 * H * I, 0); - auto W_body = std::make_shared(ngPRC, ov::Shape{4 * H, I}, dataW); - std::vector dataR(4 * H * H, 0); - auto R_body = std::make_shared(ngPRC, ov::Shape{4 * H, H}, dataR); - std::vector inShape = {N, H}; - auto constantH = std::make_shared(ov::element::i64, ov::Shape{2}, inShape); - inShape = {N, I}; - auto constantX = std::make_shared(ov::element::i64, ov::Shape{2}, inShape); - auto LSTM_cell = - std::make_shared(std::make_shared(X, constantX, false), - std::make_shared(H_t, constantH, false), - std::make_shared(C_t, constantH, false), - W_body, - R_body, - H); - inShape = {N, 1, H}; - auto constantHo = std::make_shared(ov::element::i64, ov::Shape{3}, inShape); - auto H_o = std::make_shared(LSTM_cell->output(0), constantHo, false); - auto C_o = std::make_shared(LSTM_cell->output(1), constantHo, false); - auto body = std::make_shared(ov::OutputVector{H_o, C_o}, ov::ParameterVector{X, H_t, C_t}); - - auto tensor_iterator = std::make_shared(); - tensor_iterator->set_body(body); - // start=0, stride=1, part_size=1, end=39, axis=1 - tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1); - // H_t is Hinit on the first iteration, Ho after that - tensor_iterator->set_merged_input(H_t, H_init, H_o); - tensor_iterator->set_merged_input(C_t, C_init, C_o); - - // Output 0 is last Ho, result 0 of body - auto out0 = tensor_iterator->get_iter_value(H_o, -1); - // Output 1 is last Co, result 1 of body - auto out1 = tensor_iterator->get_iter_value(C_o, -1); - - auto results = - ov::ResultVector{std::make_shared(out0), std::make_shared(out1)}; - auto 
fn_ptr = std::make_shared(results, ov::ParameterVector{SENT, H_init, C_init}); - fn_ptr->set_friendly_name("TIwithLSTMcell"); - return fn_ptr; -} - -std::shared_ptr makeSingleConv(std::vector inputShape, ov::element::Type_t type) { - auto param0 = std::make_shared(type, ov::Shape(inputShape)); - - auto conv1 = ngraph::builder::makeConvolution(param0, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 4); - auto result = std::make_shared(conv1); - auto fn_ptr = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param0}); - fn_ptr->set_friendly_name("SingleConv"); - return fn_ptr; -} - -std::shared_ptr makeDetectionOutput(ov::element::Type_t type) { - const auto& data = std::make_shared(type, ov::Shape{1, 4, 10, 10}); - - const auto& constant_0 = std::make_shared(type, ov::Shape{1, 1, 1, 1}); - const auto& mul_0 = std::make_shared(data, constant_0); - - const auto& filters = std::make_shared(type, ov::Shape{1, 4, 1, 1}); - const auto& conv = std::make_shared(mul_0, - filters, - ov::Strides{1, 1}, - ov::CoordinateDiff{0, 0}, - ov::CoordinateDiff{0, 0}, - ov::Strides{1, 1}); - - const auto& box_logits_reshape = - std::make_shared(ov::element::i64, ov::Shape{2}, std::vector{0, -1}); - const auto& box_logits = std::make_shared(conv, box_logits_reshape, true); - - const auto& four_times = std::make_shared( - box_logits, - std::make_shared(ov::element::i64, ov::Shape{2}, std::vector{1, 4})); - - const auto& third_input_reshape = - std::make_shared(ov::element::i64, ov::Shape{3}, std::vector{0, 1, -1}); - const auto& third_input = std::make_shared(four_times, third_input_reshape, true); - - ov::op::v0::DetectionOutput::Attributes attr; - attr.num_classes = 4; - attr.background_label_id = 0; - attr.top_k = 75; - attr.variance_encoded_in_target = true; - attr.keep_top_k = {50}; - attr.code_type = std::string{"caffe.PriorBoxParameter.CORNER"}; - attr.share_location = true; - attr.nms_threshold = 0.5f; - attr.confidence_threshold = 0.5f; - attr.clip_after_nms = false; - attr.clip_before_nms = false; - attr.decrease_label_id = false; - attr.normalized = true; - attr.input_height = 1; - attr.input_width = 1; - attr.objectness_score = 0.4f; - const auto& detection = std::make_shared(four_times, four_times, third_input, attr); - const auto& convert = std::make_shared(detection, type); - - return std::make_shared(ov::NodeVector{convert}, ov::ParameterVector{data}, "SplitableDetectionOutput"); -} - -std::shared_ptr makeMultiSingleConv(std::vector inputShape, ov::element::Type type) { - auto param0 = std::make_shared(type, ov::Shape(inputShape)); - auto conv1 = ngraph::builder::makeConvolution(param0, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv2 = ngraph::builder::makeConvolution(conv1, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv3 = ngraph::builder::makeConvolution(conv2, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv4 = ngraph::builder::makeConvolution(conv3, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv5 = ngraph::builder::makeConvolution(conv4, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv6 = ngraph::builder::makeConvolution(conv5, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv7 = 
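The deleted makeTIwithLSTMcell above is the only helper in this header that exercised TensorIterator, so its wiring is worth keeping in condensed form. The sketch below is distilled directly from the removed body; the parameter names mirror the deleted code (X/H_t/C_t are the body parameters, H_o/C_o the body results, SENT/H_init/C_init the outer inputs):

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/tensor_iterator.hpp"

// Condensed from the deleted helper: slice the input along the sequence axis
// and carry the LSTM state across iterations.
std::shared_ptr<ov::op::v0::TensorIterator> wire_ti(
        const std::shared_ptr<ov::Model>& body,  // over {X, H_t, C_t} -> {H_o, C_o}
        const std::shared_ptr<ov::op::v0::Parameter>& X,
        const std::shared_ptr<ov::op::v0::Parameter>& H_t,
        const std::shared_ptr<ov::op::v0::Parameter>& C_t,
        const std::shared_ptr<ov::Node>& H_o,
        const std::shared_ptr<ov::Node>& C_o,
        const ov::Output<ov::Node>& SENT,
        const ov::Output<ov::Node>& H_init,
        const ov::Output<ov::Node>& C_init) {
    auto ti = std::make_shared<ov::op::v0::TensorIterator>();
    ti->set_body(body);
    ti->set_sliced_input(X, SENT, 0, 1, 1, -1, 1);  // start=0, stride=1, part_size=1, end=-1, axis=1
    ti->set_merged_input(H_t, H_init, H_o);         // H_t is H_init on iteration 0, then the previous H_o
    ti->set_merged_input(C_t, C_init, C_o);
    ti->get_iter_value(H_o, -1);                    // output 0: last H_o
    ti->get_iter_value(C_o, -1);                    // output 1: last C_o
    return ti;
}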
ngraph::builder::makeConvolution(conv6, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv8 = ngraph::builder::makeConvolution(conv7, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv9 = ngraph::builder::makeConvolution(conv8, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto conv10 = ngraph::builder::makeConvolution(conv9, - type, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto result = std::make_shared(conv10); - auto fn_ptr = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param0}); - fn_ptr->set_friendly_name("MultiSingleConv"); - return fn_ptr; -} - -std::shared_ptr make2InputSubtract(std::vector inputShape, ov::element::Type_t type) { - auto param0 = std::make_shared(type, ov::Shape(inputShape)); - auto param1 = std::make_shared(type, ov::Shape(inputShape)); - auto subtract = std::make_shared(param0, param1); - auto result = std::make_shared(subtract); - auto fn_ptr = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param0, param1}); - fn_ptr->set_friendly_name("TwoInputSubtract"); - return fn_ptr; -} - -std::shared_ptr makeNestedBranchConvConcat(std::vector inputShape, ov::element::Type ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto relu0 = std::make_shared(params[0]); - - auto conv1 = ngraph::builder::makeConvolution(relu0, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1 = std::make_shared(conv1); - - auto conv2 = ngraph::builder::makeConvolution(relu0, - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - auto relu2 = std::make_shared(conv2); - - auto conv3 = ngraph::builder::makeConvolution(relu2, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu3 = std::make_shared(conv3); - - auto conv4 = ngraph::builder::makeConvolution(relu2, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu4 = std::make_shared(conv4); - - auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); - ov::ResultVector results{std::make_shared(concat1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - fnPtr->set_friendly_name("NestedBranchConvConcat"); - return fnPtr; -} - -std::shared_ptr makeNestedSplitConvConcat(std::vector inputShape, ov::element::Type ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[0], split_axis_op, 2); - - auto conv1 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1 = std::make_shared(conv1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - auto relu2 = std::make_shared(conv2); - - auto split2_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split2 = std::make_shared(relu2, split2_axis_op, 2); - - auto conv3 = 
ngraph::builder::makeConvolution(split2->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu3 = std::make_shared(conv3); - - auto conv4 = ngraph::builder::makeConvolution(split2->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu4 = std::make_shared(conv4); - - auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); - ov::ResultVector results{std::make_shared(concat1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - fnPtr->set_friendly_name("NestedSplitConvConcat"); - return fnPtr; -} - -std::shared_ptr makeSplitConvConcatInputInBranch(std::vector inputShape, ov::element::Type ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape)), - std::make_shared(ngPrc, ov::Shape(inputShape))}; - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[0], split_axis_op, 2); - - auto conv1 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu1 = std::make_shared(conv1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu2 = std::make_shared(conv2); - - auto conv4 = ngraph::builder::makeConvolution(params[1]->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu4 = std::make_shared(conv4); - - auto concat = std::make_shared(ov::OutputVector{relu4->output(0), relu2->output(0)}, 1); - - auto conv3 = ngraph::builder::makeConvolution(concat, - ngPrc, - {3, 3}, - {1, 1}, - {0, 0}, - {0, 0}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - auto relu3 = std::make_shared(conv3); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), relu3->output(0)}, 1); - ov::ResultVector results{std::make_shared(concat1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - fnPtr->set_friendly_name("SplitConvConcatInputInBranch"); - return fnPtr; -} - -std::shared_ptr makeSplitConvConcatNestedInBranch(std::vector inputShape, ov::element::Type ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape)), - std::make_shared(ngPrc, ov::Shape(inputShape))}; - int localId = 0; -#define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++)); - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[0], split_axis_op, 2); - - SET_NAME(split); - - auto conv1 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv1); - auto relu1 = std::make_shared(conv1); - SET_NAME(relu1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv2); - auto relu2 = std::make_shared(conv2); - SET_NAME(relu2); - - auto nestedSubgraph = [&] { - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[1], split_axis_op, 2); - - SET_NAME(split); - - auto conv1 = 
ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv1); - auto relu1 = std::make_shared(conv1); - SET_NAME(relu1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - SET_NAME(conv2); - auto relu2 = std::make_shared(conv2); - SET_NAME(relu2); - - auto split2_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split2 = std::make_shared(relu2, split2_axis_op, 2); - - SET_NAME(split2); - - auto conv3 = ngraph::builder::makeConvolution(split2->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv3); - auto relu3 = std::make_shared(conv3); - SET_NAME(relu3); - - auto conv4 = ngraph::builder::makeConvolution(split2->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv4); - auto relu4 = std::make_shared(conv4); - SET_NAME(relu4); - - auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); - SET_NAME(concat); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); - SET_NAME(concat1); - - auto conv5 = ngraph::builder::makeConvolution(concat1, - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv5); - auto relu5 = std::make_shared(conv5); - SET_NAME(relu5); - - return relu5; - }(); - auto concat = - std::make_shared(ov::OutputVector{nestedSubgraph->output(0), relu2->output(0)}, 1); - SET_NAME(concat); - - auto conv3 = ngraph::builder::makeConvolution(concat, - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv3); - auto relu3 = std::make_shared(conv3); - SET_NAME(relu3); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), relu3->output(0)}, 1); - SET_NAME(concat1); - ov::ResultVector results{std::make_shared(concat1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - fnPtr->set_friendly_name("SplitConvConcatNestedInBranch"); - return fnPtr; -} - -std::shared_ptr makeSplitConvConcatNestedInBranchNestedOut(std::vector inputShape, - ov::element::Type ngPrc) { - ov::ParameterVector params{std::make_shared(ngPrc, ov::Shape(inputShape)), - std::make_shared(ngPrc, ov::Shape(inputShape))}; - int localId = 0; -#define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++)); - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[0], split_axis_op, 2); - - SET_NAME(split); - - auto conv1 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv1); - auto relu1 = std::make_shared(conv1); - SET_NAME(relu1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - SET_NAME(conv2); - auto relu2 = std::make_shared(conv2); - SET_NAME(relu2); - - auto split3_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split3 = std::make_shared(relu2, split3_axis_op, 2); - SET_NAME(split3); - - auto conv32 = ngraph::builder::makeConvolution(split3->output(1), - ngPrc, - {3, 3}, - {1, 1}, 
- {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - SET_NAME(conv32); - auto relu32 = std::make_shared(conv32); - SET_NAME(relu32); - - auto nestedSubgraph = [&] { - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(params[1], split_axis_op, 2); - SET_NAME(split); - - auto conv1 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv1); - auto relu1 = std::make_shared(conv1); - SET_NAME(relu1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - SET_NAME(conv2); - auto relu2 = std::make_shared(conv2); - SET_NAME(relu2); - - auto split2_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split2 = std::make_shared(relu2, split2_axis_op, 2); - SET_NAME(split2); - - auto conv3 = ngraph::builder::makeConvolution(split2->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv3); - auto relu3 = std::make_shared(conv3); - SET_NAME(relu3); - - auto conv4 = ngraph::builder::makeConvolution(split2->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv4); - auto relu4 = std::make_shared(conv4); - SET_NAME(relu4); - - auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); - SET_NAME(concat); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); - SET_NAME(concat1); - - auto conv5 = ngraph::builder::makeConvolution(concat1, - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv5); - auto relu5 = std::make_shared(conv5); - SET_NAME(relu5); - - return relu5; - }(); - - auto nestedSubgraph1 = [&] { - auto split_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split = std::make_shared(relu32, split_axis_op, 2); - SET_NAME(split); - - auto conv1 = ngraph::builder::makeConvolution(split->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv1); - auto relu1 = std::make_shared(conv1); - SET_NAME(relu1); - - auto conv2 = ngraph::builder::makeConvolution(split->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 10); - SET_NAME(conv2); - auto relu2 = std::make_shared(conv2); - SET_NAME(relu2); - - auto split2_axis_op = - std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); - auto split2 = std::make_shared(relu2, split2_axis_op, 2); - SET_NAME(split2); - - auto conv3 = ngraph::builder::makeConvolution(split2->output(0), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv3); - auto relu3 = std::make_shared(conv3); - SET_NAME(relu3); - - auto conv4 = ngraph::builder::makeConvolution(split2->output(1), - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv4); - auto relu4 = std::make_shared(conv4); - SET_NAME(relu4); - - auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); - SET_NAME(concat); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); - SET_NAME(concat1); - - 
auto conv5 = ngraph::builder::makeConvolution(concat1, - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv5); - auto relu5 = std::make_shared(conv5); - SET_NAME(relu5); - - return relu5; - }(); - - auto concat = - std::make_shared(ov::OutputVector{nestedSubgraph->output(0), split3->output(0)}, 1); - SET_NAME(concat); - - auto conv3 = ngraph::builder::makeConvolution(concat, - ngPrc, - {3, 3}, - {1, 1}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 5); - SET_NAME(conv3); - auto relu3 = std::make_shared(conv3); - SET_NAME(relu3); - - auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), relu3->output(0)}, 1); - SET_NAME(concat1); - ov::ResultVector results{std::make_shared(concat1), - std::make_shared(nestedSubgraph1)}; - std::shared_ptr fnPtr = std::make_shared(results, params); - fnPtr->set_friendly_name("SplitConvConcatNestedInBranchNestedOut"); - return fnPtr; -} - -std::shared_ptr makeConvBias(std::vector inputShape, ov::element::Type type) { - ov::ParameterVector parameter{std::make_shared(type, ov::Shape(inputShape))}; - parameter[0]->set_friendly_name("parameter"); - auto weights = ov::op::v0::Constant::create(type, ov::Shape{6, 3, 1, 1}, {1}); - auto biases = ov::op::v0::Constant::create(type, ov::Shape{6, 1, 1}, {1}); - auto conv = std::make_shared(parameter[0], - weights, - ov::Strides{1, 1}, - ov::CoordinateDiff{0, 0}, - ov::CoordinateDiff{0, 0}, - ov::Strides{1, 1}); - conv->set_friendly_name("conv"); - auto add = std::make_shared(conv, biases); - add->set_friendly_name("add"); - auto result = std::make_shared(add); - result->set_friendly_name("result"); - std::shared_ptr fn_ptr = - std::make_shared(ov::ResultVector{result}, ov::ParameterVector{parameter}); - fn_ptr->set_friendly_name("ConvBias"); - return fn_ptr; -} - -std::shared_ptr makeReadConcatSplitAssign(std::vector inputShape, ov::element::Type type) { - ov::ParameterVector parameter{std::make_shared(type, ov::Shape(inputShape))}; - parameter[0]->set_friendly_name("parameter"); - auto init_const = ov::op::v0::Constant::create(type, inputShape, {0}); - auto read = std::make_shared(init_const, "v0"); - read->set_friendly_name("read"); - std::vector> args = {parameter[0], read}; - auto conc = std::make_shared(args, 3); - conc->set_friendly_name("concat"); - auto res = std::make_shared(conc); - res->set_friendly_name("result"); - const auto axis = ov::op::v0::Constant::create(element::i64, Shape{}, {3}); - axis->set_friendly_name("axis"); - auto crop = std::make_shared(conc, axis, 2); - crop->set_friendly_name("split"); - auto assign = std::make_shared(crop, "v0"); - assign->set_friendly_name("assign"); - std::shared_ptr fn_ptr = - std::make_shared(ov::ResultVector({res}), ov::SinkVector({assign}), ov::ParameterVector{parameter}); - fn_ptr->set_friendly_name("ReadConcatSplitAssign"); - return fn_ptr; -} - -std::shared_ptr makeMatMulBias(std::vector inputShape, ov::element::Type type) { - ov::ParameterVector parameter{std::make_shared(type, ov::Shape(inputShape))}; - parameter[0]->set_friendly_name("parameter"); - auto weights = ov::op::v0::Constant::create(type, ov::Shape{24, 24}, {1}); - auto biases = ov::op::v0::Constant::create(type, ov::Shape{1, 24}, {1}); - auto matmul = std::make_shared(parameter[0], weights); - matmul->set_friendly_name("matmul"); - auto add = std::make_shared(matmul, biases); - add->set_friendly_name("add"); - auto result = std::make_shared(add); - result->set_friendly_name("result"); - std::shared_ptr fn_ptr = - 
std::make_shared(ov::ResultVector{result}, ov::ParameterVector{parameter}); - fn_ptr->set_friendly_name("MatMulBias"); - return fn_ptr; -} - -std::shared_ptr makeConvertTranspose(std::vector inputShape, - std::vector inputOrder, - ov::element::Type type) { - ov::ParameterVector params{std::make_shared(type, ov::Shape(inputShape))}; - params.front()->set_friendly_name("Param_1"); - params.front()->output(0).get_tensor().set_names({"data"}); - const auto order = ov::op::v0::Constant::create(element::i32, {inputOrder.size()}, inputOrder); - - auto convert = std::make_shared(params.front(), type); - convert->set_friendly_name("convert"); - auto transpose = std::make_shared(convert, order); - transpose->set_friendly_name("transpose"); - auto result = std::make_shared(transpose); - result->set_friendly_name("result"); - - std::shared_ptr fn_ptr = - std::make_shared(ov::ResultVector{result}, ov::ParameterVector{params}); - fn_ptr->set_friendly_name("ConvertTranspose"); - return fn_ptr; -} - -std::shared_ptr makeMultipleInputOutputReLU(std::vector inputShape, ov::element::Type_t type) { - auto param1 = std::make_shared(type, ov::Shape(inputShape)); - param1->set_friendly_name("param1"); - param1->output(0).get_tensor().set_names({"data1"}); - auto param2 = std::make_shared(type, ov::Shape(inputShape)); - param2->set_friendly_name("param2"); - param2->output(0).get_tensor().set_names({"data2"}); - auto relu = std::make_shared(param1); - relu->set_friendly_name("relu_op"); - relu->output(0).get_tensor().set_names({"relu"}); - auto result1 = std::make_shared(relu); - result1->set_friendly_name("result1"); - auto concat = std::make_shared(OutputVector{relu, param2}, 1); - concat->set_friendly_name("concat_op"); - concat->output(0).get_tensor().set_names({"concat"}); - auto result2 = std::make_shared(concat); - result2->set_friendly_name("result2"); - auto fn_ptr = std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{param1, param2}); - fn_ptr->set_friendly_name("MultipleInputOutputReLU"); - return fn_ptr; -} - -std::shared_ptr makeMultipleInputOutputDoubleConcat(std::vector inputShape, - ov::element::Type_t type) { - auto param1 = std::make_shared(type, ov::Shape{inputShape}); - param1->set_friendly_name("param1"); - param1->output(0).get_tensor().set_names({"data1"}); - auto param2 = std::make_shared(type, ov::Shape(inputShape)); - param2->set_friendly_name("param2"); - param2->output(0).get_tensor().set_names({"data2"}); - auto concat1 = std::make_shared(OutputVector{param1, param2}, 1); - concat1->set_friendly_name("concat_op1"); - concat1->output(0).get_tensor().set_names({"concat1"}); - auto result1 = std::make_shared(concat1); - result1->set_friendly_name("result1"); - auto concat2 = std::make_shared(OutputVector{concat1, param2}, 1); - concat2->set_friendly_name("concat_op2"); - concat2->output(0).get_tensor().set_names({"concat2"}); - auto result2 = std::make_shared(concat2); - result2->set_friendly_name("result2"); - auto fn_ptr = std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{param1, param2}); - fn_ptr->set_friendly_name("makeMultipleInputOutputDoubleConcat"); - return fn_ptr; -} - std::shared_ptr makeSingleConcatWithConstant(std::vector inputShape, ov::element::Type type) { ov::ParameterVector parameter{std::make_shared(type, ov::Shape(inputShape))}; parameter[0]->set_friendly_name("Param_1"); @@ -1383,45 +297,6 @@ std::shared_ptr makeConcatWithParams(std::vector inputShape, fn_ptr->set_friendly_name("SingleConcatWithParams"); return fn_ptr; } - 
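Several of the builders deleted above produced multi-input, multi-output models. The essence of the removed makeMultipleInputOutputReLU, rewritten in the quoted-include style this patch standardizes on, fits in a few lines (a sketch, not code from the patch):

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/opsets/opset1.hpp"

// Two parameters, two results: Result(Relu(p1)) and Result(Concat(Relu(p1), p2)).
std::shared_ptr<ov::Model> make_multi_io_relu(const ov::Shape& shape = ov::Shape{1, 1, 32, 32}) {
    auto p1 = std::make_shared<ov::opset1::Parameter>(ov::element::f32, shape);
    auto p2 = std::make_shared<ov::opset1::Parameter>(ov::element::f32, shape);
    auto relu = std::make_shared<ov::opset1::Relu>(p1);
    auto concat = std::make_shared<ov::opset1::Concat>(ov::OutputVector{relu, p2}, 1);
    return std::make_shared<ov::Model>(ov::ResultVector{std::make_shared<ov::opset1::Result>(relu),
                                                        std::make_shared<ov::opset1::Result>(concat)},
                                       ov::ParameterVector{p1, p2});
}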
-std::shared_ptr makeSingleSplit(std::vector inputShape, ov::element::Type_t type) { - auto param1 = std::make_shared(type, ov::Shape{inputShape}); - param1->set_friendly_name("param1"); - param1->output(0).get_tensor().set_names({"data1"}); - auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); - auto split = std::make_shared(param1, axis_node, 2); - split->set_friendly_name("split"); - split->output(0).get_tensor().set_names({"tensor_split_1"}); - split->output(1).get_tensor().set_names({"tensor_split_2"}); - auto result1 = std::make_shared(split->output(0)); - result1->set_friendly_name("result1"); - auto result2 = std::make_shared(split->output(1)); - result2->set_friendly_name("result2"); - auto fn_ptr = std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{param1}); - fn_ptr->set_friendly_name("SingleSplit"); - return fn_ptr; -} - -std::shared_ptr makeSplitConcat(std::vector inputShape, ov::element::Type_t type) { - auto param1 = std::make_shared(type, ov::Shape{inputShape}); - param1->set_friendly_name("Param1"); - param1->output(0).get_tensor().set_names({"data1"}); - auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); - auto split = std::make_shared(param1, axis_node, 2); - split->set_friendly_name("Split"); - split->output(0).get_tensor().set_names({"tensor_split_1"}); - split->output(1).get_tensor().set_names({"tensor_split_2"}); - - auto concat = std::make_shared(OutputVector{split->output(0), split->output(1)}, 1); - concat->set_friendly_name("Concat_op"); - concat->output(0).get_tensor().set_names({"Concat"}); - auto result = std::make_shared(concat); - result->set_friendly_name("Result"); - auto fn_ptr = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param1}); - fn_ptr->set_friendly_name("SplitConcat"); - return fn_ptr; -} - } // namespace subgraph } // namespace builder } // namespace ngraph diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp index b842c56da5966e..be736a1c1fd66c 100644 --- a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp @@ -4,7 +4,7 @@ #include "precision_propagation.hpp" #include -#include +#include "openvino/opsets/opset1.hpp" namespace ov { namespace test { diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp index e73194071cf4e6..1d3eb20a4791c3 100644 --- a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation_convertion.cpp @@ -4,7 +4,7 @@ #include "precision_propagation_convertion.hpp" #include -#include +#include "openvino/opsets/opset1.hpp" namespace ov { namespace test { diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp b/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp index 6e9e85edfaf0f2..4e526c519954a2 100644 --- a/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/precomp.hpp @@ -4,8 +4,8 @@ #pragma once -#include -#include +#include "openvino/openvino.hpp" +#include "openvino/op/ops.hpp" #include #include diff --git a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp index 
6dcf8621df3396..78f61752e66c81 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_lowered.cpp
@@ -15,7 +15,7 @@ std::shared_ptr AddFunctionLoweredBroadcast::initLowered() const {
     auto data0 = std::make_shared(precision, input_shapes[0]);
     std::shared_ptr add_input0 = nullptr;
     if (!broadcast_shapes[0].empty() && broadcast_shapes[0].back() != input_shapes[0].rbegin()->get_length()) {
-        add_input0 = std::make_shared(data0, broadcast_shapes[0]);
+        add_input0 = std::make_shared(data0, *broadcast_shapes[0].rbegin());
     } else {
         add_input0 = std::make_shared(data0);
     }
@@ -23,7 +23,7 @@ std::shared_ptr AddFunctionLoweredBroadcast::initLowered() const {
     auto data1 = std::make_shared(precision, input_shapes[1]);
     std::shared_ptr add_input1 = nullptr;
     if (!broadcast_shapes[1].empty() && broadcast_shapes[1].back() != input_shapes[1].rbegin()->get_length()) {
-        add_input1 = std::make_shared(data1, broadcast_shapes[1]);
+        add_input1 = std::make_shared(data1, *broadcast_shapes[1].rbegin());
     } else {
         add_input1 = std::make_shared(data1);
     }
@@ -45,13 +45,13 @@ std::shared_ptr EltwiseThreeInputsLoweredFunction::initLowered() cons
     } else {
         // The last dim is processed by the vector Tile, so BroadcastLoad is required if the last dim is being broadcast
         if (input_shapes[i].rbegin()->get_length() == 1 && broadcast_shapes[i].back() != 1) {
-            return std::make_shared(input_params[i], broadcast_shapes[i]);
+            return std::make_shared(input_params[i], *broadcast_shapes[i].rbegin());
            // Todo: Cover this logic with functional tests; review the FakeBroadcast emitter
            // Broadcasting of other dims is handled by BroadcastMove. Strictly speaking, broadcasting is achieved via
            // appropriate pointer arithmetic in this case.
} else { auto load = std::make_shared(input_params[i]); - return std::make_shared(load, broadcast_shapes[i]); + return std::make_shared(load, *broadcast_shapes[i].rbegin()); } } }; @@ -66,7 +66,7 @@ std::shared_ptr EltwiseThreeInputsLoweredFunction::initLowered() cons if (broadcast_shapes[2].empty()) sub_out = sub; else - sub_out = std::make_shared(sub, broadcast_shapes[2]); + sub_out = std::make_shared(sub, *broadcast_shapes[2].rbegin()); auto mul = std::make_shared(add, sub_out); auto store = std::make_shared(mul); return std::make_shared(NodeVector{store}, input_params); @@ -119,9 +119,7 @@ std::shared_ptr BroadcastAddLoweredFunction::initLowered() const { ov::NodeVector loads(datas.size(), nullptr); for (auto i = 0; i < datas.size(); i++) { if (input_shapes[i].get_shape().back() != last_dim) { - auto new_shape = input_shapes[i]; - new_shape[new_shape.size() - 1] = last_dim; - loads[i] = std::make_shared(datas[i], new_shape); + loads[i] = std::make_shared(datas[i], ov::Dimension(last_dim)); } else { loads[i] = std::make_shared(datas[i]); } diff --git a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp index 6953fdb5adc25a..89562cb6c14457 100644 --- a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_roll_matmul_roll.cpp @@ -4,7 +4,7 @@ #include "subgraph_roll_matmul_roll.hpp" #include -#include +#include "openvino/opsets/opset1.hpp" namespace ov { namespace test { diff --git a/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp index 3cf17045bc1b50..942a4a5e3b1dc8 100644 --- a/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp @@ -3,7 +3,7 @@ // #include "two_binary_ops.hpp" -#include +#include "openvino/opsets/opset1.hpp" #include "snippets/op/convert_saturation.hpp" namespace ov { diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/activation.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/activation.hpp index 5a6b13d422bd81..61d5c586a55492 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/activation.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/activation.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/augru_cell.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/augru_cell.hpp index 4c35345db5178f..a200cb22752d0e 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/augru_cell.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/augru_cell.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/binary_convolution.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/binary_convolution.hpp index 9cb3f8d56714cd..82bb0e640db3a6 100644 --- 
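The subgraph_lowered.cpp hunks above switch the broadcast helper ops from taking a full target shape to taking just the last dimension (*broadcast_shapes[i].rbegin(), or ov::Dimension(last_dim)). The selection logic reduces to the sketch below; the template arguments were lost in extraction, so the ov::snippets::op::Load/BroadcastLoad types are an assumption on my part:

#include <memory>

#include "snippets/op/broadcastload.hpp"  // assumed ops; the original template args are not recoverable
#include "snippets/op/load.hpp"

std::shared_ptr<ov::Node> make_load(const std::shared_ptr<ov::Node>& input,
                                    const ov::PartialShape& input_shape,
                                    const ov::PartialShape& broadcast_shape) {
    // BroadcastLoad is only needed when the last dim itself is broadcast; it now
    // receives a single ov::Dimension rather than the whole target shape.
    if (input_shape.rbegin()->get_length() == 1 && *broadcast_shape.rbegin() != 1)
        return std::make_shared<ov::snippets::op::BroadcastLoad>(input, *broadcast_shape.rbegin());
    return std::make_shared<ov::snippets::op::Load>(input);
}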
a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/binary_convolution.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/binary_convolution.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/comparison.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/comparison.hpp index 24e6abffae1131..da722586d77076 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/comparison.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/comparison.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution.hpp index 34d29174bcca7f..1ca630c77a74f8 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution_backprop_data.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution_backprop_data.hpp index 4362ea90720fb9..c3e63ac29c0aa9 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution_backprop_data.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/convolution_backprop_data.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/dft.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/dft.hpp index c90259bbe2f800..71e5f341da7ee5 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/dft.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/dft.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/eltwise.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/eltwise.hpp index 8e491be7144bc4..658c62bea4a329 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/eltwise.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/eltwise.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_offsets_sum.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_offsets_sum.hpp index 281022fd74fe04..a86c3482575274 100644 --- 
a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_offsets_sum.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_offsets_sum.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_packed_sum.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_packed_sum.hpp index 05ecb45b95a792..bf95e26ecd9c17 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_packed_sum.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_bag_packed_sum.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_segments_sum.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_segments_sum.hpp index ad36485b0b306e..b91643c9f2a65e 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_segments_sum.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/embedding_segments_sum.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/fully_connected.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/fully_connected.hpp index 59449b6a6d57c3..e6d2c3aed3a610 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/fully_connected.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/fully_connected.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gather_nd.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gather_nd.hpp index ee4041eac628d9..e24de6bdceaede 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gather_nd.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gather_nd.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution.hpp index 5790a7769d7ec3..8b2aeb09d10a2a 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution_backprop_data.hpp 
b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution_backprop_data.hpp index 190e367b6f2a85..cf71c157ca736c 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution_backprop_data.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/group_convolution_backprop_data.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gru_cell.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gru_cell.hpp index 3e2a06e6e47379..b781c2eb3d198c 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gru_cell.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/gru_cell.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/logical.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/logical.hpp index eb5d6684cb3297..443266e0a8655d 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/logical.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/logical.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/lstm_cell.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/lstm_cell.hpp index 058b92388b5bb4..23658f510de1e8 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/lstm_cell.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/lstm_cell.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rdft.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rdft.hpp index 425b6cb30bab8d..80ad0332ab3551 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rdft.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rdft.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reduce.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reduce.hpp index 06c062460c306a..8472410b9cf2ce 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reduce.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reduce.hpp @@ -1,6 +1,7 @@ // Copyright (C) 2018-2023 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // +#pragma once #pragma once diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rnn_cell.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rnn_cell.hpp 
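The long run of header hunks above adds an include guard right after the license banner of each node_builders header. The intended layout of every one of these files is the same (a sketch; the include and declarations are illustrative):

// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once  // single include guard, directly after the license banner

#include "openvino/core/node.hpp"

namespace ov {
namespace test {
namespace utils {
// builder declarations go here
}  // namespace utils
}  // namespace test
}  // namespace ov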
index 463a4930f5c27f..65e304a8ee9030 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rnn_cell.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/rnn_cell.hpp
@@ -1,6 +1,7 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
+#pragma once
 #pragma once
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
index 5bb12e821ad4c4..9b2897bb049b1d 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
@@ -9,9 +9,28 @@
 namespace ov {
 namespace test {
 namespace utils {
+struct InputGenerateData {
+    double start_from = 0;
+    uint32_t range = 10;
+    int32_t resolution = 1;
+    int32_t seed = 1;
+
+    InputGenerateData(double _start_from = 0, uint32_t _range = 10, int32_t _resolution = 1, int32_t _seed = 1)
+        : start_from(_start_from),
+          range(_range),
+          resolution(_resolution),
+          seed(_seed){};
+};
+
+ov::Tensor create_and_fill_tensor(const ov::element::Type element_type,
+                                  const ov::Shape& shape,
+                                  const InputGenerateData& inGenData = InputGenerateData(0, 10, 1, 1));
+
+// Legacy impl for contrib repo
+// TODO: remove this after dependent repos are cleaned up
 ov::Tensor create_and_fill_tensor(const ov::element::Type element_type,
                                   const ov::Shape& shape,
-                                  const uint32_t range = 10,
+                                  const uint32_t range,
                                   const double_t start_from = 0,
                                   const int32_t resolution = 1,
                                   const int seed = 1);
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/2_input_subtract.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/2_input_subtract.hpp
new file mode 100644
index 00000000000000..46001065f260ee
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/2_input_subtract.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "openvino/core/model.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_2_input_subtract(ov::Shape input_shape = {1, 3, 24, 24},
+                                                 ov::element::Type type = ov::element::f32);
+} // namespace utils
+} // namespace test
+} // namespace ov
\ No newline at end of file
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/concat_with_params.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/concat_with_params.hpp
new file mode 100644
index 00000000000000..6daf65331e5ef3
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/concat_with_params.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "openvino/core/model.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_concat_with_params(ov::Shape input_shape = {1, 1, 32, 32},
+                                                   ov::element::Type type = ov::element::f32);
+} // namespace utils
+} // namespace test
+} // namespace ov
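For call sites migrating to the new overload, a minimal usage sketch follows; it assumes, per `fill_data_random`, that generated values fall roughly in `[start_from, start_from + range)` with granularity `1/resolution` (the exact distribution is an implementation detail), and the element type and shapes are illustrative:

```cpp
#include "common_test_utils/ov_tensor_utils.hpp"

ov::Tensor make_lengths() {
    // New-style call: generation parameters travel together in one struct.
    ov::test::utils::InputGenerateData gen(/*_start_from=*/0, /*_range=*/10, /*_resolution=*/1, /*_seed=*/42);
    return ov::test::utils::create_and_fill_tensor(ov::element::i64, ov::Shape{2, 3}, gen);
}

ov::Tensor make_lengths_legacy() {
    // Legacy overload kept for the contrib repo; note the argument order is
    // (range, start_from), the reverse of the InputGenerateData constructor.
    return ov::test::utils::create_and_fill_tensor(ov::element::i64, ov::Shape{2, 3}, 10, 0, 1, 42);
}
```

diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_bias.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_bias.hpp
new file mode 100644
index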
00000000000000..03d244fc189dff --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_bias.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_bias(ov::Shape input_shape = {1, 3, 24, 24}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu.hpp new file mode 100644 index 00000000000000..c869671e20c2e7 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu.hpp @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_pool_relu(ov::Shape input_shape = {1, 1, 32, 32}, + ov::element::Type type = ov::element::f32); + +std::shared_ptr make_conv_pool2_relu2(ov::Shape input_shape = {1, 1, 32, 32}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp new file mode 100644 index 00000000000000..4f4d54d2e93af0 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu_no_reshapes.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_pool_relu_no_reshapes(ov::Shape input_shape = {1, 1, 32, 32}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp new file mode 100644 index 00000000000000..24f8a72525a599 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/conv_pool_relu_non_zero.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_pool_relu_non_zero(ov::Shape input_shape = {1, 1, 32, 32}, + ov::element::Type type = ov::element::f32); + +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/convert_transpose.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/convert_transpose.hpp new file mode 100644 index 00000000000000..92fdebe5f3e004 --- /dev/null +++ 
b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/convert_transpose.hpp @@ -0,0 +1,16 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_convert_transpose(ov::Shape input_shape = {1, 3, 24, 24}, + std::vector input_order = {0, 1, 2, 3}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/detection_output.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/detection_output.hpp new file mode 100644 index 00000000000000..562c49180cb1be --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/detection_output.hpp @@ -0,0 +1,14 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_detection_output(ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/kso_func.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/kso_func.hpp new file mode 100644 index 00000000000000..adf4cf47992950 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/kso_func.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_kso_function(ov::Shape input_shape = {1, 4, 20, 20}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/matmul_bias.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/matmul_bias.hpp new file mode 100644 index 00000000000000..3844abf9f98fc5 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/matmul_bias.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_matmul_bias(ov::Shape input_shape = {1, 3, 24, 24}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/multi_single_conv.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/multi_single_conv.hpp new file mode 100644 index 00000000000000..cd399b1e92cb0a --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/multi_single_conv.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr 
make_multi_single_conv(ov::Shape input_shape = {1, 3, 24, 24}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp new file mode 100644 index 00000000000000..35f15f0a62bff4 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/multiple_input_outpput_double_concat.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_multiple_input_output_double_concat(ov::Shape input_shape = {1, 1, 32, 32}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp new file mode 100644 index 00000000000000..1472007771a918 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_nested_branch_conv_concat(ov::Shape input_shape = {1, 4, 20, 20}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/nested_split_conv_concat.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/nested_split_conv_concat.hpp new file mode 100644 index 00000000000000..ac02ce41d24ace --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/nested_split_conv_concat.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_nested_split_conv_concat(ov::Shape input_shape = {1, 4, 20, 20}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/read_concat_split_assign.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/read_concat_split_assign.hpp new file mode 100644 index 00000000000000..4c193e365133de --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/read_concat_split_assign.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_read_concat_split_assign(ov::Shape input_shape = {1, 1, 2, 4}, + ov::element::Type type = 
ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_concat_with_constant.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_concat_with_constant.hpp new file mode 100644 index 00000000000000..959c2bb3490699 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_concat_with_constant.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_single_concat_with_constant(ov::Shape input_shape = {1, 1, 2, 4}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_conv.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_conv.hpp new file mode 100644 index 00000000000000..0fd9472404decc --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_conv.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_single_conv(ov::Shape input_shape = {1, 3, 24, 24}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_split.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_split.hpp new file mode 100644 index 00000000000000..461620c4eb1443 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/single_split.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_single_split(ov::Shape input_shape = {1, 4, 32, 32}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_concat.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_concat.hpp new file mode 100644 index 00000000000000..cbd3ff84bbd815 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_concat.hpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include "openvino/core/model.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_split_concat(ov::Shape input_shape = {1, 4, 24, 24}, + ov::element::Type type = ov::element::f32); +} // namespace utils +} // namespace test +} // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_conv_concat.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_conv_concat.hpp new file mode 100644 index 
00000000000000..ba2c28a4c947de
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_conv_concat.hpp
@@ -0,0 +1,25 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "openvino/core/model.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_split_conv_concat(ov::Shape input_shape = {1, 4, 20, 20},
+                                                  ov::element::Type type = ov::element::f32);
+
+std::shared_ptr<ov::Model> make_cplit_conv_concat_input_in_branch(ov::Shape input_shape = {1, 4, 20, 20},
+                                                                  ov::element::Type type = ov::element::f32);
+
+std::shared_ptr<ov::Model> make_cplit_conv_concat_nested_in_branch(ov::Shape input_shape = {1, 4, 20, 20},
+                                                                   ov::element::Type type = ov::element::f32);
+
+std::shared_ptr<ov::Model> make_cplit_conv_concat_nested_in_branch_nested_out(
+    ov::Shape input_shape = {1, 4, 20, 20},
+    ov::element::Type type = ov::element::f32);
+} // namespace utils
+} // namespace test
+} // namespace ov
\ No newline at end of file
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_multi_conv_concat.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_multi_conv_concat.hpp
new file mode 100644
index 00000000000000..31446b3cf64b02
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/split_multi_conv_concat.hpp
@@ -0,0 +1,15 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "openvino/core/model.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_split_multi_conv_concat(ov::Shape input_shape = {1, 4, 20, 20},
+                                                        ov::element::Type type = ov::element::f32);
+} // namespace utils
+} // namespace test
+} // namespace ov
\ No newline at end of file
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp
new file mode 100644
index 00000000000000..43d9806fccd7ae
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp
@@ -0,0 +1,18 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#pragma once
+
+#include "openvino/core/model.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_ti_with_lstm_cell(ov::element::Type type = ov::element::f32,
+                                                  size_t N = 32,   // Batch size
+                                                  size_t L = 10,   // Sequence length
+                                                  size_t I = 8,    // Input size
+                                                  size_t H = 32);  // Hidden size
+} // namespace utils
+} // namespace test
+} // namespace ov
\ No newline at end of file
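Every builder above returns an `ov::Model`, so new call sites can smoke-test them directly; a hedged sketch of a typical consumer (the device name and the checked condition are illustrative, not part of this change):

```cpp
#include "common_test_utils/subgraph_builders/split_conv_concat.hpp"
#include "openvino/runtime/core.hpp"

int main() {
    // Build the default {1, 4, 20, 20} f32 split -> conv -> concat test graph.
    std::shared_ptr<ov::Model> model = ov::test::utils::make_split_conv_concat();

    // Compile it as a quick sanity check; the device choice is illustrative.
    ov::Core core;
    ov::CompiledModel compiled = core.compile_model(model, "CPU");
    return compiled.inputs().size() == 1 ? 0 : 1;
}
```

diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/augru_cell.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/augru_cell.cpp
index 41bfa327109461..0a55d743d08b9d 100644
--- a/src/tests/test_utils/common_test_utils/src/node_builders/augru_cell.cpp
+++ b/src/tests/test_utils/common_test_utils/src/node_builders/augru_cell.cpp
@@ -54,11 +54,11 @@ std::shared_ptr<ov::Node> make_augru(const OutputVector& in,
         }
         case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST:
         case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: {
+            ov::test::utils::InputGenerateData in_data;
+            in_data.start_from = 0;
+            in_data.range = in[0].get_shape()[1];
             auto seq_lengths_tensor =
-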
ov::test::utils::create_and_fill_tensor(ov::element::i64, - constants[3], - static_cast(in[0].get_shape()[1]), - 0); + ov::test::utils::create_and_fill_tensor(ov::element::i64, constants[3], in_data); seq_lengths = std::make_shared(seq_lengths_tensor); } case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_PARAM: diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/convolution.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/convolution.cpp index 29592d1e053968..e760bef588be51 100644 --- a/src/tests/test_utils/common_test_utils/src/node_builders/convolution.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/convolution.cpp @@ -32,7 +32,7 @@ std::shared_ptr make_convolution(const ov::Output& in, if (!filter_weights.empty()) { filter_weights_node = std::make_shared(type, filter_weights_shape, filter_weights); } else { - auto tensor = create_and_fill_tensor(type, filter_weights_shape); + auto tensor = create_and_fill_tensor(type, filter_weights_shape, 9, 1); filter_weights_node = std::make_shared(tensor); } @@ -49,7 +49,7 @@ std::shared_ptr make_convolution(const ov::Output& in, biases_weights_node = std::make_shared(type, ov::Shape{1, num_out_channels, 1, 1}, biases_weights); } else { - auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1}); + auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1}, 9, 1); biases_weights_node = std::make_shared(tensor); } @@ -86,7 +86,7 @@ std::shared_ptr make_convolution(const ov::Output& in_data, biases_weights_node = std::make_shared(type, ov::Shape{1, num_out_channels, 1, 1}, biases_weights); } else { - auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1}); + auto tensor = create_and_fill_tensor(type, ov::Shape{1, num_out_channels, 1, 1}, 9, 1); biases_weights_node = std::make_shared(tensor); } diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/gru_cell.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/gru_cell.cpp index 93d446804af736..653a33b358c602 100644 --- a/src/tests/test_utils/common_test_utils/src/node_builders/gru_cell.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/gru_cell.cpp @@ -69,11 +69,11 @@ std::shared_ptr make_gru(const OutputVector& in, } case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST: case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = in[0].get_shape()[1]; auto seq_lengths_tensor = - ov::test::utils::create_and_fill_tensor(ov::element::i64, - constants[3], - static_cast(in[0].get_shape()[1]), - 0); + ov::test::utils::create_and_fill_tensor(ov::element::i64, constants[3], in_data); seq_lengths = std::make_shared(seq_lengths_tensor); break; } diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/lstm_cell.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/lstm_cell.cpp index edcb82437b44e3..747693084772f8 100644 --- a/src/tests/test_utils/common_test_utils/src/node_builders/lstm_cell.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/lstm_cell.cpp @@ -31,14 +31,16 @@ std::shared_ptr make_lstm(const std::vector>& in, auto B = std::make_shared(b_tensor); if (WRB_range > 0) { - w_tensor = - ov::test::utils::create_and_fill_tensor(in[0].get_element_type(), constants[0], 2 * WRB_range, -WRB_range); + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 
-WRB_range; + in_data.range = 2 * WRB_range; + w_tensor = ov::test::utils::create_and_fill_tensor(in[0].get_element_type(), constants[0], in_data); W = std::make_shared(w_tensor); - r_tensor = - ov::test::utils::create_and_fill_tensor(in[0].get_element_type(), constants[1], 2 * WRB_range, -WRB_range); + + r_tensor = ov::test::utils::create_and_fill_tensor(in[0].get_element_type(), constants[1], in_data); R = std::make_shared(r_tensor); - b_tensor = - ov::test::utils::create_and_fill_tensor(in[0].get_element_type(), constants[2], 2 * WRB_range, -WRB_range); + + b_tensor = ov::test::utils::create_and_fill_tensor(in[0].get_element_type(), constants[2], in_data); B = std::make_shared(b_tensor); } if (!make_sequence) { @@ -80,11 +82,11 @@ std::shared_ptr make_lstm(const std::vector>& in, } case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST: case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = in[0].get_shape()[1]; auto seq_lengths_tensor = - ov::test::utils::create_and_fill_tensor(ov::element::i64, - constants[3], - static_cast(in[0].get_shape()[1]), - 0); + ov::test::utils::create_and_fill_tensor(ov::element::i64, constants[3], in_data); seq_lengths = std::make_shared(seq_lengths_tensor); break; } diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/rnn_cell.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/rnn_cell.cpp index 77ab22e4e2bb6f..17ac89a81bc797 100644 --- a/src/tests/test_utils/common_test_utils/src/node_builders/rnn_cell.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/rnn_cell.cpp @@ -67,11 +67,11 @@ std::shared_ptr make_rnn(const OutputVector& in, } case ov::test::utils::SequenceTestsMode::CONVERT_TO_TI_RAND_SEQ_LEN_CONST: case ov::test::utils::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_CONST: { + ov::test::utils::InputGenerateData in_data; + in_data.start_from = 0; + in_data.range = in[0].get_shape()[1]; auto seq_lengths_tensor = - ov::test::utils::create_and_fill_tensor(ov::element::i64, - constants[3], - static_cast(in[0].get_shape()[1]), - 0); + ov::test::utils::create_and_fill_tensor(ov::element::i64, constants[3], in_data); seq_lengths = std::make_shared(seq_lengths_tensor); break; } diff --git a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp index f559e8f945274e..b39a53bb296fcf 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_tensor_utils.cpp @@ -13,44 +13,43 @@ namespace test { namespace utils { ov::Tensor create_and_fill_tensor(const ov::element::Type element_type, const ov::Shape& shape, - const uint32_t range, - const double_t start_from, - const int32_t resolution, - const int seed) { - auto tensor = ov::Tensor{element_type, shape}; -#define CASE(X) \ - case X: \ - fill_data_random(tensor.data::value_type>(), \ - shape_size(shape), \ - range, \ - start_from, \ - resolution, \ - seed); \ + const InputGenerateData& inGenData) { + auto tensor = ov::Tensor(element_type, shape); + +#define CASE(X) \ + case X: \ + fill_data_random(tensor.data>(), \ + shape_size(shape), \ + inGenData.range, \ + inGenData.start_from, \ + inGenData.resolution, \ + inGenData.seed); \ break; + switch (element_type) { - CASE(ov::element::Type_t::boolean) - CASE(ov::element::Type_t::i8) - CASE(ov::element::Type_t::i16) - CASE(ov::element::Type_t::i32) - 
CASE(ov::element::Type_t::i64)
-        CASE(ov::element::Type_t::u8)
-        CASE(ov::element::Type_t::u16)
-        CASE(ov::element::Type_t::u32)
-        CASE(ov::element::Type_t::u64)
-        CASE(ov::element::Type_t::bf16)
-        CASE(ov::element::Type_t::f16)
-        CASE(ov::element::Type_t::f32)
-        CASE(ov::element::Type_t::f64)
+        CASE(ov::element::boolean)
+        CASE(ov::element::i8)
+        CASE(ov::element::i16)
+        CASE(ov::element::i32)
+        CASE(ov::element::i64)
+        CASE(ov::element::u8)
+        CASE(ov::element::u16)
+        CASE(ov::element::u32)
+        CASE(ov::element::u64)
+        CASE(ov::element::bf16)
+        CASE(ov::element::f16)
+        CASE(ov::element::f32)
+        CASE(ov::element::f64)
     case ov::element::Type_t::u1:
     case ov::element::Type_t::i4:
     case ov::element::Type_t::u4:
     case ov::element::Type_t::nf4:
         fill_data_random(static_cast<uint8_t*>(tensor.data()),
                          tensor.get_byte_size(),
-                         range,
-                         start_from,
-                         resolution,
-                         seed);
+                         inGenData.range,
+                         inGenData.start_from,
+                         inGenData.resolution,
+                         inGenData.seed);
         break;
     default:
         OPENVINO_THROW("Unsupported element type: ", element_type);
@@ -59,6 +58,19 @@ ov::Tensor create_and_fill_tensor(const ov::element::Type element_type,
     return tensor;
 }
 
+// Legacy impl for contrib repo
+// TODO: remove this after dependent repos are cleaned up
+ov::Tensor create_and_fill_tensor(const ov::element::Type element_type,
+                                  const ov::Shape& shape,
+                                  const uint32_t range,
+                                  const double_t start_from,
+                                  const int32_t resolution,
+                                  const int seed) {
+    return create_and_fill_tensor(element_type,
+                                  shape,
+                                  ov::test::utils::InputGenerateData(start_from, range, resolution, seed));
+}
+
 ov::Tensor create_and_fill_tensor_unique_sequence(const ov::element::Type element_type,
                                                   const ov::Shape& shape,
                                                   const int32_t start_from,
diff --git a/src/tests/test_utils/common_test_utils/src/postgres_helpers.cpp b/src/tests/test_utils/common_test_utils/src/postgres_helpers.cpp
index d31605a154bb2d..74861d36e16957 100644
--- a/src/tests/test_utils/common_test_utils/src/postgres_helpers.cpp
+++ b/src/tests/test_utils/common_test_utils/src/postgres_helpers.cpp
@@ -459,7 +459,7 @@ std::string get_hostname(void) {
 #endif
     }
     return cHostName;
-}
+}  // namespace PostgreSQLHelpers
 
 void add_pair(std::map<std::string, std::string>& keyValues, const std::string& key, const std::string& value) {
     size_t dPos;
@@ -480,10 +480,22 @@ void add_pair(std::map<std::string, std::string>& keyValues, const std::string&
             keyValues["opSet"] = value.substr(dPos + 1);
         }
     }
+    // Define a subgraph extractor as an operation
+    if (key == "Extractor") {
+        keyValues["opName"] = value;
+        keyValues["opSet"] = "subgraph";  // Needs to be set later
+    }
     // Parse IR for opName and hash
     if (key == "IR") {
-        keyValues["hashXml"] = value;
-        keyValues["pathXml"] = value + ".xml";
+        if ((dPos = value.find_last_of('/')) != std::string::npos ||
+            (dPos = value.find_last_of('\\')) != std::string::npos) {
+            dPos += 1;  // Skip the slash
+            keyValues["hashXml"] = value.substr(dPos, value.length() - dPos - 4);  // exclude extension
+            keyValues["pathXml"] = value.substr(dPos);
+        } else {
+            keyValues["hashXml"] = value;
+            keyValues["pathXml"] = value + ".xml";
+        }
         return;
     }
     // Parse Function for opName and opSet
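The reworked `IR` branch now accepts either a bare hash or a full path to a serialized model and derives both keys from it. A standalone sketch of the same parsing, where `parse_ir_value` is a hypothetical stand-in for the branch above (it assumes the value ends in a four-character ".xml" extension when a path separator is present):

```cpp
#include <iostream>
#include <string>

// Mirrors the add_pair("IR", ...) branch above; illustrative only.
void parse_ir_value(const std::string& value, std::string& hashXml, std::string& pathXml) {
    size_t dPos;
    if ((dPos = value.find_last_of('/')) != std::string::npos ||
        (dPos = value.find_last_of('\\')) != std::string::npos) {
        dPos += 1;                                                // skip the separator
        hashXml = value.substr(dPos, value.length() - dPos - 4);  // strip the ".xml" extension
        pathXml = value.substr(dPos);
    } else {
        hashXml = value;
        pathXml = value + ".xml";
    }
}

int main() {
    std::string hash, path;
    parse_ir_value("subgraphs/1a2b3c.xml", hash, path);
    std::cout << hash << " " << path << "\n";  // prints: 1a2b3c 1a2b3c.xml
    parse_ir_value("1a2b3c", hash, path);
    std::cout << hash << " " << path << "\n";  // prints: 1a2b3c 1a2b3c.xml
}
```

diff --git a/src/tests/test_utils/common_test_utils/src/postgres_link.cpp b/src/tests/test_utils/common_test_utils/src/postgres_link.cpp
index 30b5a1f3ec1524..a6403abefe6ea5 100644
--- a/src/tests/test_utils/common_test_utils/src/postgres_link.cpp
+++ b/src/tests/test_utils/common_test_utils/src/postgres_link.cpp
@@ -474,7 +474,7 @@ class PostgreSQLEventListener : public ::testing::EmptyTestEventListener {
         std::stringstream sstr;
         sstr << "DELETE FROM test_results_temp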
WHERE tr_id=" << this->testId; auto pgresult = connectionKeeper->query(sstr.str().c_str(), PGRES_COMMAND_OK); - CHECK_PGRESULT(pgresult, "Cannot remove waste results", return); + CHECK_PGRESULT(pgresult, "Cannot remove waste results", return ); this->testId = 0; testDictionary.clear(); @@ -557,7 +557,7 @@ class PostgreSQLEventListener : public ::testing::EmptyTestEventListener { << ", run_count=" << test_suite.test_to_run_count() << ", total_count=" << test_suite.total_test_count() << " WHERE sr_id=" << this->testSuiteId; auto pgresult = connectionKeeper->query(sstr.str().c_str(), PGRES_COMMAND_OK); - CHECK_PGRESULT(pgresult, "Cannot update test suite results", return); + CHECK_PGRESULT(pgresult, "Cannot update test suite results", return ); this->testSuiteId = 0; if (reportingLevel == REPORT_LVL_FAST) { @@ -641,13 +641,13 @@ class PostgreSQLEventListener : public ::testing::EmptyTestEventListener { std::stringstream sstr; sstr << "UPDATE runs SET end_time=NOW() WHERE run_id=" << this->testRunId << " AND end_timequery(sstr.str().c_str(), PGRES_COMMAND_OK); - CHECK_PGRESULT(pgresult, "Cannot update run finish info", return); + CHECK_PGRESULT(pgresult, "Cannot update run finish info", return ); sstr.str(""); sstr.clear(); sstr << "UPDATE sessions SET end_time=NOW() WHERE session_id=" << this->sessionId << " AND end_timequery(sstr.str().c_str(), PGRES_COMMAND_OK); - CHECK_PGRESULT(pgresult, "Cannot update session finish info", return); + CHECK_PGRESULT(pgresult, "Cannot update session finish info", return ); } /* Prohobit creation outsize of class, need to make a Singleton */ diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/2_input_subtract.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/2_input_subtract.cpp new file mode 100644 index 00000000000000..cec78a6fa65141 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/2_input_subtract.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/2_input_subtract.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/subtract.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_2_input_subtract(ov::Shape input_shape, ov::element::Type type) { + auto param0 = std::make_shared(type, input_shape); + auto param1 = std::make_shared(type, input_shape); + auto subtract = std::make_shared(param0, param1); + auto result = std::make_shared(subtract); + + auto model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param0, param1}); + model->set_friendly_name("TwoInputSubtract"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/concat_with_params.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/concat_with_params.cpp new file mode 100644 index 00000000000000..914fd06e2c635f --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/concat_with_params.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/concat_with_params.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" 
+#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_concat_with_params(ov::Shape input_shape, ov::element::Type type) { + auto parameter1 = std::make_shared(type, input_shape); + parameter1->set_friendly_name("param1"); + parameter1->output(0).get_tensor().set_names({"data1"}); + + auto parameter2 = std::make_shared(type, input_shape); + parameter2->set_friendly_name("param2"); + parameter2->output(0).get_tensor().set_names({"data2"}); + + auto concat = std::make_shared(OutputVector{parameter1, parameter2}, 1); + concat->set_friendly_name("concat_op"); + concat->output(0).get_tensor().set_names({"concat"}); + + auto result = std::make_shared(concat); + result->set_friendly_name("result"); + + auto model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{parameter1, parameter2}); + model->set_friendly_name("SingleConcatWithParams"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_bias.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_bias.cpp new file mode 100644 index 00000000000000..079f91d87683bd --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_bias.cpp @@ -0,0 +1,43 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/conv_bias.hpp" + +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convolution.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_bias(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector parameter{std::make_shared(type, input_shape)}; + parameter[0]->set_friendly_name("parameter"); + + auto weights = ov::op::v0::Constant::create(type, ov::Shape{6, 3, 1, 1}, {1}); + auto biases = ov::op::v0::Constant::create(type, ov::Shape{6, 1, 1}, {1}); + auto conv = std::make_shared(parameter[0], + weights, + ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + ov::Strides{1, 1}); + conv->set_friendly_name("conv"); + + auto add = std::make_shared(conv, biases); + add->set_friendly_name("add"); + + auto result = std::make_shared(add); + result->set_friendly_name("result"); + + std::shared_ptr model = + std::make_shared(ov::ResultVector{result}, ov::ParameterVector{parameter}); + model->set_friendly_name("ConvBias"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu.cpp new file mode 100644 index 00000000000000..d5f3f18f792f58 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu.cpp @@ -0,0 +1,161 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/max_pool.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" + 
+namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_pool_relu(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + params.front()->set_friendly_name("Param_1"); + params.front()->output(0).get_tensor().set_names({"data"}); + + ov::Shape const_shape = {input_shape[0], input_shape[2], input_shape[1], input_shape[3]}; + auto const1 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, const_shape); + const1->set_friendly_name("Const_1"); + const1->output(0).get_tensor().set_names({"const1"}); + + auto reshape1 = std::make_shared(params.front(), const1, false); + reshape1->set_friendly_name("Reshape_1"); + reshape1->output(0).get_tensor().set_names({"reshape1"}); + + auto conv1 = ov::test::utils::make_convolution(reshape1, + type, + {1, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 4); + conv1->set_friendly_name("Conv_1"); + conv1->output(0).get_tensor().set_names({"conv"}); + + std::vector stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2}; + auto pool1 = std::make_shared(conv1, + stride, + padB, + padE, + kernel, + ov::op::RoundingType::FLOOR, + ov::op::PadType::EXPLICIT); + pool1->output(0).get_tensor().set_names({"pool"}); + pool1->set_friendly_name("Pool_1"); + + auto relu1 = std::make_shared(pool1); + relu1->set_friendly_name("Relu_1"); + relu1->output(0).get_tensor().set_names({"relu"}); + + ov::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape(); + std::vector constShape2 = {1, ov::shape_size(reluShape)}; + auto const2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, constShape2); + const2->output(0).get_tensor().set_names({"const2"}); + const2->set_friendly_name("Const_2"); + + auto reshape2 = std::make_shared(relu1, const2, false); + reshape2->output(0).get_tensor().set_names({"reshape2"}); + reshape2->set_friendly_name("Reshape_2"); + + ov::ResultVector results{std::make_shared(reshape2)}; + std::shared_ptr model = std::make_shared(results, params); + return model; +} + +std::shared_ptr make_conv_pool2_relu2(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + params.front()->set_friendly_name("Param_1"); + params.front()->output(0).get_tensor().set_names({"data"}); + + std::vector constShape = {input_shape[0], input_shape[2], input_shape[1], input_shape[3]}; + auto const1 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, constShape); + const1->set_friendly_name("Const_1"); + const1->output(0).get_tensor().set_names({"const1"}); + + auto reshape1 = std::make_shared(params.front(), const1, false); + reshape1->set_friendly_name("Reshape_1"); + reshape1->output(0).get_tensor().set_names({"reshape1"}); + + auto conv1 = ov::test::utils::make_convolution(reshape1, + type, + {1, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 4); + conv1->set_friendly_name("Conv_1"); + conv1->output(0).get_tensor().set_names({"conv"}); + std::vector stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2}; + + ov::ResultVector results; + { + auto pool1 = std::make_shared(conv1, + stride, + padB, + padE, + kernel, + ov::op::RoundingType::FLOOR, + ov::op::PadType::EXPLICIT); + pool1->output(0).get_tensor().set_names({"pool_0"}); + pool1->set_friendly_name("Pool_1_0"); + + auto relu1 = std::make_shared(pool1); + relu1->set_friendly_name("Relu_1_0"); + relu1->output(0).get_tensor().set_names({"relu_0"}); + + ov::Shape reluShape = 
relu1->outputs()[0].get_tensor().get_shape(); + std::vector constShape2 = {1, ov::shape_size(reluShape)}; + auto const2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, constShape2); + const2->output(0).get_tensor().set_names({"const2_0"}); + const2->set_friendly_name("Const_2_0"); + + auto reshape2 = std::make_shared(relu1, const2, false); + reshape2->output(0).get_tensor().set_names({"reshape2_0"}); + reshape2->set_friendly_name("Reshape_2_0"); + + results.push_back(std::make_shared(reshape2)); + } + { + auto pool1 = std::make_shared(conv1, + stride, + padB, + padE, + kernel, + ov::op::RoundingType::FLOOR, + ov::op::PadType::EXPLICIT); + pool1->output(0).get_tensor().set_names({"pool_1"}); + pool1->set_friendly_name("Pool_1_1"); + + auto relu1 = std::make_shared(pool1); + relu1->set_friendly_name("Relu_1_1"); + relu1->output(0).get_tensor().set_names({"relu_1"}); + + ov::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape(); + std::vector constShape2 = {1, ov::shape_size(reluShape)}; + auto const2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, constShape2); + const2->output(0).get_tensor().set_names({"const2_1"}); + const2->set_friendly_name("Const_2_1"); + + auto reshape2 = std::make_shared(relu1, const2, false); + reshape2->output(0).get_tensor().set_names({"reshape2_1"}); + reshape2->set_friendly_name("Reshape_2_1"); + + results.push_back(std::make_shared(reshape2)); + } + std::shared_ptr model = std::make_shared(results, params); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu_no_reshapes.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu_no_reshapes.cpp new file mode 100644 index 00000000000000..e3b8d56de66d4d --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu_no_reshapes.cpp @@ -0,0 +1,56 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/node_builders/convolution.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/max_pool.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_pool_relu_no_reshapes(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, ov::Shape(input_shape))}; + params.front()->set_friendly_name("Param_1"); + params.front()->output(0).get_tensor().set_names({"data"}); + + auto conv1 = ov::test::utils::make_convolution(params.front(), + type, + {1, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 4); + conv1->set_friendly_name("Conv_1"); + conv1->output(0).get_tensor().set_names({"conv"}); + + std::vector stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2}; + auto pool1 = std::make_shared(conv1, + stride, + padB, + padE, + kernel, + ov::op::RoundingType::FLOOR, + ov::op::PadType::EXPLICIT); + pool1->output(0).get_tensor().set_names({"pool"}); + pool1->set_friendly_name("Pool_1"); + + auto relu1 = std::make_shared(pool1); + relu1->set_friendly_name("Relu_1"); + relu1->output(0).get_tensor().set_names({"relu"}); + + ov::Shape reluShape = relu1->outputs()[0].get_tensor().get_shape(); + ov::ResultVector results{std::make_shared(relu1)}; + 
std::shared_ptr fnPtr = std::make_shared(results, params); + return fnPtr; +} + +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu_non_zero.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu_non_zero.cpp new file mode 100644 index 00000000000000..097486723f32b1 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/conv_pool_relu_non_zero.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/node_builders/convolution.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/max_pool.hpp" +#include "openvino/op/non_zero.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_conv_pool_relu_non_zero(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + params.front()->set_friendly_name("Param_1"); + params.front()->output(0).get_tensor().set_names({"data"}); + + auto conv1 = ov::test::utils::make_convolution(params.front(), + type, + {1, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 4); + conv1->set_friendly_name("Conv_1"); + conv1->output(0).get_tensor().set_names({"conv"}); + + std::vector stride{1, 1}, padB{0, 0}, padE = padB, kernel{1, 2}; + auto pool1 = std::make_shared(conv1, + stride, + padB, + padE, + kernel, + ov::op::RoundingType::FLOOR, + ov::op::PadType::EXPLICIT); + pool1->output(0).get_tensor().set_names({"pool"}); + pool1->set_friendly_name("Pool_1"); + + auto relu1 = std::make_shared(pool1); + relu1->set_friendly_name("Relu_1"); + relu1->output(0).get_tensor().set_names({"relu"}); + + auto non_zero = std::make_shared(relu1); + non_zero->set_friendly_name("nonZero_1"); + non_zero->output(0).get_tensor().set_names({"nonZero"}); + + auto gather_indices = + std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{0}); + gather_indices->set_friendly_name("gather_indices_1"); + gather_indices->output(0).get_tensor().set_names({"gather_indices"}); + + auto gather_axis = std::make_shared(ov::element::i64, ov::Shape{1}, std::vector{1}); + gather_axis->set_friendly_name("gather_axis_1"); + gather_axis->output(0).get_tensor().set_names({"gather_axis"}); + + auto gather = std::make_shared(non_zero->output(0), gather_indices, gather_axis); + gather->set_friendly_name("gather_1"); + gather->output(0).get_tensor().set_names({"gather"}); + + ov::ResultVector results{std::make_shared(gather)}; + std::shared_ptr model = std::make_shared(results, params); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/convert_transpose.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/convert_transpose.cpp new file mode 100644 index 00000000000000..56b70c71679758 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/convert_transpose.cpp @@ -0,0 +1,42 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/convert_transpose.hpp" + +#include 
"common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/transpose.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_convert_transpose(ov::Shape input_shape, + std::vector input_order, + ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + params.front()->set_friendly_name("Param_1"); + params.front()->output(0).get_tensor().set_names({"data"}); + + const auto order = ov::op::v0::Constant::create(element::i32, {input_order.size()}, input_order); + + auto convert = std::make_shared(params.front(), type); + convert->set_friendly_name("convert"); + + auto transpose = std::make_shared(convert, order); + transpose->set_friendly_name("transpose"); + + auto result = std::make_shared(transpose); + result->set_friendly_name("result"); + + std::shared_ptr model = + std::make_shared(ov::ResultVector{result}, ov::ParameterVector{params}); + model->set_friendly_name("ConvertTranspose"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/detection_output.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/detection_output.cpp new file mode 100644 index 00000000000000..754bbad3cc1613 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/detection_output.cpp @@ -0,0 +1,71 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/detection_output.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/convolution.hpp" +#include "openvino/op/detection_output.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/tile.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_detection_output(ov::element::Type type) { + const auto& data = std::make_shared(type, ov::Shape{1, 4, 10, 10}); + + const auto& constant_0 = std::make_shared(type, ov::Shape{1, 1, 1, 1}); + const auto& mul_0 = std::make_shared(data, constant_0); + + const auto& filters = std::make_shared(type, ov::Shape{1, 4, 1, 1}); + const auto& conv = std::make_shared(mul_0, + filters, + ov::Strides{1, 1}, + ov::CoordinateDiff{0, 0}, + ov::CoordinateDiff{0, 0}, + ov::Strides{1, 1}); + + const auto& box_logits_reshape = + std::make_shared(ov::element::i64, ov::Shape{2}, std::vector{0, -1}); + const auto& box_logits = std::make_shared(conv, box_logits_reshape, true); + + const auto& four_times = std::make_shared( + box_logits, + std::make_shared(ov::element::i64, ov::Shape{2}, std::vector{1, 4})); + + const auto& third_input_reshape = + std::make_shared(ov::element::i64, ov::Shape{3}, std::vector{0, 1, -1}); + const auto& third_input = std::make_shared(four_times, third_input_reshape, true); + + ov::op::v0::DetectionOutput::Attributes attr; + attr.num_classes = 4; + attr.background_label_id = 0; + attr.top_k = 75; + attr.variance_encoded_in_target = true; + attr.keep_top_k = {50}; + attr.code_type = std::string{"caffe.PriorBoxParameter.CORNER"}; + attr.share_location = true; + attr.nms_threshold = 0.5f; + 
attr.confidence_threshold = 0.5f; + attr.clip_after_nms = false; + attr.clip_before_nms = false; + attr.decrease_label_id = false; + attr.normalized = true; + attr.input_height = 1; + attr.input_width = 1; + attr.objectness_score = 0.4f; + const auto& detection = std::make_shared(four_times, four_times, third_input, attr); + const auto& convert = std::make_shared(detection, type); + + return std::make_shared(ov::NodeVector{convert}, ov::ParameterVector{data}, "SplitableDetectionOutput"); +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/kso_func.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/kso_func.cpp new file mode 100644 index 00000000000000..001ad9e90e9de3 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/kso_func.cpp @@ -0,0 +1,51 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/kso_func.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/shape_of.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_kso_function(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape), + std::make_shared(type, input_shape)}; + + auto shape_of = std::make_shared(params[0]); + auto convert = std::make_shared(shape_of, type); + auto new_shape = + std::make_shared(ov::element::i64, ov::Shape{4}, std::vector{1, 4, 1, 1}); + auto reshape = std::make_shared(convert, new_shape, false); + + auto conv1 = ov::test::utils::make_convolution(params[1], + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 4); + + auto relu1 = std::make_shared(conv1); + auto add = std::make_shared(relu1, reshape); + + ov::ResultVector results{std::make_shared(add)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("KSOFunction"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/matmul_bias.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/matmul_bias.cpp new file mode 100644 index 00000000000000..9446695ce73b11 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/matmul_bias.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/matmul_bias.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/matmul.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_matmul_bias(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector parameter{std::make_shared(type, input_shape)}; + parameter[0]->set_friendly_name("parameter"); + + auto weights = ov::op::v0::Constant::create(type, ov::Shape{24, 24}, {1}); + auto biases = ov::op::v0::Constant::create(type, 
ov::Shape{1, 24}, {1}); + + auto matmul = std::make_shared(parameter[0], weights); + matmul->set_friendly_name("matmul"); + + auto add = std::make_shared(matmul, biases); + add->set_friendly_name("add"); + + auto result = std::make_shared(add); + result->set_friendly_name("result"); + + std::shared_ptr model = + std::make_shared(ov::ResultVector{result}, ov::ParameterVector{parameter}); + model->set_friendly_name("MatMulBias"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/multi_single_conv.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/multi_single_conv.cpp new file mode 100644 index 00000000000000..43168103abe613 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/multi_single_conv.cpp @@ -0,0 +1,113 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_multi_single_conv(ov::Shape input_shape, ov::element::Type type) { + auto param0 = std::make_shared(type, input_shape); + auto conv1 = ov::test::utils::make_convolution(param0, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv2 = ov::test::utils::make_convolution(conv1, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv3 = ov::test::utils::make_convolution(conv2, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv4 = ov::test::utils::make_convolution(conv3, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv5 = ov::test::utils::make_convolution(conv4, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv6 = ov::test::utils::make_convolution(conv5, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv7 = ov::test::utils::make_convolution(conv6, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv8 = ov::test::utils::make_convolution(conv7, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv9 = ov::test::utils::make_convolution(conv8, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto conv10 = ov::test::utils::make_convolution(conv9, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto result = std::make_shared(conv10); + auto model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param0}); + model->set_friendly_name("MultiSingleConv"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/multiple_input_output_double_concat.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/multiple_input_output_double_concat.cpp new file mode 100644 index 00000000000000..f1aad099681b0a --- /dev/null +++ 
b/src/tests/test_utils/common_test_utils/src/subgraph_builders/multiple_input_output_double_concat.cpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/node_builders/convolution.hpp"
+#include "common_test_utils/subgraph_builders/multiple_input_output_double_concat.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/result.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_multiple_input_output_double_concat(ov::Shape input_shape, ov::element::Type type) {
+    auto param1 = std::make_shared<ov::op::v0::Parameter>(type, input_shape);
+    param1->set_friendly_name("param1");
+    param1->output(0).get_tensor().set_names({"data1"});
+
+    auto param2 = std::make_shared<ov::op::v0::Parameter>(type, input_shape);
+    param2->set_friendly_name("param2");
+    param2->output(0).get_tensor().set_names({"data2"});
+
+    auto concat1 = std::make_shared<ov::op::v0::Concat>(OutputVector{param1, param2}, 1);
+    concat1->set_friendly_name("concat_op1");
+    concat1->output(0).get_tensor().set_names({"concat1"});
+
+    auto result1 = std::make_shared<ov::op::v0::Result>(concat1);
+    result1->set_friendly_name("result1");
+
+    auto concat2 = std::make_shared<ov::op::v0::Concat>(OutputVector{concat1, param2}, 1);
+    concat2->set_friendly_name("concat_op2");
+    concat2->output(0).get_tensor().set_names({"concat2"});
+
+    auto result2 = std::make_shared<ov::op::v0::Result>(concat2);
+    result2->set_friendly_name("result2");
+
+    auto model = std::make_shared<ov::Model>(ov::ResultVector{result1, result2}, ov::ParameterVector{param1, param2});
+    model->set_friendly_name("makeMultipleInputOutputDoubleConcat");
+    return model;
+}
+}  // namespace utils
+}  // namespace test
+}  // namespace ov
\ No newline at end of file
diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/nested_branch_conv_concat.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/nested_branch_conv_concat.cpp
new file mode 100644
index 00000000000000..4d206df3ba333b
--- /dev/null
+++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/nested_branch_conv_concat.cpp
@@ -0,0 +1,76 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "common_test_utils/subgraph_builders/nested_branch_conv_concat.hpp"
+
+#include "common_test_utils/node_builders/convolution.hpp"
+#include "openvino/op/concat.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/relu.hpp"
+#include "openvino/op/result.hpp"
+
+namespace ov {
+namespace test {
+namespace utils {
+std::shared_ptr<ov::Model> make_nested_branch_conv_concat(ov::Shape input_shape, ov::element::Type type) {
+    ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(type, input_shape)};
+    auto relu0 = std::make_shared<ov::op::v0::Relu>(params[0]);
+
+    auto conv1 = ov::test::utils::make_convolution(relu0,
+                                                   type,
+                                                   {3, 3},
+                                                   {1, 1},
+                                                   {0, 0},
+                                                   {0, 0},
+                                                   {1, 1},
+                                                   ov::op::PadType::EXPLICIT,
+                                                   5);
+    auto relu1 = std::make_shared<ov::op::v0::Relu>(conv1);
+
+    auto conv2 = ov::test::utils::make_convolution(relu0,
+                                                   type,
+                                                   {3, 3},
+                                                   {1, 1},
+                                                   {1, 1},
+                                                   {1, 1},
+                                                   {1, 1},
+                                                   ov::op::PadType::EXPLICIT,
+                                                   10);
+    auto relu2 = std::make_shared<ov::op::v0::Relu>(conv2);
+
+    auto conv3 = ov::test::utils::make_convolution(relu2,
+                                                   type,
+                                                   {3, 3},
+                                                   {1, 1},
+                                                   {0, 0},
+                                                   {0, 0},
+                                                   {1, 1},
+                                                   ov::op::PadType::EXPLICIT,
+                                                   5);
+    auto relu3 = std::make_shared<ov::op::v0::Relu>(conv3);
+
+    auto conv4 = ov::test::utils::make_convolution(relu2,
+                                                   type,
+                                                   {3, 3},
+                                                   {1, 1},
+                                                   {0, 0},
+                                                   {0, 0},
+                                                   {1, 1},
+                                                   ov::op::PadType::EXPLICIT,
+                                                   5);
+    auto relu4 = std::make_shared<ov::op::v0::Relu>(conv4);
+
+    auto concat =
std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); + ov::ResultVector results{std::make_shared(concat1)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("NestedBranchConvConcat"); + return model; +} + +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/nested_split_conv_concat.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/nested_split_conv_concat.cpp new file mode 100644 index 00000000000000..924ce6a157667b --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/nested_split_conv_concat.cpp @@ -0,0 +1,83 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/nested_split_conv_concat.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/split.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_nested_split_conv_concat(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + auto split_axis_op = + std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1 = std::make_shared(conv1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 10); + auto relu2 = std::make_shared(conv2); + + auto split2_axis_op = + std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); + auto split2 = std::make_shared(relu2, split2_axis_op, 2); + + auto conv3 = ov::test::utils::make_convolution(split2->output(0), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu3 = std::make_shared(conv3); + + auto conv4 = ov::test::utils::make_convolution(split2->output(1), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu4 = std::make_shared(conv4); + + auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); + ov::ResultVector results{std::make_shared(concat1)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("NestedSplitConvConcat"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/read_concat_split_assign.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/read_concat_split_assign.cpp new file mode 100644 index 00000000000000..8d06410814acdb --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/read_concat_split_assign.cpp @@ -0,0 +1,47 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
"common_test_utils/subgraph_builders/read_concat_split_assign.hpp" + +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/split.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_read_concat_split_assign(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector parameter{std::make_shared(type, input_shape)}; + parameter[0]->set_friendly_name("parameter"); + + auto init_const = ov::op::v0::Constant::create(type, input_shape, {0}); + auto read = std::make_shared(init_const, "v0"); + read->set_friendly_name("read"); + + std::vector> args = {parameter[0], read}; + auto conc = std::make_shared(args, 3); + conc->set_friendly_name("concat"); + + auto res = std::make_shared(conc); + res->set_friendly_name("result"); + + const auto axis = ov::op::v0::Constant::create(element::i64, Shape{}, {3}); + axis->set_friendly_name("axis"); + + auto crop = std::make_shared(conc, axis, 2); + crop->set_friendly_name("split"); + + auto assign = std::make_shared(crop, "v0"); + assign->set_friendly_name("assign"); + + std::shared_ptr model = + std::make_shared(ov::ResultVector({res}), ov::SinkVector({assign}), ov::ParameterVector{parameter}); + model->set_friendly_name("ReadConcatSplitAssign"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_concat_with_constant.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_concat_with_constant.cpp new file mode 100644 index 00000000000000..90d703c0cd0552 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_concat_with_constant.cpp @@ -0,0 +1,37 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/single_concat_with_constant.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_single_concat_with_constant(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector parameter{std::make_shared(type, input_shape)}; + parameter[0]->set_friendly_name("Param_1"); + parameter[0]->output(0).get_tensor().set_names({"data"}); + + auto init_const = ov::op::v0::Constant::create(type, input_shape, {0}); + + std::vector> args = {parameter[0], init_const}; + auto conc = std::make_shared(args, 3); + conc->set_friendly_name("concat"); + + auto res = std::make_shared(conc); + res->set_friendly_name("result"); + + std::shared_ptr model = + std::make_shared(ov::ResultVector({res}), ov::ParameterVector{parameter}); + model->set_friendly_name("SingleConcatWithConstant"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_conv.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_conv.cpp new file mode 100644 index 00000000000000..8f570eb725fb9c --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_conv.cpp @@ -0,0 +1,33 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include 
"common_test_utils/node_builders/convolution.hpp" +#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_single_conv(ov::Shape input_shape, ov::element::Type type) { + auto param0 = std::make_shared(type, ov::Shape(input_shape)); + + auto conv1 = ov::test::utils::make_convolution(param0, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 4); + auto result = std::make_shared(conv1); + + auto model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param0}); + model->set_friendly_name("SingleConv"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_split.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_split.cpp new file mode 100644 index 00000000000000..79c2993dc7a084 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/single_split.cpp @@ -0,0 +1,38 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/single_split.hpp" + +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/split.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_single_split(ov::Shape input_shape, ov::element::Type type) { + auto param1 = std::make_shared(type, input_shape); + param1->set_friendly_name("param1"); + param1->output(0).get_tensor().set_names({"data1"}); + + auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); + auto split = std::make_shared(param1, axis_node, 2); + split->set_friendly_name("split"); + split->output(0).get_tensor().set_names({"tensor_split_1"}); + split->output(1).get_tensor().set_names({"tensor_split_2"}); + + auto result1 = std::make_shared(split->output(0)); + result1->set_friendly_name("result1"); + + auto result2 = std::make_shared(split->output(1)); + result2->set_friendly_name("result2"); + + auto model = std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{param1}); + model->set_friendly_name("SingleSplit"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_concat.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_concat.cpp new file mode 100644 index 00000000000000..639c7cd04bcf81 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_concat.cpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/split_concat.hpp" + +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/split.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_split_concat(ov::Shape input_shape, ov::element::Type type) { + auto param1 = std::make_shared(type, input_shape); + param1->set_friendly_name("Param1"); + param1->output(0).get_tensor().set_names({"data1"}); + + auto axis_node = ov::op::v0::Constant::create(element::i64, Shape{}, {1}); + 
auto split = std::make_shared(param1, axis_node, 2); + split->set_friendly_name("Split"); + split->output(0).get_tensor().set_names({"tensor_split_1"}); + split->output(1).get_tensor().set_names({"tensor_split_2"}); + + auto concat = std::make_shared(OutputVector{split->output(0), split->output(1)}, 1); + concat->set_friendly_name("Concat_op"); + concat->output(0).get_tensor().set_names({"Concat"}); + + auto result = std::make_shared(concat); + result->set_friendly_name("Result"); + + auto model = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param1}); + model->set_friendly_name("SplitConcat"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_conv_concat.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_conv_concat.cpp new file mode 100644 index 00000000000000..3b9912b22d61bc --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_conv_concat.cpp @@ -0,0 +1,544 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/split_conv_concat.hpp" + +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/split.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_split_conv_concat(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"input_tensor"}); + + auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1 = std::make_shared(conv1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + + auto relu2 = std::make_shared(conv2); + + auto concat = std::make_shared(ov::OutputVector{relu1->output(0), relu2->output(0)}, 1); + concat->get_output_tensor(0).set_names({"concat_tensor"}); + + ov::ResultVector results{std::make_shared(concat)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("SplitConvConcat"); + return model; +} + +std::shared_ptr make_cplit_conv_concat_input_in_branch(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape), + std::make_shared(type, input_shape)}; + auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1 = std::make_shared(conv1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu2 = std::make_shared(conv2); + + auto conv4 = 
ov::test::utils::make_convolution(params[1]->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu4 = std::make_shared(conv4); + + auto concat = std::make_shared(ov::OutputVector{relu4->output(0), relu2->output(0)}, 1); + + auto conv3 = ov::test::utils::make_convolution(concat, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu3 = std::make_shared(conv3); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), relu3->output(0)}, 1); + ov::ResultVector results{std::make_shared(concat1)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("SplitConvConcatInputInBranch"); + return model; +} + +std::shared_ptr make_cplit_conv_concat_nested_in_branch(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape), + std::make_shared(type, input_shape)}; + + int localId = 0; +#define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++)); + + auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + + auto split = std::make_shared(params[0], split_axis_op, 2); + SET_NAME(split); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv1); + + auto relu1 = std::make_shared(conv1); + SET_NAME(relu1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv2); + + auto relu2 = std::make_shared(conv2); + SET_NAME(relu2); + + auto nested_subgraph = [&] { + auto split_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[1], split_axis_op, 2); + SET_NAME(split); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv1); + + auto relu1 = std::make_shared(conv1); + SET_NAME(relu1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 10); + SET_NAME(conv2); + + auto relu2 = std::make_shared(conv2); + SET_NAME(relu2); + + auto split2_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split2 = std::make_shared(relu2, split2_axis_op, 2); + SET_NAME(split2); + + auto conv3 = ov::test::utils::make_convolution(split2->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv3); + + auto relu3 = std::make_shared(conv3); + SET_NAME(relu3); + + auto conv4 = ov::test::utils::make_convolution(split2->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv4); + + auto relu4 = std::make_shared(conv4); + SET_NAME(relu4); + + auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); + SET_NAME(concat); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); + SET_NAME(concat1); + + auto conv5 = ov::test::utils::make_convolution(concat1, + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv5); + + auto relu5 = std::make_shared(conv5); + SET_NAME(relu5); + + return 
relu5; + }(); + + auto concat = + std::make_shared(ov::OutputVector{nested_subgraph->output(0), relu2->output(0)}, 1); + SET_NAME(concat); + + auto conv3 = ov::test::utils::make_convolution(concat, + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv3); + + auto relu3 = std::make_shared(conv3); + SET_NAME(relu3); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), relu3->output(0)}, 1); + SET_NAME(concat1); + + ov::ResultVector results{std::make_shared(concat1)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("SplitConvConcatNestedInBranch"); + return model; +} + +std::shared_ptr make_cplit_conv_concat_nested_in_branch_nested_out(ov::Shape input_shape, + ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape), + std::make_shared(type, input_shape)}; + + int localId = 0; +#define SET_NAME(node) node->set_friendly_name(#node + std::to_string(localId++)); + + auto split_axis_op = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + SET_NAME(split); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv1); + + auto relu1 = std::make_shared(conv1); + SET_NAME(relu1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 10); + SET_NAME(conv2); + + auto relu2 = std::make_shared(conv2); + SET_NAME(relu2); + + auto split3_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split3 = std::make_shared(relu2, split3_axis_op, 2); + SET_NAME(split3); + + auto conv32 = ov::test::utils::make_convolution(split3->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 10); + SET_NAME(conv32); + + auto relu32 = std::make_shared(conv32); + SET_NAME(relu32); + + auto nested_subgraph = [&] { + auto split_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[1], split_axis_op, 2); + SET_NAME(split); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv1); + + auto relu1 = std::make_shared(conv1); + SET_NAME(relu1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 10); + SET_NAME(conv2); + + auto relu2 = std::make_shared(conv2); + SET_NAME(relu2); + + auto split2_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split2 = std::make_shared(relu2, split2_axis_op, 2); + SET_NAME(split2); + + auto conv3 = ov::test::utils::make_convolution(split2->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv3); + + auto relu3 = std::make_shared(conv3); + SET_NAME(relu3); + + auto conv4 = ov::test::utils::make_convolution(split2->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv4); + + auto relu4 = std::make_shared(conv4); + SET_NAME(relu4); + + auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); + 
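// Editor's note (hedged): SET_NAME expands to
+    // node->set_friendly_name(#node + std::to_string(localId++)), so the
+    // variable names re-used across these nested lambdas still receive unique
+    // friendly names ("concat0", "concat1", ...) from one shared counter.
+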
SET_NAME(concat); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); + SET_NAME(concat1); + + auto conv5 = ov::test::utils::make_convolution(concat1, + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv5); + + auto relu5 = std::make_shared(conv5); + SET_NAME(relu5); + return relu5; + }(); + + auto nested_subgraph1 = [&] { + auto split_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(relu32, split_axis_op, 2); + SET_NAME(split); + + auto conv1 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv1); + + auto relu1 = std::make_shared(conv1); + SET_NAME(relu1); + + auto conv2 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 10); + SET_NAME(conv2); + + auto relu2 = std::make_shared(conv2); + SET_NAME(relu2); + + auto split2_axis_op = + std::make_shared(ov::element::i64, ov::Shape{}, std::vector{1}); + auto split2 = std::make_shared(relu2, split2_axis_op, 2); + SET_NAME(split2); + + auto conv3 = ov::test::utils::make_convolution(split2->output(0), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv3); + + auto relu3 = std::make_shared(conv3); + SET_NAME(relu3); + + auto conv4 = ov::test::utils::make_convolution(split2->output(1), + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv4); + + auto relu4 = std::make_shared(conv4); + SET_NAME(relu4); + + auto concat = std::make_shared(ov::OutputVector{relu3->output(0), relu4->output(0)}, 1); + SET_NAME(concat); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), concat}, 1); + SET_NAME(concat1); + + auto conv5 = ov::test::utils::make_convolution(concat1, + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv5); + + auto relu5 = std::make_shared(conv5); + SET_NAME(relu5); + return relu5; + }(); + + auto concat = + std::make_shared(ov::OutputVector{nested_subgraph->output(0), split3->output(0)}, 1); + SET_NAME(concat); + + auto conv3 = ov::test::utils::make_convolution(concat, + type, + {3, 3}, + {1, 1}, + {1, 1}, + {1, 1}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + SET_NAME(conv3); + + auto relu3 = std::make_shared(conv3); + SET_NAME(relu3); + + auto concat1 = std::make_shared(ov::OutputVector{relu1->output(0), relu3->output(0)}, 1); + SET_NAME(concat1); + + ov::ResultVector results{std::make_shared(concat1), + std::make_shared(nested_subgraph1)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("SplitConvConcatNestedInBranchNestedOut"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_multi_conv_concat.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_multi_conv_concat.cpp new file mode 100644 index 00000000000000..98d2f2791b190a --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/split_multi_conv_concat.cpp @@ -0,0 +1,146 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/split_multi_conv_concat.hpp" 
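+
+// Editor's note (hedged): this builder yields Param -> Split(axis 1, two
+// chunks) -> two branches of five stacked 3x3 convolutions each; in every
+// branch the fourth conv's output is left unconsumed, while the fifth conv
+// (also fed from the third) reaches the final axis-1 Concat and the Result.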
+ +#include "common_test_utils/node_builders/convolution.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/split.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_split_multi_conv_concat(ov::Shape input_shape, ov::element::Type type) { + ov::ParameterVector params{std::make_shared(type, input_shape)}; + params.front()->set_friendly_name("Param_1"); + params.front()->get_output_tensor(0).set_names({"input_tensor"}); + + auto split_axis_op = + std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); + auto split = std::make_shared(params[0], split_axis_op, 2); + + auto conv1_0 = ov::test::utils::make_convolution(split->output(0), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1_0 = std::make_shared(conv1_0); + + auto conv1_1 = ov::test::utils::make_convolution(relu1_0, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1_1 = std::make_shared(conv1_1); + + auto conv1_2 = ov::test::utils::make_convolution(relu1_1, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1_2 = std::make_shared(conv1_2); + + auto conv1_3 = ov::test::utils::make_convolution(relu1_2, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1_3 = std::make_shared(conv1_3); + + auto conv1_4 = ov::test::utils::make_convolution(relu1_2, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu1_4 = std::make_shared(conv1_4); + + auto conv2_0 = ov::test::utils::make_convolution(split->output(1), + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu2_0 = std::make_shared(conv2_0); + + auto conv2_1 = ov::test::utils::make_convolution(relu2_0, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu2_1 = std::make_shared(conv2_1); + + auto conv2_2 = ov::test::utils::make_convolution(relu2_1, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu2_2 = std::make_shared(conv2_2); + + auto conv2_3 = ov::test::utils::make_convolution(relu2_2, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu2_3 = std::make_shared(conv2_3); + + auto conv2_4 = ov::test::utils::make_convolution(relu2_2, + type, + {3, 3}, + {1, 1}, + {0, 0}, + {0, 0}, + {1, 1}, + ov::op::PadType::EXPLICIT, + 5); + auto relu2_4 = std::make_shared(conv2_4); + + auto concat = std::make_shared(ov::OutputVector{relu1_4->output(0), relu2_4->output(0)}, 1); + ov::ResultVector results{std::make_shared(concat)}; + + std::shared_ptr model = std::make_shared(results, params); + model->set_friendly_name("SplitMultiConvConcat"); + return model; +} +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/common_test_utils/src/subgraph_builders/ti_with_lstm_cell.cpp b/src/tests/test_utils/common_test_utils/src/subgraph_builders/ti_with_lstm_cell.cpp new file mode 100644 index 00000000000000..00d9a4924825e7 --- /dev/null +++ b/src/tests/test_utils/common_test_utils/src/subgraph_builders/ti_with_lstm_cell.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2023 Intel 
Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/subgraph_builders/ti_with_lstm_cell.hpp" + +#include "openvino/op/constant.hpp" +#include "openvino/op/lstm_cell.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" +#include "openvino/op/tensor_iterator.hpp" + +namespace ov { +namespace test { +namespace utils { +std::shared_ptr make_ti_with_lstm_cell(ov::element::Type type, size_t N, size_t L, size_t I, size_t H) { + auto SENT = std::make_shared(type, ov::Shape{N, L, I}); + + auto H_init = std::make_shared(type, ov::Shape{N, 1, H}); + auto C_init = std::make_shared(type, ov::Shape{N, 1, H}); + + auto H_t = std::make_shared(type, ov::Shape{N, 1, H}); + auto C_t = std::make_shared(type, ov::Shape{N, 1, H}); + + // Body + auto X = std::make_shared(type, ov::Shape{N, 1, I}); + std::vector dataW(4 * H * I, 0); + auto W_body = std::make_shared(type, ov::Shape{4 * H, I}, dataW); + std::vector dataR(4 * H * H, 0); + auto R_body = std::make_shared(type, ov::Shape{4 * H, H}, dataR); + std::vector inShape = {N, H}; + auto constantH = std::make_shared(ov::element::i64, ov::Shape{2}, inShape); + inShape = {N, I}; + auto constantX = std::make_shared(ov::element::i64, ov::Shape{2}, inShape); + auto LSTM_cell = + std::make_shared(std::make_shared(X, constantX, false), + std::make_shared(H_t, constantH, false), + std::make_shared(C_t, constantH, false), + W_body, + R_body, + H); + inShape = {N, 1, H}; + auto constantHo = std::make_shared(ov::element::i64, ov::Shape{3}, inShape); + auto H_o = std::make_shared(LSTM_cell->output(0), constantHo, false); + auto C_o = std::make_shared(LSTM_cell->output(1), constantHo, false); + auto body = std::make_shared(ov::OutputVector{H_o, C_o}, ov::ParameterVector{X, H_t, C_t}); + + auto tensor_iterator = std::make_shared(); + tensor_iterator->set_body(body); + // start=0, stride=1, part_size=1, end=39, axis=1 + tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1); + // H_t is Hinit on the first iteration, Ho after that + tensor_iterator->set_merged_input(H_t, H_init, H_o); + tensor_iterator->set_merged_input(C_t, C_init, C_o); + + // Output 0 is last Ho, result 0 of body + auto out0 = tensor_iterator->get_iter_value(H_o, -1); + // Output 1 is last Co, result 1 of body + auto out1 = tensor_iterator->get_iter_value(C_o, -1); + + auto results = + ov::ResultVector{std::make_shared(out0), std::make_shared(out1)}; + + auto model = std::make_shared(results, ov::ParameterVector{SENT, H_init, C_init}); + model->set_friendly_name("TIwithLSTMcell"); + return model; +} + +} // namespace utils +} // namespace test +} // namespace ov \ No newline at end of file diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py index 83dfe0eb1ca15a..d0797a59fa4af5 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/rename_conformance_ir.py @@ -106,9 +106,16 @@ def generate_node_hash(node): str_to_hash += re.sub(r"[\s+\[\]\{\}\']", "", str(node.get_attributes())) except: logger.error(f"Impossible to get attributes for {node.name}") - try: partial_shape = input.get_partial_shape() + + if 'Convolution' in str(input_node.get_type_info().name): + offset = 2 + if 'GroupConvolution' in str(input_node.get_type_info().name) or\ + 
'GroupConvolutionBackpropData' in str(input_node.get_type_info().name):
+                offset = 3
+            shape_str += '[' + ','.join([str(val) for val in list(partial_shape)[offset:]]) + ']'
+
             shape_str += str(len(partial_shape))
             shape_str += str(partial_shape.is_dynamic)
         except:
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
index 3fb3434e7b593d..d209e60c53bd87 100644
--- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_conformance.py
@@ -53,6 +53,8 @@ def parse_arguments():
    gtest_filter_helper = "Specify gtest filter to apply for a test run. E.g. *Add*:*BinaryConv*. The default value is None"
    ov_config_path_helper = "Specify path to a plugin config file as `.lst` file. Default value is ``"
    special_mode_help = "Specify shape mode (`static`, `dynamic` or ``) for Opset conformance or API scope type (`mandatory` or ``). Default value is ``"
+    entity_help = "Specify validation entity: `Inference`, `ImportExport` or `QueryModel` for `OP` conformance or "\
+                  "`ov_compiled_model`, `ov_infer_request` or `ov_plugin` for `API` conformance. Default value is `` (all)"
    parallel_help = "Parallel over HW devices. For example run tests over GPU.0 and GPU.1 in case when device are the same"
    expected_failures_help = "Excepted failures list file path as csv"
    cache_path_help = "Path to the cache file with test_name list sorted by execution time as `.lst` file!"
@@ -70,6 +72,7 @@ def parse_arguments():
    parser.add_argument("-c", "--ov_config_path", help=ov_config_path_helper, type=str, required=False, default="")
    parser.add_argument("-s", "--dump_graph", help=dump_graph_help, type=int, required=False, default=0)
    parser.add_argument("-sm", "--special_mode", help=special_mode_help, type=str, required=False, default="")
+    parser.add_argument("-e", "--entity", help=entity_help, type=str, required=False, default="")
    parser.add_argument("-p", "--parallel_devices", help=parallel_help, type=bool, required=False, default=False)
    parser.add_argument("-f", "--expected_failures", help=expected_failures_help, type=str, required=False, default="")
    parser.add_argument("-u", "--expected_failures_update", help=expected_failures_update_help, required=False,
@@ -82,7 +85,7 @@ class Conformance:
    def __init__(self, device: str, model_path: os.path, ov_path: os.path, type: str, workers: int,
                 gtest_filter: str, working_dir: os.path, ov_config_path: os.path, special_mode: str,
-                 cache_path: str, parallel_devices: bool, expected_failures_file: str,
+                 entity: str, cache_path: str, parallel_devices: bool, expected_failures_file: str,
                 expected_failures_update: bool):
        self._device = device
        self._model_path = model_path
@@ -96,19 +99,30 @@ def __init__(self, device: str, model_path: os.path, ov_path: os.path, type: str
            rmtree(self._working_dir)
        os.mkdir(self._working_dir)
        self._cache_path = cache_path if os.path.isfile(cache_path) else ""
+        self.__entity = ""
        if type == constants.OP_CONFORMANCE:
+            if entity == "Inference" or entity == "QueryModel" or entity == "ImportExport" or entity == "":
+                self.__entity = entity
+            else:
+                logger.error(f'Incorrect value to set entity type: {entity}. Please check `help` to get possible values')
+                exit(-1)
            if special_mode == "static" or special_mode == "dynamic" or special_mode == "":
                self._special_mode = special_mode
            else:
-                logger.error(f'Incorrect value to set shape mode: {special_mode}. Please check to get possible values')
+                logger.error(f'Incorrect value to set shape mode: {special_mode}. Please check `help` to get possible values')
                exit(-1)
-            self._gtest_filter = gtest_filter
+            self._gtest_filter = f"*{self.__entity}*{gtest_filter}*"
        elif type == constants.API_CONFORMANCE:
+            if entity == "ov_compiled_model" or entity == "ov_plugin" or entity == "ov_infer_request" or entity == "":
+                self.__entity = entity
+            else:
+                logger.error(f'Incorrect value to set entity type: {entity}. Please check `help` to get possible values')
+                exit(-1)
            self._special_mode = ""
            if special_mode == "mandatory":
-                self._gtest_filter = f"*mandatory*{gtest_filter}*:*{gtest_filter}*mandatory*"
+                self._gtest_filter = f"*{self.__entity}*mandatory*{gtest_filter}*:*{self.__entity}*{gtest_filter}*mandatory*"
            elif special_mode == "":
-                self._gtest_filter = gtest_filter
+                self._gtest_filter = f"*{self.__entity}*{gtest_filter}*"
            else:
                logger.error(f'Incorrect value to set API scope: {special_mode}. Please check to get possible values')
                exit(-1)
@@ -304,6 +318,7 @@ def run(self, dump_models: bool):
        logger.info(f"[ARGUMENTS] --models_path = {self._model_path}")
        logger.info(f"[ARGUMENTS] --dump_graph = {dump_models}")
        logger.info(f"[ARGUMENTS] --shape_mode = {self._special_mode}")
+        logger.info(f"[ARGUMENTS] --entity = {self.__entity}")
        logger.info(f"[ARGUMENTS] --parallel_devices = {self._is_parallel_over_devices}")
        logger.info(f"[ARGUMENTS] --cache_path = {self._cache_path}")
        logger.info(f"[ARGUMENTS] --expected_failures = {self._expected_failures_file}")
@@ -338,9 +353,9 @@ def run(self, dump_models: bool):
                              args.ov_path, args.type,
                              args.workers, args.gtest_filter,
                              args.working_dir, args.ov_config_path,
-                              args.special_mode, args.cache_path,
-                              args.parallel_devices, args.expected_failures,
-                              args.expected_failures_update)
+                              args.special_mode, args.entity,
+                              args.cache_path, args.parallel_devices,
+                              args.expected_failures, args.expected_failures_update)
    conformance.run(args.dump_graph)
    if not conformance.is_successful_run:
        exit(-1)
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py
index 42bde59030f9e2..27f276fdee97d8 100644
--- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/run_parallel.py
@@ -219,7 +219,7 @@ def init_worker(self):
            self._process_list.append(
                Popen(
                    args,
-                    shell=constants.IS_WIN,
+                    shell=False,
                    stdout=log_file,
                    stderr=log_file,
                )
@@ -527,9 +527,6 @@ def __generate_test_lists(self, test_dict_cache: dict, test_dict_runtime: dict):
    def __prepare_smart_filters(self, proved_test_dict: dict):
        def_length = len(self._command) + len(" --gtest_filter=")
-        if constants.IS_WIN:
-            # subprocess add cmd.exe to the command line on Windows if shell=True
-            def_length += len(f'{os.environ.get("COMSPEC", "cmd.exe")} /C ')
        longest_device = ""
        for device in self._available_devices:
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/file_utils.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/file_utils.py
index edd74d330751d5..cbdff2783709f1 100644
---
a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/file_utils.py +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/file_utils.py @@ -87,5 +87,5 @@ def get_ov_path(script_dir_path: os.path, ov_dir=None, is_bin=False): if is_bin: ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir, 'bin')) ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir)) - ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir, [constants.DEBUG_DIR, constants.RELEASE_DIR])) + ov_dir = os.path.join(ov_dir, find_latest_dir(ov_dir)) return ov_dir diff --git a/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp b/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp index f78e923c37f6e7..57062d41d63cbe 100644 --- a/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp +++ b/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp @@ -141,9 +141,6 @@ std::map OpSummary::getStatisticFromReport() { } void OpSummary::updateOPsStats(const std::shared_ptr& model, const PassRate::Statuses& status, double k) { - if (model->get_parameters().empty()) { - return; - } bool isFunctionalGraph = false; for (const auto& op : model->get_ordered_ops()) { if (!std::dynamic_pointer_cast(op) && diff --git a/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp b/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp index b5d63dfd6ef8a5..a2ef41f1f9a18e 100644 --- a/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp +++ b/src/tests/test_utils/functional_test_utils/src/test_model/test_model.cpp @@ -4,10 +4,10 @@ #include "functional_test_utils/test_model/test_model.hpp" +#include "common_test_utils/subgraph_builders/conv_pool_relu.hpp" #include "openvino/core/partial_shape.hpp" #include "openvino/pass/manager.hpp" #include "openvino/pass/serialize.hpp" -#include "ov_models/subgraph_builders.hpp" namespace ov { namespace test { @@ -19,7 +19,7 @@ void generate_test_model(const std::string& model_path, const ov::PartialShape& input_shape) { ov::pass::Manager manager; manager.register_pass(model_path, weights_path); - manager.run_passes(ngraph::builder::subgraph::makeConvPoolRelu(input_shape.to_shape(), input_type)); + manager.run_passes(ov::test::utils::make_conv_pool_relu(input_shape.to_shape(), input_type)); } } // namespace utils diff --git a/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py b/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py index adee1fb3ccc4c9..be361704c5a621 100644 --- a/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py +++ b/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py @@ -53,6 +53,8 @@ def _test(self, model, ref_net, kind, ie_device, precision, ir_version, infer_ti def numpy_to_torch_recursively(x): if isinstance(x, tuple): return tuple(numpy_to_torch_recursively(y) for y in x) + elif isinstance(x, dict): + return dict((k, numpy_to_torch_recursively(y)) for k, y in x.items()) elif isinstance(x, np.ndarray): return torch.from_numpy(x) else: @@ -81,9 +83,11 @@ def use_torch_compile_backend(): freeze_model = kwargs.get('freeze_model', True) with torch.no_grad(): if kwargs.get('use_convert_model', False): - smodel, converted_model = self.convert_via_mo(model, torch_inputs, trace_model, dynamic_shapes, ov_inputs, freeze_model) + smodel, converted_model = self.convert_via_mo( + model, torch_inputs, trace_model, dynamic_shapes, ov_inputs, freeze_model) else: - smodel, converted_model = 
self.convert_directly_via_frontend(model, torch_inputs, trace_model, dynamic_shapes, ov_inputs, freeze_model) + smodel, converted_model = self.convert_directly_via_frontend( + model, torch_inputs, trace_model, dynamic_shapes, ov_inputs, freeze_model) if kind is not None and not isinstance(kind, (tuple, list)): kind = [kind] @@ -124,7 +128,7 @@ def use_torch_compile_backend(): continue assert ov_type == fw_type, f"dtype validation failed: {ov_type} != {fw_type}" continue - ov_tensor_fw_format = torch.tensor(np.array(ov_tensor)) + ov_tensor_fw_format = torch.tensor(np.array(ov_tensor)) assert ov_tensor_fw_format.dtype == fw_tensor.dtype, f"dtype validation failed: {ov_tensor_fw_format.dtype} != {fw_tensor.dtype}" # Compare Ie results with Framework results @@ -137,15 +141,17 @@ def use_torch_compile_backend(): assert 'quant_size' in kwargs, "quant size must be specified for quantized_ops flag" quant_size = kwargs['quant_size'] for i in range(len(infer_res)): - cur_fw_res = flatten_fw_res[i].contiguous().numpy(force=True) if isinstance(flatten_fw_res[i], torch.Tensor) else flatten_fw_res[i] + cur_fw_res = flatten_fw_res[i].contiguous().numpy(force=True) if isinstance( + flatten_fw_res[i], torch.Tensor) else flatten_fw_res[i] if np.array(cur_fw_res).size == 0: continue cur_ov_res = infer_res[compiled.output(i)] print(f"fw_res: {cur_fw_res};\n ov_res: {cur_ov_res}") n_is_not_close = np.array(cur_fw_res).size - np.isclose(cur_ov_res, cur_fw_res, - atol=fw_eps, - rtol=fw_eps, equal_nan=True).sum() - max_diff = np.array(abs(np.array(cur_ov_res, dtype=np.float32) - np.array(cur_fw_res, dtype=np.float32))).max() + atol=fw_eps, + rtol=fw_eps, equal_nan=True).sum() + max_diff = np.array(abs(np.array( + cur_ov_res, dtype=np.float32) - np.array(cur_fw_res, dtype=np.float32))).max() if not quantized_ops and n_is_not_close > 0: is_ok = False print("Max diff is {}".format(max_diff)) @@ -166,11 +172,15 @@ def _prepare_input(self): def convert_via_mo(self, model, example_input, trace_model, dynamic_shapes, ov_inputs, freeze_model): from openvino import convert_model, PartialShape if trace_model: - decoder = TorchScriptPythonDecoder(model, example_input=example_input, skip_freeze=not freeze_model) - kwargs = {"example_input": example_input if len(example_input) > 1 else example_input[0]} + decoder = TorchScriptPythonDecoder( + model, example_input=example_input, skip_freeze=not freeze_model) + kwargs = {"example_input": example_input if len( + example_input) > 1 or isinstance(example_input[0], dict) else example_input[0]} else: - decoder = TorchScriptPythonDecoder(model, skip_freeze=not freeze_model) - kwargs = {"input": [(i.dtype, PartialShape([-1] * len(i.shape))) for i in example_input]} + decoder = TorchScriptPythonDecoder( + model, skip_freeze=not freeze_model) + kwargs = {"input": [(i.dtype, PartialShape( + [-1] * len(i.shape))) for i in example_input]} smodel = decoder.pt_module print(smodel.inlined_graph) if not dynamic_shapes: @@ -185,9 +195,11 @@ def convert_directly_via_frontend(self, model, example_input, trace_model, dynam fe = fe_manager.load_by_framework('pytorch') if trace_model: - decoder = TorchScriptPythonDecoder(model, example_input=example_input, skip_freeze=not freeze_model) + decoder = TorchScriptPythonDecoder( + model, example_input=example_input, skip_freeze=not freeze_model) else: - decoder = TorchScriptPythonDecoder(model, skip_freeze=not freeze_model) + decoder = TorchScriptPythonDecoder( + model, skip_freeze=not freeze_model) smodel = decoder.pt_module print(smodel.inlined_graph) 
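# Editor's note (hedged): the decoder wraps the scripted/traced module and
# fe.load() produces a frontend InputModel; the unchanged code below this
# hunk presumably calls fe.convert() on it to obtain the ov.Model.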
im = fe.load(decoder) @@ -206,7 +218,8 @@ def _resolve_input_shape_dtype(self, om, ov_inputs, dynamic_shapes): inp = ov_inputs[i] assert inp.dtype.name in self._type_map, f"Unknown type {inp.dtype}." if params[i].get_node().get_element_type().is_dynamic(): - params[i].get_node().set_element_type(self._type_map[inp.dtype.name]) + params[i].get_node().set_element_type( + self._type_map[inp.dtype.name]) shape = [-1] * len(inp.shape) if dynamic_shapes else inp.shape params[i].get_node().set_partial_shape(PartialShape(shape)) om.validate_nodes_and_infer_types() @@ -235,7 +248,6 @@ def torch_compile_backend_test(self, model, inputs, custom_eps): flatten_ov_res ), f'number of outputs are not equal, {len(flatten_fw_res)} != {len(flatten_ov_res)}' - # Check if output data types match for fw_tensor, ov_tensor in zip(flatten_fw_res, flatten_ov_res): if not isinstance(fw_tensor, torch.Tensor) and not isinstance(ov_tensor, torch.Tensor): @@ -264,7 +276,6 @@ def torch_compile_backend_test(self, model, inputs, custom_eps): assert is_ok, "Accuracy validation failed" - def get_params(ie_device=None, precision=None): """ :param ie_device: list of devices @@ -309,4 +320,4 @@ def flattenize_outputs(res): def flattenize_inputs(res): - return flattenize(res, [tuple]) + return flattenize(res, [tuple, dict]) diff --git a/tests/layer_tests/pytorch_tests/test_clamp.py b/tests/layer_tests/pytorch_tests/test_clamp.py index 5c860f9be65ff1..3a4ce5c6c8de37 100644 --- a/tests/layer_tests/pytorch_tests/test_clamp.py +++ b/tests/layer_tests/pytorch_tests/test_clamp.py @@ -42,7 +42,7 @@ def forward_clip_(self, x): return aten_clamp(minimum, maximum, as_tensors, op_type), ref_net, op_name @pytest.mark.parametrize("minimum,maximum", - [(0., 1.), (-0.5, 1.5), (None, 10.), (None, -10.), (10., None), (-10., None), (100, 200)]) + [(0., 1.), (-0.5, 1.5), (None, 10.), (None, -10.), (10., None), (-10., None), (100, 200), (1.0, 0.0)]) @pytest.mark.parametrize("as_tensors", [True, False]) @pytest.mark.parametrize("op_type", ["clamp", "clamp_"]) @pytest.mark.nightly @@ -50,11 +50,6 @@ def test_clamp(self, minimum, maximum, as_tensors, op_type, ie_device, precision self._test(*self.create_model(minimum, maximum, as_tensors, op_type), ie_device, precision, ir_version) - @pytest.mark.xfail(reason='OpenVINO clamp does not support min > max') - def test_clamp_min_greater(self, ie_device, precision, ir_version): - self._test(*self.create_model(1.0, 0.0), - ie_device, precision, ir_version) - class TestClampMin(PytorchLayerTest): def _prepare_input(self): diff --git a/tests/layer_tests/pytorch_tests/test_dict.py b/tests/layer_tests/pytorch_tests/test_dict.py index 6e4db9dea825bd..4dfbf0f85c68c7 100644 --- a/tests/layer_tests/pytorch_tests/test_dict.py +++ b/tests/layer_tests/pytorch_tests/test_dict.py @@ -4,6 +4,7 @@ import numpy as np import pytest import torch +from typing import Dict from pytorch_layer_test_class import PytorchLayerTest @@ -15,7 +16,7 @@ def _prepare_input(self): def create_model(self): class aten_dict(torch.nn.Module): - def forward(self, x): + def forward(self, x): return {"b": x, "a": x + x, "c": 2 * x}, x / 2 return aten_dict(), None, "prim::DictConstruct" @@ -23,4 +24,41 @@ def forward(self, x): @pytest.mark.nightly @pytest.mark.precommit def test_dict(self, ie_device, precision, ir_version): - self._test(*self.create_model(), ie_device, precision, ir_version, use_convert_model=True) + self._test(*self.create_model(), ie_device, precision, + ir_version, use_convert_model=True) + + +class 
aten_dict_with_types(torch.nn.Module): + def forward(self, x_dict: Dict[str, torch.Tensor]): + return x_dict["x1"].to(torch.float32) + x_dict["x2"].to(torch.float32) + + +class aten_dict_no_types(torch.nn.Module): + def forward(self, x_dict: Dict[str, torch.Tensor]): + return x_dict["x1"] + x_dict["x2"] + + +class TestDictParam(PytorchLayerTest): + + def _prepare_input(self): + return ({"x1": np.random.randn(2, 5, 3, 4).astype(np.float32), + "x2": np.random.randn(2, 5, 3, 4).astype(np.float32)},) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_dict_param(self, ie_device, precision, ir_version): + self._test(aten_dict_with_types(), None, "aten::__getitem__", ie_device, precision, + ir_version, trace_model=True) + + @pytest.mark.nightly + @pytest.mark.precommit + def test_dict_param_convert_model(self, ie_device, precision, ir_version): + self._test(aten_dict_with_types(), None, "aten::__getitem__", ie_device, precision, + ir_version, trace_model=True, use_convert_model=True) + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.xfail(reason="Type is not propagated from PtFrameworkNode.") + def test_dict_param_no_types(self, ie_device, precision, ir_version): + self._test(aten_dict_no_types(), None, "aten::__getitem__", ie_device, precision, + ir_version, trace_model=True, freeze_model=False) diff --git a/tests/layer_tests/pytorch_tests/test_erfc.py b/tests/layer_tests/pytorch_tests/test_erfc.py new file mode 100644 index 00000000000000..85da9e41b759b0 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_erfc.py @@ -0,0 +1,57 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestErfc(PytorchLayerTest): + def _prepare_input(self, input_dtype, out=False): + import numpy as np + x = np.linspace(-3, 3).astype(input_dtype) + if not out: + return (x, ) + return (x, np.zeros_like(x).astype(input_dtype)) + + def create_model(self, mode="", input_dtype="float32"): + import torch + dtypes = { + "float32": torch.float32, + "float64": torch.float64, + "int32": torch.int32 + } + + dtype = dtypes[input_dtype] + class aten_erfc(torch.nn.Module): + def __init__(self, mode, dtype): + super(aten_erfc, self).__init__() + self.dtype = dtype + if mode == "out": + self.forward = self.forward_out + elif mode == "inplace": + self.forward = self.forward_inplace + + def forward(self, x): + return torch.special.erfc(x.to(self.dtype)) + + def forward_out(self, x, y): + return torch.special.erfc(x.to(self.dtype), out=y), y + + def forward_inplace(self, x): + x = x.to(self.dtype) + return x.erfc_(), x + + ref_net = None + + return aten_erfc(mode, dtype), ref_net, "aten::erfc" if mode != "inplace" else "aten::erfc_" + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.parametrize("mode,input_dtype", [ + ("", "float32"), ("", "float64"), ("", "int32"), + ("out", "float32"), ("out", "float64"), + ("inplace", "float32"), ("inplace", "float64")]) + def test_erfc(self, mode, input_dtype, ie_device, precision, ir_version): + self._test(*self.create_model(mode, input_dtype), ie_device, precision, ir_version, + kwargs_to_prepare_input={"input_dtype": input_dtype, "out": mode == "out"} ) \ No newline at end of file diff --git a/tests/layer_tests/pytorch_tests/test_listunpack.py b/tests/layer_tests/pytorch_tests/test_listunpack.py index 39d72bfe54c6e9..6f3df968eb921d 100644 --- a/tests/layer_tests/pytorch_tests/test_listunpack.py +++ 
diff --git a/tests/layer_tests/pytorch_tests/test_listunpack.py b/tests/layer_tests/pytorch_tests/test_listunpack.py
index 39d72bfe54c6e9..6f3df968eb921d 100644
--- a/tests/layer_tests/pytorch_tests/test_listunpack.py
+++ b/tests/layer_tests/pytorch_tests/test_listunpack.py
@@ -126,6 +126,7 @@ def test_listconstruct_getitem_listunpack(
             use_convert_model=True,
         )
 
+
 class TestMeshgridListUnpack(PytorchLayerTest):
     def _prepare_input(self):
         return (
@@ -189,7 +190,8 @@ def __init__(self, idx):
             super(prim_listunpack, self).__init__()
 
         def forward(self, in1, in2, in3, in4):
-            a, b, c, d = torch.meshgrid(in1, in2, in3, in4, indexing=self.idx)
+            a, b, c, d = torch.meshgrid(
+                in1, in2, in3, in4, indexing=self.idx)
             return a, b, c, d
 
     ref_net = None
@@ -215,7 +217,8 @@ def create_model(self):
         class meshgrid_model(torch.nn.Module):
             def forward(self, x):
                 h, w = x.shape
-                coords1, coords2 = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
+                coords1, coords2 = torch.meshgrid(
+                    torch.arange(h), torch.arange(w), indexing="ij")
                 coords = torch.stack([coords2, coords1], dim=0)
                 return coords.float()
 
@@ -225,3 +228,102 @@ def forward(self, x):
     @pytest.mark.precommit
     def test_meshgrid_subgraph(self, ie_device, precision, ir_version):
         self._test(*self.create_model(), ie_device, precision, ir_version)
+
+
+class TestListUnpackParameterSingle(PytorchLayerTest):
+    def _prepare_input(self):
+        def tensor_gen():
+            return np.random.uniform(0, 50, (1, 2, 10)).astype(np.float32)
+        return ((tensor_gen(), tensor_gen()), )
+
+    def create_model(self):
+        import torch
+        from typing import List
+
+        class model(torch.nn.Module):
+
+            def forward(self, x: List[torch.Tensor]):
+                x1, x2 = x
+                return x1, x2
+
+        return model(), None, ["prim::ListUnpack"]
+
+    @pytest.mark.nightly
+    def test(self, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version)
+
+
+class TestListUnpackParameterSingleMixed(PytorchLayerTest):
+    def _prepare_input(self):
+        def tensor_gen():
+            return np.random.uniform(0, 50, (1, 2, 10)).astype(np.float32)
+        # generate tensor with a different shape for easier mismatch detection in case of mixed input order
+
+        def tensor_gen_2():
+            return np.random.uniform(0, 50, (2, 3)).astype(np.float32)
+        return (tensor_gen_2(), (tensor_gen(), tensor_gen()), tensor_gen_2())
+
+    def create_model(self):
+        import torch
+        from typing import List
+
+        class model(torch.nn.Module):
+
+            def forward(self, y1, x: List[torch.Tensor], y2):
+                x1, x2 = x
+                return x1, x2, y1, y2
+
+        return model(), None, ["prim::ListUnpack"]
+
+    @pytest.mark.nightly
+    def test(self, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version)
+
+
+class TestListUnpackParameterNested(PytorchLayerTest):
+    def _prepare_input(self):
+        def tensor_gen():
+            return np.random.uniform(0, 50, (1, 2, 10)).astype(np.float32)
+        return (((tensor_gen(), tensor_gen()), (tensor_gen(), tensor_gen())), )
+
+    def create_model(self):
+        import torch
+        from typing import List
+
+        class model(torch.nn.Module):
+
+            def forward(self, x: List[List[torch.Tensor]]):
+                x1, x2 = x
+                y1, y2 = x1
+                y3, y4 = x2
+                return y1, y2, y3, y4
+
+        return model(), None, ["prim::ListUnpack"]
+
+    @pytest.mark.nightly
+    def test(self, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version)
+
+
+class TestListUnpackParameterMultiple(PytorchLayerTest):
+    def _prepare_input(self):
+        def tensor_gen():
+            return np.random.uniform(0, 50, (1, 2, 10)).astype(np.float32)
+        return ((tensor_gen(), tensor_gen()), (tensor_gen(), tensor_gen()))
+
+    def create_model(self):
+        import torch
+        from typing import List
+
+        class model(torch.nn.Module):
+
+            def forward(self, x: List[torch.Tensor], y: List[torch.Tensor]):
+                z1, z2 = x
+                z3, z4 = y
+                return z1, z2, z3, z4
+
+        return model(), None, ["prim::ListUnpack"]
+
+    @pytest.mark.nightly
+    def test(self, ie_device, precision, ir_version):
+        self._test(*self.create_model(), ie_device, precision, ir_version)
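Note: unpacking a List[torch.Tensor] parameter is what produces the prim::ListUnpack node these tests assert on. A minimal sketch of the mechanism (hypothetical module, not part of the test suite):

    import torch
    from typing import List

    class Unpack(torch.nn.Module):
        def forward(self, x: List[torch.Tensor]):
            a, b = x  # lowered to prim::ListUnpack
            return a + b

    # scripting preserves the list-typed input, so the op shows up in the graph
    print(torch.jit.script(Unpack()).graph)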
diff --git a/tests/layer_tests/pytorch_tests/test_lstm.py b/tests/layer_tests/pytorch_tests/test_lstm.py
new file mode 100644
index 00000000000000..3fef1b1e761d25
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_lstm.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import pytest
+import torch
+
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class aten_lstm(torch.nn.Module):
+    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first):
+        torch.nn.Module.__init__(self)
+        self.lstm = torch.nn.LSTM(input_size,
+                                  hidden_size,
+                                  num_layers,
+                                  has_bias,
+                                  batch_first,
+                                  bidirectional=bidirectional)
+
+    def forward(self, input_tensor, h0, c0):
+        return self.lstm(input_tensor, (h0, c0))
+
+
+class aten_gru(torch.nn.Module):
+    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first):
+        torch.nn.Module.__init__(self)
+        self.gru = torch.nn.GRU(input_size,
+                                hidden_size,
+                                num_layers,
+                                has_bias,
+                                batch_first,
+                                bidirectional=bidirectional)
+
+    def forward(self, input_tensor, h0):
+        return self.gru(input_tensor, h0)
+
+
+class aten_rnn(torch.nn.Module):
+    def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first, nonlinearity):
+        torch.nn.Module.__init__(self)
+        self.rnn = torch.nn.RNN(input_size,
+                                hidden_size,
+                                num_layers,
+                                nonlinearity=nonlinearity,
+                                bias=has_bias,
+                                batch_first=batch_first,
+                                bidirectional=bidirectional)
+
+    def forward(self, input_tensor, h0):
+        return self.rnn(input_tensor, h0)
+
+
+class TestLSTM(PytorchLayerTest):
+    def _prepare_input(self):
+        n = self.num_layers
+        if self.bidirectional:
+            n *= 2
+        if self.batch_first:
+            input = np.random.randn(3, 5, self.input_size).astype(np.float32)
+        else:
+            input = np.random.randn(5, 3, self.input_size).astype(np.float32)
+        h0 = np.random.randn(n, 3, self.hidden_size).astype(np.float32)
+        c0 = np.random.randn(n, 3, self.hidden_size).astype(np.float32)
+        return (input, h0, c0)
+
+    @pytest.mark.parametrize("input_size,hidden_size", [(10, 20),])
+    @pytest.mark.parametrize("num_layers", [1, 2, 7])
+    @pytest.mark.parametrize("has_bias", [True, False])
+    @pytest.mark.parametrize("bidirectional", [True, False])
+    @pytest.mark.parametrize("batch_first", [True, False])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_lstm(self, input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first, ie_device, precision, ir_version):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.bidirectional = bidirectional
+        self.batch_first = batch_first
+        self._test(aten_lstm(input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first), None, "aten::lstm",
+                   ie_device, precision, ir_version, trace_model=True)
+
+
+class TestGRU(PytorchLayerTest):
+    def _prepare_input(self):
+        n = self.num_layers
+        if self.bidirectional:
+            n *= 2
+        if self.batch_first:
+            input = np.random.randn(3, 5, self.input_size).astype(np.float32)
+        else:
+            input = np.random.randn(5, 3, self.input_size).astype(np.float32)
+        h0 = np.random.randn(n, 3, self.hidden_size).astype(np.float32)
+        return (input, h0)
+
+    @pytest.mark.parametrize("input_size,hidden_size", [(10, 20),])
+    @pytest.mark.parametrize("num_layers", [1, 2, 7])
+    @pytest.mark.parametrize("has_bias", [True, False])
+    @pytest.mark.parametrize("bidirectional", [True, False])
+    @pytest.mark.parametrize("batch_first", [True, False])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_gru(self, input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first, ie_device, precision, ir_version):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.bidirectional = bidirectional
+        self.batch_first = batch_first
+        self._test(aten_gru(input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first), None, "aten::gru",
+                   ie_device, precision, ir_version, trace_model=True)
+
+
+class TestRNN(PytorchLayerTest):
+    def _prepare_input(self):
+        n = self.num_layers
+        if self.bidirectional:
+            n *= 2
+        if self.batch_first:
+            input = np.random.randn(3, 5, self.input_size).astype(np.float32)
+        else:
+            input = np.random.randn(5, 3, self.input_size).astype(np.float32)
+        h0 = np.random.randn(n, 3, self.hidden_size).astype(np.float32)
+        return (input, h0)
+
+    @pytest.mark.parametrize("input_size,hidden_size", [(10, 20),])
+    @pytest.mark.parametrize("num_layers", [1, 2, 7])
+    @pytest.mark.parametrize("has_bias", [True, False])
+    @pytest.mark.parametrize("bidirectional", [True, False])
+    @pytest.mark.parametrize("batch_first", [True, False])
+    @pytest.mark.parametrize("nonlinearity", ["tanh", "relu"])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_rnn(self, input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first, nonlinearity, ie_device, precision, ir_version):
+        self.input_size = input_size
+        self.hidden_size = hidden_size
+        self.num_layers = num_layers
+        self.bidirectional = bidirectional
+        self.batch_first = batch_first
+        self._test(aten_rnn(input_size, hidden_size, num_layers, has_bias, bidirectional, batch_first, nonlinearity), None, f"aten::rnn_{nonlinearity}",
+                   ie_device, precision, ir_version, trace_model=True)
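Note: the `n` computed in each _prepare_input above follows the torch.nn.LSTM/GRU/RNN contract: the initial states have shape (num_layers * num_directions, batch, hidden_size), with num_directions == 2 for bidirectional networks. A quick standalone sanity sketch of that shape math:

    import torch

    num_layers, bidirectional, batch, hidden = 2, True, 3, 20
    n = num_layers * (2 if bidirectional else 1)
    lstm = torch.nn.LSTM(10, hidden, num_layers, bidirectional=bidirectional)
    x = torch.randn(5, batch, 10)  # (seq_len, batch, input_size) since batch_first=False
    out, (hn, cn) = lstm(x, (torch.zeros(n, batch, hidden), torch.zeros(n, batch, hidden)))
    assert hn.shape == (n, batch, hidden)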
diff --git a/tests/layer_tests/pytorch_tests/test_pow.py b/tests/layer_tests/pytorch_tests/test_pow.py
index 92e65898e353eb..2284e0561044e4 100644
--- a/tests/layer_tests/pytorch_tests/test_pow.py
+++ b/tests/layer_tests/pytorch_tests/test_pow.py
@@ -8,16 +8,16 @@
 from pytorch_layer_test_class import PytorchLayerTest
 
 
-@pytest.mark.parametrize('test_input', [(np.array([[1, 2], [3, 4]], dtype=np.float32),
-                                         np.array([[1, 1], [2, 2]], dtype=np.float32),),
-                                        (np.array([[1, 2], [3, 4]], dtype=np.float32),
-                                         np.array([2, 3], dtype=np.float32),),
-                                        (np.array([[1, 2], [3, 4]], dtype=np.float32),
-                                         np.array([2], dtype=np.float32),),
-                                        (np.array([5, 6], dtype=np.float32),
-                                         np.array([[1, 2], [3, 4]], dtype=np.float32),),
-                                        (np.array([5], dtype=np.float32),
-                                         np.array([[1, 2], [3, 4]], dtype=np.float32),)])
+@pytest.mark.parametrize('test_input,inplace', [
+    ((np.array([[1, 2], [3, 4]], dtype=np.float32), np.array([[1, 1], [2, 2]], dtype=np.float32)), False),
+    ((np.array([[1, 2], [3, 4]], dtype=np.float32), np.array([2, 3], dtype=np.float32)), False),
+    ((np.array([[1, 2], [3, 4]], dtype=np.float32), np.array([2], dtype=np.float32)), False),
+    ((np.array([[1, 2], [3, 4]], dtype=np.float32), np.array([[1, 1], [2, 2]], dtype=np.float32)), True),
+    ((np.array([[1, 2], [3, 4]], dtype=np.float32), np.array([2, 3], dtype=np.float32)), True),
+    ((np.array([[1, 2], [3, 4]], dtype=np.float32), np.array([2], dtype=np.float32)), True),
+    ((np.array([5, 6], dtype=np.float32), np.array([[1, 2], [3, 4]], dtype=np.float32)), False),
+    ((np.array([5], dtype=np.float32), np.array([[1, 2], [3, 4]], dtype=np.float32)), False),
+    ])
 class TestPow(PytorchLayerTest):
     """
     Input test data contains five test cases - elementwise power, broadcast exponent, one exponent,
@@ -27,21 +27,29 @@ class TestPow(PytorchLayerTest):
 
     def _prepare_input(self):
         return self.test_input
 
-    def create_model(self):
+    def create_model(self, inplace):
         class aten_pow(torch.nn.Module):
+            def __init__(self, inplace):
+                super(aten_pow, self).__init__()
+                if inplace:
+                    self.forward = self.forward_inplace
+                else:
+                    self.forward = self.forward_
 
-            def forward(self, input_data, exponent):
+            def forward_(self, input_data, exponent):
                 return torch.pow(input_data, exponent)
 
-        ref_net = None
+            def forward_inplace(self, input_data, exponent):
+                return input_data.pow_(exponent)
 
-        return aten_pow(), ref_net, "aten::pow"
+        return aten_pow(inplace), None, "aten::pow_" if inplace else "aten::pow"
 
     @pytest.mark.nightly
     @pytest.mark.precommit
-    def test_pow(self, ie_device, precision, ir_version, test_input):
+    def test_pow(self, inplace, ie_device, precision, ir_version, test_input):
         self.test_input = test_input
-        self._test(*self.create_model(), ie_device, precision, ir_version, use_convert_model=True)
+        self._test(*self.create_model(inplace), ie_device, precision,
+                   ir_version, use_convert_model=True)
 
 
 class TestPowMixedTypes(PytorchLayerTest):
@@ -105,9 +113,10 @@ def test_pow_mixed_types(self, ie_device, precision, ir_version, lhs_type, lhs_s
         self._test(*self.create_model(lhs_type, lhs_shape, rhs_type, rhs_shape),
                    ie_device, precision, ir_version)
 
+
 class TestPowMixedTypesScalars(PytorchLayerTest):
     def _prepare_input(self):
-        return (torch.randn([1,2,3,4]).numpy(),)
+        return (torch.randn([1, 2, 3, 4]).numpy(),)
 
     def create_model(self):
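Note: the trailing underscore in aten::pow_ is PyTorch's in-place convention; tensor.pow_(e) mutates the tensor instead of allocating a result, so the converter must recognize a distinct op name for the same mathematical operation. A quick illustration:

    import torch

    t = torch.tensor([1.0, 2.0, 3.0])
    t.pow_(2)  # in-place; traces as aten::pow_ rather than aten::pow
    assert torch.equal(t, torch.tensor([1.0, 4.0, 9.0]))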
diff --git a/tests/layer_tests/pytorch_tests/test_rand.py b/tests/layer_tests/pytorch_tests/test_rand.py
index 75677992fb6da2..08f6201cdb323c 100644
--- a/tests/layer_tests/pytorch_tests/test_rand.py
+++ b/tests/layer_tests/pytorch_tests/test_rand.py
@@ -88,3 +88,57 @@ def test_inplace_normal(self, model, inputs, ie_device, precision, ir_version):
         self.inputs = inputs
         self._test(model, None, "aten::normal", ie_device, precision,
                    ir_version, custom_eps=1e30)
+
+
+class TestStatistics():
+    class aten_normal(torch.nn.Module):
+        def forward(self, mean, std):
+            return torch.normal(mean, std)
+
+    class aten_randn(torch.nn.Module):
+        def forward(self, size):
+            return torch.randn(*size)
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("fw_model,inputs", [
+        (aten_normal(), (0, 1, (1000000,))),
+        (aten_normal(), (0, 1, (10000, 100))),
+        (aten_normal(), (0, 3, (100000, 100))),
+        (aten_normal(), (1, 6, (100000, 100))),
+        (aten_normal(), (-20, 2, (10000, 100))),
+        (aten_normal(), (-20, 100, (10000, 100))),
+
+        (aten_randn(), (0, 1, (1000000,))),
+        (aten_randn(), (0, 1, (10000, 100))),
+        (aten_randn(), (0, 1, (100000, 100))),
+    ])
+    def test_normal_statistics(self, fw_model, inputs, ie_device, precision):
+        import numpy.testing as npt
+        import numpy as np
+        import openvino as ov
+        mean_scalar, std_scalar, size = inputs
+        mean = torch.full(size, mean_scalar, dtype=torch.float32)
+        std = torch.full(size, std_scalar, dtype=torch.float32)
+
+        if isinstance(fw_model, self.aten_randn):
+            example_input = (torch.tensor(size), )
+            input_size = [len(size)]
+        else:
+            example_input = (mean, std)
+            input_size = [size, size]
+
+        ov_model = ov.convert_model(input_model=fw_model, example_input=example_input, input=input_size)
+        if ie_device == 'GPU' and precision == 'FP32':
+            config = {'INFERENCE_PRECISION_HINT': 'f32'}
+        else:
+            config = {}
+        compiled_model = ov.Core().compile_model(ov_model, ie_device, config)
+
+        fw_res = fw_model(*example_input)
+        ov_res = compiled_model(example_input)[0]
+
+        x_min, x_max = mean_scalar - 2 * std_scalar, mean_scalar + 2 * std_scalar
+        hist_fw, _ = np.histogram(fw_res.numpy(), bins=100, range=(x_min, x_max))
+        hist_ov, _ = np.histogram(ov_res, bins=100, range=(x_min, x_max))
+        npt.assert_allclose(hist_fw, hist_ov, atol=0.2, rtol=0.2)
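Note: random ops cannot be compared elementwise against PyTorch, so TestStatistics compares distributions instead: both outputs are binned over mean ± 2*std and the histograms must agree within tolerance. A standalone sketch of the same idea (two independent normal samples agree in histogram shape for large sample counts):

    import numpy as np

    a = np.random.normal(0, 1, 1000000)
    b = np.random.normal(0, 1, 1000000)
    ha, _ = np.histogram(a, bins=100, range=(-2, 2))
    hb, _ = np.histogram(b, bins=100, range=(-2, 2))
    # per-bin counts agree to within a few percent at this sample size
    np.testing.assert_allclose(ha, hb, rtol=0.2, atol=0.2)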
diff --git a/tests/layer_tests/pytorch_tests/test_scatter.py b/tests/layer_tests/pytorch_tests/test_scatter.py
index f02aa74b88b9ab..fd69a8b309d57d 100644
--- a/tests/layer_tests/pytorch_tests/test_scatter.py
+++ b/tests/layer_tests/pytorch_tests/test_scatter.py
@@ -219,3 +219,65 @@ def test_scatter_reduce(self, dim, index, src, dtype, inplace, has_out, reduce,
             kwargs_to_prepare_input={"dtype": dtype, "out": has_out},
             freeze_model=freeze
         )
+
+class TestScatterAdd(PytorchLayerTest):
+    def _prepare_input(self, dtype):
+        return (np.random.randn(6, 6).astype(dtype),)
+
+    def create_model(self, dim, index, src, inplace):
+        class aten_scatter_reduce(torch.nn.Module):
+            def __init__(self, dim, index, src, inplace):
+                super(aten_scatter_reduce, self).__init__()
+                self.dim = dim
+                self.use_empty_index = False
+                if index is None:
+                    self.use_empty_index = True
+                    # Placeholder
+                    self.index = torch.empty([1])
+                else:
+                    self.index = index
+                self.src = src
+                self.inplace = inplace
+
+            def forward(self, x: torch.Tensor):
+                if self.use_empty_index:
+                    index = torch.empty([0, 0])
+                else:
+                    index = self.index
+                if self.inplace:
+                    return x.scatter_add_(self.dim, index, self.src)
+                else:
+                    return x.scatter_add(self.dim, index, self.src)
+
+        op_name = "aten::scatter_add_" if inplace else "aten::scatter_add"
+
+        return aten_scatter_reduce(dim, index, src, inplace), None, op_name
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize("dim", [1, -1, 0])
+    @pytest.mark.parametrize(
+        "index",
+        [
+            None,  # Empty tensor scenario.
+            torch.tensor([[0, 1, 2, 3]]),
+            torch.tensor([[0, 5], [4, 1], [2, 3]]),
+        ],
+    )
+    @pytest.mark.parametrize("src", [torch.arange(1, 26).reshape(5, 5)])
+    @pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
+    @pytest.mark.parametrize("inplace", [True, False])
+    def test_scatter_reduce(self, dim, index, src, dtype, inplace, ie_device, precision, ir_version):
+        if isinstance(src, torch.Tensor):
+            src = src.to(getattr(torch, dtype))
+        if index is None:
+            pytest.skip(
+                "Cannot test reduce parameters with empty indexes due to issues with empty constant tensor or issues with prim::GetAttr str inputs."
+            )
+        self._test(
+            *self.create_model(dim, index, src, inplace),
+            ie_device,
+            precision,
+            ir_version,
+            kwargs_to_prepare_input={"dtype": dtype},
+        )
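Note: scatter_add accumulates src values into a copy of the input at positions selected by index along dim (scatter_add_ does the same in place, hence the separate aten::scatter_add_ op name). A minimal illustration of the semantics the test above relies on:

    import torch

    x = torch.zeros(3)
    index = torch.tensor([0, 1, 0])
    src = torch.tensor([1.0, 2.0, 3.0])
    # along dim 0: x[index[i]] += src[i], so slot 0 receives 1 + 3
    assert torch.equal(x.scatter_add(0, index, src), torch.tensor([4.0, 2.0, 0.0]))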
diff --git a/tests/layer_tests/pytorch_tests/test_square.py b/tests/layer_tests/pytorch_tests/test_square.py
new file mode 100644
index 00000000000000..f0a85829d362fe
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_square.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import pytest
+import torch
+
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class TestSquareTypes(PytorchLayerTest):
+
+    def _prepare_input(self):
+        return (torch.randn(self.shape).to(self.type).numpy(),)
+
+    def create_model(self, type):
+
+        class aten_square(torch.nn.Module):
+            def __init__(self, type):
+                super().__init__()
+                self.type = type
+
+            def forward(self, lhs):
+                return torch.square(lhs.to(self.type))
+
+        return aten_square(type), None, "aten::square"
+
+    @pytest.mark.parametrize(("type"), [torch.int32, torch.int64, torch.float32])
+    @pytest.mark.parametrize(("shape"), [[2, 3], [],])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_square_types(self, ie_device, precision, ir_version, type, shape):
+        self.type = type
+        self.shape = shape
+        self._test(*self.create_model(type),
+                   ie_device, precision, ir_version)
diff --git a/tests/layer_tests/requirements.txt b/tests/layer_tests/requirements.txt
index fd15d8bf32405d..fc55322ccb8b32 100644
--- a/tests/layer_tests/requirements.txt
+++ b/tests/layer_tests/requirements.txt
@@ -8,5 +8,5 @@ torchvision
 transformers
 pytest
 tensorflow-addons; python_version <= '3.10'
-jax; sys_platform == "linux"
-jaxlib; sys_platform == "linux"
+jax; sys_platform == "linux" and platform_machine == "x86_64" # https://jax.readthedocs.io/en/latest/installation.html#pip-installation-cpu - wheels are for "x86_64" only
+jaxlib; sys_platform == "linux" and platform_machine == "x86_64" # https://jax.readthedocs.io/en/latest/installation.html#pip-installation-cpu - wheels are for "x86_64" only
diff --git a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py
index d9166d0b069cb2..35f440c38234d8 100644
--- a/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py
+++ b/tests/layer_tests/tensorflow2_keras_tests/test_tf2_keras_conv_lstm_2d.py
@@ -15,19 +15,11 @@ def _prepare_input(self, inputs_info):
         assert len(input_names) == 1, "Test expects only one input"
         x_shape = inputs_info[input_names[0]]
         inputs_data = {}
-        inputs_data[input_names[0]] = np.random.uniform(-1, 1, x_shape)
+        inputs_data[input_names[0]] = np.random.uniform(-1, 1, x_shape).astype(np.float32)
         return inputs_data
 
     def create_keras_conv_lstm_2d_net(self, params, input_shapes):
-        activation_func_structure = {
-            "relu": tf.nn.relu,
-            "swish": tf.nn.swish,
-            "elu": tf.nn.elu,
-        }
-        if "activation" in params:
-            params["activation"] = activation_func_structure[params["activation"]]
-
         # create TensorFlow 2 model with Keras ConvLSTM2D operation
         tf.keras.backend.clear_session()
 
@@ -38,43 +30,31 @@ def create_keras_conv_lstm_2d_net(self, params, input_shapes):
         return tf2_net, None
 
     test_data_basic = [
-        pytest.param(dict(params=dict(filters=4, kernel_size=(3, 3), padding='same', return_sequences=False,
-                                      activation="swish"),
-                          input_shapes=[[2, 5, 20, 30, 2]]), marks=pytest.mark.skip(reason="*-108786")),
-        pytest.param(dict(params=dict(filters=6, kernel_size=(2, 3), padding='valid', dilation_rate=3,
-                                      recurrent_activation="elu", return_sequences=True, use_bias=True,
-                                      data_format="channels_first"),
-                          input_shapes=[[2, 5, 1, 40, 30]]), marks=pytest.mark.skip(reason="110006")),
-    ]
-
-    @pytest.mark.parametrize("params", test_data_basic)
-    @pytest.mark.precommit_tf_fe
-    @pytest.mark.nightly
-    def test_keras_conv_lstm_2d_basic(self, params, ie_device, precision, ir_version, temp_dir,
-                                      use_old_api, use_new_frontend):
-        self._test(*self.create_keras_conv_lstm_2d_net(**params), ie_device,
-                   precision,
-                   temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version,
-                   use_new_frontend=use_new_frontend, **params)
-
-    test_data_others = [
+        dict(params=dict(filters=4, kernel_size=(3, 3), padding='same', return_sequences=False,
+                         activation=tf.nn.swish),
+             input_shapes=[[2, 5, 20, 30, 2]]),
+        dict(params=dict(filters=6, kernel_size=(2, 3), padding='valid', dilation_rate=3,
+                         recurrent_activation=tf.nn.elu, return_sequences=True, use_bias=True,
+                         data_format="channels_first"),
+             input_shapes=[[2, 5, 1, 40, 30]]),
         dict(params=dict(filters=3, kernel_size=(3, 3), padding='valid', return_sequences=False),
             input_shapes=[[2, 5, 20, 30, 1]]),
-        dict(params=dict(filters=2, kernel_size=(2, 2), padding='same', return_sequences=False, activation="swish"),
+        dict(params=dict(filters=2, kernel_size=(2, 2), padding='same', return_sequences=False, activation=tf.nn.swish),
            input_shapes=[[2, 5, 25, 15, 3]]),
         dict(params=dict(filters=3, kernel_size=(3, 3), padding='valid', strides=(2, 2), return_sequences=True),
            input_shapes=[[2, 5, 10, 15, 2]]),
         dict(params=dict(filters=5, kernel_size=(2, 2), padding='valid', dilation_rate=3,
-                         activation="relu", return_sequences=False, use_bias=True,
+                         activation=tf.nn.relu, return_sequences=False, use_bias=True,
                          data_format="channels_last"),
            input_shapes=[[2, 5, 18, 17, 1]])
     ]
 
-    @pytest.mark.parametrize("params", test_data_others)
+    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
-    def test_keras_conv_lstm_2d_others(self, params, ie_device, precision, ir_version, temp_dir,
-                                       use_old_api, use_new_frontend):
+    def test_keras_conv_lstm_2d_basic(self, params, ie_device, precision, ir_version, temp_dir,
+                                      use_old_api, use_new_frontend):
         self._test(*self.create_keras_conv_lstm_2d_net(**params), ie_device,
                    precision,
                    temp_dir=temp_dir, use_old_api=use_old_api, ir_version=ir_version,
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py
index ca4f7f051e1079..da60c1ef21a79e 100644
--- a/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py
+++ b/tests/layer_tests/tensorflow_tests/test_tf_ArgMinMax.py
@@ -1,6 +1,8 @@
 # Copyright (C) 2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+import platform
+
 import numpy as np
 import pytest
 import tensorflow as tf
@@ -50,6 +52,10 @@ def create_argmin_max_net(self, input_shape, dimension, input_type, output_type,
     @pytest.mark.parametrize("op_type", [tf.raw_ops.ArgMax, tf.raw_ops.ArgMin])
     @pytest.mark.precommit_tf_fe
     @pytest.mark.nightly
+    @pytest.mark.xfail(condition=platform.system() == 'Linux' and platform.machine() in ['arm', 'armv7l',
+                                                                                         'aarch64',
+                                                                                         'arm64', 'ARM64'],
+                       reason='Ticket - 126314')
     def test_argmin_max_net(self, params, input_type, output_type, op_type,
                             ie_device, precision, ir_version, temp_dir, use_new_frontend, use_old_api):
         self._test(*self.create_argmin_max_net(**params, input_type=input_type,
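Note: the Keras ConvLSTM2D change above passes activation callables (tf.nn.swish, tf.nn.elu, tf.nn.relu) directly instead of string names, which removes the lookup table the test previously maintained; Keras layers accept either form. A tiny sketch of the equivalence (assuming TF 2.x):

    import tensorflow as tf

    # equivalent layer configurations: string name vs. callable
    l1 = tf.keras.layers.ConvLSTM2D(4, (3, 3), activation="swish")
    l2 = tf.keras.layers.ConvLSTM2D(4, (3, 3), activation=tf.nn.swish)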
diff --git a/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py b/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py
new file mode 100644
index 00000000000000..3472147215c991
--- /dev/null
+++ b/tests/layer_tests/tensorflow_tests/test_tf_MulNoNan.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import pytest
+import tensorflow as tf
+from common.tf_layer_test_class import CommonTFLayerTest
+from common.utils.tf_utils import mix_array_with_value
+
+class TestMulNoNan(CommonTFLayerTest):
+    def _prepare_input(self, inputs_info):
+        assert 'x' in inputs_info
+        assert 'y' in inputs_info
+        x_shape = inputs_info['x']
+        y_shape = inputs_info['y']
+        inputs_data = {}
+        inputs_data['x'] = np.random.randint(-10, 10, x_shape).astype(self.input_type)
+        inputs_data['x'] = mix_array_with_value(inputs_data['x'], np.inf)
+        inputs_data['x'] = mix_array_with_value(inputs_data['x'], np.nan)
+        inputs_data['y'] = np.random.randint(-10, 10, y_shape).astype(self.input_type) * \
+                           np.random.choice([0.0], y_shape).astype(self.input_type)
+        return inputs_data
+
+    def create_mul_no_nan_net(self, input_shape, input_type):
+        self.input_type = input_type
+        tf.compat.v1.reset_default_graph()
+        with tf.compat.v1.Session() as sess:
+            x = tf.compat.v1.placeholder(input_type, input_shape, 'x')
+            y = tf.compat.v1.placeholder(input_type, input_shape, 'y')
+            tf.raw_ops.MulNoNan(x=x, y=y)
+            tf.compat.v1.global_variables_initializer()
+            tf_net = sess.graph_def
+        return tf_net, None
+
+    test_data_basic = [
+        dict(input_shape=[10, 10], input_type=np.float32),
+        dict(input_shape=[2, 3, 4], input_type=np.float32),
+    ]
+
+    @pytest.mark.parametrize("params", test_data_basic)
+    @pytest.mark.precommit_tf_fe
+    @pytest.mark.nightly
+    def test_mul_no_nan_basic(self, params, ie_device, precision, ir_version, temp_dir,
+                              use_new_frontend, use_old_api):
+        self._test(*self.create_mul_no_nan_net(**params),
+                   ie_device, precision, ir_version, temp_dir=temp_dir,
+                   use_new_frontend=use_new_frontend, use_old_api=use_old_api)
\ No newline at end of file
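Note: MulNoNan(x, y) returns 0 wherever y == 0, even if x is inf or NaN, which is why _prepare_input above deliberately mixes inf/NaN into x and forces zeros into y. A NumPy model of the semantics (a mental reference only, not the test's comparison path):

    import numpy as np

    x = np.array([np.inf, np.nan, 2.0], dtype=np.float32)
    y = np.array([0.0, 0.0, 3.0], dtype=np.float32)
    # where y == 0 the product is defined as 0, sidestepping inf * 0 = nan
    out = np.where(y == 0.0, np.float32(0.0), x * y)
    assert np.array_equal(out, np.array([0.0, 0.0, 6.0], dtype=np.float32))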
diff --git a/tests/model_hub_tests/models_hub_common/constants.py b/tests/model_hub_tests/models_hub_common/constants.py
index 5c9d1c600c9e7c..8830543ed8bec2 100644
--- a/tests/model_hub_tests/models_hub_common/constants.py
+++ b/tests/model_hub_tests/models_hub_common/constants.py
@@ -5,6 +5,18 @@
 import os
 import tempfile
 
+'''
+@brief Time in seconds for the performance measurement on each network. This time excludes model
+loading and warm-up, and covers measuring only one of the two models - the one obtained via
+convert_model or via read_model; measuring both modes therefore takes 2 * runtime_measure_duration.
+'''
+runtime_measure_duration = os.environ.get('RUNTIME_MEASURE_DURATION', '60')
+'''
+@brief Warm-up time in seconds before the measurement.
+'''
+runtime_heat_duration = os.environ.get('RUNTIME_HEAT_DURATION', '5')
+
+
 
 tf_hub_cache_dir = os.environ.get('TFHUB_CACHE_DIR', os.path.join(tempfile.gettempdir(), "tfhub_modules"))
 os.environ['TFHUB_CACHE_DIR'] = tf_hub_cache_dir
diff --git a/tests/model_hub_tests/models_hub_common/test_convert_model.py b/tests/model_hub_tests/models_hub_common/test_convert_model.py
index 68e58a0658defc..ad09380daeb212 100644
--- a/tests/model_hub_tests/models_hub_common/test_convert_model.py
+++ b/tests/model_hub_tests/models_hub_common/test_convert_model.py
@@ -87,6 +87,7 @@ def teardown_method(self):
         gc.collect()
 
     def _run(self, model_name, model_link, ie_device):
+        self.model_name = model_name
         print("Load the model {} (url: {})".format(model_name, model_link))
         fw_model = self.load_model(model_name, model_link)
         print("Retrieve inputs info")
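Note: both durations are read from the environment as strings, so runs can be tuned without code changes. A hedged usage sketch (values are examples only; they must be set before constants.py is imported, since the module reads them at import time):

    import os

    # lengthen measurement to 120 s and warm-up to 10 s for this test session
    os.environ['RUNTIME_MEASURE_DURATION'] = '120'
    os.environ['RUNTIME_HEAT_DURATION'] = '10'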
diff --git a/tests/model_hub_tests/models_hub_common/test_performance_model.py b/tests/model_hub_tests/models_hub_common/test_performance_model.py
index ec51a23e4291a0..90704aa0d6939f 100644
--- a/tests/model_hub_tests/models_hub_common/test_performance_model.py
+++ b/tests/model_hub_tests/models_hub_common/test_performance_model.py
@@ -3,9 +3,14 @@
 
 import sys
-import time
 import traceback
+import time
 from enum import Enum
+import pytest
+from openvino.runtime.utils.types import openvino_to_numpy_types_map
+import models_hub_common.utils as utils
+import models_hub_common.constants as const
+
 
 import numpy as np
 import openvino as ov
@@ -43,29 +48,24 @@ class Status(Enum):
     LARGE_INFER_TIME_DIFF_WITH_LARGE_VAR = 9
 
 
+class ModelResults:
+    def __init__(self):
+        self.infer_mean_time = 0.0
+        self.infer_variance = 0.0
+
+
 class Results:
     def __init__(self):
-        self.converted_infer_time = 0.0
-        self.converted_model_time_variance = 0.0
-        self.read_model_infer_time = 0.0
-        self.read_model_infer_time_variance = 0.0
+        self.converted_model_results = ModelResults()
+        self.read_model_results = ModelResults()
         self.infer_time_ratio = 0.0
         self.error_message = ''
         self.status = None
 
 
-def wrap_timer(func, args):
-    t0 = time.time()
-    retval = func(*args)
-    t1 = time.time()
-    return retval, t1 - t0
-
-
 class TestModelPerformance:
     infer_timeout = 600
     threshold_ratio = 0.1
-    num_heat_runs = 100
-    num_measure_runs = 500
     threshold_var = 10.0
 
     def load_model(self, model_name, model_link):
@@ -116,78 +116,73 @@ def get_inputs_info(self, model_path: str):
                 inputs_info.append((param.get_node().get_friendly_name(), input_shape, param.get_element_type()))
         return inputs_info
 
-    def get_converted_model(self, model_path: str):
-        return ov.convert_model(model_path)
-
     def get_read_model(self, model_path: str):
         core = ov.Core()
         return core.read_model(model=model_path)
 
-    def infer_model(self, ov_model, inputs):
-        infer_step_t0 = time.time()
-        # heat run
-        for _ in range(0, TestModelPerformance.num_heat_runs):
-            ov_model(inputs)
-        # measure
-        results = []
-        for _ in range(0, TestModelPerformance.num_measure_runs):
-            t0 = time.time()
-            ov_model(inputs)
-            t1 = time.time()
-            results.append(t1 - t0)
-        mean = np.mean(results)
-        var = np.std(results, ddof=1) * 100 / mean
-        infer_step_t1 = time.time()
-        print('inference measurement done in {} secs'.format(infer_step_t1 - infer_step_t0))
-        return mean, var
+    def heat_hardware(self, ov_model, inputs) -> None:
+        _, heat_n_repeats, _ = utils.measure(utils.nano_secs(const.runtime_heat_duration), ov_model, (inputs,))
+        print('heat done in {} repeats'.format(heat_n_repeats))
+
+    def measure_inference(self, ov_model, inputs) -> ModelResults:
+        time_slices, infer_n_repeats, real_runtime = utils.measure(utils.nano_secs(const.runtime_measure_duration),
+                                                                   ov_model,
+                                                                   (inputs,))
+        print('measurement done in {} repeats'.format(infer_n_repeats))
+        infer_throughput = float(infer_n_repeats * (10 ** 9)) / real_runtime
+        infer_mean_time_ns = np.mean(time_slices)
+        infer_mean_time = infer_mean_time_ns / (10 ** 9)
+        infer_variance = (np.std(time_slices, ddof=1) * 100) / infer_mean_time_ns
+        utils.print_stat('model time infer {} secs', infer_mean_time)
+        utils.print_stat('model time infer var {}', infer_variance)
+        utils.print_stat('model time infer throughput {}', infer_throughput)
+        results = ModelResults()
+        results.infer_mean_time = infer_mean_time
+        results.infer_variance = infer_variance
+        return results
+
+    def infer_model(self, ov_model, inputs) -> ModelResults:
+        self.heat_hardware(ov_model, inputs)
+        return self.measure_inference(ov_model, inputs)
 
     def compile_model(self, model, ie_device):
         core = ov.Core()
         return core.compile_model(model, ie_device)
 
-    def _run(self, model_name, model_link, ie_device):
+    def __run(self, model_name, model_link, ie_device):
         results = Results()
         results.status = None
         try:
-            print("Load the model {} (url: {})".format(model_name, model_link))
             results.status = Status.LOAD_MODEL
-            model_obj, timedelta = wrap_timer(self.load_model, (model_name, model_link))
-            print('Model {} loaded in {} secs'.format(model_name, timedelta))
-            print("Retrieve inputs info")
+            model_obj = utils.call_with_timer('Load model', self.load_model, (model_name, model_link))
             results.status = Status.GET_INPUTS_INFO
-            inputs_info, timedelta = wrap_timer(self.get_inputs_info, (model_obj,))
-            print('Got inputs info in {} secs'.format(timedelta))
-            print("Prepare input data")
+            inputs_info = utils.call_with_timer('Retrieve model inputs', self.get_inputs_info, (model_obj,))
            results.status = Status.PREPARE_INPUTS
             inputs = self.prepare_inputs(inputs_info)
-            print("Convert the model into ov::Model")
             results.status = Status.GET_CONVERTED_MODEL
-            converted_model = self.compile_model(self.get_converted_model(model_obj), ie_device)
-            print("read the model into ov::Model")
+            model = utils.call_with_timer('Convert model', ov.convert_model, (model_obj,))
+            converted_model = utils.call_with_timer('Compile converted model', self.compile_model, (model, ie_device))
             results.status = Status.GET_READ_MODEL
-            read_model = self.compile_model(self.get_read_model(model_obj), ie_device)
-            print("Infer the converted model")
+            model = utils.call_with_timer('Read model', self.get_read_model, (model_obj,))
+            read_model = utils.call_with_timer('Compile read model', self.compile_model, (model, ie_device))
             results.status = Status.INFER_CONVERTED_MODEL
-            converted_model_time, converted_model_time_variance = self.infer_model(converted_model, inputs)
-            print('converted model time infer {}'.format(converted_model_time))
-            print('converted model time infer var {}'.format(converted_model_time_variance))
-            print("Infer read model")
+            results.converted_model_results = utils.call_with_timer('Infer converted model',
+                                                                    self.infer_model,
+                                                                    (converted_model, inputs))
             results.status = Status.INFER_READ_MODEL
-            read_model_time, read_model_time_variance = self.infer_model(read_model, inputs)
-            print('read model time infer {}'.format(read_model_time))
-            print('read model time infer var {}'.format(read_model_time_variance))
+            results.read_model_results = utils.call_with_timer('Infer read model',
+                                                               self.infer_model,
+                                                               (read_model, inputs))
 
-            infer_time_ratio = converted_model_time / read_model_time
+            infer_time_ratio = (results.converted_model_results.infer_mean_time /
+                                results.read_model_results.infer_mean_time)
+            utils.print_stat('infer ratio converted_model_time/read_model_time {}', infer_time_ratio)
 
-            results.converted_infer_time = converted_model_time
-            results.converted_model_time_variance = converted_model_time_variance
-            results.read_model_infer_time = read_model_time
-            results.read_model_infer_time_variance = read_model_time_variance
             results.infer_time_ratio = infer_time_ratio
 
             if abs(infer_time_ratio - 1) > TestModelPerformance.threshold_ratio:
-                if (read_model_time_variance > TestModelPerformance.threshold_var
-                        or converted_model_time_variance > TestModelPerformance.threshold_var):
+                if (results.read_model_results.infer_variance > TestModelPerformance.threshold_var
+                        or results.converted_model_results.infer_variance > TestModelPerformance.threshold_var):
                     results.status = Status.LARGE_INFER_TIME_DIFF_WITH_LARGE_VAR
                     results.error_message = "too large ratio {} with large variance".format(infer_time_ratio)
                 else:
@@ -204,10 +199,10 @@ def _run(self, model_name, model_link, ie_device):
     def run(self, model_name, model_link, ie_device):
         self.result = Results()
         t0 = time.time()
-        self.result = multiprocessing_run(self._run, [model_name, model_link, ie_device], model_name,
+        self.result = multiprocessing_run(self.__run, [model_name, model_link, ie_device], model_name,
                                           self.infer_timeout)
         t1 = time.time()
-        print('test running time {}'.format(t1 - t0))
+        utils.print_stat('test run time {} secs', (t1 - t0))
         if self.result.status == Status.OK:
             return
         err_message = "\n{func} running failed: \n{msg}".format(func=model_name, msg=self.result.error_message)
diff --git a/tests/model_hub_tests/models_hub_common/utils.py b/tests/model_hub_tests/models_hub_common/utils.py
index bf714c86279061..a45d9aa77d1784 100644
--- a/tests/model_hub_tests/models_hub_common/utils.py
+++ b/tests/model_hub_tests/models_hub_common/utils.py
@@ -4,6 +4,7 @@
 import itertools
 import os
 import shutil
+import time
 
 import numpy as np
 from models_hub_common.constants import test_device
@@ -53,6 +54,8 @@ def get_params(ie_device=None):
 
 
 def cleanup_dir(dir: str):
+    if not os.path.exists(dir):
+        return
     # remove all downloaded files from cache
     for file_name in os.listdir(dir):
         file_path = os.path.join(dir, file_name)
@@ -63,3 +66,45 @@ def cleanup_dir(dir: str):
                 shutil.rmtree(file_path)
         except Exception as e:
             pass
+
+
+def round_num(n: float) -> str:
+    if 0.1 < n < 1:
+        return str(n)[:4]
+    s = '{:.2E}'.format(n)
+    if s.endswith('E+00'):
+        return s[:-4]
+    return s
+
+
+def nano_secs(secs):
+    return float(secs) * (10 ** 9)
+
+
+def measure(max_time_nano_secs: float, func, args):
+    left_time_ns = float(max_time_nano_secs)
+    time_slices = []
+    n_repeats = 0
+    while left_time_ns > 0:
+        t0 = time.perf_counter_ns()
+        func(*args)
+        t1 = time.perf_counter_ns()
+        timedelta = t1 - t0
+        time_slices.append(timedelta)
+        left_time_ns -= timedelta
+        n_repeats += 1
+    real_runtime_nano_secs = max_time_nano_secs - left_time_ns
+    return time_slices, n_repeats, real_runtime_nano_secs
+
+
+def call_with_timer(timer_label: str, func, args):
+    print('{} ...'.format(timer_label))
+    t0 = time.time()
+    ret_value = func(*args)
+    t1 = time.time()
+    print('{} is done in {} secs'.format(timer_label, round_num(t1 - t0)))
+    return ret_value
+
+
+def print_stat(s: str, value: float):
+    print(s.format(round_num(value)))
diff --git a/tests/model_hub_tests/performance_tests/conftest.py b/tests/model_hub_tests/performance_tests/conftest.py
index 09c55569c4d66d..28b96b716829ed 100644
--- a/tests/model_hub_tests/performance_tests/conftest.py
+++ b/tests/model_hub_tests/performance_tests/conftest.py
@@ -6,6 +6,7 @@
 import pytest
 from models_hub_common.utils import get_params
 from py.xml import html
+from models_hub_common.utils import round_num
 
 
 def pytest_generate_tests(metafunc):
@@ -18,35 +19,24 @@ def pytest_generate_tests(metafunc):
 def pytest_runtest_makereport(item, call):
     outcome = yield
     report = outcome.get_result()
-    if call.when == 'teardown' and getattr(item.obj.__self__, 'result', None) is not None:
+    if getattr(item.obj.__self__, 'result', None) is not None:
         results = item.obj.__self__.result
         report._results = results
 
 
 @pytest.mark.optionalhook
 def pytest_html_results_table_header(cells):
-    cells.insert(2, html.th('status', class_="sortable"))
-    cells.insert(3, html.th('converted model infer time'))
-    cells.insert(4, html.th('converted model infer time variance'))
-    cells.insert(5, html.th('read model infer time'))
-    cells.insert(6, html.th('read model infer time variance'))
-    cells.insert(7, html.th('model infer time ratio converted_model_time/read_model_time'))
-
-
-def round_num(n: float) -> str:
-    s = '{:.4E}'.format(n)
-    if s.endswith('E+00'):
-        return s[:-4]
-    return s
+    cells.insert(3, html.th('Status', class_="sortable"))
+    cells.insert(4, html.th('convert_model Pipeline Inference Time, sec.'))
+    cells.insert(5, html.th('read_model Pipeline Inference Time, sec.'))
+    cells.insert(6, html.th('Inference Time Ratio (convert_model vs. read_model)'))
 
 
 @pytest.mark.optionalhook
 def pytest_html_results_table_row(report, cells):
-    if not getattr(report, '_results', None):
+    if getattr(report, '_results', None) is None:
         return
-    cells.insert(2, html.td(report._results.status))
-    cells.insert(3, html.td(round_num(report._results.converted_infer_time)))
-    cells.insert(4, html.td(round_num(report._results.converted_model_time_variance)))
-    cells.insert(5, html.td(round_num(report._results.read_model_infer_time)))
-    cells.insert(6, html.td(round_num(report._results.read_model_infer_time_variance)))
-    cells.insert(7, html.td(round_num(report._results.infer_time_ratio)))
+    cells.insert(3, html.td(str(report._results.status)[7:]))
+    cells.insert(4, html.td(round_num(report._results.converted_model_results.infer_mean_time)))
+    cells.insert(5, html.td(round_num(report._results.read_model_results.infer_mean_time)))
+    cells.insert(6, html.td(round_num(report._results.infer_time_ratio)))
diff --git a/tests/model_hub_tests/performance_tests/test_tf_hub_performance_model.py b/tests/model_hub_tests/performance_tests/test_tf_hub_performance_model.py
index d24831ce5fac44..08b4c785a2019b 100644
--- a/tests/model_hub_tests/performance_tests/test_tf_hub_performance_model.py
+++ b/tests/model_hub_tests/performance_tests/test_tf_hub_performance_model.py
@@ -12,6 +12,7 @@
 from models_hub_common.constants import tf_hub_cache_dir
 from models_hub_common.test_performance_model import TestModelPerformance
 from models_hub_common.utils import get_models_list
+from models_hub_common.utils import cleanup_dir
 
 
 def clean_cache():
@@ -35,7 +36,7 @@ def load_model(self, model_name, model_link):
 
     def teardown_method(self):
         if not no_clean_cache_dir:
-            clean_cache()
+            cleanup_dir(tf_hub_cache_dir)
         # deallocate memory after each test case
         gc.collect()
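Note: measure() runs the callable repeatedly until its nanosecond budget is spent and returns the per-call time slices, so mean latency, variance (as a percentage of the mean) and throughput all come from a single pass, exactly as measure_inference computes above. A minimal standalone use on a hypothetical workload (assuming models_hub_common is importable, as it is under tests/model_hub_tests):

    import numpy as np
    from models_hub_common import utils

    slices, n, runtime_ns = utils.measure(utils.nano_secs('1'), sum, (range(1000),))
    mean_ns = np.mean(slices)
    throughput = n * 1e9 / runtime_ns            # calls per second
    variance_pct = np.std(slices, ddof=1) * 100 / mean_ns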
diff --git a/tests/model_hub_tests/torch_tests/hf_transformers_models b/tests/model_hub_tests/torch_tests/hf_transformers_models
index 73e884d49eac25..363b9a89922757 100644
--- a/tests/model_hub_tests/torch_tests/hf_transformers_models
+++ b/tests/model_hub_tests/torch_tests/hf_transformers_models
@@ -65,9 +65,9 @@ EleutherAI/pythia-6.9b,gpt_neox
 facebook/bart-large-mnli,bart
 facebook/convnextv2-tiny-22k-384,convnextv2
 facebook/detr-resnet-50,detr
-facebook/dinov2-base,dinov2,skip,Load problem
+facebook/dinov2-base,dinov2
 facebook/dpr-question_encoder-single-nq-base,dpr
-facebook/encodec_24khz,encodec,xfail,Unsupported op aten::lstm
+facebook/encodec_24khz,encodec
 facebook/esm2_t6_8M_UR50D,esm
 facebook/flava-full,flava,xfail,Tracing problem
 facebook/flava-image-codebook,flava_image_codebook,skip,Load problem
@@ -168,7 +168,7 @@ HJHGJGHHG/GAU-Base-Full,gau,skip,Load problem
 huggingface/autoformer-tourism-monthly,autoformer,skip,Load problem
 huggingface/informer-tourism-monthly,informer,skip,Load problem
 huggingface/time-series-transformer-tourism-monthly,time_series_transformer,skip,Load problem
-HuggingFaceM4/tiny-random-idefics,idefics,skip,Load problem
+HuggingFaceM4/tiny-random-idefics,idefics,xfail,Tracing error: please check correctness of provided example_input (eval was correct but trace failed with incompatible tuples and tensors)
 HuggingFaceM4/tiny-random-vllama-clip,vllama,skip,Load problem
 HuggingFaceM4/tiny-random-vopt-clip,vopt,skip,Load problem
 HuiHuang/gpt3-damo-base-zh,gpt3,skip,Load problem
@@ -243,12 +243,12 @@ microsoft/conditional-detr-resnet-50,conditional_detr
 microsoft/deberta-base,deberta
 microsoft/git-large-coco,git,skip,Load problem
 microsoft/layoutlm-base-uncased,layoutlm
-microsoft/layoutlmv2-base-uncased,layoutlmv2,skip,Load problem
+microsoft/layoutlmv2-base-uncased,layoutlmv2,xfail,Tracing error: please check correctness of provided example_input (but eval was correct)
 microsoft/layoutlmv3-base,layoutlmv3
 microsoft/markuplm-base,markuplm
 microsoft/resnet-50,resnet
 microsoft/speecht5_hifigan,hifigan,skip,Load problem
-microsoft/speecht5_tts,speecht5,skip,Load problem
+microsoft/speecht5_tts,speecht5,xfail,Tracing error: hangs with no error (probably because of an infinite while loop inside generate)
 microsoft/swinv2-tiny-patch4-window8-256,swinv2
 microsoft/table-transformer-detection,table-transformer
 microsoft/wavlm-large,wavlm,skip,Load problem
@@ -317,7 +317,7 @@ sahasrarjn/interbert,BERT,skip,Load problem
 saibo/genkalm-medium-gpt2,genkalm,skip,Load problem
 SajjadAyoubi/clip-fa-vision,clip_vision_model
 Salesforce/blip2-flan-t5-xl,blip-2,skip,Load problem
-Salesforce/blip-image-captioning-large,blip,skip,Load problem
+Salesforce/blip-image-captioning-large,blip
 Salesforce/instructblip-vicuna-7b,instructblip,skip,Load problem
 SamLowe/roberta-base-go_emotions,roberta
 sanchit-gandhi/enhanced_direct_s2st_en_to_es,speech-to-speech,skip,Load problem
diff --git a/tests/model_hub_tests/torch_tests/scripts/process_op_report.py b/tests/model_hub_tests/torch_tests/scripts/process_op_report.py
new file mode 100644
index 00000000000000..c9336c606b6858
--- /dev/null
+++ b/tests/model_hub_tests/torch_tests/scripts/process_op_report.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import sys
+
+if __name__ == "__main__":
+    assert len(sys.argv) > 1, "Please provide filename"
+    filename = sys.argv[1]
+    if os.path.isfile(filename):
+        ops_dict = dict()
+        with open(filename, 'r') as f:
+            for line in f.readlines():
+                r = line.split()
+                if r[0] in ops_dict:
+                    ops_dict[r[0]].append(r[1])
+                else:
+                    ops_dict[r[0]] = [r[1]]
+
+        with open(filename, 'w') as f:
+            for op in sorted(ops_dict.keys()):
+                models = ops_dict[op]
+                m_str = ', '.join(models)
+                f.write(
+                    f"{op:<30} appears in {len(models):>2} models: {m_str}\n")
+    else:
+        print(f"File {filename} doesn't exist.")
diff --git a/tests/model_hub_tests/torch_tests/test_hf_transformers.py b/tests/model_hub_tests/torch_tests/test_hf_transformers.py
index 24d878408a55e1..3c735b90aad159 100644
--- a/tests/model_hub_tests/torch_tests/test_hf_transformers.py
+++ b/tests/model_hub_tests/torch_tests/test_hf_transformers.py
@@ -154,6 +154,94 @@ def forward(self, x):
 
             model = VIT_GPT2_Model(model)
             example = (encoded_input.pixel_values,)
+        elif 'idefics' in mi.tags:
+            from transformers import IdeficsForVisionText2Text, AutoProcessor
+            model = IdeficsForVisionText2Text.from_pretrained(name)
+            processor = AutoProcessor.from_pretrained(name)
+
+            prompts = [[
+                "User: What is in this image?",
+                "https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG",
+                "<end_of_utterance>",
+
+                "\nAssistant: This picture depicts Idefix, the dog of Obelix in Asterix and Obelix. Idefix is running on the ground.<end_of_utterance>",
+
+                "\nUser:",
+                "https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052",
+                "And who is that?<end_of_utterance>",
+
+                "\nAssistant:",
+            ]]
+
+            inputs = processor(prompts, add_end_of_utterance_token=False, return_tensors="pt")
+            exit_condition = processor.tokenizer("<end_of_utterance>", add_special_tokens=False).input_ids
+            bad_words_ids = processor.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
+
+            example = dict(inputs)
+            example.update({
+                'eos_token_id': exit_condition,
+                'bad_words_ids': bad_words_ids,
+            })
+
+            class Decorator(torch.nn.Module):
+                def __init__(self, model):
+                    super().__init__()
+                    self.model = model
+                def forward(self, input_ids, attention_mask, pixel_values, image_attention_mask, eos_token_id, bad_words_ids):
+                    return self.model.generate(
+                        input_ids=input_ids,
+                        attention_mask=attention_mask,
+                        pixel_values=pixel_values,
+                        image_attention_mask=image_attention_mask,
+                        eos_token_id=eos_token_id,
+                        bad_words_ids=bad_words_ids,
+                        max_length=100
+                    )
+            model = Decorator(model)
+        elif 'blip' in mi.tags and 'text2text-generation' in mi.tags:
+            from transformers import BlipProcessor, BlipForConditionalGeneration
+
+            processor = BlipProcessor.from_pretrained(name)
+            model = BlipForConditionalGeneration.from_pretrained(name)
+            text = "a photography of"
+            inputs = processor(self.image, text, return_tensors="pt")
+
+            class DecoratorForBlipForConditional(torch.nn.Module):
+                def __init__(self, model):
+                    super().__init__()
+                    self.model = model
+
+                def forward(self, pixel_values, input_ids, attention_mask):
+                    return self.model.generate(pixel_values, input_ids, attention_mask)
+
+            model = DecoratorForBlipForConditional(model)
+            example = dict(inputs)
+        elif 'speecht5' in mi.tags:
+            from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
+            from datasets import load_dataset
+            processor = SpeechT5Processor.from_pretrained(name)
+            model = SpeechT5ForTextToSpeech.from_pretrained(name)
+
+            inputs = processor(text="Hello, my dog is cute.", return_tensors="pt")
+            # load xvector containing speaker's voice characteristics from a dataset
+            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+            speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
+            example = {'input_ids': inputs["input_ids"], 'speaker_embeddings': speaker_embeddings}
+            class DecoratorModelForSeq2SeqLM(torch.nn.Module):
+                def __init__(self, model):
+                    super().__init__()
+                    self.model = model
+                def forward(self, input_ids, speaker_embeddings):
+                    return self.model.generate_speech(input_ids=input_ids, speaker_embeddings=speaker_embeddings)  #, vocoder=vocoder)
+            model = DecoratorModelForSeq2SeqLM(model)
+        elif 'layoutlmv2' in mi.tags:
+            from transformers import LayoutLMv2Processor
+            processor = LayoutLMv2Processor.from_pretrained(name)
+
+            question = "What's the content of this image?"
+            encoding = processor(self.image, question, max_length=512, truncation=True, return_tensors="pt")
+            example = dict(encoding)
         elif 'pix2struct' in mi.tags:
             from transformers import AutoProcessor, Pix2StructForConditionalGeneration
             model = Pix2StructForConditionalGeneration.from_pretrained(name, **model_kwargs)
diff --git a/tests/model_hub_tests/torch_tests/test_timm.py b/tests/model_hub_tests/torch_tests/test_timm.py
index e6affb85b80338..7e8d8dcc1e05e1 100644
--- a/tests/model_hub_tests/torch_tests/test_timm.py
+++ b/tests/model_hub_tests/torch_tests/test_timm.py
@@ -68,7 +68,8 @@ def teardown_method(self):
     @pytest.mark.parametrize("name", ["mobilevitv2_050.cvnets_in1k",
                                       "poolformerv2_s12.sail_in1k",
                                       "vit_base_patch8_224.augreg_in21k",
-                                      "beit_base_patch16_224.in22k_ft_in22k"])
+                                      "beit_base_patch16_224.in22k_ft_in22k",
+                                      "sequencer2d_l.in1k"])
     @pytest.mark.precommit
     def test_convert_model_precommit(self, name, ie_device):
         self.run(name, None, ie_device)
diff --git a/tests/model_hub_tests/torch_tests/timm_models b/tests/model_hub_tests/torch_tests/timm_models
index 46c06498d3c610..4b23afec81e77d 100644
--- a/tests/model_hub_tests/torch_tests/timm_models
+++ b/tests/model_hub_tests/torch_tests/timm_models
@@ -385,7 +385,7 @@ selecsls60.in1k,None
 selecsls60b.in1k,None
 semnasnet_075.rmsp_in1k,None
 senet154.gluon_in1k,None
-sequencer2d_l.in1k,None,xfail,Unsupported aten::lstm
+sequencer2d_l.in1k,None
 seresnet152d.ra2_in1k,None
 seresnet33ts.ra2_in1k,None
 seresnet50.a1_in1k,None
diff --git a/tests/model_hub_tests/torch_tests/torch_utils.py b/tests/model_hub_tests/torch_tests/torch_utils.py
index e726068feff64d..d92462efaf6521 100644
--- a/tests/model_hub_tests/torch_tests/torch_utils.py
+++ b/tests/model_hub_tests/torch_tests/torch_utils.py
@@ -3,6 +3,7 @@
 
 import pytest
 import torch
+import os
 from models_hub_common.test_convert_model import TestConvertModel
 from models_hub_common.utils import get_models_list
 from openvino import convert_model
@@ -27,10 +28,22 @@ def flattenize_structure(outputs):
 
 def process_pytest_marks(filepath: str):
     return [
-        pytest.param(n, marks=pytest.mark.xfail(reason=r) if m == "xfail" else pytest.mark.skip(reason=r)) if m else n
+        pytest.param(n, marks=pytest.mark.xfail(reason=r) if m ==
+                     "xfail" else pytest.mark.skip(reason=r)) if m else n
         for n, _, m, r in get_models_list(filepath)]
 
 
+def extract_unsupported_ops_from_exception(e: str) -> list:
+    exception_str = "No conversion rule found for operations:"
+    for s in e.splitlines():
+        it = s.find(exception_str)
+        if it >= 0:
+            _s = s[it + len(exception_str):]
+            ops = _s.replace(" ", "").split(",")
+            return ops
+    return []
+
+
 class TestTorchConvertModel(TestConvertModel):
     def setup_class(self):
         torch.set_grad_enabled(False)
@@ -49,8 +62,19 @@ def prepare_inputs(self, inputs_info):
         return [i.numpy() for i in inputs]
 
     def convert_model(self, model_obj):
-        ov_model = convert_model(
-            model_obj, example_input=self.example, verbose=True)
+        try:
+            ov_model = convert_model(
+                model_obj, example_input=self.example, verbose=True)
+        except Exception as e:
+            report_filename = os.environ.get("OP_REPORT_FILE", None)
+            if report_filename:
+                mode = 'a' if os.path.exists(report_filename) else 'w'
+                with open(report_filename, mode) as f:
+                    ops = extract_unsupported_ops_from_exception(str(e))
+                    if ops:
+                        ops = [f"{op} {self.model_name}" for op in ops]
+                        f.write("\n".join(ops) + "\n")
+            raise e
         return ov_model
 
     def infer_fw_model(self, model_obj, inputs):
diff --git a/tests/model_hub_tests/torch_tests/torchbench_models b/tests/model_hub_tests/torch_tests/torchbench_models
index 1817aa54657c62..8c2a2ee93fdfa0 100644
--- a/tests/model_hub_tests/torch_tests/torchbench_models
+++ b/tests/model_hub_tests/torch_tests/torchbench_models
@@ -5,12 +5,12 @@ LearningToPaint,None
 Super_SloMo,None,xfail,Unsupported ops aten::l1_loss aten::mse_loss
 #alexnet,None - Already tested by torchvision tests
 basic_gnn_edgecnn,None,xfail,Accuracy validation failed
-basic_gnn_gcn,None,xfail,Unsupported ops aten::pow_ aten::scatter_add_
-basic_gnn_gin,None,xfail,Unsupported op aten::scatter_add_
-basic_gnn_sage,None,xfail,Unsupported op aten::scatter_add_
+basic_gnn_gcn,None,xfail,Unsupported op aten::pow_
+basic_gnn_gin,None
+basic_gnn_sage,None
 #cm3leon_generate,None,skip,No install.py is found
 dcgan,None
-demucs,None,xfail,Unsupported op aten::lstm
+demucs,None
 #densenet121,None - Already tested by torchvision tests
 #detectron2_fasterrcnn_r_101_c4,None - Already tested by det2 tests
 #detectron2_fasterrcnn_r_101_dc5,None - Already tested by det2 tests
@@ -93,7 +93,7 @@ tacotron2,None,skip,Can't be loaded without CUDA
 #timm_vision_transformer_large,None - Already tested by timm tests
 #timm_vovnet,None - Already tested by timm tests
 torch_multimodal_clip,None,skip,Can't be traced
-tts_angular,None,xfail,Unsupported op aten::lstm
+tts_angular,None
 #vgg16,None - Already tested by torchvision tests
 #vision_maskrcnn,None,skip,Only tensors, lists, tuples of tensors, or dictionary of tensors can be output from traced functions
 yolov3,None
\ No newline at end of file
diff --git a/thirdparty/onnx/CMakeLists.txt b/thirdparty/onnx/CMakeLists.txt
index c9752a833f7b7c..fb41a383bcef41 100644
--- a/thirdparty/onnx/CMakeLists.txt
+++ b/thirdparty/onnx/CMakeLists.txt
@@ -70,5 +70,6 @@ if(NOT ENABLE_SYSTEM_PROTOBUF AND NOT BUILD_SHARED_LIBS)
     endif()
 
     install(TARGETS ${protobuf_target_name} EXPORT ONNXTargets
-            ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE})
+            ARCHIVE DESTINATION ${OV_CPACK_ARCHIVEDIR} COMPONENT ${OV_CPACK_COMP_CORE}
+            ${OV_CPACK_COMP_CORE_EXCLUDE_ALL})
 endif()
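Note: extract_unsupported_ops_from_exception scans the conversion error text for the frontend's "No conversion rule found for operations:" marker and splits out the op names; convert_model then appends one "op model" pair per line for scripts/process_op_report.py to aggregate. A quick illustration of the parsing step (the error text is a made-up example):

    msg = "Error: ... No conversion rule found for operations: aten::lstm, aten::erfc"
    marker = "No conversion rule found for operations:"
    tail = msg[msg.find(marker) + len(marker):]
    assert tail.replace(" ", "").split(",") == ["aten::lstm", "aten::erfc"]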
" "Got {}".format(type(tensor))) diff --git a/tools/openvino_dev/setup.py b/tools/openvino_dev/setup.py index 5804ba47c2e3bd..0b9cf99e894d9b 100644 --- a/tools/openvino_dev/setup.py +++ b/tools/openvino_dev/setup.py @@ -278,8 +278,8 @@ def concat_files(output_file, input_files): outfile.write(content) return output_file -description_md = SCRIPT_DIR.parents[1] / 'docs' / 'install_guides' / 'pypi-openvino-dev.md' -md_files = [description_md, SCRIPT_DIR.parents[1] / 'docs' / 'install_guides' / 'pre-release-note.md'] +description_md = SCRIPT_DIR.parents[1] / 'docs' / 'dev' / "pypi_publish" / 'pypi-openvino-dev.md' +md_files = [description_md, SCRIPT_DIR.parents[1] / 'docs' / 'dev' / "pypi_publish" / 'pre-release-note.md'] docs_url = 'https://docs.openvino.ai/2023.0/index.html' if(os.getenv('CI_BUILD_DEV_TAG')): diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py b/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py index 882a075b7de5f7..3a24e84af1ae9a 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py @@ -154,6 +154,8 @@ def to_torch_tensor(tensor): if isinstance(tensor, (tuple, list)): # TODO: Function to_torch_tensor should be renamed as it handles not only a tensor return tuple(to_torch_tensor(x) for x in tensor) + if isinstance(tensor, dict) and all(isinstance(k, str) for k in tensor.keys()): + return dict((k, to_torch_tensor(x)) for k, x in tensor.items()) else: raise Error("Unexpected type of example_input. Supported types torch.Tensor, np.array or ov.Tensor. " "Got {}".format(type(tensor)))