Code Quality & Regression Tests - NVIDIA T4 #97

name: Code Quality & Regression Tests - NVIDIA T4

on:
  workflow_dispatch:

jobs:
  build:
    if: ${{ !github.event.act }}
    runs-on: [self-hosted, t4-gpu, gcp]
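    # The job runs only on manual dispatch, is skipped under local act runs
    # (github.event.act is presumably injected via the act event payload),
    # and targets a self-hosted GCP runner with an NVIDIA T4 GPU.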
    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: 🛎️ Checkout
        uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}
      - name: 🦾 Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install -r requirements/requirements.test.integration.txt
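      # Pulling the existing :test tag first presumably warms the Docker layer cache
      # before the GPU image is rebuilt and pushed back to Docker Hub.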
      - name: 🔨 Build and Push Test Docker - GPU
        run: |
          docker pull roboflow/roboflow-inference-server-gpu:test
          docker build -t roboflow/roboflow-inference-server-gpu:test -f docker/dockerfiles/Dockerfile.onnx.gpu .
          docker push roboflow/roboflow-inference-server-gpu:test
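      # start_test_docker_gpu is assumed to start the freshly built :test image and
      # expose the inference server on port 9101 for the tests below.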
      - name: 🔋 Start Test Docker - GPU
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
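      # MINIMUM_FPS=25 presumably sets the throughput threshold for the speed checks;
      # the per-project *_API_KEY variables supply keys for the individual test datasets.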
      - name: 🧪 Regression Tests - GPU
        run: |
          MINIMUM_FPS=25 FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/
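      # success() || failure() makes the cleanup run whether or not the tests passed,
      # but not when the job is cancelled.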
      - name: 🧹 Cleanup Test Docker - GPU
        run: make stop_test_docker
        if: success() || failure()
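      # The same build / start / test / cleanup cycle is repeated below for the
      # parallel variant of the GPU inference server image.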
      - name: 🔨 Build and Push Test Docker - Parallel GPU
        run: |
          docker pull roboflow/roboflow-inference-server-gpu-parallel:test
          docker build -t roboflow/roboflow-inference-server-gpu-parallel:test -f docker/dockerfiles/Dockerfile.onnx.gpu.parallel .
          docker push roboflow/roboflow-inference-server-gpu-parallel:test
      - name: 🔋 Start Test Docker - Parallel GPU
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu-parallel make start_test_docker_gpu
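      # SKIP_VISUALISATION_TESTS=true presumably disables the visualisation assertions;
      # this run is limited to the regression and batch regression suites.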
      - name: 🧪 Regression Tests - Parallel GPU
        run: |
          SKIP_VISUALISATION_TESTS=true FUNCTIONAL=true PORT=9101 API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python -m pytest tests/inference/integration_tests/regression_test.py tests/inference/integration_tests/batch_regression_test.py
      - name: 🧹 Cleanup Test Docker - Parallel GPU
        run: make stop_test_docker
        if: success() || failure()