name: Build and Push GPU Slim Container
on:
  release:
    types: [created]
  push:
    branches: [ main ]
  workflow_dispatch:
    inputs:
      force_push:
        type: boolean
        description: "Do you want to push the image after the build?"
        default: false
env:
  VERSION: '0.0.0' # Default version, overwritten by the "Read version from file" step
jobs:
  docker:
    runs-on:
      group: group8core
    steps:
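      # Free disk space on the runner before building: the preinstalled dotnet SDK
      # and the agent tool cache are not needed here, and GPU base images are large.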
      - name: Remove unnecessary files
        run: |
          sudo rm -rf /usr/share/dotnet
          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
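      # Check out the repository and read the package version; the value is exported
      # to GITHUB_ENV so it can be used as an image tag in the build step.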
      -
        name: 🛎️ Checkout
        uses: actions/checkout@v3
      -
        name: Read version from file
        run: echo "VERSION=$(DISABLE_VERSION_CHECK=true python ./inference/core/version.py)" >> $GITHUB_ENV
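      # The image is pushed only for release events or when force_push is set via
      # workflow_dispatch; ordinary pushes to main just build and refresh the cache.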
      -
        name: Build and Push
        uses: docker/build-push-action@v4
        with:
          push: ${{ github.event_name == 'release' || (github.event.inputs.force_push == 'true') }}
          tags: roboflow/roboflow-inference-server-gpu-slim:latest,roboflow/roboflow-inference-server-gpu-slim:${{ env.VERSION }}
          cache-from: type=registry,ref=roboflow/roboflow-inference-server-gpu-slim:cache
          cache-to: type=registry,ref=roboflow/roboflow-inference-server-gpu-slim:cache,mode=max
          platforms: linux/amd64
          file: ./docker/dockerfiles/Dockerfile.onnx.gpu.slim
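# Manual trigger with a push, using the GitHub CLI (the workflow is addressed by
# its display name defined above):
#   gh workflow run "Build and Push GPU Slim Container" -f force_push=true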