From 78b993d82011ac1e3b505849f19729285120cda6 Mon Sep 17 00:00:00 2001 From: wandemberg-eldorado <91546771+wandemberg-eldorado@users.noreply.github.com> Date: Tue, 8 Oct 2024 16:45:21 -0300 Subject: [PATCH] Update java-ci.yml --- .github/workflows/java-ci.yml | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/.github/workflows/java-ci.yml b/.github/workflows/java-ci.yml index 5839260..c059fb5 100644 --- a/.github/workflows/java-ci.yml +++ b/.github/workflows/java-ci.yml @@ -11,6 +11,41 @@ on: - main jobs: + ollama-e2e: + runs-on: ubuntu-22.04 + name: ollama-e2e + steps: + - uses: actions/checkout@v1 + - uses: self-actuated/nvidia-run@master + - name: Install Ollama + run: | + curl -fsSL https://ollama.com/install.sh | sudo -E sh + - name: Start serving + run: | + # Run in the background, there is no way to daemonise at the moment + ollama serve & + + # A short pause is required before the HTTP port is opened + sleep 5 + + # This endpoint blocks until ready + time curl -i http://localhost:11434 + + - name: Pull llama3 + run: | + ollama pull llama3 + + #- name: Invoke via the CLI + # run: | + # ollama run llama3 "What are the pros of MicroVMs for continuous integration, especially if Docker is the alternative?" + + #- name: Invoke via API + # run: | + # curl -s http://localhost:11434/api/generate -d '{ + # "model": "llama3", + # "stream": false, + # "prompt":"What are the risks of running privileged Docker containers for CI workloads?" + # }' | jq build: runs-on: ubuntu-22.04 strategy: