feat: add a CLI to interact with the apiserver (#270)
* feat: add a CLI to interact with the apiserver

* test status cmd

* add cli tests
masci authored Sep 26, 2024
1 parent df7ea57 commit b3ea3ff
Showing 10 changed files with 211 additions and 0 deletions.
28 changes: 28 additions & 0 deletions llama_deploy/cli/__init__.py
@@ -0,0 +1,28 @@
import click

from .deploy import deploy
from .status import status


@click.group(
    context_settings={"help_option_names": ["-h", "--help"]},
    invoke_without_command=True,
)
@click.version_option(prog_name="llamactl")
@click.option("-s", "--server", default="http://localhost:4501", help="Apiserver URL")
@click.option(
    "-k",
    "--disable-ssl",
    default=False,
    is_flag=True,
    help="Disable SSL certificate verification",
)
@click.pass_context
def llamactl(ctx: click.Context, server: str, disable_ssl: bool) -> None:
    ctx.obj = server, disable_ssl
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())  # show the help if no subcommand was provided


llamactl.add_command(deploy)
llamactl.add_command(status)
11 changes: 11 additions & 0 deletions llama_deploy/cli/__main__.py
@@ -0,0 +1,11 @@
import sys
from llama_deploy.cli import llamactl


def main() -> None:
    """CLI entrypoint."""
    sys.exit(llamactl())


if __name__ == "__main__":  # pragma: no cover
    main()
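With this `__main__` module in place, the CLI can also be run as `python -m llama_deploy.cli` even before a console script is installed; the `llamactl` entry point itself is not part of this diff and is assumed to be declared in the project metadata. A minimal sketch of driving it that way:

# Minimal sketch, not part of the commit: run the CLI as a module.
# Assumes llama_deploy is importable in the current environment.
import subprocess
import sys

subprocess.run([sys.executable, "-m", "llama_deploy.cli", "--help"], check=True)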
20 changes: 20 additions & 0 deletions llama_deploy/cli/deploy.py
@@ -0,0 +1,20 @@
from typing import IO

import click
import httpx


@click.command()
@click.pass_obj # global_config
@click.argument("deployment_config_file", type=click.File("rb"))
def deploy(global_config: tuple, deployment_config_file: IO) -> None:
    server_url, disable_ssl = global_config
    deploy_url = f"{server_url}/deployments/create/"

    files = {"file": deployment_config_file.read()}
    resp = httpx.post(deploy_url, files=files, verify=not disable_ssl)

    if resp.status_code >= 400:
        raise click.ClickException(resp.json().get("detail"))
    else:
        click.echo(f"Deployment successful: {resp.json().get('name')}")
32 changes: 32 additions & 0 deletions llama_deploy/cli/status.py
@@ -0,0 +1,32 @@
import click
import httpx


@click.command()
@click.pass_obj # global_config
def status(global_config: tuple) -> None:
    server_url, disable_ssl = global_config
    status_url = f"{server_url}/status/"

    try:
        r = httpx.get(status_url, verify=not disable_ssl)
    except httpx.ConnectError:
        raise click.ClickException(
            f"Llama Deploy is not responding, check the apiserver address {server_url} is correct and try again."
        )

    if r.status_code >= 400:
        body = r.json()
        click.echo(
            f"Llama Deploy is unhealthy: [{r.status_code}] {body.get('detail')}"
        )
        return

    click.echo("Llama Deploy is up and running.")
    body = r.json()
    if deployments := body.get("deployments"):
        click.echo("\nActive deployments:")
        for d in deployments:
            click.echo(f"- {d}")
    else:
        click.echo("\nCurrently there are no active deployments")
Empty file added tests/cli/__init__.py
Empty file.
14 changes: 14 additions & 0 deletions tests/cli/conftest.py
@@ -0,0 +1,14 @@
from pathlib import Path

import pytest
from click.testing import CliRunner


@pytest.fixture
def data_path() -> Path:
    return Path(__file__).parent / "data"


@pytest.fixture
def runner() -> CliRunner:
    return CliRunner()
7 changes: 7 additions & 0 deletions tests/cli/data/deployment.yaml
@@ -0,0 +1,7 @@
name: TestDeployment

control-plane: {}

services:
  test-workflow:
    name: Test Workflow
21 changes: 21 additions & 0 deletions tests/cli/test_cli.py
@@ -0,0 +1,21 @@
from unittest import mock

from click.testing import CliRunner

from llama_deploy.cli.__main__ import main
from llama_deploy.cli import llamactl


@mock.patch("llama_deploy.cli.__main__.sys")
@mock.patch("llama_deploy.cli.__main__.llamactl")
def test_main(mocked_cli, mocked_sys) -> None: # type: ignore
mocked_cli.return_value = 0
main()
mocked_sys.exit.assert_called_with(0)


def test_root_command(runner: CliRunner) -> None:
result = runner.invoke(llamactl)
assert result.exit_code == 0
# Ensure invoking the root command outputs the help
assert "Usage: llamactl" in result.output
34 changes: 34 additions & 0 deletions tests/cli/test_deploy.py
@@ -0,0 +1,34 @@
from pathlib import Path
from unittest import mock

from click.testing import CliRunner

from llama_deploy.cli import llamactl


def test_deploy(runner: CliRunner, data_path: Path) -> None:
    test_config_file = data_path / "deployment.yaml"
    mocked_response = mock.MagicMock(status_code=200, json=lambda: {})
    with mock.patch("llama_deploy.cli.deploy.httpx") as mocked_httpx:
        mocked_httpx.post.return_value = mocked_response
        result = runner.invoke(llamactl, ["deploy", str(test_config_file)])

        assert result.exit_code == 0
        with open(test_config_file, "rb") as f:
            mocked_httpx.post.assert_called_with(
                "http://localhost:4501/deployments/create/",
                files={"file": f.read()},
                verify=True,
            )


def test_deploy_failed(runner: CliRunner, data_path: Path) -> None:
    test_config_file = data_path / "deployment.yaml"
    mocked_response = mock.MagicMock(
        status_code=401, json=lambda: {"detail": "Unauthorized!"}
    )
    with mock.patch("llama_deploy.cli.deploy.httpx") as mocked_httpx:
        mocked_httpx.post.return_value = mocked_response
        result = runner.invoke(llamactl, ["deploy", str(test_config_file)])
        assert result.exit_code == 1
        assert result.output == "Error: Unauthorized!\n"
44 changes: 44 additions & 0 deletions tests/cli/test_status.py
@@ -0,0 +1,44 @@
from unittest import mock

from click.testing import CliRunner

from llama_deploy.cli import llamactl


def test_status_server_down(runner: CliRunner) -> None:
    result = runner.invoke(llamactl, ["status"])
    assert result.exit_code == 1
    assert "Error: Llama Deploy is not responding" in result.output


def test_status_unhealthy(runner: CliRunner) -> None:
    mocked_response = mock.MagicMock(status_code=500)
    with mock.patch("llama_deploy.cli.status.httpx") as mocked_httpx:
        mocked_httpx.get.return_value = mocked_response
        result = runner.invoke(llamactl, ["status"])
        assert result.exit_code == 0
        assert "Llama Deploy is unhealthy: [500]" in result.output


def test_status(runner: CliRunner) -> None:
    mocked_response = mock.MagicMock(status_code=200, json=lambda: {})
    with mock.patch("llama_deploy.cli.status.httpx") as mocked_httpx:
        mocked_httpx.get.return_value = mocked_response
        result = runner.invoke(llamactl, ["status"])
        assert result.exit_code == 0
        assert (
            result.output
            == "Llama Deploy is up and running.\n\nCurrently there are no active deployments\n"
        )


def test_status_with_deployments(runner: CliRunner) -> None:
    mocked_response = mock.MagicMock(status_code=200)
    mocked_response.json.return_value = {"deployments": ["foo", "bar"]}
    with mock.patch("llama_deploy.cli.status.httpx") as mocked_httpx:
        mocked_httpx.get.return_value = mocked_response
        result = runner.invoke(llamactl, ["status"])
        assert result.exit_code == 0
        assert result.output == (
            "Llama Deploy is up and running.\n\nActive deployments:\n- foo\n- bar\n"
        )
