
ensure to pass host to client not request
fgebhart committed Dec 4, 2024
1 parent 43989c1 commit 8ee43e7
Showing 3 changed files with 20 additions and 14 deletions.
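Taken together, the three files converge on one pattern: the inference host is configured once on the Client or AsyncClient, and CompletionRequest no longer carries it. For context, here is a minimal end-to-end sketch of the resulting synchronous usage, reusing the placeholder host and the pharia-1-llm-7b-control model that appear in the diff below; the final line assumes CompletionResponse exposes the generated text via completions[0].completion.

import os

from aleph_alpha_client import Client, CompletionRequest, Prompt

# The host is now a property of the client, not of individual requests.
client = Client(
    token=os.getenv("AA_TOKEN"),
    host="https://inference-api.your-domain.com",
)

# The request only describes the completion itself.
request = CompletionRequest(
    prompt=Prompt.from_text("Provide a short description of AI:"),
    maximum_tokens=64,
)

response = client.complete(request, model="pharia-1-llm-7b-control")
print(response.completions[0].completion)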
12 changes: 8 additions & 4 deletions README.md
@@ -18,10 +18,12 @@ Python client for the [Aleph Alpha](https://aleph-alpha.com) API.
import os
from aleph_alpha_client import Client, CompletionRequest, Prompt

-client = Client(token=os.getenv("AA_TOKEN"))
+client = Client(
+    token=os.getenv("AA_TOKEN"),
+    host="https://inference-api.your-domain.com",
+)
request = CompletionRequest(
prompt=Prompt.from_text("Provide a short description of AI:"),
host="https://inference-api.your-domain.com",
maximum_tokens=64,
)
response = client.complete(request, model="pharia-1-llm-7b-control")
@@ -36,10 +38,12 @@ import os
from aleph_alpha_client import AsyncClient, CompletionRequest, Prompt

# Can enter context manager within an async function
-async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
+async with AsyncClient(
+    token=os.environ["AA_TOKEN"],
+    host="https://inference-api.your-domain.com",
+) as client:
request = CompletionRequest(
prompt=Prompt.from_text("Provide a short description of AI:"),
host="https://inference-api.your-domain.com",
maximum_tokens=64,
)
response = client.complete_with_streaming(request, model="pharia-1-llm-7b-control")
16 changes: 9 additions & 7 deletions aleph_alpha_client/aleph_alpha_client.py
@@ -163,10 +163,11 @@ class Client:
Internal feature.
Example usage:
->>> request = CompletionRequest(
-        prompt=Prompt.from_text(f"Request"), host="https://inference-api.your-domain.com", maximum_tokens=64
+>>> request = CompletionRequest(prompt=Prompt.from_text(f"Request"), maximum_tokens=64)
+>>> client = Client(
+        token=os.environ["AA_TOKEN"],
+        host="https://inference-api.your-domain.com",
    )
->>> client = Client(token=os.environ["AA_TOKEN"])
>>> response: CompletionResponse = client.complete(request, "pharia-1-llm-7b-control")
"""

@@ -740,10 +741,11 @@ class AsyncClient:
Internal feature.
Example usage:
->>> request = CompletionRequest(
-        prompt=Prompt.from_text(f"Request"), host="https://inference-api.your-domain.com", maximum_tokens=64
-    )
->>> async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
+>>> request = CompletionRequest(prompt=Prompt.from_text(f"Request"), maximum_tokens=64)
+>>> async with AsyncClient(
+        token=os.environ["AA_TOKEN"],
+        host="https://inference-api.your-domain.com"
+    ) as client:
response: CompletionResponse = await client.complete(request, "pharia-1-llm-7b-control")
"""

6 changes: 3 additions & 3 deletions docs/source/index.rst
@@ -18,7 +18,7 @@ Synchronous client.
from aleph_alpha_client import Client, CompletionRequest, Prompt
import os
-client = Client(token=os.getenv("AA_TOKEN"))
+client = Client(token=os.getenv("AA_TOKEN"), host="https://inference-api.your-domain.com")
prompt = Prompt.from_text("Provide a short description of AI:")
request = CompletionRequest(prompt=prompt, maximum_tokens=20)
result = client.complete(request, model="luminous-extended")
@@ -32,7 +32,7 @@ Synchronous client with prompt containing an image.
from aleph_alpha_client import Client, CompletionRequest, PromptTemplate, Image
import os
-client = Client(token=os.getenv("AA_TOKEN"))
+client = Client(token=os.getenv("AA_TOKEN"), host="https://inference-api.your-domain.com")
image = Image.from_file("path-to-an-image")
prompt_template = PromptTemplate("{{image}}This picture shows ")
prompt = prompt_template.to_prompt(image=prompt_template.placeholder(image))
@@ -50,7 +50,7 @@ Asynchronous client.
from aleph_alpha_client import AsyncClient, CompletionRequest, Prompt
# Can enter context manager within an async function
-async with AsyncClient(token=os.environ["AA_TOKEN"]) as client:
+async with AsyncClient(token=os.environ["AA_TOKEN"], host="https://inference-api.your-domain.com") as client:
request = CompletionRequest(
prompt=Prompt.from_text("Request"),
maximum_tokens=64,
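The asynchronous docs example above is cut off by the diff view. For completeness, here is a self-contained sketch of how it could be driven from a script, assuming the placeholder host used throughout this commit and the luminous-extended model from the synchronous docs example; AsyncClient must be entered inside a coroutine, so the snippet wraps it in asyncio.run.

import asyncio
import os

from aleph_alpha_client import AsyncClient, CompletionRequest, Prompt

async def main() -> None:
    # The context manager has to be entered inside an async function.
    async with AsyncClient(
        token=os.environ["AA_TOKEN"],
        host="https://inference-api.your-domain.com",
    ) as client:
        request = CompletionRequest(
            prompt=Prompt.from_text("Request"),
            maximum_tokens=64,
        )
        response = await client.complete(request, model="luminous-extended")
        print(response.completions[0].completion)

asyncio.run(main())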
