diff --git a/.gitignore b/.gitignore
index e5662d2..7c716a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,13 @@ pom.xml.asc
 !/infra/docker-compose.yml
 .idea/
 .clj-kondo/
-.lsp/
\ No newline at end of file
+.lsp/
+**/.venv
+
+**/__pycache__/
+
+*.pyc
+*.pyo
+
+*.pyd
+*.py[co]
\ No newline at end of file
diff --git a/services/llm-client/diplomat/__pycache__/__init__.cpython-39.pyc b/services/llm-client/diplomat/__pycache__/__init__.cpython-39.pyc
deleted file mode 100644
index 221dc39..0000000
Binary files a/services/llm-client/diplomat/__pycache__/__init__.cpython-39.pyc and /dev/null differ
diff --git a/services/llm-client/diplomat/__pycache__/http_server.cpython-39.pyc b/services/llm-client/diplomat/__pycache__/http_server.cpython-39.pyc
deleted file mode 100644
index e02fc6a..0000000
Binary files a/services/llm-client/diplomat/__pycache__/http_server.cpython-39.pyc and /dev/null differ
diff --git a/services/llm-client/diplomat/http_client.py b/services/llm-client/diplomat/http_client.py
index 10faa00..eb77c15 100644
--- a/services/llm-client/diplomat/http_client.py
+++ b/services/llm-client/diplomat/http_client.py
@@ -1,19 +1,35 @@
+
 from openai import OpenAI
+from models.prompts import RenderedPrompt
+import os
 
 client = OpenAI(
-    api_key=""
-    #os.environ['OPENAI_API_KEY'], # this is also the default, it can be omitted
+    api_key=os.getenv('OPENAI_API_KEY').strip()
 )
 
-def complete():
+def complete(rendered_prompt: RenderedPrompt):
+
+    images = [{
+        "type": "image_url",
+        "image_url": {
+            "url": f"data:image/jpeg;base64,{image}"
+        }
+    } for image in rendered_prompt.images]
+
     completion = client.chat.completions.create(
-        model="gpt-4o-mini",
-        messages=[
-            {
-                "role": "user",
-                "content": "Hello, how are you?",
-            },
-        ],
-    )
-    print(completion.choices[0].message.content)
-    return completion.choices[0].message.content
\ No newline at end of file
+        model="gpt-4o-mini",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": rendered_prompt.prompt
+                    },
+                    *images
+                ]
+            }
+        ],
+    )
+    return completion.choices[0].message.content
+
\ No newline at end of file
diff --git a/services/llm-client/flows/__pycache__/interactions.cpython-39.pyc b/services/llm-client/flows/__pycache__/interactions.cpython-39.pyc
index fb1583c..1450ee2 100644
Binary files a/services/llm-client/flows/__pycache__/interactions.cpython-39.pyc and b/services/llm-client/flows/__pycache__/interactions.cpython-39.pyc differ
diff --git a/services/llm-client/flows/interactions.py b/services/llm-client/flows/interactions.py
index 0297259..6cc2217 100644
--- a/services/llm-client/flows/interactions.py
+++ b/services/llm-client/flows/interactions.py
@@ -2,10 +2,21 @@
 from components.scylla_connection import ScyllaConnection
 from datetime import datetime, timezone
 from models.interactions import Interaction
+from models.prompts import Prompt, RenderedPrompt
 from diplomat.db import interactions as interactions_db
 from diplomat.db import prompts as prompts_db
 from logic import interactions as interactions_logic
 from logic import prompts as prompts_logic
+from diplomat import http_client
+
+def render_prompt(interaction: Interaction, prompt: Prompt) -> RenderedPrompt:
+    prompt_variables = prompts_logic.find_variables(prompt)
+    missing_variables = prompts_logic.find_missing_variables(prompt_variables, interaction.variables)
+    if (missing_variables):
+        missing_variables_str = ", ".join(missing_variables)
+        raise ValueError(f"Variables missing: {missing_variables_str}")
+    return RenderedPrompt(prompt.prompt_name, prompts_logic.replace_variables(prompt, interaction.variables), interaction.images)
+
 
 def new_interaction(interaction: Interaction, scylla: ScyllaConnection):
     existent_interaction = interactions_db.get_interaction(interaction.id, scylla)
@@ -28,22 +39,26 @@ def new_interaction(interaction: Interaction, scylla: ScyllaConnection):
         interactions_db.update_interaction(interaction_request, scylla)
         return interaction_request
 
-    prompt_variables = prompts_logic.find_variables(prompt)
-    missing_variables = prompts_logic.find_missing_variables(prompt_variables, interaction.variables)
+    rendered_prompt = None
 
-    if(missing_variables):
-        missing_variables = ", ".join(missing_variables)
-        interaction_request.failed(f"Variables missing: {missing_variables}")
+    try:
+        rendered_prompt = render_prompt(interaction, prompt)
+    except ValueError as e:
+        error_message = str(e)
+        interaction_request.failed(error_message)
         interactions_db.update_interaction(interaction_request, scylla)
         return interaction_request
 
-    rendered_prompt = prompts_logic.replace_variables(prompt, interaction.variables)
-
-
-    # request Open IA
-    # update the database
-    # return
-    return rendered_prompt
+    try:
+        request_response = http_client.complete(rendered_prompt)
+        interaction_request.success(request_response)
+        interactions_db.update_interaction(interaction_request, scylla)
+        return interaction_request
+    except Exception as e:
+        error_message = str(e)
+        interaction_request.failed(error_message)
+        interactions_db.update_interaction(interaction_request, scylla)
+        return interaction_request
diff --git a/services/llm-client/models/__pycache__/interactions.cpython-39.pyc b/services/llm-client/models/__pycache__/interactions.cpython-39.pyc
index 7e0caac..dccc0d9 100644
Binary files a/services/llm-client/models/__pycache__/interactions.cpython-39.pyc and b/services/llm-client/models/__pycache__/interactions.cpython-39.pyc differ
diff --git a/services/llm-client/models/__pycache__/prompts.cpython-39.pyc b/services/llm-client/models/__pycache__/prompts.cpython-39.pyc
index 9618a71..8243f6c 100644
Binary files a/services/llm-client/models/__pycache__/prompts.cpython-39.pyc and b/services/llm-client/models/__pycache__/prompts.cpython-39.pyc differ
diff --git a/services/llm-client/models/interactions.py b/services/llm-client/models/interactions.py
index 6f2f1e7..8cc3c65 100644
--- a/services/llm-client/models/interactions.py
+++ b/services/llm-client/models/interactions.py
@@ -19,7 +19,13 @@ def __init__(self, id, interaction, response, request_date, status, timeout, details):
         self.details = details
 
     def failed(self, details: str):
-        self.status = 'failed'
+        self.status = 'failed'
         self.details = details
         return self
+
+    def success(self, response: str):
+        self.status = 'success'
+        self.details = ""
+        self.response = response
+        return self
\ No newline at end of file
diff --git a/services/llm-client/models/prompts.py b/services/llm-client/models/prompts.py
index d8aaef1..4129781 100644
--- a/services/llm-client/models/prompts.py
+++ b/services/llm-client/models/prompts.py
@@ -1,4 +1,11 @@
 class Prompt:
     def __init__(self, prompt_name:str, prompt:str):
         self.prompt_name = prompt_name
-        self.prompt = prompt
\ No newline at end of file
+        self.prompt = prompt
+
+class RenderedPrompt:
+    def __init__(self, prompt_name:str, prompt:str, images:dict[str, str]):
+        self.prompt_name = prompt_name
+        self.prompt = prompt
+        self.images = images
+
\ No newline at end of file
diff --git a/services/llm-client/readme.md b/services/llm-client/readme.md
index 73b3d54..17606e1 100644
--- a/services/llm-client/readme.md
+++ b/services/llm-client/readme.md
@@ -1,8 +1,9 @@
 # Requirements
-- cassandra-driver
+- pip install cassandra-driver
 - pip install 'uvicorn[standard]'
 - pip install fastapi
 - pip install pytest
+- pip install openai
 
 docker run -d --name some-scylla -p 9042:9042 scylladb/scylla
 docker exec -it some-scylla cqlsh
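
Usage sketch (not part of the diff): a minimal, hypothetical example of exercising the new completion path directly, assuming OPENAI_API_KEY is exported and the readme dependencies (including openai) are installed. The file name, prompt name, and prompt text are made up; images is passed here as a list of base64 strings to match how complete() iterates it, so adjust if the intended shape is the annotated dict[str, str].

    import base64
    import os

    from diplomat import http_client
    from models.prompts import RenderedPrompt

    # Hypothetical input image, base64-encoded the way complete()
    # interpolates each item into a data:image/jpeg;base64,... URL.
    with open("example.jpg", "rb") as f:
        image_b64 = base64.b64encode(f.read()).decode("ascii")

    rendered = RenderedPrompt(
        prompt_name="describe-image",           # hypothetical prompt name
        prompt="Describe the attached image.",  # already-rendered prompt text
        images=[image_b64],
    )

    assert os.getenv("OPENAI_API_KEY"), "complete() reads the API key from the environment"
    print(http_client.complete(rendered))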