From cb2717253e31b35dcf23d5e1674cf025ca4bfdd0 Mon Sep 17 00:00:00 2001
From: Muspi Merol
Date: Fri, 19 Apr 2024 16:28:57 +0800
Subject: [PATCH] feat: support llama3

---
 src/routes/run.py     | 2 ++
 src/utils/llm/groq.py | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/routes/run.py b/src/routes/run.py
index 23d0e91b..e643b53b 100644
--- a/src/routes/run.py
+++ b/src/routes/run.py
@@ -38,6 +38,8 @@ class Msg(BaseModel):
         "claude-3-sonnet-20240229",
         "claude-3-haiku-20240307",
         "gemma-7b-it",
+        "llama3-8b-8192",
+        "llama3-70b-8192",
         "llama2-70b-4096",
         "mixtral-8x7b-32768",
         "nous-hermes-2-mixtral-8x7b-dpo",
diff --git a/src/utils/llm/groq.py b/src/utils/llm/groq.py
index 16618b03..5407d8d4 100644
--- a/src/utils/llm/groq.py
+++ b/src/utils/llm/groq.py
@@ -14,7 +14,7 @@
 
 
 @link_llm("gemma")
-@link_llm("llama2")
+@link_llm("llama")
 @link_llm("mixtral")
 class Groq(AsyncChatOpenAI):
     async def complete(self, prompt: str | list[Message], /, **config):
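
Note on the second hunk: widening the decorator prefix from "llama2" to "llama" is what lets the newly listed llama3-8b-8192 and llama3-70b-8192 models resolve to the Groq client alongside llama2-70b-4096. The sketch below only illustrates that presumed prefix-matching behaviour; link_llm and its registry are project-specific, so the registry, decorator body, and resolve_llm helper here are hypothetical and are not taken from the repository.

# Hypothetical sketch of the prefix-based routing assumed for link_llm.
# It only shows why registering "llama" covers both llama2-* and llama3-*.
_llm_registry: dict[str, type] = {}

def link_llm(prefix: str):
    """Register the decorated class for model names starting with `prefix`."""
    def decorator(cls: type) -> type:
        _llm_registry[prefix] = cls
        return cls
    return decorator

def resolve_llm(model: str) -> type:
    """Return the first registered class whose prefix matches `model`."""
    for prefix, cls in _llm_registry.items():
        if model.startswith(prefix):
            return cls
    raise KeyError(f"no LLM client registered for {model!r}")

@link_llm("gemma")
@link_llm("llama")    # matches llama2-70b-4096, llama3-8b-8192, llama3-70b-8192
@link_llm("mixtral")
class Groq:
    ...

assert resolve_llm("llama3-70b-8192") is Groq
assert resolve_llm("llama2-70b-4096") is Groq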