From 994e71717a8d9b377e2c94b2fe008d4d1353ddb7 Mon Sep 17 00:00:00 2001
From: PEDDIREDDY MADHAVI
Date: Sat, 26 Oct 2024 01:13:11 +0530
Subject: [PATCH 1/4] Added Cohere

---
 src/beyondllm/llms/cohere.py | 73 ++++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)
 create mode 100644 src/beyondllm/llms/cohere.py

diff --git a/src/beyondllm/llms/cohere.py b/src/beyondllm/llms/cohere.py
new file mode 100644
index 0000000..dd3b19e
--- /dev/null
+++ b/src/beyondllm/llms/cohere.py
@@ -0,0 +1,73 @@
+from beyondllm.llms.base import BaseLLMModel, ModelConfig
+from typing import Any, Dict
+from dataclasses import dataclass, field
+import os
+import cohere
+
+@dataclass
+class CohereModel:
+    """
+    Class representing a Language Model (LLM) using Cohere.
+
+    Example:
+    ```
+    >>> llm = CohereModel(api_key="", model_kwargs={"temperature": 0.5})
+    ```
+    or
+    ```
+    >>> import os
+    >>> os.environ['COHERE_API_KEY'] = "***********"  # replace with your key
+    >>> llm = CohereModel()
+    ```
+    """
+    api_key: str = ""
+    model_kwargs: dict = field(default_factory=lambda: {
+        "temperature": 0.5,
+        "top_p": 1,
+        "max_tokens": 2048,
+    })
+
+    def __post_init__(self):
+        if not self.api_key:
+            self.api_key = os.getenv('COHERE_API_KEY')
+        if not self.api_key:
+            raise ValueError("COHERE_API_KEY is not provided and not found in environment variables.")
+        self.load_llm()
+
+    def load_llm(self):
+        """Load the Cohere client."""
+        try:
+            self.client = cohere.ClientV2(api_key=self.api_key)
+        except Exception as e:
+            raise Exception(f"Failed to initialize Cohere client: {str(e)}")
+
+    def predict(self, prompt: Any) -> str:
+        try:
+            response = self.client.chat(
+                model="command-r-plus-08-2024",
+                messages=[{"role": "user", "content": prompt}]
+            )
+            return response.message.content[0].text
+        except Exception as e:
+            raise Exception(f"Failed to generate prediction: {str(e)}")
+
+    @staticmethod
+    def load_from_kwargs(self, kwargs: Dict):
+        model_config = ModelConfig(**kwargs)
+        self.config = model_config
+        self.load_llm()
+
+if __name__ == "__main__":
+    import os
+
+    # set the API key in an environment variable
+    os.environ['COHERE_API_KEY'] = " "
+
+    # Create an instance of CohereModel
+    llm = CohereModel()
+
+    # Make a prediction
+    prompt = "Write a LinkedIn post on generative AI using emojis and symbols?"
+    response = llm.predict(prompt)
+
+    print(f"Response: {response}")

From 80e32b42a0eeb50c430e358fa9bf0fd0f84fd71a Mon Sep 17 00:00:00 2001
From: PEDDIREDDY MADHAVI
Date: Sat, 26 Oct 2024 19:28:15 +0530
Subject: [PATCH 2/4] changes done

---
 src/beyondllm/llms/cohere.py | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/src/beyondllm/llms/cohere.py b/src/beyondllm/llms/cohere.py
index dd3b19e..d523de7 100644
--- a/src/beyondllm/llms/cohere.py
+++ b/src/beyondllm/llms/cohere.py
@@ -21,6 +21,7 @@ class CohereModel:
     ```
     """
     api_key: str = ""
+    model_name: str = "command-r-plus-08-2024"
     model_kwargs: dict = field(default_factory=lambda: {
         "temperature": 0.5,
         "top_p": 1,
@@ -44,7 +45,7 @@ def load_llm(self):
     def predict(self, prompt: Any) -> str:
         try:
             response = self.client.chat(
-                model="command-r-plus-08-2024",
+                model=self.model_name,
                 messages=[{"role": "user", "content": prompt}]
             )
             return response.message.content[0].text
@@ -57,17 +58,3 @@ def load_from_kwargs(self, kwargs: Dict):
         self.config = model_config
         self.load_llm()
 
-if __name__ == "__main__":
-    import os
-
-    # set the API key in an environment variable
-    os.environ['COHERE_API_KEY'] = " "
-
-    # Create an instance of CohereModel
-    llm = CohereModel()
-
-    # Make a prediction
-    prompt = "Write a LinkedIn post on generative AI using emojis and symbols?"
-    response = llm.predict(prompt)
-
-    print(f"Response: {response}")
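For reviewers, a quick sanity check of the interface these first two patches produce — a minimal sketch that assumes a valid key in `COHERE_API_KEY` and that the default `command-r-plus-08-2024` model is available to the account; the prompt and settings are illustrative:

```
>>> import os
>>> from beyondllm.llms.cohere import CohereModel
>>> os.environ['COHERE_API_KEY'] = "***********"  # replace with your key
>>> llm = CohereModel()  # model_name is overridable as of PATCH 2/4
>>> print(llm.predict("Explain retrieval-augmented generation in one paragraph."))
```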
From 819f46588d200681d19821f7d39da78ca330424b Mon Sep 17 00:00:00 2001
From: PEDDIREDDY MADHAVI
Date: Sat, 26 Oct 2024 20:09:05 +0530
Subject: [PATCH 3/4] Added Together AI

---
 src/beyondllm/llms/together_ai.py | 66 +++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 src/beyondllm/llms/together_ai.py

diff --git a/src/beyondllm/llms/together_ai.py b/src/beyondllm/llms/together_ai.py
new file mode 100644
index 0000000..f188099
--- /dev/null
+++ b/src/beyondllm/llms/together_ai.py
@@ -0,0 +1,66 @@
+from beyondllm.llms.base import BaseLLMModel, ModelConfig
+from typing import Any, Dict
+from dataclasses import dataclass, field
+import os
+from together import Together
+
+@dataclass
+class TogetherModel:
+    """
+    Class representing a Language Model (LLM) using Together AI.
+
+    Example:
+    ```
+    >>> llm = TogetherModel(api_key="", model_kwargs={"temperature": 0.7})
+    ```
+    or
+    ```
+    >>> import os
+    >>> os.environ['TOGETHER_API_KEY'] = "***********"  # replace with your key
+    >>> llm = TogetherModel()
+    ```
+    """
+    api_key: str = ""
+    model_name: str = "meta-llama/Llama-3-8b-chat-hf"
+    model_kwargs: dict = field(default_factory=lambda: {
+        "temperature": 0.7,
+        "top_p": 0.9,
+        "max_tokens": 1024,
+    })
+
+    def __post_init__(self):
+        if not self.api_key:
+            self.api_key = os.getenv('TOGETHER_API_KEY')
+        if not self.api_key:
+            raise ValueError("TOGETHER_API_KEY is not provided and not found in environment variables.")
+        self.load_llm()
+
+    def load_llm(self):
+        """Load the Together client."""
+        try:
+            self.client = Together(api_key=self.api_key)
+        except Exception as e:
+            raise Exception(f"Failed to initialize Together client: {str(e)}")
+
+    def predict(self, prompt: Any) -> str:
+        """Generate a response from the model based on the provided prompt."""
+        try:
+            stream = self.client.chat.completions.create(
+                model=self.model_name,
+                messages=[{"role": "system", "content": "You are a highly skilled software engineer. Provide detailed explanations and code examples when relevant."},
+                          {"role": "user", "content": prompt}],
+                stream=True
+            )
+            response_text = ""
+            for chunk in stream:
+                response_text += chunk.choices[0].delta.content or ""
+            return response_text
+        except Exception as e:
+            raise Exception(f"Failed to generate prediction: {str(e)}")
+
+    @staticmethod
+    def load_from_kwargs(self, kwargs: Dict):
+        model_config = ModelConfig(**kwargs)
+        self.config = model_config
+        self.load_llm()
+
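Note that `predict` in this patch streams the completion (`stream=True`) and concatenates the chunks before returning, so callers still receive a plain string. A minimal usage sketch under the same caveats as above (valid `TOGETHER_API_KEY`; prompt is illustrative):

```
>>> import os
>>> from beyondllm.llms.together_ai import TogetherModel
>>> os.environ['TOGETHER_API_KEY'] = "***********"  # replace with your key
>>> llm = TogetherModel()  # defaults to meta-llama/Llama-3-8b-chat-hf
>>> print(llm.predict("Write a docstring for a binary search function."))
```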
From 89c806263f11386cf91dc3065c61de3795406b79 Mon Sep 17 00:00:00 2001
From: PEDDIREDDY MADHAVI
Date: Tue, 29 Oct 2024 15:05:36 +0530
Subject: [PATCH 4/4] updated

---
 src/beyondllm/llms/__init__.py           |  4 +++-
 src/beyondllm/llms/cohere.py             |  7 +++++--
 .../llms/{together_ai.py => together.py} | 18 ++++++++++--------
 3 files changed, 18 insertions(+), 11 deletions(-)
 rename src/beyondllm/llms/{together_ai.py => together.py} (82%)

diff --git a/src/beyondllm/llms/__init__.py b/src/beyondllm/llms/__init__.py
index 255f364..c4bf52f 100644
--- a/src/beyondllm/llms/__init__.py
+++ b/src/beyondllm/llms/__init__.py
@@ -8,4 +8,6 @@
 from .gpt4o import GPT4oOpenAIModel
 from .chatgroq import GroqModel
 from .claude import ClaudeModel
-from .mistral import MistralModel
\ No newline at end of file
+from .mistral import MistralModel
+from .cohere import CohereModel
+from .together import TogetherModel
diff --git a/src/beyondllm/llms/cohere.py b/src/beyondllm/llms/cohere.py
index d523de7..f4b70fe 100644
--- a/src/beyondllm/llms/cohere.py
+++ b/src/beyondllm/llms/cohere.py
@@ -2,7 +2,6 @@
 from typing import Any, Dict
 from dataclasses import dataclass, field
 import os
-import cohere
 
 @dataclass
 class CohereModel:
@@ -36,7 +35,11 @@ def __post_init__(self):
         self.load_llm()
 
     def load_llm(self):
-        """Load the Cohere client."""
+        try:
+            import cohere
+        except ImportError:
+            raise ImportError("The cohere module is not installed. Please install it with 'pip install cohere'.")
+
         try:
             self.client = cohere.ClientV2(api_key=self.api_key)
         except Exception as e:
diff --git a/src/beyondllm/llms/together_ai.py b/src/beyondllm/llms/together.py
similarity index 82%
rename from src/beyondllm/llms/together_ai.py
rename to src/beyondllm/llms/together.py
index f188099..6e7aa7d 100644
--- a/src/beyondllm/llms/together_ai.py
+++ b/src/beyondllm/llms/together.py
@@ -2,7 +2,6 @@
 from typing import Any, Dict
 from dataclasses import dataclass, field
 import os
-from together import Together
 
 @dataclass
 class TogetherModel:
@@ -36,7 +35,11 @@ def __post_init__(self):
         self.load_llm()
 
     def load_llm(self):
-        """Load the Together client."""
+        try:
+            from together import Together
+        except ImportError:
+            raise ImportError("The together module is not installed. Please install it with 'pip install together'.")
+
         try:
             self.client = Together(api_key=self.api_key)
         except Exception as e:
@@ -45,16 +48,15 @@ def load_llm(self):
     def predict(self, prompt: Any) -> str:
         """Generate a response from the model based on the provided prompt."""
         try:
-            stream = self.client.chat.completions.create(
+            response = self.client.chat.completions.create(
                 model=self.model_name,
                 messages=[{"role": "system", "content": "You are a highly skilled software engineer. Provide detailed explanations and code examples when relevant."},
-                          {"role": "user", "content": prompt}],
-                stream=True
+                          {"role": "user", "content": prompt}]
+
             )
-            response_text = ""
-            for chunk in stream:
-                response_text += chunk.choices[0].delta.content or ""
+            response_text = response.choices[0].message.content
             return response_text
+
         except Exception as e:
             raise Exception(f"Failed to generate prediction: {str(e)}")
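With PATCH 4/4 applied, both models are exported from `beyondllm.llms`, and the third-party SDKs are imported lazily inside `load_llm`, so `pip install cohere together` is only required when the corresponding model is actually used. An end-to-end sketch of the final state (keys and prompts are illustrative):

```
>>> import os
>>> from beyondllm.llms import CohereModel, TogetherModel
>>> os.environ['COHERE_API_KEY'] = "***********"    # replace with your keys
>>> os.environ['TOGETHER_API_KEY'] = "***********"
>>> cohere_llm = CohereModel()      # defaults to command-r-plus-08-2024
>>> together_llm = TogetherModel()  # defaults to meta-llama/Llama-3-8b-chat-hf
>>> print(cohere_llm.predict("Summarize vector databases in one line."))
>>> print(together_llm.predict("Summarize vector databases in one line."))
```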