From 39f9490135ceb2631e828acc26f8a910c71441b7 Mon Sep 17 00:00:00 2001
From: Massimiliano Pronesti
Date: Wed, 4 Oct 2023 06:53:07 +0200
Subject: [PATCH] fix(llms): restore completion API for gpt-3.5-turbo-instruct
 (#610)

---
 pandasai/llm/openai.py    | 9 ++++++---
 tests/llms/test_openai.py | 7 +++++++
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/pandasai/llm/openai.py b/pandasai/llm/openai.py
index 9a6fb6066..816e13292 100644
--- a/pandasai/llm/openai.py
+++ b/pandasai/llm/openai.py
@@ -29,7 +29,8 @@ class OpenAI(BaseOpenAI):
     The list of supported Chat models includes ["gpt-4", "gpt-4-0613", "gpt-4-32k",
     "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613",
     "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-instruct"].
-
+    The list of supported Completion models includes "gpt-3.5-turbo-instruct" and
+    "text-davinci-003" (soon to be deprecated).
     """
 
     _supported_chat_models = [
@@ -41,8 +42,8 @@ class OpenAI(BaseOpenAI):
         "gpt-3.5-turbo-16k",
         "gpt-3.5-turbo-0613",
         "gpt-3.5-turbo-16k-0613",
-        "gpt-3.5-turbo-instruct",
     ]
+    _supported_completion_models = ["text-davinci-003", "gpt-3.5-turbo-instruct"]
 
     model: str = "gpt-3.5-turbo"
 
@@ -101,7 +102,9 @@ def call(self, instruction: AbstractPrompt, suffix: str = "") -> str:
         """
         self.last_prompt = instruction.to_string() + suffix
 
-        if self.model in self._supported_chat_models:
+        if self.model in self._supported_completion_models:
+            response = self.completion(self.last_prompt)
+        elif self.model in self._supported_chat_models:
             response = self.chat_completion(self.last_prompt)
         else:
             raise UnsupportedOpenAIModelError("Unsupported model")
diff --git a/tests/llms/test_openai.py b/tests/llms/test_openai.py
index dacac2ada..18fa3ffa6 100644
--- a/tests/llms/test_openai.py
+++ b/tests/llms/test_openai.py
@@ -111,6 +111,13 @@ def test_call_with_unsupported_model(self, prompt):
         llm = OpenAI(api_token="test", model="not a model")
         llm.call(instruction=prompt)
 
+    def test_call_supported_completion_model(self, mocker, prompt):
+        openai = OpenAI(api_token="test", model="gpt-3.5-turbo-instruct")
+        mocker.patch.object(openai, "completion", return_value="response")
+
+        result = openai.call(instruction=prompt)
+        assert result == "response"
+
     def test_call_supported_chat_model(self, mocker, prompt):
         openai = OpenAI(api_token="test", model="gpt-4")
         mocker.patch.object(openai, "chat_completion", return_value="response")