
Commit

Better error handling (#91)
* fix how we handle an error with a completion

* check for empty string in completion
jerpint authored May 1, 2023
1 parent 17e3538 commit a5af3f8
Showing 2 changed files with 22 additions and 11 deletions.
buster/busterbot.py: 26 changes (16 additions, 10 deletions)
@@ -8,7 +8,7 @@

from buster.completers import completer_factory
from buster.completers.base import Completion
-from buster.formatters.prompts import SystemPromptFormatter, prompt_formatter_factory
+from buster.formatters.prompts import prompt_formatter_factory
from buster.retriever import Retriever

logger = logging.getLogger(__name__)
@@ -130,8 +130,8 @@ def rank_documents(

return matched_documents

-    def check_response_relevance(
-        self, completion_text: str, engine: str, unk_embedding: np.array, unk_threshold: float
+    def check_completion_relevance(
+        self, completion: Completion, engine: str, unk_embedding: np.array, unk_threshold: float
) -> bool:
"""Check to see if a response is relevant to the chatbot's knowledge or not.
@@ -140,8 +140,8 @@ def check_response_relevance(
set the unk_threshold to 0 to essentially turn off this feature.
"""
+        if completion.error:
+            # considered not relevant if an error occured
+            return False
+
+        if completion.text == "":
+            raise ValueError("Cannot compute embedding of an empty string.")
+
response_embedding = self.get_embedding(
-            completion_text,
+            completion.text,
engine=engine,
)
score = cosine_similarity(response_embedding, unk_embedding)
@@ -184,17 +191,16 @@ def process_input(self, user_input: str) -> Response:
logger.info(f"GPT Response:\n{completion.text}")

# check for relevance
-        is_relevant = self.check_response_relevance(
-            completion_text=completion.text,
+        is_relevant = self.check_completion_relevance(
+            completion=completion,
engine=self.embedding_model,
unk_embedding=self.unk_embedding,
unk_threshold=self.unknown_threshold,
)

if not is_relevant:
-            matched_documents = pd.DataFrame(columns=matched_documents.columns)
-            # answer generated was the chatbot saying it doesn't know how to answer
-            # uncomment override completion with unknown prompt
-            # completion = Completion(text=self.unknown_prompt)
+            empty_documents = pd.DataFrame(columns=matched_documents.columns)
+            matched_documents = empty_documents

response = Response(
completion=completion, matched_documents=matched_documents, is_relevant=is_relevant, user_input=user_input
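For context, here is a minimal, self-contained sketch (not the repository's actual code) of the behaviour this file now has: an errored Completion is short-circuited as not relevant, and an empty completion text is rejected before any embedding call. The Completion fields, the embed callable, and the comparison against unk_threshold are simplified stand-ins inferred from the diff above.

# Sketch only: Completion fields, embed() and the threshold comparison are assumptions.
from dataclasses import dataclass
from typing import Callable, Optional

import numpy as np


@dataclass
class Completion:
    text: str
    error: bool = False
    error_msg: Optional[str] = None


def check_completion_relevance(
    completion: Completion,
    embed: Callable[[str], np.ndarray],
    unk_embedding: np.ndarray,
    unk_threshold: float,
) -> bool:
    if completion.error:
        # considered not relevant if an error occurred
        return False
    if completion.text == "":
        # guard added by this commit: never embed an empty string
        raise ValueError("Cannot compute embedding of an empty string.")
    # compare the completion against the embedding of the canned "I don't know" answer
    emb = embed(completion.text)
    score = float(np.dot(emb, unk_embedding) / (np.linalg.norm(emb) * np.linalg.norm(unk_embedding)))
    return score < unk_threshold  # assumed comparison direction, for illustration only


# An errored completion short-circuits before any embedding work happens.
errored = Completion(text="", error=True, error_msg="Invalid request made to openai.")
print(check_completion_relevance(errored, embed=lambda s: np.zeros(3), unk_embedding=np.zeros(3), unk_threshold=0.85))
# -> False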
buster/completers/base.py: 7 changes (6 additions, 1 deletion)
@@ -43,10 +43,15 @@ def generate_response(self, system_prompt, user_input) -> Completion:
logger.info(f"{user_input=}")
try:
completion = self.complete(prompt=prompt, **self.completion_kwargs)
+        except openai.error.InvalidRequestError:
+            logger.exception("Error connecting to OpenAI API. See traceback:")
+            return Completion("Something went wrong, try again soon!", True, "Invalid request made to openai.")
except Exception as e:
# log the error and return a generic response instead.
logger.exception("Error connecting to OpenAI API. See traceback:")
return Completion("", True, "We're having trouble connecting to OpenAI right now... Try again soon!")
return Completion(
"Something went wrong, try again soon!", True, "Unexpected error at the generate_response level"
)

return Completion(completion)

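To show how the two files fit together after this commit, here is a small hedged sketch of the error path end to end: generate_response catches API failures, logs them, and returns a Completion flagged with error=True (now with a non-empty user-facing message), which the relevance check in busterbot.py treats as not relevant. The Completion constructor signature (text, error, error_msg) is inferred from the calls in this diff, and the openai call is faked so the snippet runs standalone.

# Hedged sketch of the end-to-end error path; not the project's real classes.
import logging
from dataclasses import dataclass
from typing import Optional

logger = logging.getLogger(__name__)


@dataclass
class Completion:
    text: str
    error: bool = False
    error_msg: Optional[str] = None


def fake_openai_complete(prompt: str) -> str:
    # stand-in for self.complete(...); pretend the API call blows up
    raise RuntimeError("boom")


def generate_response(prompt: str) -> Completion:
    try:
        text = fake_openai_complete(prompt)
    except Exception:
        # mirror the commit: log and return an errored Completion instead of raising
        logger.exception("Error connecting to OpenAI API. See traceback:")
        return Completion(
            "Something went wrong, try again soon!", True, "Unexpected error at the generate_response level"
        )
    return Completion(text)


completion = generate_response("some prompt")
# Downstream (busterbot.py), check_completion_relevance sees error=True and returns
# False, so matched_documents is replaced with an empty DataFrame.
print(completion.error, completion.error_msg)
# -> True Unexpected error at the generate_response level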
