From bf5af4fd29abed21b7ba7c4dc512831849ebb652 Mon Sep 17 00:00:00 2001
From: okirmis
Date: Mon, 23 Dec 2024 00:51:32 +0100
Subject: [PATCH] Fix bug #17352 by catching errors for malformed LLM responses

---
 llama-index-core/llama_index/core/indices/utils.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/llama-index-core/llama_index/core/indices/utils.py b/llama-index-core/llama_index/core/indices/utils.py
index 4580da5c80909..ae1c0a4e22b4e 100644
--- a/llama-index-core/llama_index/core/indices/utils.py
+++ b/llama-index-core/llama_index/core/indices/utils.py
@@ -116,8 +116,18 @@ def default_parse_choice_select_answer_fn(
             continue
         answer_nums.append(answer_num)
         # extract just the first digits after the colon.
-        _answer_relevance = re.findall(r"\d+", line_tokens[1].split(":")[1].strip())[0]
-        answer_relevances.append(float(_answer_relevance))
+        try:
+            _answer_relevance = re.findall(r"\d+", line_tokens[1].split(":")[1].strip())[0]
+            answer_relevances.append(float(_answer_relevance))
+        except (IndexError, ValueError) as e:
+            if not raise_error:
+                continue
+            else:
+                raise ValueError(
+                    f"Invalid answer line: {answer_line}. "
+                    "Answer line must be of the form: "
+                    "answer_num: <int>, answer_relevance: <float>"
+                )
     return answer_nums, answer_relevances