Merge pull request Sinaptik-AI#33 from avelino/avelino/issue-32
interpret json when llm returns json code
gventuri authored May 4, 2023
2 parents 6029c55 + 4ef7277 commit 3f2c0d5
Showing 2 changed files with 9 additions and 4 deletions.
4 changes: 1 addition & 3 deletions pandasai/__init__.py
@@ -172,9 +172,7 @@ def run_code(
         lines = code.strip().split("\n")
         last_line = lines[-1].strip()
         if last_line.startswith("print(") and last_line.endswith(")"):
-            # Last line is already printing
-            return eval(last_line[6:-1])
-        # Evaluate last line and return its value or the captured output
+            last_line = last_line[6:-1]
         try:
             result = eval(last_line)
             return result
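
For context, a minimal standalone sketch of how the adjusted last-line handling behaves after this change. The helper name run_and_interpret, the exec-based environment, and the None fallback are assumptions made for illustration; per the deleted comment above, the real run_code falls back to the captured output when the final eval fails.

def run_and_interpret(code: str):
    # Sketch only: approximates the behaviour of the hunk above in isolation.
    env: dict = {}
    exec(code, env)  # run the generated code so names like `result` exist
    last_line = code.strip().split("\n")[-1].strip()
    if last_line.startswith("print(") and last_line.endswith(")"):
        # Strip the "print(" prefix and trailing ")" so the printed expression
        # itself is evaluated, instead of returning eval(...) immediately.
        last_line = last_line[6:-1]
    try:
        return eval(last_line, env)
    except Exception:
        return None  # the real run_code returns the captured stdout here

# A dict printed by LLM-generated code now comes back as a Python object:
run_and_interpret('result = {"happiness": 0.5, "gdp": 0.8}\nprint(result)')
# -> {'happiness': 0.5, 'gdp': 0.8}
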
9 changes: 8 additions & 1 deletion tests/test_pandasai.py
@@ -164,4 +164,11 @@ def test_run_without_privacy_enforcement(self):
 to get the answer to the following question :
 How many countries are in the dataframe?"""
         self.pandasai.run(df, "How many countries are in the dataframe?")
-        assert self.pandasai._llm.last_prompt == expected_prompt
+        assert self.pandasai._llm.last_prompt == expected_prompt
+
+    def test_run_with_print_at_the_end(self):
+        code = """
+result = {"happiness": 0.5, "gdp": 0.8}
+print(result)"""
+        self.setup(code)
+        self.pandasai.run_code(code, pd.DataFrame())
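
The new test only verifies that run_code completes without raising. A possible follow-up assertion is sketched below; it is not part of this commit and assumes run_code returns the evaluated dict when the snippet ends in print(...), which is what the change to pandasai/__init__.py implies.

    # Hypothetical extension, not in this commit: assert the returned value.
    def test_run_with_print_at_the_end_returns_value(self):
        code = """
result = {"happiness": 0.5, "gdp": 0.8}
print(result)"""
        self.setup(code)
        value = self.pandasai.run_code(code, pd.DataFrame())
        assert value == {"happiness": 0.5, "gdp": 0.8}  # assumed return value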
