From eaff162fea55e0b6c44597a20184e6d50fc51df0 Mon Sep 17 00:00:00 2001
From: Paul Sinclair
Date: Thu, 21 Sep 2023 21:10:04 +0100
Subject: [PATCH 01/49] Fix broken empty message response
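A minimal usage sketch, assuming the 0.1.x module-level `import interpreter` API used elsewhere in this series:
```python
import interpreter  # assumption: the 0.1.x module-level API

# Previously an empty message fell through to the "requires a display"
# exception below; now it is swapped for a placeholder prompt so the
# model replies with a suggestion.
interpreter.chat("")
```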
---
interpreter/core/core.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index d74332ff1d..c99bb5c66a 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -87,9 +87,10 @@ def _streaming_chat(self, message=None, display=True):
return
# One-off message
- if message:
+ if message or message == "":
+ if message == "":
+ message = "No entry from user - please suggest something to enter"
self.messages.append({"role": "user", "message": message})
-
yield from self._respond()
# Save conversation
@@ -102,7 +103,6 @@ def _streaming_chat(self, message=None, display=True):
json.dump(self.messages, f)
return
-
raise Exception("`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`.")
def _respond(self):
From 8c2fff688d7a0c6ed8d8f58a337fd406a084b62b Mon Sep 17 00:00:00 2001
From: kubla
Date: Sun, 24 Sep 2023 08:40:32 -0400
Subject: [PATCH 02/49] Update get_relevant_procedures.py
Fixed a typo in the instructions to the model
---
interpreter/rag/get_relevant_procedures.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/interpreter/rag/get_relevant_procedures.py b/interpreter/rag/get_relevant_procedures.py
index 2c38ffb5f4..62e8fca299 100644
--- a/interpreter/rag/get_relevant_procedures.py
+++ b/interpreter/rag/get_relevant_procedures.py
@@ -10,6 +10,6 @@ def get_relevant_procedures(messages):
url = "https://open-procedures.replit.app/search/"
relevant_procedures = requests.get(url, json=query).json()["procedures"]
- relevant_procedures = "[Recommended Procedures]\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for depracation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
+ relevant_procedures = "[Recommended Procedures]\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for deprecation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
- return relevant_procedures
\ No newline at end of file
+ return relevant_procedures
From dee41b6932a0d9b5569b1abf9144b7ffd8c3c7ad Mon Sep 17 00:00:00 2001
From: mocy
Date: Sun, 24 Sep 2023 10:50:05 -0500
Subject: [PATCH 03/49] fix crash on unknown command when displaying the help message
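The magic-command handlers are module-level functions that take the interpreter as an explicit first argument, so `self.handle_help(arguments)` looked up an attribute that does not exist. A minimal sketch of the crash pattern (illustrative names, not the full module):
```python
# The handlers are plain functions dispatched with the interpreter
# passed explicitly -- they are not bound methods of the interpreter.
def handle_help(self, arguments):
    print("%help - shows this help message")

def default_handle(self, arguments):
    print("> Unknown command")
    # `self` is the interpreter object, which has no `handle_help`
    # attribute, so `self.handle_help(arguments)` raised AttributeError.
    handle_help(self, arguments)

default_handle(object(), "")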
---
interpreter/terminal_interface/magic_commands.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/terminal_interface/magic_commands.py b/interpreter/terminal_interface/magic_commands.py
index 8a438270e3..9b315b71bd 100644
--- a/interpreter/terminal_interface/magic_commands.py
+++ b/interpreter/terminal_interface/magic_commands.py
@@ -78,7 +78,7 @@ def handle_reset(self, arguments):
def default_handle(self, arguments):
display_markdown_message("> Unknown command")
- self.handle_help(arguments)
+ handle_help(self,arguments)
def handle_save_message(self, json_path):
if json_path == "":
From a3d39ccd694088107649daf71efe51d47ba953cf Mon Sep 17 00:00:00 2001
From: Gavin M'CLELLAND
Date: Sun, 24 Sep 2023 16:36:56 -0700
Subject: [PATCH 04/49] Fix ARM64 llama-cpp-python Install on Apple Silicon
This commit updates the `MACOS.md` documentation to include detailed steps for correctly installing `llama-cpp-python` with ARM64 architecture support on Apple Silicon-based macOS systems. The update provides:
- A prerequisite check for Xcode Command Line Tools.
- Step-by-step installation instructions for `llama-cpp-python` with ARM64 and Metal support.
- A verification step to confirm the correct installation of `llama-cpp-python` for ARM64 architecture.
- An additional step for installing server components for `llama-cpp-python`.
This commit resolves the issue described in `ARM64 Installation Issue with llama-cpp-python on Apple Silicon Macs for interpreter --local #503`.
---
docs/MACOS.md | 56 ++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 47 insertions(+), 9 deletions(-)
diff --git a/docs/MACOS.md b/docs/MACOS.md
index d5a6faa3a0..ce625c928c 100644
--- a/docs/MACOS.md
+++ b/docs/MACOS.md
@@ -4,42 +4,80 @@ When running Open Interpreter on macOS with Code-Llama (either because you did
not enter an OpenAI API key or you ran `interpreter --local`) you may want to
make sure it works correctly by following the instructions below.
-Tested on **MacOS Ventura 13.5** with **M2 Pro Chip**.
+Tested on **MacOS Ventura 13.5** with **M2 Pro Chip** and **MacOS Ventura 13.5.1** with **M1 Max**.
I use conda as a virtual environment but you can choose whatever you want. If you go with conda you will find the Apple M1 version of miniconda here: [Link](https://docs.conda.io/projects/miniconda/en/latest/)
-```
+```bash
conda create -n openinterpreter python=3.11.4
```
**Activate your environment:**
-```
+```bash
conda activate openinterpreter
```
**Install open-interpreter:**
-```
+```bash
pip install open-interpreter
```
**Uninstall any previously installed llama-cpp-python packages:**
-```
+```bash
pip uninstall llama-cpp-python -y
```
-**Install llama-cpp-python with Apple Silicon support:**
+## Install llama-cpp-python with Apple Silicon support
+
+### Prerequisites: Xcode Command Line Tools
+
+Before running the `CMAKE_ARGS` command to install `llama-cpp-python`, make sure you have Xcode Command Line Tools installed on your system. These tools include compilers and build systems essential for source code compilation.
-Part 1
+You can check whether they are installed by running:
+```bash
+xcode-select -p
```
-CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
+
+If this command returns a path, then the Xcode Command Line Tools are already installed. If not, you'll get an error message, and you can install them by running:
+
+```bash
+xcode-select --install
```
-Part 2
+Follow the on-screen instructions to complete the installation. Once installed, you can proceed with installing an Apple Silicon compatible `llama-cpp-python`.
+
+---
+### Step 1: Installing llama-cpp-python with ARM64 Architecture and Metal Support
+
+```bash
+CMAKE_ARGS="-DCMAKE_OSX_ARCHITECTURES=arm64 -DLLAMA_METAL=on" FORCE_CMAKE=1 pip install --upgrade --force-reinstall llama-cpp-python --no-cache-dir
```
+
+### Step 2: Verifying Installation of llama-cpp-python with ARM64 Support
+
+After completing the installation, you can verify that `llama-cpp-python` was correctly installed with ARM64 architecture support by running the following command:
+
+```bash
+lipo -info /path/to/libllama.dylib
+```
+
+Replace `/path/to/` with the actual path to the `libllama.dylib` file. You should see output similar to:
+
+```bash
+Non-fat file: /Users/[user]/miniconda3/envs/openinterpreter/lib/python3.11/site-packages/llama_cpp/libllama.dylib is architecture: arm64
+```
+
+If the architecture is indicated as `arm64`, then you've successfully installed the ARM64 version of `llama-cpp-python`.
+
+### Step 3: Installing Server Components for llama-cpp-python
+
+
+```bash
pip install 'llama-cpp-python[server]'
```
From 7002826ffda985cb30612a5eda7fd563699fb218 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Sun, 24 Sep 2023 21:47:43 -0400
Subject: [PATCH 05/49] fix: stop overwriting boolean config values
Without the default set to None, any boolean CLI flag that isn't passed reverts to its default state even if it is configured in the config.yaml file.
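For reference, `argparse` gives `store_true` flags an implicit default of `False`, which is indistinguishable from the user turning the flag off; `default=None` lets the config layer tell "not passed" apart from an explicit value. A minimal sketch (the config-merge loop is illustrative, not the project's actual code):
```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--auto_run", dest="auto_run", action="store_true", default=None)

args = parser.parse_args([])  # flag not passed on the command line
print(args.auto_run)          # None rather than False

# Only values the user actually passed should override config.yaml:
config = {"auto_run": True}
for key, value in vars(args).items():
    if value is not None:
        config[key] = value
print(config["auto_run"])     # True -- the configured value survives
```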
---
interpreter/cli/cli.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index 4391994ec6..91ea76d3e2 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -82,7 +82,7 @@ def cli(interpreter):
# Add arguments
for arg in arguments:
if arg["type"] == bool:
- parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true')
+ parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true', default=None)
else:
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"])
From 701a42d48987dd1f686d5753eeddf19ebe594333 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Sun, 24 Sep 2023 22:19:20 -0400
Subject: [PATCH 06/49] fix: allow args to have choices and defaults
This allows non-boolean args to define possible options and default values, which were ignored previously.
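A sketch of an argument-table entry that can now carry `choices` and `default` (the `scan_code` entry added later in this series has exactly this shape):
```python
import argparse

arg = {
    "name": "scan_code",
    "nickname": "scan",
    "help_text": "optionally scan code with semgrep",
    "type": str,
    "default": "off",
    "choices": ["off", "ask", "auto"],
}

parser = argparse.ArgumentParser()
choices = arg["choices"] if "choices" in arg else None
default = arg["default"] if "default" in arg else None
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"],
                    help=arg["help_text"], type=arg["type"],
                    choices=choices, default=default)

print(parser.parse_args([]).scan_code)                      # off (the default)
print(parser.parse_args(["--scan_code", "ask"]).scan_code)  # ask (validated against choices)
```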
---
interpreter/cli/cli.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index 4391994ec6..cedab311e5 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -84,7 +84,10 @@ def cli(interpreter):
if arg["type"] == bool:
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true')
else:
- parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"])
+ choices = arg["choices"] if "choices" in arg else None
+ default = arg["default"] if "default" in arg else None
+
+ parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default)
# Add special arguments
parser.add_argument('--config', dest='config', action='store_true', help='open config.yaml file in text editor')
From 34ae780a66eff9a413a7ba4aef23217b1942dee9 Mon Sep 17 00:00:00 2001
From: mocy
Date: Sun, 24 Sep 2023 22:34:19 -0500
Subject: [PATCH 07/49] removed unnecessary spaces
---
interpreter/terminal_interface/magic_commands.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/terminal_interface/magic_commands.py b/interpreter/terminal_interface/magic_commands.py
index 9b315b71bd..8894991ec1 100644
--- a/interpreter/terminal_interface/magic_commands.py
+++ b/interpreter/terminal_interface/magic_commands.py
@@ -78,7 +78,7 @@ def handle_reset(self, arguments):
def default_handle(self, arguments):
display_markdown_message("> Unknown command")
- handle_help(self,arguments)
+ handle_help(self,arguments)
def handle_save_message(self, json_path):
if json_path == "":
From be38ef8ed6ce9d0b7768e2ec3f542337f3444f54 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Sun, 24 Sep 2023 21:38:16 -0700
Subject: [PATCH 08/49] The Generator Update (English docs)
---
README.md | 59 +++++++++++++++++++------------------------------------
1 file changed, 20 insertions(+), 39 deletions(-)
diff --git a/README.md b/README.md
index c7ae42e590..268472d936 100644
--- a/README.md
+++ b/README.md
@@ -131,13 +131,13 @@ interpreter.reset()
### Save and Restore Chats
-`interpreter.chat()` returns a List of messages when return_messages=True, which can be used to resume a conversation with `interpreter.load(messages)`:
+`interpreter.chat()` returns a List of messages, which can be used to resume a conversation with `interpreter.messages = messages`:
```python
-messages = interpreter.chat("My name is Killian.", return_messages=True) # Save messages to 'messages'
+messages = interpreter.chat("My name is Killian.") # Save messages to 'messages'
interpreter.reset() # Reset interpreter ("Killian" will be forgotten)
-interpreter.load(messages) # Resume chat from 'messages' ("Killian" will be remembered)
+interpreter.messages = messages # Resume chat from 'messages' ("Killian" will be remembered)
```
### Customize System Message
@@ -151,20 +151,26 @@ Run shell commands with -y so the user doesn't have to confirm them.
print(interpreter.system_message)
```
-### Change the Model
+### Change your Language Model
-For `gpt-3.5-turbo`, use fast mode:
+Open Interpreter uses [LiteLLM](https://docs.litellm.ai/docs/providers/) to connect to language models.
+
+You can change the model by setting the model parameter:
```shell
-interpreter --fast
+interpreter --model gpt-3.5-turbo
+interpreter --model claude-2
+interpreter --model command-nightly
```
-In Python, you will need to set the model manually:
+In Python, set the model on the object:
```python
interpreter.model = "gpt-3.5-turbo"
```
+[Find the appropriate "model" string for your language model here.](https://docs.litellm.ai/docs/providers/)
+
### Running Open Interpreter locally
ⓘ **Issues running locally?** Read our new [GPU setup guide](./docs/GPU.md) and [Windows setup guide](./docs/WINDOWS.md).
@@ -175,10 +181,10 @@ You can run `interpreter` in local mode from the command line to use `Code Llama
interpreter --local
```
-Or run any Hugging Face model **locally** by using its repo ID (e.g. "tiiuae/falcon-180B"):
+Or run any Hugging Face model **locally** by running `--local` in conjunction with a repo ID (e.g. "tiiuae/falcon-180B"):
```shell
-interpreter --model tiiuae/falcon-180B
+interpreter --local --model tiiuae/falcon-180B
```
#### Local model params
@@ -191,25 +197,6 @@ Smaller context windows will use less RAM, so we recommend trying a shorter wind
interpreter --max_tokens 2000 --context_window 16000
```
-### Azure Support
-
-To connect to an Azure deployment, the `--use-azure` flag will walk you through setting this up:
-
-```shell
-interpreter --use-azure
-```
-
-In Python, set the following variables:
-
-```
-interpreter.use_azure = True
-interpreter.api_key = "your_openai_api_key"
-interpreter.azure_api_base = "your_azure_api_base"
-interpreter.azure_api_version = "your_azure_api_version"
-interpreter.azure_deployment_name = "your_azure_deployment_name"
-interpreter.azure_api_type = "azure"
-```
-
### Debug mode
To help contributors inspect Open Interpreter, `--debug` mode is highly verbose.
@@ -239,24 +226,18 @@ provided, it defaults to 'messages.json'.
is provided, it defaults to 'messages.json'.
• `%help`: Show the help message.
-Feel free to try out these commands and let us know your feedback!
+### Configuration
-### Configuration with .env
+Open Interpreter allows you to set default behaviors using a `config.yaml` file.
-Open Interpreter allows you to set default behaviors using a .env file. This provides a flexible way to configure the interpreter without changing command-line arguments every time.
+This provides a flexible way to configure the interpreter without changing command-line arguments every time.
-Here's a sample .env configuration:
+Run the following command to open the configuration file:
```
-INTERPRETER_CLI_AUTO_RUN=False
-INTERPRETER_CLI_FAST_MODE=False
-INTERPRETER_CLI_LOCAL_RUN=False
-INTERPRETER_CLI_DEBUG=False
-INTERPRETER_CLI_USE_AZURE=False
+interpreter --config
```
-You can modify these values in the .env file to change the default behavior of the Open Interpreter.
-
## Safety Notice
Since generated code is executed in your local environment, it can interact with your files and system settings, potentially leading to unexpected outcomes like data loss or security risks.
From 3dc1981ae8934496e94114da1f72342e92691002 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Mon, 25 Sep 2023 16:56:26 -0700
Subject: [PATCH 09/49] Improved --conversations, --config
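Conversation files are now named from the first few words of the opening user message plus a timestamp, with filename-unsafe characters stripped; the conversation navigator reverses the scheme (`"__"` back to `"... ("`) to display readable names. A sketch of the filename construction, mirroring the `core.py` hunk below:
```python
from datetime import datetime

message = "My name is Killian. What's the weather like?"

# Take the first few words of the first user message...
first_few_words = "_".join(message[:25].split(" ")[:-1])
for char in "<>:\"/\\|?*!":  # invalid characters for filenames
    first_few_words = first_few_words.replace(char, "")

# ...and append a timestamp.
date = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
filename = "__".join([first_few_words, date]) + ".json"
print(filename)  # e.g. My_name_is_Killian.__September_25_2023_16-56-26.json
```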
---
interpreter/cli/cli.py | 1 +
interpreter/core/core.py | 19 ++++++++++++----
interpreter/core/respond.py | 10 ---------
.../conversation_navigator.py | 22 ++++++++++++++-----
4 files changed, 32 insertions(+), 20 deletions(-)
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index 4391994ec6..a5ddad37ff 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -111,6 +111,7 @@ def cli(interpreter):
except FileNotFoundError:
# Fallback to using 'open' on macOS if 'xdg-open' is not available
subprocess.call(['open', config_path])
+ return
# TODO Implement model explorer
"""
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index d74332ff1d..472996904e 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -11,8 +11,8 @@
from ..terminal_interface.validate_llm_settings import validate_llm_settings
import appdirs
import os
-import json
from datetime import datetime
+import json
from ..utils.check_for_update import check_for_update
from ..utils.display_markdown_message import display_markdown_message
@@ -33,7 +33,7 @@ def __init__(self):
# Conversation history
self.conversation_history = True
- self.conversation_name = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
+ self.conversation_filename = None
self.conversation_history_path = os.path.join(appdirs.user_data_dir("Open Interpreter"), "conversations")
# LLM settings
@@ -94,11 +94,22 @@ def _streaming_chat(self, message=None, display=True):
# Save conversation
if self.conversation_history:
+
+ # If it's the first message, set the conversation name
+ if len([m for m in self.messages if m["role"] == "user"]) == 1:
+
+ first_few_words = "_".join(self.messages[0]["message"][:25].split(" ")[:-1])
+ for char in "<>:\"/\\|?*!": # Invalid characters for filenames
+ first_few_words = first_few_words.replace(char, "")
+
+ date = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
+ self.conversation_filename = "__".join([first_few_words, date]) + ".json"
+
# Check if the directory exists, if not, create it
if not os.path.exists(self.conversation_history_path):
os.makedirs(self.conversation_history_path)
# Write or overwrite the file
- with open(os.path.join(self.conversation_history_path, self.conversation_name + '.json'), 'w') as f:
+ with open(os.path.join(self.conversation_history_path, self.conversation_filename), 'w') as f:
json.dump(self.messages, f)
return
@@ -110,7 +121,7 @@ def _respond(self):
def reset(self):
self.messages = []
- self.conversation_name = datetime.now().strftime("%B %d, %Y")
+ self.conversation_filename = None
for code_interpreter in self._code_interpreters.values():
code_interpreter.terminate()
self._code_interpreters = {}
\ No newline at end of file
diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 3d247e17d1..eaa9a3b44b 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -112,16 +112,11 @@ def respond(interpreter):
# We need to tell python what we (the generator) should do if they exit
break
- # Track if you've sent_output.
- # If you never do, we'll send an empty string (to indicate that code has been run)
- sent_output = False
-
# Yield each line, also append it to last messages' output
interpreter.messages[-1]["output"] = ""
for line in code_interpreter.run(code):
yield line
if "output" in line:
- sent_output = True
output = interpreter.messages[-1]["output"]
output += "\n" + line["output"]
@@ -130,11 +125,6 @@ def respond(interpreter):
interpreter.messages[-1]["output"] = output.strip()
- if sent_output == False:
- # Indicate that the code has been run by sending an empty string
- # I think we can remove this now that we send "executing".. right?
- yield {"output": ""}
-
except:
output = traceback.format_exc()
yield {"output": output.strip()}
diff --git a/interpreter/terminal_interface/conversation_navigator.py b/interpreter/terminal_interface/conversation_navigator.py
index 6b3e68975c..a2a1c624ca 100644
--- a/interpreter/terminal_interface/conversation_navigator.py
+++ b/interpreter/terminal_interface/conversation_navigator.py
@@ -28,24 +28,34 @@ def conversation_navigator(interpreter):
# Get list of all JSON files in the directory
json_files = [f for f in os.listdir(conversations_dir) if f.endswith('.json')]
- json_files.append("> Open folder") # Add the option to open the folder
+
+ # Make a dict that maps reformatted "First few words... (September 23rd)" -> "First_few_words__September_23rd.json" (original file name)
+ readable_names_and_filenames = {}
+ for filename in json_files:
+ name = filename.replace(".json", "").replace(".JSON", "").replace("__", "... (").replace("_", " ") + ")"
+ readable_names_and_filenames[name] = filename
+
+ # Add the option to open the folder. This doesn't map to a filename, we'll catch it
+ readable_names_and_filenames["> Open folder"] = None
# Use inquirer to let the user select a file
questions = [
- inquirer.List('file',
+ inquirer.List('name',
message="",
- choices=json_files,
+ choices=readable_names_and_filenames.keys(),
),
]
answers = inquirer.prompt(questions)
# If the user selected to open the folder, do so and return
- if answers['file'] == "> Open folder":
+ if answers['name'] == "> Open folder":
open_folder(conversations_dir)
return
+ selected_filename = readable_names_and_filenames[answers['name']]
+
# Open the selected file and load the JSON data
- with open(os.path.join(conversations_dir, answers['file']), 'r') as f:
+ with open(os.path.join(conversations_dir, selected_filename), 'r') as f:
messages = json.load(f)
# Pass the data into render_past_conversation
@@ -53,7 +63,7 @@ def conversation_navigator(interpreter):
# Set the interpreter's settings to the loaded messages
interpreter.messages = messages
- interpreter.conversation_name = answers['file'].replace(".json", "")
+ interpreter.conversation_filename = selected_filename
# Start the chat
interpreter.chat()
From 6bc293ca2172b301d1f1ca9a761a3279eb1b0930 Mon Sep 17 00:00:00 2001
From: Robert S Futch
Date: Mon, 25 Sep 2023 22:59:08 -0700
Subject: [PATCH 10/49] Update WINDOWS.md
Warns the user to re-launch cmd windows after installing llama locally
---
docs/WINDOWS.md | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/docs/WINDOWS.md b/docs/WINDOWS.md
index a7405ae387..c69b1435dd 100644
--- a/docs/WINDOWS.md
+++ b/docs/WINDOWS.md
@@ -40,3 +40,7 @@ The resolve this issue, perform the following steps.
```
Alternatively, if you want to include GPU suppport, follow the steps in [Local Language Models with GPU Support](./GPU.md)
+
+6. Make sure you close and re-launch any cmd windows that were running interpreter.
+
+
From f776f4eb887bd2b30501dc0304915ff220540945 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 14:21:08 -0700
Subject: [PATCH 11/49] The Generator Update
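For reference, a sketch of consuming the new streaming interface (chunk keys such as `message`, `code`, and `output` follow the diffs elsewhere in this series and are not exhaustive):
```python
import interpreter  # assumption: the 0.1.x module-level API

# Each chunk is a small dict delta, e.g. {"message": ...}, {"code": ...},
# {"output": ...}, which callers can filter or accumulate as they arrive.
for chunk in interpreter.chat("What operating system are we on?",
                              display=False, stream=True):
    print(chunk)
```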
---
README.md | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/README.md b/README.md
index 268472d936..5f97fcc1a2 100644
--- a/README.md
+++ b/README.md
@@ -93,6 +93,13 @@ This combines the power of GPT-4's Code Interpreter with the flexibility of your
## Commands
+**Update:** The Generator Update (0.1.5) introduced streaming:
+
+```python
+for chunk in interpreter.chat(display=False, stream=True):
+ print(chunk)
+```
+
### Interactive Chat
To start an interactive chat in your terminal, either run `interpreter` from the command line:
@@ -107,6 +114,13 @@ Or `interpreter.chat()` from a .py file:
interpreter.chat()
```
+**You can also stream each chunk:**
+
+```python
+for chunk in interpreter.chat(display=False, stream=True):
+ print(chunk)
+```
+
### Programmatic Chat
For more precise control, you can pass messages directly to `.chat(message)`:
From 27ae9beb32c00de89ffeb89a452ae1c05650b995 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 14:53:19 -0700
Subject: [PATCH 12/49] The Generator Update
---
README.md | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 5f97fcc1a2..7b0b0a8e8c 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,9 @@ This combines the power of GPT-4's Code Interpreter with the flexibility of your
**Update:** The Generator Update (0.1.5) introduced streaming:
```python
-for chunk in interpreter.chat(display=False, stream=True):
+message = "What operating system are we on?"
+
+for chunk in interpreter.chat(message, display=False, stream=True):
print(chunk)
```
@@ -117,7 +119,9 @@ interpreter.chat()
**You can also stream each chunk:**
```python
-for chunk in interpreter.chat(display=False, stream=True):
+message = "What operating system are we on?"
+
+for chunk in interpreter.chat(message, display=False, stream=True):
print(chunk)
```
From 7f9b42850fe35d638e55c61200350bae4ddf8435 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:01:44 -0700
Subject: [PATCH 13/49] The Generator Update - Azure fix
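Some providers (observed here with Azure via LiteLLM) occasionally emit streamed chunks that carry no `choices` entry, and indexing them raised. A minimal sketch of the defensive pattern this change applies:
```python
def stream_content(response):
    """Yield content deltas, skipping chunks that carry no choices."""
    for chunk in response:  # response: a litellm streaming iterator
        # Empty/keep-alive chunks have no "choices"; indexing them
        # raised before this fix, so skip them instead.
        if 'choices' not in chunk or len(chunk['choices']) == 0:
            continue
        yield chunk['choices'][0]['delta'].get('content', "")
```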
---
interpreter/llm/convert_to_coding_llm.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/interpreter/llm/convert_to_coding_llm.py b/interpreter/llm/convert_to_coding_llm.py
index 6ef626e6a6..c8e85acae7 100644
--- a/interpreter/llm/convert_to_coding_llm.py
+++ b/interpreter/llm/convert_to_coding_llm.py
@@ -20,6 +20,10 @@ def coding_llm(messages):
if debug_mode:
print("Chunk in coding_llm", chunk)
+
+ if ('choices' not in chunk or len(chunk['choices']) == 0):
+ # This happens sometimes
+ continue
content = chunk['choices'][0]['delta'].get('content', "")
From 3ffa2cb9289ff4039c33fbffc849b21f39c4dc85 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:10:42 -0700
Subject: [PATCH 14/49] The Generator Update - Azure function calling
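Azure deployments (`azure/...` model strings) support OpenAI-style function calling, so they are now routed to the function-calling coding LLM alongside the OpenAI chat models. A sketch of the routing predicate (note the condition as committed below has unbalanced parentheses; PATCH 15 corrects it):
```python
import litellm

def uses_function_calling(model, local=False):
    # Mirrors the setup_llm condition: function calling for OpenAI chat
    # models and Azure deployments, the text-completion path otherwise.
    return (not local
            and (model in litellm.open_ai_chat_completion_models
                 or model.startswith("azure/")))

print(uses_function_calling("gpt-4"))        # True
print(uses_function_calling("azure/gpt-4"))  # True
print(uses_function_calling("claude-2"))     # False
```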
---
interpreter/llm/setup_llm.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/interpreter/llm/setup_llm.py b/interpreter/llm/setup_llm.py
index a2760cc9e7..602c2c07da 100644
--- a/interpreter/llm/setup_llm.py
+++ b/interpreter/llm/setup_llm.py
@@ -4,6 +4,7 @@
from .convert_to_coding_llm import convert_to_coding_llm
from .setup_openai_coding_llm import setup_openai_coding_llm
import os
+import litellm
def setup_llm(interpreter):
"""
@@ -11,7 +12,8 @@ def setup_llm(interpreter):
returns a Coding LLM (a generator that streams deltas with `message` and `code`).
"""
- if not interpreter.local and "gpt-" in interpreter.model:
+ if (not interpreter.local
+ and (interpreter.model in litellm.open_ai_chat_completion_models or interpreter.model.startswith("azure/")):
# Function calling LLM
coding_llm = setup_openai_coding_llm(interpreter)
else:
From ffdcc60d309532fc63c51888a60ca7f33744e968 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:15:50 -0700
Subject: [PATCH 15/49] The Generator Update - Azure fix
---
interpreter/llm/setup_llm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/llm/setup_llm.py b/interpreter/llm/setup_llm.py
index 602c2c07da..b2b517b15a 100644
--- a/interpreter/llm/setup_llm.py
+++ b/interpreter/llm/setup_llm.py
@@ -13,7 +13,7 @@ def setup_llm(interpreter):
"""
if (not interpreter.local
- and (interpreter.model in litellm.open_ai_chat_completion_models or interpreter.model.startswith("azure/")):
+ and (interpreter.model in litellm.open_ai_chat_completion_models or interpreter.model.startswith("azure/"))):
# Function calling LLM
coding_llm = setup_openai_coding_llm(interpreter)
else:
From 476f692ee01dfc8781136ad1bfcbe0d130a341a5 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:26:06 -0700
Subject: [PATCH 16/49] Better debugging
---
interpreter/llm/setup_text_llm.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/interpreter/llm/setup_text_llm.py b/interpreter/llm/setup_text_llm.py
index 686afcd2c5..8397f3b8a0 100644
--- a/interpreter/llm/setup_text_llm.py
+++ b/interpreter/llm/setup_text_llm.py
@@ -108,6 +108,10 @@ def base_llm(messages):
if interpreter.debug_mode:
litellm.set_verbose = True
+ # Report what we're sending to LiteLLM
+ if interpreter.debug_mode:
+ print("Sending this to LiteLLM:", params)
+
return litellm.completion(**params)
return base_llm
\ No newline at end of file
From 3671d9f969f8c80e14ffa1ac8b4df441de93b995 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:26:09 -0700
Subject: [PATCH 17/49] Better debugging
---
interpreter/llm/setup_openai_coding_llm.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/interpreter/llm/setup_openai_coding_llm.py b/interpreter/llm/setup_openai_coding_llm.py
index 0c7aaf343f..c6039f85f1 100644
--- a/interpreter/llm/setup_openai_coding_llm.py
+++ b/interpreter/llm/setup_openai_coding_llm.py
@@ -38,7 +38,7 @@ def coding_llm(messages):
# Convert messages
messages = convert_to_openai_messages(messages)
- # Add OpenAI's reccomended function message
+ # Add OpenAI's recommended function message
messages[0]["content"] += "\n\nOnly use the function you have been provided with."
# Seperate out the system_message from messages
@@ -76,6 +76,10 @@ def coding_llm(messages):
if interpreter.debug_mode:
litellm.set_verbose = True
+ # Report what we're sending to LiteLLM
+ if interpreter.debug_mode:
+ print("Sending this to LiteLLM:", params)
+
response = litellm.completion(**params)
accumulated_deltas = {}
From 23242419426b883ef80a9806796547ebbd522d02 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Tue, 26 Sep 2023 17:47:04 -0700
Subject: [PATCH 18/49] Proper TokenTrimming for new models
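Trimming now falls back in three steps: use the model's known context window, else the user-configured `interpreter.context_window`, else a conservative 3000 tokens. A sketch of the chain, using the same `tokentrim` keywords as the hunks below:
```python
import tokentrim as tt

def trim_messages(messages, system_message, model, context_window=None):
    """Sketch of the fallback chain; not the exact committed code."""
    try:
        # Works when tokentrim knows the model's context window.
        return tt.trim(messages=messages, system_message=system_message,
                       model=model)
    except Exception:
        if context_window:
            # Respect a user-provided window for unknown models.
            return tt.trim(messages=messages, system_message=system_message,
                           max_tokens=context_window)
        # Unknown model and no configured window: assume 3000 tokens.
        return tt.trim(messages=messages, system_message=system_message,
                       max_tokens=3000)
```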
---
interpreter/llm/setup_openai_coding_llm.py | 13 ++++++++++++-
interpreter/llm/setup_text_llm.py | 6 +++++-
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/interpreter/llm/setup_openai_coding_llm.py b/interpreter/llm/setup_openai_coding_llm.py
index c6039f85f1..a8dcfa16fe 100644
--- a/interpreter/llm/setup_openai_coding_llm.py
+++ b/interpreter/llm/setup_openai_coding_llm.py
@@ -2,6 +2,7 @@
from ..utils.merge_deltas import merge_deltas
from ..utils.parse_partial_json import parse_partial_json
from ..utils.convert_to_openai_messages import convert_to_openai_messages
+from ..utils.display_markdown_message import display_markdown_message
import tokentrim as tt
@@ -47,7 +48,17 @@ def coding_llm(messages):
messages = messages[1:]
# Trim messages, preserving the system_message
- messages = tt.trim(messages=messages, system_message=system_message, model=interpreter.model)
+ try:
+ messages = tt.trim(messages=messages, system_message=system_message, model=interpreter.model)
+ except:
+ if interpreter.context_window:
+ messages = tt.trim(messages=messages, system_message=system_message, max_tokens=interpreter.context_window)
+ else:
+ display_markdown_message("""
+ **We were unable to determine the context window of this model.** Defaulting to 3000.
+ If your model can handle more, run `interpreter --context_window {token limit}` or `interpreter.context_window = {token limit}`.
+ """)
+ messages = tt.trim(messages=messages, system_message=system_message, max_tokens=3000)
if interpreter.debug_mode:
print("Sending this to the OpenAI LLM:", messages)
diff --git a/interpreter/llm/setup_text_llm.py b/interpreter/llm/setup_text_llm.py
index 8397f3b8a0..de662ddd44 100644
--- a/interpreter/llm/setup_text_llm.py
+++ b/interpreter/llm/setup_text_llm.py
@@ -79,7 +79,11 @@ def base_llm(messages):
try:
messages = tt.trim(messages, system_message=system_message, model=interpreter.model)
except:
- # If we don't know the model, just do 3000.
+ display_markdown_message("""
+ **We were unable to determine the context window of this model.** Defaulting to 3000.
+ If your model can handle more, run `interpreter --context_window {token limit}` or `interpreter.context_window = {token limit}`.
+ Also, please set max_tokens: `interpreter --max_tokens {max tokens per response}` or `interpreter.max_tokens = {max tokens per response}`
+ """)
messages = tt.trim(messages, system_message=system_message, max_tokens=3000)
if interpreter.debug_mode:
From 1851db8002186f2355d832b049510d4a891ea8c3 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Wed, 27 Sep 2023 11:28:10 -0700
Subject: [PATCH 19/49] Generator Update Fixes (Updated Version)
---
poetry.lock | 33 ++++++++++++++++++++++++---------
pyproject.toml | 2 +-
2 files changed, 25 insertions(+), 10 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 4bdef1e2ed..62c8e48e64 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -295,6 +295,20 @@ files = [
{file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"},
]
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -591,21 +605,22 @@ ansicon = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "litellm"
-version = "0.1.738"
+version = "0.1.781"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = ">=3.8,<4.0"
files = [
- {file = "litellm-0.1.738-py3-none-any.whl", hash = "sha256:dbb563aa1658ba90a1678a7bed0b53fe8712476cdadca61245c21f053ac54177"},
- {file = "litellm-0.1.738.tar.gz", hash = "sha256:67069c76b215e5f5deae91777f205f27641337ba5aa963c2abe720f3e3bee077"},
+ {file = "litellm-0.1.781-py3-none-any.whl", hash = "sha256:244b4b57bd01b2258beb149f16a52b777e5e955728ce67d56457da6117c48474"},
+ {file = "litellm-0.1.781.tar.gz", hash = "sha256:3b8e875ad5ee8cae5ea2cf9affc5c998d069111cee827e5eda2926bcf5ff8810"},
]
[package.dependencies]
-importlib-metadata = ">=6.8.0,<7.0.0"
+click = "*"
+importlib-metadata = ">=6.8.0"
openai = ">=0.27.0,<0.29.0"
python-dotenv = ">=0.2.0"
-tiktoken = ">=0.4.0,<0.5.0"
-tokenizers = ">=0.14.0,<0.15.0"
+tiktoken = ">=0.4.0"
+tokenizers = "*"
[[package]]
name = "markdown-it-py"
@@ -727,13 +742,13 @@ files = [
[[package]]
name = "openai"
-version = "0.28.0"
+version = "0.28.1"
description = "Python client library for the OpenAI API"
optional = false
python-versions = ">=3.7.1"
files = [
- {file = "openai-0.28.0-py3-none-any.whl", hash = "sha256:d207ece78469be5648eb87b825753282225155a29d0eec6e02013ddbf8c31c0c"},
- {file = "openai-0.28.0.tar.gz", hash = "sha256:417b78c4c2864ba696aedaf1ccff77be1f04a581ab1739f0a56e0aae19e5a794"},
+ {file = "openai-0.28.1-py3-none-any.whl", hash = "sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"},
+ {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index 4b56cd5f3b..656c97e602 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "open-interpreter"
packages = [
{include = "interpreter"},
]
-version = "0.1.5"
+version = "0.1.6"
description = "Let language models run code locally."
authors = ["Killian Lucas "]
readme = "README.md"
From 916e1e19d610d83597e88c11248e66094b5ee9ef Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Wed, 27 Sep 2023 15:24:55 -0700
Subject: [PATCH 20/49] Added example JARVIS Colab Notebook
---
README.md | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/README.md b/README.md
index 7b0b0a8e8c..a1b347973e 100644
--- a/README.md
+++ b/README.md
@@ -51,6 +51,10 @@ https://github.com/KillianLucas/open-interpreter/assets/63927363/37152071-680d-4
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1WKmRXZgsErej2xUriKzxrEAXdxMSgWbb?usp=sharing)
+#### Along with an example implementation of a "Her"-inspired style voice interface:
+
+[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1NojYGHDgxH6Y1G1oxThEBBb2AtyODBIK)
+
## Quick Start
```shell
From f89629a5db4bf78a075be63483aa9b3d11b4317d Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Wed, 27 Sep 2023 15:26:16 -0700
Subject: [PATCH 21/49] Added example JARVIS Colab Notebook
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index a1b347973e..1c6dca0cb0 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ https://github.com/KillianLucas/open-interpreter/assets/63927363/37152071-680d-4
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1WKmRXZgsErej2xUriKzxrEAXdxMSgWbb?usp=sharing)
-#### Along with an example implementation of a "Her"-inspired style voice interface:
+#### Along with an example implementation of a voice interface (inspired by _Her_):
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1NojYGHDgxH6Y1G1oxThEBBb2AtyODBIK)
From c9e2159a10f84a235ac9725a2a536935c1e14a3c Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Wed, 27 Sep 2023 22:45:29 -0700
Subject: [PATCH 22/49] Generator Update Quick Fixes
---
.vscode/settings.json | 3 +++
interpreter/cli/cli.py | 7 +++++++
interpreter/core/respond.py | 1 +
interpreter/llm/setup_local_text_llm.py | 1 -
interpreter/rag/get_relevant_procedures.py | 4 ++--
5 files changed, 13 insertions(+), 3 deletions(-)
create mode 100644 .vscode/settings.json
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000000..457f44d9ba
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "python.analysis.typeCheckingMode": "basic"
+}
\ No newline at end of file
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index b77f0789c9..d97c56c763 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -2,6 +2,7 @@
import subprocess
import os
import platform
+import pkg_resources
import appdirs
from ..utils.display_markdown_message import display_markdown_message
from ..terminal_interface.conversation_navigator import conversation_navigator
@@ -90,6 +91,7 @@ def cli(interpreter):
parser.add_argument('--config', dest='config', action='store_true', help='open config.yaml file in text editor')
parser.add_argument('--conversations', dest='conversations', action='store_true', help='list conversations to resume')
parser.add_argument('-f', '--fast', dest='fast', action='store_true', help='(depracated) runs `interpreter --model gpt-3.5-turbo`')
+ parser.add_argument('--version', dest='version', action='store_true', help="get Open Interpreter's version number")
# TODO: Implement model explorer
# parser.add_argument('--models', dest='models', action='store_true', help='list avaliable models')
@@ -137,6 +139,11 @@ def cli(interpreter):
conversation_navigator(interpreter)
return
+ if args.version:
+ version = pkg_resources.get_distribution("open-interpreter").version
+ print(f"Open Interpreter {version}")
+ return
+
# Depracated --fast
if args.fast:
# This will cause the terminal_interface to walk the user through setting up a local LLM
diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index eaa9a3b44b..51d7b51400 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -21,6 +21,7 @@ def respond(interpreter):
# Open Procedures is an open-source database of tiny, up-to-date coding tutorials.
# We can query it semantically and append relevant tutorials/procedures to our system message
+ get_relevant_procedures(interpreter.messages[-2:])
if not interpreter.local:
try:
system_message += "\n\n" + get_relevant_procedures(interpreter.messages[-2:])
diff --git a/interpreter/llm/setup_local_text_llm.py b/interpreter/llm/setup_local_text_llm.py
index fc1a765d00..1c42687fc7 100644
--- a/interpreter/llm/setup_local_text_llm.py
+++ b/interpreter/llm/setup_local_text_llm.py
@@ -265,7 +265,6 @@ def local_text_llm(messages):
else:
max_tokens = DEFAULT_MAX_TOKENS
-
messages = tt.trim(
messages,
max_tokens=(context_window-max_tokens-25),
diff --git a/interpreter/rag/get_relevant_procedures.py b/interpreter/rag/get_relevant_procedures.py
index 62e8fca299..e84f823860 100644
--- a/interpreter/rag/get_relevant_procedures.py
+++ b/interpreter/rag/get_relevant_procedures.py
@@ -9,7 +9,7 @@ def get_relevant_procedures(messages):
query = {"query": convert_to_openai_messages(messages)}
url = "https://open-procedures.replit.app/search/"
- relevant_procedures = requests.get(url, json=query).json()["procedures"]
+ relevant_procedures = requests.post(url, json=query).json()["procedures"]
relevant_procedures = "[Recommended Procedures]\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for deprecation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevent to the task **directly in your plan.**"
- return relevant_procedures
+ return relevant_procedures
\ No newline at end of file
From 76a220efde058a1f9902014695e65b19b6cce563 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Sat, 23 Sep 2023 00:41:08 -0400
Subject: [PATCH 23/49] feat: add semgrep code scanning via --scan flag
This reintroduces the --safe functionality from #24.
The new --scan flag has 3 possible values: auto, ask, and off.
Code scanning is opt-in.
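The scan flow maps the code block's language to a file extension, writes the code to a temporary file, and shells out to semgrep, cleaning up afterwards. A condensed sketch of that flow (the full implementation is in `interpreter/utils/scan_code.py` below):
```python
import os
import subprocess
import tempfile

def scan(code, extension):
    # Write the code block to a temporary file semgrep can read.
    with tempfile.NamedTemporaryFile(mode="w", delete=False,
                                     suffix=f".{extension}") as f:
        f.write(code)
        temp_file_name = f.name

    try:
        # Run semgrep against just that file, as the new module does.
        subprocess.run(
            f"cd {os.path.dirname(temp_file_name)} && "
            f"semgrep scan --config auto --dryrun {os.path.basename(temp_file_name)}",
            shell=True,
        )
    finally:
        os.remove(temp_file_name)

scan("import pickle", "py")
```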
---
README.md | 9 +
interpreter/cli/cli.py | 13 +-
.../create_code_interpreter.py | 19 +-
interpreter/code_interpreters/language_map.py | 17 +
.../languages/applescript.py | 2 +
.../code_interpreters/languages/html.py | 2 +
.../code_interpreters/languages/javascript.py | 2 +
.../code_interpreters/languages/python.py | 2 +
interpreter/code_interpreters/languages/r.py | 2 +
.../code_interpreters/languages/shell.py | 2 +
interpreter/core/core.py | 1 +
.../terminal_interface/terminal_interface.py | 21 +
interpreter/utils/scan_code.py | 62 +++
poetry.lock | 489 +++++++++++++++++-
pyproject.toml | 1 +
15 files changed, 606 insertions(+), 38 deletions(-)
create mode 100644 interpreter/code_interpreters/language_map.py
create mode 100644 interpreter/utils/scan_code.py
diff --git a/README.md b/README.md
index 1c6dca0cb0..60ee9f43fe 100644
--- a/README.md
+++ b/README.md
@@ -272,6 +272,15 @@ You can run `interpreter -y` or set `interpreter.auto_run = True` to bypass this
- Watch Open Interpreter like a self-driving car, and be prepared to end the process by closing your terminal.
- Consider running Open Interpreter in a restricted environment like Google Colab or Replit. These environments are more isolated, reducing the risks associated with executing arbitrary code.
+### Code Scanning
+
+Open Interpreter provides a way to enable code scanning via [semgrep](https://semgrep.dev/) before executing code blocks by using the `--scan` flag.
+
+- `--scan` allows you to control the behavior Open Interpreter uses to scan code with semgrep
+ - `auto` scans all code automatically before executing it
+ - `ask` asks you if you want to scan a code block before asking if you want to execute it
+ - `off` does not scan code before asking if you want to execute it
+
## How Does it Work?
Open Interpreter equips a [function-calling language model](https://platform.openai.com/docs/guides/gpt/function-calling) with an `exec()` function, which accepts a `language` (like "Python" or "JavaScript") and `code` to run.
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index d97c56c763..fd42329e0a 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -73,6 +73,14 @@
"nickname": "ak",
"help_text": "optionally set the API key for your llm calls (this will override environment variables)",
"type": str
+ },
+ {
+ "name": "scan_code",
+ "nickname": "scan",
+ "help_text": "optionally scan code with semgrep",
+ "type": str,
+ "default": "off",
+ "choices": ["off", "ask", "auto"]
}
]
@@ -85,7 +93,10 @@ def cli(interpreter):
if arg["type"] == bool:
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true', default=None)
else:
- parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"])
+ choices = arg["choices"] if "choices" in arg else None
+ default = arg["default"] if "default" in arg else None
+
+ parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default)
# Add special arguments
parser.add_argument('--config', dest='config', action='store_true', help='open config.yaml file in text editor')
diff --git a/interpreter/code_interpreters/create_code_interpreter.py b/interpreter/code_interpreters/create_code_interpreter.py
index 41e99d78d9..e18db43efd 100644
--- a/interpreter/code_interpreters/create_code_interpreter.py
+++ b/interpreter/code_interpreters/create_code_interpreter.py
@@ -1,26 +1,9 @@
-
-
-from .languages.python import Python
-from .languages.shell import Shell
-from .languages.javascript import JavaScript
-from .languages.html import HTML
-from .languages.applescript import AppleScript
-from .languages.r import R
+from .language_map import language_map
def create_code_interpreter(language):
# Case in-sensitive
language = language.lower()
- language_map = {
- "python": Python,
- "bash": Shell,
- "shell": Shell,
- "javascript": JavaScript,
- "html": HTML,
- "applescript": AppleScript,
- "r": R,
- }
-
try:
CodeInterpreter = language_map[language]
return CodeInterpreter()
diff --git a/interpreter/code_interpreters/language_map.py b/interpreter/code_interpreters/language_map.py
new file mode 100644
index 0000000000..b6beaeed0d
--- /dev/null
+++ b/interpreter/code_interpreters/language_map.py
@@ -0,0 +1,17 @@
+from .languages.python import Python
+from .languages.shell import Shell
+from .languages.javascript import JavaScript
+from .languages.html import HTML
+from .languages.applescript import AppleScript
+from .languages.r import R
+
+
+language_map = {
+ "python": Python,
+ "bash": Shell,
+ "shell": Shell,
+ "javascript": JavaScript,
+ "html": HTML,
+ "applescript": AppleScript,
+ "r": R,
+}
diff --git a/interpreter/code_interpreters/languages/applescript.py b/interpreter/code_interpreters/languages/applescript.py
index 862c9f99b1..58103a8f70 100644
--- a/interpreter/code_interpreters/languages/applescript.py
+++ b/interpreter/code_interpreters/languages/applescript.py
@@ -2,6 +2,8 @@
from ..subprocess_code_interpreter import SubprocessCodeInterpreter
class AppleScript(SubprocessCodeInterpreter):
+ file_extension = "applescript"
+
def __init__(self):
super().__init__()
self.start_cmd = os.environ.get('SHELL', '/bin/zsh')
diff --git a/interpreter/code_interpreters/languages/html.py b/interpreter/code_interpreters/languages/html.py
index ba4ccde932..ff139aac66 100644
--- a/interpreter/code_interpreters/languages/html.py
+++ b/interpreter/code_interpreters/languages/html.py
@@ -4,6 +4,8 @@
from ..base_code_interpreter import BaseCodeInterpreter
class HTML(BaseCodeInterpreter):
+ file_extension = "html"
+
def __init__(self):
super().__init__()
diff --git a/interpreter/code_interpreters/languages/javascript.py b/interpreter/code_interpreters/languages/javascript.py
index b6be88aefa..ad008e2664 100644
--- a/interpreter/code_interpreters/languages/javascript.py
+++ b/interpreter/code_interpreters/languages/javascript.py
@@ -2,6 +2,8 @@
import re
class JavaScript(SubprocessCodeInterpreter):
+ file_extension = "js"
+
def __init__(self):
super().__init__()
self.start_cmd = "node -i"
diff --git a/interpreter/code_interpreters/languages/python.py b/interpreter/code_interpreters/languages/python.py
index 3c7ee16167..f36e0935b2 100644
--- a/interpreter/code_interpreters/languages/python.py
+++ b/interpreter/code_interpreters/languages/python.py
@@ -4,6 +4,8 @@
import re
class Python(SubprocessCodeInterpreter):
+ file_extension = "py"
+
def __init__(self):
super().__init__()
self.start_cmd = sys.executable + " -i -q -u"
diff --git a/interpreter/code_interpreters/languages/r.py b/interpreter/code_interpreters/languages/r.py
index b9733f871b..c769ae239e 100644
--- a/interpreter/code_interpreters/languages/r.py
+++ b/interpreter/code_interpreters/languages/r.py
@@ -2,6 +2,8 @@
import re
class R(SubprocessCodeInterpreter):
+ file_extension = "r"
+
def __init__(self):
super().__init__()
self.start_cmd = "R -q --vanilla" # Start R in quiet and vanilla mode
diff --git a/interpreter/code_interpreters/languages/shell.py b/interpreter/code_interpreters/languages/shell.py
index 5d77eacf6a..dbd1e9bf94 100644
--- a/interpreter/code_interpreters/languages/shell.py
+++ b/interpreter/code_interpreters/languages/shell.py
@@ -4,6 +4,8 @@
import os
class Shell(SubprocessCodeInterpreter):
+ file_extension = "sh"
+
def __init__(self):
super().__init__()
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index 639c652a57..4a90360459 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -30,6 +30,7 @@ def __init__(self):
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
+ self.scan_code = False
# Conversation history
self.conversation_history = True
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index fe9469d5d9..b2123e9505 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -8,6 +8,7 @@
from .magic_commands import handle_magic_command
from ..utils.display_markdown_message import display_markdown_message
from ..utils.truncate_output import truncate_output
+from ..utils.scan_code import scan_code
def terminal_interface(interpreter, message):
if not interpreter.auto_run:
@@ -87,6 +88,26 @@ def terminal_interface(interpreter, message):
# End the active block so you can run input() below it
active_block.end()
+ should_scan_code = False
+
+ if not interpreter.scan_code == "off":
+ if interpreter.scan_code == "auto":
+ should_scan_code = True
+ elif interpreter.scan_code == 'ask':
+ response = input(" Would you like to scan this code? (y/n)\n\n ")
+ print("") # <- Aesthetic choice
+
+ if response.strip().lower() == "y":
+ should_scan_code = True
+
+ if should_scan_code:
+ # Get code language and actual code from the chunk
+ # We need to give these to semgrep when we start our scan
+ language = chunk["executing"]["language"]
+ code = chunk["executing"]["code"]
+
+ scan_code(code, language, interpreter)
+
response = input(" Would you like to run this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
diff --git a/interpreter/utils/scan_code.py b/interpreter/utils/scan_code.py
new file mode 100644
index 0000000000..4fb36404f7
--- /dev/null
+++ b/interpreter/utils/scan_code.py
@@ -0,0 +1,62 @@
+import os
+import tempfile
+import subprocess
+
+from ..code_interpreters.language_map import language_map
+
+
+def get_extension(language_name):
+ """
+ Get the file extension for a given language
+ """
+ language = language_map[language_name.lower()]
+
+ if language.file_extension:
+ return language.file_extension
+ else:
+ return language
+
+
+def scan_code(code, language, self):
+ """
+ Scan code with semgrep
+ """
+
+ # Create a temporary file
+ with tempfile.NamedTemporaryFile(
+ mode="w", delete=False, suffix=f".{get_extension(language)}"
+ ) as f:
+ f.write(code)
+ temp_file_name = f.name
+ f.close()
+
+ temp_path = os.path.dirname(temp_file_name)
+ file_name = os.path.basename(temp_file_name)
+
+ if self.debug_mode:
+ print(f"Created temporary file {temp_file_name}")
+ print(f"Scanning {language} code in {file_name}")
+ print("---")
+
+ # Run semgrep
+ try:
+ # HACK: we need to give the subprocess shell access so that the semgrep from our pyproject.toml is available
+ # the global namespace might have semgrep from guarddog installed, but guarddog is currently
+ # pinned to an old semgrep version that has issues with reading the semgrep registry
+ # while scanning a single file like the temporary one we generate
+ # if guarddog solves [#249](https://github.com/DataDog/guarddog/issues/249) we can change this approach a bit
+ subprocess.run(
+ f"cd {temp_path} && semgrep scan --config auto --dryrun {file_name}",
+ shell=True,
+ )
+
+ # TODO: it would be great if we could capture any vulnerabilities identified by semgrep
+ # and add them to the conversation history
+
+ except Exception as e:
+ print(f"Could not scan {language} code.")
+ print(e)
+ print("") # <- Aesthetic choice
+
+ # clean up temporary file
+ os.remove(temp_file_name)
diff --git a/poetry.lock b/poetry.lock
index 62c8e48e64..46d6e7b060 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -200,6 +200,28 @@ jinxed = {version = ">=1.1.0", markers = "platform_system == \"Windows\""}
six = ">=1.9.0"
wcwidth = ">=0.1.4"
+[[package]]
+name = "boltons"
+version = "21.0.0"
+description = "When they're not builtins, they're boltons."
+optional = false
+python-versions = "*"
+files = [
+ {file = "boltons-21.0.0-py2.py3-none-any.whl", hash = "sha256:b9bb7b58b2b420bbe11a6025fdef6d3e5edc9f76a42fb467afe7ca212ef9948b"},
+ {file = "boltons-21.0.0.tar.gz", hash = "sha256:65e70a79a731a7fe6e98592ecfb5ccf2115873d01dbc576079874629e5c90f13"},
+]
+
+[[package]]
+name = "bracex"
+version = "2.4"
+description = "Bash style brace expander."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "bracex-2.4-py3-none-any.whl", hash = "sha256:efdc71eff95eaff5e0f8cfebe7d01adf2c8637c8c92edaf63ef348c241a82418"},
+ {file = "bracex-2.4.tar.gz", hash = "sha256:a27eaf1df42cf561fed58b7a8f3fdf129d1ea16a81e1fadd1d17989bc6384beb"},
+]
+
[[package]]
name = "certifi"
version = "2023.7.22"
@@ -309,6 +331,25 @@ files = [
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
+[[package]]
+name = "click-option-group"
+version = "0.5.6"
+description = "Option groups missing in Click"
+optional = false
+python-versions = ">=3.6,<4"
+files = [
+ {file = "click-option-group-0.5.6.tar.gz", hash = "sha256:97d06703873518cc5038509443742b25069a3c7562d1ea72ff08bfadde1ce777"},
+ {file = "click_option_group-0.5.6-py3-none-any.whl", hash = "sha256:38a26d963ee3ad93332ddf782f9259c5bdfe405e73408d943ef5e7d0c3767ec7"},
+]
+
+[package.dependencies]
+Click = ">=7.0,<9"
+
+[package.extras]
+docs = ["Pallets-Sphinx-Themes", "m2r2", "sphinx"]
+tests = ["pytest"]
+tests-cov = ["coverage", "coveralls", "pytest", "pytest-cov"]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -320,6 +361,17 @@ files = [
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+[[package]]
+name = "defusedxml"
+version = "0.7.1"
+description = "XML bomb protection for Python stdlib modules"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
+ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
+]
+
[[package]]
name = "exceptiongroup"
version = "1.1.3"
@@ -334,6 +386,20 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "face"
+version = "22.0.0"
+description = "A command-line application framework (and CLI parser). Friendly for users, full-featured for developers."
+optional = false
+python-versions = "*"
+files = [
+ {file = "face-22.0.0-py3-none-any.whl", hash = "sha256:344fe31562d0f6f444a45982418f3793d4b14f9abb98ccca1509d22e0a3e7e35"},
+ {file = "face-22.0.0.tar.gz", hash = "sha256:d5d692f90bc8f5987b636e47e36384b9bbda499aaf0a77aa0b0bbe834c76923d"},
+]
+
+[package.dependencies]
+boltons = ">=20.0.0"
+
[[package]]
name = "filelock"
version = "3.12.4"
@@ -500,6 +566,25 @@ gitdb = ">=4.0.1,<5"
[package.extras]
test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-sugar"]
+[[package]]
+name = "glom"
+version = "22.1.0"
+description = "A declarative object transformer and formatter, for conglomerating nested data."
+optional = false
+python-versions = "*"
+files = [
+ {file = "glom-22.1.0-py2.py3-none-any.whl", hash = "sha256:5339da206bf3532e01a83a35aca202960ea885156986d190574b779598e9e772"},
+ {file = "glom-22.1.0.tar.gz", hash = "sha256:1510c6587a8f9c64a246641b70033cbc5ebde99f02ad245693678038e821aeb5"},
+]
+
+[package.dependencies]
+attrs = "*"
+boltons = ">=19.3.0"
+face = ">=20.1.0"
+
+[package.extras]
+yaml = ["PyYAML"]
+
[[package]]
name = "huggingface-hub"
version = "0.16.4"
@@ -603,24 +688,58 @@ files = [
[package.dependencies]
ansicon = {version = "*", markers = "platform_system == \"Windows\""}
+[[package]]
+name = "jsonschema"
+version = "4.19.1"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"},
+ {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+jsonschema-specifications = ">=2023.03.6"
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2023.7.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"},
+ {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"},
+]
+
+[package.dependencies]
+referencing = ">=0.28.0"
+
[[package]]
name = "litellm"
-version = "0.1.781"
+version = "0.1.736"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = ">=3.8,<4.0"
files = [
- {file = "litellm-0.1.781-py3-none-any.whl", hash = "sha256:244b4b57bd01b2258beb149f16a52b777e5e955728ce67d56457da6117c48474"},
- {file = "litellm-0.1.781.tar.gz", hash = "sha256:3b8e875ad5ee8cae5ea2cf9affc5c998d069111cee827e5eda2926bcf5ff8810"},
+ {file = "litellm-0.1.736-py3-none-any.whl", hash = "sha256:2918b3b0e807b39b6575e36c02624352fafd5ba2f01c923782f8f3b863793bb4"},
+ {file = "litellm-0.1.736.tar.gz", hash = "sha256:3d4dc22bee2cb11243d8111f61493acc0f32b4bfe46eb478b2b940c85e1b546b"},
]
[package.dependencies]
-click = "*"
-importlib-metadata = ">=6.8.0"
+importlib-metadata = ">=6.8.0,<7.0.0"
openai = ">=0.27.0,<0.29.0"
python-dotenv = ">=0.2.0"
-tiktoken = ">=0.4.0"
-tokenizers = "*"
+tiktoken = ">=0.4.0,<0.5.0"
+tokenizers = ">=0.14.0,<0.15.0"
[[package]]
name = "markdown-it-py"
@@ -742,13 +861,13 @@ files = [
[[package]]
name = "openai"
-version = "0.28.1"
+version = "0.28.0"
description = "Python client library for the OpenAI API"
optional = false
python-versions = ">=3.7.1"
files = [
- {file = "openai-0.28.1-py3-none-any.whl", hash = "sha256:d18690f9e3d31eedb66b57b88c2165d760b24ea0a01f150dd3f068155088ce68"},
- {file = "openai-0.28.1.tar.gz", hash = "sha256:4be1dad329a65b4ce1a660fe6d5431b438f429b5855c883435f0f7fcb6d2dcc8"},
+ {file = "openai-0.28.0-py3-none-any.whl", hash = "sha256:d207ece78469be5648eb87b825753282225155a29d0eec6e02013ddbf8c31c0c"},
+ {file = "openai-0.28.0.tar.gz", hash = "sha256:417b78c4c2864ba696aedaf1ccff77be1f04a581ab1739f0a56e0aae19e5a794"},
]
[package.dependencies]
@@ -773,6 +892,16 @@ files = [
{file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
]
+[[package]]
+name = "peewee"
+version = "3.16.3"
+description = "a little orm"
+optional = false
+python-versions = "*"
+files = [
+ {file = "peewee-3.16.3.tar.gz", hash = "sha256:12b30e931193bc37b11f7c2ac646e3f67125a8b1a543ad6ab37ad124c8df7d16"},
+]
+
[[package]]
name = "pluggy"
version = "1.3.0"
@@ -861,6 +990,23 @@ files = [
{file = "python_editor-1.0.4-py3-none-any.whl", hash = "sha256:1bf6e860a8ad52a14c3ee1252d5dc25b2030618ed80c022598f00176adc8367d"},
]
+[[package]]
+name = "python-lsp-jsonrpc"
+version = "1.0.0"
+description = "JSON RPC 2.0 server library"
+optional = false
+python-versions = "*"
+files = [
+ {file = "python-lsp-jsonrpc-1.0.0.tar.gz", hash = "sha256:7bec170733db628d3506ea3a5288ff76aa33c70215ed223abdb0d95e957660bd"},
+ {file = "python_lsp_jsonrpc-1.0.0-py3-none-any.whl", hash = "sha256:079b143be64b0a378bdb21dff5e28a8c1393fe7e8a654ef068322d754e545fc7"},
+]
+
+[package.dependencies]
+ujson = ">=3.0.0"
+
+[package.extras]
+test = ["coverage", "pycodestyle", "pyflakes", "pylint", "pytest", "pytest-cov"]
+
[[package]]
name = "pyyaml"
version = "6.0.1"
@@ -934,6 +1080,21 @@ files = [
[package.dependencies]
setuptools = ">=41.0"
+[[package]]
+name = "referencing"
+version = "0.30.2"
+description = "JSON Referencing + Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"},
+ {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+rpds-py = ">=0.7.0"
+
[[package]]
name = "regex"
version = "2023.8.8"
@@ -1070,6 +1231,213 @@ pygments = ">=2.13.0,<3.0.0"
[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]
+[[package]]
+name = "rpds-py"
+version = "0.10.3"
+description = "Python bindings to Rust's persistent data structures (rpds)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "rpds_py-0.10.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:485747ee62da83366a44fbba963c5fe017860ad408ccd6cd99aa66ea80d32b2e"},
+ {file = "rpds_py-0.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c55f9821f88e8bee4b7a72c82cfb5ecd22b6aad04033334f33c329b29bfa4da0"},
+ {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3b52a67ac66a3a64a7e710ba629f62d1e26ca0504c29ee8cbd99b97df7079a8"},
+ {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3aed39db2f0ace76faa94f465d4234aac72e2f32b009f15da6492a561b3bbebd"},
+ {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271c360fdc464fe6a75f13ea0c08ddf71a321f4c55fc20a3fe62ea3ef09df7d9"},
+ {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef5fddfb264e89c435be4adb3953cef5d2936fdeb4463b4161a6ba2f22e7b740"},
+ {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a771417c9c06c56c9d53d11a5b084d1de75de82978e23c544270ab25e7c066ff"},
+ {file = "rpds_py-0.10.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:52b5cbc0469328e58180021138207e6ec91d7ca2e037d3549cc9e34e2187330a"},
+ {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6ac3fefb0d168c7c6cab24fdfc80ec62cd2b4dfd9e65b84bdceb1cb01d385c33"},
+ {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:8d54bbdf5d56e2c8cf81a1857250f3ea132de77af543d0ba5dce667183b61fec"},
+ {file = "rpds_py-0.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cd2163f42868865597d89399a01aa33b7594ce8e2c4a28503127c81a2f17784e"},
+ {file = "rpds_py-0.10.3-cp310-none-win32.whl", hash = "sha256:ea93163472db26ac6043e8f7f93a05d9b59e0505c760da2a3cd22c7dd7111391"},
+ {file = "rpds_py-0.10.3-cp310-none-win_amd64.whl", hash = "sha256:7cd020b1fb41e3ab7716d4d2c3972d4588fdfbab9bfbbb64acc7078eccef8860"},
+ {file = "rpds_py-0.10.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:1d9b5ee46dcb498fa3e46d4dfabcb531e1f2e76b477e0d99ef114f17bbd38453"},
+ {file = "rpds_py-0.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:563646d74a4b4456d0cf3b714ca522e725243c603e8254ad85c3b59b7c0c4bf0"},
+ {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e626b864725680cd3904414d72e7b0bd81c0e5b2b53a5b30b4273034253bb41f"},
+ {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:485301ee56ce87a51ccb182a4b180d852c5cb2b3cb3a82f7d4714b4141119d8c"},
+ {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:42f712b4668831c0cd85e0a5b5a308700fe068e37dcd24c0062904c4e372b093"},
+ {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c9141af27a4e5819d74d67d227d5047a20fa3c7d4d9df43037a955b4c748ec5"},
+ {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef750a20de1b65657a1425f77c525b0183eac63fe7b8f5ac0dd16f3668d3e64f"},
+ {file = "rpds_py-0.10.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1a0ffc39f51aa5f5c22114a8f1906b3c17eba68c5babb86c5f77d8b1bba14d1"},
+ {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f4c179a7aeae10ddf44c6bac87938134c1379c49c884529f090f9bf05566c836"},
+ {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:176287bb998fd1e9846a9b666e240e58f8d3373e3bf87e7642f15af5405187b8"},
+ {file = "rpds_py-0.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6446002739ca29249f0beaaf067fcbc2b5aab4bc7ee8fb941bd194947ce19aff"},
+ {file = "rpds_py-0.10.3-cp311-none-win32.whl", hash = "sha256:c7aed97f2e676561416c927b063802c8a6285e9b55e1b83213dfd99a8f4f9e48"},
+ {file = "rpds_py-0.10.3-cp311-none-win_amd64.whl", hash = "sha256:8bd01ff4032abaed03f2db702fa9a61078bee37add0bd884a6190b05e63b028c"},
+ {file = "rpds_py-0.10.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:4cf0855a842c5b5c391dd32ca273b09e86abf8367572073bd1edfc52bc44446b"},
+ {file = "rpds_py-0.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:69b857a7d8bd4f5d6e0db4086da8c46309a26e8cefdfc778c0c5cc17d4b11e08"},
+ {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:975382d9aa90dc59253d6a83a5ca72e07f4ada3ae3d6c0575ced513db322b8ec"},
+ {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35fbd23c1c8732cde7a94abe7fb071ec173c2f58c0bd0d7e5b669fdfc80a2c7b"},
+ {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:106af1653007cc569d5fbb5f08c6648a49fe4de74c2df814e234e282ebc06957"},
+ {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce5e7504db95b76fc89055c7f41e367eaadef5b1d059e27e1d6eabf2b55ca314"},
+ {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aca759ada6b1967fcfd4336dcf460d02a8a23e6abe06e90ea7881e5c22c4de6"},
+ {file = "rpds_py-0.10.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b5d4bdd697195f3876d134101c40c7d06d46c6ab25159ed5cbd44105c715278a"},
+ {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a657250807b6efd19b28f5922520ae002a54cb43c2401e6f3d0230c352564d25"},
+ {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:177c9dd834cdf4dc39c27436ade6fdf9fe81484758885f2d616d5d03c0a83bd2"},
+ {file = "rpds_py-0.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e22491d25f97199fc3581ad8dd8ce198d8c8fdb8dae80dea3512e1ce6d5fa99f"},
+ {file = "rpds_py-0.10.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:2f3e1867dd574014253b4b8f01ba443b9c914e61d45f3674e452a915d6e929a3"},
+ {file = "rpds_py-0.10.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c22211c165166de6683de8136229721f3d5c8606cc2c3d1562da9a3a5058049c"},
+ {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40bc802a696887b14c002edd43c18082cb7b6f9ee8b838239b03b56574d97f71"},
+ {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e271dd97c7bb8eefda5cca38cd0b0373a1fea50f71e8071376b46968582af9b"},
+ {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:95cde244e7195b2c07ec9b73fa4c5026d4a27233451485caa1cd0c1b55f26dbd"},
+ {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08a80cf4884920863623a9ee9a285ee04cef57ebedc1cc87b3e3e0f24c8acfe5"},
+ {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763ad59e105fca09705d9f9b29ecffb95ecdc3b0363be3bb56081b2c6de7977a"},
+ {file = "rpds_py-0.10.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:187700668c018a7e76e89424b7c1042f317c8df9161f00c0c903c82b0a8cac5c"},
+ {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:5267cfda873ad62591b9332fd9472d2409f7cf02a34a9c9cb367e2c0255994bf"},
+ {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:2ed83d53a8c5902ec48b90b2ac045e28e1698c0bea9441af9409fc844dc79496"},
+ {file = "rpds_py-0.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:255f1a10ae39b52122cce26ce0781f7a616f502feecce9e616976f6a87992d6b"},
+ {file = "rpds_py-0.10.3-cp38-none-win32.whl", hash = "sha256:a019a344312d0b1f429c00d49c3be62fa273d4a1094e1b224f403716b6d03be1"},
+ {file = "rpds_py-0.10.3-cp38-none-win_amd64.whl", hash = "sha256:efb9ece97e696bb56e31166a9dd7919f8f0c6b31967b454718c6509f29ef6fee"},
+ {file = "rpds_py-0.10.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:570cc326e78ff23dec7f41487aa9c3dffd02e5ee9ab43a8f6ccc3df8f9327623"},
+ {file = "rpds_py-0.10.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cff7351c251c7546407827b6a37bcef6416304fc54d12d44dbfecbb717064717"},
+ {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:177914f81f66c86c012311f8c7f46887ec375cfcfd2a2f28233a3053ac93a569"},
+ {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:448a66b8266de0b581246ca7cd6a73b8d98d15100fb7165974535fa3b577340e"},
+ {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bbac1953c17252f9cc675bb19372444aadf0179b5df575ac4b56faaec9f6294"},
+ {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dd9d9d9e898b9d30683bdd2b6c1849449158647d1049a125879cb397ee9cd12"},
+ {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c71ea77536149e36c4c784f6d420ffd20bea041e3ba21ed021cb40ce58e2c9"},
+ {file = "rpds_py-0.10.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16a472300bc6c83fe4c2072cc22b3972f90d718d56f241adabc7ae509f53f154"},
+ {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b9255e7165083de7c1d605e818025e8860636348f34a79d84ec533546064f07e"},
+ {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:53d7a3cd46cdc1689296348cb05ffd4f4280035770aee0c8ead3bbd4d6529acc"},
+ {file = "rpds_py-0.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22da15b902f9f8e267020d1c8bcfc4831ca646fecb60254f7bc71763569f56b1"},
+ {file = "rpds_py-0.10.3-cp39-none-win32.whl", hash = "sha256:850c272e0e0d1a5c5d73b1b7871b0a7c2446b304cec55ccdb3eaac0d792bb065"},
+ {file = "rpds_py-0.10.3-cp39-none-win_amd64.whl", hash = "sha256:de61e424062173b4f70eec07e12469edde7e17fa180019a2a0d75c13a5c5dc57"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:af247fd4f12cca4129c1b82090244ea5a9d5bb089e9a82feb5a2f7c6a9fe181d"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ad59efe24a4d54c2742929001f2d02803aafc15d6d781c21379e3f7f66ec842"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642ed0a209ced4be3a46f8cb094f2d76f1f479e2a1ceca6de6346a096cd3409d"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37d0c59548ae56fae01c14998918d04ee0d5d3277363c10208eef8c4e2b68ed6"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad6ed9e70ddfb34d849b761fb243be58c735be6a9265b9060d6ddb77751e3e8"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8f94fdd756ba1f79f988855d948ae0bad9ddf44df296770d9a58c774cfbcca72"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77076bdc8776a2b029e1e6ffbe6d7056e35f56f5e80d9dc0bad26ad4a024a762"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:87d9b206b1bd7a0523375dc2020a6ce88bca5330682ae2fe25e86fd5d45cea9c"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8efaeb08ede95066da3a3e3c420fcc0a21693fcd0c4396d0585b019613d28515"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a4d9bfda3f84fc563868fe25ca160c8ff0e69bc4443c5647f960d59400ce6557"},
+ {file = "rpds_py-0.10.3-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:d27aa6bbc1f33be920bb7adbb95581452cdf23005d5611b29a12bb6a3468cc95"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ed8313809571a5463fd7db43aaca68ecb43ca7a58f5b23b6e6c6c5d02bdc7882"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:e10e6a1ed2b8661201e79dff5531f8ad4cdd83548a0f81c95cf79b3184b20c33"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:015de2ce2af1586ff5dc873e804434185199a15f7d96920ce67e50604592cae9"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae87137951bb3dc08c7d8bfb8988d8c119f3230731b08a71146e84aaa919a7a9"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0bb4f48bd0dd18eebe826395e6a48b7331291078a879295bae4e5d053be50d4c"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09362f86ec201288d5687d1dc476b07bf39c08478cde837cb710b302864e7ec9"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821392559d37759caa67d622d0d2994c7a3f2fb29274948ac799d496d92bca73"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7170cbde4070dc3c77dec82abf86f3b210633d4f89550fa0ad2d4b549a05572a"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:5de11c041486681ce854c814844f4ce3282b6ea1656faae19208ebe09d31c5b8"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:4ed172d0c79f156c1b954e99c03bc2e3033c17efce8dd1a7c781bc4d5793dfac"},
+ {file = "rpds_py-0.10.3-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:11fdd1192240dda8d6c5d18a06146e9045cb7e3ba7c06de6973000ff035df7c6"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:f602881d80ee4228a2355c68da6b296a296cd22bbb91e5418d54577bbf17fa7c"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:691d50c99a937709ac4c4cd570d959a006bd6a6d970a484c84cc99543d4a5bbb"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24cd91a03543a0f8d09cb18d1cb27df80a84b5553d2bd94cba5979ef6af5c6e7"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fc2200e79d75b5238c8d69f6a30f8284290c777039d331e7340b6c17cad24a5a"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea65b59882d5fa8c74a23f8960db579e5e341534934f43f3b18ec1839b893e41"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:829e91f3a8574888b73e7a3feb3b1af698e717513597e23136ff4eba0bc8387a"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eab75a8569a095f2ad470b342f2751d9902f7944704f0571c8af46bede438475"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:061c3ff1f51ecec256e916cf71cc01f9975af8fb3af9b94d3c0cc8702cfea637"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:39d05e65f23a0fe897b6ac395f2a8d48c56ac0f583f5d663e0afec1da89b95da"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eca20917a06d2fca7628ef3c8b94a8c358f6b43f1a621c9815243462dcccf97"},
+ {file = "rpds_py-0.10.3-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e8d0f0eca087630d58b8c662085529781fd5dc80f0a54eda42d5c9029f812599"},
+ {file = "rpds_py-0.10.3.tar.gz", hash = "sha256:fcc1ebb7561a3e24a6588f7c6ded15d80aec22c66a070c757559b57b17ffd1cb"},
+]
+
+[[package]]
+name = "ruamel-yaml"
+version = "0.17.32"
+description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "ruamel.yaml-0.17.32-py3-none-any.whl", hash = "sha256:23cd2ed620231677564646b0c6a89d138b6822a0d78656df7abda5879ec4f447"},
+ {file = "ruamel.yaml-0.17.32.tar.gz", hash = "sha256:ec939063761914e14542972a5cba6d33c23b0859ab6342f61cf070cfc600efc2"},
+]
+
+[package.dependencies]
+"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.12\""}
+
+[package.extras]
+docs = ["ryd"]
+jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"]
+
+[[package]]
+name = "ruamel-yaml-clib"
+version = "0.2.7"
+description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d5859983f26d8cd7bb5c287ef452e8aacc86501487634573d260968f753e1d71"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:debc87a9516b237d0466a711b18b6ebeb17ba9f391eb7f91c649c5c4ec5006c7"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:df5828871e6648db72d1c19b4bd24819b80a755c4541d3409f0f7acd0f335c80"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:efa08d63ef03d079dcae1dfe334f6c8847ba8b645d08df286358b1f5293d24ab"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win32.whl", hash = "sha256:763d65baa3b952479c4e972669f679fe490eee058d5aa85da483ebae2009d231"},
+ {file = "ruamel.yaml.clib-0.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:d000f258cf42fec2b1bbf2863c61d7b8918d31ffee905da62dede869254d3b8a"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:1a6391a7cabb7641c32517539ca42cf84b87b667bad38b78d4d42dd23e957c81"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:9c7617df90c1365638916b98cdd9be833d31d337dbcd722485597b43c4a215bf"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:41d0f1fa4c6830176eef5b276af04c89320ea616655d01327d5ce65e50575c94"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-win32.whl", hash = "sha256:f6d3d39611ac2e4f62c3128a9eed45f19a6608670c5a2f4f07f24e8de3441d38"},
+ {file = "ruamel.yaml.clib-0.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:da538167284de58a52109a9b89b8f6a53ff8437dd6dc26d33b57bf6699153122"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-macosx_12_0_arm64.whl", hash = "sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:370445fd795706fd291ab00c9df38a0caed0f17a6fb46b0f607668ecb16ce763"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-win32.whl", hash = "sha256:ecdf1a604009bd35c674b9225a8fa609e0282d9b896c03dd441a91e5f53b534e"},
+ {file = "ruamel.yaml.clib-0.2.7-cp36-cp36m-win_amd64.whl", hash = "sha256:f34019dced51047d6f70cb9383b2ae2853b7fc4dce65129a5acd49f4f9256646"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2aa261c29a5545adfef9296b7e33941f46aa5bbd21164228e833412af4c9c75f"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f01da5790e95815eb5a8a138508c01c758e5f5bc0ce4286c4f7028b8dd7ac3d0"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:40d030e2329ce5286d6b231b8726959ebbe0404c92f0a578c0e2482182e38282"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c3ca1fbba4ae962521e5eb66d72998b51f0f4d0f608d3c0347a48e1af262efa7"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-win32.whl", hash = "sha256:7bdb4c06b063f6fd55e472e201317a3bb6cdeeee5d5a38512ea5c01e1acbdd93"},
+ {file = "ruamel.yaml.clib-0.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:be2a7ad8fd8f7442b24323d24ba0b56c51219513cfa45b9ada3b87b76c374d4b"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91a789b4aa0097b78c93e3dc4b40040ba55bef518f84a40d4442f713b4094acb"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:99e77daab5d13a48a4054803d052ff40780278240a902b880dd37a51ba01a307"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3243f48ecd450eddadc2d11b5feb08aca941b5cd98c9b1db14b2fd128be8c697"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8831a2cedcd0f0927f788c5bdf6567d9dc9cc235646a434986a852af1cb54b4b"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-win32.whl", hash = "sha256:3110a99e0f94a4a3470ff67fc20d3f96c25b13d24c6980ff841e82bafe827cac"},
+ {file = "ruamel.yaml.clib-0.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:92460ce908546ab69770b2e576e4f99fbb4ce6ab4b245345a3869a0a0410488f"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc0667c1eb8f83a3752b71b9c4ba55ef7c7058ae57022dd9b29065186a113d9"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:4a4d8d417868d68b979076a9be6a38c676eca060785abaa6709c7b31593c35d1"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf9a6bc4a0221538b1a7de3ed7bca4c93c02346853f44e1cd764be0023cd3640"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a7b301ff08055d73223058b5c46c55638917f04d21577c95e00e0c4d79201a6b"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-win32.whl", hash = "sha256:d5e51e2901ec2366b79f16c2299a03e74ba4531ddcfacc1416639c557aef0ad8"},
+ {file = "ruamel.yaml.clib-0.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:184faeaec61dbaa3cace407cffc5819f7b977e75360e8d5ca19461cd851a5fc5"},
+ {file = "ruamel.yaml.clib-0.2.7.tar.gz", hash = "sha256:1f08fd5a2bea9c4180db71678e850b995d2a5f4537be0e94557668cf0f5f9497"},
+]
+
+[[package]]
+name = "semgrep"
+version = "1.41.0"
+description = "Lightweight static analysis for many languages. Find bug variants with patterns that look like source code."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "semgrep-1.41.0-cp37.cp38.cp39.cp310.cp311.py37.py38.py39.py310.py311-none-any.whl", hash = "sha256:bb1679826cb14a9444e5cfb5dcb1cd9c20071baf9ae541eed200076721362ad6"},
+ {file = "semgrep-1.41.0-cp37.cp38.cp39.cp310.cp311.py37.py38.py39.py310.py311-none-macosx_10_14_x86_64.whl", hash = "sha256:fbcc2ea85e828023f2da51dcf93684d09b95b062716f585587d27485b0af5c37"},
+ {file = "semgrep-1.41.0-cp37.cp38.cp39.cp310.cp311.py37.py38.py39.py310.py311-none-macosx_11_0_arm64.whl", hash = "sha256:0ebb2f992618f145c49e5c8de56ed7aec57819b9736128c3690a95213fa0da79"},
+ {file = "semgrep-1.41.0-cp37.cp38.cp39.cp310.cp311.py37.py38.py39.py310.py311-none-musllinux_1_0_aarch64.manylinux2014_aarch64.whl", hash = "sha256:551dc331001674d742afd80aa3b7e8a0ee5ede28f3502ca915338d6548e161d6"},
+ {file = "semgrep-1.41.0.tar.gz", hash = "sha256:d7231b5a65a77da14af9da66d95044ee41137b03da1380f6c646b1e2b60ef81d"},
+]
+
+[package.dependencies]
+attrs = ">=21.3"
+boltons = ">=21.0,<22.0"
+click = ">=8.1,<9.0"
+click-option-group = ">=0.5,<1.0"
+colorama = ">=0.4.0,<0.5.0"
+defusedxml = ">=0.7.1,<0.8.0"
+glom = ">=22.1,<23.0"
+jsonschema = ">=4.6,<5.0"
+packaging = ">=21.0"
+peewee = ">=3.14,<4.0"
+python-lsp-jsonrpc = ">=1.0.0,<1.1.0"
+requests = ">=2.22,<3.0"
+rich = ">=12.6.0"
+"ruamel.yaml" = ">=0.16.0,<0.18"
+tomli = ">=2.0.1,<2.1.0"
+typing-extensions = ">=4.2,<5.0"
+urllib3 = ">=1.26,<2.0"
+wcmatch = ">=8.3,<9.0"
+
+[package.extras]
+experiments = ["jsonnet (>=0.18,<1.0)"]
+
[[package]]
name = "setuptools"
version = "68.2.2"
@@ -1324,22 +1692,105 @@ files = [
{file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
]
+[[package]]
+name = "ujson"
+version = "5.8.0"
+description = "Ultra fast JSON encoder and decoder for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "ujson-5.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4511560d75b15ecb367eef561554959b9d49b6ec3b8d5634212f9fed74a6df1"},
+ {file = "ujson-5.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9399eaa5d1931a0ead49dce3ffacbea63f3177978588b956036bfe53cdf6af75"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4e7bb7eba0e1963f8b768f9c458ecb193e5bf6977090182e2b4f4408f35ac76"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40931d7c08c4ce99adc4b409ddb1bbb01635a950e81239c2382cfe24251b127a"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d53039d39de65360e924b511c7ca1a67b0975c34c015dd468fca492b11caa8f7"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bdf04c6af3852161be9613e458a1fb67327910391de8ffedb8332e60800147a2"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a70f776bda2e5072a086c02792c7863ba5833d565189e09fabbd04c8b4c3abba"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f26629ac531d712f93192c233a74888bc8b8212558bd7d04c349125f10199fcf"},
+ {file = "ujson-5.8.0-cp310-cp310-win32.whl", hash = "sha256:7ecc33b107ae88405aebdb8d82c13d6944be2331ebb04399134c03171509371a"},
+ {file = "ujson-5.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:3b27a8da7a080add559a3b73ec9ebd52e82cc4419f7c6fb7266e62439a055ed0"},
+ {file = "ujson-5.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:193349a998cd821483a25f5df30b44e8f495423840ee11b3b28df092ddfd0f7f"},
+ {file = "ujson-5.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ddeabbc78b2aed531f167d1e70387b151900bc856d61e9325fcdfefb2a51ad8"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ce24909a9c25062e60653073dd6d5e6ec9d6ad7ed6e0069450d5b673c854405"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a2a3c7620ebe43641e926a1062bc04e92dbe90d3501687957d71b4bdddaec4"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b852bdf920fe9f84e2a2c210cc45f1b64f763b4f7d01468b33f7791698e455e"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:20768961a6a706170497129960762ded9c89fb1c10db2989c56956b162e2a8a3"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0147d41e9fb5cd174207c4a2895c5e24813204499fd0839951d4c8784a23bf5"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e3673053b036fd161ae7a5a33358ccae6793ee89fd499000204676baafd7b3aa"},
+ {file = "ujson-5.8.0-cp311-cp311-win32.whl", hash = "sha256:a89cf3cd8bf33a37600431b7024a7ccf499db25f9f0b332947fbc79043aad879"},
+ {file = "ujson-5.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:3659deec9ab9eb19e8646932bfe6fe22730757c4addbe9d7d5544e879dc1b721"},
+ {file = "ujson-5.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:102bf31c56f59538cccdfec45649780ae00657e86247c07edac434cb14d5388c"},
+ {file = "ujson-5.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:299a312c3e85edee1178cb6453645217ba23b4e3186412677fa48e9a7f986de6"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2e385a7679b9088d7bc43a64811a7713cc7c33d032d020f757c54e7d41931ae"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad24ec130855d4430a682c7a60ca0bc158f8253ec81feed4073801f6b6cb681b"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16fde596d5e45bdf0d7de615346a102510ac8c405098e5595625015b0d4b5296"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6d230d870d1ce03df915e694dcfa3f4e8714369cce2346686dbe0bc8e3f135e7"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9571de0c53db5cbc265945e08f093f093af2c5a11e14772c72d8e37fceeedd08"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7cba16b26efe774c096a5e822e4f27097b7c81ed6fb5264a2b3f5fd8784bab30"},
+ {file = "ujson-5.8.0-cp312-cp312-win32.whl", hash = "sha256:48c7d373ff22366eecfa36a52b9b55b0ee5bd44c2b50e16084aa88b9de038916"},
+ {file = "ujson-5.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:5ac97b1e182d81cf395ded620528c59f4177eee024b4b39a50cdd7b720fdeec6"},
+ {file = "ujson-5.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2a64cc32bb4a436e5813b83f5aab0889927e5ea1788bf99b930fad853c5625cb"},
+ {file = "ujson-5.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e54578fa8838ddc722539a752adfce9372474114f8c127bb316db5392d942f8b"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9721cd112b5e4687cb4ade12a7b8af8b048d4991227ae8066d9c4b3a6642a582"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d9707e5aacf63fb919f6237d6490c4e0244c7f8d3dc2a0f84d7dec5db7cb54c"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0be81bae295f65a6896b0c9030b55a106fb2dec69ef877253a87bc7c9c5308f7"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae7f4725c344bf437e9b881019c558416fe84ad9c6b67426416c131ad577df67"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9ab282d67ef3097105552bf151438b551cc4bedb3f24d80fada830f2e132aeb9"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:94c7bd9880fa33fcf7f6d7f4cc032e2371adee3c5dba2922b918987141d1bf07"},
+ {file = "ujson-5.8.0-cp38-cp38-win32.whl", hash = "sha256:bf5737dbcfe0fa0ac8fa599eceafae86b376492c8f1e4b84e3adf765f03fb564"},
+ {file = "ujson-5.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:11da6bed916f9bfacf13f4fc6a9594abd62b2bb115acfb17a77b0f03bee4cfd5"},
+ {file = "ujson-5.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:69b3104a2603bab510497ceabc186ba40fef38ec731c0ccaa662e01ff94a985c"},
+ {file = "ujson-5.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9249fdefeb021e00b46025e77feed89cd91ffe9b3a49415239103fc1d5d9c29a"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2873d196725a8193f56dde527b322c4bc79ed97cd60f1d087826ac3290cf9207"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4dafa9010c366589f55afb0fd67084acd8added1a51251008f9ff2c3e44042"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a42baa647a50fa8bed53d4e242be61023bd37b93577f27f90ffe521ac9dc7a3"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f3554eaadffe416c6f543af442066afa6549edbc34fe6a7719818c3e72ebfe95"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fb87decf38cc82bcdea1d7511e73629e651bdec3a43ab40985167ab8449b769c"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:407d60eb942c318482bbfb1e66be093308bb11617d41c613e33b4ce5be789adc"},
+ {file = "ujson-5.8.0-cp39-cp39-win32.whl", hash = "sha256:0fe1b7edaf560ca6ab023f81cbeaf9946a240876a993b8c5a21a1c539171d903"},
+ {file = "ujson-5.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f9b63530a5392eb687baff3989d0fb5f45194ae5b1ca8276282fb647f8dcdb3"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:efeddf950fb15a832376c0c01d8d7713479fbeceaed1eaecb2665aa62c305aec"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d8283ac5d03e65f488530c43d6610134309085b71db4f675e9cf5dff96a8282"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb0142f6f10f57598655340a3b2c70ed4646cbe674191da195eb0985a9813b83"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07d459aca895eb17eb463b00441986b021b9312c6c8cc1d06880925c7f51009c"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d524a8c15cfc863705991d70bbec998456a42c405c291d0f84a74ad7f35c5109"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d6f84a7a175c75beecde53a624881ff618e9433045a69fcfb5e154b73cdaa377"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b748797131ac7b29826d1524db1cc366d2722ab7afacc2ce1287cdafccddbf1f"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e72ba76313d48a1a3a42e7dc9d1db32ea93fac782ad8dde6f8b13e35c229130"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f504117a39cb98abba4153bf0b46b4954cc5d62f6351a14660201500ba31fe7f"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8c91b6f4bf23f274af9002b128d133b735141e867109487d17e344d38b87d94"},
+ {file = "ujson-5.8.0.tar.gz", hash = "sha256:78e318def4ade898a461b3d92a79f9441e7e0e4d2ad5419abed4336d702c7425"},
+]
+
[[package]]
name = "urllib3"
-version = "2.0.5"
+version = "1.26.16"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [
- {file = "urllib3-2.0.5-py3-none-any.whl", hash = "sha256:ef16afa8ba34a1f989db38e1dbbe0c302e4289a47856990d0682e374563ce35e"},
- {file = "urllib3-2.0.5.tar.gz", hash = "sha256:13abf37382ea2ce6fb744d4dad67838eec857c9f4f57009891805e0b5e123594"},
+ {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"},
+ {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
-secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
-socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
-zstd = ["zstandard (>=0.18.0)"]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "wcmatch"
+version = "8.5"
+description = "Wildcard/glob file name matcher."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "wcmatch-8.5-py3-none-any.whl", hash = "sha256:14554e409b142edeefab901dc68ad570b30a72a8ab9a79106c5d5e9a6d241bd5"},
+ {file = "wcmatch-8.5.tar.gz", hash = "sha256:86c17572d0f75cbf3bcb1a18f3bf2f9e72b39a9c08c9b4a74e991e1882a8efb3"},
+]
+
+[package.dependencies]
+bracex = ">=2.1.1"
[[package]]
name = "wcwidth"
@@ -1467,4 +1918,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "f02c8f01a4cb4b735db54d90c470d933b2ce99b1932107ae5284f47443aafef6"
+content-hash = "87377d33bbe74e1ebc114878798d463d645015784475b584fd1504931400e0de"
diff --git a/pyproject.toml b/pyproject.toml
index 656c97e602..f27930ed78 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -27,6 +27,7 @@ wget = "^3.2"
huggingface-hub = "^0.16.4"
litellm = "^0.1.590"
pyyaml = "^6.0.1"
+semgrep = "^1.41.0"
[tool.poetry.dependencies.pyreadline3]
version = "^3.4.1"
markers = "sys_platform == 'win32'"
From ffe9b6fa5c1eda8e375df527f635bb0845337ed3 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Sat, 23 Sep 2023 07:40:43 -0400
Subject: [PATCH 24/49] fix: default to 'off' for scan_code attribute
---
interpreter/core/core.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index 4a90360459..56610c23d3 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -30,7 +30,7 @@ def __init__(self):
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
- self.scan_code = False
+ self.scan_code = "off"
# Conversation history
self.conversation_history = True
From 7379727d3cd31b4b9f52efe72254bbbfecde3cfd Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Sat, 23 Sep 2023 10:33:43 -0400
Subject: [PATCH 25/49] fix: toggle scan_code based on auto_run setting; update
--scan docs
---
README.md | 4 +++-
interpreter/cli/cli.py | 4 ++++
2 files changed, 7 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 60ee9f43fe..50be1caa7d 100644
--- a/README.md
+++ b/README.md
@@ -277,10 +277,12 @@ You can run `interpreter -y` or set `interpreter.auto_run = True` to bypass this
Open Interpreter provides a way to enable code scanning via [semgrep](https://semgrep.dev/) before executing code blocks by using the `--scan` flag.
- `--scan` allows you to control the behavior Open Interpreter uses to scan code with semgrep
- - `auto` scans all code automatically before executing it
+ - `auto` scans all code automatically before asking if you want to execute it
- `ask` asks you if you want to scan a code block before asking if you want to execute it
- `off` does not scan code before asking if you want to execute it
+**⚠️ Enabling `auto_run` disables code scanning.**
+
## How Does it Work?
Open Interpreter equips a [function-calling language model](https://platform.openai.com/docs/guides/gpt/function-calling) with an `exec()` function, which accepts a `language` (like "Python" or "JavaScript") and `code` to run.
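A minimal usage sketch of the behavior described above, assuming the package-level `interpreter` object exposes the `scan_code` attribute the same way the `--scan` flag sets it (the prompt is illustrative):

```python
# Sketch: configuring scanning programmatically instead of via `--scan`.
# Assumes the package-level `interpreter` object from the patches above.
import interpreter

interpreter.scan_code = "ask"  # "off", "ask", or "auto"
interpreter.auto_run = False   # per the warning above, auto_run disables scanning
interpreter.chat("Write a script that lists the largest files in ~/Downloads")
```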
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index fd42329e0a..9cd94b4d8d 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -140,6 +140,10 @@ def cli(interpreter):
if attr_value is not None and hasattr(interpreter, attr_name):
setattr(interpreter, attr_name, attr_value)
+ # if auto_run is enabled, we won't bother scanning code
+    if interpreter.auto_run and interpreter.scan_code != "off":
+ setattr(interpreter, "scan_code", "off")
+
# Default to CodeLlama if --local is on but --model is unset
if interpreter.local and args.model is None:
# This will cause the terminal_interface to walk the user through setting up a local LLM
From 286945717c5ec17d210eeeeea74e587268368e01 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Sun, 24 Sep 2023 22:28:49 -0400
Subject: [PATCH 26/49] revert: undo default and choices change to cli.py
This is being removed from this PR in favor of a standalone fix in #511
---
interpreter/cli/cli.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index 9cd94b4d8d..e819a7583b 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -93,10 +93,7 @@ def cli(interpreter):
if arg["type"] == bool:
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true', default=None)
else:
- choices = arg["choices"] if "choices" in arg else None
- default = arg["default"] if "default" in arg else None
-
- parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default)
+ parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"])
# Add special arguments
parser.add_argument('--config', dest='config', action='store_true', help='open config.yaml file in text editor')
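For context, a sketch of the reverted pattern: plain `argparse` validates `choices` at parse time and substitutes `default` when a flag is omitted, which is presumably what the standalone fix in #511 will reinstate (argument names here are illustrative, not the actual cli.py arguments):

```python
# Sketch of the choices/default handling removed above; names illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--scan",
    dest="scan_code",
    type=str,
    choices=["off", "ask", "auto"],  # argparse rejects any other value
    default=None,  # None signals "unset" so a config file value can win later
)

args = parser.parse_args(["--scan", "ask"])
assert args.scan_code == "ask"
```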
From 3c89b82df6ebccaf9e95e3d5e9c51740365fd334 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Tue, 26 Sep 2023 11:30:50 -0400
Subject: [PATCH 27/49] feat: clean up code scanning and convert to safe mode
---
README.md | 27 ++++++++---
interpreter/cli/cli.py | 12 ++---
.../languages/applescript.py | 1 +
.../code_interpreters/languages/html.py | 1 +
.../code_interpreters/languages/javascript.py | 1 +
.../code_interpreters/languages/python.py | 1 +
interpreter/code_interpreters/languages/r.py | 1 +
.../code_interpreters/languages/shell.py | 1 +
interpreter/core/core.py | 2 +-
.../terminal_interface/terminal_interface.py | 18 +++++--
interpreter/utils/scan_code.py | 48 ++++++++++++-------
interpreter/utils/temporary_file.py | 47 ++++++++++++++++++
12 files changed, 125 insertions(+), 35 deletions(-)
create mode 100644 interpreter/utils/temporary_file.py
diff --git a/README.md b/README.md
index 50be1caa7d..432e22fd9d 100644
--- a/README.md
+++ b/README.md
@@ -272,16 +272,29 @@ You can run `interpreter -y` or set `interpreter.auto_run = True` to bypass this
- Watch Open Interpreter like a self-driving car, and be prepared to end the process by closing your terminal.
- Consider running Open Interpreter in a restricted environment like Google Colab or Replit. These environments are more isolated, reducing the risks associated with executing arbitrary code.
-### Code Scanning
+### Safe Mode
-Open Interpreter provides a way to enable code scanning via [semgrep](https://semgrep.dev/) before executing code blocks by using the `--scan` flag.
+Open Interpreter provides a way to enable some safety measures, like disabling `auto_run` and scanning generated code with [semgrep](https://semgrep.dev/) before executing it, via the `-safety` CLI flag or the `safe_mode` option in your `config.yml`.
-- `--scan` allows you to control the behavior Open Interpreter uses to scan code with semgrep
- - `auto` scans all code automatically before asking if you want to execute it
- - `ask` asks you if you want to scan a code block before asking if you want to execute it
- - `off` does not scan code before asking if you want to execute it
+**⚠️ Enabling `safe_mode` disables `auto_run`.**
-**⚠️ Enabling `auto_run` disables code scanning.**
+It currently has three options:
+
+- `auto`: automatically applies safety features before asking if you want to execute a code block
+- `ask`: asks you if you want to apply safety features to a code block before asking if you want to execute it
+- `off`: does not apply safety features to any code block before asking if you want to execute it
+
+#### Safety Toolkit
+
+Open Interpreter currently includes the following features in Safe Mode.
+
+##### Code Scanning with Semgrep
+
+Open Interpreter allows you to scan generated code blocks for vulnerabilities using [semgrep](https://semgrep.dev/), which is available for free, without an account, and runs locally on your machine.
+
+If you have a Semgrep account, you can also login via the [Semgrep CLI](https://github.com/returntocorp/semgrep) and enable advanced features.
+
+Code scanning currently runs with `--config auto`.
## How Does it Work?
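A short sketch of enabling safe mode from Python, assuming the `safe_mode` attribute mirrors the CLI option added in this patch:

```python
# Sketch: enabling safe mode programmatically; per the README above,
# any non-"off" value also disables auto_run.
import interpreter

interpreter.safe_mode = "auto"  # scan every block before the run prompt
interpreter.chat("Fetch this URL and summarize the response")
```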
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index e819a7583b..1da4bde8d9 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -75,9 +75,9 @@
"type": str
},
{
- "name": "scan_code",
- "nickname": "scan",
- "help_text": "optionally scan code with semgrep",
+ "name": "safe_mode",
+ "nickname": "safe",
+ "help_text": "optionally enable safety mechanisms like code scanning; valid options are off, ask, and auto",
"type": str,
"default": "off",
"choices": ["off", "ask", "auto"]
@@ -137,9 +137,9 @@ def cli(interpreter):
if attr_value is not None and hasattr(interpreter, attr_name):
setattr(interpreter, attr_name, attr_value)
- # if auto_run is enabled, we won't bother scanning code
-    if interpreter.auto_run and interpreter.scan_code != "off":
- setattr(interpreter, "scan_code", "off")
+ # if safe_mode and auto_run are enabled, safe_mode disables auto_run
+    if interpreter.auto_run and interpreter.safe_mode != "off":
+ setattr(interpreter, "auto_run", False)
# Default to CodeLlama if --local is on but --model is unset
if interpreter.local and args.model is None:
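The precedence flip between this patch and PATCH 25 is easy to miss: previously `auto_run` silenced scanning, whereas now any non-`off` `safe_mode` wins and turns `auto_run` off. A standalone sketch of the new rule (this helper is illustrative, not part of the codebase):

```python
# Illustrative helper capturing the reconciliation rule above.
def reconcile_flags(auto_run: bool, safe_mode: str) -> tuple[bool, str]:
    if auto_run and safe_mode != "off":
        auto_run = False  # safety takes priority over convenience
    return auto_run, safe_mode

assert reconcile_flags(True, "ask") == (False, "ask")  # safe mode wins
assert reconcile_flags(True, "off") == (True, "off")   # auto_run untouched
```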
diff --git a/interpreter/code_interpreters/languages/applescript.py b/interpreter/code_interpreters/languages/applescript.py
index 58103a8f70..403c3e0fc3 100644
--- a/interpreter/code_interpreters/languages/applescript.py
+++ b/interpreter/code_interpreters/languages/applescript.py
@@ -3,6 +3,7 @@
class AppleScript(SubprocessCodeInterpreter):
file_extension = "applescript"
+ proper_name = "AppleScript"
def __init__(self):
super().__init__()
diff --git a/interpreter/code_interpreters/languages/html.py b/interpreter/code_interpreters/languages/html.py
index ff139aac66..965b38717b 100644
--- a/interpreter/code_interpreters/languages/html.py
+++ b/interpreter/code_interpreters/languages/html.py
@@ -5,6 +5,7 @@
class HTML(BaseCodeInterpreter):
file_extension = "html"
+ proper_name = "HTML"
def __init__(self):
super().__init__()
diff --git a/interpreter/code_interpreters/languages/javascript.py b/interpreter/code_interpreters/languages/javascript.py
index ad008e2664..d5e74ff824 100644
--- a/interpreter/code_interpreters/languages/javascript.py
+++ b/interpreter/code_interpreters/languages/javascript.py
@@ -3,6 +3,7 @@
class JavaScript(SubprocessCodeInterpreter):
file_extension = "js"
+ proper_name = "JavaScript"
def __init__(self):
super().__init__()
diff --git a/interpreter/code_interpreters/languages/python.py b/interpreter/code_interpreters/languages/python.py
index f36e0935b2..03d84781b5 100644
--- a/interpreter/code_interpreters/languages/python.py
+++ b/interpreter/code_interpreters/languages/python.py
@@ -5,6 +5,7 @@
class Python(SubprocessCodeInterpreter):
file_extension = "py"
+ proper_name = "Python"
def __init__(self):
super().__init__()
diff --git a/interpreter/code_interpreters/languages/r.py b/interpreter/code_interpreters/languages/r.py
index c769ae239e..16f51f93cf 100644
--- a/interpreter/code_interpreters/languages/r.py
+++ b/interpreter/code_interpreters/languages/r.py
@@ -3,6 +3,7 @@
class R(SubprocessCodeInterpreter):
file_extension = "r"
+ proper_name = "R"
def __init__(self):
super().__init__()
diff --git a/interpreter/code_interpreters/languages/shell.py b/interpreter/code_interpreters/languages/shell.py
index dbd1e9bf94..03ac6a13b5 100644
--- a/interpreter/code_interpreters/languages/shell.py
+++ b/interpreter/code_interpreters/languages/shell.py
@@ -5,6 +5,7 @@
class Shell(SubprocessCodeInterpreter):
file_extension = "sh"
+ proper_name = "Shell"
def __init__(self):
super().__init__()
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index 56610c23d3..cb604a5601 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -30,7 +30,7 @@ def __init__(self):
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
- self.scan_code = "off"
+ self.safe_mode = "off"
# Conversation history
self.conversation_history = True
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index b2123e9505..6707196059 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -12,10 +12,20 @@
def terminal_interface(interpreter, message):
if not interpreter.auto_run:
- display_markdown_message("""**Open Interpreter** will require approval before running code. Use `interpreter -y` to bypass this.
+ interpreter_intro_message = [
+ "**Open Interpreter** will require approval before running code."
+ ]
- Press `CTRL-C` to exit.
- """)
+ if interpreter.safe_mode != "off":
+ interpreter_intro_message.append(f"**Safe Mode**: {interpreter.safe_mode}")
+ else:
+ interpreter_intro_message.append(
+ "Use `interpreter -y` or set `auto_run: true` to bypass this."
+ )
+
+ interpreter_intro_message.append("Press `CTRL-C` to exit.")
+
+ display_markdown_message("\n\n".join(interpreter_intro_message))
active_block = None
@@ -94,7 +104,7 @@ def terminal_interface(interpreter, message):
if interpreter.scan_code == "auto":
should_scan_code = True
elif interpreter.scan_code == 'ask':
- response = input(" Would you like to scan this code? (y/n)\n\n ")
+ response = input(" Would you like to scan this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
if response.strip().lower() == "y":
diff --git a/interpreter/utils/scan_code.py b/interpreter/utils/scan_code.py
index 4fb36404f7..ba43f1f62a 100644
--- a/interpreter/utils/scan_code.py
+++ b/interpreter/utils/scan_code.py
@@ -2,10 +2,11 @@
import tempfile
import subprocess
+from .temporary_file import create_temporary_file, cleanup_temporary_file
from ..code_interpreters.language_map import language_map
-def get_extension(language_name):
+def get_language_file_extension(language_name):
"""
Get the file extension for a given language
"""
@@ -17,24 +18,31 @@ def get_extension(language_name):
return language
-def scan_code(code, language, self):
+def get_language_proper_name(language_name):
+ """
+ Get the proper name for a given language
+ """
+ language = language_map[language_name.lower()]
+
+ if language.proper_name:
+ return language.proper_name
+ else:
+ return language
+
+
+def scan_code(code, language, interpreter):
"""
Scan code with semgrep
"""
- # Create a temporary file
- with tempfile.NamedTemporaryFile(
- mode="w", delete=False, suffix=f".{get_extension(language)}"
- ) as f:
- f.write(code)
- temp_file_name = f.name
- f.close()
+ temp_file = create_temporary_file(
+ code, get_language_file_extension(language), verbose=interpreter.debug_mode
+ )
- temp_path = os.path.dirname(temp_file_name)
- file_name = os.path.basename(temp_file_name)
+ temp_path = os.path.dirname(temp_file)
+ file_name = os.path.basename(temp_file)
- if self.debug_mode:
- print(f"Created temporary file {temp_file_name}")
+ if interpreter.debug_mode:
print(f"Scanning {language} code in {file_name}")
print("---")
@@ -45,11 +53,18 @@ def scan_code(code, language, self):
# pinned to an old semgrep version that has issues with reading the semgrep registry
# while scanning a single file like the temporary one we generate
# if guarddog solves [#249](https://github.com/DataDog/guarddog/issues/249) we can change this approach a bit
- subprocess.run(
- f"cd {temp_path} && semgrep scan --config auto --dryrun {file_name}",
+ scan = subprocess.run(
+ f"cd {temp_path} && semgrep scan --config auto --quiet --error {file_name}",
shell=True,
)
+ if scan.returncode == 0:
+ language_name = get_language_proper_name(language)
+ print(
+ f" {'Code Scaner: ' if interpreter.safe_mode == 'auto' else ''}No issues were found in this {language_name} code."
+ )
+ print("")
+
# TODO: it would be great if we could capture any vulnerabilities identified by semgrep
# and add them to the conversation history
@@ -58,5 +73,4 @@ def scan_code(code, language, self):
print(e)
print("") # <- Aesthetic choice
- # clean up temporary file
- os.remove(temp_file_name)
+ cleanup_temporary_file(temp_file)
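The `returncode == 0` check above relies on semgrep's exit-code contract: with `--error`, semgrep exits non-zero when findings are reported. A minimal sketch of that contract in isolation (the file path is illustrative):

```python
# Sketch: semgrep's exit code with --error signals whether findings exist.
import subprocess

scan = subprocess.run(
    ["semgrep", "scan", "--config", "auto", "--quiet", "--error", "snippet.py"],
    capture_output=True,
    text=True,
)
if scan.returncode == 0:
    print("No issues were found in this code.")
else:
    print(scan.stdout)  # findings (or scan errors) reported by semgrep
```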
diff --git a/interpreter/utils/temporary_file.py b/interpreter/utils/temporary_file.py
new file mode 100644
index 0000000000..c72bf5f30a
--- /dev/null
+++ b/interpreter/utils/temporary_file.py
@@ -0,0 +1,47 @@
+import os
+import tempfile
+
+
+def cleanup_temporary_file(temp_file_name, verbose=False):
+ """
+ clean up temporary file
+ """
+
+ try:
+ # clean up temporary file
+ os.remove(temp_file_name)
+
+ if verbose:
+ print(f"Cleaning up temporary file {temp_file_name}")
+ print("---")
+
+ except Exception as e:
+ print(f"Could not clean up temporary file.")
+ print(e)
+ print("")
+
+
+def create_temporary_file(contents, extension=None, verbose=False):
+ """
+ create a temporary file with the given contents
+ """
+
+ try:
+ # Create a temporary file
+ with tempfile.NamedTemporaryFile(
+ mode="w", delete=False, suffix=f".{extension}" if extension else ""
+ ) as f:
+ f.write(contents)
+ temp_file_name = f.name
+ f.close()
+
+ if verbose:
+ print(f"Created temporary file {temp_file_name}")
+ print("---")
+
+ return temp_file_name
+
+ except Exception as e:
+ print(f"Could not create temporary file.")
+ print(e)
+ print("")
From ba930b8b7ef06fe6be62d7ef1b31c99c9542e338 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Tue, 26 Sep 2023 12:11:32 -0400
Subject: [PATCH 28/49] docs: fix naming of safe_mode flag in README
---
README.md | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/README.md b/README.md
index 432e22fd9d..ca3ba69731 100644
--- a/README.md
+++ b/README.md
@@ -274,15 +274,15 @@ You can run `interpreter -y` or set `interpreter.auto_run = True` to bypass this
### Safe Mode
-Open Intepreter provides a way to enable some safety measures, like disabling `auto_run` and scanning generated code with [semgrep](https://semgrep.dev/) before executing it by using the `-safety` CLI flag or the `safe_mode` option in your `config.yml`.
+Open Interpreter provides a way to enable some safety measures, like disabling `auto_run` and scanning generated code with [semgrep](https://semgrep.dev/) before executing it by using the `-safe` CLI flag or the `safe_mode` option in your `config.yml`.
**⚠️ Enabling `safe_mode` disables `auto_run`.**
-It currently has three options:
+Safe Mode currently has three options:
-- `auto`: automatically applies safety features before asking if you want to execute a code block
+- `off`: (_default_) does not apply safety features to any code block before asking if you want to execute it
- `ask`: asks you if you want to apply safety features to a code block before asking if you want to execute it
-- `off`: does not apply safety features to any code block before asking if you want to execute it
+- `auto`: automatically applies safety features before asking if you want to execute a code block
#### Safety Toolkit
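Read together, the three options boil down to a small decision about whether to scan before the usual execution prompt. A simplified sketch of that decision (not the project's exact control flow; the real conditional appears in `terminal_interface.py` in a later patch):

```python
# Simplified illustration of what the three safe_mode options imply.
def should_scan_code(safe_mode: str) -> bool:
    if safe_mode == "auto":
        return True   # always scan before asking whether to execute
    if safe_mode == "ask":
        answer = input(" Would you like to scan this code? (y/n)\n\n ")
        return answer.strip().lower() == "y"
    return False      # "off" (the default): never scan
```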
From efce2858ba110e5ddfbac2b64dd60315dd2705cb Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Tue, 26 Sep 2023 13:33:18 -0400
Subject: [PATCH 29/49] fix: pass debug_mode flag into file cleanup for code
scan
---
interpreter/utils/scan_code.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/utils/scan_code.py b/interpreter/utils/scan_code.py
index ba43f1f62a..c2f8cbee55 100644
--- a/interpreter/utils/scan_code.py
+++ b/interpreter/utils/scan_code.py
@@ -73,4 +73,4 @@ def scan_code(code, language, interpreter):
print(e)
print("") # <- Aesthetic choice
- cleanup_temporary_file(temp_file)
+ cleanup_temporary_file(temp_file, verbose=interpreter.debug_mode)
From 2653909d2d257f02a2dc02997a543b1d8c8ad14a Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Wed, 27 Sep 2023 00:10:54 -0400
Subject: [PATCH 30/49] fix: remove extra tempfile import from scan_code util
---
interpreter/utils/scan_code.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/interpreter/utils/scan_code.py b/interpreter/utils/scan_code.py
index c2f8cbee55..441892da25 100644
--- a/interpreter/utils/scan_code.py
+++ b/interpreter/utils/scan_code.py
@@ -1,5 +1,4 @@
import os
-import tempfile
import subprocess
from .temporary_file import create_temporary_file, cleanup_temporary_file
From 0b73235504c5cd907ce85d2669a0a556302f2d0a Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Wed, 27 Sep 2023 00:40:33 -0400
Subject: [PATCH 31/49] feat: add loading indicator for feedback
Also rename `scan_code` to `safe_mode` in the conditional
---
.../terminal_interface/terminal_interface.py | 7 +++--
interpreter/utils/scan_code.py | 11 ++++---
poetry.lock | 30 ++++++++++++++++++-
pyproject.toml | 1 +
4 files changed, 41 insertions(+), 8 deletions(-)
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index 6707196059..b6da2e3dd2 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -10,6 +10,7 @@
from ..utils.truncate_output import truncate_output
from ..utils.scan_code import scan_code
+
def terminal_interface(interpreter, message):
if not interpreter.auto_run:
interpreter_intro_message = [
@@ -100,10 +101,10 @@ def terminal_interface(interpreter, message):
should_scan_code = False
- if not interpreter.scan_code == "off":
- if interpreter.scan_code == "auto":
+ if not interpreter.safe_mode == "off":
+ if interpreter.safe_mode == "auto":
should_scan_code = True
- elif interpreter.scan_code == 'ask':
+ elif interpreter.safe_mode == 'ask':
response = input(" Would you like to scan this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
diff --git a/interpreter/utils/scan_code.py b/interpreter/utils/scan_code.py
index 441892da25..fa5db98431 100644
--- a/interpreter/utils/scan_code.py
+++ b/interpreter/utils/scan_code.py
@@ -1,5 +1,7 @@
import os
import subprocess
+from yaspin import yaspin
+from yaspin.spinners import Spinners
from .temporary_file import create_temporary_file, cleanup_temporary_file
from ..code_interpreters.language_map import language_map
@@ -52,10 +54,11 @@ def scan_code(code, language, interpreter):
# pinned to an old semgrep version that has issues with reading the semgrep registry
# while scanning a single file like the temporary one we generate
# if guarddog solves [#249](https://github.com/DataDog/guarddog/issues/249) we can change this approach a bit
- scan = subprocess.run(
- f"cd {temp_path} && semgrep scan --config auto --quiet --error {file_name}",
- shell=True,
- )
+ with yaspin(text=" Scanning code...").green.right.binary as loading:
+ scan = subprocess.run(
+ f"cd {temp_path} && semgrep scan --config auto --quiet --error {file_name}",
+ shell=True,
+ )
if scan.returncode == 0:
language_name = get_language_proper_name(language)
diff --git a/poetry.lock b/poetry.lock
index 46d6e7b060..e6fb980ed0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1476,6 +1476,20 @@ files = [
{file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"},
]
+[[package]]
+name = "termcolor"
+version = "2.3.0"
+description = "ANSI color formatting for output in terminal"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"},
+ {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"},
+]
+
+[package.extras]
+tests = ["pytest", "pytest-cov"]
+
[[package]]
name = "tiktoken"
version = "0.4.0"
@@ -1900,6 +1914,20 @@ files = [
idna = ">=2.0"
multidict = ">=4.0"
+[[package]]
+name = "yaspin"
+version = "3.0.1"
+description = "Yet Another Terminal Spinner"
+optional = false
+python-versions = ">=3.9,<4.0"
+files = [
+ {file = "yaspin-3.0.1-py3-none-any.whl", hash = "sha256:c4b5d2ca23ae664b87a5cd53401c5107cef12668a71d9ee5ea5536045f364121"},
+ {file = "yaspin-3.0.1.tar.gz", hash = "sha256:9c04aa69cce9be83e1ea3134a6712e749e6c0c9cd02599023713e6befd7bf369"},
+]
+
+[package.dependencies]
+termcolor = ">=2.3,<3.0"
+
[[package]]
name = "zipp"
version = "3.17.0"
@@ -1918,4 +1946,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "87377d33bbe74e1ebc114878798d463d645015784475b584fd1504931400e0de"
+content-hash = "8fc08ba2916be2552a008bbc3415f561de7d484a34816d7ac732095064506265"
diff --git a/pyproject.toml b/pyproject.toml
index f27930ed78..c874e7192a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,6 +28,7 @@ huggingface-hub = "^0.16.4"
litellm = "^0.1.590"
pyyaml = "^6.0.1"
semgrep = "^1.41.0"
+yaspin = "^3.0.1"
[tool.poetry.dependencies.pyreadline3]
version = "^3.4.1"
markers = "sys_platform == 'win32'"
From 2f84517e3d13b84581562ec6e91e24740a8857ed Mon Sep 17 00:00:00 2001
From: goalkeepr
Date: Thu, 28 Sep 2023 14:18:51 -0400
Subject: [PATCH 32/49] Skip wrap_in_trap on Windows
---
interpreter/code_interpreters/languages/shell.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/interpreter/code_interpreters/languages/shell.py b/interpreter/code_interpreters/languages/shell.py
index 5d77eacf6a..a2ce31ae61 100644
--- a/interpreter/code_interpreters/languages/shell.py
+++ b/interpreter/code_interpreters/languages/shell.py
@@ -39,7 +39,8 @@ def preprocess_shell(code):
code = add_active_line_prints(code)
# Wrap in a trap for errors
- code = wrap_in_trap(code)
+ if platform.system() != 'Windows':
+ code = wrap_in_trap(code)
# Add end command (we'll be listening for this so we know when it ends)
code += '\necho "## end_of_execution ##"'
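The guard makes sense because POSIX `trap` handlers have no equivalent in `cmd.exe` or PowerShell. A condensed sketch of the resulting flow, where the trap line is a simplified stand-in for `wrap_in_trap`:

```python
import platform

def preprocess_shell_sketch(code: str) -> str:
    # Only POSIX shells understand `trap`, so skip the wrapper on Windows.
    if platform.system() != "Windows":
        code = 'trap \'echo "## execution_error ##"\' ERR\n' + code
    # Append the sentinel the interpreter listens for.
    return code + '\necho "## end_of_execution ##"'

print(preprocess_shell_sketch("ls"))
```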
From 0ad4627c160f1a8ed419b76d38dd8edc7b558667 Mon Sep 17 00:00:00 2001
From: Eric allen
Date: Thu, 28 Sep 2023 17:58:00 -0400
Subject: [PATCH 33/49] fix: stop overwriting safe_mode config.yaml setting
with default in args
---
interpreter/cli/cli.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index 40e566a064..721757421c 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -79,7 +79,6 @@
"nickname": "safe",
"help_text": "optionally enable safety mechanisms like code scanning; valid options are off, ask, and auto",
"type": str,
- "default": "off",
"choices": ["off", "ask", "auto"]
}
]
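The one-line deletion matters because of precedence: with `"default": "off"` present, `args.safe_mode` is always populated, so the CLI value silently clobbers whatever `safe_mode` the user set in `config.yaml`. A self-contained illustration of the pattern (simplified, not the project's actual CLI wiring):

```python
import argparse

parser = argparse.ArgumentParser()
# No default: the attribute stays None unless the flag is actually passed.
parser.add_argument("--safe_mode", choices=["off", "ask", "auto"])
args = parser.parse_args([])  # simulate running with no flags

config = {"safe_mode": "ask"}  # value loaded from config.yaml
if args.safe_mode is not None:
    config["safe_mode"] = args.safe_mode  # only explicit flags override

print(config["safe_mode"])  # -> "ask" (the config value survives)
```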
From 3e9502f3094cbad4c28bc7790ed5b5777a95c6c9 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 16:37:56 -0700
Subject: [PATCH 34/49] Holding `--safe` docs for pip release
---
README.md | 24 ------------------------
1 file changed, 24 deletions(-)
diff --git a/README.md b/README.md
index ca3ba69731..1c6dca0cb0 100644
--- a/README.md
+++ b/README.md
@@ -272,30 +272,6 @@ You can run `interpreter -y` or set `interpreter.auto_run = True` to bypass this
- Watch Open Interpreter like a self-driving car, and be prepared to end the process by closing your terminal.
- Consider running Open Interpreter in a restricted environment like Google Colab or Replit. These environments are more isolated, reducing the risks associated with executing arbitrary code.
-### Safe Mode
-
-Open Interpreter provides a way to enable some safety measures, like disabling `auto_run` and scanning generated code with [semgrep](https://semgrep.dev/) before executing it by using the `-safe` CLI flag or the `safe_mode` option in your `config.yml`.
-
-**⚠️ Enabling `safe_mode` disables `auto_run`.**
-
-Safe Mode currently has three options:
-
-- `off`: (_default_) does not apply safety features to any code block before asking if you want to execute it
-- `ask`: asks you if you want to apply safety features to a code block before asking if you want to execute it
-- `auto`: automatically applies safety features before asking if you want to execute a code block
-
-#### Safety Toolkit
-
-Open Interpreter currently includes the following features in Safe Mode.
-
-##### Code Scanning with Semgrep
-
-Open Interpreter allows you to scan generated code blocks for vulnerabilities using [semgrep](https://semgrep.dev/), which is available for free, without an account, and runs locally on your machine.
-
-If you have a Semgrep account, you can also login via the [Semgrep CLI](https://github.com/returntocorp/semgrep) and enable advanced features.
-
-Code scanning currently runs with `--config auto`
-
## How Does it Work?
Open Interpreter equips a [function-calling language model](https://platform.openai.com/docs/guides/gpt/function-calling) with an `exec()` function, which accepts a `language` (like "Python" or "JavaScript") and `code` to run.
From ecf07c1d6d6beeb711381887236b97821fc92175 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 18:48:34 -0700
Subject: [PATCH 35/49] Fixed `%load` magic command
But I think we should deprecate it in favor of `--conversations`.
---
interpreter/terminal_interface/magic_commands.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/interpreter/terminal_interface/magic_commands.py b/interpreter/terminal_interface/magic_commands.py
index 8894991ec1..48780a9a92 100644
--- a/interpreter/terminal_interface/magic_commands.py
+++ b/interpreter/terminal_interface/magic_commands.py
@@ -96,7 +96,7 @@ def handle_load_message(self, json_path):
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'r') as f:
- self.load(json.load(f))
+ self.messages = json.load(f)
display_markdown_message(f"> messages json loaded from {os.path.abspath(json_path)}")
@@ -115,4 +115,4 @@ def handle_magic_command(self, user_input):
command = user_input.split(" ")[0]
arguments = user_input[len(command):].strip()
action = switch.get(command, default_handle) # Get the function from the dictionary, or default_handle if not found
- action(self, arguments) # Execute the function
\ No newline at end of file
+ action(self, arguments) # Execute the function
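The fix replaces a call to a nonexistent `load()` method with a plain assignment, which works because a saved conversation is just the `messages` list serialized as JSON. Roughly, with a hypothetical file name:

```python
import json

# Hypothetical saved conversation; entries follow the schema used elsewhere
# in this series, e.g. {"role": "user", "message": "..."}.
with open("my_conversation.json") as f:
    messages = json.load(f)

assert isinstance(messages, list)
# interpreter.messages = messages  # what handle_load_message now does
```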
From b8826fd60816e6c4928e42ba1638d62aef665367 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 20:07:39 -0700
Subject: [PATCH 36/49] Fixed first-message interruption error
---
interpreter/core/core.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index cb604a5601..0bcd3b8a42 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -94,11 +94,11 @@ def _streaming_chat(self, message=None, display=True):
self.messages.append({"role": "user", "message": message})
yield from self._respond()
- # Save conversation
+ # Save conversation if we've turned conversation_history on
if self.conversation_history:
# If it's the first message, set the conversation name
- if len([m for m in self.messages if m["role"] == "user"]) == 1:
+ if not self.conversation_filename:
first_few_words = "_".join(self.messages[0]["message"][:25].split(" ")[:-1])
for char in "<>:\"/\\|?*!": # Invalid characters for filenames
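For context, the naming code this hunk sits in derives a filename from the first user message. A worked example of that logic (the message value is illustrative):

```python
message = "Plot AAPL and META stock prices for 2023"

# Take the first 25 characters, keep only whole words, join with underscores.
first_few_words = "_".join(message[:25].split(" ")[:-1])

# Strip characters that are invalid in filenames.
for char in '<>:"/\\|?*!':
    first_few_words = first_few_words.replace(char, "")

print(first_few_words)  # -> Plot_AAPL_and_META_stock
```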
From 89b206629dcc6a839059fcda1524598fa2550736 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 20:10:16 -0700
Subject: [PATCH 37/49] Generalized API key error message
---
interpreter/core/respond.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/core/respond.py b/interpreter/core/respond.py
index 51d7b51400..06b4df1273 100644
--- a/interpreter/core/respond.py
+++ b/interpreter/core/respond.py
@@ -76,7 +76,7 @@ def respond(interpreter):
except Exception as e:
if 'auth' in str(e).lower() or 'api key' in str(e).lower():
output = traceback.format_exc()
- raise Exception(f"{output}\n\nThere might be an issue with your API key(s).\n\nTo reset your OPENAI_API_KEY (for example):\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here',\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\n\n")
+ raise Exception(f"{output}\n\nThere might be an issue with your API key(s).\n\nTo reset your API key (we'll use OPENAI_API_KEY for this example, but you may need to reset your ANTHROPIC_API_KEY, HUGGINGFACE_API_KEY, etc):\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here',\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\n\n")
else:
raise
From db25704e8d96f02bebbef6a6062a6db32f34b8c8 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 20:50:42 -0700
Subject: [PATCH 38/49] Better model validation, better config debugging
---
interpreter/core/core.py | 2 +-
interpreter/terminal_interface/validate_llm_settings.py | 3 ++-
interpreter/utils/get_config.py | 5 ++++-
3 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index 0bcd3b8a42..ebf9b74ea1 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -49,7 +49,7 @@ def __init__(self):
self._llm = None
# Load config defaults
- config = get_config()
+ config = get_config(debug_mode=self.debug_mode)
self.__dict__.update(config)
# Check for update
diff --git a/interpreter/terminal_interface/validate_llm_settings.py b/interpreter/terminal_interface/validate_llm_settings.py
index cf03150ab0..64fd5b4598 100644
--- a/interpreter/terminal_interface/validate_llm_settings.py
+++ b/interpreter/terminal_interface/validate_llm_settings.py
@@ -83,7 +83,8 @@ def validate_llm_settings(interpreter):
**Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows.
---""")
-
+
+ interpreter.api_key = response
time.sleep(2)
break
diff --git a/interpreter/utils/get_config.py b/interpreter/utils/get_config.py
index 5df3dd3d96..5c0e422a35 100644
--- a/interpreter/utils/get_config.py
+++ b/interpreter/utils/get_config.py
@@ -10,12 +10,15 @@
config_dir = appdirs.user_config_dir("Open Interpreter")
user_config_path = os.path.join(config_dir, config_filename)
-def get_config():
+def get_config(debug_mode=False):
if not os.path.exists(user_config_path):
# If user's config doesn't exist, copy the default config from the package
here = os.path.abspath(os.path.dirname(__file__))
parent_dir = os.path.dirname(here)
default_config_path = os.path.join(parent_dir, 'config.yaml')
+ if debug_mode:
+ print("user_config_path:", user_config_path)
+ print("default_config_path", default_config_path)
# Ensure the user-specific directory exists
os.makedirs(config_dir, exist_ok=True)
# Copying the file using shutil.copy
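The debug prints target the path logic, which is easy to misjudge because `appdirs` resolves a different base directory on each OS. For reference (output varies by platform):

```python
import os

import appdirs

config_dir = appdirs.user_config_dir("Open Interpreter")
user_config_path = os.path.join(config_dir, "config.yaml")
print(user_config_path)
# e.g. ~/.config/Open Interpreter/config.yaml on Linux,
# ~/Library/Application Support/Open Interpreter/config.yaml on macOS
```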
From 2b0343d936078c74ac87f08d616e12faeb477adc Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 21:04:05 -0700
Subject: [PATCH 39/49] Better config debugging
---
interpreter/core/core.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index ebf9b74ea1..0bcd3b8a42 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -49,7 +49,7 @@ def __init__(self):
self._llm = None
# Load config defaults
- config = get_config(debug_mode=self.debug_mode)
+ config = get_config()
self.__dict__.update(config)
# Check for update
From 2e98965465273ea092523a5bc243ed1698613deb Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 21:04:08 -0700
Subject: [PATCH 40/49] Better config debugging
---
interpreter/utils/get_config.py | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/interpreter/utils/get_config.py b/interpreter/utils/get_config.py
index 5c0e422a35..3e709ccf85 100644
--- a/interpreter/utils/get_config.py
+++ b/interpreter/utils/get_config.py
@@ -10,15 +10,17 @@
config_dir = appdirs.user_config_dir("Open Interpreter")
user_config_path = os.path.join(config_dir, config_filename)
-def get_config(debug_mode=False):
+def get_config():
if not os.path.exists(user_config_path):
# If user's config doesn't exist, copy the default config from the package
here = os.path.abspath(os.path.dirname(__file__))
parent_dir = os.path.dirname(here)
default_config_path = os.path.join(parent_dir, 'config.yaml')
- if debug_mode:
- print("user_config_path:", user_config_path)
- print("default_config_path", default_config_path)
+
+ # Temporarily testing config
+ print("user_config_path:", user_config_path)
+ print("default_config_path", default_config_path)
+
# Ensure the user-specific directory exists
os.makedirs(config_dir, exist_ok=True)
# Copying the file using shutil.copy
From 7b892ed74b93ae559070f7b307ac0c79666ed67d Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 21:08:49 -0700
Subject: [PATCH 41/49] Better config debugging
---
interpreter/utils/get_config.py | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/interpreter/utils/get_config.py b/interpreter/utils/get_config.py
index 3e709ccf85..69c57cf1a0 100644
--- a/interpreter/utils/get_config.py
+++ b/interpreter/utils/get_config.py
@@ -11,16 +11,18 @@
user_config_path = os.path.join(config_dir, config_filename)
def get_config():
+ # Temporarily testing config
+ print("user_config_path:", user_config_path)
+
if not os.path.exists(user_config_path):
# If user's config doesn't exist, copy the default config from the package
here = os.path.abspath(os.path.dirname(__file__))
parent_dir = os.path.dirname(here)
default_config_path = os.path.join(parent_dir, 'config.yaml')
-
+
# Temporarily testing config
- print("user_config_path:", user_config_path)
print("default_config_path", default_config_path)
-
+
# Ensure the user-specific directory exists
os.makedirs(config_dir, exist_ok=True)
# Copying the file using shutil.copy
From 7c83acd81d1e5e438ed8749c6187c937caa54a2b Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 21:12:02 -0700
Subject: [PATCH 42/49] Better --config
---
interpreter/cli/cli.py | 3 ++-
interpreter/utils/get_config.py | 7 -------
2 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index 721757421c..40e66895c7 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -111,7 +111,8 @@ def cli(interpreter):
# This should be pushed into an open_config.py util
# If --config is used, open the config.yaml file in the Open Interpreter folder of the user's config dir
if args.config:
- config_path = os.path.join(appdirs.user_config_dir(), 'Open Interpreter', 'config.yaml')
+ config_dir = appdirs.user_config_dir("Open Interpreter")
+ config_path = os.path.join(config_dir, 'config.yaml')
print(f"Opening `{config_path}`...")
# Use the default system editor to open the file
if platform.system() == 'Windows':
diff --git a/interpreter/utils/get_config.py b/interpreter/utils/get_config.py
index 69c57cf1a0..5df3dd3d96 100644
--- a/interpreter/utils/get_config.py
+++ b/interpreter/utils/get_config.py
@@ -11,18 +11,11 @@
user_config_path = os.path.join(config_dir, config_filename)
def get_config():
- # Temporarily testing config
- print("user_config_path:", user_config_path)
-
if not os.path.exists(user_config_path):
# If user's config doesn't exist, copy the default config from the package
here = os.path.abspath(os.path.dirname(__file__))
parent_dir = os.path.dirname(here)
default_config_path = os.path.join(parent_dir, 'config.yaml')
-
- # Temporarily testing config
- print("default_config_path", default_config_path)
-
# Ensure the user-specific directory exists
os.makedirs(config_dir, exist_ok=True)
# Copying the file using shutil.copy
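The `--config` fix is subtler than it looks: `appdirs.user_config_dir()` with no arguments returns only the base directory, and joining `'Open Interpreter'` on by hand can diverge from `user_config_dir("Open Interpreter")` on Windows, where appdirs may insert an author segment. Passing the app name keeps `cli.py` pointed at the same file `get_config.py` reads. A quick comparison sketch:

```python
import os

import appdirs

# Manual join: base dir plus app name appended by hand (the old behavior).
manual = os.path.join(appdirs.user_config_dir(), "Open Interpreter", "config.yaml")
# Letting appdirs build the app-specific dir, as cli.py now does.
resolved = os.path.join(appdirs.user_config_dir("Open Interpreter"), "config.yaml")

print(manual)
print(resolved)  # the two can differ on Windows
```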
From 4dfdab33f070b61e38aa3d33c7c06d28776e42f5 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 21:14:13 -0700
Subject: [PATCH 43/49] Cleaned up initial message
---
interpreter/terminal_interface/terminal_interface.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index b6da2e3dd2..fff048240d 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -21,7 +21,7 @@ def terminal_interface(interpreter, message):
interpreter_intro_message.append(f"**Safe Mode**: {interpreter.safe_mode}")
else:
interpreter_intro_message.append(
- "Use `interpreter -y` or set `auto_run: true` to bypass this."
+ "Use `interpreter -y` to bypass this."
)
interpreter_intro_message.append("Press `CTRL-C` to exit.")
From 7a3f54d688e46dabde58f698574f300791b1944a Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Thu, 28 Sep 2023 21:26:55 -0700
Subject: [PATCH 44/49] Generator Update Quick Fixes II
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index c874e7192a..e06c143526 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "open-interpreter"
packages = [
{include = "interpreter"},
]
-version = "0.1.6"
+version = "0.1.7"
description = "Let language models run code locally."
authors = ["Killian Lucas "]
readme = "README.md"
From 75486deac3cd21248de5060ce4e9e14444fbcd8c Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Fri, 29 Sep 2023 18:32:52 -0700
Subject: [PATCH 45/49] Added link to new documentation site
---
README.md | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index 1c6dca0cb0..eeb481990e 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
-● Open Interpreter
+
+![banner 2](https://github.com/KillianLucas/open-interpreter/assets/63927363/c1aec011-6d3c-4960-ab55-749326b8a7c9)
@@ -6,20 +7,16 @@
-
+
- Let language models run code on your computer.
+ Open Interpreter lets language models run code on your computer.
An open-source, locally running implementation of OpenAI's Code Interpreter.
-
Get early access to the desktop application.
+
Get early access to the desktop app | Read our new docs
-![poster](https://github.com/KillianLucas/open-interpreter/assets/63927363/08f0d493-956b-4d49-982e-67d4b20c4b56)
-
-
-
```shell
pip install open-interpreter
```
From fbe4b621662bf6d8e13cca2138a936dd5eedd51e Mon Sep 17 00:00:00 2001
From: BiliOrange <90908765+orangeZSCB@users.noreply.github.com>
Date: Sat, 30 Sep 2023 22:46:16 +0800
Subject: [PATCH 46/49] Update README_ZH.md
Improved the Chinese translation.
---
README_ZH.md | 26 ++++++++++++++++----------
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/README_ZH.md b/README_ZH.md
index ff4d089d47..38c8c74a43 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -8,7 +8,7 @@
让语言模型在您的计算机上运行代码。
- 在本地实现开源的OpenAI的代码解释器。
+ 在本地实现的开源OpenAI的代码解释器。
登记以提前获取Open Interpreter(开放解释器)桌面应用程序
@@ -57,7 +57,7 @@ pip install open-interpreter
### 终端
-安装后,简单地运行 `interpreter`:
+安装后,运行 `interpreter`:
```shell
interpreter
@@ -151,10 +151,14 @@ print(interpreter.system_message)
### 更改模型
-在 `gpt-3.5-turbo` 下,使用快速模式:
+Open Interpreter使用[LiteLLM](https://docs.litellm.ai/docs/providers/)连接到语言模型。
+
+您可以通过设置模型参数来更改模型:
```shell
-interpreter --fast
+interpreter --model gpt-3.5-turbo
+interpreter --model claude-2
+interpreter --model command-nightly
```
在 Python 环境下,您需要手动设置模型:
@@ -202,12 +206,14 @@ interpreter.azure_api_type = "azure"
为了帮助贡献者检查和调试 Open Interpreter,`--debug` 模式提供了详细的日志。
-您可以使用 `interpreter --debug` 来激活调试模式,或者直接在聊天时输入:
+您可以使用 `interpreter --debug` 来激活调试模式,或者直接在终端输入:
```shell
$ interpreter
...
-> %debug # <- 开启调试模式
+> %debug true <- 开启调试模式
+
+> %debug false <- 关闭调试模式
```
### 使用 .env 配置
@@ -230,13 +236,13 @@ INTERPRETER_CLI_USE_AZURE=False
由于生成的代码是在本地环境中运行的,因此会与文件和系统设置发生交互,从而可能导致本地数据丢失或安全风险等意想不到的结果。
-**⚠️ 所以在执行任何代码之前,Open Interpreter 都会要求用户确认是否运行。**
+**⚠️ 所以在执行任何代码之前,Open Interpreter 都会询问用户是否运行。**
您可以运行 `interpreter -y` 或设置 `interpreter.auto_run = True` 来绕过此确认,此时:
- 在运行请求修改本地文件或系统设置的命令时要谨慎。
-- 请像驾驶自动驾驶汽车一样留意 Open Interpreter,并随时做好通过关闭终端来结束进程的准备。
-- 考虑在 Google Colab 或 Replit 等受限环境中运行 Open Interpreter。主要是这些环境更加独立,从而降低执行任意代码导致出现问题的风险。
+- 请像驾驶自动驾驶汽车一直握着方向盘一样留意 Open Interpreter,并随时做好通过关闭终端来结束进程的准备。
+- 考虑在 Google Colab 或 Replit 等受限环境中运行 Open Interpreter的主要原因是这些环境更加独立,从而降低执行任意代码导致出现问题的风险。
## 它是如何工作的?
@@ -258,6 +264,6 @@ Open Interpreter 采用 MIT 许可授权。您可以使用、复制、修改、
> 拥有一个像您的指尖一样快速工作的初级程序员...可以使新的工作流程变得轻松高效,同时也能让新的受众群体享受到编程的好处。
>
-> — _OpenAI 的代码解释器发布_
+> — _OpenAI 的代码解释器发布宣传语_
From fa7d9372d779fc32a406a60f476acd9926698ef4 Mon Sep 17 00:00:00 2001
From: Alex Ishida
Date: Mon, 2 Oct 2023 09:43:40 +0900
Subject: [PATCH 47/49] Fix bug when trying to use local non-CodeLlama model
Python's split method combined with indexing into the second element of the list fails when the string pattern is not found, because the resulting list contains only one element.
---
interpreter/llm/setup_local_text_llm.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/interpreter/llm/setup_local_text_llm.py b/interpreter/llm/setup_local_text_llm.py
index 1c42687fc7..cf809ef532 100644
--- a/interpreter/llm/setup_local_text_llm.py
+++ b/interpreter/llm/setup_local_text_llm.py
@@ -23,7 +23,7 @@ def setup_local_text_llm(interpreter):
DEFAULT_CONTEXT_WINDOW = 2000
DEFAULT_MAX_TOKENS = 1000
- repo_id = interpreter.model.split("huggingface/")[1]
+ repo_id = interpreter.model.replace("huggingface/", "")
if "TheBloke/CodeLlama-" not in repo_id:
# ^ This means it was prob through the old --local, so we have already displayed this message.
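The failure mode in miniature: `split(sep)[1]` raises `IndexError` when the separator is absent, while `replace` simply returns the string unchanged:

```python
model = "TheBloke/CodeLlama-7B-GGUF"  # illustrative: no "huggingface/" prefix

try:
    repo_id = model.split("huggingface/")[1]
except IndexError:
    repo_id = None
print(repo_id)  # -> None: split() produced a one-element list

repo_id = model.replace("huggingface/", "")
print(repo_id)  # -> TheBloke/CodeLlama-7B-GGUF
```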
From 792cfd2c88aa010f9368cebbd2af95f0768d37d5 Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Wed, 4 Oct 2023 00:46:48 -0700
Subject: [PATCH 48/49] Clean Up Repo
---
.vscode/settings.json | 3 ---
1 file changed, 3 deletions(-)
delete mode 100644 .vscode/settings.json
diff --git a/.vscode/settings.json b/.vscode/settings.json
deleted file mode 100644
index 457f44d9ba..0000000000
--- a/.vscode/settings.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "python.analysis.typeCheckingMode": "basic"
-}
\ No newline at end of file
From 61a38c327a933d5a60fe897befa9d36e4379b9ec Mon Sep 17 00:00:00 2001
From: killian <63927363+KillianLucas@users.noreply.github.com>
Date: Wed, 4 Oct 2023 00:47:02 -0700
Subject: [PATCH 49/49] Clean Up Repo
---
.vscode/launch.json | 19 -------------------
1 file changed, 19 deletions(-)
delete mode 100644 .vscode/launch.json
diff --git a/.vscode/launch.json b/.vscode/launch.json
deleted file mode 100644
index 7285e8310d..0000000000
--- a/.vscode/launch.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- // Use IntelliSense to learn about possible attributes.
- // Hover to view descriptions of existing attributes.
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
- "version": "0.2.0",
- "configurations": [
- {
- "name": "Python",
- "type": "python",
- "request": "launch",
- "module": "poetry",
- "justMyCode": true,
- "args": [
- "run",
- "interpreter"
- ]
- }
- ]
-}
\ No newline at end of file