diff --git a/.vscode/launch.json b/.vscode/launch.json
deleted file mode 100644
index 7285e8310d..0000000000
--- a/.vscode/launch.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- // Use IntelliSense to learn about possible attributes.
- // Hover to view descriptions of existing attributes.
- // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
- "version": "0.2.0",
- "configurations": [
- {
- "name": "Python",
- "type": "python",
- "request": "launch",
- "module": "poetry",
- "justMyCode": true,
- "args": [
- "run",
- "interpreter"
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/README.md b/README.md
index 1c6dca0cb0..eeb481990e 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,5 @@
-
● Open Interpreter
+
+![banner 2](https://github.com/KillianLucas/open-interpreter/assets/63927363/c1aec011-6d3c-4960-ab55-749326b8a7c9)
@@ -6,20 +7,16 @@
-
+
- Let language models run code on your computer.
+ Open Interpreter lets language models run code on your computer.
An open-source, locally running implementation of OpenAI's Code Interpreter.
-
Get early access to the desktop application.
+
Get early access to the desktop app | Read our new docs
-![poster](https://github.com/KillianLucas/open-interpreter/assets/63927363/08f0d493-956b-4d49-982e-67d4b20c4b56)
-
-
-
```shell
pip install open-interpreter
```
diff --git a/README_ZH.md b/README_ZH.md
index ff4d089d47..38c8c74a43 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -8,7 +8,7 @@
让语言模型在您的计算机上运行代码。
- 在本地实现开源的OpenAI的代码解释器。
+ 在本地实现的开源OpenAI的代码解释器。
登记以提前获取Open Interpreter(开放解释器)桌面应用程序
@@ -57,7 +57,7 @@ pip install open-interpreter
### 终端
-安装后,简单地运行 `interpreter`:
+安装后,运行 `interpreter`:
```shell
interpreter
@@ -151,10 +151,14 @@ print(interpreter.system_message)
### 更改模型
-在 `gpt-3.5-turbo` 下,使用快速模式:
+Open Interpreter使用[LiteLLM](https://docs.litellm.ai/docs/providers/)连接到语言模型。
+
+您可以通过设置模型参数来更改模型:
```shell
-interpreter --fast
+interpreter --model gpt-3.5-turbo
+interpreter --model claude-2
+interpreter --model command-nightly
```
在 Python 环境下,您需要手动设置模型:
@@ -202,12 +206,14 @@ interpreter.azure_api_type = "azure"
为了帮助贡献者检查和调试 Open Interpreter,`--debug` 模式提供了详细的日志。
-您可以使用 `interpreter --debug` 来激活调试模式,或者直接在聊天时输入:
+您可以使用 `interpreter --debug` 来激活调试模式,或者直接在终端输入:
```shell
$ interpreter
...
-> %debug # <- 开启调试模式
+> %debug true # <- 开启调试模式
+
+> %debug false # <- 关闭调试模式
```
### 使用 .env 配置
@@ -230,13 +236,13 @@ INTERPRETER_CLI_USE_AZURE=False
由于生成的代码是在本地环境中运行的,因此会与文件和系统设置发生交互,从而可能导致本地数据丢失或安全风险等意想不到的结果。
-**⚠️ 所以在执行任何代码之前,Open Interpreter 都会要求用户确认是否运行。**
+**⚠️ 所以在执行任何代码之前,Open Interpreter 都会询问用户是否运行。**
您可以运行 `interpreter -y` 或设置 `interpreter.auto_run = True` 来绕过此确认,此时:
- 在运行请求修改本地文件或系统设置的命令时要谨慎。
-- 请像驾驶自动驾驶汽车一样留意 Open Interpreter,并随时做好通过关闭终端来结束进程的准备。
-- 考虑在 Google Colab 或 Replit 等受限环境中运行 Open Interpreter。主要是这些环境更加独立,从而降低执行任意代码导致出现问题的风险。
+- 请像驾驶自动驾驶汽车一直握着方向盘一样留意 Open Interpreter,并随时做好通过关闭终端来结束进程的准备。
+- 考虑在 Google Colab 或 Replit 等受限环境中运行 Open Interpreter 的主要原因是这些环境更加独立,从而降低执行任意代码导致出现问题的风险。
## 它是如何工作的?
@@ -258,6 +264,6 @@ Open Interpreter 采用 MIT 许可授权。您可以使用、复制、修改、
> 拥有一个像您的指尖一样快速工作的初级程序员...可以使新的工作流程变得轻松高效,同时也能让新的受众群体享受到编程的好处。
>
-> — _OpenAI 的代码解释器发布_
+> — _OpenAI 的代码解释器发布宣传语_
diff --git a/interpreter/cli/cli.py b/interpreter/cli/cli.py
index f49ec2be61..9544f13cb0 100644
--- a/interpreter/cli/cli.py
+++ b/interpreter/cli/cli.py
@@ -100,7 +100,10 @@ def cli():
if arg["type"] == bool:
parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action='store_true', default=None)
else:
- parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"])
+ choices = arg["choices"] if "choices" in arg else None
+ default = arg["default"] if "default" in arg else None
+
+ parser.add_argument(f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default)
# Add special arguments
parser.add_argument('--config', dest='config', action='store_true', help='open config.yaml file in text editor')
diff --git a/interpreter/core/core.py b/interpreter/core/core.py
index b4cbfa399c..dcf9da7c75 100644
--- a/interpreter/core/core.py
+++ b/interpreter/core/core.py
@@ -120,12 +120,10 @@ def _streaming_chat(self, message=None, display=True):
if not os.path.exists(self.conversation_history_path):
os.makedirs(self.conversation_history_path)
# Write or overwrite the file
-
with open(os.path.join(self.conversation_history_path, self.conversation_filename), 'w') as f:
json.dump(self.messages, f)
return
-
raise Exception("`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`.")
def _respond(self):
diff --git a/interpreter/llm/setup_local_text_llm.py b/interpreter/llm/setup_local_text_llm.py
index 2caa353c8f..3eca7854bc 100644
--- a/interpreter/llm/setup_local_text_llm.py
+++ b/interpreter/llm/setup_local_text_llm.py
@@ -23,7 +23,7 @@ def setup_local_text_llm(interpreter):
DEFAULT_CONTEXT_WINDOW = 2000
DEFAULT_MAX_TOKENS = 1000
- repo_id = interpreter.model.split("huggingface/")[1]
+ repo_id = interpreter.model.replace("huggingface/", "")
if "TheBloke/CodeLlama-" not in repo_id:
# ^ This means it was prob through the old --local, so we have already displayed this message.
diff --git a/interpreter/terminal_interface/terminal_interface.py b/interpreter/terminal_interface/terminal_interface.py
index 6edf09402c..5a502f07c7 100644
--- a/interpreter/terminal_interface/terminal_interface.py
+++ b/interpreter/terminal_interface/terminal_interface.py
@@ -100,10 +100,10 @@ def terminal_interface(interpreter, message):
should_scan_code = False
- if not interpreter.scan_code == "off":
- if interpreter.scan_code == "auto":
+ if not interpreter.safe_mode == "off":
+ if interpreter.safe_mode == "auto":
should_scan_code = True
- elif interpreter.scan_code == 'ask':
+ elif interpreter.safe_mode == 'ask':
response = input(" Would you like to scan this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
diff --git a/interpreter/utils/scan_code.py b/interpreter/utils/scan_code.py
index 441892da25..4148823e99 100644
--- a/interpreter/utils/scan_code.py
+++ b/interpreter/utils/scan_code.py
@@ -1,5 +1,7 @@
import os
import subprocess
+from yaspin import yaspin
+from yaspin.spinners import Spinners
from .temporary_file import create_temporary_file, cleanup_temporary_file
from ..code_interpreters.language_map import language_map
@@ -52,15 +54,16 @@ def scan_code(code, language, interpreter):
# pinned to an old semgrep version that has issues with reading the semgrep registry
# while scanning a single file like the temporary one we generate
# if guarddog solves [#249](https://github.com/DataDog/guarddog/issues/249) we can change this approach a bit
- scan = subprocess.run(
- f"cd {temp_path} && semgrep scan --config auto --quiet --error {file_name}",
- shell=True,
- )
+ with yaspin(text=" Scanning code...").green.right.binary as loading:
+ scan = subprocess.run(
+ f"cd {temp_path} && semgrep scan --config auto --quiet --error {file_name}",
+ shell=True,
+ )
if scan.returncode == 0:
language_name = get_language_proper_name(language)
print(
- f" {'Code Scaner: ' if interpreter.safe_mode == 'auto' else ''}No issues were found in this {language_name} code."
+ f" {'Code Scanner: ' if interpreter.safe_mode == 'auto' else ''} No issues were found in this {language_name} code."
)
print("")
diff --git a/poetry.lock b/poetry.lock
index 35a1d9bd15..6967cb7261 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -778,13 +778,13 @@ referencing = ">=0.28.0"
[[package]]
name = "litellm"
-version = "0.1.819"
+version = "0.1.820"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = ">=3.8,<4.0"
files = [
- {file = "litellm-0.1.819-py3-none-any.whl", hash = "sha256:4aefb79dc3a10bdf9d42ac69a0b3f07b1553b81e3a9bcb9add077a5e34f643a3"},
- {file = "litellm-0.1.819.tar.gz", hash = "sha256:d253825781236fd42c8c8d0199ac4453d6bad5d78e496f7581fd15b11468c959"},
+ {file = "litellm-0.1.820-py3-none-any.whl", hash = "sha256:bd50cbdfd52b97c3c0a6a2084f265aa7a6e17565fada1b4d9c46c68ab067a294"},
+ {file = "litellm-0.1.820.tar.gz", hash = "sha256:740a1336d614aa7f78106bdbbdcc7edfa65ecb5ef0fb1eed05179df293f98ead"},
]
[package.dependencies]
@@ -1617,6 +1617,20 @@ files = [
{file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"},
]
+[[package]]
+name = "termcolor"
+version = "2.3.0"
+description = "ANSI color formatting for output in terminal"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"},
+ {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"},
+]
+
+[package.extras]
+tests = ["pytest", "pytest-cov"]
+
[[package]]
name = "tiktoken"
version = "0.4.0"
@@ -2057,6 +2071,20 @@ files = [
idna = ">=2.0"
multidict = ">=4.0"
+[[package]]
+name = "yaspin"
+version = "3.0.1"
+description = "Yet Another Terminal Spinner"
+optional = false
+python-versions = ">=3.9,<4.0"
+files = [
+ {file = "yaspin-3.0.1-py3-none-any.whl", hash = "sha256:c4b5d2ca23ae664b87a5cd53401c5107cef12668a71d9ee5ea5536045f364121"},
+ {file = "yaspin-3.0.1.tar.gz", hash = "sha256:9c04aa69cce9be83e1ea3134a6712e749e6c0c9cd02599023713e6befd7bf369"},
+]
+
+[package.dependencies]
+termcolor = ">=2.3,<3.0"
+
[[package]]
name = "zipp"
version = "3.17.0"
@@ -2075,4 +2103,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "878bd3a8334d3178d84c396045acb5d5f9400fa273fa53f32d4eb98d01a031e0"
+content-hash = "ccf0aaa9ae52c3b6d45011661827e845956721114202585962d4b07098edf903"
diff --git a/pyproject.toml b/pyproject.toml
index 8fc799ea8d..46e69a6404 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,6 +29,7 @@ litellm = "^0.1.590"
pyyaml = "^6.0.1"
docker = "^6.1.3"
semgrep = "^1.41.0"
+yaspin = "^3.0.1"
[tool.poetry.dependencies.pyreadline3]
version = "^3.4.1"
markers = "sys_platform == 'win32'"
diff --git a/tests/test_interpreter.py b/tests/test_interpreter.py
index aa85ed1889..f011b9ce25 100644
--- a/tests/test_interpreter.py
+++ b/tests/test_interpreter.py
@@ -1,26 +1,29 @@
import interpreter
-interpreter.auto_run = True
-interpreter.model = "gpt-3.5-turbo"
-interpreter.temperature = 0
+
+interpreter_instance = interpreter.create_interpreter()
+
+interpreter_instance.auto_run = True
+interpreter_instance.model = "gpt-3.5-turbo"
+interpreter_instance.temperature = 0
def test_hello_world():
- interpreter.reset()
- messages = interpreter.chat("""Please reply with just the words "Hello, World!" and nothing else. Do not run code.""")
+ interpreter_instance.reset()
+ messages = interpreter_instance.chat("""Please reply with just the words "Hello, World!" and nothing else. Do not run code.""")
assert messages == [{'role': 'user', 'message': 'Please reply with just the words "Hello, World!" and nothing else. Do not run code.'}, {'role': 'assistant', 'message': 'Hello, World!'}]
def test_math():
- interpreter.reset()
- messages = interpreter.chat("""Please perform the calculation 27073*7397 then reply with just the integer answer with no commas or anything, nothing else.""")
+ interpreter_instance.reset()
+ messages = interpreter_instance.chat("""Please perform the calculation 27073*7397 then reply with just the integer answer with no commas or anything, nothing else.""")
assert "200258981" in messages[-1]["message"]
def test_delayed_exec():
- interpreter.reset()
- interpreter.chat("""Can you write a single block of code and run_code it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!""")
+ interpreter_instance.reset()
+ interpreter_instance.chat("""Can you write a single block of code and run_code it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!""")
def test_nested_loops_and_multiple_newlines():
- interpreter.reset()
- interpreter.chat("""Can you write a nested for loop in python and shell and run them? Also put 1-3 newlines between each line in the code. Thanks!""")
+ interpreter_instance.reset()
+ interpreter_instance.chat("""Can you write a nested for loop in python and shell and run them? Also put 1-3 newlines between each line in the code. Thanks!""")
def test_markdown():
- interpreter.reset()
- interpreter.chat("""Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw.""")
+ interpreter_instance.reset()
+ interpreter_instance.chat("""Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw.""")