From 1cc33893a9bc0cd00f1cbf7a5fedbe6c6c86b555 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 12:56:33 +0100 Subject: [PATCH 01/52] added demo with interface removed gradio (didnt work) --- app.py | 9 ++++-- chainlitdemo.py | 78 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 chainlitdemo.py diff --git a/app.py b/app.py index 352dbd5..0ad94b6 100644 --- a/app.py +++ b/app.py @@ -8,6 +8,7 @@ from azure.core.credentials import AzureKeyCredential from semantic_kernel.core_skills.text_skill import TextSkill from semantic_kernel.planning.basic_planner import BasicPlanner +from semantic_kernel import Kernel # Configure your credentials here bing_api_key = "ArXXXXdpJ" # Replace with your Bing API key @@ -28,8 +29,12 @@ def get_response(question, max_auto_reply): worker = sk_planner.create_user_agent("Worker", max_auto_reply=max_auto_reply, human_input="NEVER") + assistant = sk_planner.create_assistant_agent("Assistant") worker.initiate_chat(assistant, message=question) return worker.get_response() -iface = gr.Interface(fn=get_response, inputs=["text", "number"], outputs="text", inputs_label=["Question", "Max Auto Reply"]) -iface.launch() +if __name__ == "__main__": + question = input("Tonic's EasyAGI builds multi-agent systems that use Semantic-Kernel Plugins to automate your business operations ! Describe your problem in detail, then optionally bullet point a brief step by step way to solve it, then (or optionally) give a clear command or instruction to solve the issues above:") + max_auto_reply = int(input("Set a maximum number of autoreplies by entering a number with minimum 10: ")) + response = simulate_conversation(question, max_auto_reply) + print("Response:", response) \ No newline at end of file diff --git a/chainlitdemo.py b/chainlitdemo.py new file mode 100644 index 0000000..db1d22e --- /dev/null +++ b/chainlitdemo.py @@ -0,0 +1,78 @@ +import gradio as gr +from pydantic import BaseModel, ValidationError +from plugins.sk_bing_plugin import BingPlugin +from plugins.sk_web_pages_plugin import WebPagesPlugin +from planning.autogen_planner import AutoGenPlanner +from web_search_client import WebSearchClient +from web_search_client.models import SafeSearch +from azure.core.credentials import AzureKeyCredential +from semantic_kernel.core_skills.text_skill import TextSkill +from semantic_kernel.planning.basic_planner import BasicPlanner +from semantic_kernel import Kernel + +# Configure your credentials here +bing_api_key = "ArXXXXdpJ" # Replace with your Bing API key + +llm_config = { + "type": "openai", # "azure" or "openai" + "openai_api_key": "sk-rR5XXXXm", # OpenAI API Key + "azure_deployment": "", # Azure OpenAI deployment name + "azure_api_key": "", # Azure OpenAI API key in the Azure portal + "azure_endpoint": "" # Endpoint URL for Azure OpenAI, e.g. https://contoso.openai.azure.com/ +} +import semantic_kernel +kernel = semantic_kernel.Kernel() +kernel.import_skill(BingPlugin(bing_api_key)) +kernel.import_skill(WebPagesPlugin()) +sk_planner = AutoGenPlanner(kernel, llm_config) +assistant = sk_planner.create_assistant_agent("Assistant") + +def get_response(question, max_auto_reply): + worker = sk_planner.create_user_agent("Worker", max_auto_reply=max_auto_reply, human_input="NEVER") + assistant = sk_planner.create_assistant_agent("Assistant") + worker.initiate_chat(assistant, message=question) + return worker.get_response() + +class ChainlitAssistantAgent(AssistantAgent): + def __init__(self, name, sk_planner): + super().__init__(name) + self.sk_planner = sk_planner + + async def process_message(self, message): + # Use sk_planner to process the message and generate a response + response = self.sk_planner.create_assistant_agent("Assistant") + response.initiate_chat(self, message=message) + return response.get_response() + +class ChainlitUserProxyAgent(UserProxyAgent): + def __init__(self, name, assistant_agent): + super().__init__(name) + self.assistant_agent = assistant_agent + + async def get_human_input(self, prompt): + # Get input from the user via Chainlit interface + reply = await cl.ask_user_message(content=prompt) + return reply["content"].strip() + + async def send(self, message): + # Send the message to the assistant agent and get the response + response = await self.assistant_agent.process_message(message) + # Display the response in the Chainlit interface + cl.message(content=response, author=self.assistant_agent.name).send() + +# Initialize the agents +assistant_agent = ChainlitAssistantAgent("Assistant", sk_planner) +user_proxy_agent = ChainlitUserProxyAgent("User_Proxy", assistant_agent) + +# Chainlit Web Interface +@cl.page("/") +def main_page(): + with cl.form("user_input_form"): + question = cl.text_input("Describe your problem:") + submit_button = cl.button("Submit") + + if submit_button: + cl.run_async(user_proxy_agent.send(question)) + +if __name__ == "__main__": + cl.run(main_page) \ No newline at end of file From cc964016fd7ec088ab40f8647375735e223dabd2 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 14:02:09 +0100 Subject: [PATCH 02/52] added plugins.py files in plugins folder focus here --- plugins/clarifai.py | 0 plugins/seamlessondevice.py | 0 plugins/unstructured.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 plugins/clarifai.py create mode 100644 plugins/seamlessondevice.py create mode 100644 plugins/unstructured.py diff --git a/plugins/clarifai.py b/plugins/clarifai.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/seamlessondevice.py b/plugins/seamlessondevice.py new file mode 100644 index 0000000..e69de29 diff --git a/plugins/unstructured.py b/plugins/unstructured.py new file mode 100644 index 0000000..e69de29 From 45b6dc7786dde09569a4dd9c030f5962c9bec8a8 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 14:22:27 +0100 Subject: [PATCH 03/52] Create __init__.py --- __init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 __init__.py diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..e69de29 From a0ab9dd7f3b2cbf70402bacd2b71a334d03c0a0e Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 14:25:41 +0100 Subject: [PATCH 04/52] Update README.md --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index eaf4091..a1a01d9 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,10 @@ git clone https://github.com/Josephrp/LablabAutogen.git cd LablabAutogen ``` +```bash +set PATH=%PATH%;%CD% +``` + ```bash nano app.py ``` From eb129899b8935b5752c6a476f7b4cfe7f47899b6 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 15:14:29 +0100 Subject: [PATCH 05/52] making the demo work :-) always push on instinct --- README.md | 4 +++- app.py | 2 +- planning/autogen_planner.py | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a1a01d9..45e71d9 100644 --- a/README.md +++ b/README.md @@ -20,9 +20,11 @@ git clone https://github.com/Josephrp/LablabAutogen.git cd LablabAutogen ``` +On Windows Only : ```bash -set PATH=%PATH%;%CD% +set PATH=%PATH% ``` +then ```bash nano app.py diff --git a/app.py b/app.py index 0ad94b6..d3e625e 100644 --- a/app.py +++ b/app.py @@ -36,5 +36,5 @@ def get_response(question, max_auto_reply): if __name__ == "__main__": question = input("Tonic's EasyAGI builds multi-agent systems that use Semantic-Kernel Plugins to automate your business operations ! Describe your problem in detail, then optionally bullet point a brief step by step way to solve it, then (or optionally) give a clear command or instruction to solve the issues above:") max_auto_reply = int(input("Set a maximum number of autoreplies by entering a number with minimum 10: ")) - response = simulate_conversation(question, max_auto_reply) + response = get_response(question, max_auto_reply) print("Response:", response) \ No newline at end of file diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index 6160d05..280797e 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -1,6 +1,8 @@ +import autogen from typing import Optional, Dict, List import semantic_kernel, autogen import datetime +from autogen.agentchat.contrib.agent_builder import class AutoGenPlanner: """ From b770224e23d5796df7b157d66f82004382f6e2d1 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 15:17:44 +0100 Subject: [PATCH 06/52] still fixing this app --- app.py | 1 + planning/autogen_planner.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index d3e625e..7e3de28 100644 --- a/app.py +++ b/app.py @@ -1,4 +1,5 @@ import gradio as gr +import semantic_kernel , autogen from pydantic import BaseModel, ValidationError from plugins.sk_bing_plugin import BingPlugin from plugins.sk_web_pages_plugin import WebPagesPlugin diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index 280797e..d24fb3e 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -1,4 +1,3 @@ -import autogen from typing import Optional, Dict, List import semantic_kernel, autogen import datetime From 234983de3f3148c5d798a0192905fdcfa15ce45d Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 16:36:13 +0100 Subject: [PATCH 07/52] add code execution --- planning/autogen_planner.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index d24fb3e..f9bc0c4 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -25,6 +25,23 @@ def __init__(self, kernel: semantic_kernel.Kernel, llm_config: Dict = None, buil self.validate_llm_config() self.builder = self.create_builder() + def execute_code(self, code: str) -> str: + """ + Execute a Python code snippet and return the result. + Args: + code (str): The Python code to execute. + Returns: + str: The output of the executed code. + """ + ipython = get_ipython() + result = ipython.run_cell(code) + output = str(result.result) + if result.error_before_exec is not None: + output += f"\n{result.error_before_exec}" + if result.error_in_exec is not None: + output += f"\n{result.error_in_exec}" + return output + def create_builder(self) -> autogen.agentchat.contrib.agent_builder.AgentBuilder: """ Create an instance of AgentBuilder. From 8cf4d135f66bc77ffebf634fe5bf7074719fa2f4 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 16:46:35 +0100 Subject: [PATCH 08/52] Added Zilliz Cloud Retrieval Agent --- README.md | 2 ++ app.py | 6 +++++- planning/autogen_planner.py | 8 +++++++- plugins/zillizcloud.py | 31 +++++++++++++++++++++++++++++++ requirements.txt | 1 + 5 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 plugins/zillizcloud.py diff --git a/README.md b/README.md index 45e71d9..40b94ef 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,8 @@ pinned: false license: mit --- +**Check the plugins folder for new Semantic Kernel Plugins** + ## Use and Install on the Command Line ```bashh diff --git a/app.py b/app.py index 7e3de28..e23d5ab 100644 --- a/app.py +++ b/app.py @@ -21,7 +21,11 @@ "azure_api_key": "", # Azure OpenAI API key in the Azure portal "azure_endpoint": "" # Endpoint URL for Azure OpenAI, e.g. https://contoso.openai.azure.com/ } -import semantic_kernel +Zilliz_config = { + "host": "your_milvus_host", + "port": "your_milvus_port", + "collection_name": "your_collection_name" +} kernel = semantic_kernel.Kernel() kernel.import_skill(BingPlugin(bing_api_key)) kernel.import_skill(WebPagesPlugin()) diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index f9bc0c4..7f9fc1f 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -18,12 +18,13 @@ class AutoGenPlanner: f"Reply TERMINATE when the task is done." ) - def __init__(self, kernel: semantic_kernel.Kernel, llm_config: Dict = None, builder_config_path: str = None): + def __init__(self, kernel: semantic_kernel.Kernel, llm_config: Dict = None, builder_config_path: str = None, zilliz_config: Dict = None): self.kernel = kernel self.llm_config = llm_config or {} self.builder_config_path = builder_config_path self.validate_llm_config() self.builder = self.create_builder() + self.Zilliz_agent = ZillizRetrievalAgent(**zilliz_config) if zilliz_config else None def execute_code(self, code: str) -> str: """ @@ -79,6 +80,11 @@ def create_user_agent( max_consecutive_auto_reply=max_auto_reply, function_map=self.__get_function_map(), ) + + def perform_retrieval(self, query_vector: List[float], top_k: int = 10): + if not self.Zilliz_agent: + raise ValueError("Zilliz agent is not configured.") + return self.Zilliz_agent.search([query_vector], top_k, {"nprobe": 16}) def validate_llm_config(self): if self.llm_config.get("type") == "openai": diff --git a/plugins/zillizcloud.py b/plugins/zillizcloud.py new file mode 100644 index 0000000..7ddb281 --- /dev/null +++ b/plugins/zillizcloud.py @@ -0,0 +1,31 @@ +from pymilvus import connections, Collection +from typing import List, Dict, Any + +class ZillizRetrievalAgent: + def __init__(self, host: str, port: str, collection_name: str): + self.host = host + self.port = port + self.collection_name = collection_name + self.collection = None + self.connect() + + def connect(self): + connections.connect("default", host=self.host, port=self.port) + self.collection = Collection(name=self.collection_name) + + def search(self, vectors: List[List[float]], top_k: int, params: Dict[str, Any]): + """ + Search the Milvus collection for similar vectors. + Args: + vectors (List[List[float]]): The query vectors. + top_k (int): Number of top similar results to retrieve. + params (Dict[str, Any]): Additional search parameters. + Returns: + List of search results. + """ + search_params = { + "metric_type": "L2", + "params": params + } + results = self.collection.search(vectors, "embedding", search_params, top_k) + return results diff --git a/requirements.txt b/requirements.txt index aa36a33..71f8c0e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,3 +20,4 @@ microsoft-bing-customimagesearch microsoft-bing-customwebsearch msrest msrestazure +pymilvus From 90aa897f9f2e5c2d14006817cd156980630955fa Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 19:11:46 +0100 Subject: [PATCH 09/52] Update README.md --- README.md | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 40b94ef..3e9d52d 100644 --- a/README.md +++ b/README.md @@ -12,17 +12,23 @@ license: mit **Check the plugins folder for new Semantic Kernel Plugins** +## Before You Install and Use + +- sign up and get an api key for open ai +- sign up and set up a project in [zilliz cloud](https://cloud.zilliz.com/) + ## Use and Install on the Command Line -```bashh -git clone https://github.com/Josephrp/LablabAutogen.git +```bash +git clone https://github.com/Tonic-AI/EasyAGI ``` ```bash cd LablabAutogen ``` -On Windows Only : +If you're on Windows : + ```bash set PATH=%PATH% ``` @@ -32,7 +38,14 @@ then nano app.py ``` -edit line 17 " ```"openai_api_key": "YOUR_KEY_HERE", # OpenAI API Key``` with your key +edit line 19 " ```"openai_api_key": "sk-rR5XXXXm", # OpenAI API Key``` with your key +and edit lines 25-27 +```python + "host": "your_milvus_host", + "port": "your_milvus_port", + "collection_name": "your_collection_name" + ``` +with your zilliz cloud credentials. then press: From 69a098691dc5d56416fa5293f4cf91d909b6b058 Mon Sep 17 00:00:00 2001 From: Tonic Date: Sun, 10 Dec 2023 19:26:51 +0100 Subject: [PATCH 10/52] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3e9d52d..06a4e5a 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ git clone https://github.com/Tonic-AI/EasyAGI ``` ```bash -cd LablabAutogen +cd EasyAGI ``` If you're on Windows : From cdae2c70131e6ac806d19772f71cda6762d34405 Mon Sep 17 00:00:00 2001 From: Tonic Date: Sun, 10 Dec 2023 19:28:32 +0100 Subject: [PATCH 11/52] Update README.md --- README.md | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 06a4e5a..67a645b 100644 --- a/README.md +++ b/README.md @@ -38,13 +38,19 @@ then nano app.py ``` -edit line 19 " ```"openai_api_key": "sk-rR5XXXXm", # OpenAI API Key``` with your key +edit line 19 +```python +"openai_api_key": "sk-rR5XXXXm", # OpenAI API Key +``` +with your key and edit lines 25-27 + ```python "host": "your_milvus_host", "port": "your_milvus_port", "collection_name": "your_collection_name" - ``` +``` + with your zilliz cloud credentials. then press: From 2aaa3225daa318397e80272464e65b474aaff12e Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 19:49:59 +0100 Subject: [PATCH 12/52] remove empty plugins --- plugins/clarifai.py | 0 plugins/seamlessondevice.py | 0 plugins/unstructured.py | 0 3 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 plugins/clarifai.py delete mode 100644 plugins/seamlessondevice.py delete mode 100644 plugins/unstructured.py diff --git a/plugins/clarifai.py b/plugins/clarifai.py deleted file mode 100644 index e69de29..0000000 diff --git a/plugins/seamlessondevice.py b/plugins/seamlessondevice.py deleted file mode 100644 index e69de29..0000000 diff --git a/plugins/unstructured.py b/plugins/unstructured.py deleted file mode 100644 index e69de29..0000000 From 7a8ca3bcfcaefefa522b88b23ad87cafd996de6b Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 20:49:08 +0100 Subject: [PATCH 13/52] making things work --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 71f8c0e..715f7f8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,3 +21,4 @@ microsoft-bing-customwebsearch msrest msrestazure pymilvus +bs4 \ No newline at end of file From b14a7d7f07426cfa687b1e30ec3b7ed0cea9dd5e Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 20:52:47 +0100 Subject: [PATCH 14/52] Update zillizcloud.py --- plugins/zillizcloud.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/zillizcloud.py b/plugins/zillizcloud.py index 7ddb281..1bb1749 100644 --- a/plugins/zillizcloud.py +++ b/plugins/zillizcloud.py @@ -1,4 +1,5 @@ -from pymilvus import connections, Collection +import milvus +from milvus import Milvus, connections, Collection from typing import List, Dict, Any class ZillizRetrievalAgent: From f263f0c27abfe76068db5b72dd9b1a5d9fe54f6a Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 21:00:02 +0100 Subject: [PATCH 15/52] Update requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 715f7f8..0fbf0d8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ beautifulsoup4~=4.12 typing-extensions pydantic gradio +openai azure-common azure-core azure-search-documents From 467bb7e4dcee7310cb3d71820616a73dd9c3dd27 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 21:06:19 +0100 Subject: [PATCH 16/52] Update autogen_planner.py --- planning/autogen_planner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index 7f9fc1f..38aefca 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -1,7 +1,7 @@ from typing import Optional, Dict, List import semantic_kernel, autogen import datetime -from autogen.agentchat.contrib.agent_builder import +from autogen.agentchat.contrib.agent_builder import AgentBuilder class AutoGenPlanner: """ From 4fb38ea9996b6a1081df98eba20d5b1d334fdd6c Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 21:32:21 +0100 Subject: [PATCH 17/52] added agent_builder --- planning/agent_builder.py | 408 ++++++++++++++++++++++++++++++++++++ planning/autogen_planner.py | 3 +- 2 files changed, 410 insertions(+), 1 deletion(-) create mode 100644 planning/agent_builder.py diff --git a/planning/agent_builder.py b/planning/agent_builder.py new file mode 100644 index 0000000..dbe7576 --- /dev/null +++ b/planning/agent_builder.py @@ -0,0 +1,408 @@ +import autogen +import time +import subprocess as sp +import socket +import os +import json +import hashlib +from typing import Optional, List, Dict, Tuple, Union + + +class AgentBuilder: + """ + AgentBuilder can help user build an automatic task solving process powered by multi-agent system. + Specifically, our building pipeline includes initialize and build. + In build(), we prompt a gpt-4 model to create multiple participant agents, and specify whether + this task need programming to solve. + User can save the built agents' config by calling save(), and load the saved configs by load(), which can skip the + building process. + """ + + openai_server_name = "openai" + max_tokens = 945 + max_agents = 5 # maximum number of agents build manager can create. + + CODING_PROMPT = """Does the following task need programming (i.e., access external API or tool by coding) to solve, + or use program may help the following task become easier? + + TASK: {task} + + Hint: + # Answer only YES or NO. + """ + + AGENT_NAME_PROMPT = """To complete the following task, what positions/jobs should be set to maximize the efficiency? + + TASK: {task} + + Hint: + # Considering the effort, the position in this task should be no more then {max_agents}, less is better. + # Answer the name of those positions/jobs, separated by comma and use "_" instead of space. For example: Product_manager,Programmer + # Only return the list of positions. + """ + + AGENT_SYS_MSG_PROMPT = """Considering the following position and corresponding task: + + TASK: {task} + POSITION: {position} + + Modify the following position requirement, let it more suitable for the above task and position: + + REQUIREMENT: {default_sys_msg} + + Hint: + # The modified requirement should not contain the code interpreter skill. + # Coding skill is limited to Python. + # Your answer should omit the word "REQUIREMENT". + # Your should let them reply "TERMINATE" in the end when the task complete (user's need has been satisfied). + """ + + def __init__( + self, + config_path: Optional[str] = "OAI_CONFIG_LIST", + builder_model: Optional[str] = "gpt-4", + agent_model: Optional[str] = "gpt-4", + host: Optional[str] = "localhost", + endpoint_building_timeout: Optional[int] = 600, + ): + """ + Args: + config_path: path of the OpenAI api configs. + builder_model: specify a model as the backbone of build manager. + host: endpoint host. + endpoint_building_timeout: timeout for building up an endpoint server. + """ + self.host = host + self.builder_model = builder_model + self.agent_model = agent_model + self.config_path = config_path + self.endpoint_building_timeout = endpoint_building_timeout + + self.building_task: str = None + self.agent_configs: List[Dict] = [] + self.open_ports: List[str] = [] + self.agent_procs: Dict[str, Tuple[sp.Popen, str]] = {} + self.agent_procs_assign: Dict[str, Tuple[autogen.ConversableAgent, str]] = {} + self.cached_configs: Dict = {} + + for port in range(8000, 65535): + if self._is_port_open(host, port): + self.open_ports.append(str(port)) + + @staticmethod + def _is_port_open(host, port): + """Check if a tcp port is open.""" + try: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.settimeout(10) + s.bind((host, int(port))) + s.close() + return True + except OSError: + return False + + def _create_agent( + self, + agent_name: str, + model_name_or_hf_repo: str, + llm_config: dict, + system_message: Optional[str] = autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE, + use_oai_assistant: Optional[bool] = False, + world_size: Optional[int] = 1, + ) -> autogen.AssistantAgent: + """ + Create a group chat participant agent. + + If the agent rely on an open-source model, this function will automatically set up an endpoint for that agent. + The API address of that endpoint will be "localhost:{free port}". + + Args: + agent_name: the name that identify the function of the agent (e.g., Coder, Product Manager,...) + model_name_or_hf_repo: + llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...). + system_message: system prompt use to format an agent's behavior. + use_oai_assistant: use OpenAI assistant api instead of self-constructed agent. + world_size: the max size of parallel tensors (in most of the cases, this is identical to the amount of GPUs). + + Returns: + agent: a set-up agent. + """ + config_list = autogen.config_list_from_json(self.config_path, filter_dict={"model": [model_name_or_hf_repo]}) + if "gpt-" in model_name_or_hf_repo: + server_id = self.openai_server_name + else: + model_name = model_name_or_hf_repo.split("/")[-1] + server_id = f"{model_name}_{self.host}" + if self.agent_procs.get(server_id, None) is None: + while True: + port = self.open_ports.pop() + if self._is_port_open(self.host, port): + break + + # Use vLLM to set up a server with OpenAI API support. + agent_proc = sp.Popen( + [ + "python", + "-m", + "vllm.entrypoints.openai.api_server", + "--host", + f"{self.host}", + "--port", + f"{port}", + "--model", + f"{model_name_or_hf_repo}", + "--tensor-parallel-size", + f"{world_size}", + ], + stdout=sp.PIPE, + stderr=sp.STDOUT, + ) + timeout_start = time.time() + + while True: + server_stdout = agent_proc.stdout.readline() + if server_stdout != b"": + print(server_stdout) + timeout_end = time.time() + if b"running" in server_stdout: + print( + f"Running {model_name_or_hf_repo} on http://{self.host}:{port} " + f"with tensor parallel size {world_size}." + ) + break + elif b"address already in use" in server_stdout: + raise RuntimeError( + f"{self.host}:{port} already in use. Fail to set up the endpoint for " + f"{model_name_or_hf_repo} on {self.host}:{port}." + ) + elif timeout_end - timeout_start > self.endpoint_building_timeout: + raise RuntimeError( + f"Timeout exceed. Fail to set up the endpoint for " + f"{model_name_or_hf_repo} on {self.host}:{port}." + ) + self.agent_procs[server_id] = (agent_proc, port) + else: + port = self.agent_procs[server_id][1] + + config_list[0]["base_url"] = f"http://{self.host}:{port}/v1" + + current_config = llm_config.copy() + current_config.update( + {"config_list": config_list, "model": model_name_or_hf_repo, "max_tokens": self.max_tokens} + ) + if use_oai_assistant: + from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent + + agent = GPTAssistantAgent( + name=agent_name, + llm_config={**current_config, "assistant_id": None}, + instructions=system_message, + overwrite_instructions=False, + ) + else: + agent = autogen.AssistantAgent( + name=agent_name, llm_config=current_config.copy(), system_message=system_message + ) + self.agent_procs_assign[agent_name] = (agent, server_id) + return agent + + def clear_agent(self, agent_name: str, recycle_endpoint: Optional[bool] = True): + """ + Clear a specific agent by name. + + Args: + agent_name: the name of agent. + recycle_endpoint: trigger for recycle the endpoint server. If true, the endpoint will be recycled + when there is no agent depending on. + """ + _, server_id = self.agent_procs_assign[agent_name] + del self.agent_procs_assign[agent_name] + if recycle_endpoint: + if server_id == self.openai_server_name: + return + else: + for _, iter_sid in self.agent_procs_assign.values(): + if server_id == iter_sid: + return + self.agent_procs[server_id][0].terminate() + self.open_ports.append(server_id.split("_")[-1]) + print(f"Agent {agent_name} has been cleared.") + + def clear_all_agents(self, recycle_endpoint: Optional[bool] = True): + """ + Clear all cached agents. + """ + for agent_name in [agent_name for agent_name in self.agent_procs_assign.keys()]: + self.clear_agent(agent_name, recycle_endpoint) + print("All agents have been cleared.") + + def build( + self, + building_task: Optional[str] = None, + default_llm_config: Optional[Dict] = None, + coding: Optional[bool] = None, + cached_configs: Optional[Dict] = None, + use_oai_assistant: Optional[bool] = False, + code_execution_config: Optional[Dict] = None, + **kwargs, + ): + """ + Auto build agents based on the building task. + + Args: + building_task: instruction that helps build manager (gpt-4) to decide what agent should be built. + default_llm_config: specific configs for LLM (e.g., config_list, seed, temperature, ...). + coding: use to identify if the user proxy (a code interpreter) should be added. + cached_configs: previously saved agent configs. + use_oai_assistant: use OpenAI assistant api instead of self-constructed agent. + code_execution_config: specific configs for user proxy (e.g., last_n_messages, work_dir, ...). + """ + use_api = False + + if code_execution_config is None: + code_execution_config = { + "last_n_messages": 2, + "work_dir": "groupchat", + "use_docker": False, + "timeout": 60, + } + + if cached_configs is None: + use_api = True + agent_configs = [] + self.building_task = building_task + else: + self.building_task = building_task = cached_configs["building_task"] + default_llm_config = cached_configs["default_llm_config"] + coding = cached_configs["coding"] + agent_configs = cached_configs["agent_configs"] + + if use_api: + config_list = autogen.config_list_from_json(self.config_path, filter_dict={"model": [self.builder_model]}) + build_manager = autogen.OpenAIWrapper(config_list=config_list) + + print("Generating agents...") + resp_agent_name = ( + build_manager.create( + messages=[ + { + "role": "user", + "content": self.AGENT_NAME_PROMPT.format(task=building_task, max_agents=self.max_agents), + } + ] + ) + .choices[0] + .message.content + ) + agent_name_list = resp_agent_name.split(",") + print(f"{resp_agent_name} are generated.") + + agent_sys_msg_list = [] + for name in agent_name_list: + print(f"Preparing configuration for {name}...") + resp_agent_sys_msg = ( + build_manager.create( + messages=[ + { + "role": "user", + "content": self.AGENT_SYS_MSG_PROMPT.format( + task=building_task, + position=name, + default_sys_msg=autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE, + ), + } + ] + ) + .choices[0] + .message.content + ) + agent_sys_msg_list.append(resp_agent_sys_msg) + + for i in range(len(agent_name_list)): + agent_configs.append( + {"name": agent_name_list[i], "model": self.agent_model, "system_message": agent_sys_msg_list[i]} + ) + + if coding is None: + resp = ( + build_manager.create( + messages=[{"role": "user", "content": self.CODING_PROMPT.format(task=building_task)}] + ) + .choices[0] + .message.content + ) + coding = True if resp == "YES" else False + + for config in agent_configs: + print(f"Creating agent {config['name']} with backbone {config['model']}...") + self._create_agent( + config["name"], + config["model"], + default_llm_config, + system_message=config["system_message"], + use_oai_assistant=use_oai_assistant, + **kwargs, + ) + agent_list = [agent_config[0] for agent_config in self.agent_procs_assign.values()] + + if coding is True: + print("Adding user console proxy...") + agent_list = [ + autogen.UserProxyAgent( + name="User_console_and_Python_code_interpreter", + is_termination_msg=lambda x: "TERMINATE" in x.get("content"), + system_message="User console with a python code interpreter interface.", + code_execution_config=code_execution_config, + human_input_mode="NEVER", + ) + ] + agent_list + + self.cached_configs.update( + { + "building_task": building_task, + "agent_configs": agent_configs, + "coding": coding, + "default_llm_config": default_llm_config, + "code_execution_config": code_execution_config, + } + ) + + return agent_list, self.cached_configs.copy() + + def save(self, filepath: Optional[str] = None) -> str: + """ + Save building configs. If the filepath is not specific, this function will create a filename by encrypt the + building_task string by md5 with "save_config_" prefix, and save config to the local path. + + Args: + filepath: save path. + + Return: + filepath: path save. + """ + if filepath is None: + filepath = f'./save_config_{hashlib.md5(self.building_task.encode("utf-8")).hexdigest()}.json' + with open(filepath, "w") as save_file: + json.dump(self.cached_configs, save_file, indent=4) + print(f"Building config saved to {filepath}") + + return filepath + + def load( + self, + filepath: str, + **kwargs, + ): + """ + Load building configs and call the build function to complete building without calling online LLMs' api. + + Args: + filepath: filepath for the save config. + """ + try: + print(f"Loding config from {filepath}") + cached_configs = json.load(open(filepath)) + except FileNotFoundError: + raise FileNotFoundError(f"Config file {filepath} does not exist.") + + return self.build(cached_configs=cached_configs, **kwargs) \ No newline at end of file diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index 38aefca..a57c515 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -1,7 +1,8 @@ from typing import Optional, Dict, List import semantic_kernel, autogen import datetime -from autogen.agentchat.contrib.agent_builder import AgentBuilder +import autogen +from .agent_builder import AgentBuilder class AutoGenPlanner: """ From 2732e3ad571e1725615fad61e5a7e1316be40413 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 22:42:57 +0100 Subject: [PATCH 18/52] small updates --- OAI_CONFIG.json | 24 ++++++++++++++++++++ README.md | 44 ++++++++++++++++++++++++++++++------- planning/autogen_planner.py | 6 ++--- 3 files changed, 63 insertions(+), 11 deletions(-) create mode 100644 OAI_CONFIG.json diff --git a/OAI_CONFIG.json b/OAI_CONFIG.json new file mode 100644 index 0000000..63de2dd --- /dev/null +++ b/OAI_CONFIG.json @@ -0,0 +1,24 @@ +[ + { + "model": "gpt-4", + "api_key": "" + }, + { + "model": "gpt-4", + "api_key": "" + }, + { + "model": "gpt-4", + "api_key": "", + "api_base": "", + "api_type": "azure", + "api_version": "2023-07-01-preview" + }, + { + "model": "gpt-3.5-turbo", + "api_key": "", + "api_base": "", + "api_type": "azure", + "api_version": "2023-07-01-preview" + } +] \ No newline at end of file diff --git a/README.md b/README.md index 67a645b..98c79ad 100644 --- a/README.md +++ b/README.md @@ -35,14 +35,43 @@ set PATH=%PATH% then ```bash -nano app.py +nano OAI_CONFIG.json ``` -edit line 19 -```python -"openai_api_key": "sk-rR5XXXXm", # OpenAI API Key +enter your keys into the space provided, eg: +```json + { + "model": "gpt-4", + "api_key": "" + }, + { + "model": "gpt-4", + "api_key": "", + "api_base": "", + "api_type": "azure", + "api_version": "2023-07-01-preview" + } ``` -with your key +with your keys or Azure OpenAI deployments + +then press: + +```nano + control + x +``` + +Write : + +```nano +Y +``` + +to save then run + +```bash +nano app.py +``` + and edit lines 25-27 ```python @@ -58,11 +87,10 @@ then press: ```nano control + x ``` - Write : ```nano -Y +y ``` to save then type : @@ -76,4 +104,4 @@ and finally : ```bash python app.py ``` -to run. +to run. \ No newline at end of file diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index a57c515..860b02d 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -44,14 +44,14 @@ def execute_code(self, code: str) -> str: output += f"\n{result.error_in_exec}" return output - def create_builder(self) -> autogen.agentchat.contrib.agent_builder.AgentBuilder: + def create_builder(self) -> AgentBuilder: """ Create an instance of AgentBuilder. """ if not self.builder_config_path: raise ValueError("Builder config path is required to create AgentBuilder.") - return autogen.agentchat.contrib.agent_builder.AgentBuilder( - config_path=self.builder_config_path, + return AgentBuilder( + config_path= '', builder_model='gpt-4-1106-preview', agent_model='gpt-4-1106-preview' ) From 16552902e884ebcc70ff5309d50719bed3bfdf84 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 22:44:07 +0100 Subject: [PATCH 19/52] rename OAI_CONFIG_LIST --- OAI_CONFIG.json => OAI_CONFIG_LIST.json | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename OAI_CONFIG.json => OAI_CONFIG_LIST.json (100%) diff --git a/OAI_CONFIG.json b/OAI_CONFIG_LIST.json similarity index 100% rename from OAI_CONFIG.json rename to OAI_CONFIG_LIST.json From 8e5c763699a93b4b9d46860d774f8e8ea1fcd899 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 22:49:44 +0100 Subject: [PATCH 20/52] updating llm config method --- OAI_CONFIG_LIST.json | 6 +++++- app.py | 23 +++++++++++++++++------ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/OAI_CONFIG_LIST.json b/OAI_CONFIG_LIST.json index 63de2dd..2c8627c 100644 --- a/OAI_CONFIG_LIST.json +++ b/OAI_CONFIG_LIST.json @@ -4,7 +4,11 @@ "api_key": "" }, { - "model": "gpt-4", + "model": "gpt-4-1106-preview", + "api_key": "" + }, + { + "model": "gpt-3.5-turbo-16k", "api_key": "" }, { diff --git a/app.py b/app.py index e23d5ab..6a39f0e 100644 --- a/app.py +++ b/app.py @@ -14,13 +14,24 @@ # Configure your credentials here bing_api_key = "ArXXXXdpJ" # Replace with your Bing API key -llm_config = { - "type": "openai", # "azure" or "openai" - "openai_api_key": "sk-rR5XXXXm", # OpenAI API Key - "azure_deployment": "", # Azure OpenAI deployment name - "azure_api_key": "", # Azure OpenAI API key in the Azure portal - "azure_endpoint": "" # Endpoint URL for Azure OpenAI, e.g. https://contoso.openai.azure.com/ +# llm_config = { +# "type": "openai", # "azure" or "openai" +# "openai_api_key": "sk-rR5XXXXm", # OpenAI API Key +# "azure_deployment": "", # Azure OpenAI deployment name +# "azure_api_key": "", # Azure OpenAI API key in the Azure portal +# "azure_endpoint": "" # Endpoint URL for Azure OpenAI, e.g. https://contoso.openai.azure.com/ } +llm_config = autogen.config_list_from_json( + env_or_file="OAI_CONFIG_LIST.json", + filter_dict={ + "model": { + "gpt-4", + "gpt-3.5-turbo-16k", + "gpt-4-1106-preview" + } + } +) + Zilliz_config = { "host": "your_milvus_host", "port": "your_milvus_port", From e55daf98395a10cf9ee8b92a2dbaf88d09bd71db Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 22:55:56 +0100 Subject: [PATCH 21/52] update --- app.py | 11 ++++++++++- planning/agent_builder.py | 2 +- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/app.py b/app.py index 6a39f0e..fbcb9f4 100644 --- a/app.py +++ b/app.py @@ -32,6 +32,15 @@ } ) +builder_config_path = autogen.config_list_from_json( + env_or_file="OAI_CONFIG_LIST.json", + filter_dict={ + "model": { + "gpt-4-1106-preview" + } + } +) + Zilliz_config = { "host": "your_milvus_host", "port": "your_milvus_port", @@ -40,7 +49,7 @@ kernel = semantic_kernel.Kernel() kernel.import_skill(BingPlugin(bing_api_key)) kernel.import_skill(WebPagesPlugin()) -sk_planner = AutoGenPlanner(kernel, llm_config) +sk_planner = AutoGenPlanner(kernel, llm_config, builder_config_path) assistant = sk_planner.create_assistant_agent("Assistant") def get_response(question, max_auto_reply): diff --git a/planning/agent_builder.py b/planning/agent_builder.py index dbe7576..f320dbf 100644 --- a/planning/agent_builder.py +++ b/planning/agent_builder.py @@ -59,7 +59,7 @@ class AgentBuilder: def __init__( self, - config_path: Optional[str] = "OAI_CONFIG_LIST", + config_path: Optional[str] = "OAI_CONFIG_LIST.json", builder_model: Optional[str] = "gpt-4", agent_model: Optional[str] = "gpt-4", host: Optional[str] = "localhost", From a5011a0ab22fec476a5e6e7c33dcce435cb09aa1 Mon Sep 17 00:00:00 2001 From: Tonic Date: Sun, 10 Dec 2023 23:03:55 +0100 Subject: [PATCH 22/52] Update README.md --- README.md | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 98c79ad..ec671cb 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,9 @@ license: mit - sign up and get an api key for open ai - sign up and set up a project in [zilliz cloud](https://cloud.zilliz.com/) -## Use and Install on the Command Line +## Use and Install + +on the command line : ```bash git clone https://github.com/Tonic-AI/EasyAGI @@ -32,13 +34,14 @@ If you're on Windows : ```bash set PATH=%PATH% ``` -then +then edit the OAI_CONFIG_LIST file or on the command line: ```bash -nano OAI_CONFIG.json +nano OAI_CONFIG_LIST.json ``` enter your keys into the space provided, eg: + ```json { "model": "gpt-4", @@ -54,7 +57,7 @@ enter your keys into the space provided, eg: ``` with your keys or Azure OpenAI deployments -then press: +on the command line , press: ```nano control + x @@ -72,7 +75,7 @@ to save then run nano app.py ``` -and edit lines 25-27 +and edit lines 25-27 of app.py ```python "host": "your_milvus_host", @@ -82,7 +85,9 @@ and edit lines 25-27 with your zilliz cloud credentials. -then press: +and line 15 with your Bing! api key then save. + +or if you're on the command line press: ```nano control + x @@ -93,7 +98,9 @@ Write : y ``` -to save then type : +to save. + +then type the following in your command line ```bash pip install -r requirements.txt @@ -104,4 +111,4 @@ and finally : ```bash python app.py ``` -to run. \ No newline at end of file +to run. or install and run the application inside your compiler - like VS Code. From 52fb380d07ee683d8f46ebb91b4024b4ea6cf833 Mon Sep 17 00:00:00 2001 From: Tonic Date: Sun, 10 Dec 2023 23:18:17 +0100 Subject: [PATCH 23/52] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ec671cb..58f5797 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ git clone https://github.com/Tonic-AI/EasyAGI cd EasyAGI ``` -If you're on Windows : +If you're on Windows run the following command and edit the files below using notepad or VSCode and save them accordingly. ```bash set PATH=%PATH% From 33b818983cf5a5f3f8c4d9dc54dae7f950e080a5 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 23:44:42 +0100 Subject: [PATCH 24/52] Update app.py --- app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app.py b/app.py index fbcb9f4..fc8d618 100644 --- a/app.py +++ b/app.py @@ -20,7 +20,7 @@ # "azure_deployment": "", # Azure OpenAI deployment name # "azure_api_key": "", # Azure OpenAI API key in the Azure portal # "azure_endpoint": "" # Endpoint URL for Azure OpenAI, e.g. https://contoso.openai.azure.com/ -} +#} llm_config = autogen.config_list_from_json( env_or_file="OAI_CONFIG_LIST.json", filter_dict={ From 3792abb18b2e66a62037a991865695835bd5b0d4 Mon Sep 17 00:00:00 2001 From: tonic Date: Sun, 10 Dec 2023 23:58:39 +0100 Subject: [PATCH 25/52] OAI_CONFIG_LIST <3 --- OAI_CONFIG_LIST.json | 6 ++--- app.py | 14 +++------- planning/autogen_planner.py | 51 +++++++++++++++++-------------------- 3 files changed, 31 insertions(+), 40 deletions(-) diff --git a/OAI_CONFIG_LIST.json b/OAI_CONFIG_LIST.json index 2c8627c..6cfdae4 100644 --- a/OAI_CONFIG_LIST.json +++ b/OAI_CONFIG_LIST.json @@ -1,15 +1,15 @@ [ { "model": "gpt-4", - "api_key": "" + "api_key": "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" }, { "model": "gpt-4-1106-preview", - "api_key": "" + "api_key": "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" }, { "model": "gpt-3.5-turbo-16k", - "api_key": "" + "api_key": "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" }, { "model": "gpt-4", diff --git a/app.py b/app.py index fc8d618..d534d2a 100644 --- a/app.py +++ b/app.py @@ -23,13 +23,7 @@ #} llm_config = autogen.config_list_from_json( env_or_file="OAI_CONFIG_LIST.json", - filter_dict={ - "model": { - "gpt-4", - "gpt-3.5-turbo-16k", - "gpt-4-1106-preview" - } - } + filter_dict={"model": {"gpt-4", "gpt-3.5-turbo-16k", "gpt-4-1106-preview"}} ) builder_config_path = autogen.config_list_from_json( @@ -42,9 +36,9 @@ ) Zilliz_config = { - "host": "your_milvus_host", - "port": "your_milvus_port", - "collection_name": "your_collection_name" + "host": "your_milvus_host", # use Zilliz Cloud + "port": "your_milvus_port", # use Zilliz Cloud + "collection_name": "your_collection_name" # use Zilliz Cloud } kernel = semantic_kernel.Kernel() kernel.import_skill(BingPlugin(bing_api_key)) diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index 860b02d..994270f 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -23,7 +23,7 @@ def __init__(self, kernel: semantic_kernel.Kernel, llm_config: Dict = None, buil self.kernel = kernel self.llm_config = llm_config or {} self.builder_config_path = builder_config_path - self.validate_llm_config() +# self.validate_llm_config() self.builder = self.create_builder() self.Zilliz_agent = ZillizRetrievalAgent(**zilliz_config) if zilliz_config else None @@ -87,16 +87,16 @@ def perform_retrieval(self, query_vector: List[float], top_k: int = 10): raise ValueError("Zilliz agent is not configured.") return self.Zilliz_agent.search([query_vector], top_k, {"nprobe": 16}) - def validate_llm_config(self): - if self.llm_config.get("type") == "openai": - if not self.llm_config.get("openai_api_key"): - raise ValueError("OpenAI API key is required for OpenAI LLM.") - elif self.llm_config.get("type") == "azure": - required_keys = ["azure_api_key", "azure_deployment", "azure_endpoint"] - if any(key not in self.llm_config for key in required_keys): - raise ValueError("Azure OpenAI API configuration is incomplete.") - else: - raise ValueError("LLM type not provided, must be 'openai' or 'azure'.") +# def validate_llm_config(self): +# if self.llm_config.get("type") == "openai": +# if not self.llm_config.get("openai_api_key"): +# raise ValueError("OpenAI API key is required for OpenAI LLM.") +# elif self.llm_config.get("type") == "azure": +# required_keys = ["azure_api_key", "azure_deployment", "azure_endpoint"] +# if any(key not in self.llm_config for key in required_keys): +# raise ValueError("Azure OpenAI API configuration is incomplete.") +# else: +# raise ValueError("LLM type not provided, must be 'openai' or 'azure'.") def update_llm_config(self, new_config: Dict): self.llm_config = new_config @@ -116,22 +116,19 @@ def load_semantic_kernel_plugins(self, plugins: List[str]): print(f"Error loading plugin '{plugin}': {e}") def __get_autogen_config(self) -> Dict: - if self.llm_config["type"] == "openai": - return { - "functions": self.__get_function_definitions(), - "config_list": [{"model": "gpt-3.5-turbo", "api_key": self.llm_config["openai_api_key"]}] - } - elif self.llm_config["type"] == "azure": - return { - "functions": self.__get_function_definitions(), - "config_list": [{ - "model": self.llm_config["azure_deployment"], - "api_type": "azure", - "api_key": self.llm_config["azure_api_key"], - "api_base": self.llm_config["azure_endpoint"], - "api_version": "2023-08-01-preview" - }] - } + + for config in self.llm_config: + if config.get("type") == "openai": + return { + "functions": self.__get_function_definitions(), + "config_list": [config] + } + elif config.get("type") == "azure": + return { + "functions": self.__get_function_definitions(), + "config_list": [config] + } + raise ValueError("No suitable LLM configuration found.") def __get_function_definitions(self) -> List: functions = [] From 543d37b0bc343caa92b18732aac641547e9c6f40 Mon Sep 17 00:00:00 2001 From: tonic Date: Mon, 11 Dec 2023 00:06:34 +0100 Subject: [PATCH 26/52] update config --- app.py | 4 ++-- planning/autogen_planner.py | 13 +++++-------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/app.py b/app.py index d534d2a..dc8c661 100644 --- a/app.py +++ b/app.py @@ -43,9 +43,9 @@ kernel = semantic_kernel.Kernel() kernel.import_skill(BingPlugin(bing_api_key)) kernel.import_skill(WebPagesPlugin()) -sk_planner = AutoGenPlanner(kernel, llm_config, builder_config_path) -assistant = sk_planner.create_assistant_agent("Assistant") +sk_planner = AutoGenPlanner(kernel, llm_config) +assistant = sk_planner.create_assistant_agent("Assistant") def get_response(question, max_auto_reply): worker = sk_planner.create_user_agent("Worker", max_auto_reply=max_auto_reply, human_input="NEVER") assistant = sk_planner.create_assistant_agent("Assistant") diff --git a/planning/autogen_planner.py b/planning/autogen_planner.py index 994270f..6c91396 100644 --- a/planning/autogen_planner.py +++ b/planning/autogen_planner.py @@ -117,18 +117,15 @@ def load_semantic_kernel_plugins(self, plugins: List[str]): def __get_autogen_config(self) -> Dict: + desired_model = "gpt-4-1106-preview" # Update this with your choice of model + for config in self.llm_config: - if config.get("type") == "openai": - return { - "functions": self.__get_function_definitions(), - "config_list": [config] - } - elif config.get("type") == "azure": + if config.get("model") == desired_model: return { "functions": self.__get_function_definitions(), - "config_list": [config] + "config_list": [config] } - raise ValueError("No suitable LLM configuration found.") + raise ValueError(f"No suitable LLM configuration found for model '{desired_model}'.") def __get_function_definitions(self) -> List: functions = [] From 9b9969792ccd59fb9c78c65949deaccc95b048d2 Mon Sep 17 00:00:00 2001 From: tonic Date: Mon, 11 Dec 2023 00:08:05 +0100 Subject: [PATCH 27/52] Update app.py --- app.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/app.py b/app.py index dc8c661..db1f4b6 100644 --- a/app.py +++ b/app.py @@ -28,11 +28,7 @@ builder_config_path = autogen.config_list_from_json( env_or_file="OAI_CONFIG_LIST.json", - filter_dict={ - "model": { - "gpt-4-1106-preview" - } - } + filter_dict={"model": {"gpt-4-1106-preview"}} ) Zilliz_config = { @@ -43,7 +39,7 @@ kernel = semantic_kernel.Kernel() kernel.import_skill(BingPlugin(bing_api_key)) kernel.import_skill(WebPagesPlugin()) -sk_planner = AutoGenPlanner(kernel, llm_config) +sk_planner = AutoGenPlanner(kernel, llm_config, builder_config_path) assistant = sk_planner.create_assistant_agent("Assistant") def get_response(question, max_auto_reply): From 7803a7f5e40972e8b365af9db4582bd4d21cd106 Mon Sep 17 00:00:00 2001 From: Tonic Date: Mon, 11 Dec 2023 01:27:34 +0100 Subject: [PATCH 28/52] Update OAI_CONFIG_LIST.json --- OAI_CONFIG_LIST.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/OAI_CONFIG_LIST.json b/OAI_CONFIG_LIST.json index 6cfdae4..eb26a90 100644 --- a/OAI_CONFIG_LIST.json +++ b/OAI_CONFIG_LIST.json @@ -1,15 +1,15 @@ [ { "model": "gpt-4", - "api_key": "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" + "api_key": "sk-...7HG" }, { "model": "gpt-4-1106-preview", - "api_key": "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" + "api_key": "sk-...7HG" }, { "model": "gpt-3.5-turbo-16k", - "api_key": "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" + "api_key": "sk-...7HG" }, { "model": "gpt-4", @@ -25,4 +25,4 @@ "api_type": "azure", "api_version": "2023-07-01-preview" } -] \ No newline at end of file +] From 2398b06cbe62abc96f8378cb50529e91c61afda9 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 20:46:50 -0800 Subject: [PATCH 29/52] add convo for aider to read --- .gitignore | 1 + convo.md | 445 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 446 insertions(+) create mode 100644 convo.md diff --git a/.gitignore b/.gitignore index d9005f2..03d0190 100644 --- a/.gitignore +++ b/.gitignore @@ -150,3 +150,4 @@ cython_debug/ # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ +.aider* diff --git a/convo.md b/convo.md new file mode 100644 index 0000000..2645d37 --- /dev/null +++ b/convo.md @@ -0,0 +1,445 @@ + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +You'll probably need to run a search for this one. Would there be any utility to making a semantic kernel plugin that calls the Gorilla CLI? + +ChatGPT + +ChatGPT + +Creating a semantic kernel plugin that calls the Gorilla CLI could be quite beneficial, especially considering the distinct functionalities and advantages offered by each. + +Gorilla CLI is a tool designed to enhance command-line interactions. It generates potential commands for execution based on user objectives, supporting a wide range of APIs like Kubernetes, AWS, GCP, Azure, GitHub, and many others. This user-centric approach simplifies command-line operations, eliminating the need to remember complex CLI arguments​[](https://github.com/gorilla-llm/gorilla-cli)​. + +On the other hand, Semantic Kernel, with its plugin architecture, serves as a foundation for AI applications, allowing integration with services like ChatGPT, Bing, and Microsoft 365. Plugins in Semantic Kernel can consist of both native code and AI service requests through semantic functions. This structure allows for the encapsulation of capabilities into a single unit, facilitating their use by the kernel. For developers, this means that plugins created for Semantic Kernel can be exported and made usable in other platforms like ChatGPT, enhancing the reach of AI capabilities without rewriting code​[](https://learn.microsoft.com/en-us/semantic-kernel/ai-orchestration/plugins/)​. + +Given these aspects, a semantic kernel plugin that calls Gorilla CLI could leverage the strengths of both platforms. It would enable the creation of sophisticated command-line tools that are enhanced by AI capabilities. For instance, such a plugin could use AI to interpret and simplify complex command sequences or integrate advanced data processing and analysis within command-line operations. + +Furthermore, the semantic kernel's capability to integrate with external data sources and the facility to create and use templatized prompts can add a layer of dynamic functionality to the command-line operations managed by Gorilla CLI. This could result in more efficient, context-aware, and adaptable CLI tools​[](https://techcommunity.microsoft.com/t5/azure-developer-community-blog/semantic-kernel-powered-openai-plugin-development-lifecycle/ba-p/3967751)​. + +Overall, combining the user-centric command generation of Gorilla CLI with the AI-enhanced, plugin-driven architecture of Semantic Kernel can lead to the development of more powerful, intuitive, and versatile tools for various command-line and data processing tasks. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +Alright. Let's say I wanted to write a semantic kernel plugin that allowed for the usage of the Gorilla CLI during kernel operations, how would I go about that? + +ChatGPT + +ChatGPT + +Writing a semantic kernel plugin that integrates the Gorilla CLI into kernel operations involves a few key steps. Here's a high-level overview of the process you might follow: + +1. **Understand Gorilla CLI and Semantic Kernel**: Firstly, ensure you have a good understanding of both Gorilla CLI and the Semantic Kernel. Gorilla CLI enhances command-line interactions by generating potential commands based on user inputs, while Semantic Kernel allows for the integration of AI capabilities into various applications and services. + +2. **Design the Plugin's Functionality**: Decide on the specific functionalities you want your plugin to have. This could include determining how Gorilla CLI's command generation capabilities will be utilized within the Semantic Kernel environment. For example, the plugin could enable AI-enhanced command-line operations, facilitate data processing, or automate certain tasks using Gorilla CLI commands. + +3. **Familiarize with Plugin Standards**: As per the documentation from Microsoft Learn, Semantic Kernel has adopted the OpenAI plugin specification as the standard. This ensures interoperability of plugins across various AI applications and services. Understanding this specification is crucial for creating a plugin that is compatible with Semantic Kernel​[](https://learn.microsoft.com/en-us/semantic-kernel/ai-orchestration/plugins/)​. + +4. **Create Semantic and Native Functions**: Semantic Kernel plugins can have two types of functions: semantic and native. Semantic functions are more about AI-driven capabilities, like natural language processing, while native functions involve direct code execution, such as running a Gorilla CLI command. Depending on your plugin's requirements, you may need to implement both types of functions. + +5. **Templatize Prompts for Scalability**: If your plugin will use AI services, consider templatizing prompts. This approach makes the plugin more scalable and maintainable, allowing dynamic generation of prompts based on variables and enabling nested function calls within a semantic function​[](https://techcommunity.microsoft.com/t5/azure-developer-community-blog/semantic-kernel-powered-openai-plugin-development-lifecycle/ba-p/3967751)​. + +6. **Develop and Test the Plugin**: Develop your plugin according to the OpenAI plugin standards and test it thoroughly. Ensure that it integrates well with Semantic Kernel and that it can effectively call and utilize Gorilla CLI commands within its operations. + +7. **Document and Share**: Once your plugin is developed and tested, document its functionalities, usage, and any limitations. Consider sharing it on platforms like GitHub, and ensure it adheres to the relevant licensing requirements, like the Apache 2.0 license under which Gorilla CLI operates​[](https://github.com/gorilla-llm/gorilla-cli)​. + +8. **Feedback and Iteration**: Be prepared to receive feedback from users and iterate on your plugin. The AI and CLI fields are rapidly evolving, so staying updated with the latest developments and user needs is crucial for maintaining an effective and relevant tool. + + +Remember, these steps provide a general guideline. The specifics can vary based on your exact requirements and the functionalities you wish to implement in your plugin. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +Sure. Let's have your act as though you're the lead developer on a hackathon project, the goal of which is to develop a useful plugin for Semantic Kernel that makes use of the Gorilla CLI. Develop a step by step plan to implement this plugin, so we can relay it to your juniors. + +ChatGPT + +ChatGPT + +As the lead developer on this hackathon project, our goal is to develop a Semantic Kernel plugin that integrates the Gorilla CLI. Here's a step-by-step plan to implement this plugin: + +### 1. Team Briefing and Role Assignment + +- **Objective Explanation**: Brief the team about the project's goal: creating a plugin that combines the AI-enhanced capabilities of Semantic Kernel with the command-line efficiency of Gorilla CLI. +- **Role Distribution**: Assign roles based on expertise, such as development, testing, documentation, and integration. + +### 2. Requirement Analysis + +- **Identify Use Cases**: Determine specific scenarios where the plugin will be useful, like automating repetitive CLI tasks or enhancing command-line operations with AI. +- **Gather Requirements**: List the functionalities needed for these use cases, considering both AI capabilities and command-line operations. + +### 3. Understanding Gorilla CLI and Semantic Kernel + +- **Research**: Familiarize the team with Gorilla CLI's functionalities and how it simplifies CLI interactions. +- **Study Semantic Kernel**: Understand the architecture of Semantic Kernel, focusing on plugin development and integration standards. + +### 4. Plugin Design + +- **Define Functionalities**: Decide what the plugin will do. For instance, it could translate natural language into CLI commands using Gorilla CLI. +- **Sketch Workflow**: Create a high-level workflow of how the plugin will operate within the Semantic Kernel environment. + +### 5. Development Environment Setup + +- **Tools and Technologies**: Set up development tools, including necessary libraries and access to Gorilla CLI and Semantic Kernel platforms. +- **Version Control**: Establish a Git repository for collaborative development and version control. + +### 6. Plugin Development + +- **Create Semantic Functions**: Develop functions for AI-related tasks, such as interpreting user input or processing command outputs. +- **Develop Native Functions**: Implement native functions that will call Gorilla CLI commands. +- **Integration**: Ensure that the semantic and native functions work cohesively. + +### 7. Templating and AI Integration + +- **Templatize Prompts**: Implement templated prompts for scalability and dynamic functionality. +- **AI Service Connection**: Connect the AI services of Semantic Kernel to the plugin, ensuring that the AI can interpret and respond to user inputs effectively. + +### 8. Testing and Iteration + +- **Unit Testing**: Perform thorough unit tests for each function. +- **Integration Testing**: Test the plugin in an integrated environment to ensure it works seamlessly within Semantic Kernel and with Gorilla CLI. +- **Feedback Loop**: Use team feedback to iterate and improve the plugin. + +### 9. Documentation + +- **User Guide**: Create a detailed user guide explaining how to install and use the plugin. +- **Developer Documentation**: Document the code and architecture for future development and maintenance. + +### 10. Demonstration and Feedback + +- **Internal Demo**: Conduct a demonstration within the team to gather initial feedback and make necessary adjustments. +- **Hackathon Presentation**: Prepare for the final presentation at the hackathon, highlighting key features and potential use cases. + +### 11. Submission and Post-Hackathon Plan + +- **Finalize Submission**: Ensure all components are polished and submit the project. +- **Post-Hackathon Roadmap**: Discuss potential future improvements or additional features based on the feedback received during the hackathon. + +### Regular Check-Ins + +Throughout the hackathon, hold regular check-ins to monitor progress, address challenges, and ensure the project stays on track. + +This plan provides a structured approach for developing the plugin, ensuring that we cover all necessary aspects from conception to completion. Remember, effective communication and collaboration within the team are key to the success of this project. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +Great. Let's start stepping through that process. The team understands the goal, and has roles assigned already. Let's begin with requirement analysis and create a design architecture, specification, or prd, whichever's the most appropriate to begin with. + +ChatGPT + +ChatGPT + +Great, let's move forward with the requirement analysis and the creation of a design architecture or Product Requirement Document (PRD) for our Semantic Kernel plugin that integrates with Gorilla CLI. Here's how we can approach this: + +### Requirement Analysis + +1. **Identify Target Users and Use Cases** + + - Determine who will use the plugin (e.g., developers, system administrators). + - Identify specific use cases, such as automating complex CLI tasks, simplifying command generation, or integrating AI-driven insights into command-line operations. +2. **Gather Functional Requirements** + + - List the key functionalities based on the use cases. For example: + - Natural language interpretation to CLI commands. + - AI-enhanced assistance for command syntax and options. + - Automatic generation of command scripts based on user goals. +3. **Non-Functional Requirements** + + - Consider performance criteria (e.g., response time, scalability). + - Ensure security and privacy compliance, particularly if dealing with sensitive data. + - User experience aspects, like ease of installation and use. +4. **Integration Requirements** + + - How the plugin will integrate with existing systems and workflows. + - Compatibility with different operating systems and environments. + +### Design Architecture/Specification + +1. **High-Level Architecture** + + - Outline the plugin's architecture, showing how it fits into the Semantic Kernel ecosystem and interacts with Gorilla CLI. + - Include diagrams for visual representation. +2. **Data Flow Diagrams** + + - Illustrate how data flows through the system, from user input to the generation of CLI commands and their execution. +3. **Component Design** + + - Break down the system into smaller components (e.g., input parser, command generator, execution manager). + - Define the responsibilities and interfaces for each component. +4. **Technology Stack** + + - Specify the programming languages, frameworks, and tools that will be used. + +### PRD (Product Requirement Document) + +1. **Purpose and Scope** + + - Define the purpose of the plugin and its scope within the Semantic Kernel environment. +2. **Detailed Functional Requirements** + + - Elaborate on the functionalities identified during the requirement analysis. + - Include use case scenarios and user stories. +3. **User Interface and Interaction Flow** + + - Sketch out the user interface, if applicable. + - Describe the interaction flow from the user's perspective. +4. **Performance Metrics** + + - Define the key performance indicators (KPIs) and success metrics. +5. **Security and Compliance** + + - Detail any security measures and compliance standards the plugin adheres to. +6. **Timeline and Milestones** + + - Provide a tentative timeline for development, testing, and release. + - Set milestones for major phases of the project. +7. **Risk Assessment** + + - Identify potential risks and mitigation strategies. +8. **Appendices** + + - Include any additional resources, references, or technical specifications. + +### Next Steps + +- **Validation**: Review the PRD with key stakeholders (e.g., potential users, team members) for feedback and validation. +- **Refinement**: Refine the PRD based on feedback received. +- **Approval**: Get the final sign-off from the team or project sponsor. + +Once we have a solid PRD in place, we can move forward with confidence into the development phase, knowing that our objectives, requirements, and plan are clearly outlined and agreed upon. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +Alright. If you can get started stepping through the creation of this PRD, i'll be available to conference with the team and provide feedback as needed. + +ChatGPT + +ChatGPT + +Excellent! Let's start developing the Product Requirement Document (PRD) for our Semantic Kernel plugin that integrates Gorilla CLI. + +### Product Requirement Document (PRD) for Semantic Kernel Plugin with Gorilla CLI + +#### 1. Purpose and Scope + +- **Objective**: Develop a plugin for Semantic Kernel that utilizes Gorilla CLI to enhance command-line operations with AI capabilities. +- **Scope**: The plugin will be used within Semantic Kernel environments, offering intuitive command generation and execution for various CLI tasks. + +#### 2. Target Users and Use Cases + +- **Target Users**: Developers, system administrators, and IT professionals. +- **Use Cases**: + 1. Simplifying complex command sequences into straightforward tasks. + 2. Automating repetitive CLI tasks using AI-driven insights. + 3. Enhancing command-line debugging with AI-generated suggestions. + +#### 3. Functional Requirements + +- **Command Interpretation**: The plugin should interpret natural language inputs and translate them into appropriate CLI commands. +- **AI-Assisted Command Creation**: Generate CLI commands or scripts based on user goals or descriptions. +- **User Interaction**: Interactive CLI interface that allows users to refine or edit AI-generated commands before execution. + +#### 4. Non-Functional Requirements + +- **Performance**: Response time should be within a few seconds for command generation. +- **Scalability**: Capable of handling a large number of requests simultaneously. +- **Security**: Ensure secure handling of commands, especially those that might affect system configurations or sensitive data. + +#### 5. Integration Requirements + +- **Semantic Kernel Integration**: Must seamlessly integrate with existing Semantic Kernel plugins and workflows. +- **Cross-Platform Compatibility**: Ensure compatibility with major operating systems like Windows, Linux, and macOS. + +#### 6. System Architecture + +- **High-Level Design**: Overview of the plugin architecture showing interaction between Semantic Kernel and Gorilla CLI. +- **Component Design**: Detailed description of each component, such as input parser, AI engine, and command execution manager. + +#### 7. User Interface and Interaction + +- **UI Sketch**: If applicable, include mockups of any user interface elements. +- **Interaction Flow**: Step-by-step flow of how users will interact with the plugin, from input to command execution. + +#### 8. Performance Metrics + +- **Success Metrics**: Define KPIs such as user adoption rate, accuracy of command generation, and user satisfaction. + +#### 9. Security and Compliance + +- **Data Handling**: Describe how user inputs and command outputs are handled securely. +- **Compliance Standards**: Adherence to relevant security standards and practices. + +#### 10. Development Timeline and Milestones + +- **Phase 1**: Requirement gathering and validation (Duration: 2 weeks). +- **Phase 2**: Design and architecture setup (Duration: 3 weeks). +- **Phase 3**: Development and unit testing (Duration: 6 weeks). +- **Phase 4**: Integration testing and bug fixing (Duration: 4 weeks). +- **Phase 5**: User acceptance testing and deployment (Duration: 3 weeks). + +#### 11. Risk Assessment and Mitigation + +- **Risk Identification**: Possible risks such as delays in development, unanticipated technical challenges, or integration issues. +- **Mitigation Strategies**: Contingency plans, such as allocating additional resources or having fallback options for critical components. + +#### 12. Appendices + +- **Additional Resources**: Links to relevant documentation, APIs, and tools. +- **Technical Specifications**: Detailed technical requirements and specifications. + +--- + +This draft PRD provides a structured framework for our plugin development. You can review it with the team, and I'll be here to update the document based on your feedback and inputs. Once we finalize the PRD, it will serve as a guiding document throughout the development process. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +got it. Use cases look okay, i'm a little fuzzy on them though. My understanding is that semantic kernel is a package designed to perform various operations on and with llms, and the gorilla cli uses llms to translate natural language queries into command line executions. I don't have a clear picture of a specific use case where i would use them both at the same time, instead of one or the other as we intend, i.e. a clear usage situation of the plugin we're designing. reqs look fine, gorilla hits #1 by itself i think, it also prompts the user to confirm prior to execution. what's the intended difference between command interpretation and assisted command creation? not too concerned with the non-functionals, considering it's a hackathon project, goal is to just make the thing work and worry about testing and scalability later. integration we're looking for is to make a semantic kernel plugin that works the semantic kernel python sdk specifically, the rest can wait. UI will probably just be the terminal, or whatever standard interface you might operate a semantic kernel plugin via. not worried about compliance, hackathon project as mentioned above. timeline should be much shorter -- we have less than 24 hours, and a largely-functional series of semantic kernel plugins currently operating within an autogen multi-agent framework. not worried about risks or appendicies at the moment, short time frame, hackathon. + +ChatGPT + +ChatGPT + +Given your clarifications, let's refine our approach focusing on the specifics of your hackathon project and the integration of Semantic Kernel with Gorilla CLI. + +### Refined Use Cases + +Understanding that Semantic Kernel is primarily about working with LLMs and Gorilla CLI translates natural language into command-line executions, here are more targeted use cases for our plugin: + +1. **Natural Language Command Generation for Data Processing Tasks**: Users dealing with large datasets often use complex command-line operations for data manipulation. Our plugin could interpret natural language requests to generate these complex commands, leveraging both the LLM understanding of Semantic Kernel and the command-line efficiency of Gorilla CLI. + +2. **AI-Assisted Debugging and Optimization**: For developers working on optimizing scripts or debugging, our plugin can suggest command-line tweaks or optimizations based on natural language descriptions of their goals or problems, combining AI insights with practical CLI solutions. + +3. **Automating Routine Tasks with AI Insights**: System administrators and developers often have routine tasks that could be automated. Our plugin could interpret natural language descriptions of these tasks, generating scripts or command sequences to automate them. + + +### Functional Requirements Clarification + +- **Command Interpretation vs Assisted Command Creation**: + - **Command Interpretation** refers to the plugin's ability to understand user inputs in natural language and map them to relevant CLI commands, which is a primary function of Gorilla CLI. + - **Assisted Command Creation** goes a step further, where the plugin not only interprets the command but also assists in refining or optimizing it based on the context provided. This is where the Semantic Kernel's LLM integration adds value, by understanding the broader context or intent behind the command. + +### Simplified Project Scope + +- **Integration Focus**: Concentrate on creating a Semantic Kernel plugin compatible with the Semantic Kernel Python SDK, enabling the integration with Gorilla CLI. +- **UI and Interaction**: Since it's a hackathon project with time constraints, we'll stick with a standard terminal or command-line interface, which is typical for Semantic Kernel plugins. +- **Timeline Adjustment**: Acknowledging the time constraint (less than 24 hours), the project's focus will be on developing a basic functional prototype. This involves: + - Quickly setting up the development environment. + - Developing a minimal viable plugin that demonstrates the key integration between Semantic Kernel and Gorilla CLI. + - Basic testing to ensure the plugin's core functionality works as intended. + +### Next Steps + +- **Rapid Development**: With the shortened timeline, we'll prioritize developing the core functionalities – primarily the integration of Semantic Kernel with Gorilla CLI to interpret and refine CLI commands. +- **Collaborative Coding**: Given the time constraint, parallel development and immediate integration of components will be crucial. Regular sync-ups, even if brief, will help keep the team aligned. +- **Demonstration Prep**: Prepare a simple demonstration that showcases the plugin's ability to interpret natural language and generate/assist in creating command-line instructions. + +Remember, the key to success in a hackathon is often to create a proof of concept that demonstrates the innovative integration or use of technology, rather than a fully polished product. The focus should be on showcasing the unique capabilities that arise from combining Semantic Kernel's LLM functionalities with Gorilla CLI's command-line efficiency. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +Now I'm getting it a bit better. Let's start outline something that lets us start actually writing code, we want to hit the gas on this asap. So rather than a single user input that generates one command line execution, like gorilla, what it sounds like the idea is would be to wrap gorilla in a semantic function and provide that interface to the CLI/an sk 'agent', if they have those, such that the user can give a high level overview or general goal and get a full script/series of executions, then execute them, provide feedback to the cli tool, and then get the next series, is that kind of the idea? + +ChatGPT + +ChatGPT + +Yes, your understanding is on point. Wrapping Gorilla CLI in a semantic function and providing it to a Semantic Kernel agent or CLI interface for script generation and execution is indeed a promising approach. This would elevate the capabilities from single command generation to creating a series of command executions based on a high-level overview or goal. Here's how we can start outlining the development process: + +### Development Outline for Semantic Kernel Plugin with Gorilla CLI Integration + +#### 1. **Define the Semantic Function** + +- **Functionality**: The semantic function should take a high-level user goal or overview as input and translate it into a script or a series of executable commands using Gorilla CLI. +- **Input Processing**: This involves parsing the user input to understand the intent and context, which will then be fed into Gorilla CLI for command generation. + +#### 2. **Gorilla CLI Integration** + +- **Command Generation**: Utilize Gorilla CLI's capabilities to generate relevant command-line instructions based on the processed input. +- **Script Creation**: If the user's goal involves multiple steps, the function should be able to create a script that encapsulates these steps. + +#### 3. **Execution and Feedback Loop** + +- **Execution Interface**: Provide an interface for the user to review and execute the generated commands or script. +- **Feedback Mechanism**: After execution, allow the user to provide feedback, which can be used to refine the command generation process. + +#### 4. **Setting Up the Development Environment** + +- **Tools and Dependencies**: Set up a development environment with access to Semantic Kernel SDK, Gorilla CLI, and any other necessary tools or libraries. +- **Collaborative Tools**: Since we're on a tight deadline, ensure the team has access to collaborative tools for code sharing and communication. + +#### 5. **Rapid Prototyping** + +- **Core Functionality First**: Focus on building the core functionality – the semantic function that integrates Gorilla CLI and generates the command script. +- **Iterative Development**: Adopt an iterative approach, starting with a basic version and enhancing it based on initial testing and feedback. + +#### 6. **Basic Testing and Refinement** + +- **Initial Testing**: Test the basic functionality to ensure that the semantic function correctly interprets the input and generates viable commands. +- **Refinement**: Based on initial tests, refine the command generation logic and user interaction flow. + +#### 7. **Preparation for Demonstration** + +- **Demo Script**: Prepare a brief script that demonstrates the plugin's capabilities. +- **Presentation Materials**: Create any necessary presentation materials to explain the functionality and potential use cases. + +#### 8. **Hackathon Presentation** + +- **Showcasing the Plugin**: Demonstrate how the plugin takes a high-level goal and translates it into a series of CLI commands or a script. +- **Explaining the Integration**: Highlight how the integration of Semantic Kernel and Gorilla CLI offers enhanced capabilities over using them separately. + +#### 9. **Code Sharing and Version Control** + +- Ensure that all code is committed to a shared repository for easy access and collaboration among team members. + +### Next Steps + +- **Begin Coding**: Start with setting up the semantic function and integrating Gorilla CLI. +- **Regular Sync-Ups**: Have brief, regular meetings to synchronize the team’s progress and tackle any emerging challenges. + +This outline focuses on getting the core functionality up and running as soon as possible, given the time constraints of a hackathon. It ensures that we have a working prototype that demonstrates the innovative integration of Semantic Kernel and Gorilla CLI. + +![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) + +You + +We have dev envs and collab tools already set up -- let's get started with some code blocks and i'll plug them into the environment to start testing/iterating on. + +ChatGPT + +ChatGPT + +Great! Let's begin by drafting a basic Python script that outlines the core functionality of our Semantic Kernel plugin integrating Gorilla CLI. The script will follow these steps: + +1. **Input Processing**: Receive a high-level description or goal from the user. +2. **Command Generation**: Use Gorilla CLI capabilities to translate the description into a series of CLI commands. +3. **Execution and Feedback**: Provide an interface for executing the commands and capturing user feedback. + +### Python Script Skeleton + +pythonCopy code + +`# Import necessary libraries # Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible def process_user_input(input_description): """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(processed_input): """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands def execute_commands(cli_commands): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Process the input processed_input = process_user_input(user_input) # Generate CLI commands cli_commands = generate_cli_commands(processed_input) # Execute commands and handle feedback execute_commands(cli_commands) if __name__ == "__main__": main()` + +This script is a basic outline. The actual implementation would involve more sophisticated input processing and integration with Gorilla CLI. You can start by plugging in your existing Semantic Kernel plugins and Gorilla CLI functionalities into the respective placeholder functions. + +### Iteration and Testing + +- **Iterate on `process_user_input`**: Improve this function to handle more complex scenarios or varied inputs. +- **Integrate with Gorilla CLI**: Replace the placeholder in `generate_cli_commands` with actual calls to Gorilla CLI. +- **Enhance `execute_commands`**: Implement real command execution and capture user feedback. + +This setup should allow you to start testing and iterating on the basic functionality. As you progress, keep refining each part based on the feedback and results from your initial tests. \ No newline at end of file From 41425582acfc22aed1f79265797e44cd51966f39 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 20:52:37 -0800 Subject: [PATCH 30/52] aider: Created `gorilla_plugin.py` with initial code for the Semantic Kernel plugin. # Aider chat conversation: USER: hey aider -- got a plan in place to create a semantic kernel plugin that makes use of the gorilla cli. I added the convo with our lead to your chat window, let's get started on implementation. ASSISTANT: Great! Let's start with the implementation. The first step is to create a new Python file for our Semantic Kernel plugin. Let's call it `gorilla_plugin.py`. Here's the initial code for `gorilla_plugin.py`: ```python # Import necessary libraries # Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible def process_user_input(input_description): """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(processed_input): """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands def execute_commands(cli_commands): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Process the input processed_input = process_user_input(user_input) # Generate CLI commands cli_commands = generate_cli_commands(processed_input) # Execute commands and handle feedback execute_commands(cli_commands) if __name__ == "__main__": main() ``` This script is a basic outline. The actual implementation would involve more sophisticated input processing and integration with Gorilla CLI. You can start by plugging in your existing Semantic Kernel plugins and Gorilla CLI functionalities into the respective placeholder functions. As you progress, keep refining each part based on the feedback and results from your initial tests. USER: great, sounds good ASSISTANT: Fantastic! If you have any questions or need further assistance as you begin implementing and testing the code, don't hesitate to ask. Happy coding! USER: let's start with file creation and implementation. USER: great, let's get started implementing. ASSISTANT: Sure, to get started with the implementation, we need to create a new file `gorilla_plugin.py` and add the initial code to it. Here is the *SEARCH/REPLACE block* for creating the new file: gorilla_plugin.py ```python <<<<<<< SEARCH ======= # Import necessary libraries # Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible def process_user_input(input_description): """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(processed_input): """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands def execute_commands(cli_commands): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Process the input processed_input = process_user_input(user_input) # Generate CLI commands cli_commands = generate_cli_commands(processed_input) # Execute commands and handle feedback execute_commands(cli_commands) if __name__ == "__main__": main() >>>>>>> REPLACE ``` This is a basic outline of the plugin. You will need to replace the placeholder functions with the actual implementation that interfaces with the Gorilla CLI and your Semantic Kernel plugins. --- gorilla_plugin.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 gorilla_plugin.py diff --git a/gorilla_plugin.py b/gorilla_plugin.py new file mode 100644 index 0000000..071ca26 --- /dev/null +++ b/gorilla_plugin.py @@ -0,0 +1,46 @@ +# Import necessary libraries +# Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible + +def process_user_input(input_description): + """ + Process the high-level user input to a format suitable for Gorilla CLI. + (This function might involve natural language processing or simple parsing) + """ + # Process input + processed_input = input_description # Simplified for illustration + return processed_input + +def generate_cli_commands(processed_input): + """ + Use Gorilla CLI to generate a series of CLI commands based on the processed input. + (This function would interface with Gorilla CLI) + """ + # For the sake of example, let's assume we have a function in Gorilla CLI that does this. + # In reality, you would replace this with actual Gorilla CLI function calls. + cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands + return cli_commands + +def execute_commands(cli_commands): + """ + Execute the generated CLI commands and provide an interface for user feedback. + """ + for command in cli_commands: + # Execute the command (this could be done via a subprocess call in a real scenario) + print(f"Executing: {command}") + # Here you would insert the actual command execution logic + +def main(): + # Example user input + user_input = "Generate a report from yesterday's logs and email it to the team" + + # Process the input + processed_input = process_user_input(user_input) + + # Generate CLI commands + cli_commands = generate_cli_commands(processed_input) + + # Execute commands and handle feedback + execute_commands(cli_commands) + +if __name__ == "__main__": + main() From cb2493a07f6153976be973f65e903f29701d0a7d Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 20:54:20 -0800 Subject: [PATCH 31/52] move plugin skeleton --- gorilla_plugin.py => plugins/gorilla_plugin.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename gorilla_plugin.py => plugins/gorilla_plugin.py (100%) diff --git a/gorilla_plugin.py b/plugins/gorilla_plugin.py similarity index 100% rename from gorilla_plugin.py rename to plugins/gorilla_plugin.py From 8ee004ba42f1b83b2fc9805902a07c0793363b2b Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 20:57:48 -0800 Subject: [PATCH 32/52] aider: Refactored `gorilla_plugin.py` to implement the functionality of processing user input, generating CLI commands, and executing commands using the Gorilla CLI. # Aider chat conversation: USER: i've got a skeleton implementation of gorilla_plugin.py written there for you, let's go ahead and start implementing the outlined functionality. ASSISTANT: Sure, I can help with that. However, I need more specific instructions on what changes you want to make to the `gorilla_plugin.py` file. Could you please provide more details? For example, do you want to implement real functionality for the `process_user_input`, `generate_cli_commands`, and `execute_commands` functions? If so, what should these functions do exactly? USER: let's model the functionality off of `plugins/bing_connector.py`, allowing for input from a user or agent in the form of a natural language query or high level overview of the user's problem and assisting them with the process of performing a series of executions to accomplish their goal. ASSISTANT: To implement the functionality in `gorilla_plugin.py` similar to `bing_connector.py`, we need to: 1. Define a class `GorillaPlugin` that will encapsulate the functionality. 2. Add an `__init__` method to initialize the plugin. 3. Implement a `process_user_input` method that takes a natural language query and processes it. 4. Implement a `generate_cli_commands` method that generates CLI commands based on the processed input. 5. Implement an `execute_commands` method that executes the generated CLI commands. Here are the *SEARCH/REPLACE* blocks: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # Import necessary libraries # Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible def process_user_input(input_description): """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(processed_input): """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands def execute_commands(cli_commands): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Process the input processed_input = process_user_input(user_input) # Generate CLI commands cli_commands = generate_cli_commands(processed_input) # Execute commands and handle feedback execute_commands(cli_commands) if __name__ == "__main__": main() ======= class GorillaPlugin: """ A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ def __init__(self): pass def process_user_input(self, input_description: str) -> str: """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(self, processed_input: str) -> list: """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands def execute_commands(self, cli_commands: list): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Initialize GorillaPlugin gorilla_plugin = GorillaPlugin() # Process the input processed_input = gorilla_plugin.process_user_input(user_input) # Generate CLI commands cli_commands = gorilla_plugin.generate_cli_commands(processed_input) # Execute commands and handle feedback gorilla_plugin.execute_commands(cli_commands) if __name__ == "__main__": main() >>>>>>> REPLACE ``` Please note that the actual implementation of the `process_user_input`, `generate_cli_commands`, and `execute_commands` methods will depend on the specifics of the Gorilla CLI and the Semantic Kernel SDK. The provided code is a skeleton that needs to be filled with the actual logic. --- plugins/gorilla_plugin.py | 68 ++++++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 071ca26..263e14a 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -1,46 +1,54 @@ -# Import necessary libraries -# Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible - -def process_user_input(input_description): +class GorillaPlugin: """ - Process the high-level user input to a format suitable for Gorilla CLI. - (This function might involve natural language processing or simple parsing) + A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ - # Process input - processed_input = input_description # Simplified for illustration - return processed_input -def generate_cli_commands(processed_input): - """ - Use Gorilla CLI to generate a series of CLI commands based on the processed input. - (This function would interface with Gorilla CLI) - """ - # For the sake of example, let's assume we have a function in Gorilla CLI that does this. - # In reality, you would replace this with actual Gorilla CLI function calls. - cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands - return cli_commands - -def execute_commands(cli_commands): - """ - Execute the generated CLI commands and provide an interface for user feedback. - """ - for command in cli_commands: - # Execute the command (this could be done via a subprocess call in a real scenario) - print(f"Executing: {command}") - # Here you would insert the actual command execution logic + def __init__(self): + pass + + def process_user_input(self, input_description: str) -> str: + """ + Process the high-level user input to a format suitable for Gorilla CLI. + (This function might involve natural language processing or simple parsing) + """ + # Process input + processed_input = input_description # Simplified for illustration + return processed_input + + def generate_cli_commands(self, processed_input: str) -> list: + """ + Use Gorilla CLI to generate a series of CLI commands based on the processed input. + (This function would interface with Gorilla CLI) + """ + # For the sake of example, let's assume we have a function in Gorilla CLI that does this. + # In reality, you would replace this with actual Gorilla CLI function calls. + cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands + return cli_commands + + def execute_commands(self, cli_commands: list): + """ + Execute the generated CLI commands and provide an interface for user feedback. + """ + for command in cli_commands: + # Execute the command (this could be done via a subprocess call in a real scenario) + print(f"Executing: {command}") + # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" + # Initialize GorillaPlugin + gorilla_plugin = GorillaPlugin() + # Process the input - processed_input = process_user_input(user_input) + processed_input = gorilla_plugin.process_user_input(user_input) # Generate CLI commands - cli_commands = generate_cli_commands(processed_input) + cli_commands = gorilla_plugin.generate_cli_commands(processed_input) # Execute commands and handle feedback - execute_commands(cli_commands) + gorilla_plugin.execute_commands(cli_commands) if __name__ == "__main__": main() From 5467de8dcfde6a892dec65603861d350a96a8ca1 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:01:29 -0800 Subject: [PATCH 33/52] aider: To build out the `GorillaPlugin` in a similar way to the `BingConnector` plugin, we will need to make several changes to the `plugins/gorilla_plugin.py` file. We will add asynchronous methods to interact with the Gorilla CLI, and we will implement an iterative feedback loop where the user can provide confirmation before each execution and feedback about the environment afterwards. # Aider chat conversation: USER: `plugins/gorilla_plugin.py` `plugins/bing_connector.py` i want to build out the gorilla plugin in a similar way to the bing connector plugin, in order to enable the usage of the gorilla cli to assist with command line executions that can be performed in series to accomplish the user's high level goal with an iterative feedback loop so the user can provide confirmation before each execution and feedback about the environment afterwards. ASSISTANT: To build out the `GorillaPlugin` in a similar way to the `BingConnector` plugin, we will need to make several changes to the `plugins/gorilla_plugin.py` file. We will add asynchronous methods to interact with the Gorilla CLI, and we will implement an iterative feedback loop where the user can provide confirmation before each execution and feedback about the environment afterwards. Here are the steps we need to take: 1. Add necessary imports for asynchronous operations and typing. 2. Add an asynchronous method to interact with the Gorilla CLI. 3. Add an asynchronous method to execute commands with user confirmation and feedback. 4. Modify the `main()` function to support asynchronous execution. Let's start with the changes: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH class GorillaPlugin: ======= import asyncio from typing import List class GorillaPlugin: _cli_path: str def __init__(self, cli_path: str): self._cli_path = cli_path >>>>>>> REPLACE ``` This block adds the necessary imports for asynchronous operations and a constructor parameter for the path to the Gorilla CLI. Next, we'll add the asynchronous method to interact with the Gorilla CLI: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH def execute_commands(self, cli_commands: list): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic ======= async def execute_commands(self, cli_commands: List[str]): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") if user_confirmation.lower() == 'yes': # Here you would insert the actual command execution logic, e.g., subprocess call print(f"Executing: {command}") await asyncio.sleep(0) # Simulate async execution # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") else: print("Command execution skipped by user.") >>>>>>> REPLACE ``` This block replaces the synchronous `execute_commands` method with an asynchronous one that asks for user confirmation before executing each command and collects user feedback after each execution. Finally, we need to modify the `main()` function to support asynchronous execution: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Initialize GorillaPlugin gorilla_plugin = GorillaPlugin() # Process the input processed_input = gorilla_plugin.process_user_input(user_input) # Generate CLI commands cli_commands = gorilla_plugin.generate_cli_commands(processed_input) # Execute commands and handle feedback gorilla_plugin.execute_commands(cli_commands) if __name__ == "__main__": main() ======= async def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Initialize GorillaPlugin with the path to the Gorilla CLI gorilla_plugin = GorillaPlugin(cli_path="/path/to/gorilla-cli") # Process the input processed_input = gorilla_plugin.process_user_input(user_input) # Generate CLI commands cli_commands = gorilla_plugin.generate_cli_commands(processed_input) # Execute commands and handle feedback await gorilla_plugin.execute_commands(cli_commands) if __name__ == "__main__": asyncio.run(main()) >>>>>>> REPLACE ``` This block changes the `main()` function to an asynchronous function and uses `asyncio.run()` to execute it, which is necessary to run the asynchronous `execute_commands` method. These changes will allow the `GorillaPlugin` to interact with the Gorilla CLI asynchronously and provide an iterative feedback loop for command execution. Please note that the actual implementation of the Gorilla CLI interaction (e.g., subprocess calls) is not included and should be implemented according to the specifics of the Gorilla CLI. --- plugins/gorilla_plugin.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 263e14a..eada906 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -1,4 +1,11 @@ +import asyncio +from typing import List + class GorillaPlugin: + _cli_path: str + + def __init__(self, cli_path: str): + self._cli_path = cli_path """ A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ @@ -25,21 +32,28 @@ def generate_cli_commands(self, processed_input: str) -> list: cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands - def execute_commands(self, cli_commands: list): + async def execute_commands(self, cli_commands: List[str]): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: - # Execute the command (this could be done via a subprocess call in a real scenario) - print(f"Executing: {command}") - # Here you would insert the actual command execution logic + user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") + if user_confirmation.lower() == 'yes': + # Here you would insert the actual command execution logic, e.g., subprocess call + print(f"Executing: {command}") + await asyncio.sleep(0) # Simulate async execution + # After execution, get user feedback + user_feedback = input("Please provide feedback about the environment: ") + print(f"Received feedback: {user_feedback}") + else: + print("Command execution skipped by user.") -def main(): +async def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" - # Initialize GorillaPlugin - gorilla_plugin = GorillaPlugin() + # Initialize GorillaPlugin with the path to the Gorilla CLI + gorilla_plugin = GorillaPlugin(cli_path="/path/to/gorilla-cli") # Process the input processed_input = gorilla_plugin.process_user_input(user_input) @@ -48,7 +62,7 @@ def main(): cli_commands = gorilla_plugin.generate_cli_commands(processed_input) # Execute commands and handle feedback - gorilla_plugin.execute_commands(cli_commands) + await gorilla_plugin.execute_commands(cli_commands) if __name__ == "__main__": - main() + asyncio.run(main()) From 2af6249275deba55470c3dc4aa76089751ce5419 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:03:02 -0800 Subject: [PATCH 34/52] add gorilla-cli to requirements --- requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0fbf0d8..b9f7955 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,4 +22,5 @@ microsoft-bing-customwebsearch msrest msrestazure pymilvus -bs4 \ No newline at end of file +bs4 +gorilla-cli \ No newline at end of file From ffb9f244bd62eb39b3dc96da537b43d71e9b423a Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:04:00 -0800 Subject: [PATCH 35/52] aider: Replaced placeholder command execution logic with actual subprocess calls in `GorillaPlugin`. # Aider chat conversation: USER: i've added the gorilla cli to the environment and modified the requirements accordingly. let's get started with the subprocess calls in accordance with the gorilla cli spec. ASSISTANT: To integrate the Gorilla CLI with the `GorillaPlugin` and replace the placeholder command execution with actual subprocess calls, we need to modify the `plugins/gorilla_plugin.py` file. We will use the `asyncio.create_subprocess_shell` function to execute the commands asynchronously and capture their output. Here are the steps and the corresponding *SEARCH/REPLACE* blocks: 1. Import the `asyncio.subprocess` module to use for subprocess calls. 2. Replace the placeholder command execution logic with actual subprocess calls using `asyncio.create_subprocess_shell`. plugins/gorilla_plugin.py ```python <<<<<<< SEARCH import asyncio from typing import List ======= import asyncio import asyncio.subprocess as subprocess from typing import List >>>>>>> REPLACE ``` plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # Here you would insert the actual command execution logic, e.g., subprocess call print(f"Executing: {command}") await asyncio.sleep(0) # Simulate async execution # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") ======= # Execute the command using subprocess process = await subprocess.create_subprocess_shell( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {command}") print(f"Error: {stderr.decode().strip()}") # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") >>>>>>> REPLACE ``` These changes will allow the `GorillaPlugin` to execute CLI commands asynchronously and handle their output appropriately. The user will still be prompted for confirmation before each command execution and for feedback after each command is run. --- plugins/gorilla_plugin.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index eada906..d59e350 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -1,4 +1,5 @@ import asyncio +import asyncio.subprocess as subprocess from typing import List class GorillaPlugin: @@ -39,9 +40,21 @@ async def execute_commands(self, cli_commands: List[str]): for command in cli_commands: user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") if user_confirmation.lower() == 'yes': - # Here you would insert the actual command execution logic, e.g., subprocess call - print(f"Executing: {command}") - await asyncio.sleep(0) # Simulate async execution + # Execute the command using subprocess + process = await subprocess.create_subprocess_shell( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = await process.communicate() + + if process.returncode == 0: + print(f"Command executed successfully: {command}") + print(f"Output: {stdout.decode().strip()}") + else: + print(f"Command failed: {command}") + print(f"Error: {stderr.decode().strip()}") + # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") From 233a5803cd0560a07d0c16572e01ce16e2d8e242 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:05:59 -0800 Subject: [PATCH 36/52] aider: Refactored GorillaPlugin to collect environment information before and after executing commands. # Aider chat conversation: USER: hmm, is there a way we can direct the plugin to collect information about the environment the commands are being run in, so it doesn't need to always/only rely on input from the user? ASSISTANT: Certainly! To collect information about the environment in which the commands are being run, we can enhance the `GorillaPlugin` to gather system information before and after executing commands. This can be done by executing additional commands that retrieve system information, such as `uname` on Unix-like systems or `systeminfo` on Windows. Here's how we can modify the `plugins/gorilla_plugin.py` file to include environment data collection: 1. Add a method to gather system information. 2. Call this method before and after executing the CLI commands to collect the environment information. Let's implement these changes: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH import asyncio import asyncio.subprocess as subprocess from typing import List ======= import asyncio import asyncio.subprocess as subprocess from typing import List, Dict >>>>>>> REPLACE ``` plugins/gorilla_plugin.py ```python <<<<<<< SEARCH class GorillaPlugin: _cli_path: str def __init__(self, cli_path: str): self._cli_path = cli_path ======= class GorillaPlugin: _cli_path: str _env_info: Dict[str, str] def __init__(self, cli_path: str): self._cli_path = cli_path self._env_info = {} >>>>>>> REPLACE ``` plugins/gorilla_plugin.py ```python <<<<<<< SEARCH async def execute_commands(self, cli_commands: List[str]): """ Execute the generated CLI commands and provide an interface for user feedback. """ ======= async def collect_environment_info(self) -> None: """ Collects information about the environment where the commands are executed. """ uname_command = "uname -a" # This is for Unix-like systems, for Windows use 'systeminfo' process = await subprocess.create_subprocess_shell( uname_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: self._env_info['uname'] = stdout.decode().strip() else: self._env_info['uname'] = f"Error collecting environment info: {stderr.decode().strip()}" async def execute_commands(self, cli_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() >>>>>>> REPLACE ``` plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") ======= # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info, if needed # ... # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") print(f"Initial environment info: {initial_env_info}") print(f"Updated environment info: {updated_env_info}") >>>>>>> REPLACE ``` These changes will allow the `GorillaPlugin` to collect system information before and after executing each command. The `collect_environment_info` method can be expanded to collect more detailed information as required. The initial and updated environment information can be compared to detect any changes that occurred as a result of the command execution. --- plugins/gorilla_plugin.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index d59e350..7f4eb11 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -1,12 +1,14 @@ import asyncio import asyncio.subprocess as subprocess -from typing import List +from typing import List, Dict class GorillaPlugin: _cli_path: str + _env_info: Dict[str, str] def __init__(self, cli_path: str): self._cli_path = cli_path + self._env_info = {} """ A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ @@ -33,10 +35,27 @@ def generate_cli_commands(self, processed_input: str) -> list: cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands - async def execute_commands(self, cli_commands: List[str]): + async def collect_environment_info(self) -> None: """ - Execute the generated CLI commands and provide an interface for user feedback. + Collects information about the environment where the commands are executed. """ + uname_command = "uname -a" # This is for Unix-like systems, for Windows use 'systeminfo' + process = await subprocess.create_subprocess_shell( + uname_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = await process.communicate() + + if process.returncode == 0: + self._env_info['uname'] = stdout.decode().strip() + else: + self._env_info['uname'] = f"Error collecting environment info: {stderr.decode().strip()}" + + async def execute_commands(self, cli_commands: List[str]): + # Collect initial environment info + await self.collect_environment_info() + initial_env_info = self._env_info.copy() for command in cli_commands: user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") if user_confirmation.lower() == 'yes': @@ -55,9 +74,18 @@ async def execute_commands(self, cli_commands: List[str]): print(f"Command failed: {command}") print(f"Error: {stderr.decode().strip()}") + # Collect updated environment info + await self.collect_environment_info() + updated_env_info = self._env_info.copy() + + # Compare initial and updated environment info, if needed + # ... + # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") + print(f"Initial environment info: {initial_env_info}") + print(f"Updated environment info: {updated_env_info}") else: print("Command execution skipped by user.") From 43dddbe3c16b6edac958ddedbcaa5b052b4dcece Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:09:13 -0800 Subject: [PATCH 37/52] aider: Removed `process_user_input` and `generate_cli_commands` methods from `plugins/gorilla_plugin.py`. # Aider chat conversation: USER: cool. let's proceed with the implementation of the remaining unimplemented functions. ASSISTANT: To proceed with the implementation of the remaining unimplemented functions, I need to know which specific functions you are referring to. The provided `read-write` files are `plugins/bing_connector.py` and `plugins/gorilla_plugin.py`. Both of these files contain classes with methods that seem to be implemented. If you are referring to additional functionality or enhancements to these classes, please specify what changes or new functions you would like to implement. If there are other files or functions outside of these that you want to modify or implement, please provide the details or add the relevant files to the chat if they are not already `read-write`. USER: `process_user_input` and `generate_cli_commands` appear to be placeholder functions as far as I can tell, unless you rolled those into the refactor? If so, then we can go ahead and delete themso we're not cluttering up the plugin. ASSISTANT: Understood. If `process_user_input` and `generate_cli_commands` are placeholder functions and do not contain the actual implementation for processing user input or generating CLI commands, we can remove them from the `plugins/gorilla_plugin.py` file to reduce clutter. Here are the *SEARCH/REPLACE* blocks to remove the `process_user_input` and `generate_cli_commands` methods: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH def process_user_input(self, input_description: str) -> str: """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(self, processed_input: str) -> list: """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands ======= >>>>>>> REPLACE ``` Please confirm if you would like to proceed with these changes. --- plugins/gorilla_plugin.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 7f4eb11..a84855b 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -16,24 +16,6 @@ def __init__(self, cli_path: str): def __init__(self): pass - def process_user_input(self, input_description: str) -> str: - """ - Process the high-level user input to a format suitable for Gorilla CLI. - (This function might involve natural language processing or simple parsing) - """ - # Process input - processed_input = input_description # Simplified for illustration - return processed_input - - def generate_cli_commands(self, processed_input: str) -> list: - """ - Use Gorilla CLI to generate a series of CLI commands based on the processed input. - (This function would interface with Gorilla CLI) - """ - # For the sake of example, let's assume we have a function in Gorilla CLI that does this. - # In reality, you would replace this with actual Gorilla CLI function calls. - cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands - return cli_commands async def collect_environment_info(self) -> None: """ From 6721e67a479a77cc31566d7e62700752caebdfac Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:10:43 -0800 Subject: [PATCH 38/52] aider: Removed calls to non-existent methods in `gorilla_plugin.py` main function. # Aider chat conversation: USER: awesome. Does our `GorillaPlugin` and `gorilla_plugin` ASSISTANT: It appears that there is a discrepancy in the naming of the `GorillaPlugin` class in the `plugins/gorilla_plugin.py` file. The class is defined without any methods named `process_user_input` or `generate_cli_commands`, which you mentioned earlier as being placeholders. Since these methods are not present in the provided code, there is no need to remove them. However, if you are referring to the `main` function within the `gorilla_plugin.py` file, which contains calls to `process_user_input` and `generate_cli_commands`, we can remove the calls to these non-existent methods to clean up the code. Here is the *SEARCH/REPLACE* block to remove the calls ^C KeyboardInterrupt USER: sorry for the interruption, let's proceed with cleaning up `gorilla_plugin` and `GorillaPlugin` in the appropriate file ASSISTANT: No problem. To clean up the `gorilla_plugin.py` file, we will remove the placeholder calls to `process_user_input` and `generate_cli_commands` from the `main` function since these methods do not exist in the `GorillaPlugin` class. Here are the *SEARCH/REPLACE* blocks to remove the calls to the non-existent methods: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # Process the input processed_input = gorilla_plugin.process_user_input(user_input) # Generate CLI commands cli_commands = gorilla_plugin.generate_cli_commands(processed_input) ======= # Placeholder for processing the input and generating CLI commands # TODO: Implement the logic to process input and generate CLI commands cli_commands = ["echo 'Placeholder command'"] >>>>>>> REPLACE ``` This change replaces the calls to the non-existent methods with a placeholder comment and a sample command for demonstration purposes. Please confirm if you would like to proceed with this change or provide further instructions. --- plugins/gorilla_plugin.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index a84855b..e1f6219 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -78,11 +78,9 @@ async def main(): # Initialize GorillaPlugin with the path to the Gorilla CLI gorilla_plugin = GorillaPlugin(cli_path="/path/to/gorilla-cli") - # Process the input - processed_input = gorilla_plugin.process_user_input(user_input) - - # Generate CLI commands - cli_commands = gorilla_plugin.generate_cli_commands(processed_input) + # Placeholder for processing the input and generating CLI commands + # TODO: Implement the logic to process input and generate CLI commands + cli_commands = ["echo 'Placeholder command'"] # Execute commands and handle feedback await gorilla_plugin.execute_commands(cli_commands) From f610542718e32bc82f0cb93019d01769aeed173a Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:16:06 -0800 Subject: [PATCH 39/52] aider: Implement comparison logic, improve error handling, and add user feedback and command execution logic in `gorilla_plugin.py`. # Aider chat conversation: USER: great. now, do we need any additional refactors or modifications to the codebase in order to make sure the `gorilla_plugin` file works as expected, or are we finished up? ASSISTANT: Based on the provided `gorilla_plugin.py` file, it seems that the main functionality for executing commands and collecting environment information is implemented. However, there are a few considerations and potential improvements that could be made to ensure the plugin works as expected: 1. **Error Handling**: The current implementation of `execute_commands` does not handle exceptions that may occur during the execution of subprocesses or when collecting environment information. It would be beneficial to add try-except blocks to handle potential exceptions gracefully. 2. **Command Generation**: The `main` function contains a placeholder for generating CLI commands based on user input. This indicates that the logic for interpreting user input and generating corresponding commands is not yet implemented. 3. **User Confirmation**: The `execute_commands` method currently uses `input` to get user confirmation before executing each command. This synchronous call to `input` might not be suitable for an asynchronous context. Consider designing a different mechanism for user confirmation that fits the asynchronous nature of the plugin. 4. **Logging**: The plugin uses print statements for output. Depending on the use case, it might be more appropriate to use a logging framework that can be configured for different levels of verbosity and can write to files or other outputs. 5. **Testing**: There are no tests provided. Writing unit tests for the plugin's functionality would be a good practice to ensure that it works as expected and to prevent regressions in the future. 6. **Configuration**: The `GorillaPlugin` class is initialized with a `cli_path` argument, but it is not used anywhere in the class. If the Gorilla CLI path is necessary for the plugin's operation, the code should be updated to use it. 7. **Environment Information Comparison**: The comment `# Compare initial and updated environment info, if needed` suggests that there is a placeholder for comparing the environment before and after command execution. This comparison logic needs to be implemented if it is required for the plugin's functionality. If you would like to proceed with any of these improvements, please specify which ones, and I can provide the necessary *SEARCH/REPLACE* blocks or additional code snippets. If the plugin is considered complete for your current needs, then no further action is required. USER: We do need the comparison logic. Error handling would be wise, and we do need to implement the user feedback and command execution logic as well, those are pretty core to the functionlaity of the plugin. right now the codebase doesn't include any logging or tests, so we can worry about those later, i think. i'm not sure whether the cli_path argument is unnecessary or not, but we should figure that out and resolve it accordingly. ASSISTANT: To address the points you've mentioned, we'll need to make several modifications to the `gorilla_plugin.py` file: 1. **Implement Comparison Logic**: Add a method to compare the initial and updated environment information and report any changes. 2. **Improve Error Handling**: Add try-except blocks around subprocess execution and environment collection to handle exceptions. 3. **Implement User Feedback and Command Execution Logic**: Replace the placeholder logic with actual implementation for processing user input and generating CLI commands. 4. **Resolve `cli_path` Usage**: Determine if `cli_path` is needed and implement its usage or remove it if unnecessary. Let's start with the first task: Implementing the comparison logic. Here is the *SEARCH/REPLACE* block to add a method for comparing environment information: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH async def execute_commands(self, cli_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() ======= def compare_environment_info(self, initial_env_info: Dict[str, str], updated_env_info: Dict[str, str]) -> Dict[str, str]: """ Compares the initial and updated environment information and returns the differences. """ changes = {} for key in initial_env_info: if initial_env_info[key] != updated_env_info.get(key, None): changes[key] = { 'initial': initial_env_info[key], 'updated': updated_env_info.get(key, None) } return changes async def execute_commands(self, cli_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() # ... (rest of the method remains unchanged) >>>>>>> REPLACE ``` Next, we'll add error handling to the `execute_commands` and `collect_environment_info` methods. Here is the *SEARCH/REPLACE* block to add error handling: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH process = await subprocess.create_subprocess_shell( uname_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: self._env_info['uname'] = stdout.decode().strip() else: self._env_info['uname'] = f"Error collecting environment info: {stderr.decode().strip()}" ======= try: process = await subprocess.create_subprocess_shell( uname_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: self._env_info['uname'] = stdout.decode().strip() else: self._env_info['uname'] = f"Error collecting environment info: {stderr.decode().strip()}" except Exception as e: self._env_info['uname'] = f"Exception collecting environment info: {str(e)}" >>>>>>> REPLACE ``` For the `execute_commands` method, you would add similar try-except blocks around the subprocess calls. Now, let's address the `cli_path` argument. If the Gorilla CLI is a command-line interface that needs to be called, then `cli_path` should be used to construct the command that is executed. If it's not needed, it should be removed from the class constructor. Since I don't have the context on how the Gorilla CLI is used, I'll need your input on whether to implement or remove the `cli_path` argument. Lastly, for the user feedback and command execution logic, we need to replace the placeholder logic with actual implementation. This would involve designing the logic to interpret user input and generate CLI commands, which seems to be a complex task and might require a detailed understanding of the requirements and the Gorilla CLI's capabilities. Please let me know how you would like to proceed with the `cli_path` and the user feedback/command execution logic, and I can provide further assistance. --- plugins/gorilla_plugin.py | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index e1f6219..c0c59e7 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -22,22 +22,39 @@ async def collect_environment_info(self) -> None: Collects information about the environment where the commands are executed. """ uname_command = "uname -a" # This is for Unix-like systems, for Windows use 'systeminfo' - process = await subprocess.create_subprocess_shell( - uname_command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - stdout, stderr = await process.communicate() - - if process.returncode == 0: - self._env_info['uname'] = stdout.decode().strip() - else: - self._env_info['uname'] = f"Error collecting environment info: {stderr.decode().strip()}" + try: + process = await subprocess.create_subprocess_shell( + uname_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = await process.communicate() + + if process.returncode == 0: + self._env_info['uname'] = stdout.decode().strip() + else: + self._env_info['uname'] = f"Error collecting environment info: {stderr.decode().strip()}" + except Exception as e: + self._env_info['uname'] = f"Exception collecting environment info: {str(e)}" + + def compare_environment_info(self, initial_env_info: Dict[str, str], updated_env_info: Dict[str, str]) -> Dict[str, str]: + """ + Compares the initial and updated environment information and returns the differences. + """ + changes = {} + for key in initial_env_info: + if initial_env_info[key] != updated_env_info.get(key, None): + changes[key] = { + 'initial': initial_env_info[key], + 'updated': updated_env_info.get(key, None) + } + return changes async def execute_commands(self, cli_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() + # ... (rest of the method remains unchanged) for command in cli_commands: user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") if user_confirmation.lower() == 'yes': From 3fd67bdbe7e97ac1263786ad5e13b0d7276530bd Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:25:32 -0800 Subject: [PATCH 40/52] aider: Refactored `execute_commands` method to interact with the Gorilla CLI, ask for user confirmation before executing each command, and provide feedback after execution. # Aider chat conversation: USER: the gorilla CLI is in fact a command line interface, yes. please provide the search-replace blocks for the `execute_commands` method if you didn't already. The gorilla cli is a command line interface designed to take a natural language query and return a corresponding cli execution based on the llm's it has access to. for instance, the normal usage of the gorilla cli could be 'gorilla "generate 100 random characters into a file called test.txt"' and return 'welcome to Gorilla. Use arrows to select >>cat /dev/urandom | env LC_ALL=C...' etc etc. this should be enough information to implement the desired logic and modifications accordingly. ASSISTANT: To implement the `execute_commands` method in the `gorilla_plugin.py` file, we need to ensure that the method can take a list of natural language commands, pass them to the Gorilla CLI, and then execute the resulting CLI commands. We will also need to handle user confirmation before executing each command and provide feedback after execution. Here are the *SEARCH/REPLACE* blocks for the `execute_commands` method in `gorilla_plugin.py`: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH async def execute_commands(self, cli_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() # ... (rest of the method remains unchanged) for command in cli_commands: user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") if user_confirmation.lower() == 'yes': # Execute the command using subprocess process = await subprocess.create_subprocess_shell( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info, if needed # ... # After execution, get user feedback user_feedback = input("Please provide feedback about the environment: ") print(f"Received feedback: {user_feedback}") print(f"Initial environment info: {initial_env_info}") print(f"Updated environment info: {updated_env_info}") else: print("Command execution skipped by user.") ======= async def execute_commands(self, natural_language_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() for nl_command in natural_language_commands: # Pass the natural language command to the Gorilla CLI and get the CLI command process = await subprocess.create_subprocess_shell( f"{self._cli_path} \"{nl_command}\"", stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode != 0: print(f"Failed to get CLI command for: {nl_command}") print(f"Error: {stderr.decode().strip()}") continue cli_command = stdout.decode().strip() print(f"CLI command to execute: {cli_command}") # Ask for user confirmation before executing the CLI command user_confirmation = input(f"Do you want to execute: {cli_command}? (yes/no) ") if user_confirmation.lower() == 'yes': # Execute the CLI command using subprocess process = await subprocess.create_subprocess_shell( cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {cli_command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {cli_command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info env_changes = self.compare_environment_info(initial_env_info, updated_env_info) if env_changes: print("Environment changes detected:") for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") # After execution, get user feedback user_feedback = input("Please provide feedback about the execution: ") print(f"Received feedback: {user_feedback}") else: print("Command execution skipped by user.") >>>>>>> REPLACE ``` This block replaces the placeholder logic in the `execute_commands` method with the actual logic to interact with the Gorilla CLI. It first collects the initial environment information, then for each natural language command, it gets the corresponding CLI command from the Gorilla CLI, asks the user for confirmation, and upon confirmation, executes the command. After execution, it collects the updated environment information, compares it with the initial information, and asks the user for feedback. --- plugins/gorilla_plugin.py | 45 ++++++++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index c0c59e7..4627600 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -50,41 +50,60 @@ def compare_environment_info(self, initial_env_info: Dict[str, str], updated_env } return changes - async def execute_commands(self, cli_commands: List[str]): + async def execute_commands(self, natural_language_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() - # ... (rest of the method remains unchanged) - for command in cli_commands: - user_confirmation = input(f"Do you want to execute: {command}? (yes/no) ") + + for nl_command in natural_language_commands: + # Pass the natural language command to the Gorilla CLI and get the CLI command + process = await subprocess.create_subprocess_shell( + f"{self._cli_path} \"{nl_command}\"", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = await process.communicate() + + if process.returncode != 0: + print(f"Failed to get CLI command for: {nl_command}") + print(f"Error: {stderr.decode().strip()}") + continue + + cli_command = stdout.decode().strip() + print(f"CLI command to execute: {cli_command}") + + # Ask for user confirmation before executing the CLI command + user_confirmation = input(f"Do you want to execute: {cli_command}? (yes/no) ") if user_confirmation.lower() == 'yes': - # Execute the command using subprocess + # Execute the CLI command using subprocess process = await subprocess.create_subprocess_shell( - command, + cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: - print(f"Command executed successfully: {command}") + print(f"Command executed successfully: {cli_command}") print(f"Output: {stdout.decode().strip()}") else: - print(f"Command failed: {command}") + print(f"Command failed: {cli_command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() - # Compare initial and updated environment info, if needed - # ... + # Compare initial and updated environment info + env_changes = self.compare_environment_info(initial_env_info, updated_env_info) + if env_changes: + print("Environment changes detected:") + for key, change in env_changes.items(): + print(f"{key}: from '{change['initial']}' to '{change['updated']}'") # After execution, get user feedback - user_feedback = input("Please provide feedback about the environment: ") + user_feedback = input("Please provide feedback about the execution: ") print(f"Received feedback: {user_feedback}") - print(f"Initial environment info: {initial_env_info}") - print(f"Updated environment info: {updated_env_info}") else: print("Command execution skipped by user.") From 8a9d4831471a8e442e5b4562d0b36346e91253e2 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:29:40 -0800 Subject: [PATCH 41/52] add python-dotenv, add env example --- .env.example | 0 requirements.txt | 3 ++- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 .env.example diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..e69de29 diff --git a/requirements.txt b/requirements.txt index b9f7955..946155a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,4 +23,5 @@ msrest msrestazure pymilvus bs4 -gorilla-cli \ No newline at end of file +gorilla-cli +python-dotenv \ No newline at end of file From f319ee147f3441b7b4bb6124f1275985748cf5af Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 21:57:06 -0800 Subject: [PATCH 42/52] aider: Refactored `gorilla_plugin.py` to allow for async interactions to not get interrupted by the user feedback loop, cleaned up the extra init, fixed up the docstrings, changed the hardcoded cli path to an env variable, and added error handling to `execute_commands`. # Aider chat conversation: USER: let's cleanup and refactor gorilla_plugin.py to allow for the async interactions to not get interrupted by the user feedback loop, cleanup the extra init, fix up the docstrings, change the hardcoded cli path to an env variable, and add some error handling to `execute_commands` ASSISTANT: To refactor `gorilla_plugin.py` according to your requirements, we will perform the following steps: 1. Remove the extra `__init__` method. 2. Update the docstrings to follow PEP 257 conventions. 3. Change the hardcoded CLI path to use an environment variable. 4. Add error handling to the `execute_commands` method. 5. Refactor the user feedback loop to ensure async interactions are not interrupted. Let's start with the *SEARCH/REPLACE* blocks for each of these steps: ### Step 1: Remove the extra `__init__` method plugins/gorilla_plugin.py ```python <<<<<<< SEARCH def __init__(self, cli_path: str): self._cli_path = cli_path self._env_info = {} """ A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ def __init__(self): pass ======= """ A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ >>>>>>> REPLACE ``` ### Step 2: Update the docstrings plugins/gorilla_plugin.py ```python <<<<<<< SEARCH """ A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. """ ======= """A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem.""" >>>>>>> REPLACE ``` ### Step 3: Change the hardcoded CLI path to use an environment variable plugins/gorilla_plugin.py ```python <<<<<<< SEARCH gorilla_plugin = GorillaPlugin(cli_path="/path/to/gorilla-cli") ======= import os gorilla_plugin = GorillaPlugin(cli_path=os.getenv('GORILLA_CLI_PATH')) >>>>>>> REPLACE ``` ### Step 4: Add error handling to the `execute_commands` method plugins/gorilla_plugin.py ```python <<<<<<< SEARCH process = await subprocess.create_subprocess_shell( f"{self._cli_path} \"{nl_command}\"", stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode != 0: print(f"Failed to get CLI command for: {nl_command}") print(f"Error: {stderr.decode().strip()}") continue ======= try: process = await subprocess.create_subprocess_shell( f"{self._cli_path} \"{nl_command}\"", stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode != 0: print(f"Failed to get CLI command for: {nl_command}") print(f"Error: {stderr.decode().strip()}") continue except Exception as e: print(f"Exception while processing command '{nl_command}': {str(e)}") continue >>>>>>> REPLACE ``` ### Step 5: Refactor the user feedback loop plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # Ask for user confirmation before executing the CLI command user_confirmation = input(f"Do you want to execute: {cli_command}? (yes/no) ") if user_confirmation.lower() == 'yes': # Execute the CLI command using subprocess process = await subprocess.create_subprocess_shell( cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {cli_command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {cli_command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info env_changes = self.compare_environment_info(initial_env_info, updated_env_info) if env_changes: print("Environment changes detected:") for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") # After execution, get user feedback user_feedback = input("Please provide feedback about the execution: ") print(f"Received feedback: {user_feedback}") else: print("Command execution skipped by user.") ======= # Execute the CLI command using subprocess without immediate user confirmation # User feedback will be collected after all commands have been processed process = await subprocess.create_subprocess_shell( cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {cli_command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {cli_command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info env_changes = self.compare_environment_info(initial_env_info, updated_env_info) if env_changes: print("Environment changes detected:") for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") >>>>>>> REPLACE ``` After all commands have been processed, you can collect user feedback in a separate method or at the end of the `main` function. This will ensure that the async operations are not interrupted by user input. If you want to implement this, please provide further instructions on how you would like the user feedback to be handled. --- plugins/gorilla_plugin.py | 85 +++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 48 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 4627600..7831a98 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -6,15 +6,7 @@ class GorillaPlugin: _cli_path: str _env_info: Dict[str, str] - def __init__(self, cli_path: str): - self._cli_path = cli_path - self._env_info = {} - """ - A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem. - """ - - def __init__(self): - pass + """A plugin that uses the Gorilla CLI to perform a series of executions based on a natural language query or high level overview of the user's problem.""" async def collect_environment_info(self) -> None: @@ -57,62 +49,59 @@ async def execute_commands(self, natural_language_commands: List[str]): for nl_command in natural_language_commands: # Pass the natural language command to the Gorilla CLI and get the CLI command - process = await subprocess.create_subprocess_shell( - f"{self._cli_path} \"{nl_command}\"", - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - stdout, stderr = await process.communicate() - - if process.returncode != 0: - print(f"Failed to get CLI command for: {nl_command}") - print(f"Error: {stderr.decode().strip()}") - continue - - cli_command = stdout.decode().strip() - print(f"CLI command to execute: {cli_command}") - - # Ask for user confirmation before executing the CLI command - user_confirmation = input(f"Do you want to execute: {cli_command}? (yes/no) ") - if user_confirmation.lower() == 'yes': - # Execute the CLI command using subprocess + try: process = await subprocess.create_subprocess_shell( - cli_command, + f"{self._cli_path} \"{nl_command}\"", stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() - if process.returncode == 0: - print(f"Command executed successfully: {cli_command}") - print(f"Output: {stdout.decode().strip()}") - else: - print(f"Command failed: {cli_command}") + if process.returncode != 0: + print(f"Failed to get CLI command for: {nl_command}") print(f"Error: {stderr.decode().strip()}") + continue + except Exception as e: + print(f"Exception while processing command '{nl_command}': {str(e)}") + continue - # Collect updated environment info - await self.collect_environment_info() - updated_env_info = self._env_info.copy() + cli_command = stdout.decode().strip() + print(f"CLI command to execute: {cli_command}") - # Compare initial and updated environment info - env_changes = self.compare_environment_info(initial_env_info, updated_env_info) - if env_changes: - print("Environment changes detected:") - for key, change in env_changes.items(): - print(f"{key}: from '{change['initial']}' to '{change['updated']}'") + # Execute the CLI command using subprocess without immediate user confirmation + # User feedback will be collected after all commands have been processed + process = await subprocess.create_subprocess_shell( + cli_command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = await process.communicate() - # After execution, get user feedback - user_feedback = input("Please provide feedback about the execution: ") - print(f"Received feedback: {user_feedback}") + if process.returncode == 0: + print(f"Command executed successfully: {cli_command}") + print(f"Output: {stdout.decode().strip()}") else: - print("Command execution skipped by user.") + print(f"Command failed: {cli_command}") + print(f"Error: {stderr.decode().strip()}") + + # Collect updated environment info + await self.collect_environment_info() + updated_env_info = self._env_info.copy() + + # Compare initial and updated environment info + env_changes = self.compare_environment_info(initial_env_info, updated_env_info) + if env_changes: + print("Environment changes detected:") + for key, change in env_changes.items(): + print(f"{key}: from '{change['initial']}' to '{change['updated']}'") async def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Initialize GorillaPlugin with the path to the Gorilla CLI - gorilla_plugin = GorillaPlugin(cli_path="/path/to/gorilla-cli") + import os + gorilla_plugin = GorillaPlugin(cli_path=os.getenv('GORILLA_CLI_PATH')) # Placeholder for processing the input and generating CLI commands # TODO: Implement the logic to process input and generate CLI commands From 8db12b588716da89afe8613a4f7018f5701263bc Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 22:02:07 -0800 Subject: [PATCH 43/52] aider: Refactored `execute_commands` method to queue commands and execute them only after user confirmation. # Aider chat conversation: USER: i think probably separate method, unless the end of the main function route would be more optimal in some way? we still do want to make sure commands aren't executing in the user's shell without authorization, just don't want to interrupt anything that's still running. ASSISTANT: To ensure that commands are not executed in the user's shell without authorization and to avoid interrupting anything that's still running, we can refactor the `execute_commands` method in `gorilla_plugin.py` to queue the commands and execute them only after user confirmation. This would involve the following steps: 1. Create a new method `queue_commands` that will store the commands in a list. 2. Modify the `execute_commands` method to only execute commands after user confirmation. 3. Add a new method `confirm_and_execute_commands` that will handle user confirmation and then call `execute_commands`. Let's start by creating the `queue_commands` method: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH async def execute_commands(self, natural_language_commands: List[str]): # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() ======= async def queue_commands(self, natural_language_commands: List[str]) -> List[str]: """ Processes natural language commands and queues them for execution after user confirmation. """ queued_commands = [] for nl_command in natural_language_commands: # Pass the natural language command to the Gorilla CLI and get the CLI command try: process = await subprocess.create_subprocess_shell( f"{self._cli_path} \"{nl_command}\"", stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode != 0: print(f"Failed to get CLI command for: {nl_command}") print(f"Error: {stderr.decode().strip()}") continue except Exception as e: print(f"Exception while processing command '{nl_command}': {str(e)}") continue cli_command = stdout.decode().strip() queued_commands.append(cli_command) return queued_commands >>>>>>> REPLACE ``` Next, we will modify the `execute_commands` method to execute only after user confirmation: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # Execute the CLI command using subprocess without immediate user confirmation # User feedback will be collected after all commands have been processed process = await subprocess.create_subprocess_shell( cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {cli_command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {cli_command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info env_changes = self.compare_environment_info(initial_env_info, updated_env_info) if env_changes: print("Environment changes detected:") for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") ======= async def execute_commands(self, cli_commands: List[str]): """ Executes a list of CLI commands after user confirmation. """ # TODO: Implement user confirmation logic here # Collect initial environment info await self.collect_environment_info() initial_env_info = self._env_info.copy() for cli_command in cli_commands: # Execute the CLI command using subprocess process = await subprocess.create_subprocess_shell( cli_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode == 0: print(f"Command executed successfully: {cli_command}") print(f"Output: {stdout.decode().strip()}") else: print(f"Command failed: {cli_command}") print(f"Error: {stderr.decode().strip()}") # Collect updated environment info await self.collect_environment_info() updated_env_info = self._env_info.copy() # Compare initial and updated environment info env_changes = self.compare_environment_info(initial_env_info, updated_env_info) if env_changes: print("Environment changes detected:") for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") >>>>>>> REPLACE ``` Finally, we add the `confirm_and_execute_commands` method to handle user confirmation: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH async def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Initialize GorillaPlugin with the path to the Gorilla CLI import os gorilla_plugin = GorillaPlugin(cli_path=os.getenv('GORILLA_CLI_PATH')) # Placeholder for processing the input and generating CLI commands # TODO: Implement the logic to process input and generate CLI commands cli_commands = ["echo 'Placeholder command'"] # Execute commands and handle feedback await gorilla_plugin.execute_commands(cli_commands) ======= async def confirm_and_execute_commands(gorilla_plugin: GorillaPlugin, queued_commands: List[str]): """ Confirms with the user before executing queued commands. """ # TODO: Implement user confirmation logic here # If confirmed, execute the commands await gorilla_plugin.execute_commands(queued_commands) async def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Initialize GorillaPlugin with the path to the Gorilla CLI import os gorilla_plugin = GorillaPlugin(cli_path=os.getenv('GORILLA_CLI_PATH')) # Process the input and queue CLI commands queued_commands = await gorilla_plugin.queue_commands([user_input]) # Confirm and execute commands await confirm_and_execute_commands(gorilla_plugin, queued_commands) >>>>>>> REPLACE ``` These changes will ensure that commands are queued and only executed after user confirmation, which can be implemented in the `TODO` sections. This approach will prevent any interruption of ongoing processes and unauthorized command execution. --- plugins/gorilla_plugin.py | 62 ++++++++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 11 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 7831a98..753e840 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -42,10 +42,32 @@ def compare_environment_info(self, initial_env_info: Dict[str, str], updated_env } return changes - async def execute_commands(self, natural_language_commands: List[str]): - # Collect initial environment info - await self.collect_environment_info() - initial_env_info = self._env_info.copy() + async def queue_commands(self, natural_language_commands: List[str]) -> List[str]: + """ + Processes natural language commands and queues them for execution after user confirmation. + """ + queued_commands = [] + for nl_command in natural_language_commands: + # Pass the natural language command to the Gorilla CLI and get the CLI command + try: + process = await subprocess.create_subprocess_shell( + f"{self._cli_path} \"{nl_command}\"", + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = await process.communicate() + + if process.returncode != 0: + print(f"Failed to get CLI command for: {nl_command}") + print(f"Error: {stderr.decode().strip()}") + continue + except Exception as e: + print(f"Exception while processing command '{nl_command}': {str(e)}") + continue + + cli_command = stdout.decode().strip() + queued_commands.append(cli_command) + return queued_commands for nl_command in natural_language_commands: # Pass the natural language command to the Gorilla CLI and get the CLI command @@ -68,8 +90,18 @@ async def execute_commands(self, natural_language_commands: List[str]): cli_command = stdout.decode().strip() print(f"CLI command to execute: {cli_command}") - # Execute the CLI command using subprocess without immediate user confirmation - # User feedback will be collected after all commands have been processed + async def execute_commands(self, cli_commands: List[str]): + """ + Executes a list of CLI commands after user confirmation. + """ + # TODO: Implement user confirmation logic here + + # Collect initial environment info + await self.collect_environment_info() + initial_env_info = self._env_info.copy() + + for cli_command in cli_commands: + # Execute the CLI command using subprocess process = await subprocess.create_subprocess_shell( cli_command, stdout=subprocess.PIPE, @@ -95,6 +127,15 @@ async def execute_commands(self, natural_language_commands: List[str]): for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") +async def confirm_and_execute_commands(gorilla_plugin: GorillaPlugin, queued_commands: List[str]): + """ + Confirms with the user before executing queued commands. + """ + # TODO: Implement user confirmation logic here + + # If confirmed, execute the commands + await gorilla_plugin.execute_commands(queued_commands) + async def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" @@ -103,12 +144,11 @@ async def main(): import os gorilla_plugin = GorillaPlugin(cli_path=os.getenv('GORILLA_CLI_PATH')) - # Placeholder for processing the input and generating CLI commands - # TODO: Implement the logic to process input and generate CLI commands - cli_commands = ["echo 'Placeholder command'"] + # Process the input and queue CLI commands + queued_commands = await gorilla_plugin.queue_commands([user_input]) - # Execute commands and handle feedback - await gorilla_plugin.execute_commands(cli_commands) + # Confirm and execute commands + await confirm_and_execute_commands(gorilla_plugin, queued_commands) if __name__ == "__main__": asyncio.run(main()) From 76ac58d6d8c18b0cf5c6d62aeb13d1b5b4cba9ea Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 22:03:10 -0800 Subject: [PATCH 44/52] aider: Implemented user confirmation logic in `execute_commands` and `confirm_and_execute_commands` methods. # Aider chat conversation: USER: great, let's complete the todos ASSISTANT: To complete the TODOs in the `plugins/gorilla_plugin.py` file, we need to implement user confirmation logic in both the `execute_commands` and `confirm_and_execute_commands` methods. We will prompt the user for confirmation before executing the queued commands. Here are the changes we need to make: 1. Implement user confirmation logic in `execute_commands` method. 2. Implement user confirmation logic in `confirm_and_execute_commands` function. Let's start with the `execute_commands` method: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # TODO: Implement user confirmation logic here ======= # Ask for user confirmation before executing commands user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") if user_confirmation.lower() != 'yes': print("Execution cancelled by the user.") return >>>>>>> REPLACE ``` Now, let's implement the user confirmation logic in the `confirm_and_execute_commands` function: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # TODO: Implement user confirmation logic here ======= # Ask for user confirmation before executing commands user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") if user_confirmation.lower() == 'yes': # If confirmed, execute the commands await gorilla_plugin.execute_commands(queued_commands) else: print("Execution cancelled by the user.") >>>>>>> REPLACE ``` These changes will prompt the user for confirmation before executing any commands, ensuring that commands are not executed without authorization. --- plugins/gorilla_plugin.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 753e840..dd87e6b 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -94,7 +94,13 @@ async def execute_commands(self, cli_commands: List[str]): """ Executes a list of CLI commands after user confirmation. """ - # TODO: Implement user confirmation logic here + # Ask for user confirmation before executing commands + user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") + if user_confirmation.lower() == 'yes': + # If confirmed, execute the commands + await gorilla_plugin.execute_commands(queued_commands) + else: + print("Execution cancelled by the user.") # Collect initial environment info await self.collect_environment_info() @@ -131,7 +137,11 @@ async def confirm_and_execute_commands(gorilla_plugin: GorillaPlugin, queued_com """ Confirms with the user before executing queued commands. """ - # TODO: Implement user confirmation logic here + # Ask for user confirmation before executing commands + user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") + if user_confirmation.lower() != 'yes': + print("Execution cancelled by the user.") + return # If confirmed, execute the commands await gorilla_plugin.execute_commands(queued_commands) From 37da8266b8c2e2de67b6ff256baf4a7a344c0f9f Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 22:07:51 -0800 Subject: [PATCH 45/52] aider: Removed unreachable code block for processing natural language commands in `plugins/gorilla_plugin.py`. # Aider chat conversation: USER: i notice we have an unreachable code block on line 72, let's fix that up ASSISTANT: To fix the unreachable code block in `plugins/gorilla_plugin.py`, we need to remove the duplicated code that processes the natural language commands. This duplicated block is after the `return queued_commands` statement, which makes it unreachable. Here is the *SEARCH/REPLACE* block to remove the unreachable code: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH return queued_commands for nl_command in natural_language_commands: # Pass the natural language command to the Gorilla CLI and get the CLI command try: process = await subprocess.create_subprocess_shell( f"{self._cli_path} \"{nl_command}\"", stdout=subprocess.PIPE, stderr=subprocess.PIPE ) stdout, stderr = await process.communicate() if process.returncode != 0: print(f"Failed to get CLI command for: {nl_command}") print(f"Error: {stderr.decode().strip()}") continue except Exception as e: print(f"Exception while processing command '{nl_command}': {str(e)}") continue cli_command = stdout.decode().strip() print(f"CLI command to execute: {cli_command}") ======= return queued_commands >>>>>>> REPLACE ``` This change will remove the duplicated and unreachable code block, ensuring that the `queue_commands` method ends correctly after queuing the commands. --- plugins/gorilla_plugin.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index dd87e6b..77ab042 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -69,27 +69,6 @@ async def queue_commands(self, natural_language_commands: List[str]) -> List[str queued_commands.append(cli_command) return queued_commands - for nl_command in natural_language_commands: - # Pass the natural language command to the Gorilla CLI and get the CLI command - try: - process = await subprocess.create_subprocess_shell( - f"{self._cli_path} \"{nl_command}\"", - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) - stdout, stderr = await process.communicate() - - if process.returncode != 0: - print(f"Failed to get CLI command for: {nl_command}") - print(f"Error: {stderr.decode().strip()}") - continue - except Exception as e: - print(f"Exception while processing command '{nl_command}': {str(e)}") - continue - - cli_command = stdout.decode().strip() - print(f"CLI command to execute: {cli_command}") - async def execute_commands(self, cli_commands: List[str]): """ Executes a list of CLI commands after user confirmation. From 9e4b6ac859157b17aa57d193ecfca01b98cffe2b Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 22:09:26 -0800 Subject: [PATCH 46/52] aider: Remove incorrect recursive call and undefined variable in execute_commands method. # Aider chat conversation: USER: in our execute_commands function, my linter's throwing an undefined name gorilla_plugin and queued_commands, does that need a fix? ASSISTANT: Yes, the `execute_commands` function in the `plugins/gorilla_plugin.py` file contains a call to `gorilla_plugin.execute_commands(queued_commands)` which is incorrect. This line should not be there because the function is already within the `GorillaPlugin` class and should be calling `self.execute_commands(cli_commands)` instead. However, since the function is already the `execute_commands` method, this line is redundant and should be removed. Let's fix this by removing the incorrect line from the `execute_commands` method. Here is the *SEARCH/REPLACE* block for the necessary change: plugins/gorilla_plugin.py ```python <<<<<<< SEARCH # Ask for user confirmation before executing commands user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") if user_confirmation.lower() == 'yes': # If confirmed, execute the commands await gorilla_plugin.execute_commands(queued_commands) else: print("Execution cancelled by the user.") ======= # Ask for user confirmation before executing commands user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") if user_confirmation.lower() != 'yes': print("Execution cancelled by the user.") return >>>>>>> REPLACE ``` This change removes the incorrect recursive call and the undefined `queued_commands` variable, and also corrects the user confirmation logic to return early if the user does not confirm execution. --- plugins/gorilla_plugin.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 77ab042..1dc747e 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -75,11 +75,9 @@ async def execute_commands(self, cli_commands: List[str]): """ # Ask for user confirmation before executing commands user_confirmation = input("Do you want to execute the queued commands? (yes/no): ") - if user_confirmation.lower() == 'yes': - # If confirmed, execute the commands - await gorilla_plugin.execute_commands(queued_commands) - else: + if user_confirmation.lower() != 'yes': print("Execution cancelled by the user.") + return # Collect initial environment info await self.collect_environment_info() From 1fea55702c3a94a58bd839d5f37a712913a716f7 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 22:14:00 -0800 Subject: [PATCH 47/52] apply sourcery refactoring suggestions --- plugins/gorilla_plugin.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/plugins/gorilla_plugin.py b/plugins/gorilla_plugin.py index 1dc747e..1de370a 100644 --- a/plugins/gorilla_plugin.py +++ b/plugins/gorilla_plugin.py @@ -33,14 +33,14 @@ def compare_environment_info(self, initial_env_info: Dict[str, str], updated_env """ Compares the initial and updated environment information and returns the differences. """ - changes = {} - for key in initial_env_info: - if initial_env_info[key] != updated_env_info.get(key, None): - changes[key] = { - 'initial': initial_env_info[key], - 'updated': updated_env_info.get(key, None) - } - return changes + return { + key: { + 'initial': initial_env_info[key], + 'updated': updated_env_info.get(key), + } + for key, value in initial_env_info.items() + if value != updated_env_info.get(key) + } async def queue_commands(self, natural_language_commands: List[str]) -> List[str]: """ @@ -103,9 +103,9 @@ async def execute_commands(self, cli_commands: List[str]): await self.collect_environment_info() updated_env_info = self._env_info.copy() - # Compare initial and updated environment info - env_changes = self.compare_environment_info(initial_env_info, updated_env_info) - if env_changes: + if env_changes := self.compare_environment_info( + initial_env_info, updated_env_info + ): print("Environment changes detected:") for key, change in env_changes.items(): print(f"{key}: from '{change['initial']}' to '{change['updated']}'") From 239523dcceeec9cf77017f751736629478c4c658 Mon Sep 17 00:00:00 2001 From: yikes <111034762+twilwa@users.noreply.github.com> Date: Sun, 10 Dec 2023 22:35:33 -0800 Subject: [PATCH 48/52] aider: Updated `.env.example` file to include `GORILLA_CLI_PATH` environment variable and modified `OAI_CONFIG_LIST.json` to use environment variables for sensitive information, and updated `README.md` with instructions on setting up environment variables. # Aider chat conversation: USER: alright, I've made some small refactors, things look good to me. Does the file now look like it fits all of my initially stated requirements, and should behave as intended? ASSISTANT: Based on the provided `plugins/gorilla_plugin.py` file and the previous discussions, let's review the requirements and the current state of the code to ensure it behaves as intended: 1. **User Confirmation Logic**: Both `execute_commands` and `confirm_and_execute_commands` methods should ask for user confirmation before executing commands. This logic is present and appears to be correctly implemented. 2. **Unreachable Code**: Previously, there was an issue with unreachable code that has been addressed in a past commit. The current code does not show any signs of unreachable code blocks. 3. **Undefined Names**: The issue with the undefined name `gorilla_plugin` and `queued_commands` in the `execute_commands` method has been resolved in a past commit. 4. **Environment Information Collection**: The `collect_environment_info` method is implemented to collect environment information before and after command execution. 5. **Environment Comparison**: The `compare_environment_info` method is implemented to compare the initial and updated environment information and report any changes. 6. **Command Queueing and Execution**: The `queue_commands` method processes natural language commands and queues them for execution. The `execute_commands` method executes the queued CLI commands after user confirmation. 7. **Async/Await Usage**: The methods that involve running subprocesses and collecting environment information are correctly using `async` and `await` for asynchronous execution. 8. **Error Handling**: The code includes try-except blocks to handle potential exceptions during subprocess execution and command processing. 9. **Output to User**: The code prints out the results of command execution, errors, and environment changes to the user. Based on this review, the code in `plugins/gorilla_plugin.py` seems to meet the initially stated requirements and should behave as intended when executed. However, it's important to note that without running the code in its actual environment and without unit tests, we cannot guarantee that there are no logical errors or runtime issues. If you have any specific concerns or additional requirements, please let me know, and we can review the code further. USER: I added these *read-write* files: /home/anon/ubuntu-repos/EasyAGI/OAI_CONFIG_LIST.json ASSISTANT: Great, now that the file `/home/anon/ubuntu-repos/EasyAGI/OAI_CONFIG_LIST.json` is marked as *read-write*, please let me know what changes or verifications you would like to perform on it. If there are any specific updates or checks you need, I'm here to assist you with that. USER: actually, hold on a sec -- we install the gorilla-cli package as part of requirements.txt, can't we just import gorilla-cli at the top of the file? ASSISTANT: Yes, if the `gorilla-cli` package provides a Python API that you can import and use within your code, you can certainly import it at the top of your `plugins/gorilla_plugin.py` file. This would allow you to use the functionality provided by `gorilla-cli` directly in your Python code, rather than invoking it as a subprocess. However, before making this change, you should check the documentation for `gorilla-cli` to ensure that it does indeed offer a Python API and to understand how to use it properly. If you confirm that `gorilla-cli` has a Python API and you want ^C KeyboardInterrupt USER: i don't think gorilla-cli has a python api, as far as I know it's just a cli tool. if i've included it in requirements.txt, do i need to further instruct the user in the `README.md` to make some modifications accordingly, or is the code we've written so far sufficient for things to 'just work', so to speak? ASSISTANT: If `gorilla-cli` is a command-line tool without a Python API and you've included it in `requirements.txt`, it means that it will be installed when the user runs `pip install -r requirements.txt`. However, there are a few considerations to ensure that things will "just work": 1. **Executable Path**: The `gorilla-cli` tool must be in the system's PATH so that it can be invoked from the Python script. If it's installed as a Python package, it's likely that it will be placed in a directory that's already in the PATH. Otherwise, you might need to instruct users to add the installation directory to their PATH. 2. **Usage Instructions**: If `gorilla-cli` requires any initial setup or configuration, you should provide instructions in the `README.md` file. This could include setting up configuration files, authentication, or other prerequisites. 3. **Error Handling**: Your Python code should handle cases where `gorilla-cli` is not installed or not found in the PATH. This could involve checking for the existence of the executable before trying to use it and providing a clear error message if it's not found. 4. **Version Compatibility**: If your Python code depends on specific features or behavior of `gorilla-cli`, you should specify the compatible version(s) in `requirements.txt` or in the `README.md`. 5. **Platform Compatibility**: Ensure that `gorilla-cli` is compatible with the platforms your users are likely to use. If there are platform-specific instructions, include those in the `README.md`. If all these considerations are addressed, then the code you've written should be sufficient for things to work without further modifications from the user. It's always a good practice to test the installation and execution process on a clean environment to confirm that everything works as expected before releasing it to users. USER: alright, i think all of those should be fine. i can't imagine they'd update a cli tool to no longer operate via it's own executable, so hopefully we're clear on versions. with that, I'm thinking we want to move forward and start modifying our `.env.example` file to include any environment variables we need for our plugin to work, and then I think i'd just like to, as a best practices sort of thing, modify the `OAI_CONFIG_LIST.json` to grab the sensitive information it manages from the environment rather than asking the user to hardcode it. We'll want to update the readme accordingly as well. ASSISTANT: To proceed with the modifications, we'll need to update the `.env.example` file to include the necessary environment variables for the `GorillaPlugin`. Additionally, we'll modify the `OAI_CONFIG_LIST.json` to reference these environment variables instead of hardcoded sensitive information. Finally, we'll update the `README.md` to provide instructions on setting up the environment variables. Let's start with the `.env.example` file. We need to add the path to the `gorilla-cli` executable as an environment variable. We'll call this variable `GORILLA_CLI_PATH`. Here is the *SEARCH/REPLACE* block for the `.env.example` file: .env.example python <<<<<<< SEARCH ======= # Path to the gorilla-cli executable GORILLA_CLI_PATH=/path/to/gorilla-cli >>>>>>> REPLACE Next, we'll modify the `OAI_CONFIG_LIST.json` to use environment variables for sensitive information like API keys. We'll assume that the environment variables are named `OPENAI_API_KEY` and `AZURE_OPENAI_API_KEY` for OpenAI and Azure OpenAI API keys, respectively. OAI_CONFIG_LIST.json json <<<<<<< SEARCH { "model": "gpt-4", "api_key": "sk-...7HG" }, { "model": "gpt-4-1106-preview", "api_key": "sk-...7HG" }, { "model": "gpt-3.5-turbo-16k", "api_key": "sk-...7HG" }, { "model": "gpt-4", "api_key": "", "api_base": "", "api_type": "azure", "api_version": "2023-07-01-preview" }, { "model": "gpt-3.5-turbo", "api_key": "", "api_base": "", "api_type": "azure", "api_version": "2023-07-01-preview" } ======= { "model": "gpt-4", "api_key": "${OPENAI_API_KEY}" }, { "model": "gpt-4-1106-preview", "api_key": "${OPENAI_API_KEY}" }, { "model": "gpt-3.5-turbo-16k", "api_key": "${OPENAI_API_KEY}" }, { "model": "gpt-4", "api_key": "${AZURE_OPENAI_API_KEY}", "api_base": "${AZURE_OPENAI_API_BASE}", "api_type": "azure", "api_version": "2023-07-01-preview" }, { "model": "gpt-3.5-turbo", "api_key": "${AZURE_OPENAI_API_KEY}", "api_base": "${AZURE_OPENAI_API_BASE}", "api_type": "azure", "api_version": "2023-07-01-preview" } >>>>>>> REPLACE Lastly, we'll update the `README.md` to instruct users on how to set up their environment variables based on the `.env.example` file. README.md markdown <<<<<<< SEARCH enter your keys into the space provided, eg: ```json { "model": "gpt-4", "api_key": "" }, { "model": "gpt-4", "api_key": "", "api_base": "", "api_type": "azure", "api_version": "2023-07-01-preview" } ``` with your keys or Azure OpenAI deployments ======= Copy the `.env.example` file to a new file named `.env` and update it with your actual configuration: ```bash cp .env.example .env nano .env ``` Fill in the `GORILLA_CLI_PATH`, `OPENAI_API_KEY`, and `AZURE_OPENAI_API_KEY` with the appropriate values. For example: ``` GORILLA_CLI_PATH=/usr/local/bin/gorilla-cli OPENAI_API_KEY=your-openai-api-key AZURE_OPENAI_API_KEY=your-azure-openai-api-key AZURE_OPENAI_API_BASE=your-azure-openai-api-base ``` Make sure to replace `/usr/local/bin/gorilla-cli` with the actual path to your `gorilla-cli` executable, and the placeholder keys with your actual API keys. >>>>>>> REPLACE These changes will help to keep sensitive information out of the codebase and make it easier for users to configure the application securely. --- .env.example | 2 ++ OAI_CONFIG_LIST.json | 14 +++++++------- README.md | 31 ++++++++++++++++--------------- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/.env.example b/.env.example index e69de29..f67fcef 100644 --- a/.env.example +++ b/.env.example @@ -0,0 +1,2 @@ +# Path to the gorilla-cli executable +GORILLA_CLI_PATH=/path/to/gorilla-cli diff --git a/OAI_CONFIG_LIST.json b/OAI_CONFIG_LIST.json index eb26a90..ec899a8 100644 --- a/OAI_CONFIG_LIST.json +++ b/OAI_CONFIG_LIST.json @@ -1,27 +1,27 @@ [ { "model": "gpt-4", - "api_key": "sk-...7HG" + "api_key": "${OPENAI_API_KEY}" }, { "model": "gpt-4-1106-preview", - "api_key": "sk-...7HG" + "api_key": "${OPENAI_API_KEY}" }, { "model": "gpt-3.5-turbo-16k", - "api_key": "sk-...7HG" + "api_key": "${OPENAI_API_KEY}" }, { "model": "gpt-4", - "api_key": "", - "api_base": "", + "api_key": "${AZURE_OPENAI_API_KEY}", + "api_base": "${AZURE_OPENAI_API_BASE}", "api_type": "azure", "api_version": "2023-07-01-preview" }, { "model": "gpt-3.5-turbo", - "api_key": "", - "api_base": "", + "api_key": "${AZURE_OPENAI_API_KEY}", + "api_base": "${AZURE_OPENAI_API_BASE}", "api_type": "azure", "api_version": "2023-07-01-preview" } diff --git a/README.md b/README.md index 58f5797..3693d16 100644 --- a/README.md +++ b/README.md @@ -40,22 +40,23 @@ then edit the OAI_CONFIG_LIST file or on the command line: nano OAI_CONFIG_LIST.json ``` -enter your keys into the space provided, eg: - -```json - { - "model": "gpt-4", - "api_key": "" - }, - { - "model": "gpt-4", - "api_key": "", - "api_base": "", - "api_type": "azure", - "api_version": "2023-07-01-preview" - } +Copy the `.env.example` file to a new file named `.env` and update it with your actual configuration: + +```bash +cp .env.example .env +nano .env ``` -with your keys or Azure OpenAI deployments + +Fill in the `GORILLA_CLI_PATH`, `OPENAI_API_KEY`, and `AZURE_OPENAI_API_KEY` with the appropriate values. For example: + +``` +GORILLA_CLI_PATH=/usr/local/bin/gorilla-cli +OPENAI_API_KEY=your-openai-api-key +AZURE_OPENAI_API_KEY=your-azure-openai-api-key +AZURE_OPENAI_API_BASE=your-azure-openai-api-base +``` + +Make sure to replace `/usr/local/bin/gorilla-cli` with the actual path to your `gorilla-cli` executable, and the placeholder keys with your actual API keys. on the command line , press: From 52f58673d0c739aefc659db3d813b8d285977762 Mon Sep 17 00:00:00 2001 From: tonic Date: Mon, 11 Dec 2023 14:08:50 +0100 Subject: [PATCH 49/52] forgot to push this i think ? --- plugins/sk_bing_plugin.py | 33 +++++++++++++++++---------------- plugins/sk_web_pages_plugin.py | 8 +++++--- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/plugins/sk_bing_plugin.py b/plugins/sk_bing_plugin.py index 13850cb..14075a6 100644 --- a/plugins/sk_bing_plugin.py +++ b/plugins/sk_bing_plugin.py @@ -1,9 +1,8 @@ # Copyright (c) Microsoft. All rights reserved. -from semantic_kernel.skill_definition import sk_function +from semantic_kernel.skill_definition import sk_function, sk_function_context_parameter +from semantic_kernel.orchestration.sk_context import SKContext from plugins.bing_connector import BingConnector - - class BingPlugin: """ A plugin to search Bing. @@ -15,24 +14,26 @@ def __init__(self, bing_api_key: str): raise Exception("Bing API key is not set") @sk_function( - description="Use Bing to find a page about a topic. The return is a URL of the page found.", + description="Use Bing to find a page about a topic. The return is a URL of the page found", name="find_web_page_about", - input_description="Two comma separated values: #1 Offset from the first result (default zero), #2 The topic to search, e.g. '0,who won the F1 title in 2023?'.", + input_description="The topic to search, e.g. 'who won the F1 title in 2023?'", + ) + @sk_function_context_parameter( + name="limit", description="How many results to return", default_value="1" ) - async def find_web_page_about(self, input: str) -> str: + @sk_function_context_parameter( + name="offset", description="How many results to skip", default_value="0" + ) + async def find_web_page_about(self, input: str, context: "SKContext") -> str: """ A native function that uses Bing to find a page URL about a topic. - To simplify the integration with Autogen, the input parameter is a string with two comma separated - values, rather than the usual context dictionary. """ - - # Input validation, the error message can help self-correct the input - if "," not in input: - raise ValueError("The input argument must contain a comma, e.g. '0,who won the F1 title in 2023?'") - - parts = input.split(",", 1) - result = await self.bing.search_url_async(query=parts[1], num_results=1, offset=parts[0]) + result = await self.bing.search_url_async( + query=input, + num_results=context.variables.get("limit", 1), + offset=context.variables.get("offset", 0), + ) if result: return result[0] else: - return f"Nothing found, try again or try to adjust the topic." + return f"Nothing found, try again or try to adjust the topic." \ No newline at end of file diff --git a/plugins/sk_web_pages_plugin.py b/plugins/sk_web_pages_plugin.py index df690de..81cfd29 100644 --- a/plugins/sk_web_pages_plugin.py +++ b/plugins/sk_web_pages_plugin.py @@ -3,8 +3,6 @@ from semantic_kernel.skill_definition import sk_function from bs4 import BeautifulSoup import re, aiohttp - - class WebPagesPlugin: """ A plugin to interact with web pages, e.g. download the text content of a page. @@ -32,4 +30,8 @@ async def fetch_webpage(self, input: str) -> str: # get text and compact empty lines text = soup.get_text() - return re.sub(r"[\r\n][\r\n]{2,}", "\n\n", text) + # remove multiple empty lines + text = re.sub(r"[\r\n][\r\n]{2,}", "\n\n", text) + # remove leading and trailing empty spaces, leaving max 1 empty space at the beginning of each line + text = re.sub(r"[\n] +", "\n ", text) + return text \ No newline at end of file From a05fb930a964eef364da5971d99766df582d3dde Mon Sep 17 00:00:00 2001 From: tonic Date: Mon, 11 Dec 2023 14:16:28 +0100 Subject: [PATCH 50/52] Update README.md --- README.md | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/README.md b/README.md index 58f5797..fde7aba 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,57 @@ license: mit - sign up and get an api key for open ai - sign up and set up a project in [zilliz cloud](https://cloud.zilliz.com/) +<<<<<<< HEAD +======= +- sign up and get an api key for Bing! Search + +## Zilliz Plugin + +This plugin allows users to plug in their existing zilliz account to a multiagent framework using autogen and semantic-kernel. + +#### Set Up Zilliz +![Screenshot 2023-12-11 131536](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/d1b42e9c-8fa0-4145-bf60-c975277c6f27) +![Screenshot 2023-12-11 131608](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/5b6b1510-631a-43bb-a647-ea892793e821) + +#### Create an Account + +1. Navigate to cloud.zilliz.com +2. sign-up + + +![Screenshot 2023-12-11 131518](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/5d657875-dc31-4f16-a36f-77f8f2391add) +![Screenshot 2023-12-11 131237](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/4747afcf-8e34-40ae-9cd4-47d70a6fb908) +![Screenshot 2023-12-11 131243](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/d90029c5-869b-444d-adc1-6a997cac0976) + +#### Create a Cluster +![Screenshot 2023-12-11 131633](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/01af90cd-22d8-4813-b677-c13714c3b79c) +![Screenshot 2023-12-11 131710](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/918eaa0a-cb67-4835-a302-2666193de29c) +![Screenshot 2023-12-11 131739](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/515855a8-1ff8-407f-9184-972848f8b0af) +![Screenshot 2023-12-11 131744](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/c728e6dc-b02d-476b-8b6a-8f5f7c6f8072) + +#### AutoCreate a Pipeline +![Screenshot 2023-12-11 131824](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/0b9de3e2-74c2-428f-960a-bf7f2e901904) +![Screenshot 2023-12-11 131913](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/73550d75-9a6d-4454-a12c-1935584cfc92) +![Screenshot 2023-12-11 132006](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/3fd90763-d64d-4194-bd96-cda996921425) + +#### AutoCreate all the Pipeline +![Screenshot 2023-12-11 132023](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/7f5a9910-fad7-45c9-9f18-af9e2b876699) +![Screenshot 2023-12-11 132035](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/69b23ec3-ecb8-494d-bb69-c7665d9e31e8) + +#### Use Curl to Upload a Document +![Screenshot 2023-12-11 135943](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/21bdfac4-99bf-413a-9cf8-a2fafeb9c837) +![Screenshot 2023-12-11 140115](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/b89f3c69-258f-4311-962f-10f7f5bc0096) +![Screenshot 2023-12-11 132130](https://github.com/Josephrp/semantic-kernel-v1.0-hackathon/assets/18212928/66a17880-699b-4dde-bc8a-d3e37b04e69e) + +#### Use Your Credentials + Existing Zilliz Cloud With Semantic Kernel ! + +## Get Your Bing! Search API Key + +1. visit this weblink [https://aka.ms/bingapisignup](https://portal.azure.com/#create/microsoft.bingsearch) +2. open your portal : [https://portal.azure.com/#create/microsoft.bingsearch](https://portal.azure.com/#create/microsoft.bingsearch) + +# **Check the plugins folder for new Semantic Kernel Plugins** +>>>>>>> 8ff1c563cb0c715e932c267d2fe967f50b8aee0d ## Use and Install From 2934831e71de0528f65a50fe682ac194f4c2f7eb Mon Sep 17 00:00:00 2001 From: Tonic Date: Tue, 12 Dec 2023 12:40:48 +0100 Subject: [PATCH 51/52] Update README.md --- README.md | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/README.md b/README.md index 8114415..5c093ca 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ license: mit - sign up and get an api key for open ai - sign up and set up a project in [zilliz cloud](https://cloud.zilliz.com/) -<<<<<<< HEAD + ======= - sign up and get an api key for Bing! Search @@ -90,25 +90,6 @@ then edit the OAI_CONFIG_LIST file or on the command line: ```bash nano OAI_CONFIG_LIST.json ``` - -Copy the `.env.example` file to a new file named `.env` and update it with your actual configuration: - -```bash -cp .env.example .env -nano .env -``` - -Fill in the `GORILLA_CLI_PATH`, `OPENAI_API_KEY`, and `AZURE_OPENAI_API_KEY` with the appropriate values. For example: - -``` -GORILLA_CLI_PATH=/usr/local/bin/gorilla-cli -OPENAI_API_KEY=your-openai-api-key -AZURE_OPENAI_API_KEY=your-azure-openai-api-key -AZURE_OPENAI_API_BASE=your-azure-openai-api-base -``` - -Make sure to replace `/usr/local/bin/gorilla-cli` with the actual path to your `gorilla-cli` executable, and the placeholder keys with your actual API keys. - on the command line , press: ```nano From c92e759725fed04ef14d2af110b654bfeba82d1a Mon Sep 17 00:00:00 2001 From: Tonic Date: Tue, 12 Dec 2023 12:42:18 +0100 Subject: [PATCH 52/52] Delete convo.md --- convo.md | 445 ------------------------------------------------------- 1 file changed, 445 deletions(-) delete mode 100644 convo.md diff --git a/convo.md b/convo.md deleted file mode 100644 index 2645d37..0000000 --- a/convo.md +++ /dev/null @@ -1,445 +0,0 @@ - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -You'll probably need to run a search for this one. Would there be any utility to making a semantic kernel plugin that calls the Gorilla CLI? - -ChatGPT - -ChatGPT - -Creating a semantic kernel plugin that calls the Gorilla CLI could be quite beneficial, especially considering the distinct functionalities and advantages offered by each. - -Gorilla CLI is a tool designed to enhance command-line interactions. It generates potential commands for execution based on user objectives, supporting a wide range of APIs like Kubernetes, AWS, GCP, Azure, GitHub, and many others. This user-centric approach simplifies command-line operations, eliminating the need to remember complex CLI arguments​[](https://github.com/gorilla-llm/gorilla-cli)​. - -On the other hand, Semantic Kernel, with its plugin architecture, serves as a foundation for AI applications, allowing integration with services like ChatGPT, Bing, and Microsoft 365. Plugins in Semantic Kernel can consist of both native code and AI service requests through semantic functions. This structure allows for the encapsulation of capabilities into a single unit, facilitating their use by the kernel. For developers, this means that plugins created for Semantic Kernel can be exported and made usable in other platforms like ChatGPT, enhancing the reach of AI capabilities without rewriting code​[](https://learn.microsoft.com/en-us/semantic-kernel/ai-orchestration/plugins/)​. - -Given these aspects, a semantic kernel plugin that calls Gorilla CLI could leverage the strengths of both platforms. It would enable the creation of sophisticated command-line tools that are enhanced by AI capabilities. For instance, such a plugin could use AI to interpret and simplify complex command sequences or integrate advanced data processing and analysis within command-line operations. - -Furthermore, the semantic kernel's capability to integrate with external data sources and the facility to create and use templatized prompts can add a layer of dynamic functionality to the command-line operations managed by Gorilla CLI. This could result in more efficient, context-aware, and adaptable CLI tools​[](https://techcommunity.microsoft.com/t5/azure-developer-community-blog/semantic-kernel-powered-openai-plugin-development-lifecycle/ba-p/3967751)​. - -Overall, combining the user-centric command generation of Gorilla CLI with the AI-enhanced, plugin-driven architecture of Semantic Kernel can lead to the development of more powerful, intuitive, and versatile tools for various command-line and data processing tasks. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -Alright. Let's say I wanted to write a semantic kernel plugin that allowed for the usage of the Gorilla CLI during kernel operations, how would I go about that? - -ChatGPT - -ChatGPT - -Writing a semantic kernel plugin that integrates the Gorilla CLI into kernel operations involves a few key steps. Here's a high-level overview of the process you might follow: - -1. **Understand Gorilla CLI and Semantic Kernel**: Firstly, ensure you have a good understanding of both Gorilla CLI and the Semantic Kernel. Gorilla CLI enhances command-line interactions by generating potential commands based on user inputs, while Semantic Kernel allows for the integration of AI capabilities into various applications and services. - -2. **Design the Plugin's Functionality**: Decide on the specific functionalities you want your plugin to have. This could include determining how Gorilla CLI's command generation capabilities will be utilized within the Semantic Kernel environment. For example, the plugin could enable AI-enhanced command-line operations, facilitate data processing, or automate certain tasks using Gorilla CLI commands. - -3. **Familiarize with Plugin Standards**: As per the documentation from Microsoft Learn, Semantic Kernel has adopted the OpenAI plugin specification as the standard. This ensures interoperability of plugins across various AI applications and services. Understanding this specification is crucial for creating a plugin that is compatible with Semantic Kernel​[](https://learn.microsoft.com/en-us/semantic-kernel/ai-orchestration/plugins/)​. - -4. **Create Semantic and Native Functions**: Semantic Kernel plugins can have two types of functions: semantic and native. Semantic functions are more about AI-driven capabilities, like natural language processing, while native functions involve direct code execution, such as running a Gorilla CLI command. Depending on your plugin's requirements, you may need to implement both types of functions. - -5. **Templatize Prompts for Scalability**: If your plugin will use AI services, consider templatizing prompts. This approach makes the plugin more scalable and maintainable, allowing dynamic generation of prompts based on variables and enabling nested function calls within a semantic function​[](https://techcommunity.microsoft.com/t5/azure-developer-community-blog/semantic-kernel-powered-openai-plugin-development-lifecycle/ba-p/3967751)​. - -6. **Develop and Test the Plugin**: Develop your plugin according to the OpenAI plugin standards and test it thoroughly. Ensure that it integrates well with Semantic Kernel and that it can effectively call and utilize Gorilla CLI commands within its operations. - -7. **Document and Share**: Once your plugin is developed and tested, document its functionalities, usage, and any limitations. Consider sharing it on platforms like GitHub, and ensure it adheres to the relevant licensing requirements, like the Apache 2.0 license under which Gorilla CLI operates​[](https://github.com/gorilla-llm/gorilla-cli)​. - -8. **Feedback and Iteration**: Be prepared to receive feedback from users and iterate on your plugin. The AI and CLI fields are rapidly evolving, so staying updated with the latest developments and user needs is crucial for maintaining an effective and relevant tool. - - -Remember, these steps provide a general guideline. The specifics can vary based on your exact requirements and the functionalities you wish to implement in your plugin. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -Sure. Let's have your act as though you're the lead developer on a hackathon project, the goal of which is to develop a useful plugin for Semantic Kernel that makes use of the Gorilla CLI. Develop a step by step plan to implement this plugin, so we can relay it to your juniors. - -ChatGPT - -ChatGPT - -As the lead developer on this hackathon project, our goal is to develop a Semantic Kernel plugin that integrates the Gorilla CLI. Here's a step-by-step plan to implement this plugin: - -### 1. Team Briefing and Role Assignment - -- **Objective Explanation**: Brief the team about the project's goal: creating a plugin that combines the AI-enhanced capabilities of Semantic Kernel with the command-line efficiency of Gorilla CLI. -- **Role Distribution**: Assign roles based on expertise, such as development, testing, documentation, and integration. - -### 2. Requirement Analysis - -- **Identify Use Cases**: Determine specific scenarios where the plugin will be useful, like automating repetitive CLI tasks or enhancing command-line operations with AI. -- **Gather Requirements**: List the functionalities needed for these use cases, considering both AI capabilities and command-line operations. - -### 3. Understanding Gorilla CLI and Semantic Kernel - -- **Research**: Familiarize the team with Gorilla CLI's functionalities and how it simplifies CLI interactions. -- **Study Semantic Kernel**: Understand the architecture of Semantic Kernel, focusing on plugin development and integration standards. - -### 4. Plugin Design - -- **Define Functionalities**: Decide what the plugin will do. For instance, it could translate natural language into CLI commands using Gorilla CLI. -- **Sketch Workflow**: Create a high-level workflow of how the plugin will operate within the Semantic Kernel environment. - -### 5. Development Environment Setup - -- **Tools and Technologies**: Set up development tools, including necessary libraries and access to Gorilla CLI and Semantic Kernel platforms. -- **Version Control**: Establish a Git repository for collaborative development and version control. - -### 6. Plugin Development - -- **Create Semantic Functions**: Develop functions for AI-related tasks, such as interpreting user input or processing command outputs. -- **Develop Native Functions**: Implement native functions that will call Gorilla CLI commands. -- **Integration**: Ensure that the semantic and native functions work cohesively. - -### 7. Templating and AI Integration - -- **Templatize Prompts**: Implement templated prompts for scalability and dynamic functionality. -- **AI Service Connection**: Connect the AI services of Semantic Kernel to the plugin, ensuring that the AI can interpret and respond to user inputs effectively. - -### 8. Testing and Iteration - -- **Unit Testing**: Perform thorough unit tests for each function. -- **Integration Testing**: Test the plugin in an integrated environment to ensure it works seamlessly within Semantic Kernel and with Gorilla CLI. -- **Feedback Loop**: Use team feedback to iterate and improve the plugin. - -### 9. Documentation - -- **User Guide**: Create a detailed user guide explaining how to install and use the plugin. -- **Developer Documentation**: Document the code and architecture for future development and maintenance. - -### 10. Demonstration and Feedback - -- **Internal Demo**: Conduct a demonstration within the team to gather initial feedback and make necessary adjustments. -- **Hackathon Presentation**: Prepare for the final presentation at the hackathon, highlighting key features and potential use cases. - -### 11. Submission and Post-Hackathon Plan - -- **Finalize Submission**: Ensure all components are polished and submit the project. -- **Post-Hackathon Roadmap**: Discuss potential future improvements or additional features based on the feedback received during the hackathon. - -### Regular Check-Ins - -Throughout the hackathon, hold regular check-ins to monitor progress, address challenges, and ensure the project stays on track. - -This plan provides a structured approach for developing the plugin, ensuring that we cover all necessary aspects from conception to completion. Remember, effective communication and collaboration within the team are key to the success of this project. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -Great. Let's start stepping through that process. The team understands the goal, and has roles assigned already. Let's begin with requirement analysis and create a design architecture, specification, or prd, whichever's the most appropriate to begin with. - -ChatGPT - -ChatGPT - -Great, let's move forward with the requirement analysis and the creation of a design architecture or Product Requirement Document (PRD) for our Semantic Kernel plugin that integrates with Gorilla CLI. Here's how we can approach this: - -### Requirement Analysis - -1. **Identify Target Users and Use Cases** - - - Determine who will use the plugin (e.g., developers, system administrators). - - Identify specific use cases, such as automating complex CLI tasks, simplifying command generation, or integrating AI-driven insights into command-line operations. -2. **Gather Functional Requirements** - - - List the key functionalities based on the use cases. For example: - - Natural language interpretation to CLI commands. - - AI-enhanced assistance for command syntax and options. - - Automatic generation of command scripts based on user goals. -3. **Non-Functional Requirements** - - - Consider performance criteria (e.g., response time, scalability). - - Ensure security and privacy compliance, particularly if dealing with sensitive data. - - User experience aspects, like ease of installation and use. -4. **Integration Requirements** - - - How the plugin will integrate with existing systems and workflows. - - Compatibility with different operating systems and environments. - -### Design Architecture/Specification - -1. **High-Level Architecture** - - - Outline the plugin's architecture, showing how it fits into the Semantic Kernel ecosystem and interacts with Gorilla CLI. - - Include diagrams for visual representation. -2. **Data Flow Diagrams** - - - Illustrate how data flows through the system, from user input to the generation of CLI commands and their execution. -3. **Component Design** - - - Break down the system into smaller components (e.g., input parser, command generator, execution manager). - - Define the responsibilities and interfaces for each component. -4. **Technology Stack** - - - Specify the programming languages, frameworks, and tools that will be used. - -### PRD (Product Requirement Document) - -1. **Purpose and Scope** - - - Define the purpose of the plugin and its scope within the Semantic Kernel environment. -2. **Detailed Functional Requirements** - - - Elaborate on the functionalities identified during the requirement analysis. - - Include use case scenarios and user stories. -3. **User Interface and Interaction Flow** - - - Sketch out the user interface, if applicable. - - Describe the interaction flow from the user's perspective. -4. **Performance Metrics** - - - Define the key performance indicators (KPIs) and success metrics. -5. **Security and Compliance** - - - Detail any security measures and compliance standards the plugin adheres to. -6. **Timeline and Milestones** - - - Provide a tentative timeline for development, testing, and release. - - Set milestones for major phases of the project. -7. **Risk Assessment** - - - Identify potential risks and mitigation strategies. -8. **Appendices** - - - Include any additional resources, references, or technical specifications. - -### Next Steps - -- **Validation**: Review the PRD with key stakeholders (e.g., potential users, team members) for feedback and validation. -- **Refinement**: Refine the PRD based on feedback received. -- **Approval**: Get the final sign-off from the team or project sponsor. - -Once we have a solid PRD in place, we can move forward with confidence into the development phase, knowing that our objectives, requirements, and plan are clearly outlined and agreed upon. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -Alright. If you can get started stepping through the creation of this PRD, i'll be available to conference with the team and provide feedback as needed. - -ChatGPT - -ChatGPT - -Excellent! Let's start developing the Product Requirement Document (PRD) for our Semantic Kernel plugin that integrates Gorilla CLI. - -### Product Requirement Document (PRD) for Semantic Kernel Plugin with Gorilla CLI - -#### 1. Purpose and Scope - -- **Objective**: Develop a plugin for Semantic Kernel that utilizes Gorilla CLI to enhance command-line operations with AI capabilities. -- **Scope**: The plugin will be used within Semantic Kernel environments, offering intuitive command generation and execution for various CLI tasks. - -#### 2. Target Users and Use Cases - -- **Target Users**: Developers, system administrators, and IT professionals. -- **Use Cases**: - 1. Simplifying complex command sequences into straightforward tasks. - 2. Automating repetitive CLI tasks using AI-driven insights. - 3. Enhancing command-line debugging with AI-generated suggestions. - -#### 3. Functional Requirements - -- **Command Interpretation**: The plugin should interpret natural language inputs and translate them into appropriate CLI commands. -- **AI-Assisted Command Creation**: Generate CLI commands or scripts based on user goals or descriptions. -- **User Interaction**: Interactive CLI interface that allows users to refine or edit AI-generated commands before execution. - -#### 4. Non-Functional Requirements - -- **Performance**: Response time should be within a few seconds for command generation. -- **Scalability**: Capable of handling a large number of requests simultaneously. -- **Security**: Ensure secure handling of commands, especially those that might affect system configurations or sensitive data. - -#### 5. Integration Requirements - -- **Semantic Kernel Integration**: Must seamlessly integrate with existing Semantic Kernel plugins and workflows. -- **Cross-Platform Compatibility**: Ensure compatibility with major operating systems like Windows, Linux, and macOS. - -#### 6. System Architecture - -- **High-Level Design**: Overview of the plugin architecture showing interaction between Semantic Kernel and Gorilla CLI. -- **Component Design**: Detailed description of each component, such as input parser, AI engine, and command execution manager. - -#### 7. User Interface and Interaction - -- **UI Sketch**: If applicable, include mockups of any user interface elements. -- **Interaction Flow**: Step-by-step flow of how users will interact with the plugin, from input to command execution. - -#### 8. Performance Metrics - -- **Success Metrics**: Define KPIs such as user adoption rate, accuracy of command generation, and user satisfaction. - -#### 9. Security and Compliance - -- **Data Handling**: Describe how user inputs and command outputs are handled securely. -- **Compliance Standards**: Adherence to relevant security standards and practices. - -#### 10. Development Timeline and Milestones - -- **Phase 1**: Requirement gathering and validation (Duration: 2 weeks). -- **Phase 2**: Design and architecture setup (Duration: 3 weeks). -- **Phase 3**: Development and unit testing (Duration: 6 weeks). -- **Phase 4**: Integration testing and bug fixing (Duration: 4 weeks). -- **Phase 5**: User acceptance testing and deployment (Duration: 3 weeks). - -#### 11. Risk Assessment and Mitigation - -- **Risk Identification**: Possible risks such as delays in development, unanticipated technical challenges, or integration issues. -- **Mitigation Strategies**: Contingency plans, such as allocating additional resources or having fallback options for critical components. - -#### 12. Appendices - -- **Additional Resources**: Links to relevant documentation, APIs, and tools. -- **Technical Specifications**: Detailed technical requirements and specifications. - ---- - -This draft PRD provides a structured framework for our plugin development. You can review it with the team, and I'll be here to update the document based on your feedback and inputs. Once we finalize the PRD, it will serve as a guiding document throughout the development process. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -got it. Use cases look okay, i'm a little fuzzy on them though. My understanding is that semantic kernel is a package designed to perform various operations on and with llms, and the gorilla cli uses llms to translate natural language queries into command line executions. I don't have a clear picture of a specific use case where i would use them both at the same time, instead of one or the other as we intend, i.e. a clear usage situation of the plugin we're designing. reqs look fine, gorilla hits #1 by itself i think, it also prompts the user to confirm prior to execution. what's the intended difference between command interpretation and assisted command creation? not too concerned with the non-functionals, considering it's a hackathon project, goal is to just make the thing work and worry about testing and scalability later. integration we're looking for is to make a semantic kernel plugin that works the semantic kernel python sdk specifically, the rest can wait. UI will probably just be the terminal, or whatever standard interface you might operate a semantic kernel plugin via. not worried about compliance, hackathon project as mentioned above. timeline should be much shorter -- we have less than 24 hours, and a largely-functional series of semantic kernel plugins currently operating within an autogen multi-agent framework. not worried about risks or appendicies at the moment, short time frame, hackathon. - -ChatGPT - -ChatGPT - -Given your clarifications, let's refine our approach focusing on the specifics of your hackathon project and the integration of Semantic Kernel with Gorilla CLI. - -### Refined Use Cases - -Understanding that Semantic Kernel is primarily about working with LLMs and Gorilla CLI translates natural language into command-line executions, here are more targeted use cases for our plugin: - -1. **Natural Language Command Generation for Data Processing Tasks**: Users dealing with large datasets often use complex command-line operations for data manipulation. Our plugin could interpret natural language requests to generate these complex commands, leveraging both the LLM understanding of Semantic Kernel and the command-line efficiency of Gorilla CLI. - -2. **AI-Assisted Debugging and Optimization**: For developers working on optimizing scripts or debugging, our plugin can suggest command-line tweaks or optimizations based on natural language descriptions of their goals or problems, combining AI insights with practical CLI solutions. - -3. **Automating Routine Tasks with AI Insights**: System administrators and developers often have routine tasks that could be automated. Our plugin could interpret natural language descriptions of these tasks, generating scripts or command sequences to automate them. - - -### Functional Requirements Clarification - -- **Command Interpretation vs Assisted Command Creation**: - - **Command Interpretation** refers to the plugin's ability to understand user inputs in natural language and map them to relevant CLI commands, which is a primary function of Gorilla CLI. - - **Assisted Command Creation** goes a step further, where the plugin not only interprets the command but also assists in refining or optimizing it based on the context provided. This is where the Semantic Kernel's LLM integration adds value, by understanding the broader context or intent behind the command. - -### Simplified Project Scope - -- **Integration Focus**: Concentrate on creating a Semantic Kernel plugin compatible with the Semantic Kernel Python SDK, enabling the integration with Gorilla CLI. -- **UI and Interaction**: Since it's a hackathon project with time constraints, we'll stick with a standard terminal or command-line interface, which is typical for Semantic Kernel plugins. -- **Timeline Adjustment**: Acknowledging the time constraint (less than 24 hours), the project's focus will be on developing a basic functional prototype. This involves: - - Quickly setting up the development environment. - - Developing a minimal viable plugin that demonstrates the key integration between Semantic Kernel and Gorilla CLI. - - Basic testing to ensure the plugin's core functionality works as intended. - -### Next Steps - -- **Rapid Development**: With the shortened timeline, we'll prioritize developing the core functionalities – primarily the integration of Semantic Kernel with Gorilla CLI to interpret and refine CLI commands. -- **Collaborative Coding**: Given the time constraint, parallel development and immediate integration of components will be crucial. Regular sync-ups, even if brief, will help keep the team aligned. -- **Demonstration Prep**: Prepare a simple demonstration that showcases the plugin's ability to interpret natural language and generate/assist in creating command-line instructions. - -Remember, the key to success in a hackathon is often to create a proof of concept that demonstrates the innovative integration or use of technology, rather than a fully polished product. The focus should be on showcasing the unique capabilities that arise from combining Semantic Kernel's LLM functionalities with Gorilla CLI's command-line efficiency. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -Now I'm getting it a bit better. Let's start outline something that lets us start actually writing code, we want to hit the gas on this asap. So rather than a single user input that generates one command line execution, like gorilla, what it sounds like the idea is would be to wrap gorilla in a semantic function and provide that interface to the CLI/an sk 'agent', if they have those, such that the user can give a high level overview or general goal and get a full script/series of executions, then execute them, provide feedback to the cli tool, and then get the next series, is that kind of the idea? - -ChatGPT - -ChatGPT - -Yes, your understanding is on point. Wrapping Gorilla CLI in a semantic function and providing it to a Semantic Kernel agent or CLI interface for script generation and execution is indeed a promising approach. This would elevate the capabilities from single command generation to creating a series of command executions based on a high-level overview or goal. Here's how we can start outlining the development process: - -### Development Outline for Semantic Kernel Plugin with Gorilla CLI Integration - -#### 1. **Define the Semantic Function** - -- **Functionality**: The semantic function should take a high-level user goal or overview as input and translate it into a script or a series of executable commands using Gorilla CLI. -- **Input Processing**: This involves parsing the user input to understand the intent and context, which will then be fed into Gorilla CLI for command generation. - -#### 2. **Gorilla CLI Integration** - -- **Command Generation**: Utilize Gorilla CLI's capabilities to generate relevant command-line instructions based on the processed input. -- **Script Creation**: If the user's goal involves multiple steps, the function should be able to create a script that encapsulates these steps. - -#### 3. **Execution and Feedback Loop** - -- **Execution Interface**: Provide an interface for the user to review and execute the generated commands or script. -- **Feedback Mechanism**: After execution, allow the user to provide feedback, which can be used to refine the command generation process. - -#### 4. **Setting Up the Development Environment** - -- **Tools and Dependencies**: Set up a development environment with access to Semantic Kernel SDK, Gorilla CLI, and any other necessary tools or libraries. -- **Collaborative Tools**: Since we're on a tight deadline, ensure the team has access to collaborative tools for code sharing and communication. - -#### 5. **Rapid Prototyping** - -- **Core Functionality First**: Focus on building the core functionality – the semantic function that integrates Gorilla CLI and generates the command script. -- **Iterative Development**: Adopt an iterative approach, starting with a basic version and enhancing it based on initial testing and feedback. - -#### 6. **Basic Testing and Refinement** - -- **Initial Testing**: Test the basic functionality to ensure that the semantic function correctly interprets the input and generates viable commands. -- **Refinement**: Based on initial tests, refine the command generation logic and user interaction flow. - -#### 7. **Preparation for Demonstration** - -- **Demo Script**: Prepare a brief script that demonstrates the plugin's capabilities. -- **Presentation Materials**: Create any necessary presentation materials to explain the functionality and potential use cases. - -#### 8. **Hackathon Presentation** - -- **Showcasing the Plugin**: Demonstrate how the plugin takes a high-level goal and translates it into a series of CLI commands or a script. -- **Explaining the Integration**: Highlight how the integration of Semantic Kernel and Gorilla CLI offers enhanced capabilities over using them separately. - -#### 9. **Code Sharing and Version Control** - -- Ensure that all code is committed to a shared repository for easy access and collaboration among team members. - -### Next Steps - -- **Begin Coding**: Start with setting up the semantic function and integrating Gorilla CLI. -- **Regular Sync-Ups**: Have brief, regular meetings to synchronize the team’s progress and tackle any emerging challenges. - -This outline focuses on getting the core functionality up and running as soon as possible, given the time constraints of a hackathon. It ensures that we have a working prototype that demonstrates the innovative integration of Semantic Kernel and Gorilla CLI. - -![User](https://s.gravatar.com/avatar/f215a1e34f8fd77a82ac38b4c81a0745?s=480&r=pg&d=https%3A%2F%2Fcdn.auth0.com%2Favatars%2Fdd.png) - -You - -We have dev envs and collab tools already set up -- let's get started with some code blocks and i'll plug them into the environment to start testing/iterating on. - -ChatGPT - -ChatGPT - -Great! Let's begin by drafting a basic Python script that outlines the core functionality of our Semantic Kernel plugin integrating Gorilla CLI. The script will follow these steps: - -1. **Input Processing**: Receive a high-level description or goal from the user. -2. **Command Generation**: Use Gorilla CLI capabilities to translate the description into a series of CLI commands. -3. **Execution and Feedback**: Provide an interface for executing the commands and capturing user feedback. - -### Python Script Skeleton - -pythonCopy code - -`# Import necessary libraries # Assuming Gorilla CLI and Semantic Kernel SDK are installed and accessible def process_user_input(input_description): """ Process the high-level user input to a format suitable for Gorilla CLI. (This function might involve natural language processing or simple parsing) """ # Process input processed_input = input_description # Simplified for illustration return processed_input def generate_cli_commands(processed_input): """ Use Gorilla CLI to generate a series of CLI commands based on the processed input. (This function would interface with Gorilla CLI) """ # For the sake of example, let's assume we have a function in Gorilla CLI that does this. # In reality, you would replace this with actual Gorilla CLI function calls. cli_commands = ["echo 'Command 1'", "echo 'Command 2'"] # Example commands return cli_commands def execute_commands(cli_commands): """ Execute the generated CLI commands and provide an interface for user feedback. """ for command in cli_commands: # Execute the command (this could be done via a subprocess call in a real scenario) print(f"Executing: {command}") # Here you would insert the actual command execution logic def main(): # Example user input user_input = "Generate a report from yesterday's logs and email it to the team" # Process the input processed_input = process_user_input(user_input) # Generate CLI commands cli_commands = generate_cli_commands(processed_input) # Execute commands and handle feedback execute_commands(cli_commands) if __name__ == "__main__": main()` - -This script is a basic outline. The actual implementation would involve more sophisticated input processing and integration with Gorilla CLI. You can start by plugging in your existing Semantic Kernel plugins and Gorilla CLI functionalities into the respective placeholder functions. - -### Iteration and Testing - -- **Iterate on `process_user_input`**: Improve this function to handle more complex scenarios or varied inputs. -- **Integrate with Gorilla CLI**: Replace the placeholder in `generate_cli_commands` with actual calls to Gorilla CLI. -- **Enhance `execute_commands`**: Implement real command execution and capture user feedback. - -This setup should allow you to start testing and iterating on the basic functionality. As you progress, keep refining each part based on the feedback and results from your initial tests. \ No newline at end of file