From e8f15a632f875d79ab114d18708c93769bc98aa9 Mon Sep 17 00:00:00 2001
From: Ivan Gabriele
Date: Sun, 22 Oct 2023 12:20:20 +0000
Subject: [PATCH] wip

---
 actions/write_file.py                        |   2 +-
 agents/__init__.py                           |   1 +
 agents/assistant.py                          | 110 ++++++++++++
 agents/functionner.py                        |  20 +--
 agents/product_owner.py                      |   8 +-
 agents/software_engineer.py                  |  34 ++--
 agents/user_experience_designer.py           |  12 +-
 constants.py                                 | 170 ++++++++++---------
 env.sample.jsonc => env.openai-sample.jsonc  |   2 +-
 env.opensource-sample.jsonc                  |  58 +++++++
 main.backup.py                               | 142 ++++++++++++++++
 main.py                                      |  80 ++-------
 poetry.lock                                  |  24 +--
 tests/utils/test_get_command_from_message.py |  39 +++++
 typedefs.py                                  |  10 +-
 utils/__init__.py                            |   2 +
 utils/ensure_str.py                          |  14 ++
 utils/get_command_from_message.py            |  29 ++++
 utils/get_model_config_as_dict.py            |   8 +-
 utils/mask_secret.py                         |   6 +-
 utils/print_project_config.py                |   4 +-
 21 files changed, 568 insertions(+), 207 deletions(-)
 create mode 100644 agents/assistant.py
 rename env.sample.jsonc => env.openai-sample.jsonc (100%)
 create mode 100644 env.opensource-sample.jsonc
 create mode 100644 main.backup.py
 create mode 100644 tests/utils/test_get_command_from_message.py
 create mode 100644 utils/ensure_str.py
 create mode 100644 utils/get_command_from_message.py

diff --git a/actions/write_file.py b/actions/write_file.py
index b2fe777..782131e 100644
--- a/actions/write_file.py
+++ b/actions/write_file.py
@@ -14,6 +14,6 @@ def write_file(relative_path: str, file_source: str) -> str:
         with open(full_path, "w") as file:
             file.write(file_source)
 
-        return f"Done."
+        return f"File successfully written to `{relative_path}`."
     except Exception as e:
         return f"Error: {str(e)}"
diff --git a/agents/__init__.py b/agents/__init__.py
index dd7d27d..974fa52 100644
--- a/agents/__init__.py
+++ b/agents/__init__.py
@@ -1,4 +1,5 @@
 from .base_agent import BaseAgent
+from .assistant import Assistant
 from .functionner import Functioneer
 from .product_owner import ProductOwner
 from .software_engineer import SoftwareEngineer
diff --git a/agents/assistant.py b/agents/assistant.py
new file mode 100644
index 0000000..4ec0902
--- /dev/null
+++ b/agents/assistant.py
@@ -0,0 +1,110 @@
+import autogen
+
+import actions
+import agents
+from constants import COMMON_LLM_CONFIG
+from utils import clean_text
+import utils
+
+# Maps each AI command name to the action that executes it.
+COMMAND_DISPATCH_DICT = {
+    "OPEN": actions.fetch_web_page,
+    "READ": actions.read_file,
+    "SEARCH": actions.search_web,
+    "WRITE": actions.write_file,
+}
+
+
+class Assistant(agents.BaseAgent):
+    ceo_user_proxy_agent: autogen.UserProxyAgent
+
+    def __init__(self) -> None:
+        self.as_assistant_agent = autogen.AssistantAgent(
+            "Assistant",
+            llm_config=COMMON_LLM_CONFIG,
+            system_message=clean_text(
+                """
+                You are the CEO's assistant.
+
+                You can run commands by replying using a specific syntax: "A_COMMAND [param_1] [param_2]" (without double quotes).
+                To run a command, you must reply with the command alone, without any other text. Otherwise it won't work.
+                You are expected to chain these commands until you can give a well-informed solution to the CEO's requests.
+                Each time you send a command, the CEO will run it and reply to you with its result.
+
+                Here are the available commands:
+                - "OPEN [url]" to get the content of a web page as basic markdown.
+                - "READ [file_path]" to read the content of a file.
+                - "SEARCH [query]" to search the web and get a JSON of ranked results.
+                - "WRITE [file_path] [file_content]" to create or edit a file.
+
+                Here is a process example:
+
+                ```md
+                1. The CEO asks you "What are the features of the SimCity 2000 game?".
+                2. You reply "SEARCH SimCity 2000 features".
+                3. You receive a message with the JSON of results from the CEO.
+                4. You write a first analysis of the results, as well as the most interesting page URLs to visit, in a file
+                by replying "WRITE .oads/ASSISTANT.md [your_analysis_and_tasks_as_markdown]"
+                (replacing the bracketed parameter with your Markdown source).
+                5. You receive a message of confirmation from the CEO.
+                6. You reply "OPEN [first_url_of_interest]"
+                (replacing the bracketed parameter with the correct URL).
+                7. You receive a message with the content of the web page from the CEO.
+                8. You analyze the content of the web page and update your analysis and tasks in the file
+                by replying "WRITE .oads/ASSISTANT.md [your_updated_analysis_and_tasks_as_markdown]"
+                (replacing the bracketed parameter with your Markdown source).
+
+                And so on... you repeat steps 6, 7 and 8 until you are done visiting all the URLs of interest.
+
+                Once you are done visiting the URLs of interest, you answer the original CEO's question with your analysis,
+                ending the message with "TERMINATE" to end the conversation.
+                ```
+                """
+            ),
+        )
+
+    async def start(
+        self, ceo_user_proxy_agent: autogen.UserProxyAgent, initial_prompt: str
+    ):
+        self.ceo_user_proxy_agent = ceo_user_proxy_agent
+
+        self.as_assistant_agent.clear_history()
+        self.ceo_user_proxy_agent.clear_history()
+
+        await self._ask(initial_prompt)
+
+    async def _ask(self, message: str):
+        await self.as_assistant_agent.a_receive(
+            message=utils.clean_text(message),
+            request_reply=True,
+            sender=self.ceo_user_proxy_agent,
+            silent=False,
+        )
+
+        last_message: str = self.as_assistant_agent.last_message(
+            self.ceo_user_proxy_agent
+        )["content"]
+
+        maybe_command_tuple = utils.get_command_from_message(last_message)
+        if maybe_command_tuple is None:
+            await self._ask_for_ceo_input()
+
+            return
+
+        elif isinstance(maybe_command_tuple, str):
+            # `get_command_from_message()` returned a usage error message: send it back as-is.
+            action_message = maybe_command_tuple
+
+        else:
+            command, parameters = maybe_command_tuple
+
+            # TERMINATE has no associated action: the conversation is over.
+            if command == "TERMINATE":
+                return
+
+            func = COMMAND_DISPATCH_DICT.get(command)
+            if func is None:
+                raise ValueError(f"Unknown AI Command: `{command}`.")
+
+            action_message = func(*parameters)
+
+        await self._ask(action_message)
+
+    async def _ask_for_ceo_input(self):
+        ceo_message = self.as_assistant_agent.get_human_input("Prompt:\n\n")
+
+        await self._ask(ceo_message)
diff --git a/agents/functionner.py b/agents/functionner.py
index 9cf2c8a..07023e5 100644
--- a/agents/functionner.py
+++ b/agents/functionner.py
@@ -1,7 +1,7 @@
 import autogen
 
 import agents
-from constants import COMMON_LLM_CONFIG
+from constants import FUNCTIONEER_LLM_CONFIG
 from utils import clean_text
 
 
@@ -9,26 +9,12 @@ class Functioneer(agents.BaseAgent):
     def __init__(self) -> None:
         self.as_assistant_agent = autogen.AssistantAgent(
             "Functioneer",
-            llm_config=COMMON_LLM_CONFIG,
+            llm_config=FUNCTIONEER_LLM_CONFIG,
             system_message=clean_text(
                 """
                 You are the Functioneer.
 
-                You are part of a company including a CEO, a Product Owner, a Software Engineer
-                and a User Experience Designer.
-
-                You role is to assist other agents, by suggesting function calls to the CEO, when they ask you to:
-                - Compile and run a Rust file.
-                - Get a web page content by it URL.
-                - Read a project file.
-                - Run a bash command in the project directory.
-                - Search the web.
-                - Write a project file.
-
-                Rules:
-                - Keep it short. Get to the point. Be straightforward. Always specify your recipient's name.
-                - Only reply to messages prefixed with your name, i.e.: "Functioneer, etc".
-                - Ask the CEO to run functions when you need to use them. You are not allowed to run them yourself.
+                Your role is to reply to agents' requests with the result of the command they asked you to run.
                 """
             ),
         )
diff --git a/agents/product_owner.py b/agents/product_owner.py
index 2132c7e..d90bf76 100644
--- a/agents/product_owner.py
+++ b/agents/product_owner.py
@@ -16,16 +16,16 @@ def __init__(self) -> None:
 
                 You manage a team including a Software Engineer and a User Experience Designer.
 
-                You role is to plan, organize and tell your specialized agents what to do
-                in order to achieve the CEO's goals to the best of your ability.
-
                 Rules:
                 - Keep it short. Get to the point. Be straightforward. Always specify your recipient's name.
-                - Ask the Functioneer to run functions when you need to use them. You are not allowed to run them yourself.
+                - ONLY reply if messages are prefixed with your name, i.e.: "Product Owner, etc".
                 - Use a `BOARD.json` file to plan and keep track of ALL the steps you and your team makes. ALWAYS check for its content when you start.
                 - Your team should always start with the UX and UI parts.
 
+                Your role is to plan, organize and tell your specialized agents what to do
+                in order to achieve the CEO's goals to the best of your ability.
+
                 In order to help with your tasks, you can ask the Functioneer to do the following for you:
                 - Get a web page content by it URL.
                 - Read a project file.
diff --git a/agents/software_engineer.py b/agents/software_engineer.py
index c9b2214..77f187a 100644
--- a/agents/software_engineer.py
+++ b/agents/software_engineer.py
@@ -12,30 +12,28 @@ def __init__(self) -> None:
             llm_config=COMMON_LLM_CONFIG,
             system_message=clean_text(
                 """
-                Your are the Sofware Engineer.
-
-                You are part of a team inluding a Product Owner and a User Experience Designer.
+                You are the Software Engineer and assist the CEO.
 
                 Your role is to write the expected program source code.
 
-                The Product Owner is your team manager.
-                The Product Owner will tell you what to do, don't answer to the CEO yourself.
-
                 Rules:
                 - Keep it short. Get to the point. Be straightforward. Always specify your recipient's name.
-                - Only reply to messages prefixed with your name, i.e.: "Software Engineer, etc".
-                - Only communicate with the Product Owner, and nobody else.
                 - ALWAYS write unit/e2e tests to check that your code works.
-                - NEVER run the program directly, run it via e2e tests.
-                - Use a `TECH.json` file to keep track of your work. ALWAYS check for its content when you start.
-
-                In order to help with your tasks, you can ask the Functioneer to do the following for you:
-                - Compile and run a Rust file.
-                - Get a web page content by it URL.
-                - Read a project file.
-                - Run a bash command in the project directory.
-                - Search the web.
-                - Write a project file.
+                - NEVER run the program directly, run it via headless e2e tests.
+                - Always do some online research to provide the best answer or solution possible.
+                - Only open web pages after you have searched for them using the SEARCH command.
+                - Don't stop at the first result when you search or browse online, but go on until you find the best one.
+
+                Commands:
+                - Reply OPEN to get the content of a web page.
+                - Reply READ to get the content of a workspace file.
+                - Reply SEARCH to search the web.
+                - Reply WRITE to create or edit a workspace file.
+                - Reply TERMINATE when your task is done.
+ + Command examples: + - SEARCH weather in Paris + - WRITE src/index.js console.log('Hello world!'); """ ), ) diff --git a/agents/user_experience_designer.py b/agents/user_experience_designer.py index abb1600..b5ac6ee 100644 --- a/agents/user_experience_designer.py +++ b/agents/user_experience_designer.py @@ -16,18 +16,18 @@ def __init__(self) -> None: You are part of a team including a Product Owner and a Software Engineer. - Your role is to design the program UI and UX. - - The Product Owner is your team manager. - The Product Owner will tell you what to do, don't answer to the CEO yourself. - Rules: - Keep it short. Get to the point. Be straightforward. Always specify your recipient's name. - - Only reply to messages prefixed with your name, i.e.: "User Experience Designer, etc". + - ONLY reply if messages are prefixed with your name, i.e.: "User Experience Designer, etc". - Only communicate with the Product Owner, and nobody else. - Keep it short. Get to the point. Be straightforward. Specify your recipient's name. - Use a `DESIGN.md` file to keep a memo of your analyses. ALWAYS check for its content when you start. + Your role is to design the program UI and UX. + + The Product Owner is your team manager. + The Product Owner will tell you what to do, don't answer to the CEO yourself. + In order to help with your tasks, you can ask the Functioneer to do the following for you: - Get a web page content by it URL. - Read a project file. diff --git a/constants.py b/constants.py index b2fd930..32876c4 100644 --- a/constants.py +++ b/constants.py @@ -1,7 +1,6 @@ -from autogen import config_list_from_json +import os from dacite import from_dict from jsonc_parser.parser import JsoncParser -import os from typedefs import ProjectConfig import utils @@ -15,99 +14,114 @@ COMMON_LLM_CONFIG = { # https://microsoft.github.io/autogen/docs/FAQ#set-your-api-endpoints "config_list": [utils.get_model_config_as_dict(PROJECT_CONFIG)], - "functions": [ - { - "name": "fetch_web_page", - "description": "Fetch a web page and return its content as text and Markdown links.", - "parameters": { - "type": "object", - "properties": { - "url": { - "type": "string", - "description": "Url to fetch from.", - }, + "request_timeout": 600, + # "seed": 42, +} + +PROJECT_DIRECTORY_NAME = "project" +PROJECT_DIRECTORY_PATH = os.path.join(os.getcwd(), PROJECT_DIRECTORY_NAME) + +FUNCTIONS = [ + { + "name": "fetch_web_page", + "description": "Fetch a web page and return its content as text and Markdown links.", + "parameters": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "Url to fetch from.", }, - "required": ["url"], }, + "required": ["url"], }, - { - "name": "read_file", - "description": "Read and return a file content.", - "parameters": { - "type": "object", - "properties": { - "relative_path": { - "type": "string", - "description": "Relative path of the file.", - }, + }, + { + "name": "read_file", + "description": "Read and return a file content.", + "parameters": { + "type": "object", + "properties": { + "relative_path": { + "type": "string", + "description": "Relative path of the file.", }, - "required": ["relative_path"], }, + "required": ["relative_path"], }, - { - "name": "run_bash_command", - "description": "Run a bash command and return the output.", - "parameters": { - "type": "object", - "properties": { - "command": { - "type": "string", - "description": "Bash command.", - }, + }, + { + "name": "run_bash_command", + "description": "Run a bash command and return the output.", 
+ "parameters": { + "type": "object", + "properties": { + "command": { + "type": "string", + "description": "Bash command.", }, - "required": ["command"], }, + "required": ["command"], }, - { - "name": "run_rust_file", - "description": "Compile a rust file into `./temp_executable` and execute it.", - "parameters": { - "type": "object", - "properties": { - "rust_file_path": { - "type": "string", - "description": "Rust file path.", - }, + }, + { + "name": "run_rust_file", + "description": "Compile a rust file into `./temp_executable` and execute it.", + "parameters": { + "type": "object", + "properties": { + "rust_file_path": { + "type": "string", + "description": "Rust file path.", }, - "required": ["rust_file_path"], }, + "required": ["rust_file_path"], }, - { - "name": "search_web", - "description": "Search for a text query using Brave search engine and return results as JSON.", - "parameters": { - "type": "object", - "properties": { - "query": { - "type": "string", - "description": "Query to search.", - }, + }, + { + "name": "search_web", + "description": "Search for a text query using Brave search engine and return results as JSON.", + "parameters": { + "type": "object", + "properties": { + "query": { + "type": "string", + "description": "Query to search.", }, - "required": ["query"], }, + "required": ["query"], }, - { - "name": "write_file", - "description": "Write content to a file, creating it if necessary.", - "parameters": { - "type": "object", - "properties": { - "relative_path": { - "type": "string", - "description": "Relative path of the file.", - }, - "file_source": { - "type": "string", - "description": """Content to write.""", - }, + }, + { + "name": "write_file", + "description": "Write content to a file, creating it if necessary.", + "parameters": { + "type": "object", + "properties": { + "relative_path": { + "type": "string", + "description": "Relative path of the file.", + }, + "file_source": { + "type": "string", + "description": """Content to write.""", }, - "required": ["relative_path", "file_source"], }, + "required": ["relative_path", "file_source"], }, - ], - "request_timeout": 600, - "seed": 42, + }, +] + +FUNCTION_CAPABLE_LLM_CONFIG_LIST = [ + utils.get_model_config_as_dict( + project_config=PROJECT_CONFIG, + custom_current_model=PROJECT_CONFIG.user_proxy_agent.current_model, + ) +] + +FUNCTIONEER_LLM_CONFIG = COMMON_LLM_CONFIG | { + "config_list": FUNCTION_CAPABLE_LLM_CONFIG_LIST, + # "functions": FUNCTIONS, } -PROJECT_DIRECTORY_NAME = "project" -PROJECT_DIRECTORY_PATH = os.path.join(os.getcwd(), PROJECT_DIRECTORY_NAME) +CEO_LLM_CONFIG = FUNCTIONEER_LLM_CONFIG diff --git a/env.sample.jsonc b/env.openai-sample.jsonc similarity index 100% rename from env.sample.jsonc rename to env.openai-sample.jsonc index f236d6b..f1711e4 100644 --- a/env.sample.jsonc +++ b/env.openai-sample.jsonc @@ -30,8 +30,8 @@ // Any model using Azure OpenAI API { "model": "[AZURE_OPENAI_STUDIO_DEPLOYMENT_NAME]", - "api_key": "[AZURE_OPENAI_API_KEY]", "api_base": "https://[AZURE_OPENAI_RESOURCE_NAME].openai.azure.com", + "api_key": "[AZURE_OPENAI_API_KEY]", "api_type": "azure", // https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions "api_version": "2023-08-01-preview" diff --git a/env.opensource-sample.jsonc b/env.opensource-sample.jsonc new file mode 100644 index 0000000..49221d1 --- /dev/null +++ b/env.opensource-sample.jsonc @@ -0,0 +1,58 @@ +{ + // ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― + // Brave Search API Key + + // 
+  // Used by agent function `search_web()` which agents can use to help with their tasks
+  // https://brave.com/search/api/ (there is a free plan)
+  // "Data for AI" plan is preferred over "Data for Search" since it includes some useful additional props
+  "brave_search_api_key": "[BRAVE_SEARCH_API_KEY]",
+
+  // ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
+  // Current Model
+
+  // Selected `model` that agents must use in the list of OpenAI API endpoints
+  "current_model": "Open-Orca/Mistral-7B-OpenOrca",
+
+  // ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
+  // Project Description
+
+  // Description of the program you want the agents to develop
+  // You can also set it to `null` if you want the Product Owner agent
+  // to prompt you for your project description each time you run OADS.
+  "initial_project_description": "Create a \"guess the number\" CLI game in Python.",
+
+  // ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
+  // List of OpenAI-Compatible API endpoints
+
+  // The `model` key must be unique.
+  // https://microsoft.github.io/FLAML/docs/reference/autogen/oai/completion/#create
+  "models": [
+    // Open-source LLM deployment using "Text generation web UI" with `OpenAI` extension enabled:
+    // https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai#an-openedai-api-openai-like
+    // FUNCTIONS ARE NOT SUPPORTED BY THIS "Text generation web UI" EXTENSION (YET?):
+    // This is why we use a second endpoint — a Functionary LLM behind a vLLM OpenAI-Compatible API —
+    // in order to give Function Calling abilities to almost any smart-enough open-source LLM.
+
+    // This can also be any inference endpoint following the OpenAI API specs,
+    // regardless of the model you use behind it.
+    {
+      "model": "Open-Orca/Mistral-7B-OpenOrca",
+      "api_base": "https://[YOUR_CONTAINER_ID]-5001.proxy.runpod.net", // or your public endpoint
+      "api_key": "[CUSTOM_API_KEY]",
+      "api_type": "open_ai"
+    }
+  ],
+
+  // ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――
+  // Functionary LLM API Endpoint Configuration
+
+  // This must be a secondary deployment. Don't use this endpoint in your `models` list.
+  // You can deploy it in one click using this GitHub repository:
+  // https://github.com/ivangabriele/docker-functionary
+  "functionary_model": {
+    "model": "musabgultekin/functionary-7b-v1",
+    "api_base": "https://[YOUR_CONTAINER_ID]-8000.proxy.runpod.net/v1",
+    "api_key": "functionary", // Dummy API Key since it can't be `null`
+    "api_type": "open_ai"
+  }
+}
diff --git a/main.backup.py b/main.backup.py
new file mode 100644
index 0000000..953e9e2
--- /dev/null
+++ b/main.backup.py
@@ -0,0 +1,142 @@
+"""
+Module Name: main.backup.py
+
+Short Description:
+This program intends to leverage Microsoft Autogen to automate software development via AI agents.
+
+Detailed Description:
+Microsoft Autogen is a framework that enables the development of LLM (Large Language Model) applications
+using multiple agents capable of conversing with each other to solve tasks.
+Autogen agents are customizable, conversable, and can seamlessly incorporate human participation.
+These agents can operate in various modes, utilizing combinations of LLMs, human input, and other tools.
+""" + +import asyncio +import autogen + +import actions + +import agents +from constants import ( + CEO_LLM_CONFIG, + COMMON_LLM_CONFIG, + PROJECT_CONFIG, + # PROJECT_DIRECTORY_NAME, +) +import utils + + +# CEO Human Proxy Agent +# Uses shell with human-in-the-loop, meaning the human user can either give their input when asked, +# or ignore step, to let the agent interactions continue from there. +ceo_user_proxy_agent = autogen.UserProxyAgent( + "CEO", + # code_execution_config={"work_dir": PROJECT_DIRECTORY_NAME}, + code_execution_config=False, + human_input_mode="TERMINATE", + llm_config=CEO_LLM_CONFIG, + # is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"), + # max_consecutive_auto_reply=10, + system_message=utils.clean_text( + """ + You are the CEO. + + You are assisted by a Product Owner who will try his best to achieve your goals with the team under his orders. + Your are the only agent allowed to run functions. + + Rules: + - Keep it short. Get to the point. Be straightforward. Always specify your recipient's name. + - Only reply to messages prefixed with your name, i.e.: "CEO, etc". + - Only communicate with the Product Owner, and nobody else. + + Reply TERMINATE when the task has been solved to full satisfaction. + Otherwise, reply CONTINUE. + """ + ), +) + + +COMMON_FUNCTION_MAP = { + "fetch_web_page": actions.fetch_web_page, + "read_file": actions.read_file, + "run_bash_command": actions.run_bash_command, + "run_rust_file": actions.run_rust_file, + "search_web": actions.search_web, + "write_file": actions.write_file, +} + +ceo_user_proxy_agent.register_function( + function_map=COMMON_FUNCTION_MAP, +) + +functioneer = agents.Functioneer() +# functioneer.as_assistant_agent.register_function( +# function_map=COMMON_FUNCTION_MAP, +# ) + +product_owner = agents.ProductOwner() +# product_owner.as_assistant_agent.register_function( +# function_map=COMMON_FUNCTION_MAP, +# ) + +software_engineer = agents.SoftwareEngineer() +# software_engineer.as_assistant_agent.register_function( +# function_map=COMMON_FUNCTION_MAP, +# ) + +user_experience_designer = agents.UserExperienceDesigner() +# user_experience_designer.as_assistant_agent.register_function( +# function_map=COMMON_FUNCTION_MAP, +# ) + +group_chat = autogen.GroupChat( + admin_name="Administrator", + agents=[ + ceo_user_proxy_agent, + functioneer.as_assistant_agent, + product_owner.as_assistant_agent, + software_engineer.as_assistant_agent, + user_experience_designer.as_assistant_agent, + ], + messages=[], + max_round=100, +) + +group_chat_manager = autogen.GroupChatManager( + groupchat=group_chat, llm_config=COMMON_LLM_CONFIG +) + +utils.print_project_config(PROJECT_CONFIG) + +if PROJECT_CONFIG.initial_project_description is None: + initial_project_description = ceo_user_proxy_agent.get_human_input( + "You didn't specify a project in `env.jsonc`. What do you want us to develop?\nRequest: " + ) +else: + initial_project_description = PROJECT_CONFIG.initial_project_description + + +async def start(): + # ceo_user_proxy_agent.initiate_chat( + # recipient=group_chat_manager, + # message=utils.clean_text( + # f"Product Owner, I want your team to achieve these goals:\n- {initial_project_description}" + # ), + # ) + a = await software_engineer.as_assistant_agent.a_receive( + message=utils.clean_text( + "Can you find the SimCity 2000 game main features for me please?" 
+ ), + request_reply=True, + sender=ceo_user_proxy_agent, + silent=True, + ) + + print("a") + print(a) + print("last_message") + print(software_engineer.as_assistant_agent.last_message(ceo_user_proxy_agent)) + + +if __name__ == "__main__": + asyncio.run(start()) diff --git a/main.py b/main.py index 9dafa42..f30db8b 100644 --- a/main.py +++ b/main.py @@ -11,12 +11,14 @@ These agents can operate in various modes, utilizing combinations of LLMs, human input, and other tools. """ +import asyncio import autogen -import actions - import agents -from constants import COMMON_LLM_CONFIG, PROJECT_CONFIG, PROJECT_DIRECTORY_NAME +from constants import ( + CEO_LLM_CONFIG, + PROJECT_CONFIG, +) import utils @@ -25,9 +27,10 @@ # or ignore step, to let the agent interactions continue from there. ceo_user_proxy_agent = autogen.UserProxyAgent( "CEO", - code_execution_config={"work_dir": PROJECT_DIRECTORY_NAME}, + # code_execution_config={"work_dir": PROJECT_DIRECTORY_NAME}, + code_execution_config=False, human_input_mode="TERMINATE", - llm_config=COMMON_LLM_CONFIG, + llm_config=CEO_LLM_CONFIG, # is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"), # max_consecutive_auto_reply=10, system_message=utils.clean_text( @@ -49,69 +52,18 @@ ) -COMMON_FUNCTION_MAP = { - "fetch_web_page": actions.fetch_web_page, - "read_file": actions.read_file, - "run_bash_command": actions.run_bash_command, - "run_rust_file": actions.run_rust_file, - "search_web": actions.search_web, - "write_file": actions.write_file, -} - -ceo_user_proxy_agent.register_function( - function_map=COMMON_FUNCTION_MAP, -) - - +assistant = agents.Assistant() functioneer = agents.Functioneer() -# functioneer.as_assistant_agent.register_function( -# function_map=COMMON_FUNCTION_MAP, -# ) - -product_owner = agents.ProductOwner() -# product_owner.as_assistant_agent.register_function( -# function_map=COMMON_FUNCTION_MAP, -# ) - software_engineer = agents.SoftwareEngineer() -# software_engineer.as_assistant_agent.register_function( -# function_map=COMMON_FUNCTION_MAP, -# ) -user_experience_designer = agents.UserExperienceDesigner() -# user_experience_designer.as_assistant_agent.register_function( -# function_map=COMMON_FUNCTION_MAP, -# ) - -group_chat = autogen.GroupChat( - admin_name="Administrator", - agents=[ - ceo_user_proxy_agent, - functioneer.as_assistant_agent, - product_owner.as_assistant_agent, - software_engineer.as_assistant_agent, - user_experience_designer.as_assistant_agent, - ], - messages=[], - max_round=100, -) - -group_chat_manager = autogen.GroupChatManager( - groupchat=group_chat, llm_config=COMMON_LLM_CONFIG -) utils.print_project_config(PROJECT_CONFIG) -if PROJECT_CONFIG.initial_project_description is None: - initial_project_description = ceo_user_proxy_agent.get_human_input( - "You didn't specify a project in `env.jsonc`. 
What do you want us to develop?\nRequest: " - ) -else: - initial_project_description = PROJECT_CONFIG.initial_project_description -ceo_user_proxy_agent.initiate_chat( - recipient=group_chat_manager, - message=utils.clean_text( - f"Product Owner, I want your team to achieve these goals:\n- {initial_project_description}" - ), -) +if __name__ == "__main__": + asyncio.run( + assistant.start( + ceo_user_proxy_agent=ceo_user_proxy_agent, + initial_prompt="What are the best Ubuntu features?", + ) + ) diff --git a/poetry.lock b/poetry.lock index f00ae98..f813e43 100644 --- a/poetry.lock +++ b/poetry.lock @@ -921,7 +921,7 @@ tests = ["pytest"] [[package]] name = "pyautogen" -version = "0.1.10" +version = "0.1.11" description = "Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework" optional = false python-versions = ">=3.8" @@ -945,7 +945,7 @@ test = ["chromadb", "coverage (>=5.3)", "datasets", "ipykernel", "lancedb", "nbc type = "git" url = "https://github.com/microsoft/autogen" reference = "HEAD" -resolved_reference = "294e006ac9a35975bce896b4a525c732f7aee159" +resolved_reference = "a8da3854c00cf8a4517d2572668a8b45077c63bc" [[package]] name = "pygments" @@ -1014,13 +1014,13 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no [[package]] name = "pytest-mock" -version = "3.11.1" +version = "3.12.0" description = "Thin-wrapper around the mock package for easier use with pytest" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-mock-3.11.1.tar.gz", hash = "sha256:7f6b125602ac6d743e523ae0bfa71e1a697a2f5534064528c6ff84c2f7c2fc7f"}, - {file = "pytest_mock-3.11.1-py3-none-any.whl", hash = "sha256:21c279fff83d70763b05f8874cc9cfb3fcacd6d354247a976f9529d19f9acf39"}, + {file = "pytest-mock-3.12.0.tar.gz", hash = "sha256:31a40f038c22cad32287bb43932054451ff5583ff094bca6f675df2f8bc1a6e9"}, + {file = "pytest_mock-3.12.0-py3-none-any.whl", hash = "sha256:0972719a7263072da3a21c7f4773069bcc7486027d7e8e1f81d98a47e701bc4f"}, ] [package.dependencies] @@ -1203,13 +1203,13 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.9" +version = "2.31.0.10" description = "Typing stubs for requests" optional = false python-versions = ">=3.7" files = [ - {file = "types-requests-2.31.0.9.tar.gz", hash = "sha256:3bb11188795cc3aa39f9635032044ee771009370fb31c3a06ae952b267b6fcd7"}, - {file = "types_requests-2.31.0.9-py3-none-any.whl", hash = "sha256:140e323da742a0cd0ff0a5a83669da9ffcebfaeb855d367186b2ec3985ba2742"}, + {file = "types-requests-2.31.0.10.tar.gz", hash = "sha256:dc5852a76f1eaf60eafa81a2e50aefa3d1f015c34cf0cba130930866b1b22a92"}, + {file = "types_requests-2.31.0.10-py3-none-any.whl", hash = "sha256:b32b9a86beffa876c0c3ac99a4cd3b8b51e973fb8e3bd4e0a6bb32c7efad80fc"}, ] [package.dependencies] @@ -1228,13 +1228,13 @@ files = [ [[package]] name = "urllib3" -version = "2.0.6" +version = "2.0.7" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.7" files = [ - {file = "urllib3-2.0.6-py3-none-any.whl", hash = "sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2"}, - {file = "urllib3-2.0.6.tar.gz", hash = "sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564"}, + {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, + {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, ] [package.extras] diff --git a/tests/utils/test_get_command_from_message.py b/tests/utils/test_get_command_from_message.py new file mode 100644 index 0000000..60e2754 --- /dev/null +++ b/tests/utils/test_get_command_from_message.py @@ -0,0 +1,39 @@ +import utils + + +def test_get_command_from_message(): + # Test with a valid command and parameters + message1 = "SEARCH weather in Paris" + assert utils.get_command_from_message(message1) == ( + "SEARCH", + ["weather in Paris"], + ) + + # Test with a valid command and no parameters + message2 = "TERMINATE" + assert utils.get_command_from_message(message2) == ( + "TERMINATE", + [], + ) + + # Test with a message containing no command + message4 = "Some random message without a command" + assert utils.get_command_from_message(message4) is None + + # Test with a message containing command but not at the start + message5 = "Message with SEARCH command in the middle" + assert utils.get_command_from_message(message5) is None + + # Test with a message containing only the command and no other text + message7 = "OPEN" + assert utils.get_command_from_message(message7) == ( + "OPEN", + [], + ) + + # Test with a message containing command followed by parameters, but with extra whitespace + message8 = " READ dir/file_path.txt " + assert utils.get_command_from_message(message8) == ( + "READ", + ["dir/file_path.txt"], + ) diff --git a/typedefs.py b/typedefs.py index baf3b73..6108dc4 100644 --- a/typedefs.py +++ b/typedefs.py @@ -4,11 +4,16 @@ @dataclass class ModelConfig: - model: str - api_key: str api_base: Optional[str] + api_key: str api_type: Optional[str] api_version: Optional[str] + model: str + + +@dataclass +class UserAgentConfig: + current_model: Optional[str] @dataclass @@ -17,3 +22,4 @@ class ProjectConfig: current_model: str initial_project_description: Optional[str] models: List[ModelConfig] + user_proxy_agent: UserAgentConfig diff --git a/utils/__init__.py b/utils/__init__.py index c0eb9a7..75f1d90 100644 --- a/utils/__init__.py +++ b/utils/__init__.py @@ -1,5 +1,7 @@ from .clean_text import clean_text from .debug import debug +from .ensure_str import ensure_str +from .get_command_from_message import get_command_from_message from .get_model_config_as_dict import get_model_config_as_dict from .mask_secret import mask_secret from .print_project_config import print_project_config diff --git a/utils/ensure_str.py b/utils/ensure_str.py new file mode 100644 index 0000000..e74fc3f --- /dev/null +++ b/utils/ensure_str.py @@ -0,0 +1,14 @@ +import utils + + +def ensure_str(value) -> str: + if not isinstance(value, str): + error_message = ( + f"Expected value to be of type `str`, got `{type(value).__name__}`." 
+ ) + + utils.debug("Error", error_message) + + raise TypeError(error_message) + + return value diff --git a/utils/get_command_from_message.py b/utils/get_command_from_message.py new file mode 100644 index 0000000..f6a3d30 --- /dev/null +++ b/utils/get_command_from_message.py @@ -0,0 +1,29 @@ +import re +from typing import List, Tuple + +import utils + + +# AGENT_COMMANDS = {"OPEN", "READ", "RUN", "SEARCH", "TERMINATE", "WRITE"} +def get_command_from_message(message: str) -> Tuple[str, List[str]] | str | None: + if message == "TERMINATE": + return "TERMINATE", [] + + pattern = re.compile(r"^\s*(OPEN|READ|SEARCH|WRITE)\s*(.*)\s*$") + match = pattern.search(message) + if not match: + return None + + command = utils.ensure_str(match.group(1)) + parameter = utils.ensure_str(match.group(2)).strip() + + if command == "WRITE": + file_path, *file_content = parameter.split(" ", 1) + if not file_content: + return "The WRITE command requires both a file path and file content." + + return command, [file_path, " ".join(file_content)] + + parameters = [parameter] + + return command, parameters diff --git a/utils/get_model_config_as_dict.py b/utils/get_model_config_as_dict.py index d12cadd..1c6d69f 100644 --- a/utils/get_model_config_as_dict.py +++ b/utils/get_model_config_as_dict.py @@ -4,14 +4,18 @@ from typedefs import ProjectConfig -def get_model_config_as_dict(project_config: ProjectConfig) -> Optional[Dict[str, Any]]: +def get_model_config_as_dict( + project_config: ProjectConfig, custom_current_model: Optional[str] = None +) -> Optional[Dict[str, Any]]: """Return the dictionary representation of the selected model configuration.""" + current_model = custom_current_model or project_config.current_model + model_config = next( ( config_dict for config_dict in project_config.models - if config_dict.model == project_config.current_model + if config_dict.model == current_model ), None, ) diff --git a/utils/mask_secret.py b/utils/mask_secret.py index 3e156d6..5355d71 100644 --- a/utils/mask_secret.py +++ b/utils/mask_secret.py @@ -1,5 +1,9 @@ -def mask_secret(value: str) -> str: +def mask_secret(value: str | None) -> str | None: """Mask a secret value, revealing only the first 3 and last 3 characters.""" + if value is None: + return None + if len(value) <= 6: return "*" * len(value) + return value[:3] + "*" * (len(value) - 6) + value[-3:] diff --git a/utils/print_project_config.py b/utils/print_project_config.py index 31b4cc9..f3cc785 100644 --- a/utils/print_project_config.py +++ b/utils/print_project_config.py @@ -38,7 +38,9 @@ def _print_model_config(model_config: dict, is_selected: bool): for model_key, model_value in model_config.items(): formatted_value = ( - utils.mask_secret(model_value) if model_key in ["api_key"] else model_value + utils.mask_secret(model_value) + if model_key in ["api_base", "api_key"] + else model_value ) print( colored(