From 23c99b95a8b74fee99bafbad9740a2b9494d805b Mon Sep 17 00:00:00 2001 From: Botir Khaltaev Date: Thu, 27 Jun 2024 22:51:38 +0100 Subject: [PATCH 1/4] Create Code and Test classes to abstract these objects and use them in other actions --- .../forge/actions/code_gen/__init__.py | 4 + .../forge/actions/code_gen/code_gen.py | 311 +++--------------- .../forge/actions/code_gen/models/Code.py | 16 + .../forge/actions/code_gen/models/TestCase.py | 16 + .../forge/actions/diagram_gen/diagram_gen.py | 8 +- 5 files changed, 86 insertions(+), 269 deletions(-) create mode 100644 autogpts/SoloAgent/forge/actions/code_gen/__init__.py create mode 100644 autogpts/SoloAgent/forge/actions/code_gen/models/Code.py create mode 100644 autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py diff --git a/autogpts/SoloAgent/forge/actions/code_gen/__init__.py b/autogpts/SoloAgent/forge/actions/code_gen/__init__.py new file mode 100644 index 000000000000..763dc6bfeb83 --- /dev/null +++ b/autogpts/SoloAgent/forge/actions/code_gen/__init__.py @@ -0,0 +1,4 @@ + +from .models import Code, TestCase + +__all__ = ["Code", "TestCase"] diff --git a/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py b/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py index 5e923de91c99..3d1e35c36056 100644 --- a/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py +++ b/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py @@ -1,22 +1,17 @@ - from __future__ import annotations -from typing import Dict +from typing import Dict, Any from ..registry import action from forge.sdk import ForgeLogger, PromptEngine from forge.llm import chat_completion_request import os -from forge.sdk import Agent, LocalWorkspace -import re +from forge.sdk import Agent import subprocess import json +from ..models import Code, TestCase LOG = ForgeLogger(__name__) -CodeType = Dict[str, str] -TestCaseType = Dict[str, str] - - @action( name="test_code", description="Test the generated code for errors", @@ -47,169 +42,6 @@ async def 
test_code(agent: Agent, task_id: str, project_path: str) -> str: return f"Failed to test code: {e}" -@action( - name="generate_solana_code", - description="Generate Solana on-chain code using Anchor based on the provided specification", - parameters=[ - { - "name": "specification", - "description": "Code specification", - "type": "string", - "required": True, - } - ], - output_type="str", -) -async def generate_solana_code(agent: Agent, task_id: str, specification: str) -> str: - - prompt_engine = PromptEngine("gpt-4o") - lib_prompt = prompt_engine.load_prompt( - "anchor-lib", specification=specification) - instructions_prompt = prompt_engine.load_prompt( - "anchor-instructions", specification=specification) - errors_prompt = prompt_engine.load_prompt( - "anchor-errors", specification=specification) - cargo_toml_prompt = prompt_engine.load_prompt( - "anchor-cargo-toml", specification=specification) - anchor_toml_prompt = prompt_engine.load_prompt( - "anchor-anchor-toml", specification=specification) - - messages = [ - {"role": "system", "content": "You are a code generation assistant specialized in Anchor for Solana."}, - {"role": "user", "content": lib_prompt}, - {"role": "user", "content": instructions_prompt}, - {"role": "user", "content": errors_prompt}, - {"role": "user", "content": cargo_toml_prompt}, - {"role": "user", "content": anchor_toml_prompt}, - {"role": "user", "content": "Return the whole code as a string with the file markers intact that you received in each of the input without changing their wording at all."} - ] - - chat_completion_kwargs = { - "messages": messages, - "model": "gpt-3.5-turbo", - } - - chat_response = await chat_completion_request(**chat_completion_kwargs) - response_content = chat_response["choices"][0]["message"]["content"] - - LOG.info(f"Response content: {response_content}") - - try: - parts = parse_response_content(response_content) - except Exception as e: - LOG.error(f"Error parsing response content: {e}") - return "Failed 
to generate Solana on-chain code due to response parsing error." - - base_path = agent.workspace.base_path if isinstance( - agent.workspace, LocalWorkspace) else str(agent.workspace.base_path) - project_path = os.path.join(base_path, task_id) - LOG.info(f"Base path: {base_path}") - LOG.info(f"Project path: {project_path}") - cargo_toml_content = """ - [package] - name = "my_anchor_program" - version = "0.1.0" - edition = "2018" - - [dependencies] - anchor-lang = "0.30.1" - """ - - LOG.info(f"id: {task_id}") - LOG.info(f"Parts: {response_content}") - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'src', 'lib.rs'), data=parts['anchor-lib.rs'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'src', 'instructions.rs'), data=parts['anchor-instructions.rs'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'src', 'errors.rs'), data=parts['errors.rs'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'Cargo.toml'), data=cargo_toml_content.encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'Anchor.toml'), data=parts['Anchor.toml'].encode() - ) - test_result = await agent.abilities.run_action(task_id, "test_code", project_path=project_path) - if "All tests passed" not in test_result: - # Regenerate the code based on errors - LOG.info(f"Regenerating code due to errors: {test_result}") - return await generate_solana_code(agent, task_id, specification) - - return "Solana on-chain code generated, tested, and verified successfully." 
- - -@action( - name="generate_frontend_code", - description="Generate frontend code based on the provided specification", - parameters=[ - { - "name": "specification", - "description": "Frontend code specification", - "type": "string", - "required": True, - } - ], - output_type="str", -) -async def generate_frontend_code(agent, task_id: str, specification: str) -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - index_prompt = prompt_engine.load_prompt( - "frontend-index", specification=specification) - styles_prompt = prompt_engine.load_prompt( - "frontend-styles", specification=specification) - app_prompt = prompt_engine.load_prompt( - "frontend-app", specification=specification) - package_json_prompt = prompt_engine.load_prompt( - "frontend-package-json", specification=specification) - webpack_config_prompt = prompt_engine.load_prompt( - "frontend-webpack-config", specification=specification) - - messages = [ - {"role": "system", "content": "You are a code generation assistant specialized in frontend development."}, - {"role": "user", "content": index_prompt}, - {"role": "user", "content": styles_prompt}, - {"role": "user", "content": app_prompt}, - {"role": "user", "content": package_json_prompt}, - {"role": "user", "content": webpack_config_prompt}, - ] - - chat_completion_kwargs = { - "messages": messages, - "model": "gpt-3.5-turbo", - } - chat_response = await chat_completion_request(**chat_completion_kwargs) - response_content = chat_response["choices"][0]["message"]["content"] - - try: - parts = parse_response_content(response_content) - except Exception as e: - LOG.error(f"Error parsing response content: {e}") - return "Failed to generate Solana on-chain code due to response parsing error." 
- - project_path = os.path.join(agent.workspace.base_path, task_id) - - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'src', 'index.html'), data=parts['index.html'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'src', 'styles.css'), data=parts['styles.css'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'src', 'app.js'), data=parts['app.js'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'package.json'), data=parts['package.json'].encode() - ) - await agent.abilities.run_action( - task_id, "write_file", file_path=os.path.join(project_path, 'webpack.config.js'), data=parts['webpack.config.js'].encode() - ) - - return "Modular frontend code generated and written to respective files." - - @action( name="generate_unit_tests", description="Generates unit tests for Solana code.", @@ -223,36 +55,16 @@ async def generate_frontend_code(agent, task_id: str, specification: str) -> str ], output_type="str", ) -async def generate_test_cases(agent: Agent, task_id: str, code_dict: CodeType) -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - test_struct_prompt = prompt_engine.load_prompt("test-case-struct-return") - +async def generate_test_cases(agent: Agent, task_id: str, code_dict: Dict[str, str]) -> str: + code_type = Code(code_dict) messages = [ - {"role": "system", "content": "You are a code generation assistant specialized in generating test cases."}, - ] - - for file_name, code in code_dict.items(): - LOG.info(f"File Name: {file_name}") - LOG.info(f"Code: {code}") - test_prompt = prompt_engine.load_prompt( - "test-case-generation", file_name=file_name, code=code) - messages.append({"role": "user", "content": test_prompt}) - - messages.append({"role": "user", "content": test_struct_prompt}) - - chat_completion_kwargs = { - 
"messages": messages, - "model": "gpt-3.5-turbo", - } - - chat_response = await chat_completion_request(**chat_completion_kwargs) - response_content = chat_response["choices"][0]["message"]["content"] + {"role": "system", "content": "You are a code generation assistant specialized in generating test cases."} + ] + [ + {"role": "user", "content": load_test_prompt(file_name, code)} + for file_name, code in code_type.items() + ] + [{"role": "user", "content": load_test_struct_prompt()}] - LOG.info(f"Response content: {response_content}") - - base_path = agent.workspace.base_path if isinstance( - agent.workspace, LocalWorkspace) else str(agent.workspace.base_path) - project_path = os.path.join(base_path, task_id) + response_content = await get_chat_response(messages) try: test_cases = parse_test_cases_response(response_content) @@ -260,88 +72,53 @@ async def generate_test_cases(agent: Agent, task_id: str, code_dict: CodeType) - LOG.error(f"Error parsing test cases response: {e}") return "Failed to generate test cases due to response parsing error." - for file_name, test_case in test_cases.items(): - test_file_path = os.path.join(project_path, 'tests', file_name) - await agent.abilities.run_action( - task_id, "write_file", file_path=test_file_path, data=test_case.encode() - ) + project_path = os.path.join(agent.workspace.base_path, task_id) + await write_test_files(agent, task_id, project_path, TestCase(test_cases)) return "Test cases generated and written to respective files." 
-def sanitize_json_string(json_string: str) -> str: - # Replace newlines and tabs with escaped versions - sanitized_string = json_string.replace( - '\n', '\\n').replace('\t', '\\t').replace(' ', '\\t') - return sanitized_string +async def get_chat_response(messages: list[dict[str, Any]]) -> str: + chat_completion_kwargs = { + "messages": messages, + "model": "gpt-3.5-turbo", + } + chat_response = await chat_completion_request(**chat_completion_kwargs) + return chat_response["choices"][0]["message"]["content"] -def parse_test_cases_response(response_content: str) -> TestCaseType: +def parse_test_cases_response(response_content: str) -> TestCase: try: - # Extract JSON part from response content - json_start = response_content.index('{') - json_end = response_content.rindex('}') + 1 - json_content = response_content[json_start:json_end] - - # Sanitize JSON content - sanitized_content = sanitize_json_string(json_content) - - # Load JSON content - response_dict = json.loads(sanitized_content) - - file_name = response_dict["file_name"] - test_file = response_dict["test_file"] - - # Unescape newlines and tabs in test_file - test_file = test_file.replace('\\n', '\n').replace( - '\\t', '\t').strip().strip('"') - - test_cases = {file_name: test_file} + response_dict = json.loads(response_content) + test_cases = TestCase( + {response_dict["file_name"]: response_dict["test_file"]}) return test_cases - except (json.JSONDecodeError, ValueError) as e: + except json.JSONDecodeError as e: LOG.error(f"Error decoding JSON response: {e}") raise -def parse_response_content(response_content: str) -> dict: - # This function will split the response content into different parts - parts = { - 'anchor-lib.rs': '', - 'anchor-instructions.rs': '', - 'errors.rs': '', - 'Cargo.toml': '', - 'Anchor.toml': '' - } +async def write_code_files(agent: Agent, task_id: str, project_path: str, parts: Code) -> None: + for file_name, content in parts.items(): + await write_file(agent, task_id, 
os.path.join(project_path, 'src', file_name), content.encode()) - current_part = None - for line in response_content.split('\n'): - if '// anchor-lib.rs' in line: - current_part = 'anchor-lib.rs' - elif '// anchor-instructions.rs' in line: - current_part = 'anchor-instructions.rs' - elif '// errors.rs' in line: - current_part = 'errors.rs' - elif '# Cargo.toml' in line: - current_part = 'Cargo.toml' - elif '# Anchor.toml' in line: - current_part = 'Anchor.toml' - elif current_part: - parts[current_part] += line + '\n' - for key in parts: - parts[key] = re.sub(r'```|rust|toml', '', parts[key]).strip() +async def write_test_files(agent: Agent, task_id: str, project_path: str, test_cases: TestCase) -> None: + for file_name, test_case in test_cases.items(): + await write_file(agent, task_id, os.path.join(project_path, 'tests', file_name), test_case.encode()) - return parts +async def write_file(agent: Agent, task_id: str, file_path: str, data: bytes) -> None: + await agent.abilities.run_action( + task_id, "write_file", file_path=file_path, data=data + ) -def parse_test_cases_response(response_content: str) -> TestCaseType: - # Correctly parse the JSON response content by escaping control characters - try: - response_dict = json.loads(response_content) - file_name = response_dict["file_name"] - test_file = response_dict["test_file"] - test_cases = {file_name: test_file} - return test_cases - except json.JSONDecodeError as e: - LOG.error(f"Error decoding JSON response: {e}") - raise + +def load_test_prompt(file_name: str, code: str) -> str: + prompt_engine = PromptEngine("gpt-3.5-turbo") + return prompt_engine.load_prompt("test-case-generation", file_name=file_name, code=code) + + +def load_test_struct_prompt() -> str: + prompt_engine = PromptEngine("gpt-3.5-turbo") + return prompt_engine.load_prompt("test-case-struct-return") diff --git a/autogpts/SoloAgent/forge/actions/code_gen/models/Code.py b/autogpts/SoloAgent/forge/actions/code_gen/models/Code.py new file mode 
100644 index 000000000000..9c3dcc9b172d --- /dev/null +++ b/autogpts/SoloAgent/forge/actions/code_gen/models/Code.py @@ -0,0 +1,16 @@ +from typing import Dict +from dataclasses import dataclass, field + + +@dataclass +class Code: + code_files: Dict[str, str] = field(default_factory=dict) + + def __getitem__(self, item: str) -> str: + return self.code_files[item] + + def __setitem__(self, key: str, value: str) -> None: + self.code_files[key] = value + + def items(self): + return self.code_files.items() diff --git a/autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py b/autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py new file mode 100644 index 000000000000..f60cbd1c25d6 --- /dev/null +++ b/autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py @@ -0,0 +1,16 @@ +from typing import Dict +from dataclasses import dataclass, field + + +@dataclass +class TestCase: + test_cases: Dict[str, str] = field(default_factory=dict) + + def __getitem__(self, item: str) -> str: + return self.test_cases[item] + + def __setitem__(self, key: str, value: str) -> None: + self.test_cases[key] = value + + def items(self): + return self.test_cases.items() diff --git a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py index 29e7c52ab410..f0ca289edefe 100644 --- a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py +++ b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py @@ -7,6 +7,7 @@ LOG = ForgeLogger(__name__) API_KEY = os.getenv("ERASERIO_API_KEY") + @action( name="gen-diagram-eraserio", description="Generate a code diagram using eraser.io", @@ -26,9 +27,10 @@ ], output_type="str" ) -async def generate_diagram(agent, task_id: str, specification: str, code: str) -> str: +async def generate_architecture_diagram(agent, task_id: str, specification: str, code: str) -> str: prompt_engine = PromptEngine("gpt-3.5-turbo") - diagram_prompt = prompt_engine.load_prompt("diagram-prompt", 
specification=specification, code=code) + diagram_prompt = prompt_engine.load_prompt( + "diagram-prompt", specification=specification, code=code) url = "https://app.eraser.io/api/render/prompt" @@ -57,3 +59,5 @@ async def generate_diagram(agent, task_id: str, specification: str, code: str) - return "Failed to generate diagram." +async def generate_use_case_diagram(code): + pass From d0359303fd853eeacb0d14266ceaeecce582e1f3 Mon Sep 17 00:00:00 2001 From: Botir Khaltaev Date: Fri, 28 Jun 2024 16:03:36 +0100 Subject: [PATCH 2/4] Implement usecase diagram functionality --- .../forge/actions/code_gen/code_gen.py | 113 ++++++++++-------- .../{models/TestCase.py => models.py} | 15 +++ .../forge/actions/code_gen/models/Code.py | 16 --- .../forge/actions/diagram_gen/diagram_gen.py | 99 ++++++++++----- .../gpt-3.5-turbo/anchor-anchor-toml.j2 | 14 --- .../gpt-3.5-turbo/anchor-cargo-toml.j2 | 18 --- .../prompts/gpt-3.5-turbo/anchor-errors.j2 | 16 --- .../gpt-3.5-turbo/anchor-instructions.j2 | 27 ----- .../forge/prompts/gpt-3.5-turbo/anchor-lib.j2 | 28 ----- .../prompts/gpt-3.5-turbo/diagram-prompt.j2 | 8 +- .../gpt-3.5-turbo/solana-code-generation.j2 | 4 - .../test-case-generation-frontend.j2 | 7 ++ .../gpt-3.5-turbo/test-case-generation.j2 | 3 +- .../test-case-struct-return-frontend.j2 | 17 +++ .../gpt-3.5-turbo/test-case-struct-return.j2 | 13 +- .../use-case-diagram-gen-return.j2 | 6 + 16 files changed, 196 insertions(+), 208 deletions(-) rename autogpts/SoloAgent/forge/actions/code_gen/{models/TestCase.py => models.py} (54%) delete mode 100644 autogpts/SoloAgent/forge/actions/code_gen/models/Code.py delete mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2 delete mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2 delete mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2 delete mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2 delete mode 100644 
autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2 delete mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2 create mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2 create mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2 create mode 100644 autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 diff --git a/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py b/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py index 3d1e35c36056..28aef3095860 100644 --- a/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py +++ b/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py @@ -1,13 +1,12 @@ from __future__ import annotations -from typing import Dict, Any from ..registry import action -from forge.sdk import ForgeLogger, PromptEngine +from forge.sdk import ForgeLogger, PromptEngine, Agent, LocalWorkspace from forge.llm import chat_completion_request import os -from forge.sdk import Agent import subprocess import json -from ..models import Code, TestCase +from typing import Dict +from .models import Code, TestCase LOG = ForgeLogger(__name__) @@ -53,32 +52,57 @@ async def test_code(agent: Agent, task_id: str, project_path: str) -> str: "required": True } ], - output_type="str", + output_type="TestCase object", ) -async def generate_test_cases(agent: Agent, task_id: str, code_dict: Dict[str, str]) -> str: - code_type = Code(code_dict) - messages = [ - {"role": "system", "content": "You are a code generation assistant specialized in generating test cases."} - ] + [ - {"role": "user", "content": load_test_prompt(file_name, code)} - for file_name, code in code_type.items() - ] + [{"role": "user", "content": load_test_struct_prompt()}] +async def generate_test_cases(agent: Agent, task_id: str, code_dict: Dict[str, str]) -> TestCase: + try: + prompt_engine = PromptEngine("gpt-3.5-turbo") + messages = [ + {"role": "system", 
"content": "You are a code generation assistant specialized in generating test cases."}] - response_content = await get_chat_response(messages) + test_prompt_template, test_struct_template, folder_name = determine_templates( + next(iter(code_dict))) + if not test_prompt_template: + return "Unsupported file type." + + code = Code(code_dict) + for file_name, code_content in code.items(): + LOG.info(f"File Name: {file_name}") + LOG.info(f"Code: {code_content}") + test_prompt = prompt_engine.load_prompt( + test_prompt_template, file_name=file_name, code=code_content) + messages.append({"role": "user", "content": test_prompt}) + + test_struct_prompt = prompt_engine.load_prompt(test_struct_template) + messages.append({"role": "user", "content": test_struct_prompt}) + + response_content = await get_chat_response(messages) + LOG.info(f"Response content: {response_content}") + + project_path = get_project_path(agent, task_id, folder_name) + os.makedirs(project_path, exist_ok=True) - try: test_cases = parse_test_cases_response(response_content) + await write_test_cases(agent, task_id, project_path, test_cases) + + return test_cases + except Exception as e: - LOG.error(f"Error parsing test cases response: {e}") - return "Failed to generate test cases due to response parsing error." + LOG.error(f"Error generating test cases: {e}") + return "Failed to generate test cases due to an error." - project_path = os.path.join(agent.workspace.base_path, task_id) - await write_test_files(agent, task_id, project_path, TestCase(test_cases)) - return "Test cases generated and written to respective files." 
+def determine_templates(first_file_name: str): + if first_file_name.endswith(('.js', '.ts')): + return "test-case-generation-frontend", "test-case-struct-return-frontend", 'frontend/tests' + elif first_file_name.endswith('.rs'): + return "test-case-generation", "test-case-struct-return", 'rust/tests' + else: + LOG.error(f"Unsupported file type for: {first_file_name}") + return None, None, None -async def get_chat_response(messages: list[dict[str, Any]]) -> str: +async def get_chat_response(messages: list) -> str: chat_completion_kwargs = { "messages": messages, "model": "gpt-3.5-turbo", @@ -87,38 +111,33 @@ async def get_chat_response(messages: list[dict[str, Any]]) -> str: return chat_response["choices"][0]["message"]["content"] -def parse_test_cases_response(response_content: str) -> TestCase: - try: - response_dict = json.loads(response_content) - test_cases = TestCase( - {response_dict["file_name"]: response_dict["test_file"]}) - return test_cases - except json.JSONDecodeError as e: - LOG.error(f"Error decoding JSON response: {e}") - raise - +def get_project_path(agent: Agent, task_id: str, folder_name: str) -> str: + base_path = agent.workspace.base_path if isinstance( + agent.workspace, LocalWorkspace) else str(agent.workspace.base_path) + return os.path.join(base_path, task_id, folder_name) -async def write_code_files(agent: Agent, task_id: str, project_path: str, parts: Code) -> None: - for file_name, content in parts.items(): - await write_file(agent, task_id, os.path.join(project_path, 'src', file_name), content.encode()) - -async def write_test_files(agent: Agent, task_id: str, project_path: str, test_cases: TestCase) -> None: +async def write_test_cases(agent: Agent, task_id: str, project_path: str, test_cases: TestCase): for file_name, test_case in test_cases.items(): - await write_file(agent, task_id, os.path.join(project_path, 'tests', file_name), test_case.encode()) + test_file_path = os.path.join(project_path, file_name) + await 
agent.abilities.run_action(task_id, "write_file", file_path=test_file_path, data=test_case.encode()) -async def write_file(agent: Agent, task_id: str, file_path: str, data: bytes) -> None: - await agent.abilities.run_action( - task_id, "write_file", file_path=file_path, data=data - ) +def parse_test_cases_response(response_content: str) -> TestCase: + try: + json_start = response_content.index('{') + json_end = response_content.rindex('}') + 1 + json_content = response_content[json_start:json_end] + LOG.info(f"JSON Content: {json_content}") -def load_test_prompt(file_name: str, code: str) -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - return prompt_engine.load_prompt("test-case-generation", file_name=file_name, code=code) + response_dict = json.loads(json_content) + file_name = response_dict["file_name"] + test_file = response_dict["test_file"].replace( + '\\n', '\n').replace('\\t', '\t').strip().strip('"') + return TestCase({file_name: test_file}) + except (json.JSONDecodeError, ValueError) as e: + LOG.error(f"Error decoding JSON response: {e}") + raise -def load_test_struct_prompt() -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - return prompt_engine.load_prompt("test-case-struct-return") diff --git a/autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py b/autogpts/SoloAgent/forge/actions/code_gen/models.py similarity index 54% rename from autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py rename to autogpts/SoloAgent/forge/actions/code_gen/models.py index f60cbd1c25d6..d881b64473f0 100644 --- a/autogpts/SoloAgent/forge/actions/code_gen/models/TestCase.py +++ b/autogpts/SoloAgent/forge/actions/code_gen/models.py @@ -1,7 +1,22 @@ + from typing import Dict from dataclasses import dataclass, field +@dataclass +class Code: + code_files: Dict[str, str] = field(default_factory=dict) + + def __getitem__(self, item: str) -> str: + return self.code_files[item] + + def __setitem__(self, key: str, value: str) -> None: + 
self.code_files[key] = value + + def items(self): + return self.code_files.items() + + @dataclass class TestCase: test_cases: Dict[str, str] = field(default_factory=dict) diff --git a/autogpts/SoloAgent/forge/actions/code_gen/models/Code.py b/autogpts/SoloAgent/forge/actions/code_gen/models/Code.py deleted file mode 100644 index 9c3dcc9b172d..000000000000 --- a/autogpts/SoloAgent/forge/actions/code_gen/models/Code.py +++ /dev/null @@ -1,16 +0,0 @@ -from typing import Dict -from dataclasses import dataclass, field - - -@dataclass -class Code: - code_files: Dict[str, str] = field(default_factory=dict) - - def __getitem__(self, item: str) -> str: - return self.code_files[item] - - def __setitem__(self, key: str, value: str) -> None: - self.code_files[key] = value - - def items(self): - return self.code_files.items() diff --git a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py index f0ca289edefe..4513609914e6 100644 --- a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py +++ b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py @@ -1,8 +1,10 @@ from __future__ import annotations from ..registry import action -from forge.sdk import ForgeLogger, PromptEngine +from forge.sdk import ForgeLogger, PromptEngine, Agent import requests import os +from forge.actions.code_gen.models import Code +from forge.llm import chat_completion_request LOG = ForgeLogger(__name__) API_KEY = os.getenv("ERASERIO_API_KEY") @@ -21,43 +23,80 @@ { "name": "code", "description": "Code generated from the specification", - "type": "string", + "type": "Code object", "required": False } ], output_type="str" ) -async def generate_architecture_diagram(agent, task_id: str, specification: str, code: str) -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - diagram_prompt = prompt_engine.load_prompt( - "diagram-prompt", specification=specification, code=code) - - url = "https://app.eraser.io/api/render/prompt" - 
- payload = { - "text": diagram_prompt, - "diagramType": "sequence-diagram", - "background": True, - "theme": "light", - "scale": "1", - "returnFile": True - } - headers = { - "accept": "application/json", - "content-type": "application/json", - "Authorization": f"Bearer {API_KEY}" - } - - response = requests.post(url, json=payload, headers=headers) - - if response.status_code == 200: +async def generate_architecture_diagram(agent: Agent, task_id: str, specification: str, code: Code) -> str: + try: + prompt_engine = PromptEngine("gpt-3.5-turbo") + diagram_prompt = prompt_engine.load_prompt( + "diagram-prompt", specification=specification, code=code + ) + + url = "https://app.eraser.io/api/render/prompt" + + payload = { + "text": diagram_prompt, + "diagramType": "sequence-diagram", + "background": True, + "theme": "light", + "scale": "1", + "returnFile": True + } + headers = { + "accept": "application/json", + "content-type": "application/json", + "Authorization": f"Bearer {API_KEY}" + } + + response = requests.post(url, json=payload, headers=headers) + response.raise_for_status() + result = response.json() LOG.info(f"Diagram generated successfully: {result['fileUrl']}") return result['fileUrl'] - else: - LOG.error(f"Error generating diagram: {response.text}") + except requests.RequestException as e: + LOG.error(f"Error generating diagram: {e}") return "Failed to generate diagram." 
-async def generate_use_case_diagram(code): - pass +@action( + name="gen-use-case-diagram", + description="Generate a use case diagram from analyzing the code.", + parameters=[ + { + "name": "code", + "description": "A dictionary containing filenames and code for a codebase.", + "type": "Code object", + "required": True + } + ], + output_type="str" +) +async def generate_use_case_diagram(agent: Agent, task_id: str, code: Code) -> str: + try: + prompt_engine = PromptEngine("gpt-3.5-turbo") + usecase_diagram_template = prompt_engine.load_prompt( + "use-case-diagram-gen-return", code=code) + + messages = [ + {"role": "system", "content": "You are a diagram generation assistant specialized in generating use case diagrams."}, + {"role": "system", "content": usecase_diagram_template} + ] + + chat_completion_kwargs = { + "messages": messages, + "model": "gpt-3.5-turbo", + } + + chat_response = await chat_completion_request(**chat_completion_kwargs) + + LOG.info(f"Response content: {chat_response}") + return chat_response['choices'][0]['message']['content'] + except Exception as e: + LOG.error(f"Error generating use case diagram: {e}") + return "Failed to generate use case diagram." 
+ diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2 deleted file mode 100644 index 709bbe08aa99..000000000000 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2 +++ /dev/null @@ -1,14 +0,0 @@ -# Modify the code if needed for this {specification} - -# Anchor.toml - -[programs.localnet] -my_anchor_program = "YourProgramID" - -[provider] -cluster = "localnet" -wallet = "~/.config/solana/id.json" - -[scripts] -test = "anchor test" - diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2 deleted file mode 100644 index 8b9f420d7274..000000000000 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2 +++ /dev/null @@ -1,18 +0,0 @@ -# Modify the file to include functonality if needed for this {specification} - -# Cargo.toml - -[package] -name = "my_anchor_program" -version = "0.1.0" -edition = "2018" - -[dependencies] -anchor-lang = "0.18.0" - -[features] -default = ["program"] - -[workspace] -members = ["programs/*"] - diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2 deleted file mode 100644 index 21ed72bd9fe6..000000000000 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2 +++ /dev/null @@ -1,16 +0,0 @@ -// Modify the file to include functionality for {{ specification }} - - -// errors.rs - -// This file contains the custom errors for the Anchor program. 
- -use anchor_lang::prelude::*; - -#[error_code] -pub enum MyProgramError { - #[msg("An error occurred.")] - SomeError, - // Add other errors here -} - diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2 deleted file mode 100644 index 7c63d6d00b0c..000000000000 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2 +++ /dev/null @@ -1,27 +0,0 @@ -// Modify the file if needed for this {{ specification }} - - -// anchor-instructions.rs - -// This file contains the instruction handlers for the Anchor program. - -use anchor_lang::prelude::*; -use crate::errors::MyProgramError; - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = user, space = 8 + 8)] - pub my_account: Account<'info, MyAccount>, - #[account(mut)] - pub user: Signer<'info>, - pub system_program: Program<'info, System>, -} - -pub fn initialize(ctx: Context) -> Result<()> { - let my_account = &mut ctx.accounts.my_account; - my_account.data = 0; - Ok(()) -} - -// Define other instructions here - diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2 deleted file mode 100644 index 6b0bbcab2ba9..000000000000 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2 +++ /dev/null @@ -1,28 +0,0 @@ -// Modify the file for this {{ specification }} - -// This is the main library file for the Anchor program. -// It includes the program's entrypoint and instruction handlers. 
- - -// anchor-lib.rs - -use anchor_lang::prelude::*; - -pub mod instructions; -pub mod errors; - -use instructions::*; - -declare_id!("{{ program_id }}"); - -#[program] -mod my_anchor_program { - use super::*; - - pub fn initialize(ctx: Context) -> Result<()> { - instructions::initialize(ctx) - } - - // Add other instruction handlers here -} - diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2 index 9d1c24d80f16..f03788487f69 100644 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2 +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2 @@ -1,8 +1,8 @@ -# Solana Account Diagram -# Specification: +Specification: + {{ specification }} -# Code: +Code: {{ code }} -# Based on the specification and the code craft a perfect prompt to feed into the eraser.io API LLM to generate the diagram +Based on the specification and the code craft a perfect prompt to feed into the eraser.io API LLM to generate the architecture diagram for the application for the application. diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2 deleted file mode 100644 index 9590e11ca735..000000000000 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2 +++ /dev/null @@ -1,4 +0,0 @@ -# solana-code-generation.j2 - -# {{ specification }} - diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2 new file mode 100644 index 000000000000..73e4333cccb9 --- /dev/null +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2 @@ -0,0 +1,7 @@ + +Generate unit tests for the following JavaScript or TypeScript code. 
+File Name: {{ file_name }} + +Code: +{{ code }} + diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2 index 1474b0f5fa2e..eb5c72326211 100644 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2 +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2 @@ -1,5 +1,4 @@ -Generate unit tests for the following Rust code. Ensure all control characters such as \n and \t are escaped in the response. - +Generate unit tests for the following Rust code. File Name: {{ file_name }} Code: diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2 new file mode 100644 index 000000000000..167d8ba58ba6 --- /dev/null +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2 @@ -0,0 +1,17 @@ + + + +Return the tests in the following format according to JSON structure so that all problematic characters are escaped: + +{ + "file_name": "{{ "{{ file_name | replace('.js', '_test.js') | replace(\'.ts\', \'.test.ts\') }}" }}", + "test_file": "" +} + +// Example Response: + +{ + "file_name": "{{ '{{ file_name | replace(\'.js\', \'.test.js\') | replace(\'.ts\', \'.test.ts\') }}' }}", + "test_file": "const {{ module_name }} = require('./{{ file_name }}');\\n\\nconst { add, subtract } = {{ module_name }};\\n\\n test('adds 2 + 3 to equal 5', () => {\\n expect(add(2, 3)).toBe(5);\\n});\\n\\n test('adds -2 + 3 to equal 1', () => {\\n expect(add(-2, 3)).toBe(1);\\n});\\n\\n test('adds 0 + 0 to equal 0', () => {\\n expect(add(0, 0)).toBe(0);\\n});\\n\\n test('subtracts 5 - 3 to equal 2', () => {\\n expect(subtract(5, 3)).toBe(2);\\n});\\n\\n test('subtracts 3 - 5 to equal -2', () => {\\n expect(subtract(3, 5)).toBe(-2);\\n});\\n\\n test('subtracts 0 - 0 to equal 0', () => {\\n expect(subtract(0, 
0)).toBe(0);\\n});" +} + diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2 index c2803cd0dfd3..373cc5e74220 100644 --- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2 +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2 @@ -1,6 +1,15 @@ -Return the tests in the following format: + +Return the tests in the following format according to JSON structure so that all problematic characters are escaped: { - "file_name": "{{ file_name | replace('.rs', '_test.rs') }}", + "file_name": "{{ "{{ file_name | replace('.rs', '_test.rs') }}" }}", "test_file": "" } + +// Example Response: +{ + "file_name": "{{ '{{ file_name | replace(\'.rs\', \'_test.rs\') }}' }}", + "test_file": "use super::*;\\n\\n#[cfg(test)]\\nmod tests {\\n use super::*;\\n\\n #[test]\\n fn test_add() {\\n assert_eq!(add(2, 3), 5);\\n assert_eq!(add(-2, 3), 1);\\n assert_eq!(add(0, 0), 0);\\n }\\n\\n #[test]\\n fn test_subtract() {\\n assert_eq!(subtract(5, 3), 2);\\n assert_eq!(subtract(3, 5), -2);\\n assert_eq!(subtract(0, 0), 0);\\n }\\n}" +} + + diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 new file mode 100644 index 000000000000..16c4429206e6 --- /dev/null +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 @@ -0,0 +1,6 @@ + +From the code you been analyzing in the previous prompts. 
Return a usecase design for this application based on the code +below + +{code} + From 6f0d02c85c018a32cc038803f332e313d566ac10 Mon Sep 17 00:00:00 2001 From: Botir Khaltaev Date: Fri, 28 Jun 2024 16:07:38 +0100 Subject: [PATCH 3/4] Add specification as one of the params for use case diagram gen --- .../SoloAgent/forge/actions/diagram_gen/diagram_gen.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py index 4513609914e6..91f086cd91d8 100644 --- a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py +++ b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py @@ -72,11 +72,18 @@ async def generate_architecture_diagram(agent: Agent, task_id: str, specificatio "description": "A dictionary containing filenames and code for a codebase.", "type": "Code object", "required": True + }, + + { + "name": "specification", + "description": "Specification of the project.", + "type": "str", + "required": True } ], output_type="str" ) -async def generate_use_case_diagram(agent: Agent, task_id: str, code: Code) -> str: +async def generate_use_case_diagram(agent: Agent, task_id: str, code: Code, specification: str) -> str: try: prompt_engine = PromptEngine("gpt-3.5-turbo") usecase_diagram_template = prompt_engine.load_prompt( From c1b05fe617e4791fd430ea287d9f57c9dded98e8 Mon Sep 17 00:00:00 2001 From: Botir Khaltaev Date: Sun, 30 Jun 2024 19:36:30 +0100 Subject: [PATCH 4/4] Implement action that deploys solana project to the devnet --- .../forge/actions/infra_gen/infra_gen.py | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py diff --git a/autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py b/autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py new file mode 100644 index 000000000000..a1c462deeab9 --- /dev/null +++ 
b/autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py @@ -0,0 +1,54 @@ +from __future__ import annotations +from ..registry import action +from forge.sdk import ForgeLogger, Agent +import os +import subprocess +from typing import Dict + +LOG = ForgeLogger(__name__) + + +@action( + name="deploy_code", + description="Deploy the Anchor project to Solana Devnet", + parameters=[ + { + "name": "project_path", + "description": "Path to the Anchor project directory", + "type": "string", + "required": True, + } + ], + output_type="str", +) +async def deploy_code(agent: Agent, task_id: str, project_path: str) -> str: + try: + # Ensure that the Solana CLI is installed and configured to use the devnet + subprocess.run(['solana', 'config', 'set', '--url', + 'https://api.devnet.solana.com'], check=True) + subprocess.run(['solana', 'config', 'set', '--keypair', + os.path.expanduser('~/.config/solana/id.json')], check=True) + + # Build the Anchor project + result = subprocess.run( + ['anchor', 'build'], cwd=project_path, capture_output=True, text=True) + if result.returncode != 0: + LOG.error(f"Build failed with errors: {result.stderr}") + return f"Build failed: {result.stderr}" + + # Deploy the Anchor project to Devnet + result = subprocess.run( + ['anchor', 'deploy'], cwd=project_path, capture_output=True, text=True) + if result.returncode != 0: + LOG.error(f"Deployment failed with errors: {result.stderr}") + return f"Deployment failed: {result.stderr}" + + LOG.info(f"Deployment successful: {result.stdout}") + return f"Deployment successful: {result.stdout}" + + except subprocess.CalledProcessError as e: + LOG.error(f"Error during deployment: {e}") + return f"Deployment process failed: {e}" + except Exception as e: + LOG.error(f"Unexpected error during deployment: {e}") + return f"Unexpected error: {e}"