diff --git a/autogpts/SoloAgent/forge/actions/code_gen/__init__.py b/autogpts/SoloAgent/forge/actions/code_gen/__init__.py
new file mode 100644
index 000000000000..763dc6bfeb83
--- /dev/null
+++ b/autogpts/SoloAgent/forge/actions/code_gen/__init__.py
@@ -0,0 +1,4 @@
+
+from .models import Code, TestCase
+
+__all__ = ["Code", "TestCase"]
diff --git a/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py b/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py
index f9c1b301641d..b64fd25ce0ef 100644
--- a/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py
+++ b/autogpts/SoloAgent/forge/actions/code_gen/code_gen.py
@@ -1,36 +1,16 @@
-
 from __future__ import annotations
 from typing import Dict
 from ..registry import action
-from forge.sdk import ForgeLogger, PromptEngine
+from .models import Code, TestCase
+from forge.sdk import ForgeLogger, PromptEngine, Agent, LocalWorkspace
 from forge.llm import chat_completion_request
 import os
-from forge.sdk import Agent, LocalWorkspace
-import re
 import subprocess
 import json
-import test_code
 
 LOG = ForgeLogger(__name__)
 
-CodeType = Dict[str, str]
-TestCaseType = Dict[str, str]
-
-
-ARGO_TOML_CONTENT = """
-[package]
-name = "my_anchor_program"
-version = "0.1.0"
-edition = "2018"
-[dependencies]
-anchor-lang = "0.30.1"
-"""
-ANCHOR_TOML_CONTENT = """
-[programs.localnet]
-my_anchor_program = "4d3d5ab7f6b5e4b2b7d1f5d6e4b7d1f5d6e4b7d1"
-"""
-ERROR_INFO = ""
+CodeType = Dict[str, str]
+TestCaseType = Dict[str, str]
 
 
 @action(
@@ -77,17 +57,19 @@ async def test_code(agent: Agent, task_id: str, project_path: str) -> str:
     output_type="str",
 )
 async def generate_solana_code(agent: Agent, task_id: str, specification: str) -> str:
-    global ERROR_INFO
-    test_code.cargo_test_agbenchmark_config()
     prompt_engine = PromptEngine("gpt-4o")
-    lib_prompt = prompt_engine.load_prompt("anchor-lib", specification=specification, error_info=ERROR_INFO)
-    instructions_prompt = prompt_engine.load_prompt("anchor-instructions", specification=specification, error_info=ERROR_INFO)
-    errors_prompt = prompt_engine.load_prompt("anchor-errors", specification=specification, error_info=ERROR_INFO)
-    cargo_toml_prompt = prompt_engine.load_prompt("anchor-cargo-toml", specification=specification, error_info=ERROR_INFO)
-    anchor_toml_prompt = prompt_engine.load_prompt("anchor-anchor-toml", specification=specification, error_info=ERROR_INFO)
-
+    lib_prompt = prompt_engine.load_prompt(
+        "anchor-lib", specification=specification)
+    instructions_prompt = prompt_engine.load_prompt(
+        "anchor-instructions", specification=specification)
+    errors_prompt = prompt_engine.load_prompt(
+        "anchor-errors", specification=specification)
+    cargo_toml_prompt = prompt_engine.load_prompt(
+        "anchor-cargo-toml", specification=specification)
+    anchor_toml_prompt = prompt_engine.load_prompt(
+        "anchor-anchor-toml", specification=specification)
+
     messages = [
         {"role": "system", "content": "You are a code generation assistant specialized in Anchor for Solana."},
         {"role": "user", "content": lib_prompt},
@@ -95,13 +77,9 @@ async def generate_solana_code(agent: Agent, task_id: str, specification: str) -> str:
         {"role": "user", "content": instructions_prompt},
         {"role": "user", "content": errors_prompt},
         {"role": "user", "content": cargo_toml_prompt},
        {"role": "user", "content": anchor_toml_prompt},
-        {"role": "user", "content": "Return the whole code as a string with the file markers intact that you received in each of the input without changing their wording at all and use // becore comments."},
-
+        {"role": "user", "content": "Return the whole code as a string with the file markers intact that you received in each of the inputs, without changing their wording at all."}
     ]
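+
+    # parse_response_content() below splits the completion back into the
+    # individual files by looking for the per-file markers echoed from the
+    # prompts (e.g. "// anchor-lib.rs", "// anchor-instructions.rs",
+    # "// errors.rs").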
-
-
-
     chat_completion_kwargs = {
         "messages": messages,
         "model": "gpt-3.5-turbo",
     }
@@ -123,63 +101,42 @@ async def generate_solana_code(agent: Agent, task_id: str, specification: str) -> str:
     project_path = os.path.join(base_path, task_id)
     LOG.info(f"Base path: {base_path}")
     LOG.info(f"Project path: {project_path}")
+    cargo_toml_content = """
+    [package]
+    name = "my_anchor_program"
+    version = "0.1.0"
+    edition = "2018"
+
+    [dependencies]
+    anchor-lang = "0.30.1"
+    """
     LOG.info(f"id: {task_id}")
     LOG.info(f"Parts: {response_content}")
-
-    file_actions = [
-        ('Cargo.toml', ARGO_TOML_CONTENT),
-        ('Anchor.toml', parts['Anchor.toml']),
-        ('src/errors.rs', parts['errors.rs']),
-        ('src/instructions.rs', parts['anchor-instructions.rs']),
-        ('src/lib.rs', parts['anchor-lib.rs']),
-    ]
-
-    for file_path, file_content in file_actions:
-        full_file_path = os.path.join(project_path, file_path)
-
-        if os.path.exists(full_file_path):
-            print(f"{file_path} already exists. Skipping regeneration.")
-        else:
-            print(f"Generating {file_path}. Press 'y' to continue...")
-            if input().strip().lower() != 'y':
-                return f"Generation halted by user at {file_path}."
-
-            await agent.abilities.run_action(task_id, "write_file", file_path=full_file_path, data=file_content.encode())
-            print(f"{file_path} generated successfully.")
-
-            # Compile the generated file
-            compile_result = await compile_file(agent, task_id, project_path, file_path)
-            if "error" in compile_result.lower():
-                LOG.error(f"Compilation failed for {file_path}: {compile_result}")
-                print(f"Compilation failed for {file_path}, regenerating...")
-
-                # Update ERROR_INFO with the compilation error
-                ERROR_INFO = compile_result
-
-                # Regenerate only the faulty file
-                return await generate_solana_code(agent, task_id, specification)
-
+    await agent.abilities.run_action(
+        task_id, "write_file", file_path=os.path.join(project_path, 'src', 'lib.rs'), data=parts['anchor-lib.rs'].encode()
+    )
+    await agent.abilities.run_action(
+        task_id, "write_file", file_path=os.path.join(project_path, 'src', 'instructions.rs'), data=parts['anchor-instructions.rs'].encode()
+    )
+    await agent.abilities.run_action(
+        task_id, "write_file", file_path=os.path.join(project_path, 'src', 'errors.rs'), data=parts['errors.rs'].encode()
+    )
+    await agent.abilities.run_action(
+        task_id, "write_file", file_path=os.path.join(project_path, 'Cargo.toml'), data=cargo_toml_content.encode()
+    )
+    await agent.abilities.run_action(
+        task_id, "write_file", file_path=os.path.join(project_path, 'Anchor.toml'), data=parts['Anchor.toml'].encode()
+    )
     test_result = await agent.abilities.run_action(task_id, "test_code", project_path=project_path)
     if "All tests passed" not in test_result:
+        # Regenerate the code based on errors
         LOG.info(f"Regenerating code due to errors: {test_result}")
-        ERROR_INFO = test_result  # Update ERROR_INFO with the test error
         return await generate_solana_code(agent, task_id, specification)
     return "Solana on-chain code generated, tested, and verified successfully."
 
 
-async def compile_file(agent: Agent, task_id: str, project_path: str, file_path: str) -> str:
-    try:
-        result = subprocess.run(['cargo', 'check', '--release'], cwd=project_path, capture_output=True, text=True)
-        if result.returncode != 0:
-            return result.stderr
-        return "Compilation successful."
- except Exception as e: - return f"Compilation failed: {e}" - - - @action( name="generate_frontend_code", description="Generate frontend code based on the provided specification", @@ -260,83 +217,91 @@ async def generate_frontend_code(agent, task_id: str, specification: str) -> str "required": True } ], - output_type="str", + output_type="TestCase object", ) -async def generate_test_cases(agent: Agent, task_id: str, code_dict: CodeType) -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - test_struct_prompt = prompt_engine.load_prompt("test-case-struct-return") +async def generate_test_cases(agent: Agent, task_id: str, code_dict: Dict[str, str]) -> TestCase: + try: + prompt_engine = PromptEngine("gpt-3.5-turbo") + messages = [ + {"role": "system", "content": "You are a code generation assistant specialized in generating test cases."}] - messages = [ - {"role": "system", "content": "You are a code generation assistant specialized in generating test cases."}, - ] + test_prompt_template, test_struct_template, folder_name = determine_templates( + next(iter(code_dict))) + if not test_prompt_template: + return "Unsupported file type." + + code = Code(code_dict) + for file_name, code_content in code.items(): + LOG.info(f"File Name: {file_name}") + LOG.info(f"Code: {code_content}") + test_prompt = prompt_engine.load_prompt( + test_prompt_template, file_name=file_name, code=code_content) + messages.append({"role": "user", "content": test_prompt}) + + test_struct_prompt = prompt_engine.load_prompt(test_struct_template) + messages.append({"role": "user", "content": test_struct_prompt}) + + response_content = await get_chat_response(messages) + LOG.info(f"Response content: {response_content}") + + project_path = get_project_path(agent, task_id, folder_name) + os.makedirs(project_path, exist_ok=True) - for file_name, code in code_dict.items(): - LOG.info(f"File Name: {file_name}") - LOG.info(f"Code: {code}") - test_prompt = prompt_engine.load_prompt( - "test-case-generation", file_name=file_name, code=code) - messages.append({"role": "user", "content": test_prompt}) + test_cases = parse_test_cases_response(response_content) + await write_test_cases(agent, task_id, project_path, test_cases) + + return test_cases + + except Exception as e: + LOG.error(f"Error generating test cases: {e}") + return "Failed to generate test cases due to an error." 
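+
+
+# Reply contract assumed by the helpers below (see the
+# test-case-struct-return*.j2 templates): the model answers with a single
+# JSON object whose control characters are escaped, e.g.
+#     {"file_name": "lib_test.rs", "test_file": "use super::*;\\n..."}
+# parse_test_cases_response() extracts that object and unescapes it into a
+# TestCase.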
- messages.append({"role": "user", "content": test_struct_prompt}) +def determine_templates(first_file_name: str): + if first_file_name.endswith(('.js', '.ts')): + return "test-case-generation-frontend", "test-case-struct-return-frontend", 'frontend/tests' + elif first_file_name.endswith('.rs'): + return "test-case-generation", "test-case-struct-return", 'rust/tests' + else: + LOG.error(f"Unsupported file type for: {first_file_name}") + return None, None, None + + +async def get_chat_response(messages: list) -> str: chat_completion_kwargs = { "messages": messages, "model": "gpt-3.5-turbo", } - chat_response = await chat_completion_request(**chat_completion_kwargs) - response_content = chat_response["choices"][0]["message"]["content"] + return chat_response["choices"][0]["message"]["content"] - LOG.info(f"Response content: {response_content}") +def get_project_path(agent: Agent, task_id: str, folder_name: str) -> str: base_path = agent.workspace.base_path if isinstance( agent.workspace, LocalWorkspace) else str(agent.workspace.base_path) - project_path = os.path.join(base_path, task_id) + return os.path.join(base_path, task_id, folder_name) - try: - test_cases = parse_test_cases_response(response_content) - except Exception as e: - LOG.error(f"Error parsing test cases response: {e}") - return "Failed to generate test cases due to response parsing error." +async def write_test_cases(agent: Agent, task_id: str, project_path: str, test_cases: TestCase): for file_name, test_case in test_cases.items(): - test_file_path = os.path.join(project_path, 'tests', file_name) - await agent.abilities.run_action( - task_id, "write_file", file_path=test_file_path, data=test_case.encode() - ) + test_file_path = os.path.join(project_path, file_name) + await agent.abilities.run_action(task_id, "write_file", file_path=test_file_path, data=test_case.encode()) - return "Test cases generated and written to respective files." 
-
-
-def sanitize_json_string(json_string: str) -> str:
-    # Replace newlines and tabs with escaped versions
-    sanitized_string = json_string.replace(
-        '\n', '\\n').replace('\t', '\\t').replace('    ', '\\t')
-    return sanitized_string
-
-
-def parse_test_cases_response(response_content: str) -> TestCaseType:
+def parse_test_cases_response(response_content: str) -> TestCase:
     try:
-        # Extract JSON part from response content
         json_start = response_content.index('{')
         json_end = response_content.rindex('}') + 1
         json_content = response_content[json_start:json_end]
-        # Sanitize JSON content
-        sanitized_content = sanitize_json_string(json_content)
-
-        # Load JSON content
-        response_dict = json.loads(sanitized_content)
+        LOG.info(f"JSON Content: {json_content}")
+        response_dict = json.loads(json_content)
         file_name = response_dict["file_name"]
-        test_file = response_dict["test_file"]
-
-        # Unescape newlines and tabs in test_file
-        test_file = test_file.replace('\\n', '\n').replace(
-            '\\t', '\t').strip().strip('"')
-        test_cases = {file_name: test_file}
-        return test_cases
+        test_file = response_dict["test_file"].replace(
+            '\\n', '\n').replace('\\t', '\t').strip().strip('"')
+        return TestCase({file_name: test_file})
     except (json.JSONDecodeError, ValueError) as e:
         LOG.error(f"Error decoding JSON response: {e}")
         raise
@@ -373,4 +338,3 @@ def parse_response_content(response_content: str) -> dict:
     return parts
-
diff --git a/autogpts/SoloAgent/forge/actions/code_gen/models.py b/autogpts/SoloAgent/forge/actions/code_gen/models.py
new file mode 100644
index 000000000000..d881b64473f0
--- /dev/null
+++ b/autogpts/SoloAgent/forge/actions/code_gen/models.py
@@ -0,0 +1,31 @@
+
+from typing import Dict
+from dataclasses import dataclass, field
+
+
+@dataclass
+class Code:
+    code_files: Dict[str, str] = field(default_factory=dict)
+
+    def __getitem__(self, item: str) -> str:
+        return self.code_files[item]
+
+    def __setitem__(self, key: str, value: str) -> None:
+        self.code_files[key] = value
+
+    def items(self):
+        return self.code_files.items()
+
+
+@dataclass
+class TestCase:
+    test_cases: Dict[str, str] = field(default_factory=dict)
+
+    def __getitem__(self, item: str) -> str:
+        return self.test_cases[item]
+
+    def __setitem__(self, key: str, value: str) -> None:
+        self.test_cases[key] = value
+
+    def items(self):
+        return self.test_cases.items()
diff --git a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py
index 29e7c52ab410..91f086cd91d8 100644
--- a/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py
+++ b/autogpts/SoloAgent/forge/actions/diagram_gen/diagram_gen.py
@@ -1,12 +1,15 @@
 from __future__ import annotations
 from ..registry import action
-from forge.sdk import ForgeLogger, PromptEngine
+from forge.sdk import ForgeLogger, PromptEngine, Agent
 import requests
 import os
+from forge.actions.code_gen.models import Code
+from forge.llm import chat_completion_request
 
 LOG = ForgeLogger(__name__)
 API_KEY = os.getenv("ERASERIO_API_KEY")
+
 
 @action(
     name="gen-diagram-eraserio",
     description="Generate a code diagram using eraser.io",
diagram using eraser.io", @@ -20,40 +23,87 @@ { "name": "code", "description": "Code generated from the specification", - "type": "string", + "type": "Code object", "required": False } ], output_type="str" ) -async def generate_diagram(agent, task_id: str, specification: str, code: str) -> str: - prompt_engine = PromptEngine("gpt-3.5-turbo") - diagram_prompt = prompt_engine.load_prompt("diagram-prompt", specification=specification, code=code) - - url = "https://app.eraser.io/api/render/prompt" - - payload = { - "text": diagram_prompt, - "diagramType": "sequence-diagram", - "background": True, - "theme": "light", - "scale": "1", - "returnFile": True - } - headers = { - "accept": "application/json", - "content-type": "application/json", - "Authorization": f"Bearer {API_KEY}" - } - - response = requests.post(url, json=payload, headers=headers) - - if response.status_code == 200: +async def generate_architecture_diagram(agent: Agent, task_id: str, specification: str, code: Code) -> str: + try: + prompt_engine = PromptEngine("gpt-3.5-turbo") + diagram_prompt = prompt_engine.load_prompt( + "diagram-prompt", specification=specification, code=code + ) + + url = "https://app.eraser.io/api/render/prompt" + + payload = { + "text": diagram_prompt, + "diagramType": "sequence-diagram", + "background": True, + "theme": "light", + "scale": "1", + "returnFile": True + } + headers = { + "accept": "application/json", + "content-type": "application/json", + "Authorization": f"Bearer {API_KEY}" + } + + response = requests.post(url, json=payload, headers=headers) + response.raise_for_status() + result = response.json() LOG.info(f"Diagram generated successfully: {result['fileUrl']}") return result['fileUrl'] - else: - LOG.error(f"Error generating diagram: {response.text}") + except requests.RequestException as e: + LOG.error(f"Error generating diagram: {e}") return "Failed to generate diagram." +@action( + name="gen-use-case-diagram", + description="Generate a use case diagram from analyzing the code.", + parameters=[ + { + "name": "code", + "description": "A dictionary containing filenames and code for a codebase.", + "type": "Code object", + "required": True + }, + + { + "name": "specification", + "description": "Specification of the project.", + "type": "str", + "required": True + } + ], + output_type="str" +) +async def generate_use_case_diagram(agent: Agent, task_id: str, code: Code, specification: str) -> str: + try: + prompt_engine = PromptEngine("gpt-3.5-turbo") + usecase_diagram_template = prompt_engine.load_prompt( + "use-case-diagram-gen-return", code=code) + + messages = [ + {"role": "system", "content": "You are a code generation assistant specialized in generating test cases."}, + {"role": "system", "content": usecase_diagram_template} + ] + + chat_completion_kwargs = { + "messages": messages, + "model": "gpt-3.5-turbo", + } + + chat_response = await chat_completion_request(**chat_completion_kwargs) + + LOG.info(f"Response content: {chat_response}") + return chat_response['choices'][0]['message']['content'] + except Exception as e: + LOG.error(f"Error generating use case diagram: {e}") + return "Failed to generate use case diagram." 
+
diff --git a/autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py b/autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py
new file mode 100644
index 000000000000..a1c462deeab9
--- /dev/null
+++ b/autogpts/SoloAgent/forge/actions/infra_gen/infra_gen.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+from ..registry import action
+from forge.sdk import ForgeLogger, Agent
+import os
+import subprocess
+
+LOG = ForgeLogger(__name__)
+
+
+@action(
+    name="deploy_code",
+    description="Deploy the Anchor project to Solana Devnet",
+    parameters=[
+        {
+            "name": "project_path",
+            "description": "Path to the Anchor project directory",
+            "type": "string",
+            "required": True,
+        }
+    ],
+    output_type="str",
+)
+async def deploy_code(agent: Agent, task_id: str, project_path: str) -> str:
+    try:
+        # Ensure that the Solana CLI is installed and configured to use the devnet
+        subprocess.run(['solana', 'config', 'set', '--url',
+                        'https://api.devnet.solana.com'], check=True)
+        subprocess.run(['solana', 'config', 'set', '--keypair',
+                        os.path.expanduser('~/.config/solana/id.json')], check=True)
+
+        # Build the Anchor project
+        result = subprocess.run(
+            ['anchor', 'build'], cwd=project_path, capture_output=True, text=True)
+        if result.returncode != 0:
+            LOG.error(f"Build failed with errors: {result.stderr}")
+            return f"Build failed: {result.stderr}"
+
+        # Deploy the Anchor project to Devnet
+        result = subprocess.run(
+            ['anchor', 'deploy'], cwd=project_path, capture_output=True, text=True)
+        if result.returncode != 0:
+            LOG.error(f"Deployment failed with errors: {result.stderr}")
+            return f"Deployment failed: {result.stderr}"
+
+        LOG.info(f"Deployment successful: {result.stdout}")
+        return f"Deployment successful: {result.stdout}"
+
+    except subprocess.CalledProcessError as e:
+        LOG.error(f"Error during deployment: {e}")
+        return f"Deployment process failed: {e}"
+    except Exception as e:
+        LOG.error(f"Unexpected error during deployment: {e}")
+        return f"Unexpected error: {e}"
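+
+
+# Minimal usage sketch (hypothetical path; assumes the Solana and Anchor CLIs
+# are installed and a funded devnet keypair exists at
+# ~/.config/solana/id.json, as configured above):
+#
+#   result = await deploy_code(agent, task_id, "/path/to/anchor/project")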
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2
deleted file mode 100644
index 709bbe08aa99..000000000000
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-anchor-toml.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-# Modify the code if needed for this {specification}
-
-# Anchor.toml
-
-[programs.localnet]
-my_anchor_program = "YourProgramID"
-
-[provider]
-cluster = "localnet"
-wallet = "~/.config/solana/id.json"
-
-[scripts]
-test = "anchor test"
-
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2
deleted file mode 100644
index 8b9f420d7274..000000000000
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-cargo-toml.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-# Modify the file to include functonality if needed for this {specification}
-
-# Cargo.toml
-
-[package]
-name = "my_anchor_program"
-version = "0.1.0"
-edition = "2018"
-
-[dependencies]
-anchor-lang = "0.18.0"
-
-[features]
-default = ["program"]
-
-[workspace]
-members = ["programs/*"]
-
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2
deleted file mode 100644
index 21ed72bd9fe6..000000000000
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-errors.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-// Modify the file to include functionality for {{ specification }}
-
-
-// errors.rs
-
-// This file contains the custom errors for the Anchor program.
-
-use anchor_lang::prelude::*;
-
-#[error_code]
-pub enum MyProgramError {
-    #[msg("An error occurred.")]
-    SomeError,
-    // Add other errors here
-}
-
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2
deleted file mode 100644
index 7c63d6d00b0c..000000000000
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-instructions.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-// Modify the file if needed for this {{ specification }}
-
-
-// anchor-instructions.rs
-
-// This file contains the instruction handlers for the Anchor program.
-
-use anchor_lang::prelude::*;
-use crate::errors::MyProgramError;
-
-#[derive(Accounts)]
-pub struct Initialize<'info> {
-    #[account(init, payer = user, space = 8 + 8)]
-    pub my_account: Account<'info, MyAccount>,
-    #[account(mut)]
-    pub user: Signer<'info>,
-    pub system_program: Program<'info, System>,
-}
-
-pub fn initialize(ctx: Context<Initialize>) -> Result<()> {
-    let my_account = &mut ctx.accounts.my_account;
-    my_account.data = 0;
-    Ok(())
-}
-
-// Define other instructions here
-
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2
deleted file mode 100644
index 6b0bbcab2ba9..000000000000
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/anchor-lib.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-// Modify the file for this {{ specification }}
-
-// This is the main library file for the Anchor program.
-// It includes the program's entrypoint and instruction handlers.
-
-
-// anchor-lib.rs
-
-use anchor_lang::prelude::*;
-
-pub mod instructions;
-pub mod errors;
-
-use instructions::*;
-
-declare_id!("{{ program_id }}");
-
-#[program]
-mod my_anchor_program {
-    use super::*;
-
-    pub fn initialize(ctx: Context<Initialize>) -> Result<()> {
-        instructions::initialize(ctx)
-    }
-
-    // Add other instruction handlers here
-}
-
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2
index 9d1c24d80f16..f03788487f69 100644
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2
+++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/diagram-prompt.j2
@@ -1,8 +1,8 @@
-# Solana Account Diagram
-# Specification:
+Specification:
+
 {{ specification }}
 
-# Code:
+Code:
 {{ code }}
 
-# Based on the specification and the code craft a perfect prompt to feed into the eraser.io API LLM to generate the diagram
+Based on the specification and the code, craft a perfect prompt to feed into the eraser.io API LLM to generate the architecture diagram for the application.
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2
deleted file mode 100644
index 9590e11ca735..000000000000
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/solana-code-generation.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-# solana-code-generation.j2
-
-# {{ specification }}
-
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2
new file mode 100644
index 000000000000..73e4333cccb9
--- /dev/null
+++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation-frontend.j2
@@ -0,0 +1,7 @@
+
+Generate unit tests for the following JavaScript or TypeScript code.
+File Name: {{ file_name }}
+
+Code:
+{{ code }}
+
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2
index 1474b0f5fa2e..eb5c72326211 100644
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2
+++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-generation.j2
@@ -1,5 +1,4 @@
-Generate unit tests for the following Rust code. Ensure all control characters such as \n and \t are escaped in the response.
-
+Generate unit tests for the following Rust code.
 File Name: {{ file_name }}
 
 Code:
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2
new file mode 100644
index 000000000000..167d8ba58ba6
--- /dev/null
+++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return-frontend.j2
@@ -0,0 +1,17 @@
+
+Return the tests in the following format according to JSON structure so that all problematic characters are escaped:
+
+{
+    "file_name": "{{ "{{ file_name | replace('.js', '.test.js') | replace(\'.ts\', \'.test.ts\') }}" }}",
+    "test_file": ""
+}
+
+// Example Response:
+
+{
+    "file_name": "{{ '{{ file_name | replace(\'.js\', \'.test.js\') | replace(\'.ts\', \'.test.ts\') }}' }}",
+    "test_file": "const {{ module_name }} = require('./{{ file_name }}');\\n\\nconst { add, subtract } = {{ module_name }};\\n\\n test('adds 2 + 3 to equal 5', () => {\\n expect(add(2, 3)).toBe(5);\\n});\\n\\n test('adds -2 + 3 to equal 1', () => {\\n expect(add(-2, 3)).toBe(1);\\n});\\n\\n test('adds 0 + 0 to equal 0', () => {\\n expect(add(0, 0)).toBe(0);\\n});\\n\\n test('subtracts 5 - 3 to equal 2', () => {\\n expect(subtract(5, 3)).toBe(2);\\n});\\n\\n test('subtracts 3 - 5 to equal -2', () => {\\n expect(subtract(3, 5)).toBe(-2);\\n});\\n\\n test('subtracts 0 - 0 to equal 0', () => {\\n expect(subtract(0, 0)).toBe(0);\\n});"
+}
+
diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2
index c2803cd0dfd3..373cc5e74220 100644
--- a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2
+++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/test-case-struct-return.j2
@@ -1,6 +1,15 @@
-Return the tests in the following format:
+
+Return the tests in the following format according to JSON structure so that all problematic characters are escaped:
 
 {
-    "file_name": "{{ file_name | replace('.rs', '_test.rs') }}",
+    "file_name": "{{ "{{ file_name | replace('.rs', '_test.rs') }}" }}",
     "test_file": ""
 }
+
+// Example Response:
+{
"file_name": "{{ '{{ file_name | replace(\'.rs\', \'_test.rs\') }}' }}", + "test_file": "use super::*;\\n\\n#[cfg(test)]\\nmod tests {\\n use super::*;\\n\\n #[test]\\n fn test_add() {\\n assert_eq!(add(2, 3), 5);\\n assert_eq!(add(-2, 3), 1);\\n assert_eq!(add(0, 0), 0);\\n }\\n\\n #[test]\\n fn test_subtract() {\\n assert_eq!(subtract(5, 3), 2);\\n assert_eq!(subtract(3, 5), -2);\\n assert_eq!(subtract(0, 0), 0);\\n }\\n}" +} + + diff --git a/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 new file mode 100644 index 000000000000..16c4429206e6 --- /dev/null +++ b/autogpts/SoloAgent/forge/prompts/gpt-3.5-turbo/use-case-diagram-gen-return.j2 @@ -0,0 +1,6 @@ + +From the code you been analyzing in the previous prompts. Return a usecase design for this application based on the code +below + +{code} +