Skip to content

Commit

Permalink
Merge pull request #8 from botirk38/feat/workingversion
Browse files Browse the repository at this point in the history
Feat/workingversion
  • Loading branch information
botirk38 authored Jul 4, 2024
2 parents cfffa9f + 97ea2d9 commit e2e7e14
Show file tree
Hide file tree
Showing 4 changed files with 374 additions and 6 deletions.
4 changes: 0 additions & 4 deletions autogpts/SoloAgent/agbenchmark_config/config.json

This file was deleted.

241 changes: 239 additions & 2 deletions autogpts/SoloAgent/forge/actions/code_gen/code_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,14 @@
import os
import subprocess
import json
from typing import Dict
from .models import Code, TestCase

LOG = ForgeLogger(__name__)


# Type aliases for the {file_name: file_content} mappings that the
# generation and parsing helpers below pass around.
CodeType = Dict[str, str]
TestCaseType = Dict[str, str]


@action(
name="test_code",
description="Test the generated code for errors",
Expand Down Expand Up @@ -41,6 +43,198 @@ async def test_code(agent: Agent, task_id: str, project_path: str) -> str:
return f"Failed to test code: {e}"


@action(
    name="generate_solana_code",
    description="Generate Solana on-chain code using Anchor based on the provided specification",
    parameters=[
        {
            "name": "specification",
            "description": "Code specification",
            "type": "string",
            "required": True,
        }
    ],
    output_type="str",
)
async def generate_solana_code(agent: Agent, task_id: str, specification: str) -> str:
    """Generate, write, compile and test an Anchor (Solana) project.

    Prompts the model for each project file, splits the combined response
    with parse_response_content, writes the files into the task workspace,
    then compiles each source file and runs the test_code action. On a
    compile/test failure the error text is stored in the module-global
    ERROR_INFO and the whole generation is retried recursively.

    Returns a human-readable status string.
    """
    global ERROR_INFO
    # Robustness fix: on the very first call the module-level ERROR_INFO may
    # not exist yet, which made the load_prompt calls below raise NameError.
    if "ERROR_INFO" not in globals():
        ERROR_INFO = ""

    prompt_engine = PromptEngine("gpt-4o")
    lib_prompt = prompt_engine.load_prompt("anchor-lib", specification=specification, error_info=ERROR_INFO)
    instructions_prompt = prompt_engine.load_prompt("anchor-instructions", specification=specification, error_info=ERROR_INFO)
    errors_prompt = prompt_engine.load_prompt("anchor-errors", specification=specification, error_info=ERROR_INFO)
    cargo_toml_prompt = prompt_engine.load_prompt("anchor-cargo-toml", specification=specification, error_info=ERROR_INFO)
    anchor_toml_prompt = prompt_engine.load_prompt("anchor-anchor-toml", specification=specification, error_info=ERROR_INFO)

    messages = [
        {"role": "system", "content": "You are a code generation assistant specialized in Anchor for Solana."},
        {"role": "user", "content": lib_prompt},
        {"role": "user", "content": instructions_prompt},
        {"role": "user", "content": errors_prompt},
        {"role": "user", "content": cargo_toml_prompt},
        {"role": "user", "content": anchor_toml_prompt},
        {"role": "user", "content": "Return the whole code as a string with the file markers intact that you received in each of the input without changing their wording at all and use *filename* before each file's content."}
    ]

    # NOTE(review): prompts are loaded for "gpt-4o" but the completion is
    # requested from "gpt-3.5-turbo" — confirm which model is intended.
    chat_completion_kwargs = {
        "messages": messages,
        "model": "gpt-3.5-turbo",
    }

    chat_response = await chat_completion_request(**chat_completion_kwargs)
    response_content = chat_response["choices"][0]["message"]["content"]

    LOG.info(f"Response content: {response_content}")

    try:
        parts = parse_response_content(response_content)
    except Exception as e:
        LOG.error(f"Error parsing response content: {e}")
        return "Failed to generate Solana on-chain code due to response parsing error."

    base_path = agent.workspace.base_path if isinstance(agent.workspace, LocalWorkspace) else str(agent.workspace.base_path)
    project_path = os.path.join(base_path, task_id)
    LOG.info(f"Base path: {base_path}")
    LOG.info(f"Project path: {project_path}")
    LOG.info(f"id: {task_id}")
    # Fix: this line previously logged response_content under the label "Parts".
    LOG.info(f"Parts: {parts}")

    # The manifests must exist before any cargo invocation can succeed.
    # NOTE(review): ARGO_TOML_CONTENT looks like a typo of CARGO_TOML_CONTENT
    # (cf. ANCHOR_TOML_CONTENT below) — confirm against the module constants.
    cargo_file_path = os.path.join(project_path, 'Cargo.toml')
    await agent.abilities.run_action(task_id, "write_file", file_path=cargo_file_path, data=ARGO_TOML_CONTENT.encode())
    LOG.info("Cargo.toml generated successfully.")

    anchor_file_path = os.path.join(project_path, 'Anchor.toml')
    await agent.abilities.run_action(task_id, "write_file", file_path=anchor_file_path, data=ANCHOR_TOML_CONTENT.encode())
    LOG.info("Anchor.toml generated successfully.")

    file_actions = [
        ('src/lib.rs', parts['anchor-lib.rs']),
        ('src/instructions.rs', parts['anchor-instructions.rs']),
        ('src/errors.rs', parts['errors.rs']),
    ]

    # Fix: the old validation loop flagged the Cargo.toml/Anchor.toml keys
    # (always present in `parts`) as invalid .rs files on every run. Only
    # warn about genuinely unexpected entries.
    for part_name in parts:
        if part_name not in ('Cargo.toml', 'Anchor.toml') and not part_name.endswith('.rs'):
            LOG.error(f"File {part_name} does not have a valid .rs extension.")

    for file_path, file_content in file_actions:
        full_file_path = os.path.join(project_path, file_path)

        if os.path.exists(full_file_path):
            LOG.info(f"{file_path} already exists. Skipping regeneration.")
        else:
            LOG.info(f"Generating {file_path}. Press 'y' to continue...")
            # NOTE(review): blocking input() inside a coroutine stalls the
            # event loop; acceptable only for interactive debugging runs.
            if input().strip().lower() != 'y':
                return f"Generation halted by user at {file_path}."

            await agent.abilities.run_action(task_id, "write_file", file_path=full_file_path, data=file_content.encode())
            LOG.info(f"{file_path} generated successfully.")

            # Compile the freshly generated file so the first broken file
            # aborts early and its error feeds the retry prompts.
            compile_result = await compile_file(agent, task_id, project_path, file_path)
            if "error" in compile_result.lower():
                LOG.error(f"Compilation failed for {file_path}: {compile_result}")
                LOG.info(f"Compilation failed for {file_path}, regenerating...")
                # NOTE(review): this recursion is unbounded — a persistent
                # compile error recurses until the recursion limit. Consider
                # a retry cap.
                ERROR_INFO = compile_result
                return await generate_solana_code(agent, task_id, specification)

    test_result = await agent.abilities.run_action(task_id, "test_code", project_path=project_path)
    if "All tests passed" not in test_result:
        LOG.info(f"Regenerating code due to errors: {test_result}")
        ERROR_INFO = test_result  # feed the test error back into the prompts
        return await generate_solana_code(agent, task_id, specification)

    return "Solana on-chain code generated, tested, and verified successfully."


async def compile_file(agent: Agent, task_id: str, project_path: str, file_path: str) -> str:
    """Run `cargo check --release` in *project_path* and report the outcome.

    Returns "Compilation successful." on a zero exit code, cargo's stderr on
    a non-zero exit code, or a "Compilation failed: ..." message when cargo
    cannot be run at all.

    NOTE(review): `agent`, `task_id` and `file_path` are currently unused —
    the whole crate is checked, not a single file. Kept for interface
    compatibility with the call site in generate_solana_code.
    """
    try:
        # NOTE(review): subprocess.run blocks the event loop for the duration
        # of the cargo check; a timeout at least bounds the stall.
        result = subprocess.run(
            ['cargo', 'check', '--release'],
            cwd=project_path,
            capture_output=True,
            text=True,
            timeout=600,
        )
    except FileNotFoundError:
        # Fix: surface the common "cargo not installed" case distinctly
        # instead of folding it into the generic exception message.
        return "Compilation failed: cargo executable not found on PATH."
    except subprocess.TimeoutExpired:
        return "Compilation failed: cargo check timed out."
    except Exception as e:
        return f"Compilation failed: {e}"

    if result.returncode != 0:
        return result.stderr
    return "Compilation successful."

@action(
    name="generate_frontend_code",
    description="Generate frontend code based on the provided specification",
    parameters=[
        {
            "name": "specification",
            "description": "Frontend code specification",
            "type": "string",
            "required": True,
        }
    ],
    output_type="str",
)
async def generate_frontend_code(agent, task_id: str, specification: str) -> str:
    """Generate a small webpack-based frontend from *specification*.

    Prompts the model once per file, splits the combined response with
    parse_response_content, and writes the files into the task workspace.

    Returns a human-readable status string.
    """
    prompt_engine = PromptEngine("gpt-3.5-turbo")
    prompt_names = [
        "frontend-index",
        "frontend-styles",
        "frontend-app",
        "frontend-package-json",
        "frontend-webpack-config",
    ]
    user_prompts = [
        prompt_engine.load_prompt(name, specification=specification)
        for name in prompt_names
    ]

    messages = [
        {"role": "system", "content": "You are a code generation assistant specialized in frontend development."},
    ] + [{"role": "user", "content": p} for p in user_prompts]

    chat_completion_kwargs = {
        "messages": messages,
        "model": "gpt-3.5-turbo",
    }
    chat_response = await chat_completion_request(**chat_completion_kwargs)
    response_content = chat_response["choices"][0]["message"]["content"]

    try:
        parts = parse_response_content(response_content)
    except Exception as e:
        LOG.error(f"Error parsing response content: {e}")
        # Fix: the previous message wrongly mentioned "Solana on-chain code"
        # (copy-paste from generate_solana_code).
        return "Failed to generate frontend code due to response parsing error."

    project_path = os.path.join(agent.workspace.base_path, task_id)

    # (relative path inside the project, key expected in the parsed response)
    output_files = [
        (os.path.join('src', 'index.html'), 'index.html'),
        (os.path.join('src', 'styles.css'), 'styles.css'),
        (os.path.join('src', 'app.js'), 'app.js'),
        ('package.json', 'package.json'),
        ('webpack.config.js', 'webpack.config.js'),
    ]
    for rel_path, part_key in output_files:
        if part_key not in parts:
            # Fix: parse_response_content only knows the Anchor file markers,
            # so frontend keys may be absent — fail with a clear message
            # instead of an uncaught KeyError.
            LOG.error(f"Missing generated content for {part_key}")
            return f"Failed to generate frontend code: missing {part_key} in model response."
        await agent.abilities.run_action(
            task_id,
            "write_file",
            file_path=os.path.join(project_path, rel_path),
            data=parts[part_key].encode(),
        )

    return "Modular frontend code generated and written to respective files."


@action(
name="generate_unit_tests",
description="Generates unit tests for Solana code.",
Expand Down Expand Up @@ -141,3 +335,46 @@ def parse_test_cases_response(response_content: str) -> TestCase:
LOG.error(f"Error decoding JSON response: {e}")
raise


def parse_response_content(response_content: str) -> dict:
    """Split a single LLM response into per-file sections.

    The model is instructed to precede each file with a marker line
    ("// anchor-lib.rs", "# Cargo.toml", ...). Lines before the first marker
    are discarded; every later line is appended to the current file.

    Returns a dict keyed by file name; files missing from the response are
    left as empty strings.
    """
    parts = {
        'anchor-lib.rs': '',
        'anchor-instructions.rs': '',
        'errors.rs': '',
        'Cargo.toml': '',
        'Anchor.toml': ''
    }

    current_part = None
    for line in response_content.split('\n'):
        if '// anchor-lib.rs' in line:
            current_part = 'anchor-lib.rs'
        elif '// anchor-instructions.rs' in line:
            current_part = 'anchor-instructions.rs'
        elif '// errors.rs' in line:
            current_part = 'errors.rs'
        elif '# Cargo.toml' in line:
            current_part = 'Cargo.toml'
        elif '# Anchor.toml' in line:
            current_part = 'Anchor.toml'
        elif current_part:
            parts[current_part] += line + '\n'

    for key in parts:
        # Fix: the previous pattern (r'```|rust|toml') deleted the substrings
        # "rust" and "toml" anywhere in the body, corrupting identifiers such
        # as "trust" or "anchor_lang" crate sections. Strip only Markdown
        # code fences (optionally tagged with a language).
        parts[key] = re.sub(r'```(?:rust|toml)?', '', parts[key]).strip()

    return parts


def parse_test_cases_response(response_content: str) -> Dict[str, str]:
    """Parse a JSON test-generation response into {file_name: test_file}.

    Expects *response_content* to be a JSON object with "file_name" and
    "test_file" keys. Re-raises after logging on malformed JSON or a
    missing key.
    """
    try:
        response_dict = json.loads(response_content)
        return {response_dict["file_name"]: response_dict["test_file"]}
    except json.JSONDecodeError as e:
        LOG.error(f"Error decoding JSON response: {e}")
        raise
    except KeyError as e:
        # Fix: a syntactically valid response missing a required key used to
        # escape as an unlogged KeyError; log it like the decode error.
        LOG.error(f"Missing key in JSON response: {e}")
        raise
123 changes: 123 additions & 0 deletions autogpts/SoloAgent/forge/actions/code_gen/singularity_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
import os
import re
from forge.sdk import Agent, LocalWorkspace


# Mock GPT response for testing: a single string containing three Rust files,
# each preceded by a "// <filename>" marker line that parse_response_content()
# below uses to split the response into parts.
mock_response_content = """
// anchor-lib.rs
use anchor_lang::prelude::*;
pub mod instructions;
pub mod errors;
use instructions::*;
declare_id!("Fg6PaFpoGXkYsidMpWxTWt8sWAb2uZ7AcfQkwJDrsVwC");
#[program]
mod my_anchor_program {
use super::*;
pub fn initialize(ctx: Context<Initialize>) -> Result<()> {
instructions::initialize(ctx)
}
}
// anchor-instructions.rs
use anchor_lang::prelude::*;
use crate::errors::MyProgramError;
#[derive(Accounts)]
pub struct Initialize<'info> {
#[account(init, payer = user, space = 8 + 8)]
pub my_account: Account<'info, MyAccount>,
#[account(mut)]
pub user: Signer<'info>,
pub system_program: Program<'info, System>,
}
pub fn initialize(ctx: Context<Initialize>) -> Result<()> {
let my_account = &mut ctx.accounts.my_account;
my_account.data = 0;
Ok(())
}
// errors.rs
use anchor_lang::prelude::*;
#[error_code]
pub enum MyProgramError {
#[msg("An error occurred.")]
GeneralError,
}
"""

# Hardcoded Cargo.toml manifest written verbatim by write_files().
cargo_toml_content = """
[package]
name = "my_anchor_program"
version = "0.1.0"
edition = "2018"
[dependencies]
anchor-lang = "0.30.1"
"""



# Function to parse the response content
def parse_response_content(response_content: str) -> dict:
    """Split a mock LLM response into {filename: content}.

    A line matching "// <name>.<ext>" starts a new file; following lines are
    appended to that file until the next marker. Lines before the first
    marker are discarded.
    """
    parts = {}
    current_filename = None

    # Regular expression to match filename marker lines such as
    # "// anchor-lib.rs" or "// app.js".
    filename_pattern = re.compile(r'^//\s*(.*\.(rs|html|js|css|json|config\.js))$')

    for line in response_content.split('\n'):
        match = filename_pattern.match(line)
        if match:
            current_filename = match.group(1).strip()
            parts[current_filename] = ''
        elif current_filename:
            parts[current_filename] += line + '\n'

    for key in parts:
        # Fix: the previous pattern deleted the substrings "rust", "js",
        # "json", "css", ... anywhere in the file body, mangling real code
        # (e.g. "const json" -> "const "). Strip only Markdown code fences,
        # optionally tagged with a language.
        parts[key] = re.sub(r'```(?:rust|html|js|css|json)?', '', parts[key]).strip()

    return parts

# Function to write files based on parsed content
def write_files(parts: dict, base_path: str, cargo_toml=None, anchor_toml=None):
    """Write the parsed project files under *base_path*.

    *cargo_toml* / *anchor_toml* override the manifest contents; when omitted
    they default to the module-level constants (backward compatible with the
    original two-argument call).
    """
    os.makedirs(base_path, exist_ok=True)

    if cargo_toml is None:
        cargo_toml = cargo_toml_content
    if anchor_toml is None:
        # NOTE(review): anchor_toml_content is not defined anywhere in this
        # module, so the default path raises NameError — define the constant
        # or always pass anchor_toml explicitly.
        anchor_toml = anchor_toml_content

    # Write the manifests first.
    with open(os.path.join(base_path, 'Cargo.toml'), 'w') as f:
        f.write(cargo_toml)
    print("Written Cargo.toml")

    with open(os.path.join(base_path, 'Anchor.toml'), 'w') as f:
        f.write(anchor_toml)
    print("Written Anchor.toml")

    # Write the generated source files from the parsed response.
    for filename, content in parts.items():
        file_path = os.path.join(base_path, filename)
        with open(file_path, 'w') as f:
            f.write(content)
        # Fix: this message previously printed the literal "(unknown)"
        # instead of the file name.
        print(f"Written {filename}")

# Test the functionality
def test_generate_files():
    """Smoke test: parse the mock response and write the files into the workspace."""
    parsed_parts = parse_response_content(mock_response_content)
    # NOTE(review): Agent.workspace is read as a *class* attribute here —
    # confirm that forge.sdk.Agent actually exposes one (an instance is more
    # typical).
    base_path = Agent.workspace.base_path if isinstance(
        Agent.workspace, LocalWorkspace) else str(Agent.workspace.base_path)
    # Fix: task_id was undefined (NameError at runtime); use a fixed test id.
    task_id = "test_task"
    project_path = os.path.join(base_path, task_id)
    # Fix: write into the per-task project directory; project_path was
    # previously computed but never used.
    write_files(parsed_parts, project_path)
    print("All files written successfully.")


# Run the test
if __name__ == "__main__":
    test_generate_files()
Loading

0 comments on commit e2e7e14

Please sign in to comment.