Skip to content

Commit

Permalink
fix for human mode set to always in user proxy (#158)
Browse files Browse the repository at this point in the history
  • Loading branch information
davorrunje authored Sep 9, 2024
1 parent d41812c commit dc65f3a
Show file tree
Hide file tree
Showing 3 changed files with 95 additions and 5 deletions.
2 changes: 1 addition & 1 deletion docs/docs/en/tutorial/index.md
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ search:

---

Welcome to FastAgency! This guide will walk you through the initial setup and usage of FastAgency, a powerful tool that leverages the [AutoGen](https://autogen-ai.github.io/autogen/){target="_blank"} framework to quickly build applications. FastAgency is designed to be flexible and adaptable, and we plan to extend support to additional agentic frameworks such as [CrewAI](https://www.crewai.com/){target="_blank"} in the near future. This will provide even more options for defining workflows and integrating with various AI tools.
Welcome to FastAgency! This guide will walk you through the initial setup and usage of FastAgency, a powerful tool that leverages the AutoGen framework to quickly build applications. FastAgency is designed to be flexible and adaptable, and we plan to extend support to additional agentic frameworks such as [CrewAI](https://www.crewai.com/){target="_blank"} in the near future. This will provide even more options for defining workflows and integrating with various AI tools.

With FastAgency, you can create interactive applications using various interfaces such as a console or Mesop.

Expand Down
25 changes: 23 additions & 2 deletions fastagency/core/runtimes/autogen/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@
"stars": "\\x1b\\[32m(\\*+)\\x1b\\[0m\n",
"function_call_execution": "^\\x1b\\[35m\\n>>>>>>>> EXECUTING FUNCTION ([a-zA-Z_]+)...\\x1b\\[0m\\n$",
"response_from_calling_tool": "^\\x1b\\[32m\\*\\*\\*\\*\\* Response from calling tool \\((call_[a-zA-Z0-9_]+)\\) \\*\\*\\*\\*\\*\\x1b\\[0m\\n$",
"no_human_input_received": "^\\x1b\\[31m\\n>>>>>>>> NO HUMAN INPUT RECEIVED\\.\\x1b\\[0m$",
"user_interrupted": "^USER INTERRUPTED\\n$",
"arguments": "^Arguments: \\n(.*)\\n$",
}

Expand All @@ -70,39 +72,56 @@ class CurrentMessage:
arguments: Optional[dict[str, Any]] = None
retval: Optional[Any] = None

def process_chunk(self, chunk: str) -> bool:
def process_chunk(self, chunk: str) -> bool:  # noqa: C901
    """Fold one console-output chunk into the message being assembled.

    Matches the chunk against the known AutoGen console patterns and updates
    the corresponding fields of this ``CurrentMessage``. Unrecognized chunks
    are treated as payload (function arguments, tool return value, or plain
    message body, depending on the current message type).

    Args:
        chunk: one chunk of AutoGen console output (typically one line).

    Returns:
        True when the chunk is the end-of-message separator and the message
        is complete; False while the message is still being accumulated.
    """
    if _match("end_of_message", chunk):
        return True

    if _match("auto_reply", chunk):
        self.auto_reply = True
    elif _match("sender_recepient", chunk):
        # NOTE(review): the match key is spelled "sender_recepient" while the
        # findall key below is "sender_recipient" — confirm both keys exist in
        # the patterns dict, or unify the spelling.
        self.sender, self.recipient = _findall("sender_recipient", chunk)
    elif _match("suggested_function_call", chunk):
        self.call_id, self.function_name = _findall(
            "suggested_function_call", chunk
        )
        self.type = "suggested_function_call"
    elif _match("stars", chunk):
        # decorative separator line — carries no information
        pass
    elif _match("function_call_execution", chunk):
        self.function_name = _findall("function_call_execution", chunk)  # type: ignore[assignment]
        self.type = "function_call_execution"
    elif _match("response_from_calling_tool", chunk):
        self.type = "function_call_execution"
        self.call_id = _findall("response_from_calling_tool", chunk)  # type: ignore[assignment]
    elif _match("no_human_input_received", chunk):
        # informational banner emitted when human_input_mode is ALWAYS and the
        # user gives an empty reply (the case fixed by #158) — no state change
        pass
    elif _match("user_interrupted", chunk):
        # informational banner — no state change
        pass
    else:
        # unmatched chunk: interpret it as payload for the current message type
        if self.type == "suggested_function_call":
            arguments_json: str = _findall("arguments", chunk)  # type: ignore[assignment]
            self.arguments = json.loads(arguments_json)
        elif self.type == "function_call_execution":
            self.retval = chunk
        else:
            self.body = chunk if self.body is None else self.body + chunk

    return False

def create_message(self) -> IOMessage:
    """Build an ``IOMessage`` from every field that has been populated so far."""
    populated = {
        field_name: value
        for field_name, value in asdict(self).items()
        if value is not None
    }
    return IOMessage.create(**populated)


Expand Down Expand Up @@ -134,13 +153,15 @@ def _process_message_chunk(self, chunk: str) -> bool:
def print(
    self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False
) -> None:
    """Mimic ``builtins.print``: join the objects into one chunk and parse it.

    When the chunk completes a message, the freshly assembled message is
    forwarded to the underlying IO for processing.
    """
    chunk = sep.join(str(obj) for obj in objects) + end
    if self._process_message_chunk(chunk):
        # a full message has been assembled; hand the latest one to the IO layer
        self.io.process_message(self.messages[-1])

def input(self, prompt: str = "", *, password: bool = False) -> str:
# logger.info(f"input(): {prompt=}, {password=}")
message = TextInput(
sender=None, recipient=None, prompt=prompt, password=password
)
Expand Down
73 changes: 71 additions & 2 deletions tests/core/test_autogen.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
from typing import Any
from typing import Annotated, Any
from unittest.mock import MagicMock

import pytest
from autogen.agentchat import ConversableAgent
from autogen.agentchat import ConversableAgent, UserProxyAgent

from fastagency.core import Chatable, IOMessage
from fastagency.core.io.console import ConsoleIO
Expand Down Expand Up @@ -72,3 +73,71 @@ def simple_workflow(io: Chatable, initial_message: str, session_id: str) -> str:
},
)
)


class InputMock:
    """Callable stand-in for ``builtins.input`` that replays canned responses.

    Every invocation is recorded on an internal ``MagicMock`` so tests can
    inspect how the mock was called; responses are consumed in FIFO order.
    """

    def __init__(self, responses: list[str]) -> None:
        """Store the queued responses and set up the call recorder."""
        self.responses = responses
        self.mock = MagicMock()

    def __call__(self, *args: Any, **kwargs: Any) -> str:
        """Record the call, then consume and return the next queued response."""
        self.mock(*args, **kwargs)
        next_response = self.responses.pop(0)
        return next_response


@pytest.mark.openai
class TestAutoGenWorkflowsWithHumanInputAlways:
    # Regression test for #158: a UserProxyAgent with human_input_mode="ALWAYS"
    # must keep working when the human reply comes through the Chatable IO layer.

    @pytest.fixture
    def wf(self, openai_gpt4o_mini_llm_config: dict[str, Any]) -> AutoGenWorkflows:
        """Build a workflow registry with one user-proxy/teacher chat workflow."""
        wf = AutoGenWorkflows()

        @wf.register(
            name="test_workflow",
            description="Test of user proxy with human input mode set to always",
        )
        def workflow(io: Chatable, initial_message: str, session_id: str) -> str:
            # ALWAYS forces a human prompt on every turn — exactly the code
            # path this commit fixes.
            user_proxy = UserProxyAgent(
                name="User_Proxy",
                human_input_mode="ALWAYS",
            )
            assistant = ConversableAgent(
                name="Teacher_Agent",
                system_message="You are a math teacher.",
                llm_config=openai_gpt4o_mini_llm_config,
            )

            @user_proxy.register_for_execution()  # type: ignore[misc]
            @assistant.register_for_llm(description="Get weather information")  # type: ignore[misc]
            def get_weather_info(
                city: Annotated[
                    str, "city for which the weather information is requested"
                ],
            ) -> str:
                # canned answer keeps the test deterministic — no real weather API
                return "The weather in Zagreb right now is heavy rain."

            chat_result = user_proxy.initiate_chat(
                assistant,
                message=initial_message,
                summary_method="reflection_with_llm",
                max_turns=5,
            )

            return chat_result.summary  # type: ignore[no-any-return]

        return wf

    @pytest.mark.parametrize("response", ["", "no"])
    def test(
        self, wf: AutoGenWorkflows, response: str, monkeypatch: pytest.MonkeyPatch
    ) -> None:
        """Run the workflow while ``builtins.input`` is replaced by canned responses."""
        # Five canned replies cover up to max_turns=5 human-input prompts;
        # the empty-string case ("") exercises the NO HUMAN INPUT RECEIVED path.
        monkeypatch.setattr("builtins.input", InputMock([response] * 5))

        result = wf.run(
            name="test_workflow",
            session_id="session_id",
            io=ConsoleIO(),
            initial_message="What is the weather in Zagreb right now?",
        )

        assert result is not None

0 comments on commit dc65f3a

Please sign in to comment.