From f0aaa9683c63e25429a19ed99152b42b70df59b8 Mon Sep 17 00:00:00 2001
From: Davor Runje
Date: Fri, 13 Sep 2024 07:59:25 +0200
Subject: [PATCH] Adds multiple choice message handling to MesopUI (#198)

* Multiple choice WIP(1)

* Multiple choice WIP(2)

* Multiple choice WIP(3)

* test fix

* test fixes

* added all caps instructions for message termination

* added all caps instructions for message termination

---------

Co-authored-by: Davorin Rusevljan
---
 docs/docs/SUMMARY.md                          |   4 +-
 .../mesop/data_model/ConversationMessage.md   |  11 ++
 .../ui/mesop/message/consume_responses.md     |  11 ++
 .../handle_message.md}                        |   2 +-
 .../custom_user_interactions/main.py          |   4 +-
 .../custom_user_interactions/main_mesop.py    | 136 +++++++++++++++++
 .../external_rest_apis/main_mesop.py          |  60 ++++++++
 fastagency/studio/models/agents/assistant.py  |   2 +-
 fastagency/ui/mesop/base.py                   |  14 ++
 fastagency/ui/mesop/components/inputs.py      |  16 +-
 fastagency/ui/mesop/data_model.py             |  12 +-
 fastagency/ui/mesop/main.py                   |  73 +--------
 fastagency/ui/mesop/message.py                | 138 +++++++++++++++---
 fastagency/ui/mesop/send_prompt.py            |   2 +-
 tests/conftest.py                             |   2 +-
 tests/studio/models/agents/test_assistant.py  |   2 +-
 .../models/teams/test_two_agents_team.py      |   3 +-
 17 files changed, 392 insertions(+), 100 deletions(-)
 create mode 100644 docs/docs/en/api/fastagency/ui/mesop/data_model/ConversationMessage.md
 create mode 100644 docs/docs/en/api/fastagency/ui/mesop/message/consume_responses.md
 rename docs/docs/en/api/fastagency/ui/mesop/{main/on_user_feedback.md => message/handle_message.md} (70%)
 create mode 100644 docs/docs_src/user_guide/custom_user_interactions/main_mesop.py
 create mode 100644 docs/docs_src/user_guide/external_rest_apis/main_mesop.py

diff --git a/docs/docs/SUMMARY.md b/docs/docs/SUMMARY.md
index c14248254..6aa8b9dc3 100644
--- a/docs/docs/SUMMARY.md
+++ b/docs/docs/SUMMARY.md
@@ -256,17 +256,19 @@ search:
                 - [header](api/fastagency/ui/mesop/components/ui_common/header.md)
         - data_model
             - [Conversation](api/fastagency/ui/mesop/data_model/Conversation.md)
+            - [ConversationMessage](api/fastagency/ui/mesop/data_model/ConversationMessage.md)
            - [State](api/fastagency/ui/mesop/data_model/State.md)
         - main
             - [conversation_box](api/fastagency/ui/mesop/main/conversation_box.md)
             - [conversation_starter_box](api/fastagency/ui/mesop/main/conversation_starter_box.md)
             - [get_ui](api/fastagency/ui/mesop/main/get_ui.md)
             - [home_page](api/fastagency/ui/mesop/main/home_page.md)
-            - [on_user_feedback](api/fastagency/ui/mesop/main/on_user_feedback.md)
             - [past_conversations_box](api/fastagency/ui/mesop/main/past_conversations_box.md)
             - [send_prompt](api/fastagency/ui/mesop/main/send_prompt.md)
         - message
             - [MesopGUIMessageVisitor](api/fastagency/ui/mesop/message/MesopGUIMessageVisitor.md)
+            - [consume_responses](api/fastagency/ui/mesop/message/consume_responses.md)
+            - [handle_message](api/fastagency/ui/mesop/message/handle_message.md)
             - [message_box](api/fastagency/ui/mesop/message/message_box.md)
         - send_prompt
             - [send_prompt_to_autogen](api/fastagency/ui/mesop/send_prompt/send_prompt_to_autogen.md)
diff --git a/docs/docs/en/api/fastagency/ui/mesop/data_model/ConversationMessage.md b/docs/docs/en/api/fastagency/ui/mesop/data_model/ConversationMessage.md
new file mode 100644
index 000000000..4c9c31e35
--- /dev/null
+++ b/docs/docs/en/api/fastagency/ui/mesop/data_model/ConversationMessage.md
@@ -0,0 +1,11 @@
+---
+# 0.5 - API
+# 2 - Release
+# 3 - Contributing
+# 5 - Template Page
+# 10 - Default
+search:
+  boost: 0.5
+---
+
+::: fastagency.ui.mesop.data_model.ConversationMessage
diff --git a/docs/docs/en/api/fastagency/ui/mesop/message/consume_responses.md b/docs/docs/en/api/fastagency/ui/mesop/message/consume_responses.md
new file mode 100644
index 000000000..09f6c44a9
--- /dev/null
+++ b/docs/docs/en/api/fastagency/ui/mesop/message/consume_responses.md
@@ -0,0 +1,11 @@
+---
+# 0.5 - API
+# 2 - Release
+# 3 - Contributing
+# 5 - Template Page
+# 10 - Default
+search:
+  boost: 0.5
+---
+
+::: fastagency.ui.mesop.message.consume_responses
diff --git a/docs/docs/en/api/fastagency/ui/mesop/main/on_user_feedback.md b/docs/docs/en/api/fastagency/ui/mesop/message/handle_message.md
similarity index 70%
rename from docs/docs/en/api/fastagency/ui/mesop/main/on_user_feedback.md
rename to docs/docs/en/api/fastagency/ui/mesop/message/handle_message.md
index 5ead74b05..8b6c8763c 100644
--- a/docs/docs/en/api/fastagency/ui/mesop/main/on_user_feedback.md
+++ b/docs/docs/en/api/fastagency/ui/mesop/message/handle_message.md
@@ -8,4 +8,4 @@ search:
   boost: 0.5
 ---
 
-::: fastagency.ui.mesop.main.on_user_feedback
+::: fastagency.ui.mesop.message.handle_message
diff --git a/docs/docs_src/user_guide/custom_user_interactions/main.py b/docs/docs_src/user_guide/custom_user_interactions/main.py
index f248450c1..e2904ea52 100644
--- a/docs/docs_src/user_guide/custom_user_interactions/main.py
+++ b/docs/docs_src/user_guide/custom_user_interactions/main.py
@@ -23,8 +23,8 @@
 wf = AutoGenWorkflows()
 
 
-@wf.register(name="exam_practice", description="Student and teacher chat")  # type: ignore[type-var]
-def exam_learning(ui: UI, initial_message: str, session_id: str) -> Optional[str]:
+@wf.register(name="exam_practice", description="Student and teacher chat")
+def exam_learning(ui: UI, initial_message: str, session_id: str) -> str:
     def is_termination_msg(msg: dict[str, Any]) -> bool:
         return msg["content"] is not None and "TERMINATE" in msg["content"]
 
diff --git a/docs/docs_src/user_guide/custom_user_interactions/main_mesop.py b/docs/docs_src/user_guide/custom_user_interactions/main_mesop.py
new file mode 100644
index 000000000..6de815dd9
--- /dev/null
+++ b/docs/docs_src/user_guide/custom_user_interactions/main_mesop.py
@@ -0,0 +1,136 @@
+import os
+from typing import Annotated, Any, Optional
+
+from autogen import register_function
+from autogen.agentchat import ConversableAgent
+
+from fastagency import FastAgency
+from fastagency import UI
+from fastagency.base import MultipleChoice, SystemMessage
+from fastagency.ui.mesop import MesopUI
+from fastagency.runtime.autogen.base import AutoGenWorkflows
+
+llm_config = {
+    "config_list": [
+        {
+            "model": "gpt-4o-mini",
+            "api_key": os.getenv("OPENAI_API_KEY"),
+        }
+    ],
+    "temperature": 0.0,
+}
+
+wf = AutoGenWorkflows()
+
+
+@wf.register(name="exam_practice", description="Student and teacher chat")
+def exam_learning(ui: UI, initial_message: str, session_id: str) -> str:
+
+    def is_termination_msg(msg: dict[str, Any]) -> bool:
+        return msg["content"] is not None and "TERMINATE" in msg["content"]
+
+    student_agent = ConversableAgent(
+        name="Student_Agent",
+        system_message="You are a student writing a practice test. Your task is as follows:\n"
+        " 1) Retrieve exam questions by calling a function.\n"
+        " 2) Write a draft of proposed answers and engage in dialogue with your tutor.\n"
+        " 3) Once you are done with the dialogue, register the final answers by calling a function.\n"
+        " 4) Retrieve the final grade by calling a function.\n"
+        "Finally, terminate the chat by saying 'TERMINATE'.",
+        llm_config=llm_config,
+        human_input_mode="NEVER",
+        is_termination_msg=is_termination_msg,
+    )
+    teacher_agent = ConversableAgent(
+        name="Teacher_Agent",
+        system_message="You are a teacher.",
+        llm_config=llm_config,
+        human_input_mode="NEVER",
+        is_termination_msg=is_termination_msg,
+    )
+
+    def retrieve_exam_questions(
+        message: Annotated[str, "Message for examiner"]
+    ) -> Optional[str]:
+        try:
+            msg = MultipleChoice(
+                sender="student",
+                recipient="teacher",
+                prompt=message,
+                choices=[
+                    "1) Mona Lisa",
+                    "2) Innovations",
+                    "3) Florence at the time of Leonardo",
+                    "4) The Last Supper",
+                    "5) Vitruvian Man",
+                ],
+                default="1) Mona Lisa",
+            )
+            return ui.process_message(msg)
+        except Exception as e:
+            return f"retrieve_exam_questions() FAILED! {e}"
+
+    def write_final_answers(message: Annotated[str, "Message for examiner"]) -> str:
+        try:
+            msg = SystemMessage(
+                sender="function call logger",
+                recipient="system",
+                message={
+                    "operation": "storing final answers",
+                    "content": message,
+                },
+            )
+            ui.process_message(msg)
+            return "Final answers stored."
+        except Exception as e:
+            return f"write_final_answers() FAILED! {e}"
+
+    def get_final_grade(
+        message: Annotated[str, "Message for examiner"]
+    ) -> Optional[str]:
+        try:
+            msg = MultipleChoice(
+                sender="student",
+                recipient="teacher",
+                prompt=message,
+                choices=["A", "B", "C", "D", "F"],
+            )
+            return ui.process_message(msg)
+        except Exception as e:
+            return f"get_final_grade() FAILED! {e}"
+
+    register_function(
+        retrieve_exam_questions,
+        caller=student_agent,
+        executor=teacher_agent,
+        name="retrieve_exam_questions",
+        description="Get exam questions from the examiner",
+    )
+
+    register_function(
+        write_final_answers,
+        caller=student_agent,
+        executor=teacher_agent,
+        name="write_final_answers",
+        description="Write final answers to exam questions to the examiner, but only after discussing with the tutor first.",
+    )
+
+    register_function(
+        get_final_grade,
+        caller=student_agent,
+        executor=teacher_agent,
+        name="get_final_grade",
+        description="Get the final grade after submitting the answers.",
+    )
+
+    chat_result = teacher_agent.initiate_chat(
+        student_agent,
+        message=initial_message,
+        summary_method="reflection_with_llm",
+        max_turns=10,
+    )
+
+    return chat_result.summary  # type: ignore[no-any-return]
+
+
+app = FastAgency(wf=wf, ui=MesopUI())
diff --git a/docs/docs_src/user_guide/external_rest_apis/main_mesop.py b/docs/docs_src/user_guide/external_rest_apis/main_mesop.py
new file mode 100644
index 000000000..4ba119b18
--- /dev/null
+++ b/docs/docs_src/user_guide/external_rest_apis/main_mesop.py
@@ -0,0 +1,60 @@
+import os
+
+from autogen import UserProxyAgent
+from autogen.agentchat import ConversableAgent
+
+from fastagency import FastAgency
+from fastagency import UI
+from fastagency.ui.mesop import MesopUI
+from fastagency.runtime.autogen.base import AutoGenWorkflows
+
+from fastagency.api.openapi import OpenAPI
+
+
+llm_config = {
+    "config_list": [
+        {
+            "model": "gpt-4o-mini",
+            "api_key": os.getenv("OPENAI_API_KEY"),
+        }
+    ],
+    "temperature": 0.0,
+}
+
+WEATHER_OPENAPI_URL = "https://weather.tools.fastagency.ai/openapi.json"
+
+wf = AutoGenWorkflows()
+
+
+@wf.register(name="simple_weather", description="Weather chat")
+def weather_workflow(ui: UI, initial_message: str, session_id: str) -> str:
+
+    weather_api = OpenAPI.create(openapi_url=WEATHER_OPENAPI_URL)
+
+    user_agent = UserProxyAgent(
+        name="User_Agent",
+        system_message="You are a user agent",
+        llm_config=llm_config,
+        human_input_mode="NEVER",
+    )
+    weather_agent = ConversableAgent(
+        name="Weather_Agent",
+        system_message="You are a weather agent",
+        llm_config=llm_config,
+        human_input_mode="NEVER",
+    )
+
+    weather_api.register_for_llm(weather_agent)
+    weather_api.register_for_execution(user_agent)
+
+    chat_result = user_agent.initiate_chat(
+        weather_agent,
+        message=initial_message,
+        summary_method="reflection_with_llm",
+        max_turns=3,
+    )
+
+    return chat_result.summary  # type: ignore[no-any-return]
+
+
+app = FastAgency(wf=wf, ui=MesopUI())
diff --git a/fastagency/studio/models/agents/assistant.py b/fastagency/studio/models/agents/assistant.py
index c6f816ed2..b323744b1 100644
--- a/fastagency/studio/models/agents/assistant.py
+++ b/fastagency/studio/models/agents/assistant.py
@@ -16,7 +16,7 @@ class AssistantAgent(AgentBaseModel):
         Field(
             description="The system message of the agent. This message is used to inform the agent about his role in the conversation"
         ),
-    ] = "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE'"
+    ] = "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE' (IMPORTANT: use all caps)"
 
     @classmethod
     async def create_autogen(
diff --git a/fastagency/ui/mesop/base.py b/fastagency/ui/mesop/base.py
index 837abe42c..53c01c432 100644
--- a/fastagency/ui/mesop/base.py
+++ b/fastagency/ui/mesop/base.py
@@ -221,6 +221,19 @@ def conversation_worker(ui: MesopUI, subconversation: MesopUI) -> None:
                 },
             )
         )
+
+        # io.process_message(
+        #     IOMessage.create(
+        #         sender="tester",
+        #         recipient="workflow",
+        #         type="multiple_choice",
+        #         prompt="Concentrate and choose correct answer. When are you going to write unit tests?",
+        #         choices=["Today", "Tomorrow", "Never", "I already have unit tests"],
+        #         default="Tomorrow",
+        #         single=True,
+        #     )
+        # )
+
         try:
             result = wf.run(
                 name=name,
@@ -228,6 +241,7 @@
                 ui=subconversation,  # type: ignore[arg-type]
                 initial_message=initial_message,
             )
+
         except Exception as ex:
             ui.process_message(
                 IOMessage.create(
diff --git a/fastagency/ui/mesop/components/inputs.py b/fastagency/ui/mesop/components/inputs.py
index 2f05fe81d..36269f29d 100644
--- a/fastagency/ui/mesop/components/inputs.py
+++ b/fastagency/ui/mesop/components/inputs.py
@@ -12,8 +12,12 @@ def _on_blur(e: me.InputBlurEvent) -> None:
 
 
 def input_user_feedback(
-    send_feedback: Callable[[me.ClickEvent], Iterator[None]],
+    send_feedback: Callable[[me.ClickEvent], Iterator[None]], disabled: bool = False
 ) -> None:
+    def _on_feedback_blur(e: me.InputBlurEvent) -> None:
+        state = me.state(State)
+        state.conversation.feedback = e.value
+
     with me.box(
         style=me.Style(
             border_radius=16,
@@ -26,8 +30,8 @@ def input_user_feedback(
         with me.box(style=me.Style(flex_grow=1)):
             me.native_textarea(
                 placeholder="Provide a feedback to the team",
-                on_blur=_on_blur,
-                key="feedback",
+                on_blur=_on_feedback_blur,
+                disabled=disabled,
                 style=me.Style(
                     padding=me.Padding(top=16, left=16),
                     outline="none",
@@ -35,7 +39,11 @@ def input_user_feedback(
                     border=me.Border.all(me.BorderSide(style="none")),
                 ),
             )
-        with me.content_button(type="icon", on_click=send_feedback):
+        with me.content_button(
+            type="icon",
+            on_click=send_feedback,
+            disabled=disabled,
+        ):
             me.icon("send")
 
 
diff --git a/fastagency/ui/mesop/data_model.py b/fastagency/ui/mesop/data_model.py
index f1563c3de..7110a9071 100644
--- a/fastagency/ui/mesop/data_model.py
+++ b/fastagency/ui/mesop/data_model.py
@@ -4,6 +4,14 @@
 import mesop as me
 
 
+@dataclass
+class ConversationMessage:
+    io_message_json: str = ""
+    level: int = 0
+    conversation_id: str = ""
+    feedback: list[str] = field(default_factory=list)
+
+
 @dataclass
 class Conversation:
     id: str = ""
@@ -12,8 +20,7 @@ class Conversation:
     waiting_for_feedback: bool = False
     feedback: str = ""
     is_from_the_past: bool = False
-    # messages: list[ConversationMessage] = field(default_factory=list)
-    messages: list[str] = field(default_factory=list)
+    messages: list[ConversationMessage] = field(default_factory=list)
     fastagency: Optional[str] = None
 
 
@@ -24,4 +31,3 @@ class State:
     conversation: Conversation
     past_conversations: list[Conversation] = field(default_factory=list)
     hide_past: bool = True
-    fastagency: Optional[str] = None
diff --git a/fastagency/ui/mesop/main.py b/fastagency/ui/mesop/main.py
index 815e432a8..b489d5a72 100644
--- a/fastagency/ui/mesop/main.py
+++ b/fastagency/ui/mesop/main.py
@@ -1,17 +1,14 @@
-import json
 from collections.abc import Iterator
-from uuid import uuid4
 
 import mesop as me
 
-from ...base import AskingMessage, WorkflowCompleted
 from ...logging import get_logger
-from .base import MesopMessage, MesopUI
-from .components.inputs import input_prompt, input_user_feedback
+from .base import MesopUI
+from .components.inputs import input_prompt
 from .components.ui_common import header
 from .data_model import Conversation, State
-from .message import message_box
-from .send_prompt import send_prompt_to_autogen, send_user_feedback_to_autogen
+from .message import consume_responses, message_box
+from .send_prompt import send_prompt_to_autogen
 from .styles import (
     CHAT_STARTER_STYLE,
     PAST_CHATS_HIDE_STYLE,
@@ -114,37 +111,6 @@ def conversation_starter_box() -> None:
         input_prompt(send_prompt)
 
 
-def _handle_message(state: State, message: MesopMessage) -> None:
-    conversation = state.conversation
-    messages = conversation.messages
-    level = message.conversation.level
-    conversation_id = message.conversation.id
-    io_message = message.io_message
-    message_dict = io_message.model_dump()
-    message_string = json.dumps(
-        {"level": level, "conversationId": conversation_id, "io_message": message_dict}
-    )
-    messages.append(message_string)
-    conversation.messages = list(messages)
-    if isinstance(io_message, AskingMessage):
-        conversation.waiting_for_feedback = True
-        conversation.completed = False
-    if isinstance(io_message, WorkflowCompleted):
-        conversation.completed = True
-        conversation.waiting_for_feedback = False
-    if not conversation.is_from_the_past:
-        uuid: str = uuid4().hex
-        becomme_past = Conversation(
-            id=uuid,
-            title=conversation.title,
-            messages=conversation.messages,
-            completed=True,
-            is_from_the_past=True,
-            waiting_for_feedback=False,
-        )
-        state.past_conversations.insert(0, becomme_past)
-
-
 def send_prompt(e: me.ClickEvent) -> Iterator[None]:
     ui = get_ui()
     wf = ui.app.wf
@@ -162,13 +128,7 @@ def send_prompt(e: me.ClickEvent) -> Iterator[None]:
     state.in_conversation = True
     yield
     responses = send_prompt_to_autogen(prompt=prompt, wf=wf, name=name)
-    for message in responses:
-        state = me.state(State)
-        _handle_message(state, message)
-        yield
-        me.scroll_into_view(key="end_of_messages")
-        yield
-    yield
+    yield from consume_responses(responses)
 
 
 def conversation_box() -> None:
@@ -183,30 +143,9 @@ def conversation_box() -> None:
         )
     ):
         for message in messages:
-            message_box(message)
+            message_box(message, conversation.is_from_the_past)
         if messages:
             me.box(
                 key="end_of_messages",
                 style=me.Style(margin=me.Margin(bottom="50vh")),
             )
-        if conversation.waiting_for_feedback:
-            input_user_feedback(on_user_feedback)
-
-
-def on_user_feedback(e: me.ClickEvent) -> Iterator[None]:
-    state = me.state(State)
-    conversation = state.conversation
-    feedback = conversation.feedback
-    conversation.feedback = ""
-    conversation.waiting_for_feedback = False
-    yield
-    me.scroll_into_view(key="end_of_messages")
-    yield
-    responses = send_user_feedback_to_autogen(feedback)
-    for message in responses:
-        state = me.state(State)
-        _handle_message(state, message)
-        yield
-        me.scroll_into_view(key="end_of_messages")
-        yield
-    yield
diff --git a/fastagency/ui/mesop/message.py b/fastagency/ui/mesop/message.py
index e7ff75cfc..00e315425 100644
--- a/fastagency/ui/mesop/message.py
+++ b/fastagency/ui/mesop/message.py
@@ -1,8 +1,15 @@
 import json
+from collections.abc import Iterable, Iterator
 from typing import Optional
+from uuid import uuid4
 
 import mesop as me
 
+from fastagency.base import AskingMessage, WorkflowCompleted
+from fastagency.ui.mesop.base import MesopMessage
+from fastagency.ui.mesop.components.inputs import input_user_feedback
+from fastagency.ui.mesop.send_prompt import send_user_feedback_to_autogen
+
 from ...base import (
     IOMessage,
     IOMessageVisitor,
@@ -12,28 +19,97 @@
     TextMessage,
 )
 from .components.ui_common import darken_hex_color
+from .data_model import Conversation, ConversationMessage, State
+
+
+def consume_responses(responses: Iterable[MesopMessage]) -> Iterator[None]:
+    for message in responses:
+        state = me.state(State)
+        handle_message(state, message)
+        yield
+        me.scroll_into_view(key="end_of_messages")
+        yield
+    yield
 
 
-def message_box(message: str) -> None:
-    message_dict = json.loads(message)
-    level = message_dict["level"]
-    conversation_id = message_dict["conversationId"]
-    io_message_dict = message_dict["io_message"]
+def message_box(message: ConversationMessage, read_only: bool) -> None:
+    io_message_dict = json.loads(message.io_message_json)
+    level = message.level
+    conversation_id = message.conversation_id
     io_message = IOMessage.create(**io_message_dict)
-    visitor = MesopGUIMessageVisitor(level, conversation_id)
+    visitor = MesopGUIMessageVisitor(level, conversation_id, message, read_only)
     visitor.process_message(io_message)
 
 
+def handle_message(state: State, message: MesopMessage) -> None:
+    conversation = state.conversation
+    messages = conversation.messages
+    level = message.conversation.level
+    conversation_id = message.conversation.id
+    io_message = message.io_message
+    message_dict = io_message.model_dump()
+    message_json = json.dumps(message_dict)
+    conversation_message = ConversationMessage(
+        level=level,
+        conversation_id=conversation_id,
+        io_message_json=message_json,
+        feedback=[],
+    )
+    messages.append(conversation_message)
+    conversation.messages = list(messages)
+    if isinstance(io_message, AskingMessage):
+        conversation.waiting_for_feedback = True
+        conversation.completed = False
+    if isinstance(io_message, WorkflowCompleted):
+        conversation.completed = True
+        conversation.waiting_for_feedback = False
+    if not conversation.is_from_the_past:
+        uuid: str = uuid4().hex
+        become_past = Conversation(
+            id=uuid,
+            title=conversation.title,
+            messages=conversation.messages,
+            completed=True,
+            is_from_the_past=True,
+            waiting_for_feedback=False,
+        )
+        state.past_conversations.insert(0, become_past)
+
+
 class MesopGUIMessageVisitor(IOMessageVisitor):
-    def __init__(self, level: int, conversation_id: str) -> None:
+    def __init__(
+        self,
+        level: int,
+        conversation_id: str,
+        conversation_message: ConversationMessage,
+        read_only: bool = False,
+    ) -> None:
         """Initialize the MesopGUIMessageVisitor object.
 
         Args:
             level (int): The level of the message.
             conversation_id (str): The ID of the conversation.
+            conversation_message (ConversationMessage): The conversation message that wraps the visited io_message.
+            read_only (bool): Input messages are disabled in read-only mode.
         """
         self._level = level
         self._conversation_id = conversation_id
+        self._readonly = read_only
+        self._conversation_message = conversation_message
+
+    def _has_feedback(self) -> bool:
+        return len(self._conversation_message.feedback) > 0
+
+    def _provide_feedback(self, feedback: str) -> Iterator[None]:
+        state = me.state(State)
+        conversation = state.conversation
+        conversation.feedback = ""
+        conversation.waiting_for_feedback = False
+        yield
+        me.scroll_into_view(key="end_of_messages")
+        yield
+        responses = send_user_feedback_to_autogen(feedback)
+        yield from consume_responses(responses)
 
     def visit_default(self, message: IOMessage) -> None:
         base_color = "#aff"
@@ -75,12 +151,18 @@ def visit_system_message(self, message: SystemMessage) -> None:
             me.markdown(json.dumps(message.message, indent=2))
 
     def visit_text_input(self, message: TextInput) -> str:
-        text = message.prompt if message.prompt else "Please enter a value"
+        def on_input(ev: me.ClickEvent) -> Iterator[None]:
+            state = me.state(State)
+            feedback = state.conversation.feedback
+            self._conversation_message.feedback = [feedback]
+            yield from self._provide_feedback(feedback)
+
+        base_color = "#dff"
+        prompt = message.prompt if message.prompt else "Please enter a value"
         if message.suggestions:
             suggestions = ",".join(suggestion for suggestion in message.suggestions)
-            text += "\n" + suggestions
+            prompt += "\n" + suggestions
 
-        base_color = "#bff"
         with me.box(
             style=me.Style(
                 background=base_color,
@@ -90,17 +172,32 @@
             )
        ):
             self._header(message, base_color, title="Input requested")
-            me.markdown(text)
+            me.markdown(prompt)
+            input_user_feedback(
+                on_input, disabled=self._readonly or self._has_feedback()
+            )
             return ""
 
     def visit_multiple_choice(self, message: MultipleChoice) -> str:
-        text = message.prompt if message.prompt else "Please enter a value"
+        def on_change(ev: me.RadioChangeEvent) -> Iterator[None]:
+            feedback = ev.value
+            self._conversation_message.feedback = [feedback]
+            yield from self._provide_feedback(feedback)
+
+        base_color = "#dff"
+        prompt = message.prompt if message.prompt else "Please enter a value"
         if message.choices:
-            options = ",".join(
-                f"{i+1}. {choice}" for i, choice in enumerate(message.choices)
-            )
-            text += "\n" + options
-        base_color = "#cff"
+            options = (
+                me.RadioOption(
+                    label=(choice if choice != message.default else choice + " *"),
+                    value=choice,
+                )
+                for choice in message.choices
+            )
+        if self._has_feedback():
+            pre_selected = {"value": self._conversation_message.feedback[0]}
+        else:
+            pre_selected = {}
         with me.box(
             style=me.Style(
                 background=base_color,
@@ -110,7 +207,14 @@
             )
        ):
             self._header(message, base_color, title="Input requested")
-            me.markdown(text)
+            me.text(prompt)
+            me.radio(
+                on_change=on_change,
+                disabled=self._readonly or self._has_feedback(),
+                options=options,
+                style=me.Style(display="flex", flex_direction="column"),
+                **pre_selected,
+            )
             return ""
 
     def process_message(self, message: IOMessage) -> Optional[str]:
diff --git a/fastagency/ui/mesop/send_prompt.py b/fastagency/ui/mesop/send_prompt.py
index e21961f2e..be22f02e4 100644
--- a/fastagency/ui/mesop/send_prompt.py
+++ b/fastagency/ui/mesop/send_prompt.py
@@ -18,7 +18,7 @@ def send_prompt_to_autogen(
 
 def send_user_feedback_to_autogen(user_response: str) -> Iterable[MesopMessage]:
     state = me.state(State)
-    mesop_id = state.fastagency
+    mesop_id = state.conversation.fastagency
     mesop_io = MesopUI.get_conversation(mesop_id)
     mesop_io.respond(user_response)
     return mesop_io.get_message_stream()
diff --git a/tests/conftest.py b/tests/conftest.py
index 6c7164428..b812a3697 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -619,7 +619,7 @@ async def placeholder_assistant_weatherapi_ref(
         name=add_random_suffix("assistant_weather"),
         llm=llm_ref,
         toolbox_1=weather_toolbox_ref,
-        system_message="You are a helpful assistant with access to Weather API. After you successfully answer the question asked and there are no new questions, terminate the chat by outputting 'TERMINATE' (all caps)",
+        system_message="You are a helpful assistant with access to Weather API. After you successfully answer the question asked and there are no new questions, terminate the chat by outputting 'TERMINATE' (in all caps, e.g. 'Terminate' will be ignored).",
     )
 
 
diff --git a/tests/studio/models/agents/test_assistant.py b/tests/studio/models/agents/test_assistant.py
index ba3a6c9dd..fdba77802 100644
--- a/tests/studio/models/agents/test_assistant.py
+++ b/tests/studio/models/agents/test_assistant.py
@@ -209,7 +209,7 @@ def test_assistant_model_schema(self) -> None:
                     "title": "Toolbox",
                 },
                 "system_message": {
-                    "default": "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE'",
+                    "default": "You are a helpful assistant. After you successfully answer all questions and there are no new questions asked after your response (e.g. there is no specific direction or question asked after you give a response), terminate the chat by outputting 'TERMINATE' (IMPORTANT: use all caps)",
                     "description": "The system message of the agent. This message is used to inform the agent about his role in the conversation",
                     "title": "System Message",
                     "type": "string",
diff --git a/tests/studio/models/teams/test_two_agents_team.py b/tests/studio/models/teams/test_two_agents_team.py
index 67e181f1b..5a8689a32 100644
--- a/tests/studio/models/teams/test_two_agents_team.py
+++ b/tests/studio/models/teams/test_two_agents_team.py
@@ -204,7 +204,8 @@ async def test_simple_chat(
         assert ag_team
 
         history = ag_team.initiate_chat("What is 2 + 2?")
-        assert sum(["TERMINATE" in msg["content"] for msg in history.chat_history]) == 1
+        messages = (msg["content"] for msg in history.chat_history)
+        assert sum("TERMINATE" in msg for msg in messages) == 1
 
     @pytest.mark.asyncio
     @parametrize_fixtures("team_ref", get_by_tag("team", "weather"))
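
A minimal usage sketch of the new multiple-choice round trip (not part of the diff above; distilled from docs/docs_src/user_guide/custom_user_interactions/main_mesop.py in this patch). MultipleChoice, UI.process_message, and the @wf.register signature are used exactly as in that file; the workflow name "pick_a_topic" and the choice values here are made-up placeholders.

    from fastagency import FastAgency, UI
    from fastagency.base import MultipleChoice
    from fastagency.ui.mesop import MesopUI
    from fastagency.runtime.autogen.base import AutoGenWorkflows

    wf = AutoGenWorkflows()


    @wf.register(name="pick_a_topic", description="Minimal multiple-choice demo")  # placeholder name
    def pick_a_topic(ui: UI, initial_message: str, session_id: str) -> str:
        # Per this patch, MesopGUIMessageVisitor.visit_multiple_choice renders the
        # choices as me.RadioOption entries (the default marked with " *"), and
        # process_message returns the user's selection via the feedback loop.
        answer = ui.process_message(
            MultipleChoice(
                sender="workflow",
                recipient="user",
                prompt=initial_message,
                choices=["Mona Lisa", "The Last Supper", "Vitruvian Man"],  # placeholder choices
                default="Mona Lisa",
            )
        )
        return f"Selected: {answer}"


    app = FastAgency(wf=wf, ui=MesopUI())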