From 0b5a6c1b76012b22f17a9b0010d51769eb600632 Mon Sep 17 00:00:00 2001
From: Mark Sze
Date: Thu, 14 Nov 2024 05:05:10 +0000
Subject: [PATCH 01/10] Transfer over to AG2

---
 autogen/agentchat/groupchat.py         | 49 +++++++++++++-
 autogen/agentchat/swarm/__init__.py    |  2 +
 autogen/agentchat/swarm/swarm_agent.py | 90 ++++++++++++++++++++++++++
 autogen/agentchat/swarm/swarm_core.py  |  4 ++
 4 files changed, 142 insertions(+), 3 deletions(-)
 create mode 100644 autogen/agentchat/swarm/__init__.py
 create mode 100644 autogen/agentchat/swarm/swarm_agent.py
 create mode 100644 autogen/agentchat/swarm/swarm_core.py

diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index 4e9e107f92..9630dda9e5 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -23,6 +23,7 @@
 from .agent import Agent
 from .contrib.capabilities import transform_messages
 from .conversable_agent import ConversableAgent
+from .swarm import SwarmAgent

 logger = logging.getLogger(__name__)

@@ -68,6 +69,7 @@ class GroupChat:
     - "manual": the next speaker is selected manually by user input.
     - "random": the next speaker is selected randomly.
     - "round_robin": the next speaker is selected in a round robin fashion, i.e., iterating in the same order as provided in `agents`.
+    - "swarm": utilises the swarm pattern, where agents continue to speak until they hand off to another agent.
     - a customized speaker selection function (Callable): the function will be called to select the next speaker.
         The function should take the last speaker and the group chat as input and return one of the following:
         1. an `Agent` class, it must be one of the agents in the group chat.
@@ -116,7 +118,7 @@ def custom_speaker_selection_func(
     max_round: int = 10
     admin_name: str = "Admin"
     func_call_filter: bool = True
-    speaker_selection_method: Union[Literal["auto", "manual", "random", "round_robin"], Callable] = "auto"
+    speaker_selection_method: Union[Literal["auto", "manual", "random", "round_robin", "swarm"], Callable] = "auto"
     max_retries_for_selecting_speaker: int = 2
     allow_repeat_speaker: Optional[Union[bool, List[Agent]]] = None
     allowed_or_disallowed_speaker_transitions: Optional[Dict] = None
@@ -149,7 +151,7 @@ def custom_speaker_selection_func(
     select_speaker_auto_llm_config: Optional[Union[Dict, Literal[False]]] = None
     role_for_select_speaker_messages: Optional[str] = "system"

-    _VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin"]
+    _VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin", "swarm"]
     _VALID_SPEAKER_TRANSITIONS_TYPE = ["allowed", "disallowed", None]

     # Define a class attribute for the default introduction message
@@ -276,6 +278,10 @@ def __post_init__(self):
         if self.select_speaker_auto_verbose is None or not isinstance(self.select_speaker_auto_verbose, bool):
             raise ValueError("select_speaker_auto_verbose cannot be None or non-bool")

+        # Ensure, for swarms, all agents are swarm agents
+        if self.speaker_selection_method == "swarm" and not all(isinstance(agent, SwarmAgent) for agent in self.agents):
+            raise ValueError("All agents must be of type SwarmAgent when using the 'swarm' speaker selection method.")
+
     @property
     def agent_names(self) -> List[str]:
         """Return the names of the agents in the group chat."""
@@ -419,6 +425,32 @@ def random_select_speaker(self, agents: Optional[List[Agent]] = None) -> Union[A
         agents = self.agents
         return random.choice(agents)

+    def swarm_select_speaker(self, last_speaker: Agent, agents: Optional[List[Agent]] = None) -> Union[Agent, 
None]: + messages = self.messages + + # TODO TODO TODO + + # Always start with the first speaker + if len(messages) <= 1: + return last_speaker + + # If the last message is a tool call, the last agent should execute it + if "tool_calls" in messages[-1]: + return last_speaker # If it's a tool_call then the agent executes it + + # If the last message is a tool response, check if the tool response is the name of the next agent + # Otherwise return the last agent before the tool call + if "tool_responses" in messages[-1]: + tool_call_msg = messages[-1].get("content", "") + if self.agent_by_name(name=tool_call_msg): + return self.agent_by_name(name=messages[-1].get("content", "")) + return self.agent_by_name(name=messages[-2].get("name", "")) + # elif last_speaker in [flight_modification, flight_cancel, flight_change, lost_baggage, triage_agent]: + # return user + + else: + return self.agent_by_name(name=messages[-2].get("name", "")) + def _prepare_and_select_agents( self, last_speaker: Agent, @@ -466,7 +498,9 @@ def _prepare_and_select_agents( f"GroupChat is underpopulated with {n_agents} agents. " "Please add more agents to the GroupChat or use direct communication instead." ) - elif n_agents == 2 and speaker_selection_method.lower() != "round_robin" and allow_repeat_speaker: + elif ( + n_agents == 2 and speaker_selection_method.lower() not in ["round_robin", "swarm"] and allow_repeat_speaker + ): logger.warning( f"GroupChat is underpopulated with {n_agents} agents. " "Consider setting speaker_selection_method to 'round_robin' or allow_repeat_speaker to False, " @@ -536,6 +570,8 @@ def _prepare_and_select_agents( selected_agent = self.next_agent(last_speaker, graph_eligible_agents) elif speaker_selection_method.lower() == "random": selected_agent = self.random_select_speaker(graph_eligible_agents) + elif speaker_selection_method.lower() == "swarm": + selected_agent = self.swarm_select_speaker(last_speaker, graph_eligible_agents) else: # auto selected_agent = None select_speaker_messages = self.messages.copy() @@ -1130,6 +1166,7 @@ def run_chat( messages: Optional[List[Dict]] = None, sender: Optional[Agent] = None, config: Optional[GroupChat] = None, + context_variables: Optional[Dict] = {}, # For Swarms ) -> Tuple[bool, Optional[str]]: """Run a group chat.""" if messages is None: @@ -1148,6 +1185,11 @@ def run_chat( # NOTE: We do not also append to groupchat.messages, # since groupchat handles its own introductions + # Swarm + if self.groupchat.speaker_selection_method == "swarm": + context_variables = copy.deepcopy(context_variables) + config.allow_repeat_speaker = True # Swarms allow the last speaker to be the next speaker + if self.client_cache is not None: for a in groupchat.agents: a.previous_cache = a.client_cache @@ -1210,6 +1252,7 @@ async def a_run_chat( messages: Optional[List[Dict]] = None, sender: Optional[Agent] = None, config: Optional[GroupChat] = None, + context_variables: Optional[Dict] = {}, # For Swarms ): """Run a group chat asynchronously.""" if messages is None: diff --git a/autogen/agentchat/swarm/__init__.py b/autogen/agentchat/swarm/__init__.py new file mode 100644 index 0000000000..32bc33e26e --- /dev/null +++ b/autogen/agentchat/swarm/__init__.py @@ -0,0 +1,2 @@ +from .swarm_agent import * +from .swarm_core import * diff --git a/autogen/agentchat/swarm/swarm_agent.py b/autogen/agentchat/swarm/swarm_agent.py new file mode 100644 index 0000000000..f0a614e164 --- /dev/null +++ b/autogen/agentchat/swarm/swarm_agent.py @@ -0,0 +1,90 @@ +import json +import os +from 
typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union + +from openai.types.chat.chat_completion import ChatCompletion + +import autogen +from autogen.agentchat import Agent, ConversableAgent +from autogen.function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str +from autogen.oai import OpenAIWrapper + + +def parse_json_object(response: str) -> dict: + return json.loads(response) + + +class SwarmAgent(ConversableAgent): + def __init__( + self, + name: str, + system_message: Optional[str] = "You are a helpful AI Assistant.", + llm_config: Optional[Union[Dict, Literal[False]]] = None, + functions: Union[List[Callable], Callable] = None, + is_termination_msg: Optional[Callable[[Dict], bool]] = None, + max_consecutive_auto_reply: Optional[int] = None, + human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER", + description: Optional[str] = None, + **kwargs, + ) -> None: + super().__init__( + name, + system_message, + is_termination_msg, + max_consecutive_auto_reply, + human_input_mode, + llm_config=llm_config, + description=description, + **kwargs, + ) + if isinstance(functions, list): + self.add_functions(functions) + elif isinstance(functions, Callable): + self.add_single_function(functions) + + self._reply_func_list.clear() + self.register_reply([Agent, None], SwarmAgent.generate_reply_with_tool_calls) + + def update_context_variables(self, context_variables: Dict[str, Any]) -> None: + pass + + # return str or any instance of BaseModel from pydantic + + def generate_reply_with_tool_calls( + self, + messages: Optional[List[Dict]] = None, + sender: Optional[Agent] = None, + config: Optional[OpenAIWrapper] = None, + ) -> Tuple[bool, Union[str, Dict, None]]: + + if messages is None: + messages = self._oai_messages[sender] + + messages = self._oai_system_message + [{"role": "user", "content": input}] + response = self.client.create(messages=messages) + + if isinstance(response, ChatCompletion): + response = self.client.extract_text_or_completion_object(response) + if isinstance(response, str): + return response + elif isinstance(response, dict): + _, func_response = self.generate_tool_calls_reply([response]) + return [response, func_response] + else: + raise ValueError("Invalid response type:", type(response)) + + def add_single_function(self, func: Callable, description=""): + func._name = func.__name__ + + if description: + func._description = description + else: + func._description = func.__doc__ + + f = get_function_schema(func, name=func._name, description=func._description) + self.update_tool_signature(f, is_remove=False) + self.register_function({func._name: self._wrap_function(func)}) + + def add_functions(self, func_list: List[Callable]): + for func in func_list: + self.add_single_function(func["func"]) diff --git a/autogen/agentchat/swarm/swarm_core.py b/autogen/agentchat/swarm/swarm_core.py new file mode 100644 index 0000000000..fb22c4e43a --- /dev/null +++ b/autogen/agentchat/swarm/swarm_core.py @@ -0,0 +1,4 @@ +# Parameter name for context variables +# Use the value in functions and they will be substituted with the context variables: +# e.g. 
def my_function(context_variables: Dict[str, Any], my_other_parameters: Any) -> Any: +__CONTEXT_VARIABLES_PARAM_NAME__ = "context_variables" From fb1ad59c5da196a8d5dec4f39cfef25f30580a07 Mon Sep 17 00:00:00 2001 From: Mark Sze Date: Fri, 15 Nov 2024 04:12:46 +0000 Subject: [PATCH 02/10] Added SwarmResult, start of context variables, progress on run_chat --- autogen/agentchat/groupchat.py | 79 ++++++++++++----- autogen/agentchat/swarm/__init__.py | 1 - autogen/agentchat/swarm/swarm_agent.py | 118 ++++++++++++++++++++----- autogen/agentchat/swarm/swarm_core.py | 4 - autogen/function_utils.py | 77 +++++++++++++++- 5 files changed, 228 insertions(+), 51 deletions(-) delete mode 100644 autogen/agentchat/swarm/swarm_core.py diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 9630dda9e5..04e42d2dbe 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -11,7 +11,7 @@ import re import sys from dataclasses import dataclass, field -from typing import Callable, Dict, List, Literal, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union from ..code_utils import content_str from ..exception_utils import AgentNameConflict, NoEligibleSpeaker, UndefinedNextAgent @@ -23,7 +23,7 @@ from .agent import Agent from .contrib.capabilities import transform_messages from .conversable_agent import ConversableAgent -from .swarm import SwarmAgent +from .swarm import SwarmAgent, SwarmResult logger = logging.getLogger(__name__) @@ -111,6 +111,7 @@ def custom_speaker_selection_func( - select_speaker_auto_model_client_cls: Custom model client class for the internal speaker select agent used during 'auto' speaker selection (optional) - select_speaker_auto_llm_config: LLM config for the internal speaker select agent used during 'auto' speaker selection (optional) - role_for_select_speaker_messages: sets the role name for speaker selection when in 'auto' mode, typically 'user' or 'system'. 
(default: 'system')
+    - context_variables: dictionary of context variables for use with swarm-based group chats
     """

     agents: List[Agent]
@@ -150,6 +151,7 @@ def custom_speaker_selection_func(
     select_speaker_auto_model_client_cls: Optional[Union[ModelClient, List[ModelClient]]] = None
     select_speaker_auto_llm_config: Optional[Union[Dict, Literal[False]]] = None
     role_for_select_speaker_messages: Optional[str] = "system"
+    context_variables: Optional[Dict] = None

     _VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin", "swarm"]
     _VALID_SPEAKER_TRANSITIONS_TYPE = ["allowed", "disallowed", None]
@@ -279,8 +281,13 @@ def __post_init__(self):
             raise ValueError("select_speaker_auto_verbose cannot be None or non-bool")

         # Ensure, for swarms, all agents are swarm agents
-        if self.speaker_selection_method == "swarm" and not all(isinstance(agent, SwarmAgent) for agent in self.agents):
-            raise ValueError("All agents must be of type SwarmAgent when using the 'swarm' speaker selection method.")
+        if self.speaker_selection_method == "swarm":
+            """MS TEMP REMOVE
+            if not all(isinstance(agent, SwarmAgent) for agent in self.agents):
+                raise ValueError("All agents must be of type SwarmAgent when using the 'swarm' speaker selection method.")
+            """
+            if not isinstance(self.context_variables, dict):
+                self.context_variables = {}

     @property
     def agent_names(self) -> List[str]:
         """Return the names of the agents in the group chat."""
@@ -426,30 +433,23 @@ def random_select_speaker(self, agents: Optional[List[Agent]] = None) -> Union[A
         return random.choice(agents)

     def swarm_select_speaker(self, last_speaker: Agent, agents: Optional[List[Agent]] = None) -> Union[Agent, None]:
+        """Select the next speaker using the swarm pattern. Note that this does not need to cater for when the agent is continuing to speak."""
         messages = self.messages

-        # TODO TODO TODO
-
         # Always start with the first speaker
         if len(messages) <= 1:
             return last_speaker

-        # If the last message is a tool call, the last agent should execute it
-        if "tool_calls" in messages[-1]:
-            return last_speaker  # If it's a tool_call then the agent executes it
+        last_message = messages[-1]

-        # If the last message is a tool response, check if the tool response is the name of the next agent
-        # Otherwise return the last agent before the tool call
-        if "tool_responses" in messages[-1]:
-            tool_call_msg = messages[-1].get("content", "")
-            if self.agent_by_name(name=tool_call_msg):
-                return self.agent_by_name(name=messages[-1].get("content", ""))
-            return self.agent_by_name(name=messages[-2].get("name", ""))
-        # elif last_speaker in [flight_modification, flight_cancel, flight_change, lost_baggage, triage_agent]:
-        #     return user
+        # If the last message is a TRANSFER message, extract the agent name and return that agent
+        if "content" in last_message and last_message["content"].startswith("TRANSFER:"):
+            agent_name = last_message["content"].split(":")[1].strip()
+            if self.agent_by_name(name=agent_name):
+                return self.agent_by_name(agent_name)

-        else:
-            return self.agent_by_name(name=messages[-2].get("name", ""))
+        # Otherwise, return the agent before the previous one
+        return self.agent_by_name(name=messages[-2].get("name", ""))

     def _prepare_and_select_agents(
         self,
@@ -1166,13 +1166,13 @@ def run_chat(
         messages: Optional[List[Dict]] = None,
         sender: Optional[Agent] = None,
         config: Optional[GroupChat] = None,
-        context_variables: Optional[Dict] = {},  # For Swarms
     ) -> Tuple[bool, Optional[str]]:
         """Run a group chat."""
         if messages is None:
             messages = self._oai_messages[sender]
         message = messages[-1]
         speaker = sender
+        
next_speaker = None # The next swarm agent to speak, determined by the current swarm agent groupchat = config send_introductions = getattr(groupchat, "send_introductions", False) silent = getattr(self, "_silent", False) @@ -1187,7 +1187,6 @@ def run_chat( # Swarm if self.groupchat.speaker_selection_method == "swarm": - context_variables = copy.deepcopy(context_variables) config.allow_repeat_speaker = True # Swarms allow the last speaker to be the next speaker if self.client_cache is not None: @@ -1205,13 +1204,45 @@ def run_chat( # The conversation is over or it's the last round break try: - # select the next speaker - speaker = groupchat.select_speaker(speaker, self) + if next_speaker: + # Speaker has already been selected (swarm) + speaker = next_speaker + next_speaker = None + else: + speaker = groupchat.select_speaker(speaker, self) + if not silent: iostream = IOStream.get_default() iostream.print(colored(f"\nNext speaker: {speaker.name}\n", "green"), flush=True) + + # Update the context_variables on the agent + if self.groupchat.speaker_selection_method == "swarm" and isinstance(speaker, SwarmAgent): + speaker.context_variables.update(groupchat.context_variables) + # let the speaker speak reply = speaker.generate_reply(sender=self) + + # If we have a swarm reply, update context variables + if isinstance(reply, SwarmResult): + if reply.context_variables: + self.groupchat.context_variables.update(reply.context_variables) + + reply_value = "\n".join(reply.values) + + if reply.next_agent is not None: + next_speaker = groupchat.agent_by_name(reply.next_agent) + else: + # If there are multiple replies, it indicates multiple tool calls + # In this case we will see if any of the replies contains an agent Transfer and set the reply to that + if len(reply.values) > 1: + for content in reply.values: + if content in groupchat.agent_names: + reply_value = content + break + + # Replaces the swarm result with string value + reply = reply_value + except KeyboardInterrupt: # let the admin agent speak if interrupted if groupchat.admin_name in groupchat.agent_names: diff --git a/autogen/agentchat/swarm/__init__.py b/autogen/agentchat/swarm/__init__.py index 32bc33e26e..95d4aab864 100644 --- a/autogen/agentchat/swarm/__init__.py +++ b/autogen/agentchat/swarm/__init__.py @@ -1,2 +1 @@ from .swarm_agent import * -from .swarm_core import * diff --git a/autogen/agentchat/swarm/swarm_agent.py b/autogen/agentchat/swarm/swarm_agent.py index f0a614e164..19fa00686b 100644 --- a/autogen/agentchat/swarm/swarm_agent.py +++ b/autogen/agentchat/swarm/swarm_agent.py @@ -1,12 +1,12 @@ import json -import os -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union +from inspect import signature +from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union from openai.types.chat.chat_completion import ChatCompletion +from pydantic import BaseModel -import autogen from autogen.agentchat import Agent, ConversableAgent -from autogen.function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str +from autogen.function_utils import get_function_schema, remove_parameter_from_function_schema from autogen.oai import OpenAIWrapper @@ -14,6 +14,37 @@ def parse_json_object(response: str) -> dict: return json.loads(response) +# Parameter name for context variables +# Use the value in functions and they will be substituted with the context variables: +# e.g. 
def my_function(context_variables: Dict[str, Any], my_other_parameters: Any) -> Any: +__CONTEXT_VARIABLES_PARAM_NAME__ = "context_variables" + + +class SwarmResult: + """ + Encapsulates the possible return values for a swarm agent function. + + arguments: + values (str): The result values as a string. Can be many due to multiple tool calls. + agent (SwarmAgent): The swarm agent instance, if applicable. + context_variables (dict): A dictionary of context variables. + """ + + values: List[str] = [] + agent: Optional["SwarmAgent"] = None + context_variables: dict = {} + + def __init__( + self, + values: str, # Text response, could be the next agent name as well + next_agent: str = None, # The name of the next agent if known + context_variables: dict = {}, + ) -> None: + self.values = values + self.next_agent = next_agent + self.context_variables = context_variables + + class SwarmAgent(ConversableAgent): def __init__( self, @@ -25,6 +56,7 @@ def __init__( max_consecutive_auto_reply: Optional[int] = None, human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER", description: Optional[str] = None, + context_variables: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: super().__init__( @@ -37,6 +69,7 @@ def __init__( description=description, **kwargs, ) + if isinstance(functions, list): self.add_functions(functions) elif isinstance(functions, Callable): @@ -44,32 +77,68 @@ def __init__( self._reply_func_list.clear() self.register_reply([Agent, None], SwarmAgent.generate_reply_with_tool_calls) + self.context_variables = context_variables or {} def update_context_variables(self, context_variables: Dict[str, Any]) -> None: pass - # return str or any instance of BaseModel from pydantic - def generate_reply_with_tool_calls( self, messages: Optional[List[Dict]] = None, sender: Optional[Agent] = None, config: Optional[OpenAIWrapper] = None, - ) -> Tuple[bool, Union[str, Dict, None]]: + ) -> Tuple[bool, SwarmResult]: + client = self.client if config is None else config + if client is None: + return False, None if messages is None: messages = self._oai_messages[sender] - - messages = self._oai_system_message + [{"role": "user", "content": input}] - response = self.client.create(messages=messages) - - if isinstance(response, ChatCompletion): - response = self.client.extract_text_or_completion_object(response) - if isinstance(response, str): - return response - elif isinstance(response, dict): - _, func_response = self.generate_tool_calls_reply([response]) - return [response, func_response] + response = self._generate_oai_reply_from_client(client, self._oai_system_message + messages, self.client_cache) + + if isinstance(response, str): + return True, SwarmResult( + values=[response], + next_agent=self.name, + ) + elif isinstance(response, dict): + # Tool calls, inject context_variables back in to the response before executing the tools + if "tool_calls" in response: + for tool_call in response["tool_calls"]: + if tool_call["type"] == "function": + function_name = tool_call["function"]["name"] + + # Check if this function exists in our function map + if function_name in self._function_map: + func = self._function_map[function_name] # Get the original function + + # Check if function has context_variables parameter + sig = signature(func) + needs_context = __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters + + if needs_context: + # Parse existing arguments + try: + current_args = json.loads(tool_call["function"]["arguments"]) + except json.JSONDecodeError: + current_args = {} + + # Inject 
context_variables + updated_args = {"context_variables": self.context_variables, **current_args} + + # Update the tool call with new arguments + tool_call["function"]["arguments"] = json.dumps(updated_args) + + _, func_response = self.generate_tool_calls_reply([response]) + + return_values = [] + for response in func_response["tool_responses"]: + return_values.append(response["content"]) + + return True, SwarmResult( + values=return_values, + next_agent=None, + ) else: raise ValueError("Invalid response type:", type(response)) @@ -79,12 +148,19 @@ def add_single_function(self, func: Callable, description=""): if description: func._description = description else: - func._description = func.__doc__ + # Use function's docstring, strip whitespace, fall back to empty string + func._description = (func.__doc__ or "").strip() f = get_function_schema(func, name=func._name, description=func._description) - self.update_tool_signature(f, is_remove=False) + + # Remove the context_variable parameter from the function signature stored in self.llm_config["tools"] + # This is done to prevent the context_variable parameter from being passed to the function when it is called + # by the LLM + f_no_context_variable = remove_parameter_from_function_schema(f, __CONTEXT_VARIABLES_PARAM_NAME__) + self.update_tool_signature(f_no_context_variable, is_remove=False) + self.register_function({func._name: self._wrap_function(func)}) def add_functions(self, func_list: List[Callable]): for func in func_list: - self.add_single_function(func["func"]) + self.add_single_function(func) diff --git a/autogen/agentchat/swarm/swarm_core.py b/autogen/agentchat/swarm/swarm_core.py deleted file mode 100644 index fb22c4e43a..0000000000 --- a/autogen/agentchat/swarm/swarm_core.py +++ /dev/null @@ -1,4 +0,0 @@ -# Parameter name for context variables -# Use the value in functions and they will be substituted with the context variables: -# e.g. def my_function(context_variables: Dict[str, Any], my_other_parameters: Any) -> Any: -__CONTEXT_VARIABLES_PARAM_NAME__ = "context_variables" diff --git a/autogen/function_utils.py b/autogen/function_utils.py index f4a6531fe5..7a814d2797 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -8,7 +8,7 @@ import inspect import json from logging import getLogger -from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union +from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union, cast from pydantic import BaseModel, Field from typing_extensions import Annotated, Literal, get_args, get_origin @@ -214,6 +214,81 @@ def get_missing_annotations(typed_signature: inspect.Signature, required: List[s return missing, unannotated_with_default +def remove_parameter_from_function_schema(function_schema: Dict[str, Any], parameter_name: str) -> Dict[str, Any]: + """ + Removes a parameter from a function schema by updating both the properties + and required fields. 
+ + Args: + function_schema (Dict[str, Any]): The function schema to modify + parameter_name (str): The name of the parameter to remove + + Returns: + Dict[str, Any]: Updated function schema with the parameter removed + + Raises: + ValueError: If the schema structure is invalid or parameter_name not found + """ + # Convert string to dict if needed + schema_dict: Dict[str, Any] + if isinstance(function_schema, str): + try: + schema_dict = cast(Dict[str, Any], json.loads(function_schema.replace("'", '"'))) + except json.JSONDecodeError as e: + raise ValueError(f"Invalid function schema string: {e}") + else: + schema_dict = function_schema + + # Validate schema structure + if not isinstance(schema_dict, dict): + raise ValueError("Function schema must be a dictionary or valid JSON string") + + if "type" not in schema_dict or schema_dict["type"] != "function": + raise ValueError("Schema must have 'type': 'function'") + + if "function" not in schema_dict: + raise ValueError("Schema must have a 'function' key") + + function_def = schema_dict["function"] + if not isinstance(function_def, dict): + raise ValueError("Function definition must be a dictionary") + + if "parameters" not in function_def: + raise ValueError("Schema must have 'parameters' in function definition") + + parameters = function_def["parameters"] + if not isinstance(parameters, dict): + raise ValueError("Parameters must be a dictionary") + + if "properties" not in parameters: + raise ValueError("Schema must have 'properties' in parameters") + + # Create a deep copy to avoid modifying the original + updated_schema: Dict[str, Any] = cast(Dict[str, Any], json.loads(json.dumps(schema_dict))) + + # Remove from properties if it exists + properties = updated_schema["function"]["parameters"]["properties"] + if not isinstance(properties, dict): + raise ValueError("Properties must be a dictionary") + + if parameter_name in properties: + del properties[parameter_name] + + # Remove from required if it exists + if "required" in updated_schema["function"]["parameters"]: + required = updated_schema["function"]["parameters"]["required"] + if not isinstance(required, list): + raise ValueError("Required must be a list") + + updated_schema["function"]["parameters"]["required"] = [param for param in required if param != parameter_name] + + # If required list is empty, remove it + if not updated_schema["function"]["parameters"]["required"]: + del updated_schema["function"]["parameters"]["required"] + + return updated_schema + + def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: """Get a JSON schema for a function as defined by the OpenAI API From c4ebae4b7a4af6fe943055c512e2fd37a1a43926 Mon Sep 17 00:00:00 2001 From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com> Date: Sat, 16 Nov 2024 16:44:30 -0500 Subject: [PATCH 03/10] update --- autogen/agentchat/conversable_agent.py | 16 ++++-- autogen/agentchat/groupchat.py | 4 +- autogen/agentchat/swarm/swarm_agent.py | 65 ++++++++++------------ autogen/function_utils.py | 76 -------------------------- 4 files changed, 40 insertions(+), 121 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 35183c865a..8573ad2b78 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -659,6 +659,9 @@ def _append_oai_message(self, message: Union[Dict, str], role, conversation_id: if message.get("role") in ["function", "tool"]: oai_message["role"] = 
message.get("role") + if "tool_responses" in oai_message: + for tool_response in oai_message["tool_responses"]: + tool_response["content"] = str(tool_response["content"]) elif "override_role" in message: # If we have a direction to override the role then set the # role accordingly. Used to customise the role for the @@ -791,15 +794,16 @@ async def a_send( "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided." ) - def _print_received_message(self, message: Union[Dict, str], sender: Agent): + def _print_received_message(self, message: Union[Dict, str], sender: Agent, skip_head: bool = False): iostream = IOStream.get_default() # print the message received - iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True) + if not skip_head: + iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True) message = self._message_to_dict(message) if message.get("tool_responses"): # Handle tool multi-call responses for tool_response in message["tool_responses"]: - self._print_received_message(tool_response, sender) + self._print_received_message(tool_response, sender, skip_head=True) if message.get("role") == "tool": return # If role is tool, then content is just a concatenation of all tool_responses @@ -2288,7 +2292,7 @@ def _format_json_str(jstr): result.append(char) return "".join(result) - def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, str]]: + def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, Any]]: """Execute a function call and return the result. Override this function to modify the way to execute function and tool calls. @@ -2342,7 +2346,7 @@ def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict return is_exec_success, { "name": func_name, "role": "function", - "content": str(content), + "content": content, } async def a_execute_function(self, func_call): @@ -2397,7 +2401,7 @@ async def a_execute_function(self, func_call): return is_exec_success, { "name": func_name, "role": "function", - "content": str(content), + "content": content, } def generate_init_message(self, message: Union[Dict, str, None], **kwargs) -> Union[str, Dict]: diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 04e42d2dbe..6e9d987528 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -151,7 +151,7 @@ def custom_speaker_selection_func( select_speaker_auto_model_client_cls: Optional[Union[ModelClient, List[ModelClient]]] = None select_speaker_auto_llm_config: Optional[Union[Dict, Literal[False]]] = None role_for_select_speaker_messages: Optional[str] = "system" - context_variables: Optional[Dict] = None + context_variables: Optional[Dict] = field(default_factory=dict) _VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin", "swarm"] _VALID_SPEAKER_TRANSITIONS_TYPE = ["allowed", "disallowed", None] @@ -286,8 +286,6 @@ def __post_init__(self): if not all(isinstance(agent, SwarmAgent) for agent in self.agents): raise ValueError("All agents must be of type SwarmAgent when using the 'swarm' speaker selection method.") """ - if not isinstance(self.context_variables, dict): - self.context_variables = {} @property def agent_names(self) -> List[str]: diff --git a/autogen/agentchat/swarm/swarm_agent.py b/autogen/agentchat/swarm/swarm_agent.py index 19fa00686b..c6250a455c 100644 --- a/autogen/agentchat/swarm/swarm_agent.py +++ 
b/autogen/agentchat/swarm/swarm_agent.py @@ -6,7 +6,7 @@ from pydantic import BaseModel from autogen.agentchat import Agent, ConversableAgent -from autogen.function_utils import get_function_schema, remove_parameter_from_function_schema +from autogen.function_utils import get_function_schema from autogen.oai import OpenAIWrapper @@ -20,31 +20,20 @@ def parse_json_object(response: str) -> dict: __CONTEXT_VARIABLES_PARAM_NAME__ = "context_variables" -class SwarmResult: +class SwarmResult(BaseModel): """ Encapsulates the possible return values for a swarm agent function. arguments: - values (str): The result values as a string. Can be many due to multiple tool calls. + values (str): The result values as a string. agent (SwarmAgent): The swarm agent instance, if applicable. context_variables (dict): A dictionary of context variables. """ - values: List[str] = [] + values: str = "" agent: Optional["SwarmAgent"] = None context_variables: dict = {} - def __init__( - self, - values: str, # Text response, could be the next agent name as well - next_agent: str = None, # The name of the next agent if known - context_variables: dict = {}, - ) -> None: - self.values = values - self.next_agent = next_agent - self.context_variables = context_variables - - class SwarmAgent(ConversableAgent): def __init__( self, @@ -82,6 +71,9 @@ def __init__( def update_context_variables(self, context_variables: Dict[str, Any]) -> None: pass + def __str__(self): + return f"SwarmAgent: {self.name}" + def generate_reply_with_tool_calls( self, messages: Optional[List[Dict]] = None, @@ -95,10 +87,11 @@ def generate_reply_with_tool_calls( if messages is None: messages = self._oai_messages[sender] response = self._generate_oai_reply_from_client(client, self._oai_system_message + messages, self.client_cache) - + + print(response) if isinstance(response, str): return True, SwarmResult( - values=[response], + values=response, next_agent=self.name, ) elif isinstance(response, dict): @@ -114,20 +107,11 @@ def generate_reply_with_tool_calls( # Check if function has context_variables parameter sig = signature(func) - needs_context = __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters - - if needs_context: - # Parse existing arguments - try: - current_args = json.loads(tool_call["function"]["arguments"]) - except json.JSONDecodeError: - current_args = {} - - # Inject context_variables - updated_args = {"context_variables": self.context_variables, **current_args} - + if __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters: + current_args = json.loads(tool_call["function"]["arguments"]) + current_args[__CONTEXT_VARIABLES_PARAM_NAME__] = self.context_variables # Update the tool call with new arguments - tool_call["function"]["arguments"] = json.dumps(updated_args) + tool_call["function"]["arguments"] = json.dumps(current_args) _, func_response = self.generate_tool_calls_reply([response]) @@ -153,14 +137,23 @@ def add_single_function(self, func: Callable, description=""): f = get_function_schema(func, name=func._name, description=func._description) - # Remove the context_variable parameter from the function signature stored in self.llm_config["tools"] - # This is done to prevent the context_variable parameter from being passed to the function when it is called - # by the LLM - f_no_context_variable = remove_parameter_from_function_schema(f, __CONTEXT_VARIABLES_PARAM_NAME__) - self.update_tool_signature(f_no_context_variable, is_remove=False) - + # Remove context_variables parameter from function schema + f_no_context = f.copy() + if 
__CONTEXT_VARIABLES_PARAM_NAME__ in f_no_context["function"]["parameters"]["properties"]: + del f_no_context["function"]["parameters"]["properties"][__CONTEXT_VARIABLES_PARAM_NAME__] + if "required" in f_no_context["function"]["parameters"]: + required = f_no_context["function"]["parameters"]["required"] + f_no_context["function"]["parameters"]["required"] = [param for param in required if param != __CONTEXT_VARIABLES_PARAM_NAME__] + # If required list is empty, remove it + if not f_no_context["function"]["parameters"]["required"]: + del f_no_context["function"]["parameters"]["required"] + + self.update_tool_signature(f_no_context, is_remove=False) self.register_function({func._name: self._wrap_function(func)}) + def add_functions(self, func_list: List[Callable]): for func in func_list: self.add_single_function(func) + + print(self.llm_config['tools']) diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 7a814d2797..06be34b6fa 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -213,82 +213,6 @@ def get_missing_annotations(typed_signature: inspect.Signature, required: List[s unannotated_with_default = all_missing.difference(missing) return missing, unannotated_with_default - -def remove_parameter_from_function_schema(function_schema: Dict[str, Any], parameter_name: str) -> Dict[str, Any]: - """ - Removes a parameter from a function schema by updating both the properties - and required fields. - - Args: - function_schema (Dict[str, Any]): The function schema to modify - parameter_name (str): The name of the parameter to remove - - Returns: - Dict[str, Any]: Updated function schema with the parameter removed - - Raises: - ValueError: If the schema structure is invalid or parameter_name not found - """ - # Convert string to dict if needed - schema_dict: Dict[str, Any] - if isinstance(function_schema, str): - try: - schema_dict = cast(Dict[str, Any], json.loads(function_schema.replace("'", '"'))) - except json.JSONDecodeError as e: - raise ValueError(f"Invalid function schema string: {e}") - else: - schema_dict = function_schema - - # Validate schema structure - if not isinstance(schema_dict, dict): - raise ValueError("Function schema must be a dictionary or valid JSON string") - - if "type" not in schema_dict or schema_dict["type"] != "function": - raise ValueError("Schema must have 'type': 'function'") - - if "function" not in schema_dict: - raise ValueError("Schema must have a 'function' key") - - function_def = schema_dict["function"] - if not isinstance(function_def, dict): - raise ValueError("Function definition must be a dictionary") - - if "parameters" not in function_def: - raise ValueError("Schema must have 'parameters' in function definition") - - parameters = function_def["parameters"] - if not isinstance(parameters, dict): - raise ValueError("Parameters must be a dictionary") - - if "properties" not in parameters: - raise ValueError("Schema must have 'properties' in parameters") - - # Create a deep copy to avoid modifying the original - updated_schema: Dict[str, Any] = cast(Dict[str, Any], json.loads(json.dumps(schema_dict))) - - # Remove from properties if it exists - properties = updated_schema["function"]["parameters"]["properties"] - if not isinstance(properties, dict): - raise ValueError("Properties must be a dictionary") - - if parameter_name in properties: - del properties[parameter_name] - - # Remove from required if it exists - if "required" in updated_schema["function"]["parameters"]: - required = 
updated_schema["function"]["parameters"]["required"] - if not isinstance(required, list): - raise ValueError("Required must be a list") - - updated_schema["function"]["parameters"]["required"] = [param for param in required if param != parameter_name] - - # If required list is empty, remove it - if not updated_schema["function"]["parameters"]["required"]: - del updated_schema["function"]["parameters"]["required"] - - return updated_schema - - def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: """Get a JSON schema for a function as defined by the OpenAI API From 47ae671320a11c94f24d4761b05b82ffc05693f3 Mon Sep 17 00:00:00 2001 From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com> Date: Sat, 16 Nov 2024 17:07:53 -0500 Subject: [PATCH 04/10] merge --- .devcontainer/README.md | 2 +- .github/PULL_REQUEST_TEMPLATE.md | 4 ++-- MAINTAINERS.md | 2 +- README.md | 6 +++--- autogen/agentchat/conversable_agent.py | 2 +- autogen/oai/completion.py | 4 ++-- autogen/oai/gemini.py | 2 +- notebook/agentchat_MathChat.ipynb | 4 ++-- notebook/agentchat_RetrieveChat.ipynb | 8 ++++---- notebook/agentchat_RetrieveChat_mongodb.ipynb | 6 +++--- notebook/agentchat_RetrieveChat_pgvector.ipynb | 8 ++++---- notebook/agentchat_RetrieveChat_qdrant.ipynb | 6 +++--- notebook/agentchat_agentoptimizer.ipynb | 2 +- notebook/agentchat_cost_token_tracking.ipynb | 2 +- notebook/agentchat_custom_model.ipynb | 2 +- notebook/agentchat_databricks_dbrx.ipynb | 12 ++++++------ notebook/agentchat_function_call.ipynb | 4 ++-- notebook/agentchat_function_call_async.ipynb | 2 +- ...entchat_function_call_currency_calculator.ipynb | 4 ++-- notebook/agentchat_groupchat.ipynb | 4 ++-- notebook/agentchat_groupchat_RAG.ipynb | 4 ++-- notebook/agentchat_groupchat_customized.ipynb | 4 ++-- .../agentchat_groupchat_finite_state_machine.ipynb | 2 +- notebook/agentchat_groupchat_research.ipynb | 4 ++-- notebook/agentchat_groupchat_stateflow.ipynb | 2 +- notebook/agentchat_groupchat_vis.ipynb | 4 ++-- notebook/agentchat_human_feedback.ipynb | 4 ++-- notebook/agentchat_inception_function.ipynb | 2 +- notebook/agentchat_langchain.ipynb | 4 ++-- notebook/agentchat_microsoft_fabric.ipynb | 6 +++--- notebook/agentchat_oai_assistant_groupchat.ipynb | 4 ++-- notebook/agentchat_oai_code_interpreter.ipynb | 2 +- notebook/agentchat_planning.ipynb | 4 ++-- notebook/agentchat_stream.ipynb | 4 ++-- notebook/agentchat_surfer.ipynb | 2 +- notebook/agentchat_swarm.ipynb | 4 ++-- notebook/agentchat_teachability.ipynb | 2 +- notebook/agentchat_teachable_oai_assistants.ipynb | 4 ++-- notebook/agentchat_teaching.ipynb | 2 +- notebook/agentchat_two_users.ipynb | 4 ++-- notebook/agentchat_web_info.ipynb | 4 ++-- notebook/agentchat_websockets.ipynb | 12 ++++++------ notebook/autobuild_basic.ipynb | 2 +- notebook/autogen_uniformed_api_calling.ipynb | 2 +- notebook/config_loader_utility_functions.ipynb | 2 +- notebook/oai_chatgpt_gpt4.ipynb | 2 +- notebook/oai_completion.ipynb | 4 ++-- .../capabilities/chat_with_teachable_agent.py | 2 +- .../contrib/capabilities/test_teachable_agent.py | 2 +- test/twoagent.py | 2 +- website/blog/2023-10-18-RetrieveChat/index.mdx | 2 +- website/blog/2023-10-26-TeachableAgent/index.mdx | 2 +- website/blog/2023-12-01-AutoGenStudio/index.mdx | 12 ++++++------ website/blog/2023-12-23-AgentOptimizer/index.mdx | 2 +- .../blog/2023-12-29-AgentDescriptions/index.mdx | 6 +++--- .../2024-01-23-Code-execution-in-docker/index.mdx | 4 ++-- website/blog/2024-01-25-AutoGenBench/index.mdx 
| 8 ++++---- website/blog/2024-02-02-AutoAnny/index.mdx | 4 ++-- website/blog/2024-02-11-FSM-GroupChat/index.mdx | 2 +- .../blog/2024-06-24-AltModels-Classes/index.mdx | 14 +++++++------- website/blog/2024-07-25-AgentOps/index.mdx | 2 +- website/docs/Examples.md | 6 +++--- website/docs/FAQ.mdx | 12 ++++++------ website/docs/Migration-Guide.md | 2 +- website/docs/Use-Cases/agent_chat.md | 2 +- website/docs/autogen-studio/faqs.md | 2 +- website/docs/autogen-studio/getting-started.md | 8 ++++---- website/docs/contributor-guide/docker.md | 2 +- website/docs/ecosystem/promptflow.md | 2 +- .../non-openai-models/local-litellm-ollama.ipynb | 2 +- .../docs/topics/non-openai-models/local-vllm.md | 2 +- website/docs/topics/retrieval_augmentation.md | 2 +- 72 files changed, 144 insertions(+), 144 deletions(-) diff --git a/.devcontainer/README.md b/.devcontainer/README.md index 9e8b918582..390bf39925 100644 --- a/.devcontainer/README.md +++ b/.devcontainer/README.md @@ -26,7 +26,7 @@ These configurations can be used with Codespaces and locally. - **Usage**: Recommended for developers who are contributing to the AutoGen project. - **Building the Image**: Run `docker build -f dev/Dockerfile -t ag2_dev_img .`. - **Using with Codespaces**: `Code > Codespaces > Click on ...> New with options > Choose "dev" as devcontainer configuration`. This image may require a Codespace with at least 64GB of disk space. -- **Before using**: We highly encourage all potential contributors to read the [AutoGen Contributing](https://ag2ai.github.io/autogen/docs/Contribute) page prior to submitting any pull requests. +- **Before using**: We highly encourage all potential contributors to read the [AutoGen Contributing](https://ag2ai.github.io/ag2/docs/Contribute) page prior to submitting any pull requests. ## Customizing Dockerfiles diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 78f0647c00..9f064605c5 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,4 +1,4 @@ - + @@ -12,6 +12,6 @@ ## Checks -- [ ] I've included any doc changes needed for https://ag2ai.github.io/autogen/. See https://ag2ai.github.io/autogen/docs/Contribute#documentation to build and test documentation locally. +- [ ] I've included any doc changes needed for https://ag2ai.github.io/autogen/. See https://ag2ai.github.io/ag2/docs/Contribute#documentation to build and test documentation locally. - [ ] I've added tests (if relevant) corresponding to the changes introduced in this PR. - [ ] I've made sure all auto checks have passed. diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 60a6a017b5..76d86b083a 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -27,7 +27,7 @@ | Evan David * | [evandavid1](https://github.com/evandavid1) | - | gpt assistant, group chat, rag, autobuild | ## I would like to join this list. How can I help the project? -> We're always looking for new contributors to join our team and help improve the project. For more information, please refer to our [CONTRIBUTING](https://ag2ai.github.io/autogen/docs/contributor-guide/contributing) guide. +> We're always looking for new contributors to join our team and help improve the project. For more information, please refer to our [CONTRIBUTING](https://ag2ai.github.io/ag2/docs/contributor-guide/contributing) guide. ## Are you missing from this list? 
diff --git a/README.md b/README.md index 5619fec373..56cc33c3a1 100644 --- a/README.md +++ b/README.md @@ -195,7 +195,7 @@ The figure below shows an example conversation flow with AG2. ![Agent Chat Example](./website/static/img/chat_example.png) -Alternatively, the [sample code](https://github.com/ag2ai/build-with-autogen/blob/main/samples/simple_chat.py) here allows a user to chat with an AG2 agent in ChatGPT style. +Alternatively, the [sample code](https://github.com/ag2ai/build-with-ag2/blob/main/samples/simple_chat.py) here allows a user to chat with an AG2 agent in ChatGPT style. Please find more [code examples](https://ag2ai.github.io/ag2/docs/Examples#automated-multi-agent-chat) for this feature.

@@ -254,12 +254,12 @@ In addition, you can find: ## CookBook Explore detailed implementations with sample code and applications to help you get started with AG2. -[Cookbook](https://github.com/ag2ai/build-with-autogen) +[Cookbook](https://github.com/ag2ai/build-with-ag2) ## Related Papers -[AG2](https://arxiv.org/abs/2308.08155) +[AutoGen](https://arxiv.org/abs/2308.08155) ``` @inproceedings{wu2023autogen, diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 8573ad2b78..840da79204 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -164,7 +164,7 @@ def __init__( except TypeError as e: raise TypeError( "Please implement __deepcopy__ method for each value class in llm_config to support deepcopy." - " Refer to the docs for more details: https://ag2ai.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy" + " Refer to the docs for more details: https://ag2ai.github.io/ag2/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy" ) from e self._validate_llm_config(llm_config) diff --git a/autogen/oai/completion.py b/autogen/oai/completion.py index aaddfff32c..72886dd857 100644 --- a/autogen/oai/completion.py +++ b/autogen/oai/completion.py @@ -742,7 +742,7 @@ def create( E.g., `prompt="Complete the following sentence: {prefix}, context={"prefix": "Today I feel"}`. The actual prompt will be: "Complete the following sentence: Today I feel". - More examples can be found at [templating](https://ag2ai.github.io/autogen/docs/Use-Cases/enhanced_inference#templating). + More examples can be found at [templating](https://ag2ai.github.io/ag2/docs/Use-Cases/enhanced_inference#templating). use_cache (bool, Optional): Whether to use cached responses. config_list (List, Optional): List of configurations for the completion to try. The first one that does not raise an error will be used. @@ -804,7 +804,7 @@ def yes_or_no_filter(context, config, response): logger.warning( "Completion.create is deprecated in autogen, pyautogen v0.2 and openai>=1. " "The new openai requires initiating a client for inference. " - "Please refer to https://ag2ai.github.io/autogen/docs/Use-Cases/enhanced_inference#api-unification" + "Please refer to https://ag2ai.github.io/ag2/docs/Use-Cases/enhanced_inference#api-unification" ) if ERROR: raise ERROR diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py index fee3418c78..ad7bb3cf41 100644 --- a/autogen/oai/gemini.py +++ b/autogen/oai/gemini.py @@ -171,7 +171,7 @@ def create(self, params: Dict) -> ChatCompletion: raise ValueError( "Please provide a model name for the Gemini Client. " "You can configure it in the OAI Config List file. " - "See this [LLM configuration tutorial](https://ag2ai.github.io/autogen/docs/topics/llm_configuration/) for more details." + "See this [LLM configuration tutorial](https://ag2ai.github.io/ag2/docs/topics/llm_configuration/) for more details." ) params.get("api_type", "google") # not used diff --git a/notebook/agentchat_MathChat.ipynb b/notebook/agentchat_MathChat.ipynb index a84a1a6400..5c6fcc30c8 100644 --- a/notebook/agentchat_MathChat.ipynb +++ b/notebook/agentchat_MathChat.ipynb @@ -7,7 +7,7 @@ "source": [ "# Auto Generated Agent Chat: Using MathChat to Solve Math Problems\n", "\n", - "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. 
This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "MathChat is an experimental conversational framework for math problem solving. In this notebook, we demonstrate how to use MathChat to solve math problems. MathChat uses the `AssistantAgent` and `MathUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `MathUserProxyAgent` implements a different auto reply mechanism corresponding to the MathChat prompts. You can find more details in the paper [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337) or the [blogpost](https://ag2ai.github.io/autogen/blog/2023/06/28/MathChat).\n", "\n", @@ -31,7 +31,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { diff --git a/notebook/agentchat_RetrieveChat.ipynb b/notebook/agentchat_RetrieveChat.ipynb index 2b2306445a..9509fb8134 100644 --- a/notebook/agentchat_RetrieveChat.ipynb +++ b/notebook/agentchat_RetrieveChat.ipynb @@ -8,7 +8,7 @@ "# Using RetrieveChat for Retrieve Augmented Code Generation and Question Answering\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). 
Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n", "\n", @@ -43,7 +43,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { @@ -141,8 +141,8 @@ ")\n", "\n", "# 2. create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n", - "# Refer to https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", - "# and https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/vectordb/chromadb\n", + "# Refer to https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", + "# and https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/vectordb/chromadb\n", "# for more information on the RetrieveUserProxyAgent and ChromaVectorDB\n", "ragproxyagent = RetrieveUserProxyAgent(\n", " name=\"ragproxyagent\",\n", diff --git a/notebook/agentchat_RetrieveChat_mongodb.ipynb b/notebook/agentchat_RetrieveChat_mongodb.ipynb index 886ffeb8e4..ad91c98d30 100644 --- a/notebook/agentchat_RetrieveChat_mongodb.ipynb +++ b/notebook/agentchat_RetrieveChat_mongodb.ipynb @@ -8,7 +8,7 @@ "# Using RetrieveChat Powered by MongoDB Atlas for Retrieve Augmented Code Generation and Question Answering\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n", "\n", @@ -123,8 +123,8 @@ ")\n", "\n", "# 2. 
create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n", - "# Refer to https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", - "# and https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/vectordb/mongodb\n", + "# Refer to https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", + "# and https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/vectordb/mongodb\n", "# for more information on the RetrieveUserProxyAgent and MongoDBAtlasVectorDB\n", "ragproxyagent = RetrieveUserProxyAgent(\n", " name=\"ragproxyagent\",\n", diff --git a/notebook/agentchat_RetrieveChat_pgvector.ipynb b/notebook/agentchat_RetrieveChat_pgvector.ipynb index bc466f80d2..eca7654cc3 100644 --- a/notebook/agentchat_RetrieveChat_pgvector.ipynb +++ b/notebook/agentchat_RetrieveChat_pgvector.ipynb @@ -8,7 +8,7 @@ "# Using RetrieveChat Powered by PGVector for Retrieve Augmented Code Generation and Question Answering\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentation that is not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implements a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n", "\n", @@ -67,7 +67,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { @@ -175,8 +175,8 @@ "sentence_transformer_ef = SentenceTransformer(\"all-distilroberta-v1\").encode\n", "\n", "# 2.
create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n", - "# Refer to https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", - "# and https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/vectordb/pgvectordb\n", + "# Refer to https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", + "# and https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/vectordb/pgvectordb\n", "# for more information on the RetrieveUserProxyAgent and PGVectorDB\n", "ragproxyagent = RetrieveUserProxyAgent(\n", " name=\"ragproxyagent\",\n", diff --git a/notebook/agentchat_RetrieveChat_qdrant.ipynb b/notebook/agentchat_RetrieveChat_qdrant.ipynb index 0b300cb12b..392147f92f 100644 --- a/notebook/agentchat_RetrieveChat_qdrant.ipynb +++ b/notebook/agentchat_RetrieveChat_qdrant.ipynb @@ -53,7 +53,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { @@ -167,8 +167,8 @@ "client = QdrantClient(\":memory:\")\n", "\n", "# 2. create the RetrieveUserProxyAgent instance named \"ragproxyagent\"\n", - "# Refer to https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", - "# and https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/vectordb/qdrant\n", + "# Refer to https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/retrieve_user_proxy_agent\n", + "# and https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/vectordb/qdrant\n", "# for more information on the RetrieveUserProxyAgent and QdrantVectorDB\n", "ragproxyagent = RetrieveUserProxyAgent(\n", " name=\"ragproxyagent\",\n", diff --git a/notebook/agentchat_agentoptimizer.ipynb b/notebook/agentchat_agentoptimizer.ipynb index 3e81b4ac47..764c944047 100644 --- a/notebook/agentchat_agentoptimizer.ipynb +++ b/notebook/agentchat_agentoptimizer.ipynb @@ -7,7 +7,7 @@ "# AgentOptimizer: An Agentic Way to Train Your LLM Agent\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In a traditional ML pipeline, we train a model by updating its parameters according to the loss on the training set, while in the era of LLM agents, how should we train an agent? Here, we take an initial step towards agent training. Inspired by the [function calling](https://platform.openai.com/docs/guides/function-calling) capabilities provided by OpenAI, we draw an analogy between model parameters and agent functions/skills, and update the agent’s functions/skills based on its historical performance on the training set.
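In code, that training loop might look roughly like the following sketch; the class and method names follow the AgentOptimizer notebook, while `config_list` and `chat_history` (a recorded conversation plus a success label) are assumed inputs:

```python
from autogen.agentchat.contrib.agent_optimizer import AgentOptimizer

optimizer = AgentOptimizer(max_actions_per_step=3, llm_config={"config_list": config_list})

# Record a completed training conversation along with whether it succeeded,
# then ask the optimizer to propose function add/revise/remove actions.
optimizer.record_one_conversation(chat_history, is_satisfied=True)
register_for_llm, register_for_executor = optimizer.step()
```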
As an agentic way of training an agent, our approach helps enhance the agents’ abilities without requiring access to the LLM’s parameters.\n", "\n", diff --git a/notebook/agentchat_cost_token_tracking.ipynb b/notebook/agentchat_cost_token_tracking.ipynb index cd4ddac30a..92ee8d1b97 100644 --- a/notebook/agentchat_cost_token_tracking.ipynb +++ b/notebook/agentchat_cost_token_tracking.ipynb @@ -53,7 +53,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { diff --git a/notebook/agentchat_custom_model.ipynb b/notebook/agentchat_custom_model.ipynb index 3f13efef23..354f1c2556 100644 --- a/notebook/agentchat_custom_model.ipynb +++ b/notebook/agentchat_custom_model.ipynb @@ -210,7 +210,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n", + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n", "\n", "It first looks for an environment variable of a specified name (\"OAI_CONFIG_LIST\" in this example), which needs to be a valid json string. If that variable is not found, it looks for a json file with the same name. It filters the configs by models (you can filter by other keys as well).\n", "\n", diff --git a/notebook/agentchat_databricks_dbrx.ipynb b/notebook/agentchat_databricks_dbrx.ipynb index 63a57d8130..daddc8d3f5 100644 --- a/notebook/agentchat_databricks_dbrx.ipynb +++ b/notebook/agentchat_databricks_dbrx.ipynb @@ -10,7 +10,7 @@ "\n", "In March 2024, Databricks released [DBRX](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm), a general-purpose LLM that sets a new standard for open LLMs. While available as an open-source model on Hugging Face ([databricks/dbrx-instruct](https://huggingface.co/databricks/dbrx-instruct/tree/main) and [databricks/dbrx-base](https://huggingface.co/databricks/dbrx-base) ), customers of Databricks can also tap into the [Foundation Model APIs](https://docs.databricks.com/en/machine-learning/model-serving/score-foundation-models.html#query-a-chat-completion-model), which make DBRX available through an OpenAI-compatible, autoscaling REST API.\n", "\n", - "[Autogen](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat) is becoming a popular standard for agent creation. Built to support any \"LLM as a service\" that implements the OpenAI SDK, it can easily be extended to integrate with powerful open source models. \n", + "[Autogen](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat) is becoming a popular standard for agent creation. Built to support any \"LLM as a service\" that implements the OpenAI SDK, it can easily be extended to integrate with powerful open source models. \n", "\n", "This notebook will demonstrate a few basic examples of Autogen with DBRX, including the use of `AssistantAgent`, `UserProxyAgent`, and `ConversableAgent`.
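A minimal sketch of such a two-agent exchange, assuming a `config_list` loaded as in the preceding cells (the question is illustrative):

```python
from autogen import AssistantAgent, UserProxyAgent

assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user_proxy = UserProxyAgent(
    "user_proxy",
    human_input_mode="ALWAYS",    # prompt the human after each assistant turn
    code_execution_config=False,  # no code execution in this hello-world sketch
)

# The proxy relays the question to the assistant; type `exit` at the prompt to end the chat.
user_proxy.initiate_chat(assistant, message="What is 4 + 4?")
```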
These demos are not intended to be exhaustive - feel free to use them as a base to build upon!\n", "\n", @@ -76,7 +76,7 @@ "source": [ "## Setup DBRX config list\n", "\n", - "See Autogen docs for more inforation on the use of `config_list`: [LLM Configuration](https://ag2ai.github.io/autogen/docs/topics/llm_configuration#why-is-it-a-list)" + "See Autogen docs for more information on the use of `config_list`: [LLM Configuration](https://ag2ai.github.io/ag2/docs/topics/llm_configuration#why-is-it-a-list)" ] }, { @@ -116,7 +116,7 @@ "source": [ "## Hello World Example\n", "\n", - "Our first example will be with a simple `UserProxyAgent` asking a question to an `AssistantAgent`. This is based on the tutorial demo [here](https://ag2ai.github.io/autogen/docs/tutorial/introduction).\n", + "Our first example will be with a simple `UserProxyAgent` asking a question to an `AssistantAgent`. This is based on the tutorial demo [here](https://ag2ai.github.io/ag2/docs/tutorial/introduction).\n", "\n", "After sending the question and seeing a response, you can type `exit` to end the chat or continue to converse." ] @@ -207,7 +207,7 @@ "source": [ "## Simple Coding Agent\n", "\n", - "In this example, we will implement a \"coding agent\" that can execute code. You will see how this code is run alongside your notebook in your current workspace, taking advantage of the performance benefits of Databricks clusters. This is based off the demo [here](https://ag2ai.github.io/autogen/docs/topics/non-openai-models/cloud-mistralai/).\n", + "In this example, we will implement a \"coding agent\" that can execute code. You will see how this code is run alongside your notebook in your current workspace, taking advantage of the performance benefits of Databricks clusters. This is based on the demo [here](https://ag2ai.github.io/ag2/docs/topics/non-openai-models/cloud-mistralai/).\n", "\n", "First, set up a directory: " ] @@ -430,7 +430,7 @@ "source": [ "## Conversable Bots\n", "\n", - "We can also implement the [two-agent chat pattern](https://ag2ai.github.io/autogen/docs/tutorial/conversation-patterns/#two-agent-chat-and-chat-result) using DBRX to \"talk to itself\" in a teacher/student exchange:" + "We can also implement the [two-agent chat pattern](https://ag2ai.github.io/ag2/docs/tutorial/conversation-patterns/#two-agent-chat-and-chat-result) using DBRX to \"talk to itself\" in a teacher/student exchange:" ] }, { @@ -498,7 +498,7 @@ "\n", "It can be useful to display chat logs to the notebook for debugging, and then persist those logs to a Delta table. The following section demonstrates how to extend the default AutoGen logging libraries.\n", "\n", - "First, we will implement a Python `class` that extends the capabilities of `autogen.runtime_logging` [docs](https://ag2ai.github.io/autogen/docs/notebooks/agentchat_logging):" + "First, we will implement a Python `class` that extends the capabilities of `autogen.runtime_logging` [docs](https://ag2ai.github.io/ag2/docs/notebooks/agentchat_logging):" ] }, { diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index 394105a852..03760d4cec 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -8,7 +8,7 @@ "source": [ "# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n", "\n", - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat.
This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs must be passed to `AssistantAgent` to initialize the agent. The corresponding functions must be passed to `UserProxyAgent`, which will execute any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to ensure the instructions align with the function call descriptions.\n", "\n", @@ -38,7 +38,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 9dbf3b0e2e..500f2dbc21 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -14,7 +14,7 @@ "id": "9a71fa36", "metadata": {}, "source": [ - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs must be passed to `AssistantAgent` to initialize the agent. The corresponding functions must be passed to `UserProxyAgent`, which will execute any function calls made by `AssistantAgent`. 
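A minimal sketch of that caller/executor split using AutoGen's `register_function` helper; `get_weather` is a made-up tool and `config_list` is assumed from the surrounding setup:

```python
from autogen import AssistantAgent, UserProxyAgent, register_function

def get_weather(city: str) -> str:
    """Toy tool used only for illustration."""
    return f"It is sunny in {city}."

assistant = AssistantAgent("assistant", llm_config={"config_list": config_list})
user_proxy = UserProxyAgent("user_proxy", human_input_mode="NEVER", code_execution_config=False)

# The assistant may *suggest* the call; the user proxy *executes* it.
register_function(
    get_weather,
    caller=assistant,
    executor=user_proxy,
    description="Get the weather for a city.",
)
```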
Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to ensure the instructions align with the function call descriptions.\n", "\n", diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb index 06b922cec2..dbe655da05 100644 --- a/notebook/agentchat_function_call_currency_calculator.ipynb +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -15,7 +15,7 @@ "id": "9a71fa36", "metadata": {}, "source": [ - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs must be passed to `AssistantAgent` to initialize the agent. The corresponding functions must be passed to `UserProxyAgent`, which will execute any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to ensure the instructions align with the function call descriptions.\n", "\n", @@ -45,7 +45,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_groupchat.ipynb b/notebook/agentchat_groupchat.ipynb index 9036964a69..99dd40a791 100644 --- a/notebook/agentchat_groupchat.ipynb +++ b/notebook/agentchat_groupchat.ipynb @@ -8,7 +8,7 @@ "# Group Chat\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "This notebook is modified based on https://github.com/ag2ai/FLAML/blob/4ea686af5c3e8ff24d9076a7a626c8b28ab5b1d7/notebook/autogen_multiagent_roleplay_chat.ipynb\n", "\n", @@ -31,7 +31,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." 
+ "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_groupchat_RAG.ipynb b/notebook/agentchat_groupchat_RAG.ipynb index 85e02c9ec1..333279bf35 100644 --- a/notebook/agentchat_groupchat_RAG.ipynb +++ b/notebook/agentchat_groupchat_RAG.ipynb @@ -8,7 +8,7 @@ "# Group Chat with Retrieval Augmented Generation\n", "\n", "AutoGen supports conversable agents powered by LLMs, tools, or humans, performing tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "````{=mdx}\n", ":::info Requirements\n", @@ -30,7 +30,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_groupchat_customized.ipynb b/notebook/agentchat_groupchat_customized.ipynb index dc8b3c51dd..48f4459dc9 100644 --- a/notebook/agentchat_groupchat_customized.ipynb +++ b/notebook/agentchat_groupchat_customized.ipynb @@ -8,7 +8,7 @@ "# Group Chat with Customized Speaker Selection Method\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to pass a cumstomized agent selection method to GroupChat. The customized function looks like this:\n", "\n", @@ -56,7 +56,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_groupchat_finite_state_machine.ipynb b/notebook/agentchat_groupchat_finite_state_machine.ipynb index b6bd300bf3..17a7ac13f4 100644 --- a/notebook/agentchat_groupchat_finite_state_machine.ipynb +++ b/notebook/agentchat_groupchat_finite_state_machine.ipynb @@ -8,7 +8,7 @@ "# FSM - User can input speaker transition constraints\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. 
This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "This notebook is about using graphs to define the transition paths amongst speakers.\n", "\n", diff --git a/notebook/agentchat_groupchat_research.ipynb b/notebook/agentchat_groupchat_research.ipynb index e484a2e419..85fdb08939 100644 --- a/notebook/agentchat_groupchat_research.ipynb +++ b/notebook/agentchat_groupchat_research.ipynb @@ -8,7 +8,7 @@ "# Perform Research with Multi-Agent Group Chat\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "## Requirements\n", "\n", @@ -31,7 +31,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_groupchat_stateflow.ipynb b/notebook/agentchat_groupchat_stateflow.ipynb index 77efb476cb..71a9edd90f 100644 --- a/notebook/agentchat_groupchat_stateflow.ipynb +++ b/notebook/agentchat_groupchat_stateflow.ipynb @@ -29,7 +29,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_groupchat_vis.ipynb b/notebook/agentchat_groupchat_vis.ipynb index a26768ee12..ba4623f94f 100644 --- a/notebook/agentchat_groupchat_vis.ipynb +++ b/notebook/agentchat_groupchat_vis.ipynb @@ -8,7 +8,7 @@ "# Group Chat with Coder and Visualization Critic\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "````{=mdx}\n", ":::info Requirements\n", @@ -29,7 +29,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." 
+ "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_human_feedback.ipynb b/notebook/agentchat_human_feedback.ipynb index af058a8eb8..6e44badd6c 100644 --- a/notebook/agentchat_human_feedback.ipynb +++ b/notebook/agentchat_human_feedback.ipynb @@ -12,7 +12,7 @@ "# Auto Generated Agent Chat: Task Solving with Code Generation, Execution, Debugging & Human Feedback\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to solve a challenging math problem with human feedback. Here `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. By setting `human_input_mode` properly, the `UserProxyAgent` can also prompt the user for feedback to `AssistantAgent`. For example, when `human_input_mode` is set to \"ALWAYS\", the `UserProxyAgent` will always prompt the user for feedback. When user feedback is provided, the `UserProxyAgent` will directly pass the feedback to `AssistantAgent`. When no user feedback is provided, the `UserProxyAgent` will execute the code written by `AssistantAgent` and return the execution results (success or failure and corresponding outputs) to `AssistantAgent`.\n", "\n", @@ -47,7 +47,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_inception_function.ipynb b/notebook/agentchat_inception_function.ipynb index e2c6756e42..fd2299a82f 100644 --- a/notebook/agentchat_inception_function.ipynb +++ b/notebook/agentchat_inception_function.ipynb @@ -6,7 +6,7 @@ "source": [ "# Auto Generated Agent Chat: Function Inception\n", "\n", - "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. 
Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to give them the ability to auto-extend the list of functions the model may call. Functions need to be registered to `UserProxyAgent`, which will be responsible for executing any function calls made by `AssistantAgent`. The assistant also needs to know the signature of functions that may be called. A special `define_function` function is registered, which registers a new function in `UserProxyAgent` and updates the configuration of the assistant.\n", "\n", diff --git a/notebook/agentchat_langchain.ipynb b/notebook/agentchat_langchain.ipynb index 0da9b36088..798b29b4e7 100644 --- a/notebook/agentchat_langchain.ipynb +++ b/notebook/agentchat_langchain.ipynb @@ -10,7 +10,7 @@ "source": [ "# Auto Generated Agent Chat: Task Solving with Langchain Provided Tools as Functions\n", "\n", - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participants through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613) with a set of Langchain-provided tools and toolkits, to demonstrate how to leverage the 35+ tools available. \n", "A specified prompt and function configs must be passed to `AssistantAgent` to initialize the agent. The corresponding functions must be passed to `UserProxyAgent`, which will execute any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to ensure the instructions align with the function call descriptions.\n", @@ -49,7 +49,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_models`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_models) function tries to create a list of configurations using Azure OpenAI endpoints and OpenAI endpoints for the provided list of models. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files:\n", + "The [`config_list_from_models`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_models) function tries to create a list of configurations using Azure OpenAI endpoints and OpenAI endpoints for the provided list of models. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files:\n", "\n", "- OpenAI API key: os.environ[\"OPENAI_API_KEY\"] or `openai_api_key_file=\"key_openai.txt\"`.\n", "- Azure OpenAI API key: os.environ[\"AZURE_OPENAI_API_KEY\"] or `aoai_api_key_file=\"key_aoai.txt\"`.
Multiple keys can be stored, one per line.\n", diff --git a/notebook/agentchat_microsoft_fabric.ipynb b/notebook/agentchat_microsoft_fabric.ipynb index 6fa0d5aac3..6011138ac2 100644 --- a/notebook/agentchat_microsoft_fabric.ipynb +++ b/notebook/agentchat_microsoft_fabric.ipynb @@ -14,7 +14,7 @@ "## Use AutoGen in Microsoft Fabric\n", "\n", "[AutoGen](https://github.com/ag2ai/ag2) offers conversable LLM agents, which can be used to solve various tasks with human or automatic feedback, including tasks that require using tools via code.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "[Microsoft Fabric](https://learn.microsoft.com/en-us/fabric/get-started/microsoft-fabric-overview) is an all-in-one analytics solution for enterprises that covers everything from data movement to data science, Real-Time Analytics, and business intelligence. It offers a comprehensive suite of services, including data lake, data engineering, and data integration, all in one place. Its pre-built AI models include GPT-x models such as `gpt-4o`, `gpt-4-turbo`, `gpt-4`, `gpt-4-8k`, `gpt-4-32k`, `gpt-35-turbo`, `gpt-35-turbo-16k` and `gpt-35-turbo-instruct`, etc. It's important to note that the Azure OpenAI service is not supported on trial SKUs and only paid SKUs (F64 or higher, or P1 or higher) are supported.\n", "\n", @@ -282,7 +282,7 @@ "http_client = get_openai_httpx_sync_client() # http_client is needed for openai>1\n", "http_client.__deepcopy__ = types.MethodType(\n", " lambda self, memo: self, http_client\n", - ") # https://ag2ai.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\\n\",\n", + ") # https://ag2ai.github.io/ag2/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n", "\n", "config_list = [\n", " {\n", @@ -447,7 +447,7 @@ "http_client = get_openai_httpx_sync_client() # http_client is needed for openai>1\n", "http_client.__deepcopy__ = types.MethodType(\n", " lambda self, memo: self, http_client\n", - ") # https://ag2ai.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n", + ") # https://ag2ai.github.io/ag2/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n", "\n", "config_list = [\n", " {\n", diff --git a/notebook/agentchat_oai_assistant_groupchat.ipynb b/notebook/agentchat_oai_assistant_groupchat.ipynb index ced5e4c50c..9b5084c05a 100644 --- a/notebook/agentchat_oai_assistant_groupchat.ipynb +++ b/notebook/agentchat_oai_assistant_groupchat.ipynb @@ -7,7 +7,7 @@ "# Auto Generated Agent Chat: Group Chat with GPTAssistantAgent\n", "\n", "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat.
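Returning to `config_list_from_models` from the Langchain notebook above, a minimal usage sketch; the model names are illustrative and credentials are assumed to sit in the environment variables or key files just listed:

```python
import autogen

# Builds one configuration per requested model, pulling OpenAI / Azure OpenAI
# credentials from environment variables or the key/base txt files.
config_list = autogen.config_list_from_models(
    model_list=["gpt-4", "gpt-3.5-turbo"],
)
```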
This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to get multiple `GPTAssistantAgent` instances to converse through group chat.\n", "\n", @@ -32,7 +32,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_oai_code_interpreter.ipynb b/notebook/agentchat_oai_code_interpreter.ipynb index 80f317d54f..4bd93082f2 100644 --- a/notebook/agentchat_oai_code_interpreter.ipynb +++ b/notebook/agentchat_oai_code_interpreter.ipynb @@ -28,7 +28,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_planning.ipynb b/notebook/agentchat_planning.ipynb index 7ebb88191c..07ac8ec64a 100644 --- a/notebook/agentchat_planning.ipynb +++ b/notebook/agentchat_planning.ipynb @@ -12,7 +12,7 @@ "# Auto Generated Agent Chat: Collaborative Task Solving with Coding and Planning Agent\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use multiple agents to work together and accomplish a task that requires finding info from the web and coding. `AssistantAgent` is an LLM-based agent that can write and debug Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We further create a planning agent for the assistant agent to consult. The planning agent is a variation of the LLM-based `AssistantAgent` with a different system message.\n", "\n", @@ -47,7 +47,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file. It first looks for an environment variable with a specified name. The value of the environment variable needs to be a valid json string. If that variable is not found, it looks for a json file with the same name.
It filters the configs by filter_dict.\n", + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file. It first looks for an environment variable with a specified name. The value of the environment variable needs to be a valid json string. If that variable is not found, it looks for a json file with the same name. It filters the configs by filter_dict.\n", "\n", "It's OK to have only the OpenAI API key, or only the Azure OpenAI API key + base.\n" ] diff --git a/notebook/agentchat_stream.ipynb b/notebook/agentchat_stream.ipynb index fbb8bcab08..b147350531 100644 --- a/notebook/agentchat_stream.ipynb +++ b/notebook/agentchat_stream.ipynb @@ -12,7 +12,7 @@ "# Interactive LLM Agent Dealing with Data Stream\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use customized agents to continuously acquire news from the web and ask for investment suggestions.\n", "\n", @@ -47,7 +47,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { diff --git a/notebook/agentchat_surfer.ipynb b/notebook/agentchat_surfer.ipynb index 2fe6fda882..fbd10c6803 100644 --- a/notebook/agentchat_surfer.ipynb +++ b/notebook/agentchat_surfer.ipynb @@ -35,7 +35,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n", + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n", "\n", "It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well).\n", "\n", diff --git a/notebook/agentchat_swarm.ipynb b/notebook/agentchat_swarm.ipynb index 56ac6f30de..c7f7ea7554 100644 --- a/notebook/agentchat_swarm.ipynb +++ b/notebook/agentchat_swarm.ipynb @@ -34,7 +34,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." 
+ "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { @@ -229,7 +229,7 @@ "metadata": {}, "source": [ "> With AutoGen, you don't need to write schemas for functions. You can add decorators to the functions to register a function schema to an LLM-based agent, where the schema is automatically generated.\n", - "See more details in this [doc](https://ag2ai.github.io/autogen/docs/tutorial/tool-use)" + "See more details in this [doc](https://ag2ai.github.io/ag2/docs/tutorial/tool-use)" ] }, { diff --git a/notebook/agentchat_teachability.ipynb b/notebook/agentchat_teachability.ipynb index 338dd5971d..aaff864ce9 100644 --- a/notebook/agentchat_teachability.ipynb +++ b/notebook/agentchat_teachability.ipynb @@ -37,7 +37,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { diff --git a/notebook/agentchat_teachable_oai_assistants.ipynb b/notebook/agentchat_teachable_oai_assistants.ipynb index b43dadea25..296d2a130e 100644 --- a/notebook/agentchat_teachable_oai_assistants.ipynb +++ b/notebook/agentchat_teachable_oai_assistants.ipynb @@ -14,7 +14,7 @@ "In making decisions about memo storage and retrieval, `Teachability` calls an instance of `TextAnalyzerAgent` to analyze pieces of text in several different ways. This adds extra LLM calls involving a relatively small number of tokens. These calls can add a few seconds to the time a user waits for a response.\n", "\n", "This notebook demonstrates how `Teachability` can be added to instances of `GPTAssistantAgent`\n", - "so that they can learn facts, preferences, and skills from users. As explained [here](https://ag2ai.github.io/autogen/docs/topics/openai-assistant/gpt_assistant_agent), each instance of `GPTAssistantAgent` wraps an OpenAI Assistant that can be given a set of tools including functions, code interpreter, and retrieval. Assistants with these tools are demonstrated in separate standalone sections below, which can be run independently.\n", + "so that they can learn facts, preferences, and skills from users. As explained [here](https://ag2ai.github.io/ag2/docs/topics/openai-assistant/gpt_assistant_agent), each instance of `GPTAssistantAgent` wraps an OpenAI Assistant that can be given a set of tools including functions, code interpreter, and retrieval. Assistants with these tools are demonstrated in separate standalone sections below, which can be run independently.\n", "\n", "## Requirements\n", "\n", @@ -41,7 +41,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." 
] }, { diff --git a/notebook/agentchat_teaching.ipynb b/notebook/agentchat_teaching.ipynb index 8a42616e51..f3096ea350 100644 --- a/notebook/agentchat_teaching.ipynb +++ b/notebook/agentchat_teaching.ipynb @@ -10,7 +10,7 @@ "TODO: Implement advanced teachability based on this example.\n", "\n", "AutoGen offers conversable agents powered by LLMs, tools, or humans, which can be used to perform tasks collectively via automated chat. This framework makes it easy to build many advanced applications of LLMs.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "This notebook demonstrates how AutoGen enables a user to teach AI new skills via natural agent interactions, without requiring knowledge of a programming language. It is modified based on https://github.com/ag2ai/FLAML/blob/evaluation/notebook/research_paper/teaching.ipynb and https://github.com/ag2ai/FLAML/blob/evaluation/notebook/research_paper/teaching_recipe_reuse.ipynb.\n", "\n", diff --git a/notebook/agentchat_two_users.ipynb b/notebook/agentchat_two_users.ipynb index 5886c2f8b8..1621cfc6b5 100644 --- a/notebook/agentchat_two_users.ipynb +++ b/notebook/agentchat_two_users.ipynb @@ -11,7 +11,7 @@ "source": [ "# Auto Generated Agent Chat: Collaborative Task Solving with Multiple Agents and Human Users\n", "\n", - "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate an application involving multiple agents and human users to work together and accomplish a task. `AssistantAgent` is an LLM-based agent that can write Python code (in a Python coding block) for a user to execute for a given task. `UserProxyAgent` is an agent which serves as a proxy for a user to execute the code written by `AssistantAgent`. We create multiple `UserProxyAgent` instances that can represent different human users.\n", "\n", @@ -46,7 +46,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n", + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n", "\n", "It first looks for an environment variable of a specified name (\"OAI_CONFIG_LIST\" in this example), which needs to be a valid json string. If that variable is not found, it looks for a json file with the same name.
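A sketch of what that environment variable or file might contain, with placeholder keys and endpoints:

```python
# OAI_CONFIG_LIST, as a JSON string or a file, holds a list of config dicts, e.g.:
# [
#     {"model": "gpt-4", "api_key": "<your OpenAI API key>"},
#     {"model": "gpt-4", "api_key": "<your Azure key>", "base_url": "<your endpoint>",
#      "api_type": "azure", "api_version": "2024-02-01"}
# ]
import autogen

config_list = autogen.config_list_from_json(
    "OAI_CONFIG_LIST",
    filter_dict={"model": ["gpt-4"]},  # keep only the gpt-4 entries
)
```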
It filters the configs by models (you can filter by other keys as well).\n", "\n", diff --git a/notebook/agentchat_web_info.ipynb b/notebook/agentchat_web_info.ipynb index bb9dfe037e..4232a70e7d 100644 --- a/notebook/agentchat_web_info.ipynb +++ b/notebook/agentchat_web_info.ipynb @@ -12,7 +12,7 @@ "# Auto Generated Agent Chat: Solving Tasks Requiring Web Info\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to perform tasks which require acquiring info from the web:\n", "* discuss a paper based on its URL.\n", @@ -51,7 +51,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file.\n" ] }, { diff --git a/notebook/agentchat_websockets.ipynb b/notebook/agentchat_websockets.ipynb index 33aa1a7c01..f2119d3bd4 100644 --- a/notebook/agentchat_websockets.ipynb +++ b/notebook/agentchat_websockets.ipynb @@ -8,16 +8,16 @@ "source": [ "# Websockets: Streaming input and output using websockets\n", "\n", - "This notebook demonstrates how to use the [`IOStream`](https://ag2ai.github.io/autogen/docs/reference/io/base/IOStream) class to stream both input and output using websockets. The use of websockets allows you to build web clients that are more responsive than the one using web methods. The main difference is that the webosockets allows you to push data while you need to poll the server for new response using web mothods.\n", + "This notebook demonstrates how to use the [`IOStream`](https://ag2ai.github.io/ag2/docs/reference/io/base/IOStream) class to stream both input and output using websockets. The use of websockets allows you to build web clients that are more responsive than those using web methods. The main difference is that websockets allow you to push data, while with web methods you need to poll the server for new responses.\n", "\n", "\n", - "In this guide, we explore the capabilities of the [`IOStream`](https://ag2ai.github.io/autogen/docs/reference/io/base/IOStream) class. It is specifically designed to enhance the development of clients such as web clients which use websockets for streaming both input and output. The [`IOStream`](https://ag2ai.github.io/autogen/docs/reference/io/base/IOStream) stands out by enabling a more dynamic and interactive user experience for web applications.\n", + "In this guide, we explore the capabilities of the [`IOStream`](https://ag2ai.github.io/ag2/docs/reference/io/base/IOStream) class. It is specifically designed to enhance the development of clients such as web clients which use websockets for streaming both input and output.
The [`IOStream`](https://ag2ai.github.io/ag2/docs/reference/io/base/IOStream) stands out by enabling a more dynamic and interactive user experience for web applications.\n", "\n", "Websockets technology is at the core of this functionality, offering a significant advancement over traditional web methods by allowing data to be \"pushed\" to the client in real-time. This is a departure from the conventional approach where clients must repeatedly \"poll\" the server to check for any new responses. By employing the underlying [websockets](https://websockets.readthedocs.io/) library, the IOStream class facilitates a continuous, two-way communication channel between the server and client. This ensures that updates are received instantly, without the need for constant polling, thereby making web clients more efficient and responsive.\n", "\n", - "The real power of websockets, leveraged through the [`IOStream`](https://ag2ai.github.io/autogen/docs/reference/io/base/IOStream) class, lies in its ability to create highly responsive web clients. This responsiveness is critical for applications requiring real-time data updates such as chat applications. By integrating the [`IOStream`](https://ag2ai.github.io/autogen/docs/reference/io/base/IOStream) class into your web application, you not only enhance user experience through immediate data transmission but also reduce the load on your server by eliminating unnecessary polling.\n", + "The real power of websockets, leveraged through the [`IOStream`](https://ag2ai.github.io/ag2/docs/reference/io/base/IOStream) class, lies in its ability to create highly responsive web clients. This responsiveness is critical for applications requiring real-time data updates such as chat applications. By integrating the [`IOStream`](https://ag2ai.github.io/ag2/docs/reference/io/base/IOStream) class into your web application, you not only enhance user experience through immediate data transmission but also reduce the load on your server by eliminating unnecessary polling.\n", "\n", - "In essence, the transition to using websockets through the [`IOStream`](https://ag2ai.github.io/autogen/docs/reference/io/base/IOStream) class marks a significant enhancement in web client development. This approach not only streamlines the data exchange process between clients and servers but also opens up new possibilities for creating more interactive and engaging web applications.
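As a preview of the mechanism discussed here and detailed below, a minimal server sketch; the port and the echo-style handler are assumptions, not the notebook's full example:

```python
from autogen.io.websockets import IOWebsockets

def on_connect(iostream: IOWebsockets) -> None:
    # Called once per client connection; the first client message
    # would normally seed an agent workflow.
    initial_msg = iostream.input()
    iostream.print(f"Received: {initial_msg}")

# Runs the websocket server on a background thread and yields its URI.
with IOWebsockets.run_server_in_thread(on_connect=on_connect, port=8765) as uri:
    print(f"Serving at {uri}")
```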
By following this guide, developers can harness the full potential of websockets and the [`IOStream`](https://ag2ai.github.io/ag2/docs/reference/io/base/IOStream) class to push the boundaries of what is possible with web client responsiveness and interactivity.\n", "\n", "## Requirements\n", "\n", @@ -42,7 +42,7 @@ "source": [ "## Set your API Endpoint\n", "\n", - "The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." + "The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." ] }, { @@ -92,7 +92,7 @@ "An `on_connect` function is a crucial part of applications that utilize websockets, acting as an event handler that is called whenever a new client connection is established. This function is designed to initiate any necessary setup, communication protocols, or data exchange procedures specific to the newly connected client. Essentially, it lays the groundwork for the interactive session that follows, configuring how the server and the client will communicate and what initial actions are to be taken once a connection is made. Now, let's delve into the details of how to define this function, especially in the context of using the AutoGen framework with websockets.\n", "\n", "\n", - "Upon a client's connection to the websocket server, the server automatically initiates a new instance of the [`IOWebsockets`](https://ag2ai.github.io/autogen/docs/reference/io/websockets/IOWebsockets) class. This instance is crucial for managing the data flow between the server and the client. The `on_connect` function leverages this instance to set up the communication protocol, define interaction rules, and initiate any preliminary data exchanges or configurations required for the client-server interaction to proceed smoothly.\n" + "Upon a client's connection to the websocket server, the server automatically initiates a new instance of the [`IOWebsockets`](https://ag2ai.github.io/ag2/docs/reference/io/websockets/IOWebsockets) class. This instance is crucial for managing the data flow between the server and the client. The `on_connect` function leverages this instance to set up the communication protocol, define interaction rules, and initiate any preliminary data exchanges or configurations required for the client-server interaction to proceed smoothly.\n" ] }, { diff --git a/notebook/autobuild_basic.ipynb b/notebook/autobuild_basic.ipynb index 42d39532d3..91a2829019 100644 --- a/notebook/autobuild_basic.ipynb +++ b/notebook/autobuild_basic.ipynb @@ -12,7 +12,7 @@ "Reference: [Agent AutoBuild](https://ag2ai.github.io/autogen/blog/2023/11/26/Agent-AutoBuild/)\n", "\n", "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n", - "Please find documentation about this feature [here](https://ag2ai.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "Please find documentation about this feature [here](https://ag2ai.github.io/ag2/docs/Use-Cases/agent_chat).\n", "\n", "In this notebook, we introduce a new class, `AgentBuilder`, to help users build an automatic task-solving process powered by a multi-agent system.
Specifically, in `build()`, we prompt an LLM to create multiple participant agents and initialize a group chat, and specify whether this task needs programming to solve. AgentBuilder also supports open-source LLMs via [vLLM](https://docs.vllm.ai/en/latest/index.html) and [FastChat](https://github.com/lm-sys/FastChat). Check the supported model list [here](https://docs.vllm.ai/en/latest/models/supported_models.html)."
   ]
  }
diff --git a/notebook/autogen_uniformed_api_calling.ipynb b/notebook/autogen_uniformed_api_calling.ipynb
index 4521674153..58175b31af 100644
--- a/notebook/autogen_uniformed_api_calling.ipynb
+++ b/notebook/autogen_uniformed_api_calling.ipynb
@@ -7,7 +7,7 @@
    "# A Uniform interface to call different LLMs\n",
    "\n",
    "Autogen provides a uniform interface for API calls to different LLMs, and for creating LLM agents from them.\n",
-    "Through setting up a configuration file, you can easily switch between different LLMs by just changing the model name, while enjoying all the [enhanced features](https://ag2ai.github.io/autogen/docs/topics/llm-caching) such as [caching](https://ag2ai.github.io/autogen/docs/Use-Cases/enhanced_inference/#usage-summary) and [cost calculation](https://ag2ai.github.io/autogen/docs/Use-Cases/enhanced_inference/#usage-summary)!\n",
+    "Through setting up a configuration file, you can easily switch between different LLMs by just changing the model name, while enjoying all the [enhanced features](https://ag2ai.github.io/ag2/docs/topics/llm-caching) such as [caching](https://ag2ai.github.io/ag2/docs/Use-Cases/enhanced_inference/#usage-summary) and [cost calculation](https://ag2ai.github.io/ag2/docs/Use-Cases/enhanced_inference/#usage-summary)!\n",
    "\n",
    "In this notebook, we will show you how to use AutoGen to call different LLMs and create LLM agents from them.\n",
    "\n",
diff --git a/notebook/config_loader_utility_functions.ipynb b/notebook/config_loader_utility_functions.ipynb
index 5bd6004bec..deb0b1c1b6 100644
--- a/notebook/config_loader_utility_functions.ipynb
+++ b/notebook/config_loader_utility_functions.ipynb
@@ -6,7 +6,7 @@
   "source": [
    "# Config loader utility functions\n",
    "\n",
-    "For an introduction to configuring LLMs, refer to the [main configuration docs](https://ag2ai.github.io/autogen/docs/topics/llm_configuration). This guide will run through examples of the more advanced utility functions for managing API configurations.\n",
+    "For an introduction to configuring LLMs, refer to the [main configuration docs](https://ag2ai.github.io/ag2/docs/topics/llm_configuration). This guide will run through examples of the more advanced utility functions for managing API configurations.\n",
    "\n",
    "Managing API configurations can be tricky, especially when dealing with multiple models and API versions. The provided utility functions assist users in managing these configurations effectively. Ensure your API keys and other sensitive data are stored securely. You might store keys in `.txt` or `.env` files or environment variables for local development. Never expose your API keys publicly. 
If you insist on storing your key files locally on your repo (you shouldn't), ensure the key file path is added to the `.gitignore` file.\n", "\n", diff --git a/notebook/oai_chatgpt_gpt4.ipynb b/notebook/oai_chatgpt_gpt4.ipynb index 3d3e65f45d..2e91ab005b 100644 --- a/notebook/oai_chatgpt_gpt4.ipynb +++ b/notebook/oai_chatgpt_gpt4.ipynb @@ -98,7 +98,7 @@ "source": [ "### Set your API Endpoint\n", "\n", - "The [`config_list_openai_aoai`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_openai_aoai) function tries to create a list of Azure OpenAI endpoints and OpenAI endpoints. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files:\n", + "The [`config_list_openai_aoai`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_openai_aoai) function tries to create a list of Azure OpenAI endpoints and OpenAI endpoints. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files:\n", "\n", "- OpenAI API key: os.environ[\"OPENAI_API_KEY\"] or `openai_api_key_file=\"key_openai.txt\"`.\n", "- Azure OpenAI API key: os.environ[\"AZURE_OPENAI_API_KEY\"] or `aoai_api_key_file=\"key_aoai.txt\"`. Multiple keys can be stored, one per line.\n", diff --git a/notebook/oai_completion.ipynb b/notebook/oai_completion.ipynb index 3fabbca5e2..0ad433da9e 100644 --- a/notebook/oai_completion.ipynb +++ b/notebook/oai_completion.ipynb @@ -64,11 +64,11 @@ "source": [ "## Set your API Endpoint\n", "\n", - "* The [`config_list_openai_aoai`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_openai_aoai) function tries to create a list of configurations using Azure OpenAI endpoints and OpenAI endpoints. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files:\n", + "* The [`config_list_openai_aoai`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_openai_aoai) function tries to create a list of configurations using Azure OpenAI endpoints and OpenAI endpoints. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files:\n", " - OpenAI API key: os.environ[\"OPENAI_API_KEY\"] or `openai_api_key_file=\"key_openai.txt\"`.\n", " - Azure OpenAI API key: os.environ[\"AZURE_OPENAI_API_KEY\"] or `aoai_api_key_file=\"key_aoai.txt\"`. Multiple keys can be stored, one per line.\n", " - Azure OpenAI API base: os.environ[\"AZURE_OPENAI_API_BASE\"] or `aoai_api_base_file=\"base_aoai.txt\"`. Multiple bases can be stored, one per line.\n", - "* The [`config_list_from_json`](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file. It first looks for the environment variable `env_or_file`, which must be a valid json string. If that variable is not found, it looks for a json file with the same name. It filters the configs by filter_dict.\n", + "* The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file. It first looks for the environment variable `env_or_file`, which must be a valid json string. If that variable is not found, it looks for a json file with the same name. 
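To make the lookup order concrete, a hedged sketch; the environment variable name and filter are illustrative:

```python
import autogen

# Checks the OAI_CONFIG_LIST environment variable first; if it is not set,
# falls back to a local file named "OAI_CONFIG_LIST".
config_list = autogen.config_list_from_json(
    env_or_file="OAI_CONFIG_LIST",
    filter_dict={"model": ["gpt-4"]},  # keep only the gpt-4 entries
)
```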
It filters the configs by filter_dict.\n", "\n", "It's OK to have only the OpenAI API key, or only the Azure OpenAI API key + base. If you open this notebook in colab, you can upload your files by clicking the file icon on the left panel and then choosing \"upload file\" icon.\n" ] diff --git a/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py b/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py index 58a3a38a4e..8dc0e2256e 100755 --- a/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py +++ b/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py @@ -28,7 +28,7 @@ def create_teachable_agent(reset_db=False): """Instantiates a teachable agent using the settings from the top of this file.""" # Load LLM inference endpoints from an env variable or a file - # See https://ag2ai.github.io/autogen/docs/FAQ#set-your-api-endpoints + # See https://ag2ai.github.io/ag2/docs/FAQ#set-your-api-endpoints # and OAI_CONFIG_LIST_sample config_list = config_list_from_json(env_or_file=OAI_CONFIG_LIST, filter_dict=filter_dict, file_location=KEY_LOC) diff --git a/test/agentchat/contrib/capabilities/test_teachable_agent.py b/test/agentchat/contrib/capabilities/test_teachable_agent.py index 6f705d6537..ade6aa1e7f 100755 --- a/test/agentchat/contrib/capabilities/test_teachable_agent.py +++ b/test/agentchat/contrib/capabilities/test_teachable_agent.py @@ -40,7 +40,7 @@ def create_teachable_agent(reset_db=False, verbosity=0): """Instantiates a teachable agent using the settings from the top of this file.""" # Load LLM inference endpoints from an env variable or a file - # See https://ag2ai.github.io/autogen/docs/FAQ#set-your-api-endpoints + # See https://ag2ai.github.io/ag2/docs/FAQ#set-your-api-endpoints # and OAI_CONFIG_LIST_sample config_list = config_list_from_json(env_or_file=OAI_CONFIG_LIST, filter_dict=filter_dict, file_location=KEY_LOC) diff --git a/test/twoagent.py b/test/twoagent.py index 75883ba946..1d3d80c857 100644 --- a/test/twoagent.py +++ b/test/twoagent.py @@ -7,7 +7,7 @@ from autogen import AssistantAgent, UserProxyAgent, config_list_from_json # Load LLM inference endpoints from an env variable or a file -# See https://ag2ai.github.io/autogen/docs/FAQ#set-your-api-endpoints +# See https://ag2ai.github.io/ag2/docs/FAQ#set-your-api-endpoints # and OAI_CONFIG_LIST_sample config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST") assistant = AssistantAgent("assistant", llm_config={"config_list": config_list}) diff --git a/website/blog/2023-10-18-RetrieveChat/index.mdx b/website/blog/2023-10-18-RetrieveChat/index.mdx index 4396511c93..c264f05779 100644 --- a/website/blog/2023-10-18-RetrieveChat/index.mdx +++ b/website/blog/2023-10-18-RetrieveChat/index.mdx @@ -82,7 +82,7 @@ from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProx 2. Create an 'AssistantAgent' instance named "assistant" and an 'RetrieveUserProxyAgent' instance named "ragproxyagent" -Refer to the [doc](https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent) +Refer to the [doc](https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/retrieve_user_proxy_agent) for more information on the detailed configurations. 
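Ahead of the original snippet, a hedged sketch of that two-agent setup; the `docs_path` value is a placeholder, and exact signatures have shifted between releases:

```python
from autogen import config_list_from_json
from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent

config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST")

# The assistant answers questions; the proxy retrieves relevant document chunks.
assistant = RetrieveAssistantAgent(
    name="assistant",
    system_message="You are a helpful assistant.",
    llm_config={"config_list": config_list},
)
ragproxyagent = RetrieveUserProxyAgent(
    name="ragproxyagent",
    retrieve_config={"task": "qa", "docs_path": "https://example.com/my_docs.md"},  # placeholder URL
)

ragproxyagent.initiate_chat(assistant, problem="What is RetrieveChat?")
```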
```python diff --git a/website/blog/2023-10-26-TeachableAgent/index.mdx b/website/blog/2023-10-26-TeachableAgent/index.mdx index bd9ac6d656..c567d5e41f 100644 --- a/website/blog/2023-10-26-TeachableAgent/index.mdx +++ b/website/blog/2023-10-26-TeachableAgent/index.mdx @@ -54,7 +54,7 @@ from autogen import ConversableAgent # As an example ```python # Load LLM inference endpoints from an env variable or a file -# See https://ag2ai.github.io/autogen/docs/FAQ#set-your-api-endpoints +# See https://ag2ai.github.io/ag2/docs/FAQ#set-your-api-endpoints # and OAI_CONFIG_LIST_sample filter_dict = {"model": ["gpt-4"]} # GPT-3.5 is less reliable than GPT-4 at learning from user feedback. config_list = config_list_from_json(env_or_file="OAI_CONFIG_LIST", filter_dict=filter_dict) diff --git a/website/blog/2023-12-01-AutoGenStudio/index.mdx b/website/blog/2023-12-01-AutoGenStudio/index.mdx index 0ceb7edd8f..da74e6c947 100644 --- a/website/blog/2023-12-01-AutoGenStudio/index.mdx +++ b/website/blog/2023-12-01-AutoGenStudio/index.mdx @@ -26,9 +26,9 @@ To help you rapidly prototype multi-agent solutions for your tasks, we are intro - Publish your sessions to a local gallery. -See the official AutoGen Studio documentation [here](https://ag2ai.github.io/autogen/docs/autogen-studio/getting-started) for more details. +See the official AutoGen Studio documentation [here](https://ag2ai.github.io/ag2/docs/autogen-studio/getting-started) for more details. -AutoGen Studio is open source [code here](https://github.com/ag2ai/build-with-autogen/blob/main/samples/apps/autogen-studio), and can be installed via pip. Give it a try! +AutoGen Studio is open source [code here](https://github.com/ag2ai/build-with-ag2/blob/main/samples/apps/autogen-studio), and can be installed via pip. Give it a try! ```bash pip install autogenstudio @@ -48,7 +48,7 @@ The following guide will help you get AutoGen Studio up and running on your syst ### Configuring an LLM Provider -To get started, you need access to a language model. You can get this set up by following the steps in the AutoGen documentation [here](https://ag2ai.github.io/autogen/docs/FAQ#set-your-api-endpoints). Configure your environment with either `OPENAI_API_KEY` or `AZURE_OPENAI_API_KEY`. +To get started, you need access to a language model. You can get this set up by following the steps in the AutoGen documentation [here](https://ag2ai.github.io/ag2/docs/FAQ#set-your-api-endpoints). Configure your environment with either `OPENAI_API_KEY` or `AZURE_OPENAI_API_KEY`. For example, in your terminal, you would set the API key like this: @@ -104,7 +104,7 @@ There are two ways to install AutoGen Studio - from PyPi or from source. We **re yarn build ``` - For Windows users, to build the frontend, you may need alternative commands provided in the [autogen studio readme](https://github.com/ag2ai/build-with-autogen/blob/main/samples/apps/autogen-studio). + For Windows users, to build the frontend, you may need alternative commands provided in the [autogen studio readme](https://github.com/ag2ai/build-with-ag2/blob/main/samples/apps/autogen-studio). ### Running the Application @@ -168,7 +168,7 @@ AutoGen Studio comes with 3 example skills: `fetch_profile`, `find_papers`, `gen ## The AutoGen Studio API -While AutoGen Studio is a web interface, it is powered by an underlying python API that is reusable and modular. Importantly, we have implemented an API where agent workflows can be declaratively specified (in JSON), loaded and run. An example of the current API is shown below. 
Please consult the [AutoGen Studio repo](https://github.com/ag2ai/build-with-autogen/blob/main/samples/apps/autogen-studio) for more details.
+While AutoGen Studio is a web interface, it is powered by an underlying python API that is reusable and modular. Importantly, we have implemented an API where agent workflows can be declaratively specified (in JSON), loaded and run. An example of the current API is shown below. Please consult the [AutoGen Studio repo](https://github.com/ag2ai/build-with-ag2/blob/main/samples/apps/autogen-studio) for more details.

 ```python
 import json
@@ -219,7 +219,7 @@ A: To reset your conversation history, you can delete the `database.sqlite` file

 A: Yes, you can view the generated messages in the debug console of the web UI, providing insights into the agent interactions. Alternatively, you can inspect the `database.sqlite` file for a comprehensive record of messages.

 **Q: Where can I find documentation and support for AutoGen Studio?**
-A: We are constantly working to improve AutoGen Studio. For the latest updates, please refer to the [AutoGen Studio Readme](https://github.com/ag2ai/build-with-autogen/blob/main/samples/apps/autogen-studio). For additional support, please open an issue on [GitHub](https://github.com/ag2ai/ag2) or ask questions on [Discord](https://aka.ms/autogen-dc).
+A: We are constantly working to improve AutoGen Studio. For the latest updates, please refer to the [AutoGen Studio Readme](https://github.com/ag2ai/build-with-ag2/blob/main/samples/apps/autogen-studio). For additional support, please open an issue on [GitHub](https://github.com/ag2ai/ag2) or ask questions on [Discord](https://aka.ms/autogen-dc).

 **Q: Can I use Other Models with AutoGen Studio?**
 A: Yes. AutoGen standardizes on the openai model api format, and you can use any api server that offers an openai compliant endpoint. In the AutoGen Studio UI, each agent has an `llm_config` field where you can input your model endpoint details including `model name`, `api key`, `base url`, `model type` and `api version`. For Azure OpenAI models, you can find these details in the Azure portal. Note that for Azure OpenAI, the `model name` is the deployment id or engine, and the `model type` is "azure".
diff --git a/website/blog/2023-12-23-AgentOptimizer/index.mdx b/website/blog/2023-12-23-AgentOptimizer/index.mdx
index a48ee28e94..23c27bce42 100644
--- a/website/blog/2023-12-23-AgentOptimizer/index.mdx
+++ b/website/blog/2023-12-23-AgentOptimizer/index.mdx
@@ -36,7 +36,7 @@ It contains three main methods:

 This method records the conversation history and performance of the agents in solving one problem.
 It includes two inputs: conversation_history (List[Dict]) and is_satisfied (bool).
-conversation_history is a list of dictionaries which could be got from chat_messages_for_summary in the [AgentChat](https://ag2ai.github.io/autogen/docs/reference/agentchat/agentchat/) class.
+conversation_history is a list of dictionaries which can be obtained from chat_messages_for_summary in the [AgentChat](https://ag2ai.github.io/ag2/docs/reference/agentchat/agentchat/) class.
 is_satisfied is a bool value that represents whether the user is satisfied with the solution. If it is None, the user will be asked to input the satisfaction. 
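A hedged sketch of the recording flow described above; it assumes `assistant`, `user_proxy`, and `config_list` already exist, and that `AgentOptimizer` lives at the contrib path below:

```python
from autogen.agentchat.contrib.agent_optimizer import AgentOptimizer

optimizer = AgentOptimizer(max_actions_per_step=3, llm_config={"config_list": config_list})

# Record one solved problem along with whether the user was satisfied.
history = assistant.chat_messages_for_summary(user_proxy)
optimizer.record_one_conversation(history, is_satisfied=True)

# Ask the optimizer to propose updated function signatures for the agents.
register_for_llm, register_for_executor = optimizer.step()
```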
Example: diff --git a/website/blog/2023-12-29-AgentDescriptions/index.mdx b/website/blog/2023-12-29-AgentDescriptions/index.mdx index 301026e927..21cf6571f2 100644 --- a/website/blog/2023-12-29-AgentDescriptions/index.mdx +++ b/website/blog/2023-12-29-AgentDescriptions/index.mdx @@ -8,7 +8,7 @@ tags: [AutoGen] ## TL;DR -AutoGen 0.2.2 introduces a [description](https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent#__init__) field to ConversableAgent (and all subclasses), and changes GroupChat so that it uses agent `description`s rather than `system_message`s when choosing which agents should speak next. +AutoGen 0.2.2 introduces a [description](https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent#__init__) field to ConversableAgent (and all subclasses), and changes GroupChat so that it uses agent `description`s rather than `system_message`s when choosing which agents should speak next. This is expected to simplify GroupChat’s job, improve orchestration, and make it easier to implement new GroupChat or GroupChat-like alternatives. @@ -18,9 +18,9 @@ However, if you were struggling with getting GroupChat to work, you can now try ## Introduction -As AutoGen matures and developers build increasingly complex combinations of agents, orchestration is becoming an important capability. At present, [GroupChat](https://ag2ai.github.io/autogen/docs/reference/agentchat/groupchat#groupchat-objects) and the [GroupChatManager](https://ag2ai.github.io/autogen/docs/reference/agentchat/groupchat#groupchatmanager-objects) are the main built-in tools for orchestrating conversations between 3 or more agents. For orchestrators like GroupChat to work well, they need to know something about each agent so that they can decide who should speak and when. Prior to AutoGen 0.2.2, GroupChat relied on each agent's `system_message` and `name` to learn about each participating agent. This is likely fine when the system prompt is short and sweet, but can lead to problems when the instructions are very long (e.g., with the [AssistantAgent](https://ag2ai.github.io/autogen/docs/reference/agentchat/assistant_agent)), or non-existent (e.g., with the [UserProxyAgent](https://ag2ai.github.io/autogen/docs/reference/agentchat/user_proxy_agent)). +As AutoGen matures and developers build increasingly complex combinations of agents, orchestration is becoming an important capability. At present, [GroupChat](https://ag2ai.github.io/ag2/docs/reference/agentchat/groupchat#groupchat-objects) and the [GroupChatManager](https://ag2ai.github.io/ag2/docs/reference/agentchat/groupchat#groupchatmanager-objects) are the main built-in tools for orchestrating conversations between 3 or more agents. For orchestrators like GroupChat to work well, they need to know something about each agent so that they can decide who should speak and when. Prior to AutoGen 0.2.2, GroupChat relied on each agent's `system_message` and `name` to learn about each participating agent. This is likely fine when the system prompt is short and sweet, but can lead to problems when the instructions are very long (e.g., with the [AssistantAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/assistant_agent)), or non-existent (e.g., with the [UserProxyAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/user_proxy_agent)). 
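To illustrate, a minimal sketch of pairing a long `system_message` with a short `description`; the agent name is illustrative and `config_list` is assumed to be defined elsewhere:

```python
from autogen import ConversableAgent

python_expert = ConversableAgent(
    name="python_expert",
    system_message="You are a senior Python engineer. Answer with concise, tested code.",
    # GroupChat uses this short summary when choosing the next speaker.
    description="An expert who answers Python programming questions.",
    llm_config={"config_list": config_list},
)
```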
-AutoGen 0.2.2 introduces a [description](https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent#__init__) field to all agents, and replaces the use of the `system_message` for orchestration in GroupChat and all future orchestrators. The `description` field defaults to the `system_message` to ensure backwards compatibility, so you may not need to change anything with your code if things are working well for you. However, if you were struggling with GroupChat, give setting the `description` field a try. +AutoGen 0.2.2 introduces a [description](https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent#__init__) field to all agents, and replaces the use of the `system_message` for orchestration in GroupChat and all future orchestrators. The `description` field defaults to the `system_message` to ensure backwards compatibility, so you may not need to change anything with your code if things are working well for you. However, if you were struggling with GroupChat, give setting the `description` field a try. The remainder of this post provides an example of how using the `description` field simplifies GroupChat's job, provides some evidence of its effectiveness, and provides tips for writing good descriptions. diff --git a/website/blog/2024-01-23-Code-execution-in-docker/index.mdx b/website/blog/2024-01-23-Code-execution-in-docker/index.mdx index d2582ac8e6..1965268e23 100644 --- a/website/blog/2024-01-23-Code-execution-in-docker/index.mdx +++ b/website/blog/2024-01-23-Code-execution-in-docker/index.mdx @@ -55,8 +55,8 @@ user_proxy = autogen.UserProxyAgent(name="user_proxy", llm_config=llm_config, ## Related documentation -- [Code execution with docker](https://ag2ai.github.io/autogen/docs/Installation#code-execution-with-docker-default) -- [How to disable code execution in docker](https://ag2ai.github.io/autogen/docs/FAQ#agents-are-throwing-due-to-docker-not-running-how-can-i-resolve-this) +- [Code execution with docker](https://ag2ai.github.io/ag2/docs/Installation#code-execution-with-docker-default) +- [How to disable code execution in docker](https://ag2ai.github.io/ag2/docs/FAQ#agents-are-throwing-due-to-docker-not-running-how-can-i-resolve-this) ## Conclusion diff --git a/website/blog/2024-01-25-AutoGenBench/index.mdx b/website/blog/2024-01-25-AutoGenBench/index.mdx index 1f2c2209c0..c148dedfa5 100644 --- a/website/blog/2024-01-25-AutoGenBench/index.mdx +++ b/website/blog/2024-01-25-AutoGenBench/index.mdx @@ -21,8 +21,8 @@ Today we are releasing AutoGenBench - a tool for evaluating AutoGen agents and w AutoGenBench is a standalone command line tool, installable from PyPI, which handles downloading, configuring, running, and reporting supported benchmarks. AutoGenBench works best when run alongside Docker, since it uses Docker to isolate tests from one another. -- See the [AutoGenBench README](https://github.com/ag2ai/build-with-autogen/blob/main/samples/tools/autogenbench/README.md) for information on installation and running benchmarks. -- See the [AutoGenBench CONTRIBUTING guide](https://github.com/ag2ai/build-with-autogen/blob/main/samples/tools/autogenbench/CONTRIBUTING.md) for information on developing or contributing benchmark datasets. +- See the [AutoGenBench README](https://github.com/ag2ai/build-with-ag2/blob/main/samples/tools/autogenbench/README.md) for information on installation and running benchmarks. 
+- See the [AutoGenBench CONTRIBUTING guide](https://github.com/ag2ai/build-with-ag2/blob/main/samples/tools/autogenbench/CONTRIBUTING.md) for information on developing or contributing benchmark datasets. ### Quick Start @@ -125,7 +125,7 @@ Please do not cite these values in academic work without first inspecting and ve From this output we can see the results of the three separate repetitions of each task, and final summary statistics of each run. In this case, the results were generated via GPT-4 (as defined in the OAI_CONFIG_LIST that was provided), and used the `TwoAgents` template. **It is important to remember that AutoGenBench evaluates _specific_ end-to-end configurations of agents (as opposed to evaluating a model or cognitive framework more generally).** -Finally, complete execution traces and logs can be found in the `Results` folder. See the [AutoGenBench README](https://github.com/ag2ai/build-with-autogen/blob/main/samples/tools/autogenbench/README.md) for more details about command-line options and output formats. Each of these commands also offers extensive in-line help via: +Finally, complete execution traces and logs can be found in the `Results` folder. See the [AutoGenBench README](https://github.com/ag2ai/build-with-ag2/blob/main/samples/tools/autogenbench/README.md) for more details about command-line options and output formats. Each of these commands also offers extensive in-line help via: - `autogenbench --help` - `autogenbench clone --help` @@ -145,4 +145,4 @@ For an up to date tracking of our work items on this project, please see [AutoGe ## Call for Participation -Finally, we want to end this blog post with an open call for contributions. AutoGenBench is still nascent, and has much opportunity for improvement. New benchmarks are constantly being published, and will need to be added. Everyone may have their own distinct set of metrics that they care most about optimizing, and these metrics should be onboarded. To this end, we welcome any and all contributions to this corner of the AutoGen project. If contributing is something that interests you, please see the [contributor’s guide](https://github.com/ag2ai/build-with-autogen/blob/main/samples/tools/autogenbench/CONTRIBUTING.md) and join our [Discord](https://aka.ms/autogen-dc) discussion in the [#autogenbench](https://discord.com/channels/1153072414184452236/1199851779328847902) channel! +Finally, we want to end this blog post with an open call for contributions. AutoGenBench is still nascent, and has much opportunity for improvement. New benchmarks are constantly being published, and will need to be added. Everyone may have their own distinct set of metrics that they care most about optimizing, and these metrics should be onboarded. To this end, we welcome any and all contributions to this corner of the AutoGen project. If contributing is something that interests you, please see the [contributor’s guide](https://github.com/ag2ai/build-with-ag2/blob/main/samples/tools/autogenbench/CONTRIBUTING.md) and join our [Discord](https://aka.ms/autogen-dc) discussion in the [#autogenbench](https://discord.com/channels/1153072414184452236/1199851779328847902) channel! 
diff --git a/website/blog/2024-02-02-AutoAnny/index.mdx b/website/blog/2024-02-02-AutoAnny/index.mdx
index 5debefa2e9..0968a1feec 100644
--- a/website/blog/2024-02-02-AutoAnny/index.mdx
+++ b/website/blog/2024-02-02-AutoAnny/index.mdx
@@ -16,7 +16,7 @@ import AutoAnnyLogo from './img/AutoAnnyLogo.jpg';
 ## TL;DR

 We are adding a new sample app called Anny-- a simple Discord bot powered
-by AutoGen that's intended to assist AutoGen Devs. See [`samples/apps/auto-anny`](https://github.com/ag2ai/build-with-autogen/tree/main/samples/apps/auto-anny) for details.
+by AutoGen that's intended to assist AutoGen Devs. See [`samples/apps/auto-anny`](https://github.com/ag2ai/build-with-ag2/tree/main/samples/apps/auto-anny) for details.

 ## Introduction

@@ -41,7 +41,7 @@ The current version of Anny is pretty simple -- it uses the Discord API and Auto
 For example, it supports commands like `/heyanny help` for command listing, `/heyanny ghstatus` for GitHub activity summary, `/heyanny ghgrowth` for GitHub repo growth indicators, and `/heyanny ghunattended` for listing unattended issues and PRs. Most of these commands use multiple AutoGen agents to accomplish these tasks.

-To use Anny, please follow instructions in [`samples/apps/auto-anny`](https://github.com/ag2ai/build-with-autogen/tree/main/samples/apps/auto-anny).
+To use Anny, please follow instructions in [`samples/apps/auto-anny`](https://github.com/ag2ai/build-with-ag2/tree/main/samples/apps/auto-anny).

 ## It's Not Just for AutoGen
 If you're an open-source developer managing your own project, you can probably relate to our challenges. We invite you to check out Anny and contribute to its development and roadmap.
diff --git a/website/blog/2024-02-11-FSM-GroupChat/index.mdx b/website/blog/2024-02-11-FSM-GroupChat/index.mdx
index 86ecba25b2..91f9ca429e 100644
--- a/website/blog/2024-02-11-FSM-GroupChat/index.mdx
+++ b/website/blog/2024-02-11-FSM-GroupChat/index.mdx
@@ -285,4 +285,4 @@ pip install autogen[graph]
 ```

 ## Notebook examples
-More examples can be found in the [notebook](https://ag2ai.github.io/autogen/docs/notebooks/agentchat_groupchat_finite_state_machine/). The notebook includes more examples of possible transition paths such as (1) hub and spoke, (2) sequential team operations, and (3) think aloud and debate. It also uses the function `visualize_speaker_transitions_dict` from `autogen.graph_utils` to visualize the various graphs.
+More examples can be found in the [notebook](https://ag2ai.github.io/ag2/docs/notebooks/agentchat_groupchat_finite_state_machine/). The notebook includes more examples of possible transition paths such as (1) hub and spoke, (2) sequential team operations, and (3) think aloud and debate. It also uses the function `visualize_speaker_transitions_dict` from `autogen.graph_utils` to visualize the various graphs.
diff --git a/website/blog/2024-06-24-AltModels-Classes/index.mdx b/website/blog/2024-06-24-AltModels-Classes/index.mdx
index aae3d2c531..e74a9ac2b1 100644
--- a/website/blog/2024-06-24-AltModels-Classes/index.mdx
+++ b/website/blog/2024-06-24-AltModels-Classes/index.mdx
@@ -48,7 +48,7 @@ AutoGen's ability to associate specific configurations to each agent means you c

 The common requirements of text generation and function/tool calling are supported by these client classes.

-Multi-modal support, such as for image/audio/video, is an area of active development. The [Google Gemini](https://ag2ai.github.io/autogen/docs/topics/non-openai-models/cloud-gemini) client class can be
The [Google Gemini](https://ag2ai.github.io/autogen/docs/topics/non-openai-models/cloud-gemini) client class can be +Multi-modal support, such as for image/audio/video, is an area of active development. The [Google Gemini](https://ag2ai.github.io/ag2/docs/topics/non-openai-models/cloud-gemini) client class can be used to create a multimodal agent. ## Tips @@ -58,9 +58,9 @@ Here are some tips when working with these client classes: - **Most to least capable** - start with larger models and get your workflow working, then iteratively try smaller models. - **Right model** - choose one that's suited to your task, whether it's coding, function calling, knowledge, or creative writing. - **Agent names** - these cloud providers do not use the `name` field on a message, so be sure to use your agent's name in their `system_message` and `description` fields, as well as instructing the LLM to 'act as' them. This is particularly important for "auto" speaker selection in group chats as we need to guide the LLM to choose the next agent based on a name, so tweak `select_speaker_message_template`, `select_speaker_prompt_template`, and `select_speaker_auto_multiple_template` with more guidance. -- **Context length** - as your conversation gets longer, models need to support larger context lengths, be mindful of what the model supports and consider using [Transform Messages](https://ag2ai.github.io/autogen/docs/topics/handling_long_contexts/intro_to_transform_messages) to manage context size. -- **Provider parameters** - providers have parameters you can set such as temperature, maximum tokens, top-k, top-p, and safety. See each client class in AutoGen's [API Reference](https://ag2ai.github.io/autogen/docs/reference/oai/gemini) or [documentation](https://ag2ai.github.io/autogen/docs/topics/non-openai-models/cloud-gemini) for details. -- **Prompts** - prompt engineering is critical in guiding smaller LLMs to do what you need. [ConversableAgent](https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent), [GroupChat](https://ag2ai.github.io/autogen/docs/reference/agentchat/groupchat), [UserProxyAgent](https://ag2ai.github.io/autogen/docs/reference/agentchat/user_proxy_agent), and [AssistantAgent](https://ag2ai.github.io/autogen/docs/reference/agentchat/assistant_agent) all have customizable prompt attributes that you can tailor. Here are some prompting tips from [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/overview)([+Library](https://docs.anthropic.com/en/prompt-library/library)), [Mistral AI](https://docs.mistral.ai/guides/prompting_capabilities/), [Together.AI](https://docs.together.ai/docs/examples), and [Meta](https://llama.meta.com/docs/how-to-guides/prompting/). +- **Context length** - as your conversation gets longer, models need to support larger context lengths, be mindful of what the model supports and consider using [Transform Messages](https://ag2ai.github.io/ag2/docs/topics/handling_long_contexts/intro_to_transform_messages) to manage context size. +- **Provider parameters** - providers have parameters you can set such as temperature, maximum tokens, top-k, top-p, and safety. See each client class in AutoGen's [API Reference](https://ag2ai.github.io/ag2/docs/reference/oai/gemini) or [documentation](https://ag2ai.github.io/ag2/docs/topics/non-openai-models/cloud-gemini) for details. +- **Prompts** - prompt engineering is critical in guiding smaller LLMs to do what you need. 
[ConversableAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent), [GroupChat](https://ag2ai.github.io/ag2/docs/reference/agentchat/groupchat), [UserProxyAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/user_proxy_agent), and [AssistantAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/assistant_agent) all have customizable prompt attributes that you can tailor. Here are some prompting tips from [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/overview)([+Library](https://docs.anthropic.com/en/prompt-library/library)), [Mistral AI](https://docs.mistral.ai/guides/prompting_capabilities/), [Together.AI](https://docs.together.ai/docs/examples), and [Meta](https://llama.meta.com/docs/how-to-guides/prompting/).
 - **Help!** - reach out on the AutoGen [Discord](https://discord.gg/pAbnFJrkgZ) or [log an issue](https://github.com/microsoft/autogen/issues) if you need help with or can help improve these client classes.

 Now it's time to try them out.

@@ -109,7 +109,7 @@ Add your model configurations to the `OAI_CONFIG_LIST`. Ensure you specify the `

 ### Usage

-The `[config_list_from_json](https://ag2ai.github.io/autogen/docs/reference/oai/openai_utils/#config_list_from_json)` function loads a list of configurations from an environment variable or a json file.
+The [`config_list_from_json`](https://ag2ai.github.io/ag2/docs/reference/oai/openai_utils/#config_list_from_json) function loads a list of configurations from an environment variable or a json file.

 ```py
 import autogen

@@ -150,7 +150,7 @@ user_proxy.initiate_chat(assistant, message="Write python code to print Hello Wor
 ```

-**NOTE: To integrate this setup into GroupChat, follow the [tutorial](https://ag2ai.github.io/autogen/docs/notebooks/agentchat_groupchat) with the same config as above.**
+**NOTE: To integrate this setup into GroupChat, follow the [tutorial](https://ag2ai.github.io/ag2/docs/notebooks/agentchat_groupchat) with the same config as above.**

 ## Function Calls

@@ -390,4 +390,4 @@ So we can see how Anthropic's Sonnet is able to suggest multiple tools in a sing

 ## More tips and tricks

-For an interesting chess game between Anthropic's Sonnet and Mistral's Mixtral, we've put together a sample notebook that highlights some of the tips and tricks for working with non-OpenAI LLMs. [See the notebook here](https://ag2ai.github.io/autogen/docs/notebooks/agentchat_nested_chats_chess_altmodels).
+For an interesting chess game between Anthropic's Sonnet and Mistral's Mixtral, we've put together a sample notebook that highlights some of the tips and tricks for working with non-OpenAI LLMs. [See the notebook here](https://ag2ai.github.io/ag2/docs/notebooks/agentchat_nested_chats_chess_altmodels).
diff --git a/website/blog/2024-07-25-AgentOps/index.mdx b/website/blog/2024-07-25-AgentOps/index.mdx
index 2a2fc122b7..4ea260c288 100644
--- a/website/blog/2024-07-25-AgentOps/index.mdx
+++ b/website/blog/2024-07-25-AgentOps/index.mdx
@@ -28,7 +28,7 @@ Agent observability, in its most basic form, allows you to monitor, troubleshoot

 ## Why AgentOps?

-AutoGen has simplified the process of building agents, yet we recognized the need for an easy-to-use, native tool for observability. We've previously discussed AgentOps, and now we're excited to partner with AgentOps as our official agent observability tool. Integrating AgentOps with AutoGen simplifies your workflow and boosts your agents' performance through clear observability, ensuring they operate optimally. 
For more details, check out our [AgentOps documentation](https://ag2ai.github.io/autogen/docs/notebooks/agentchat_agentops/). +AutoGen has simplified the process of building agents, yet we recognized the need for an easy-to-use, native tool for observability. We've previously discussed AgentOps, and now we're excited to partner with AgentOps as our official agent observability tool. Integrating AgentOps with AutoGen simplifies your workflow and boosts your agents' performance through clear observability, ensuring they operate optimally. For more details, check out our [AgentOps documentation](https://ag2ai.github.io/ag2/docs/notebooks/agentchat_agentops/). Agent Session Replay diff --git a/website/docs/Examples.md b/website/docs/Examples.md index 6e66940484..e1f4884e5c 100644 --- a/website/docs/Examples.md +++ b/website/docs/Examples.md @@ -40,7 +40,7 @@ Links to notebook examples: - Automated Continual Learning from New Data - [View Notebook](/docs/notebooks/agentchat_stream) -- [AutoAnny](https://github.com/ag2ai/build-with-autogen/tree/main/samples/apps/auto-anny) - A Discord bot built using AutoGen +- [AutoAnny](https://github.com/ag2ai/build-with-ag2/tree/main/samples/apps/auto-anny) - A Discord bot built using AutoGen ### Tool Use @@ -59,7 +59,7 @@ Links to notebook examples: ### Human Involvement -- Simple example in ChatGPT style [View example](https://github.com/ag2ai/build-with-autogen/blob/main/samples/simple_chat.py) +- Simple example in ChatGPT style [View example](https://github.com/ag2ai/build-with-ag2/blob/main/samples/simple_chat.py) - Auto Code Generation, Execution, Debugging and **Human Feedback** - [View Notebook](/docs/notebooks/agentchat_human_feedback) - Automated Task Solving with GPT-4 + **Multiple Human Users** - [View Notebook](/docs/notebooks/agentchat_two_users) - Agent Chat with **Async Human Inputs** - [View Notebook](/docs/notebooks/async_human_input) @@ -111,7 +111,7 @@ Links to notebook examples: ### Utilities -- API Unification - [View Documentation with Code Example](https://ag2ai.github.io/autogen/docs/Use-Cases/enhanced_inference/#api-unification) +- API Unification - [View Documentation with Code Example](https://ag2ai.github.io/ag2/docs/Use-Cases/enhanced_inference/#api-unification) - Utility Functions to Help Managing API configurations effectively - [View Notebook](/docs/topics/llm_configuration) ### Inference Hyperparameters Tuning diff --git a/website/docs/FAQ.mdx b/website/docs/FAQ.mdx index f07780c638..e588725289 100644 --- a/website/docs/FAQ.mdx +++ b/website/docs/FAQ.mdx @@ -52,9 +52,9 @@ When you call `initiate_chat` the conversation restarts by default. You can use ## `max_consecutive_auto_reply` vs `max_turn` vs `max_round` -- [`max_consecutive_auto_reply`](https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent#max_consecutive_auto_reply) the maximum number of consecutive auto replie (a reply from an agent without human input is considered an auto reply). It plays a role when `human_input_mode` is not "ALWAYS". -- [`max_turns` in `ConversableAgent.initiate_chat`](https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent#initiate_chat) limits the number of conversation turns between two conversable agents (without differentiating auto-reply and reply/input from human) -- [`max_round` in GroupChat](https://ag2ai.github.io/autogen/docs/reference/agentchat/groupchat#groupchat-objects) specifies the maximum number of rounds in a group chat session. 
+- [`max_consecutive_auto_reply`](https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent#max_consecutive_auto_reply) is the maximum number of consecutive auto replies (a reply from an agent without human input is considered an auto reply). It plays a role when `human_input_mode` is not "ALWAYS".
+- [`max_turns` in `ConversableAgent.initiate_chat`](https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent#initiate_chat) limits the number of conversation turns between two conversable agents (without differentiating auto-reply and reply/input from human).
+- [`max_round` in GroupChat](https://ag2ai.github.io/ag2/docs/reference/agentchat/groupchat#groupchat-objects) specifies the maximum number of rounds in a group chat session.

 ## How do we decide what LLM is used for each agent? How many agents can be used? How do we decide how many agents in the group?

@@ -159,7 +159,7 @@ Explanation: Per [this gist](https://gist.github.com/defulmere/8b9695e415a442710

 (from [issue #478](https://github.com/microsoft/autogen/issues/478))

-See here https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent/#register_reply
+See here https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent/#register_reply

 For example, you can register a reply function that gets called when `generate_reply` is called for an agent.

@@ -188,11 +188,11 @@ In the above, we register a `print_messages` function that is called each time t

 ## How to get the last message?

-Refer to https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent/#last_message
+Refer to https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent/#last_message

 ## How to get each agent's messages?

-Please refer to https://ag2ai.github.io/autogen/docs/reference/agentchat/conversable_agent#chat_messages
+Please refer to https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent#chat_messages

 ## When using autogen docker, is it always necessary to reinstall modules?

diff --git a/website/docs/Migration-Guide.md b/website/docs/Migration-Guide.md
index d35ffc59fe..76f2c40953 100644
--- a/website/docs/Migration-Guide.md
+++ b/website/docs/Migration-Guide.md
@@ -26,7 +26,7 @@ autogen.runtime_logging.start()
 # Stop logging
 autogen.runtime_logging.stop()
 ```
-Checkout [Logging documentation](https://ag2ai.github.io/autogen/docs/Use-Cases/enhanced_inference#logging) and [Logging example notebook](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_logging.ipynb) to learn more.
+Check out [Logging documentation](https://ag2ai.github.io/ag2/docs/Use-Cases/enhanced_inference#logging) and [Logging example notebook](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_logging.ipynb) to learn more.

 Inference parameter tuning can be done via [`flaml.tune`](https://microsoft.github.io/FLAML/docs/Use-Cases/Tune-User-Defined-Function).
- `seed` in autogen is renamed to `cache_seed` to accommodate the newly added `seed` param in openai chat completion api. `use_cache` is removed as a kwarg in `OpenAIWrapper.create()` for being automatically decided by `cache_seed`: int | None. 
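For instance, a hedged sketch of pinning the cache seed through an agent's `llm_config`; the agent name is illustrative:

```python
import autogen

config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")

# cache_seed keys the on-disk LLM response cache; set it to None to disable caching.
assistant = autogen.AssistantAgent(
    name="assistant",
    llm_config={"config_list": config_list, "cache_seed": 42},
)
```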
The difference between autogen's `cache_seed` and openai's `seed` is that:
diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md
index fd9f1ccddc..e2b0bbcf8d 100644
--- a/website/docs/Use-Cases/agent_chat.md
+++ b/website/docs/Use-Cases/agent_chat.md
@@ -82,7 +82,7 @@ AutoGen, by integrating conversation-driven control utilizing both programming a
 With the pluggable auto-reply function, one can choose to invoke conversations with other agents depending on the content of the current message and context. For example:
 - Hierarchical chat like in [OptiGuide](https://github.com/ag2ai/optiguide).
 - [Dynamic Group Chat](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_groupchat.ipynb) which is a special form of hierarchical chat. In the system, we register a reply function in the group chat manager, which broadcasts messages and decides who the next speaker will be in a group chat setting.
-- [Finite State Machine graphs to set speaker transition constraints](https://ag2ai.github.io/autogen/docs/notebooks/agentchat_groupchat_finite_state_machine) which is a special form of dynamic group chat. In this approach, a directed transition matrix is fed into group chat. Users can specify legal transitions or specify disallowed transitions.
+- [Finite State Machine graphs to set speaker transition constraints](https://ag2ai.github.io/ag2/docs/notebooks/agentchat_groupchat_finite_state_machine) which is a special form of dynamic group chat. In this approach, a directed transition matrix is fed into group chat. Users can specify legal transitions or disallowed transitions.
 - Nested chat like in [conversational chess](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_nested_chats_chess.ipynb).

 2. LLM-Based Function Call

diff --git a/website/docs/autogen-studio/faqs.md b/website/docs/autogen-studio/faqs.md
index ff8b3c28b4..926f377b04 100644
--- a/website/docs/autogen-studio/faqs.md
+++ b/website/docs/autogen-studio/faqs.md
@@ -57,7 +57,7 @@ Similarly, the workflow launch command above can be wrapped into a Dockerfile th

 ## Q: Can I run AutoGen Studio in a Docker container?

-A: Yes, you can run AutoGen Studio in a Docker container. You can build the Docker image using the provided [Dockerfile](https://github.com/ag2ai/build-with-autogen/blob/main/samples/apps/autogen-studio/Dockerfile) and run the container using the following commands:
+A: Yes, you can run AutoGen Studio in a Docker container. You can build the Docker image using the provided [Dockerfile](https://github.com/ag2ai/build-with-ag2/blob/main/samples/apps/autogen-studio/Dockerfile) and run the container using the following commands:

 ```bash
 FROM python:3.10
diff --git a/website/docs/autogen-studio/getting-started.md b/website/docs/autogen-studio/getting-started.md
index 2f7cfab9bb..1ca954bfc6 100644
--- a/website/docs/autogen-studio/getting-started.md
+++ b/website/docs/autogen-studio/getting-started.md
@@ -7,7 +7,7 @@

 AutoGen Studio is a low-code interface built to help you rapidly prototype AI agents, enhance them with skills, compose them into workflows and interact with them to accomplish tasks. It is built on top of the [AutoGen](https://ag2ai.github.io/autogen) framework, which is a toolkit for building AI agents. 
-Code for AutoGen Studio is on GitHub at [build-with-autogen](https://github.com/ag2ai/build-with-autogen/tree/main/samples/apps/autogen-studio) +Code for AutoGen Studio is on GitHub at [build-with-ag2](https://github.com/ag2ai/build-with-ag2/tree/main/samples/apps/autogen-studio) > **Note**: AutoGen Studio is meant to help you rapidly prototype multi-agent workflows and demonstrate an example of end user interfaces built with AutoGen. It is not meant to be a production-ready app. Developers are encouraged to use the AutoGen framework to build their own applications, implementing authentication, security and other features required for deployed applications. @@ -35,7 +35,7 @@ There are two ways to install AutoGen Studio - from PyPi or from source. We **re If you prefer to install from source, ensure you have Python 3.10+ and Node.js (version above 14.15.0) installed. Here's how you get started: - - Clone the AutoGen Studio repository from the [build-with-autogen](https://github.com/ag2ai/build-with-autogen) repository and install its Python dependencies: + - Clone the AutoGen Studio repository from the [build-with-ag2](https://github.com/ag2ai/build-with-ag2) repository and install its Python dependencies: ```bash pip install -e . @@ -98,9 +98,9 @@ Project Structure: We welcome contributions to AutoGen Studio. We recommend the following general steps to contribute to the project: -- AutoGen Studio is in the [build-with-autogen](https://github.com/ag2ai/build-with-autogen) repository. +- AutoGen Studio is in the [build-with-ag2](https://github.com/ag2ai/build-with-ag2) repository. - Please initiate a discussion on the roadmap issue or a new issue in that repository to discuss your proposed contribution. -- Submit a pull request in the [build-with-autogen](https://github.com/ag2ai/build-with-autogen) repository with your contribution! +- Submit a pull request in the [build-with-ag2](https://github.com/ag2ai/build-with-ag2) repository with your contribution! - Please use the tag `studio` for any issues, questions, and PRs related to Studio ## A Note on Security diff --git a/website/docs/contributor-guide/docker.md b/website/docs/contributor-guide/docker.md index 9e895ef53e..1fd603f181 100644 --- a/website/docs/contributor-guide/docker.md +++ b/website/docs/contributor-guide/docker.md @@ -8,7 +8,7 @@ For developers contributing to the AutoGen project, we offer a specialized Docke - **Usage**: This image is recommended for developers who intend to contribute code or documentation to AutoGen. - **Forking the Project**: It's advisable to fork the AutoGen GitHub project to your own repository. This allows you to make changes in a separate environment without affecting the main project. - **Updating Dockerfile**: Modify your copy of `Dockerfile` in the `dev` folder as needed for your development work. -- **Submitting Pull Requests**: Once your changes are ready, submit a pull request from your branch to the upstream AutoGen GitHub project for review and integration. For more details on contributing, see the [AutoGen Contributing](https://ag2ai.github.io/autogen/docs/Contribute) page. +- **Submitting Pull Requests**: Once your changes are ready, submit a pull request from your branch to the upstream AutoGen GitHub project for review and integration. For more details on contributing, see the [AutoGen Contributing](https://ag2ai.github.io/ag2/docs/Contribute) page. 
## Building the Developer Docker Image diff --git a/website/docs/ecosystem/promptflow.md b/website/docs/ecosystem/promptflow.md index fcf9ec8e97..55e3281361 100644 --- a/website/docs/ecosystem/promptflow.md +++ b/website/docs/ecosystem/promptflow.md @@ -8,7 +8,7 @@ Quick links: - Why use Promptflow - [Link](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow) - Quick start guide - [Link](https://ag2ai.github.io/promptflow/how-to-guides/quick-start.html) -- Sample application for Promptflow + AutoGen integration - [Link](https://github.com/ag2ai/build-with-autogen/tree/main/samples/apps/promptflow-autogen) +- Sample application for Promptflow + AutoGen integration - [Link](https://github.com/ag2ai/build-with-ag2/tree/main/samples/apps/promptflow-autogen) ## Sample Flow diff --git a/website/docs/topics/non-openai-models/local-litellm-ollama.ipynb b/website/docs/topics/non-openai-models/local-litellm-ollama.ipynb index 35d5018338..1f7df0b230 100644 --- a/website/docs/topics/non-openai-models/local-litellm-ollama.ipynb +++ b/website/docs/topics/non-openai-models/local-litellm-ollama.ipynb @@ -18,7 +18,7 @@ "2. LiteLLM\n", "3. Ollama\n", "\n", - "Note: We recommend using a virtual environment for your stack, see [this article](https://ag2ai.github.io/autogen/docs/installation/#create-a-virtual-environment-optional) for guidance.\n", + "Note: We recommend using a virtual environment for your stack, see [this article](https://ag2ai.github.io/ag2/docs/installation/#create-a-virtual-environment-optional) for guidance.\n", "\n", "## Installing LiteLLM\n", "\n", diff --git a/website/docs/topics/non-openai-models/local-vllm.md b/website/docs/topics/non-openai-models/local-vllm.md index 267127e3fe..69b3d624e1 100644 --- a/website/docs/topics/non-openai-models/local-vllm.md +++ b/website/docs/topics/non-openai-models/local-vllm.md @@ -11,7 +11,7 @@ Running this stack requires the installation of: 1. AutoGen ([installation instructions](/docs/installation)) 2. vLLM -Note: We recommend using a virtual environment for your stack, see [this article](https://ag2ai.github.io/autogen/docs/installation/#create-a-virtual-environment-optional) +Note: We recommend using a virtual environment for your stack, see [this article](https://ag2ai.github.io/ag2/docs/installation/#create-a-virtual-environment-optional) for guidance. ## Installing vLLM diff --git a/website/docs/topics/retrieval_augmentation.md b/website/docs/topics/retrieval_augmentation.md index 3e9bea2c0c..22ee7d28ba 100644 --- a/website/docs/topics/retrieval_augmentation.md +++ b/website/docs/topics/retrieval_augmentation.md @@ -11,7 +11,7 @@ The following is an example setup demonstrating how to create retrieval augmente Here `RetrieveUserProxyAgent` instance acts as a proxy agent that retrieves relevant information based on the user's input. -Refer to the [doc](https://ag2ai.github.io/autogen/docs/reference/agentchat/contrib/retrieve_user_proxy_agent) +Refer to the [doc](https://ag2ai.github.io/ag2/docs/reference/agentchat/contrib/retrieve_user_proxy_agent) for more information on the detailed configurations. 
```python From 6e9d840030aea89af74b7dd2262d8c60216cbc5f Mon Sep 17 00:00:00 2001 From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com> Date: Sat, 16 Nov 2024 17:08:59 -0500 Subject: [PATCH 05/10] reverse conversable commit --- autogen/agentchat/conversable_agent.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 840da79204..d2f8763281 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -659,9 +659,6 @@ def _append_oai_message(self, message: Union[Dict, str], role, conversation_id: if message.get("role") in ["function", "tool"]: oai_message["role"] = message.get("role") - if "tool_responses" in oai_message: - for tool_response in oai_message["tool_responses"]: - tool_response["content"] = str(tool_response["content"]) elif "override_role" in message: # If we have a direction to override the role then set the # role accordingly. Used to customise the role for the @@ -794,16 +791,15 @@ async def a_send( "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided." ) - def _print_received_message(self, message: Union[Dict, str], sender: Agent, skip_head: bool = False): + def _print_received_message(self, message: Union[Dict, str], sender: Agent): iostream = IOStream.get_default() # print the message received - if not skip_head: - iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True) + iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True) message = self._message_to_dict(message) if message.get("tool_responses"): # Handle tool multi-call responses for tool_response in message["tool_responses"]: - self._print_received_message(tool_response, sender, skip_head=True) + self._print_received_message(tool_response, sender) if message.get("role") == "tool": return # If role is tool, then content is just a concatenation of all tool_responses @@ -2292,7 +2288,7 @@ def _format_json_str(jstr): result.append(char) return "".join(result) - def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, Any]]: + def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, str]]: """Execute a function call and return the result. Override this function to modify the way to execute function and tool calls. 
@@ -2346,7 +2342,7 @@ def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict return is_exec_success, { "name": func_name, "role": "function", - "content": content, + "content": str(content), } async def a_execute_function(self, func_call): @@ -2401,7 +2397,7 @@ async def a_execute_function(self, func_call): return is_exec_success, { "name": func_name, "role": "function", - "content": content, + "content": str(content), } def generate_init_message(self, message: Union[Dict, str, None], **kwargs) -> Union[str, Dict]: From 1b4a586ae88c04b730ff2b06469cc6b1b3fed52a Mon Sep 17 00:00:00 2001 From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com> Date: Sat, 16 Nov 2024 17:01:31 -0500 Subject: [PATCH 06/10] update --- autogen/agentchat/conversable_agent.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index d2f8763281..840da79204 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -659,6 +659,9 @@ def _append_oai_message(self, message: Union[Dict, str], role, conversation_id: if message.get("role") in ["function", "tool"]: oai_message["role"] = message.get("role") + if "tool_responses" in oai_message: + for tool_response in oai_message["tool_responses"]: + tool_response["content"] = str(tool_response["content"]) elif "override_role" in message: # If we have a direction to override the role then set the # role accordingly. Used to customise the role for the @@ -791,15 +794,16 @@ async def a_send( "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided." ) - def _print_received_message(self, message: Union[Dict, str], sender: Agent): + def _print_received_message(self, message: Union[Dict, str], sender: Agent, skip_head: bool = False): iostream = IOStream.get_default() # print the message received - iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True) + if not skip_head: + iostream.print(colored(sender.name, "yellow"), "(to", f"{self.name}):\n", flush=True) message = self._message_to_dict(message) if message.get("tool_responses"): # Handle tool multi-call responses for tool_response in message["tool_responses"]: - self._print_received_message(tool_response, sender) + self._print_received_message(tool_response, sender, skip_head=True) if message.get("role") == "tool": return # If role is tool, then content is just a concatenation of all tool_responses @@ -2288,7 +2292,7 @@ def _format_json_str(jstr): result.append(char) return "".join(result) - def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, str]]: + def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict[str, Any]]: """Execute a function call and return the result. Override this function to modify the way to execute function and tool calls. 
@@ -2342,7 +2346,7 @@ def execute_function(self, func_call, verbose: bool = False) -> Tuple[bool, Dict
         return is_exec_success, {
             "name": func_name,
             "role": "function",
-            "content": str(content),
+            "content": content,
         }

     async def a_execute_function(self, func_call):
@@ -2397,7 +2401,7 @@ async def a_execute_function(self, func_call):
         return is_exec_success, {
             "name": func_name,
             "role": "function",
-            "content": str(content),
+            "content": content,
         }

     def generate_init_message(self, message: Union[Dict, str, None], **kwargs) -> Union[str, Dict]:

From fef1450768e6413ce59de987a5abb34be3a759ad Mon Sep 17 00:00:00 2001
From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com>
Date: Sat, 16 Nov 2024 18:59:45 -0500
Subject: [PATCH 07/10] update

---
 autogen/agentchat/groupchat.py         | 115 ++++++++++++++++---------
 autogen/agentchat/swarm/swarm_agent.py |  44 +++++-----
 autogen/function_utils.py              |   1 +
 3 files changed, 99 insertions(+), 61 deletions(-)

diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index 6e9d987528..eb05ad492d 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -24,6 +24,7 @@
 from .contrib.capabilities import transform_messages
 from .conversable_agent import ConversableAgent
 from .swarm import SwarmAgent, SwarmResult
+from .user_proxy_agent import UserProxyAgent

 logger = logging.getLogger(__name__)

@@ -433,21 +434,37 @@ def random_select_speaker(self, agents: Optional[List[Agent]] = None) -> Union[A
     def swarm_select_speaker(self, last_speaker: Agent, agents: Optional[List[Agent]] = None) -> Union[Agent, None]:
         """Select the next speaker using the swarm pattern. Note that this does not need to cater for when the agent is continuing to speak."""
         messages = self.messages
+        user_agent = None
+        for agent in agents:
+            if isinstance(agent, UserProxyAgent):
+                user_agent = agent
+                break
+
+        if user_agent is None:
+            raise ValueError("We need a UserProxyAgent to continue the conversation.")

         # Always start with the first speaker
         if len(messages) <= 1:
-            return last_speaker
-
+            print("aaaaaaa")
+            return user_agent
         last_message = messages[-1]

         # If the last message is a TRANSFER message, extract agent name and return them
         if last_message["role"] == "tool":
             if "content" in last_message and last_message["content"].startswith("TRANSFER:"):
                 agent_name = last_message["content"].split(":")[1].strip()
                 if self.agent_by_name(name=agent_name):
                     return self.agent_by_name(agent_name)
+            else:
+                # if the agent just call a tool and not transfer, return the last speaker
+                return last_speaker
+
+        if isinstance(last_speaker, SwarmAgent):
+            return user_agent
+        elif isinstance(last_speaker, UserProxyAgent):
+            return self.agent_by_name(name=messages[-2].get("name", ""))

         # Otherwise, return the agent before the previous one
-        return self.agent_by_name(name=messages[-2].get("name", ""))
+        raise ValueError("Something wrong with the speaker selection.")

     def _prepare_and_select_agents(
         self,
@@ -1159,6 +1176,33 @@ def print_messages(recipient, messages, sender, config):
         """
         return self._last_speaker

+    def _process_reply_from_swarm(self, reply: Union[Dict, List[Dict]], groupchat: GroupChat) -> None:
+        # If we have a swarm reply, update context variables, and determine the next agent
+        if isinstance(reply, list):
+            pass
+        elif isinstance(reply, dict):
+            reply = [reply]
+        else:
+            return None
+        next_agent = None
+        for r in reply:
+            content = r.get("content")
+            if isinstance(content, SwarmResult):
+                if content.context_variables != {}:
+                    groupchat.context_variables.update(content.context_variables)
+                if content.next_agent is not None:
+                    next_agent = content.next_agent
+            elif isinstance(content, Agent):
+                next_agent = content
+        return next_agent
+
+    def _broadcast_message(self, groupchat: GroupChat, message: Dict, speaker: Agent) -> None:
+        # Broadcast the message to all agents except the speaker
+        groupchat.append(message, speaker)
+        for agent in groupchat.agents:
+            if agent != speaker:
+                self.send(message, agent, request_reply=False, silent=True)
+
     def run_chat(
         self,
         messages: Optional[List[Dict]] = None,
         sender: Optional[Agent] = None,
         config: Optional[GroupChat] = None,
@@ -1183,7 +1227,6 @@ def run_chat(
         # NOTE: We do not also append to groupchat.messages,
         # since groupchat handles its own introductions

-        # Swarm
         if self.groupchat.speaker_selection_method == "swarm":
             config.allow_repeat_speaker = True  # Swarms allow the last speaker to be the next speaker

@@ -1193,14 +1236,17 @@ def run_chat(
                 a.client_cache = self.client_cache
         for i in range(groupchat.max_round):
             self._last_speaker = speaker
-            groupchat.append(message, speaker)
-            # broadcast the message to all agents except the speaker
-            for agent in groupchat.agents:
-                if agent != speaker:
-                    self.send(message, agent, request_reply=False, silent=True)
-            if self._is_termination_msg(message) or i == groupchat.max_round - 1:
-                # The conversation is over or it's the last round
-                break
+            if isinstance(message, list):
+                for m in message:
+                    self._broadcast_message(groupchat, m, speaker)
+                for m in message:
+                    if self._is_termination_msg(m) or i == groupchat.max_round - 1:
+                        break
+            else:
+                self._broadcast_message(groupchat, message, speaker)
+                if self._is_termination_msg(message) or i == groupchat.max_round - 1:
+                    # The conversation is over or it's the last round
+                    break
             try:
                 if next_speaker:
                     # Speaker has already been selected (swarm)
@@ -1214,32 +1260,14 @@ def run_chat(
                     iostream.print(colored(f"\nNext speaker: {speaker.name}\n", "green"), flush=True)

                 # Update the context_variables on the agent
-                if self.groupchat.speaker_selection_method == "swarm" and isinstance(speaker, SwarmAgent):
+                if isinstance(speaker, SwarmAgent):
                     speaker.context_variables.update(groupchat.context_variables)

                 # let the speaker speak
-                reply = speaker.generate_reply(sender=self)
+                reply = speaker.generate_reply(sender=self)  # reply must be a dict or a list of dicts(only for swarm)

-                # If we have a swarm reply, update context variables
-                if isinstance(reply, SwarmResult):
-                    if reply.context_variables:
-                        self.groupchat.context_variables.update(reply.context_variables)
-
-                    reply_value = "\n".join(reply.values)
-
-                    if reply.next_agent is not None:
-                        next_speaker = groupchat.agent_by_name(reply.next_agent)
-                    else:
-                        # If there are multiple replies, it indicates multiple tool calls
-                        # In this case we will see if any of the replies contains an agent Transfer and set the reply to that
-                        if len(reply.values) > 1:
-                            for content in reply.values:
-                                if content in groupchat.agent_names:
-                                    reply_value = content
-                                    break
-
-                    # Replaces the swarm result with string value
-                    reply = reply_value
+                if groupchat.speaker_selection_method == "swarm":
+                    next_speaker = self._process_reply_from_swarm(reply, speaker)  # process the swarm reply: Update

             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
@@ -1268,8 +1296,13 @@ def run_chat(
                 reply["content"] = self.clear_agents_history(reply, groupchat)

             # The speaker sends the message without requesting a reply
-            speaker.send(reply, self, request_reply=False, silent=silent)
-            message = self.last_message(speaker)
+            if isinstance(reply, list):
+                for r in reply:
+                    speaker.send(r, self, request_reply=False, silent=silent)
+                message = reply
+            else:
+                speaker.send(reply, self, request_reply=False, silent=silent)
+                message = self.last_message(speaker)
             if self.client_cache is not None:
                 for a in groupchat.agents:
                     a.client_cache = a.previous_cache

diff --git a/autogen/agentchat/swarm/swarm_agent.py b/autogen/agentchat/swarm/swarm_agent.py
index c6250a455c..529d0e68f8 100644
--- a/autogen/agentchat/swarm/swarm_agent.py
+++ b/autogen/agentchat/swarm/swarm_agent.py
@@ -34,6 +34,11 @@ class SwarmResult(BaseModel):
     agent: Optional["SwarmAgent"] = None
     context_variables: dict = {}

+
+# 1. SwarmResult should be a single instance, a single tool call can return one result only.
+# 2. In generate_reply_with_tool_calls, We only process the tool_responses from a single message from generate_tool_calls_reply
+
+
 class SwarmAgent(ConversableAgent):
     def __init__(
         self,
@@ -86,14 +91,14 @@ def generate_reply_with_tool_calls(
             return False, None
         if messages is None:
             messages = self._oai_messages[sender]
+
+        # print("messages", messages)
+        # print(self.llm_config['tools'])
+        # exit()
         response = self._generate_oai_reply_from_client(client, self._oai_system_message + messages, self.client_cache)
-
-        print(response)
+
         if isinstance(response, str):
-            return True, SwarmResult(
-                values=response,
-                next_agent=self.name,
-            )
+            return True, response
         elif isinstance(response, dict):
             # Tool calls, inject context_variables back in to the response before executing the tools
             if "tool_calls" in response:
@@ -113,16 +118,16 @@ def generate_reply_with_tool_calls(
                     # Update the tool call with new arguments
                     tool_call["function"]["arguments"] = json.dumps(current_args)

-            _, func_response = self.generate_tool_calls_reply([response])
-
-            return_values = []
-            for response in func_response["tool_responses"]:
-                return_values.append(response["content"])
+            # Generate tool calls reply
+            _, tool_message = self.generate_tool_calls_reply([response])

-            return True, SwarmResult(
-                values=return_values,
-                next_agent=None,
-            )
+            # a tool_response example:
+            # {
+            #     "role": "tool",
+            #     "content": A str, or an object (SwarmResult, SwarmAgent, etc.)
+            #     "tool_call_id":
+            # },
+            return True, [response] + tool_message["tool_responses"]
         else:
             raise ValueError("Invalid response type:", type(response))

@@ -143,17 +148,16 @@ def add_single_function(self, func: Callable, description=""):
             del f_no_context["function"]["parameters"]["properties"][__CONTEXT_VARIABLES_PARAM_NAME__]
             if "required" in f_no_context["function"]["parameters"]:
                 required = f_no_context["function"]["parameters"]["required"]
-                f_no_context["function"]["parameters"]["required"] = [param for param in required if param != __CONTEXT_VARIABLES_PARAM_NAME__]
+                f_no_context["function"]["parameters"]["required"] = [
+                    param for param in required if param != __CONTEXT_VARIABLES_PARAM_NAME__
+                ]
                 # If required list is empty, remove it
                 if not f_no_context["function"]["parameters"]["required"]:
                     del f_no_context["function"]["parameters"]["required"]

         self.update_tool_signature(f_no_context, is_remove=False)
-        self.register_function({func._name: self._wrap_function(func)})
-
+        self.register_function({func._name: func})

     def add_functions(self, func_list: List[Callable]):
         for func in func_list:
             self.add_single_function(func)
-
-        print(self.llm_config['tools'])

diff --git a/autogen/function_utils.py b/autogen/function_utils.py
index 06be34b6fa..89a6059ce1 100644
--- a/autogen/function_utils.py
+++ b/autogen/function_utils.py
@@ -213,6 +213,7 @@ def get_missing_annotations(typed_signature: inspect.Signature, required: List[s
     unannotated_with_default = all_missing.difference(missing)
     return missing, unannotated_with_default

+
 def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]:
     """Get a JSON schema for a function as defined by the OpenAI API

From 77d268c6fcc76b516c0f8dc6d452d64d3391344a Mon Sep 17 00:00:00 2001
From: Mark Sze
Date: Sun, 17 Nov 2024 01:03:06 +0000
Subject: [PATCH 08/10] Update for context_variables and message content back
 to string

---
 autogen/agentchat/groupchat.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index eb05ad492d..166ecff4c4 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -1189,11 +1189,19 @@ def _process_reply_from_swarm(self, reply: Union[Dict, List[Dict]], groupchat: G
             content = r.get("content")
             if isinstance(content, SwarmResult):
                 if content.context_variables != {}:
-                    groupchat.context_variables.update(content.context_variables)
-                if content.next_agent is not None:
-                    next_agent = content.next_agent
+                    self.groupchat.context_variables.update(content.context_variables)
+                if content.agent is not None:
+                    next_agent = content.agent
+
+                # Change content back to a string for consistency with messages
+                r["content"] = content.values
             elif isinstance(content, Agent):
                 next_agent = content
+
+                # Change content back to a string
+                # Consider adjusting this message, e.g. f"Transfer to {next_agent.name}"
+                r["content"] = next_agent.name
+
         return next_agent

From bcaeccc3ad792e5a064cb4356ea7d0bf4519171e Mon Sep 17 00:00:00 2001
From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com>
Date: Sun, 17 Nov 2024 00:19:41 -0500
Subject: [PATCH 09/10] update

---
 autogen/agentchat/groupchat.py         | 19 +++----
 autogen/agentchat/swarm/swarm_agent.py | 18 ++-----
 test/agentchat/test_groupchat.py       | 75 +++++++++++++++++++++++++-
 3 files changed, 85 insertions(+), 27 deletions(-)

diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index 166ecff4c4..9250d18612 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -445,7 +445,10 @@ def swarm_select_speaker(self, last_speaker: Agent, agents: Optional[List[Agent]
         # Always start with the first speaker
         if len(messages) <= 1:
-            print("aaaaaaa")
+            if last_speaker == user_agent:
+                for agent in agents:
+                    if isinstance(agent, SwarmAgent):
+                        return agent
             return user_agent
         last_message = messages[-1]
         # If the last message is a TRANSFER message, extract agent name and return them
@@ -1189,19 +1192,12 @@ def _process_reply_from_swarm(self, reply: Union[Dict, List[Dict]], groupchat: G
             content = r.get("content")
             if isinstance(content, SwarmResult):
                 if content.context_variables != {}:
-                    self.groupchat.context_variables.update(content.context_variables)
+                    groupchat.context_variables.update(content.context_variables)
                 if content.agent is not None:
                     next_agent = content.agent
-
-                # Change content back to a string for consistency with messages
-                r["content"] = content.values
             elif isinstance(content, Agent):
                 next_agent = content
-
-                # Change content back to a string
-                # Consider adjusting this message, e.g. f"Transfer to {next_agent.name}"
-                r["content"] = next_agent.name
-
+            r["content"] = str(r["content"])
         return next_agent

     def _broadcast_message(self, groupchat: GroupChat, message: Dict, speaker: Agent) -> None:
@@ -1275,7 +1271,7 @@ def run_chat(
                 reply = speaker.generate_reply(sender=self)  # reply must be a dict or a list of dicts(only for swarm)

                 if groupchat.speaker_selection_method == "swarm":
-                    next_speaker = self._process_reply_from_swarm(reply, speaker)  # process the swarm reply: Update
+                    next_speaker = self._process_reply_from_swarm(reply, groupchat)  # process the swarm reply: Update

             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
@@ -1322,7 +1318,6 @@ async def a_run_chat(
         messages: Optional[List[Dict]] = None,
         sender: Optional[Agent] = None,
         config: Optional[GroupChat] = None,
-        context_variables: Optional[Dict] = {},  # For Swarms
     ):
         """Run a group chat asynchronously."""
         if messages is None:

diff --git a/autogen/agentchat/swarm/swarm_agent.py b/autogen/agentchat/swarm/swarm_agent.py
index 529d0e68f8..f91d5055c8 100644
--- a/autogen/agentchat/swarm/swarm_agent.py
+++ b/autogen/agentchat/swarm/swarm_agent.py
@@ -29,15 +29,15 @@ class SwarmResult(BaseModel):
         agent (SwarmAgent): The swarm agent instance, if applicable.
         context_variables (dict): A dictionary of context variables.
     """
     values: str = ""
     agent: Optional["SwarmAgent"] = None
     context_variables: dict = {}

+    class Config:  # Add this inner class
+        arbitrary_types_allowed = True

-# 1. SwarmResult should be a single instance, a single tool call can return one result only.
-# 2. In generate_reply_with_tool_calls, We only process the tool_responses from a single message from generate_tool_calls_reply
-
+    def __str__(self):
+        return self.values

 class SwarmAgent(ConversableAgent):
     def __init__(
@@ -92,9 +92,6 @@ def generate_reply_with_tool_calls(
         if messages is None:
             messages = self._oai_messages[sender]

-        # print("messages", messages)
-        # print(self.llm_config['tools'])
-        # exit()
         response = self._generate_oai_reply_from_client(client, self._oai_system_message + messages, self.client_cache)

         if isinstance(response, str):
@@ -120,13 +117,6 @@ def generate_reply_with_tool_calls(

             # Generate tool calls reply
             _, tool_message = self.generate_tool_calls_reply([response])
-
-            # a tool_response example:
-            # {
-            #     "role": "tool",
-            #     "content": A str, or an object (SwarmResult, SwarmAgent, etc.)
-            #     "tool_call_id":
-            # },
             return True, [response] + tool_message["tool_responses"]
         else:
             raise ValueError("Invalid response type:", type(response))

diff --git a/test/agentchat/test_groupchat.py b/test/agentchat/test_groupchat.py
index de5f8bab11..c7c702ca16 100755
--- a/test/agentchat/test_groupchat.py
+++ b/test/agentchat/test_groupchat.py
@@ -13,7 +13,8 @@
 from types import SimpleNamespace
 from typing import Any, Dict, List, Optional
 from unittest import TestCase, mock
-
+import sys
+import os
 import pytest
 from test_assistant_agent import KEY_LOC, OAI_CONFIG_LIST

@@ -21,7 +22,79 @@
 from autogen import Agent, AssistantAgent, GroupChat, GroupChatManager
 from autogen.agentchat.contrib.capabilities import transform_messages, transforms
 from autogen.exception_utils import AgentNameConflict, UndefinedNextAgent
+from autogen.agentchat.swarm.swarm_agent import SwarmAgent, SwarmResult
+
+sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+from conftest import skip_openai  # noqa: E402
+
+try:
+    from openai import OpenAI
+except ImportError:
+    skip = True
+else:
+    skip = False or skip_openai
+
+
+@pytest.mark.skipif(skip, reason="openai not installed OR requested to skip")
+def test_swarm_agent():
+    context_variables = {"1": False, "2": False, "3": False}
+
+    def update_context_1(context_variables: dict) -> str:
+        context_variables["1"] = True
+        return SwarmResult(value="success", context_variables=context_variables)
+
+    def update_context_2_and_transfer_to_3(context_variables: dict) -> str:
+        context_variables["2"] = True
+        return SwarmResult(value="success", context_variables=context_variables, agent=agent_3)
+
+    def update_context_3(context_variables: dict) -> str:
+        context_variables["3"] = True
+        return SwarmResult(value="success", context_variables=context_variables)
+
+    def transfer_to_agent_2() -> SwarmAgent:
+        return agent_2
+
+    agent_1 = SwarmAgent(
+        name="Agent_1",
+        system_message="You are Agent 1, first, call the function to update context 1, and transfer to Agent 2",
+        llm_config=llm_config,
+        functions=[update_context_1, transfer_to_agent_2],
+    )
+
+    agent_2 = SwarmAgent(
+        name="Agent_2",
+        system_message="You are Agent 2, call the function that updates context 2 and transfer to Agent 3",
+        llm_config=llm_config,
+        functions=[update_context_2_and_transfer_to_3],
+    )
+
+    agent_3 = SwarmAgent(
+        name="Agent_3",
+        system_message="You are Agent 3, first, call the function to update context 3, and then reply TERMINATE",
+        llm_config=llm_config,
+        functions=[update_context_3],
+    )
+
+    user = UserProxyAgent(
+        name="Human_User",
+        system_message="Human user",
+        human_input_mode="ALWAYS",
+        code_execution_config=False,
+    )
+
+    groupchat = GroupChat(
+        agents=[user, agent_1, agent_2, agent_3],
+        messages=[],
+        max_round=10,
+        speaker_selection_method="swarm",
+        context_variables=context_variables,
+    )
+    manager = GroupChatManager(groupchat=groupchat, llm_config=None)
+
+    chat_result = user.initiate_chat(manager, message="start")
+
+    assert context_variables["1"] == True, "Expected context_variables['1'] to be True"
+    assert context_variables["2"] == True, "Expected context_variables['2'] to be True"
+    assert context_variables["3"] == True, "Expected context_variables['3'] to be True"


 def test_func_call_groupchat():
     agent1 = autogen.ConversableAgent(

From 320e96f01f7328a999d0be42d5434111e0f0527b Mon Sep 17 00:00:00 2001
From: Yiran Wu <32823396+kevin666aa@users.noreply.github.com>
Date: Sun, 17 Nov 2024 00:21:00 -0500
Subject: [PATCH 10/10] update

---
 autogen/function_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/autogen/function_utils.py b/autogen/function_utils.py
index 89a6059ce1..f4a6531fe5 100644
--- a/autogen/function_utils.py
+++ b/autogen/function_utils.py
@@ -8,7 +8,7 @@
 import inspect
 import json
 from logging import getLogger
-from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union, cast
+from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union

 from pydantic import BaseModel, Field
 from typing_extensions import Annotated, Literal, get_args, get_origin
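Reviewer note on the control flow this series converges on: by PATCH 09, `swarm_select_speaker` routes as follows — a `TRANSFER:<name>` tool result hands off explicitly, any other tool result keeps the same speaker, a plain text reply from a `SwarmAgent` yields to the `UserProxyAgent`, and the user proxy hands back to the previous agent. The sketch below restates those rules in plain Python for readers; it is not the shipped code, agents are reduced to name strings, and the `triage`/`billing` names are invented for illustration:

```python
# Standalone restatement of the swarm routing rules from patches 07 and 09.
# Agents are modelled as plain name strings; this is a reader's sketch only.
from typing import Dict, List, Optional


def route_next_speaker(
    messages: List[Dict],
    last_speaker: str,
    user_proxy: str,
    swarm_agents: List[str],
) -> Optional[str]:
    if len(messages) <= 1:
        # Bootstrap round: if the user proxy opened the chat, the first
        # swarm agent answers; otherwise control goes to the user proxy.
        return swarm_agents[0] if last_speaker == user_proxy else user_proxy
    last = messages[-1]
    if last.get("role") == "tool":
        content = last.get("content") or ""
        if content.startswith("TRANSFER:"):
            # Explicit hand-off named in the tool result. (The patch uses
            # content.split(":")[1], which assumes no colon in agent names.)
            name = content.split(":", 1)[1].strip()
            if name in swarm_agents:
                return name
        return last_speaker  # plain tool result: the same agent keeps the floor
    if last_speaker in swarm_agents:
        return user_proxy  # a text reply yields the floor to the user proxy
    if last_speaker == user_proxy:
        return messages[-2].get("name", "")  # back to the previous agent
    raise ValueError("Something wrong with the speaker selection.")


# Example: a triage agent's tool call transfers to a billing agent.
msgs = [
    {"role": "user", "name": "user_proxy", "content": "start"},
    {"role": "tool", "name": "triage", "content": "TRANSFER: billing"},
]
assert route_next_speaker(msgs, "triage", "user_proxy", ["triage", "billing"]) == "billing"
```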
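The manager side of the hand-off is `_process_reply_from_swarm`: it walks the tool responses, merges any `SwarmResult.context_variables` into the group chat, records an explicit `agent` hand-off, and (after PATCH 09) stringifies the content for the transcript. A minimal stand-alone approximation follows, using a dataclass in place of the pydantic `SwarmResult` and a hypothetical `billing_agent` hand-off:

```python
# Sketch of the SwarmResult-processing contract (patches 07-09), using a
# stand-in dataclass so the snippet runs without autogen installed.
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class SwarmResultSketch:
    values: str = ""
    agent: Optional[str] = None  # name of the hand-off target, if any
    context_variables: dict = field(default_factory=dict)

    def __str__(self) -> str:
        # Mirrors PATCH 09's SwarmResult.__str__, which returns `values`.
        return self.values


def process_swarm_reply(replies: List[Dict], chat_context: dict) -> Optional[str]:
    """Merge tool-carried context into the chat and pick the next speaker."""
    next_agent = None
    for r in replies:
        content = r.get("content")
        if isinstance(content, SwarmResultSketch):
            chat_context.update(content.context_variables)  # merge shared state
            if content.agent is not None:
                next_agent = content.agent  # explicit hand-off wins
        r["content"] = str(r["content"])  # transcript wants plain strings
    return next_agent


# Example: one tool response sets a flag and hands off to "billing_agent".
ctx = {"authenticated": False}
replies = [{"role": "tool", "content": SwarmResultSketch(
    values="ok", agent="billing_agent", context_variables={"authenticated": True})}]
assert process_swarm_reply(replies, ctx) == "billing_agent"
assert ctx["authenticated"] is True
```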