[Chore] Syntax updated to Python version 3.9 #248

Closed · wants to merge 2 commits
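Context for reviewers: this PR adopts PEP 585 generics, available since Python 3.9, under which the built-in `dict`, `list`, `tuple`, and `set` are subscriptable and replace `typing.Dict`, `typing.List`, `typing.Tuple`, and `typing.Set` in annotations. A minimal before/after sketch (illustrative, not taken from the diff):

```python
# Before (requires extra imports from typing):
from typing import Dict, List, Optional

def summarize_old(messages: List[Dict[str, str]]) -> Optional[str]:
    return messages[-1]["content"] if messages else None

# After, on Python 3.9+ (PEP 585): the builtins are generic, fewer imports:
from typing import Optional

def summarize_new(messages: list[dict[str, str]]) -> Optional[str]:
    return messages[-1]["content"] if messages else None
```
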
8 changes: 4 additions & 4 deletions autogen/_pydantic.py
@@ -4,7 +4,7 @@
 #
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
-from typing import Any, Dict, Optional, Tuple, Type, Union, get_args
+from typing import Any, Tuple, TypeVar, Union, get_args
 
 from pydantic import BaseModel
 from pydantic.version import VERSION as PYDANTIC_VERSION
@@ -30,7 +30,7 @@ def type2schema(t: Any) -> JsonSchemaValue:
         """
         return TypeAdapter(t).json_schema()
 
-    def model_dump(model: BaseModel) -> Dict[str, Any]:
+    def model_dump(model: BaseModel) -> dict[str, Any]:
         """Convert a pydantic model to a dict
 
         Args:
@@ -59,7 +59,7 @@ def model_dump_json(model: BaseModel) -> str:
     from pydantic import schema_of
     from pydantic.typing import evaluate_forwardref as evaluate_forwardref  # type: ignore[no-redef]
 
-    JsonSchemaValue = Dict[str, Any]  # type: ignore[misc]
+    JsonSchemaValue = dict[str, Any]  # type: ignore[misc]
 
     def type2schema(t: Any) -> JsonSchemaValue:
         """Convert a type to a JSON schema
@@ -92,7 +92,7 @@ def type2schema(t: Any) -> JsonSchemaValue:
 
         return d
 
-    def model_dump(model: BaseModel) -> Dict[str, Any]:
+    def model_dump(model: BaseModel) -> dict[str, Any]:
        """Convert a pydantic model to a dict
 
         Args:
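As a quick sanity check on the `_pydantic.py` changes, here is a sketch of what the pydantic-v2 branch of these helpers returns with the new annotations. It assumes pydantic v2 is installed; the `Message` model is a hypothetical example, not part of the PR:

```python
from typing import Any

from pydantic import BaseModel, TypeAdapter


def type2schema(t: Any) -> dict[str, Any]:
    # PEP 585 built-in generic in the return annotation, as in the diff
    return TypeAdapter(t).json_schema()


class Message(BaseModel):
    role: str
    content: str


print(type2schema(list[dict[str, str]]))
# roughly: {'type': 'array', 'items': {'type': 'object', 'additionalProperties': {'type': 'string'}}}
print(Message(role="user", content="hi").model_dump())
# {'role': 'user', 'content': 'hi'}
```
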
16 changes: 8 additions & 8 deletions autogen/agentchat/agent.py
@@ -28,7 +28,7 @@ def description(self) -> str:
 
     def send(
         self,
-        message: Union[Dict[str, Any], str],
+        message: Union[dict[str, Any], str],
         recipient: "Agent",
         request_reply: Optional[bool] = None,
     ) -> None:
@@ -44,7 +44,7 @@ def send(
 
     async def a_send(
         self,
-        message: Union[Dict[str, Any], str],
+        message: Union[dict[str, Any], str],
         recipient: "Agent",
         request_reply: Optional[bool] = None,
     ) -> None:
@@ -60,7 +60,7 @@ async def a_send(
 
     def receive(
         self,
-        message: Union[Dict[str, Any], str],
+        message: Union[dict[str, Any], str],
         sender: "Agent",
         request_reply: Optional[bool] = None,
     ) -> None:
@@ -75,7 +75,7 @@ def receive(
 
     async def a_receive(
         self,
-        message: Union[Dict[str, Any], str],
+        message: Union[dict[str, Any], str],
         sender: "Agent",
         request_reply: Optional[bool] = None,
     ) -> None:
@@ -91,10 +91,10 @@ async def a_receive(
 
     def generate_reply(
         self,
-        messages: Optional[List[Dict[str, Any]]] = None,
+        messages: Optional[list[dict[str, Any]]] = None,
         sender: Optional["Agent"] = None,
         **kwargs: Any,
-    ) -> Union[str, Dict[str, Any], None]:
+    ) -> Union[str, dict[str, Any], None]:
         """Generate a reply based on the received messages.
 
         Args:
@@ -109,10 +109,10 @@ def generate_reply(
 
     async def a_generate_reply(
         self,
-        messages: Optional[List[Dict[str, Any]]] = None,
+        messages: Optional[list[dict[str, Any]]] = None,
         sender: Optional["Agent"] = None,
         **kwargs: Any,
-    ) -> Union[str, Dict[str, Any], None]:
+    ) -> Union[str, dict[str, Any], None]:
         """(Async) Generate a reply based on the received messages.
 
         Args:
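The `Agent` protocol methods above change only their annotations, not their behavior. A hypothetical sketch (not part of the PR) of a class that structurally satisfies the updated `generate_reply` signature:

```python
from typing import Any, Optional, Union


class EchoAgent:
    """Hypothetical example: any object matching these signatures satisfies the protocol."""

    @property
    def description(self) -> str:
        return "Echoes the last message back to the sender."

    def generate_reply(
        self,
        messages: Optional[list[dict[str, Any]]] = None,
        sender: Optional["EchoAgent"] = None,
        **kwargs: Any,
    ) -> Union[str, dict[str, Any], None]:
        # Return the content of the most recent message, if any
        return messages[-1]["content"] if messages else None
```
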
4 changes: 2 additions & 2 deletions autogen/agentchat/assistant_agent.py
@@ -41,8 +41,8 @@ def __init__(
         self,
         name: str,
         system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
-        llm_config: Optional[Union[Dict, Literal[False]]] = None,
-        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
+        llm_config: Optional[Union[dict, Literal[False]]] = None,
+        is_termination_msg: Optional[Callable[[dict], bool]] = None,
         max_consecutive_auto_reply: Optional[int] = None,
         human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
         description: Optional[str] = None,
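For callers, nothing changes: `llm_config` still accepts a plain dict or the literal `False` to disable LLM calls. An illustrative construction, assuming the `autogen` package is installed; the model name and API key are placeholders:

```python
import autogen

assistant = autogen.AssistantAgent(
    name="assistant",
    llm_config={"config_list": [{"model": "gpt-4", "api_key": "sk-..."}]},
    # is_termination_msg receives a plain dict message
    is_termination_msg=lambda msg: "TERMINATE" in (msg.get("content") or ""),
)

# Literal False disables LLM-backed replies entirely
scripted = autogen.AssistantAgent(name="scripted", llm_config=False)
```
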
22 changes: 11 additions & 11 deletions autogen/agentchat/chat.py
@@ -18,7 +18,7 @@
 from .utils import consolidate_chat_info
 
 logger = logging.getLogger(__name__)
-Prerequisite = Tuple[int, int]
+Prerequisite = tuple[int, int]
 
 
 @dataclass
@@ -27,21 +27,21 @@ class ChatResult:
 
     chat_id: int = None
     """chat id"""
-    chat_history: List[Dict[str, Any]] = None
+    chat_history: list[dict[str, Any]] = None
     """The chat history."""
     summary: str = None
     """A summary obtained from the chat."""
-    cost: Dict[str, dict] = None  # keys: "usage_including_cached_inference", "usage_excluding_cached_inference"
+    cost: dict[str, dict] = None  # keys: "usage_including_cached_inference", "usage_excluding_cached_inference"
     """The cost of the chat.
     The value for each usage type is a dictionary containing cost information for that specific type.
     - "usage_including_cached_inference": Cost information on the total usage, including the tokens in cached inference.
     - "usage_excluding_cached_inference": Cost information on the usage of tokens, excluding the tokens in cache. No larger than "usage_including_cached_inference".
     """
-    human_input: List[str] = None
+    human_input: list[str] = None
     """A list of human input solicited during the chat."""
 
 
-def _validate_recipients(chat_queue: List[Dict[str, Any]]) -> None:
+def _validate_recipients(chat_queue: list[dict[str, Any]]) -> None:
     """
     Validate recipients exits and warn repetitive recipients.
     """
@@ -56,7 +56,7 @@ def _validate_recipients(chat_queue: List[Dict[str, Any]]) -> None:
     )
 
 
-def __create_async_prerequisites(chat_queue: List[Dict[str, Any]]) -> List[Prerequisite]:
+def __create_async_prerequisites(chat_queue: list[dict[str, Any]]) -> list[Prerequisite]:
     """
     Create list of Prerequisite (prerequisite_chat_id, chat_id)
     """
@@ -73,7 +73,7 @@ def __create_async_prerequisites(chat_queue: List[Dict[str, Any]]) -> List[Prerequisite]:
     return prerequisites
 
 
-def __find_async_chat_order(chat_ids: Set[int], prerequisites: List[Prerequisite]) -> List[int]:
+def __find_async_chat_order(chat_ids: set[int], prerequisites: list[Prerequisite]) -> list[int]:
     """Find chat order for async execution based on the prerequisite chats
 
     args:
@@ -122,7 +122,7 @@ def _post_process_carryover_item(carryover_item):
     return str(carryover_item)
 
 
-def __post_carryover_processing(chat_info: Dict[str, Any]) -> None:
+def __post_carryover_processing(chat_info: dict[str, Any]) -> None:
     iostream = IOStream.get_default()
 
     if "message" not in chat_info:
@@ -158,7 +158,7 @@ def __post_carryover_processing(chat_info: Dict[str, Any]) -> None:
     iostream.print(colored("\n" + "*" * 80, "blue"), flush=True, sep="")
 
 
-def initiate_chats(chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
+def initiate_chats(chat_queue: list[dict[str, Any]]) -> list[ChatResult]:
     """Initiate a list of chats.
     Args:
         chat_queue (List[Dict]): A list of dictionaries containing the information about the chats.
@@ -234,7 +234,7 @@ def _on_chat_future_done(chat_future: asyncio.Future, chat_id: int):
 
 
 async def _dependent_chat_future(
-    chat_id: int, chat_info: Dict[str, Any], prerequisite_chat_futures: Dict[int, asyncio.Future]
+    chat_id: int, chat_info: dict[str, Any], prerequisite_chat_futures: dict[int, asyncio.Future]
 ) -> asyncio.Task:
     """
     Create an async Task for each chat.
@@ -272,7 +272,7 @@ async def _dependent_chat_future(
     return chat_res_future
 
 
-async def a_initiate_chats(chat_queue: List[Dict[str, Any]]) -> Dict[int, ChatResult]:
+async def a_initiate_chats(chat_queue: list[dict[str, Any]]) -> dict[int, ChatResult]:
     """(async) Initiate a list of chats.
 
     args:
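The prerequisite handling above amounts to a topological ordering over `(prerequisite_chat_id, chat_id)` pairs. A standalone sketch of that idea using Kahn's algorithm; the function name and error handling are illustrative, not the module's private implementation:

```python
from collections import defaultdict, deque


def find_chat_order(chat_ids: set[int], prerequisites: list[tuple[int, int]]) -> list[int]:
    # prerequisites holds (prerequisite_chat_id, chat_id) pairs
    dependents = defaultdict(list)
    indegree = {chat_id: 0 for chat_id in chat_ids}
    for prereq, chat in prerequisites:
        dependents[prereq].append(chat)
        indegree[chat] += 1

    # Start with chats that have no prerequisites, release dependents as they finish
    ready = deque(chat_id for chat_id, deg in indegree.items() if deg == 0)
    order: list[int] = []
    while ready:
        chat = ready.popleft()
        order.append(chat)
        for nxt in dependents[chat]:
            indegree[nxt] -= 1
            if indegree[nxt] == 0:
                ready.append(nxt)
    if len(order) != len(chat_ids):
        raise ValueError("Cyclic prerequisites detected")
    return order


print(find_chat_order({0, 1, 2}, [(0, 1), (0, 2), (1, 2)]))  # [0, 1, 2]
```
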
34 changes: 17 additions & 17 deletions autogen/agentchat/contrib/agent_builder.py
@@ -22,7 +22,7 @@
 logger = logging.getLogger(__name__)
 
 
-def _config_check(config: Dict):
+def _config_check(config: dict):
     # check config loading
     assert config.get("coding", None) is not None, 'Missing "coding" in your config.'
     assert config.get("default_llm_config", None) is not None, 'Missing "default_llm_config" in your config.'
@@ -220,11 +220,11 @@ def __init__(
         self.config_file_location = config_file_location
 
         self.building_task: str = None
-        self.agent_configs: List[Dict] = []
-        self.open_ports: List[str] = []
-        self.agent_procs: Dict[str, Tuple[sp.Popen, str]] = {}
-        self.agent_procs_assign: Dict[str, Tuple[autogen.ConversableAgent, str]] = {}
-        self.cached_configs: Dict = {}
+        self.agent_configs: list[dict] = []
+        self.open_ports: list[str] = []
+        self.agent_procs: dict[str, tuple[sp.Popen, str]] = {}
+        self.agent_procs_assign: dict[str, tuple[autogen.ConversableAgent, str]] = {}
+        self.cached_configs: dict = {}
 
         self.max_agents = max_agents
 
@@ -236,8 +236,8 @@ def set_agent_model(self, model: str):
 
     def _create_agent(
         self,
-        agent_config: Dict,
-        member_name: List[str],
+        agent_config: dict,
+        member_name: list[str],
         llm_config: dict,
         use_oai_assistant: Optional[bool] = False,
     ) -> autogen.AssistantAgent:
@@ -366,14 +366,14 @@ def clear_all_agents(self, recycle_endpoint: Optional[bool] = True):
     def build(
         self,
         building_task: str,
-        default_llm_config: Dict,
+        default_llm_config: dict,
         coding: Optional[bool] = None,
-        code_execution_config: Optional[Dict] = None,
+        code_execution_config: Optional[dict] = None,
         use_oai_assistant: Optional[bool] = False,
         user_proxy: Optional[autogen.ConversableAgent] = None,
         max_agents: Optional[int] = None,
         **kwargs,
-    ) -> Tuple[List[autogen.ConversableAgent], Dict]:
+    ) -> tuple[list[autogen.ConversableAgent], dict]:
         """
         Auto build agents based on the building task.
 
@@ -496,15 +496,15 @@ def build_from_library(
         self,
         building_task: str,
         library_path_or_json: str,
-        default_llm_config: Dict,
+        default_llm_config: dict,
         top_k: int = 3,
         coding: Optional[bool] = None,
-        code_execution_config: Optional[Dict] = None,
+        code_execution_config: Optional[dict] = None,
         use_oai_assistant: Optional[bool] = False,
         embedding_model: Optional[str] = "all-mpnet-base-v2",
         user_proxy: Optional[autogen.ConversableAgent] = None,
         **kwargs,
-    ) -> Tuple[List[autogen.ConversableAgent], Dict]:
+    ) -> tuple[list[autogen.ConversableAgent], dict]:
         """
         Build agents from a library.
         The library is a list of agent configs, which contains the name and system_message for each agent.
@@ -551,7 +551,7 @@ def build_from_library(
         try:
             agent_library = json.loads(library_path_or_json)
         except json.decoder.JSONDecodeError:
-            with open(library_path_or_json, "r") as f:
+            with open(library_path_or_json) as f:
                 agent_library = json.load(f)
         except Exception as e:
             raise e
@@ -663,7 +663,7 @@ def build_from_library(
 
     def _build_agents(
         self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[autogen.ConversableAgent] = None, **kwargs
-    ) -> Tuple[List[autogen.ConversableAgent], Dict]:
+    ) -> tuple[list[autogen.ConversableAgent], dict]:
         """
         Build agents with generated configs.
 
@@ -731,7 +731,7 @@ def load(
         config_json: Optional[str] = None,
         use_oai_assistant: Optional[bool] = False,
         **kwargs,
-    ) -> Tuple[List[autogen.ConversableAgent], Dict]:
+    ) -> tuple[list[autogen.ConversableAgent], dict]:
         """
         Load building configs and call the build function to complete building without calling online LLMs' api.
 
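The `open(library_path_or_json, "r")` to `open(library_path_or_json)` change works because Python 3 opens files in text read mode by default. The surrounding string-or-path pattern, as a standalone sketch (hypothetical helper, not the PR's code):

```python
import json


def load_library(library_path_or_json: str) -> list[dict]:
    # Try to parse the argument as a JSON document first; if that fails,
    # treat it as a path to a JSON file on disk.
    try:
        return json.loads(library_path_or_json)
    except json.JSONDecodeError:
        with open(library_path_or_json) as f:  # "r" mode is the default
            return json.load(f)


print(load_library('[{"name": "coder", "system_message": "You write code."}]'))
```
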
6 changes: 3 additions & 3 deletions autogen/agentchat/contrib/agent_eval/agent_eval.py
@@ -15,7 +15,7 @@
 
 
 def generate_criteria(
-    llm_config: Optional[Union[Dict, Literal[False]]] = None,
+    llm_config: Optional[Union[dict, Literal[False]]] = None,
     task: Task = None,
     additional_instructions: str = "",
     max_round=2,
@@ -67,8 +67,8 @@ def generate_criteria(
 
 
 def quantify_criteria(
-    llm_config: Optional[Union[Dict, Literal[False]]] = None,
-    criteria: List[Criterion] = None,
+    llm_config: Optional[Union[dict, Literal[False]]] = None,
+    criteria: list[Criterion] = None,
     task: Task = None,
     test_case: str = "",
     ground_truth: str = "",
4 changes: 2 additions & 2 deletions autogen/agentchat/contrib/agent_eval/criterion.py
@@ -21,8 +21,8 @@ class Criterion(BaseModel):
 
     name: str
     description: str
-    accepted_values: List[str]
-    sub_criteria: List[Criterion] = list()
+    accepted_values: list[str]
+    sub_criteria: list[Criterion] = list()
 
     @staticmethod
     def parse_json_str(criteria: str):
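`Criterion` is a self-referential pydantic model, so `sub_criteria: list[Criterion]` relies on the class resolving its own forward reference. A minimal sketch of the same pattern, assuming pydantic v2; the field values are illustrative:

```python
from __future__ import annotations  # lets the class reference itself by name

from pydantic import BaseModel


class Criterion(BaseModel):
    name: str
    description: str
    accepted_values: list[str]
    sub_criteria: list[Criterion] = []  # pydantic copies mutable defaults per instance


c = Criterion(
    name="clarity",
    description="Is the answer clear?",
    accepted_values=["yes", "no"],
    sub_criteria=[
        Criterion(name="grammar", description="Is it grammatical?", accepted_values=["yes", "no"])
    ],
)
print(c.model_dump()["sub_criteria"][0]["name"])  # grammar
```
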
10 changes: 5 additions & 5 deletions autogen/agentchat/contrib/agent_optimizer.py
@@ -217,7 +217,7 @@ def __init__(
         )
         self._client = autogen.OpenAIWrapper(**self.llm_config)
 
-    def record_one_conversation(self, conversation_history: List[Dict], is_satisfied: bool = None):
+    def record_one_conversation(self, conversation_history: list[dict], is_satisfied: bool = None):
         """
         record one conversation history.
         Args:
@@ -234,10 +234,10 @@ def record_one_conversation(self, conversation_history: List[Dict], is_satisfied
         ], "The input is invalid. Please input 1 or 0. 1 represents satisfied. 0 represents not satisfied."
         is_satisfied = True if reply == "1" else False
         self._trial_conversations_history.append(
-            {"Conversation {i}".format(i=len(self._trial_conversations_history)): conversation_history}
+            {f"Conversation {len(self._trial_conversations_history)}": conversation_history}
         )
         self._trial_conversations_performance.append(
-            {"Conversation {i}".format(i=len(self._trial_conversations_performance)): 1 if is_satisfied else 0}
+            {f"Conversation {len(self._trial_conversations_performance)}": 1 if is_satisfied else 0}
         )
 
     def step(self):
@@ -290,8 +290,8 @@ def step(self):
         incumbent_functions = self._update_function_call(incumbent_functions, actions)
 
         remove_functions = list(
-            set([key for dictionary in self._trial_functions for key in dictionary.keys()])
-            - set([key for dictionary in incumbent_functions for key in dictionary.keys()])
+            {key for dictionary in self._trial_functions for key in dictionary.keys()}
+            - {key for dictionary in incumbent_functions for key in dictionary.keys()}
         )
 
         register_for_llm = []
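Two modernizations in this file go beyond PEP 585: set comprehensions replace `set([...])`, and f-strings replace `str.format`. A small demonstration with made-up data:

```python
trial = [{"search": 1, "fetch": 2}, {"rank": 3}]
incumbent = [{"fetch": 2}]

# Before: build an intermediate list, then wrap it in set()
old_style = set([key for d in trial for key in d.keys()]) - set([key for d in incumbent for key in d.keys()])

# After: set comprehensions skip the intermediate lists
new_style = {key for d in trial for key in d} - {key for d in incumbent for key in d}
assert old_style == new_style == {"search", "rank"}

# Likewise, f-strings replace str.format for simple interpolation
i = 3
assert "Conversation {i}".format(i=i) == f"Conversation {i}"
```
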