From e6237d44a1b2383cc246995db83d468a1d4f246a Mon Sep 17 00:00:00 2001
From: Chi Wang
Date: Wed, 27 Mar 2024 20:43:01 -0700
Subject: [PATCH] simplify getting-started; update news (#2175)

* simplify getting-started; update news

* bug fix
---
 README.md                                     | 12 +++--
 .../docker_commandline_code_executor.py       |  3 +-
 .../coding/local_commandline_code_executor.py |  3 +-
 autogen/version.py                            |  2 +-
 test/coding/test_commandline_code_executor.py |  6 ++-
 website/docs/Getting-Started.mdx              | 49 +++++++++----------
 6 files changed, 38 insertions(+), 37 deletions(-)

diff --git a/README.md b/README.md
index 76f469ecef5f..a5f4ea36f73b 100644
--- a/README.md
+++ b/README.md
@@ -12,23 +12,25 @@

 -->
 
+:fire: Mar 26: Andrew Ng gave a shoutout to AutoGen in [What's next for AI agentic workflows](https://youtu.be/sal78ACtGTc?si=JduUzN_1kDnMq0vF) at Sequoia Capital's AI Ascent.
+
 :fire: Mar 3: What's new in AutoGen? 📰[Blog](https://microsoft.github.io/autogen/blog/2024/03/03/AutoGen-Update); 📺[Youtube](https://www.youtube.com/watch?v=j_mtwQiaLGU).
 
 :fire: Mar 1: the first AutoGen multi-agent experiment on the challenging [GAIA](https://huggingface.co/spaces/gaia-benchmark/leaderboard) benchmark achieved the No. 1 accuracy in all the three levels.
 
-:fire: Jan 30: AutoGen is highlighted by Peter Lee in Microsoft Research Forum [Keynote](https://t.co/nUBSjPDjqD).
+:tada: Jan 30: AutoGen is highlighted by Peter Lee in Microsoft Research Forum [Keynote](https://t.co/nUBSjPDjqD).
 
-:fire: Dec 31: [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155) is selected by [TheSequence: My Five Favorite AI Papers of 2023](https://thesequence.substack.com/p/my-five-favorite-ai-papers-of-2023).
+:tada: Dec 31: [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155) is selected by [TheSequence: My Five Favorite AI Papers of 2023](https://thesequence.substack.com/p/my-five-favorite-ai-papers-of-2023).
 
-:fire: Nov 8: AutoGen is selected into [Open100: Top 100 Open Source achievements](https://www.benchcouncil.org/evaluation/opencs/annual.html) 35 days after spinoff.
+:tada: Nov 8: AutoGen is selected into [Open100: Top 100 Open Source achievements](https://www.benchcouncil.org/evaluation/opencs/annual.html) 35 days after spinoff.
 
-:fire: Nov 6: AutoGen is mentioned by Satya Nadella in a [fireside chat](https://youtu.be/0pLBvgYtv6U).
+:tada: Nov 6: AutoGen is mentioned by Satya Nadella in a [fireside chat](https://youtu.be/0pLBvgYtv6U).
 
-:fire: Nov 1: AutoGen is the top trending repo on GitHub in October 2023.
+:tada: Nov 1: AutoGen is the top trending repo on GitHub in October 2023.
 
 :tada: Oct 03: AutoGen spins off from FLAML on GitHub and has a major paper update (first version on Aug 16).
 
diff --git a/autogen/coding/docker_commandline_code_executor.py b/autogen/coding/docker_commandline_code_executor.py
index f1db7cd07e74..8d8931a830f1 100644
--- a/autogen/coding/docker_commandline_code_executor.py
+++ b/autogen/coding/docker_commandline_code_executor.py
@@ -83,8 +83,7 @@ def __init__(
         if isinstance(work_dir, str):
             work_dir = Path(work_dir)
 
-        if not work_dir.exists():
-            raise ValueError(f"Working directory {work_dir} does not exist.")
+        work_dir.mkdir(exist_ok=True)
 
         client = docker.from_env()
 
diff --git a/autogen/coding/local_commandline_code_executor.py b/autogen/coding/local_commandline_code_executor.py
index b75f54ff121b..4bcf988746e1 100644
--- a/autogen/coding/local_commandline_code_executor.py
+++ b/autogen/coding/local_commandline_code_executor.py
@@ -71,8 +71,7 @@ def __init__(
         if isinstance(work_dir, str):
             work_dir = Path(work_dir)
 
-        if not work_dir.exists():
-            raise ValueError(f"Working directory {work_dir} does not exist.")
+        work_dir.mkdir(exist_ok=True)
 
         self._timeout = timeout
         self._work_dir: Path = work_dir
diff --git a/autogen/version.py b/autogen/version.py
index 198d6db62730..f2892741976e 100644
--- a/autogen/version.py
+++ b/autogen/version.py
@@ -1 +1 @@
-__version__ = "0.2.20"
+__version__ = "0.2.21"
diff --git a/test/coding/test_commandline_code_executor.py b/test/coding/test_commandline_code_executor.py
index aeb62349b388..49d4d35cf10e 100644
--- a/test/coding/test_commandline_code_executor.py
+++ b/test/coding/test_commandline_code_executor.py
@@ -1,5 +1,6 @@
 from pathlib import Path
 import sys
+import os
 import tempfile
 import uuid
 import pytest
@@ -10,7 +11,8 @@
 from autogen.coding.docker_commandline_code_executor import DockerCommandLineCodeExecutor
 from autogen.coding.local_commandline_code_executor import LocalCommandLineCodeExecutor
 
-from conftest import MOCK_OPEN_AI_API_KEY, skip_docker
+sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+from conftest import MOCK_OPEN_AI_API_KEY, skip_docker  # noqa: E402
 
 if skip_docker or not is_docker_running():
     classes_to_test = [LocalCommandLineCodeExecutor]
@@ -52,7 +54,7 @@ def test_commandline_executor_init(cls) -> None:
     assert executor.timeout == 10 and str(executor.work_dir) == "."
 
     # Try invalid working directory.
-    with pytest.raises(ValueError, match="Working directory .* does not exist."):
+    with pytest.raises(FileNotFoundError):
         executor = cls(timeout=111, work_dir="/invalid/directory")
 
diff --git a/website/docs/Getting-Started.mdx b/website/docs/Getting-Started.mdx
index 0e90a322f554..a933ae3cc500 100644
--- a/website/docs/Getting-Started.mdx
+++ b/website/docs/Getting-Started.mdx
@@ -38,30 +38,37 @@ pip install pyautogen
 ```
 
+
+```python
+from autogen import AssistantAgent, UserProxyAgent
+
+llm_config = {"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}
+assistant = AssistantAgent("assistant", llm_config=llm_config)
+user_proxy = UserProxyAgent("user_proxy", code_execution_config=False)
+
+# Start the chat
+user_proxy.initiate_chat(
+    assistant,
+    message="Tell me a joke about NVDA and TESLA stock prices.",
+)
+```
+
+
 :::warning
 When asked, be sure to check the generated code before continuing to ensure it is safe to run.
 :::
 
 ```python
+import autogen
 from autogen import AssistantAgent, UserProxyAgent
-from autogen.coding import LocalCommandLineCodeExecutor
-
-import os
-from pathlib import Path
-
-llm_config = {
-    "config_list": [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}],
-}
-
-work_dir = Path("coding")
-work_dir.mkdir(exist_ok=True)
+llm_config = {"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}
 
 assistant = AssistantAgent("assistant", llm_config=llm_config)
-code_executor = LocalCommandLineCodeExecutor(work_dir=work_dir)
 user_proxy = UserProxyAgent(
-    "user_proxy", code_execution_config={"executor": code_executor}
+    "user_proxy", code_execution_config={"executor": autogen.coding.LocalCommandLineCodeExecutor(work_dir="coding")}
 )
 
 # Start the chat
@@ -75,20 +82,12 @@ user_proxy.initiate_chat(
 ```python
+import autogen
 from autogen import AssistantAgent, UserProxyAgent
-from autogen.coding import DockerCommandLineCodeExecutor
-
-import os
-from pathlib import Path
 
-llm_config = {
-    "config_list": [{"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}],
-}
+llm_config = {"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]}
 
-work_dir = Path("coding")
-work_dir.mkdir(exist_ok=True)
-
-with DockerCommandLineCodeExecutor(work_dir=work_dir) as code_executor:
+with autogen.coding.DockerCommandLineCodeExecutor(work_dir="coding") as code_executor:
     assistant = AssistantAgent("assistant", llm_config=llm_config)
     user_proxy = UserProxyAgent(
         "user_proxy", code_execution_config={"executor": code_executor}
     )
@@ -103,7 +102,7 @@ with DockerCommandLineCodeExecutor(work_dir=work_dir) as code_executor:
 
 Open `coding/plot.png` to see the generated plot.
 
-
+
 :::tip
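
The key behavioral change in this patch: both command-line code executors now call `work_dir.mkdir(exist_ok=True)` instead of raising `ValueError` for a missing working directory, so the directory is created on demand. Because `parents=True` is not passed, `pathlib` raises `FileNotFoundError` when the parent path itself is missing, which is why the test expectation changed from `ValueError` to `FileNotFoundError`. Below is a minimal standalone sketch of that standard-library behavior, not part of the patch, assuming `/invalid` does not exist on the machine running it:

```python
from pathlib import Path

# With exist_ok=True, mkdir() creates the directory if it is missing and
# is a no-op if the directory already exists, so a fresh work_dir such as
# "coding" now works without pre-creating it.
work_dir = Path("coding")
work_dir.mkdir(exist_ok=True)

# Without parents=True, mkdir() raises FileNotFoundError when the parent
# directory is missing -- matching the updated pytest.raises expectation
# for work_dir="/invalid/directory".
try:
    Path("/invalid/directory").mkdir(exist_ok=True)
except FileNotFoundError as err:
    print(f"expected failure: {err}")
```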