From 433dc30198ba5e2f04ea6800de798633b66c0f49 Mon Sep 17 00:00:00 2001
From: skzhang1
Date: Sat, 16 Nov 2024 16:00:33 -0500
Subject: [PATCH 1/4] update

---
 NOTICE.md | 8 ++++----
 README.md | 6 +++---
 TRANSPARENCY_FAQS.md | 2 +-
 .../blog/2023-10-18-RetrieveChat/index.mdx | 12 +++++------
 .../blog/2023-10-26-TeachableAgent/index.mdx | 8 ++++----
 website/blog/2023-11-20-AgentEval/index.mdx | 4 ++--
 .../blog/2023-11-26-Agent-AutoBuild/index.mdx | 4 ++--
 .../blog/2024-01-25-AutoGenBench/index.mdx | 2 +-
 .../blog/2024-01-26-Custom-Models/index.mdx | 2 +-
 .../blog/2024-03-03-AutoGen-Update/index.mdx | 8 ++++----
 website/blog/2024-06-21-AgentEval/index.mdx | 8 ++++----
 .../2024-06-24-AltModels-Classes/index.mdx | 4 ++--
 .../docs/contributor-guide/contributing.md | 6 +++---
 website/docs/contributor-guide/docker.md | 16 +++++++--------
 .../docs/contributor-guide/documentation.md | 2 +-
 website/docs/ecosystem/llamaindex.md | 2 +-
 website/docs/installation/Docker.md | 20 +++++++++----------
 website/docusaurus.config.js | 8 ++++----
 website/process_notebooks.py | 2 +-
 .../ExploreContent/ExploreContent.js | 12 +++++------
 .../HomepageFeatures/HomepageFeatures.js | 6 +++---
 .../PopularResource/PopularResource.js | 6 +++---
 website/static/img/autogen.svg | 2 +-
 23 files changed, 75 insertions(+), 75 deletions(-)

diff --git a/NOTICE.md b/NOTICE.md
index 0065be8674..6abbf5912c 100644
--- a/NOTICE.md
+++ b/NOTICE.md
@@ -2,9 +2,9 @@
 Copyright (c) 2023-2024, Owners of https://github.com/ag2ai
 
-This project is a fork of https://github.com/microsoft/autogen.
+This project is a fork of https://github.com/ag2ai/ag2.
 
-The [original project](https://github.com/microsoft/autogen) is licensed under the MIT License as detailed in [LICENSE_original_MIT](./license_original/LICENSE_original_MIT). The fork was created from version v0.2.35 of the original project.
+The [original project](https://github.com/ag2ai/ag2) is licensed under the MIT License as detailed in [LICENSE_original_MIT](./license_original/LICENSE_original_MIT). The fork was created from version v0.2.35 of the original project.
 
 This project, i.e., https://github.com/ag2ai/ag2, is licensed under the Apache License, Version 2.0 as detailed in [LICENSE](./LICENSE)
 
@@ -13,7 +13,7 @@ This project, i.e., https://github.com/ag2ai/ag2, is licensed under the Apache L
 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
 
 Ongoing MIT-licensed contributions:
-This project regularly incorporates code merged from the [original repository](https://github.com/microsoft/autogen) after the initial fork. This merged code remains under the original MIT license. For specific details on merged commits, please refer to the project's commit history.
-The MIT license applies to portions of code originating from the [original repository](https://github.com/microsoft/autogen) as described above.
+This project regularly incorporates code merged from the [original repository](https://github.com/ag2ai/ag2) after the initial fork. This merged code remains under the original MIT license. For specific details on merged commits, please refer to the project's commit history.
+The MIT license applies to portions of code originating from the [original repository](https://github.com/ag2ai/ag2) as described above.
 Last updated: 08/25/2024
diff --git a/README.md b/README.md
index 56cc33c3a1..9a3509629c 100644
--- a/README.md
+++ b/README.md
@@ -59,7 +59,7 @@ We adopt the Apache 2.0 license from v0.3. This enhances our commitment to open-
 :tada: Dec 31, 2023: [AutoGen: Enabling Next-Gen LLM Applications via Multi-Agent Conversation Framework](https://arxiv.org/abs/2308.08155) is selected by [TheSequence: My Five Favorite AI Papers of 2023](https://thesequence.substack.com/p/my-five-favorite-ai-papers-of-2023).
 
-
+
@@ -335,9 +335,9 @@ Explore detailed implementations with sample code and applications to help you g
 ## License
 
 This project is licensed under the [Apache License, Version 2.0 (Apache-2.0)](./LICENSE).
 
-This project is a spin-off of https://github.com/microsoft/autogen and contains code under two licenses:
+This project is a spin-off of https://github.com/ag2ai/ag2 and contains code under two licenses:
 
-- The original code from https://github.com/microsoft/autogen is licensed under the MIT License. See the [LICENSE_original_MIT](./license_original/LICENSE_original_MIT) file for details.
+- The original code from https://github.com/ag2ai/ag2 is licensed under the MIT License. See the [LICENSE_original_MIT](./license_original/LICENSE_original_MIT) file for details.
 
 - Modifications and additions made in this fork are licensed under the Apache License, Version 2.0. See the [LICENSE](./LICENSE) file for the full license text.
diff --git a/TRANSPARENCY_FAQS.md b/TRANSPARENCY_FAQS.md
index addf29d8b8..dcdaec8ea4 100644
--- a/TRANSPARENCY_FAQS.md
+++ b/TRANSPARENCY_FAQS.md
@@ -31,7 +31,7 @@ While AutoGen automates LLM workflows, decisions about how to use specific LLM o
 - Current version of AutoGen was evaluated on six applications to illustrate its potential in simplifying the development of high-performance multi-agent applications. These applications are selected based on their real-world relevance, problem difficulty and problem solving capabilities enabled by AutoGen, and innovative potential.
 - These applications involve using AutoGen to solve math problems, question answering, decision making in text world environments, supply chain optimization, etc. For each of these domains AutoGen was evaluated on various success based metrics (i.e., how often the AutoGen based implementation solved the task). And, in some cases, AutoGen based approach was also evaluated on implementation efficiency (e.g., to track reductions in developer effort to build). More details can be found at: https://aka.ms/AutoGen/TechReport
 - The team has conducted tests where a “red” agent attempts to get the default AutoGen assistant to break from its alignment and guardrails. The team has observed that out of 70 attempts to break guardrails, only 1 was successful in producing text that would have been flagged as problematic by Azure OpenAI filters. The team has not observed any evidence that AutoGen (or GPT models as hosted by OpenAI or Azure) can produce novel code exploits or jailbreak prompts, since direct prompts to “be a hacker”, “write exploits”, or “produce a phishing email” are refused by existing filters.
-- We also evaluated [a team of AutoGen agents](https://github.com/microsoft/autogen/tree/gaia_multiagent_v01_march_1st/samples/tools/autogenbench/scenarios/GAIA/Templates/Orchestrator) on the [GAIA benchmarks](https://arxiv.org/abs/2311.12983), and got [SOTA results](https://huggingface.co/spaces/gaia-benchmark/leaderboard) as of
+- We also evaluated [a team of AutoGen agents](https://github.com/ag2ai/ag2/tree/gaia_multiagent_v01_march_1st/samples/tools/autogenbench/scenarios/GAIA/Templates/Orchestrator) on the [GAIA benchmarks](https://arxiv.org/abs/2311.12983), and got [SOTA results](https://huggingface.co/spaces/gaia-benchmark/leaderboard) as of
 March 1, 2024.
 
 ## What are the limitations of AutoGen? How can users minimize the impact of AutoGen’s limitations when using the system?
diff --git a/website/blog/2023-10-18-RetrieveChat/index.mdx b/website/blog/2023-10-18-RetrieveChat/index.mdx
index c264f05779..abc95a3fe1 100644
--- a/website/blog/2023-10-18-RetrieveChat/index.mdx
+++ b/website/blog/2023-10-18-RetrieveChat/index.mdx
@@ -204,7 +204,7 @@ We are using chromadb as the default vector database, you can also use mongodb,
 by simply set `vector_db` to `mongodb`, `pgvector` and `qdrant` in `retrieve_config`, respectively.
 To plugin any other dbs, you can also extend class `agentchat.contrib.vectordb.base`,
-check out the code [here](https://github.com/microsoft/autogen/blob/main/autogen/agentchat/contrib/vectordb/base.py).
+check out the code [here](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/contrib/vectordb/base.py).
 
 ## Advanced Usage of RAG Agents
@@ -399,8 +399,8 @@ The online app and the source code are hosted in [HuggingFace](https://huggingfa
 ## Read More
 
 You can check out more example notebooks for RAG use cases:
-- [Automated Code Generation and Question Answering with Retrieval Augmented Agents](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_RetrieveChat.ipynb)
-- [Group Chat with Retrieval Augmented Generation (with 5 group member agents and 1 manager agent)](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat_RAG.ipynb)
-- [Using RetrieveChat with Qdrant for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_RetrieveChat_qdrant.ipynb)
-- [Using RetrieveChat Powered by PGVector for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_RetrieveChat_pgvector.ipynb)
-- [Using RetrieveChat Powered by MongoDB Atlas for Retrieve Augmented Code Generation and Question Answering](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_RetrieveChat_mongodb.ipynb)
+- [Automated Code Generation and Question Answering with Retrieval Augmented Agents](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_RetrieveChat.ipynb)
+- [Group Chat with Retrieval Augmented Generation (with 5 group member agents and 1 manager agent)](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_groupchat_RAG.ipynb)
+- [Using RetrieveChat with Qdrant for Retrieve Augmented Code Generation and Question Answering](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_RetrieveChat_qdrant.ipynb)
+- [Using RetrieveChat Powered by PGVector for Retrieve Augmented Code Generation and Question Answering](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_RetrieveChat_pgvector.ipynb)
+- [Using RetrieveChat Powered by MongoDB Atlas for Retrieve Augmented Code Generation and Question Answering](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_RetrieveChat_mongodb.ipynb)
diff --git a/website/blog/2023-10-26-TeachableAgent/index.mdx b/website/blog/2023-10-26-TeachableAgent/index.mdx
index c567d5e41f..6b82dc495e 100644
--- a/website/blog/2023-10-26-TeachableAgent/index.mdx
+++ b/website/blog/2023-10-26-TeachableAgent/index.mdx
@@ -24,13 +24,13 @@ In order to make effective decisions about memo storage and retrieval, the `Teac
 AutoGen contains four code examples that use `Teachability`.
 
-1. Run [chat_with_teachable_agent.py](https://github.com/microsoft/autogen/blob/main/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py) to converse with a teachable agent.
+1. Run [chat_with_teachable_agent.py](https://github.com/ag2ai/ag2/blob/main/test/agentchat/contrib/capabilities/chat_with_teachable_agent.py) to converse with a teachable agent.
 
-2. Run [test_teachable_agent.py](https://github.com/microsoft/autogen/blob/main/test/agentchat/contrib/capabilities/test_teachable_agent.py) for quick unit testing of a teachable agent.
+2. Run [test_teachable_agent.py](https://github.com/ag2ai/ag2/blob/main/test/agentchat/contrib/capabilities/test_teachable_agent.py) for quick unit testing of a teachable agent.
 
-3. Use the Jupyter notebook [agentchat_teachability.ipynb](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_teachability.ipynb) to step through examples discussed below.
+3. Use the Jupyter notebook [agentchat_teachability.ipynb](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_teachability.ipynb) to step through examples discussed below.
 
-4. Use the Jupyter notebook [agentchat_teachable_oai_assistants.ipynb](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_teachable_oai_assistants.ipynb) to make arbitrary OpenAI Assistants teachable through `GPTAssistantAgent`.
+4. Use the Jupyter notebook [agentchat_teachable_oai_assistants.ipynb](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_teachable_oai_assistants.ipynb) to make arbitrary OpenAI Assistants teachable through `GPTAssistantAgent`.
 
 ## Basic Usage of Teachability
diff --git a/website/blog/2023-11-20-AgentEval/index.mdx b/website/blog/2023-11-20-AgentEval/index.mdx
index b40d774810..b80592102a 100644
--- a/website/blog/2023-11-20-AgentEval/index.mdx
+++ b/website/blog/2023-11-20-AgentEval/index.mdx
@@ -14,7 +14,7 @@ tags: [LLM, GPT, evaluation, task utility]
 **TL;DR:**
 * As a developer of an LLM-powered application, how can you assess the utility it brings to end users while helping them with their tasks?
 * To shed light on the question above, we introduce `AgentEval` — the first version of the framework to assess the utility of any LLM-powered application crafted to assist users in specific tasks. AgentEval aims to simplify the evaluation process by automatically proposing a set of criteria tailored to the unique purpose of your application. This allows for a comprehensive assessment, quantifying the utility of your application against the suggested criteria.
-* We demonstrate how `AgentEval` work using [math problems dataset](https://ag2ai.github.io/autogen/blog/2023/06/28/MathChat) as an example in the [following notebook](https://github.com/microsoft/autogen/blob/main/notebook/agenteval_cq_math.ipynb). Any feedback would be useful for future development. Please contact us on our [Discord](http://aka.ms/autogen-dc).
+* We demonstrate how `AgentEval` work using [math problems dataset](https://ag2ai.github.io/autogen/blog/2023/06/28/MathChat) as an example in the [following notebook](https://github.com/ag2ai/ag2/blob/main/notebook/agenteval_cq_math.ipynb). Any feedback would be useful for future development. Please contact us on our [Discord](http://aka.ms/autogen-dc).
 
 ## Introduction
@@ -54,7 +54,7 @@ critic = autogen.AssistantAgent(
 )
 ```
 
-Next, the critic is given successful and failed examples of the task execution; then, it is able to return a list of criteria (Fig. 1). For reference, use the [following notebook](https://github.com/microsoft/autogen/blob/main/notebook/agenteval_cq_math.ipynb).
+Next, the critic is given successful and failed examples of the task execution; then, it is able to return a list of criteria (Fig. 1). For reference, use the [following notebook](https://github.com/ag2ai/ag2/blob/main/notebook/agenteval_cq_math.ipynb).
 
 * The goal of `QuantifierAgent` is to quantify each of the suggested criteria (Fig. 1), providing us with an idea of the utility of this system for the given task. Here is an example of how it can be defined:
diff --git a/website/blog/2023-11-26-Agent-AutoBuild/index.mdx b/website/blog/2023-11-26-Agent-AutoBuild/index.mdx
index 762b98dfa3..3272e7ce18 100644
--- a/website/blog/2023-11-26-Agent-AutoBuild/index.mdx
+++ b/website/blog/2023-11-26-Agent-AutoBuild/index.mdx
@@ -14,8 +14,8 @@ user prompt required, powered by a new designed class **AgentBuilder**. AgentBui
 leveraging [vLLM](https://docs.vllm.ai/en/latest/index.html) and [FastChat](https://github.com/lm-sys/FastChat).
 Checkout example notebooks and source code for reference:
-- [AutoBuild Examples](https://github.com/microsoft/autogen/blob/main/notebook/autobuild_basic.ipynb)
-- [AgentBuilder](https://github.com/microsoft/autogen/blob/main/autogen/agentchat/contrib/agent_builder.py)
+- [AutoBuild Examples](https://github.com/ag2ai/ag2/blob/main/notebook/autobuild_basic.ipynb)
+- [AgentBuilder](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/contrib/agent_builder.py)
 
 ## Introduction
 In this blog, we introduce **AutoBuild**, a pipeline that can automatically build multi-agent systems for complex tasks.
diff --git a/website/blog/2024-01-25-AutoGenBench/index.mdx b/website/blog/2024-01-25-AutoGenBench/index.mdx
index c148dedfa5..b2d8b68fe5 100644
--- a/website/blog/2024-01-25-AutoGenBench/index.mdx
+++ b/website/blog/2024-01-25-AutoGenBench/index.mdx
@@ -141,7 +141,7 @@ While we are announcing AutoGenBench, we note that it is very much an evolving p
 - Introduce new core metrics including total costs, task completion time, conversation turns, etc.
 - Provide tighter integration with AgentEval and AutoGen Studio
 
-For an up to date tracking of our work items on this project, please see [AutoGenBench Work Items](https://github.com/microsoft/autogen/issues/973)
+For an up to date tracking of our work items on this project, please see [AutoGenBench Work Items](https://github.com/ag2ai/ag2/issues/973)
 
 ## Call for Participation
diff --git a/website/blog/2024-01-26-Custom-Models/index.mdx b/website/blog/2024-01-26-Custom-Models/index.mdx
index 81a9ad383d..d97685ebe4 100644
--- a/website/blog/2024-01-26-Custom-Models/index.mdx
+++ b/website/blog/2024-01-26-Custom-Models/index.mdx
@@ -13,7 +13,7 @@ AutoGen now supports custom models! This feature empowers users to define and lo
 ## Quickstart
 
-An interactive and easy way to get started is by following the notebook [here](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_custom_model.ipynb) which loads a local model from HuggingFace into AutoGen and uses it for inference, and making changes to the class provided.
+An interactive and easy way to get started is by following the notebook [here](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_custom_model.ipynb) which loads a local model from HuggingFace into AutoGen and uses it for inference, and making changes to the class provided.
 
 ### Step 1: Create the custom model client class
diff --git a/website/blog/2024-03-03-AutoGen-Update/index.mdx b/website/blog/2024-03-03-AutoGen-Update/index.mdx
index ca5a7713b3..80af9db237 100644
--- a/website/blog/2024-03-03-AutoGen-Update/index.mdx
+++ b/website/blog/2024-03-03-AutoGen-Update/index.mdx
@@ -138,7 +138,7 @@ The community has been working hard to address them in several dimensions:
 We are working on agent-based evaluation tools and benchmarking tools. For example:
 
-- [AgentEval](/blog/2023/11/20/AgentEval). Our [research](https://arxiv.org/abs/2402.09015) finds that LLM agents built with AutoGen can be used to automatically identify evaluation criteria and assess the performance from task descriptions and execution logs. It is demonstrated as a [notebook example](https://github.com/microsoft/autogen/blob/main/notebook/agenteval_cq_math.ipynb). Feedback and help are welcome for building it into the library.
+- [AgentEval](/blog/2023/11/20/AgentEval). Our [research](https://arxiv.org/abs/2402.09015) finds that LLM agents built with AutoGen can be used to automatically identify evaluation criteria and assess the performance from task descriptions and execution logs. It is demonstrated as a [notebook example](https://github.com/ag2ai/ag2/blob/main/notebook/agenteval_cq_math.ipynb). Feedback and help are welcome for building it into the library.
 - [AutoGenBench](/blog/2024/01/25/AutoGenBench). AutoGenBench is a commandline tool for downloading, configuring, running an agentic benchmark, and reporting results. It is designed to allow repetition, isolation and instrumentation, leveraging the new [runtime logging](/docs/notebooks/agentchat_logging) feature.
 
 These tools have been used for improving the AutoGen library as well as applications. For example, the new state-of-the-art performance achieved by a multi-agent solution to the [GAIA](https://huggingface.co/spaces/gaia-benchmark/leaderboard) benchmark has benefited from these evaluation tools.
@@ -147,7 +147,7 @@ These tools have been used for improving the AutoGen library as well as applicat
 We are making rapid progress in further improving the interface to make it even easier to build agent applications. For example:
 
-- [AutoBuild](/blog/2023/11/26/Agent-AutoBuild). AutoBuild is an ongoing area of research to automatically create or select a group of agents for a given task and objective. If successful, it will greatly reduce the effort from users or developers when using the multi-agent technology. It also paves the way for agentic decomposition to handle complex tasks. It is available as an experimental feature and demonstrated in two modes: free-form [creation](https://github.com/microsoft/autogen/blob/main/notebook/autobuild_basic.ipynb) and [selection](https://github.com/microsoft/autogen/blob/main/notebook/autobuild_agent_library.ipynb) from a library.
+- [AutoBuild](/blog/2023/11/26/Agent-AutoBuild). AutoBuild is an ongoing area of research to automatically create or select a group of agents for a given task and objective. If successful, it will greatly reduce the effort from users or developers when using the multi-agent technology. It also paves the way for agentic decomposition to handle complex tasks. It is available as an experimental feature and demonstrated in two modes: free-form [creation](https://github.com/ag2ai/ag2/blob/main/notebook/autobuild_basic.ipynb) and [selection](https://github.com/ag2ai/ag2/blob/main/notebook/autobuild_agent_library.ipynb) from a library.
 - [AutoGen Studio](/blog/2023/12/01/AutoGenStudio). AutoGen Studio is a no-code UI for fast experimentation with the multi-agent conversations. It lowers the barrier of entrance to the AutoGen technology. Models, agents, and workflows can all be configured without writing code. And chatting with multiple agents in a playground is immediately available after the configuration. Although only a subset of `autogen` features are available in this sample app, it demonstrates a promising experience. It has generated tremendous excitement in the community.
 - Conversation Programming+. The [AutoGen paper](https://arxiv.org/abs/2308.08155) introduced a key concept of _Conversation Programming_, which can be used to program diverse conversation patterns such as 1-1 chat, group chat, hierarchical chat, nested chat etc. While we offered dynamic group chat as an example of high-level orchestration, it made other patterns relatively less discoverable. Therefore, we have added more convenient conversation programming features which enables easier definition of other types of complex workflow, such as [finite state machine based group chat](/blog/2024/02/11/FSM-GroupChat), [sequential chats](/docs/notebooks/agentchats_sequential_chats), and [nested chats](/docs/notebooks/agentchat_nestedchat). Many users have found them useful in implementing specific patterns, which have been always possible but more obvious with the added features. I will write another blog post for a deep dive.
@@ -173,8 +173,8 @@ The extensible design of AutoGen makes it easy to integrate with new technologie
 The above only covers a subset of new features and roadmap. There are many other interesting new features, integration examples or sample apps:
 
-- new features like stateful code execution, [tool decorators](/docs/Use-Cases/agent_chat#tool-calling), [long context handling](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_capability_long_context_handling.ipynb), [web agents](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_surfer.ipynb).
-- integration examples like using [guidance](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_guidance.ipynb) to generate structured response.
+- new features like stateful code execution, [tool decorators](/docs/Use-Cases/agent_chat#tool-calling), [long context handling](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_capability_long_context_handling.ipynb), [web agents](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_surfer.ipynb).
+- integration examples like using [guidance](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_guidance.ipynb) to generate structured response.
 - sample apps like [AutoAnny](/blog/2024/02/02/AutoAnny).
 ## Call for Help
diff --git a/website/blog/2024-06-21-AgentEval/index.mdx b/website/blog/2024-06-21-AgentEval/index.mdx
index 874c7fe060..0801faaae2 100644
--- a/website/blog/2024-06-21-AgentEval/index.mdx
+++ b/website/blog/2024-06-21-AgentEval/index.mdx
@@ -76,7 +76,7 @@ def generate_criteria(
 Parameters:
 * llm_config (dict or bool): llm inference configuration.
-* task ([Task](https://github.com/microsoft/autogen/tree/main/autogen/agentchat/contrib/agent_eval/task.py)): The task to evaluate.
+* task ([Task](https://github.com/ag2ai/ag2/tree/main/autogen/agentchat/contrib/agent_eval/task.py)): The task to evaluate.
 * additional_instructions (str, optional): Additional instructions for the criteria agent.
 * max_round (int, optional): The maximum number of rounds to run the conversation.
 * use_subcritic (bool, optional): Whether to use the Subcritic agent to generate subcriteria. The Subcritic agent will break down a generated criteria into smaller criteria to be assessed.
@@ -138,8 +138,8 @@ def quantify_criteria(
 Parameters:
 * llm_config (dict or bool): llm inference configuration.
-* criteria ([Criterion](https://github.com/microsoft/autogen/tree/main/autogen/agentchat/contrib/agent_eval/criterion.py)): A list of criteria for evaluating the utility of a given task. This can either be generated by the `generate_criteria` function or manually created.
-* task ([Task](https://github.com/microsoft/autogen/tree/main/autogen/agentchat/contrib/agent_eval/task.py)): The task to evaluate. It should match the one used during the `generate_criteria` step.
+* criteria ([Criterion](https://github.com/ag2ai/ag2/tree/main/autogen/agentchat/contrib/agent_eval/criterion.py)): A list of criteria for evaluating the utility of a given task. This can either be generated by the `generate_criteria` function or manually created.
+* task ([Task](https://github.com/ag2ai/ag2/tree/main/autogen/agentchat/contrib/agent_eval/task.py)): The task to evaluate. It should match the one used during the `generate_criteria` step.
 * test_case (str): The execution chain to assess. Typically this is a json list of messages but could be any string representation of a conversation chain.
 * ground_truth (str): The ground truth for the test case.
@@ -189,7 +189,7 @@ AgentEval represents a significant advancement in the evaluation of LLM-powered
 ## Further reading
 
-Please refer to our [paper](https://arxiv.org/abs/2405.02178) and [codebase](https://github.com/microsoft/autogen/tree/main/autogen/agentchat/contrib/agent_eval) for more details about AgentEval.
+Please refer to our [paper](https://arxiv.org/abs/2405.02178) and [codebase](https://github.com/ag2ai/ag2/tree/main/autogen/agentchat/contrib/agent_eval) for more details about AgentEval.
 
 If you find this blog useful, please consider citing:
 
 ```bobtex
diff --git a/website/blog/2024-06-24-AltModels-Classes/index.mdx b/website/blog/2024-06-24-AltModels-Classes/index.mdx
index e74a9ac2b1..d624c80096 100644
--- a/website/blog/2024-06-24-AltModels-Classes/index.mdx
+++ b/website/blog/2024-06-24-AltModels-Classes/index.mdx
@@ -21,7 +21,7 @@ These new client classes tailor AutoGen's underlying messages to each provider's
 Using them is as simple as installing the client-specific library and updating your LLM config with the relevant `api_type` and `model`. We'll demonstrate how to use them below.
 
-The community is continuing to enhance and build new client classes as cloud-based inference providers arrive. So, watch this space, and feel free to [discuss](https://discord.gg/pAbnFJrkgZ) or [develop](https://github.com/microsoft/autogen/pulls) another one.
+The community is continuing to enhance and build new client classes as cloud-based inference providers arrive. So, watch this space, and feel free to [discuss](https://discord.gg/pAbnFJrkgZ) or [develop](https://github.com/ag2ai/ag2/pulls) another one.
 
 ## Benefits of choice
@@ -61,7 +61,7 @@ Here are some tips when working with these client classes:
 - **Context length** - as your conversation gets longer, models need to support larger context lengths, be mindful of what the model supports and consider using [Transform Messages](https://ag2ai.github.io/ag2/docs/topics/handling_long_contexts/intro_to_transform_messages) to manage context size.
 - **Provider parameters** - providers have parameters you can set such as temperature, maximum tokens, top-k, top-p, and safety. See each client class in AutoGen's [API Reference](https://ag2ai.github.io/ag2/docs/reference/oai/gemini) or [documentation](https://ag2ai.github.io/ag2/docs/topics/non-openai-models/cloud-gemini) for details.
 - **Prompts** - prompt engineering is critical in guiding smaller LLMs to do what you need. [ConversableAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/conversable_agent), [GroupChat](https://ag2ai.github.io/ag2/docs/reference/agentchat/groupchat), [UserProxyAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/user_proxy_agent), and [AssistantAgent](https://ag2ai.github.io/ag2/docs/reference/agentchat/assistant_agent) all have customizable prompt attributes that you can tailor. Here are some prompting tips from [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/overview)([+Library](https://docs.anthropic.com/en/prompt-library/library)), [Mistral AI](https://docs.mistral.ai/guides/prompting_capabilities/), [Together.AI](https://docs.together.ai/docs/examples), and [Meta](https://llama.meta.com/docs/how-to-guides/prompting/).
-- **Help!** - reach out on the AutoGen [Discord](https://discord.gg/pAbnFJrkgZ) or [log an issue](https://github.com/microsoft/autogen/issues) if you need help with or can help improve these client classes.
+- **Help!** - reach out on the AutoGen [Discord](https://discord.gg/pAbnFJrkgZ) or [log an issue](https://github.com/ag2ai/ag2/issues) if you need help with or can help improve these client classes.
 
 Now it's time to try them out.
diff --git a/website/docs/contributor-guide/contributing.md b/website/docs/contributor-guide/contributing.md
index 0475b5b75f..fda9596883 100644
--- a/website/docs/contributor-guide/contributing.md
+++ b/website/docs/contributor-guide/contributing.md
@@ -1,4 +1,4 @@
-# Contributing to AutoGen
+# Contributing to AG2
 
 The project welcomes contributions from developers and organizations worldwide. Our goal is to foster a collaborative and inclusive community where diverse perspectives and expertise can drive innovation and enhance the project's capabilities. Whether you are an individual contributor or represent an organization, we invite you to join us in shaping the future of this project. Together, we can build something truly remarkable.
 Possible contributions include but not limited to:
@@ -36,14 +36,14 @@ To maintain proper licensing and copyright notices, please include the following
 # SPDX-License-Identifier: Apache-2.0
 ```
 
-For files that contain or are derived from the original MIT-licensed code from https://github.com/microsoft/autogen, please use this extended header:
+For files that contain or are derived from the original MIT-licensed code from https://github.com/ag2ai/ag2, please use this extended header:
 
 ```python
 # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
 #
 # SPDX-License-Identifier: Apache-2.0
 #
-# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# Portions derived from https://github.com/ag2ai/ag2 are under the MIT License.
 # SPDX-License-Identifier: MIT
 ```
diff --git a/website/docs/contributor-guide/docker.md b/website/docs/contributor-guide/docker.md
index 1fd603f181..f71bc68eb4 100644
--- a/website/docs/contributor-guide/docker.md
+++ b/website/docs/contributor-guide/docker.md
@@ -1,14 +1,14 @@
 # Docker for Development
 
-For developers contributing to the AutoGen project, we offer a specialized Docker environment. This setup is designed to streamline the development process, ensuring that all contributors work within a consistent and well-equipped environment.
+For developers contributing to the AG2 project, we offer a specialized Docker environment. This setup is designed to streamline the development process, ensuring that all contributors work within a consistent and well-equipped environment.
 
-## Autogen Developer Image (ag2_dev_img)
+## AG2 Developer Image (ag2_dev_img)
 
-- **Purpose**: The `ag2_dev_img` is tailored for contributors to the AutoGen project. It includes a suite of tools and configurations that aid in the development and testing of new features or fixes.
-- **Usage**: This image is recommended for developers who intend to contribute code or documentation to AutoGen.
-- **Forking the Project**: It's advisable to fork the AutoGen GitHub project to your own repository. This allows you to make changes in a separate environment without affecting the main project.
+- **Purpose**: The `ag2_dev_img` is tailored for contributors to the AG2 project. It includes a suite of tools and configurations that aid in the development and testing of new features or fixes.
+- **Usage**: This image is recommended for developers who intend to contribute code or documentation to AG2.
+- **Forking the Project**: It's advisable to fork the AG2 GitHub project to your own repository. This allows you to make changes in a separate environment without affecting the main project.
 - **Updating Dockerfile**: Modify your copy of `Dockerfile` in the `dev` folder as needed for your development work.
-- **Submitting Pull Requests**: Once your changes are ready, submit a pull request from your branch to the upstream AutoGen GitHub project for review and integration. For more details on contributing, see the [AutoGen Contributing](https://ag2ai.github.io/ag2/docs/Contribute) page.
+- **Submitting Pull Requests**: Once your changes are ready, submit a pull request from your branch to the upstream AG2 GitHub project for review and integration. For more details on contributing, see the [AG2 Contributing](https://ag2ai.github.io/ag2/docs/Contribute) page.
 ## Building the Developer Docker Image
@@ -25,7 +25,7 @@ For developers contributing to the AutoGen project, we offer a specialized Docke
    git clone --branch {branch-name} https://github.com/ag2ai/ag2.git
 
    # cd to your new directory
-   cd autogen
+   cd ag2
 
    # build your Docker image
    docker build -f .devcontainer/dev/Dockerfile -t autogen_dev-srv_img .
@@ -47,5 +47,5 @@ docker run -it -p 8081:3000 -v /home/AutoGenDeveloper/autogen-newcode:newstuff/
 ## Develop in Remote Container
 
-If you use vscode, you can open the autogen folder in a [Container](https://code.visualstudio.com/docs/remote/containers).
+If you use vscode, you can open the ag2 folder in a [Container](https://code.visualstudio.com/docs/remote/containers).
 We have provided the configuration in [devcontainer](https://github.com/ag2ai/ag2/blob/main/.devcontainer). They can be used in GitHub codespace too. Developing AutoGen in dev containers is recommended.
diff --git a/website/docs/contributor-guide/documentation.md b/website/docs/contributor-guide/documentation.md
index 3694aacc3f..304d6c4045 100644
--- a/website/docs/contributor-guide/documentation.md
+++ b/website/docs/contributor-guide/documentation.md
@@ -2,7 +2,7 @@
 ## How to get a notebook rendered on the website
 
-See [here](https://github.com/microsoft/autogen/blob/main/notebook/contributing.md#how-to-get-a-notebook-displayed-on-the-website) for instructions on how to get a notebook in the `notebook` directory rendered on the website.
+See [here](https://github.com/ag2ai/ag2/blob/main/notebook/contributing.md#how-to-get-a-notebook-displayed-on-the-website) for instructions on how to get a notebook in the `notebook` directory rendered on the website.
 
 ## Build documentation locally
diff --git a/website/docs/ecosystem/llamaindex.md b/website/docs/ecosystem/llamaindex.md
index 4e9cffeedc..32a18e27ad 100644
--- a/website/docs/ecosystem/llamaindex.md
+++ b/website/docs/ecosystem/llamaindex.md
@@ -4,4 +4,4 @@
 [Llamaindex](https://www.llamaindex.ai/) allows the users to create Llamaindex agents and integrate them in autogen conversation patterns.
 
-- [Llamaindex + AutoGen Code Examples](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_group_chat_with_llamaindex_agents.ipynb)
+- [Llamaindex + AutoGen Code Examples](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_group_chat_with_llamaindex_agents.ipynb)
diff --git a/website/docs/installation/Docker.md b/website/docs/installation/Docker.md
index b1598652e5..580d56bbcd 100644
--- a/website/docs/installation/Docker.md
+++ b/website/docs/installation/Docker.md
@@ -1,37 +1,37 @@
 # Docker
 
-Docker, an indispensable tool in modern software development, offers a compelling solution for AutoGen's setup. Docker allows you to create consistent environments that are portable and isolated from the host OS. With Docker, everything AutoGen needs to run, from the operating system to specific libraries, is encapsulated in a container, ensuring uniform functionality across different systems. The Dockerfiles necessary for AutoGen are conveniently located in the project's GitHub repository at [https://github.com/ag2ai/ag2/tree/main/.devcontainer](https://github.com/ag2ai/ag2/tree/main/.devcontainer).
+Docker, an indispensable tool in modern software development, offers a compelling solution for AG2's setup. Docker allows you to create consistent environments that are portable and isolated from the host OS. With Docker, everything AG2 needs to run, from the operating system to specific libraries, is encapsulated in a container, ensuring uniform functionality across different systems. The Dockerfiles necessary for AG2 are conveniently located in the project's GitHub repository at [https://github.com/ag2ai/ag2/tree/main/.devcontainer](https://github.com/ag2ai/ag2/tree/main/.devcontainer).
 
-**Pre-configured DockerFiles**: The AutoGen Project offers pre-configured Dockerfiles for your use. These Dockerfiles will run as is, however they can be modified to suit your development needs. Please see the README.md file in autogen/.devcontainer
+**Pre-configured DockerFiles**: The AG2 Project offers pre-configured Dockerfiles for your use. These Dockerfiles will run as is, however they can be modified to suit your development needs. Please see the README.md file in autogen/.devcontainer
 
-- **ag2ai_base_img**: For a basic setup, you can use the `ag2ai_base_img` to run simple scripts or applications. This is ideal for general users or those new to AutoGen.
+- **ag2ai_base_img**: For a basic setup, you can use the `ag2ai_base_img` to run simple scripts or applications. This is ideal for general users or those new to AG2.
 - **ag2ai_full_img**: Advanced users or those requiring more features can use `ag2ai_full_img`. Be aware that this version loads ALL THE THINGS and thus is very large. Take this into consideration if you build your application off of it.
 
 ## Step 1: Install Docker
 
-- **General Installation**: Follow the [official Docker installation instructions](https://docs.docker.com/get-docker/). This is your first step towards a containerized environment, ensuring a consistent and isolated workspace for AutoGen.
+- **General Installation**: Follow the [official Docker installation instructions](https://docs.docker.com/get-docker/). This is your first step towards a containerized environment, ensuring a consistent and isolated workspace for AG2.
 
 - **For Mac Users**: If you encounter issues with the Docker daemon, consider using [colima](https://smallsharpsoftwaretools.com/tutorials/use-colima-to-run-docker-containers-on-macos/). Colima offers a lightweight alternative to manage Docker containers efficiently on macOS.
 
 ## Step 2: Build a Docker Image
 
-AutoGen now provides updated Dockerfiles tailored for different needs. Building a Docker image is akin to setting the foundation for your project's environment:
+AG2 now provides updated Dockerfiles tailored for different needs. Building a Docker image is akin to setting the foundation for your project's environment:
 
-- **Autogen Basic**: Ideal for general use, this setup includes common Python libraries and essential dependencies. Perfect for those just starting with AutoGen.
+- **AG2 Basic**: Ideal for general use, this setup includes common Python libraries and essential dependencies. Perfect for those just starting with AG2.
 ```bash
 docker build -f .devcontainer/Dockerfile -t ag2ai_base_img https://github.com/ag2ai/ag2.git#main
 ```
 
-- **Autogen Advanced**: Advanced users or those requiring all the things that AutoGen has to offer `ag2ai_full_img`
+- **AG2 Advanced**: Advanced users or those requiring all the things that AG2 has to offer `ag2ai_full_img`
 
 ```bash
 docker build -f .devcontainer/full/Dockerfile -t ag2ai_full_img https://github.com/ag2ai/ag2.git#main
 ```
 
-## Step 3: Run AutoGen Applications from Docker Image
+## Step 3: Run AG2 Applications from Docker Image
 
-Here's how you can run an application built with AutoGen, using the Docker image:
+Here's how you can run an application built with AG2, using the Docker image:
 
 1. **Mount Your Directory**: Use the Docker `-v` flag to mount your local application directory to the Docker container. This allows you to develop on your local machine while running the code in a consistent Docker environment. For example:
@@ -41,7 +41,7 @@ Here's how you can run an application built with AutoGen, using the Docker image
 Here, `$(pwd)/myapp` is your local directory, and `/home/ag2ai/ag2/myapp` is the path in the Docker container where your code will be located.
 
-2. **Mount your code:** Now suppose you have your application built with AutoGen in a main script named `twoagent.py` ([example](https://github.com/ag2ai/ag2/blob/main/test/twoagent.py)) in a folder named `myapp`. With the command line below, you can mount your folder and run the application in Docker.
+2. **Mount your code:** Now suppose you have your application built with AG2 in a main script named `twoagent.py` ([example](https://github.com/ag2ai/ag2/blob/main/test/twoagent.py)) in a folder named `myapp`. With the command line below, you can mount your folder and run the application in Docker.
 
 ```python
 # Mount the local folder `myapp` into docker image and run the script named "twoagent.py" in the docker.
diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js
index 7b4189574f..0454996323 100644
--- a/website/docusaurus.config.js
+++ b/website/docusaurus.config.js
@@ -13,7 +13,7 @@ customPostCssPlugin = () => {
 };
 
 module.exports = {
-  title: "AutoGen",
+  title: "AG2",
   tagline: "An Open-Source Programming Framework for Agentic AI",
   url: "https://ag2ai.github.io",
   baseUrl: "/ag2/",
@@ -44,9 +44,9 @@ module.exports = {
       },
     },
     navbar: {
-      title: "AutoGen",
+      title: "AG2",
       logo: {
-        alt: "AutoGen",
+        alt: "AG2",
         src: "img/ag.svg",
       },
      items: [
@@ -188,7 +188,7 @@ module.exports = {
          ],
        },
      ],
-      copyright: `Copyright © ${new Date().getFullYear()} AutoGen Authors`,
+      copyright: `Copyright © ${new Date().getFullYear()} AG2 Authors`,
    },
    // announcementBar: {
    //   id: "whats_new",
diff --git a/website/process_notebooks.py b/website/process_notebooks.py
index 66cc1b4967..6fc5845c8b 100755
--- a/website/process_notebooks.py
+++ b/website/process_notebooks.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 #
-# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# Portions derived from https://github.com/ag2ai/ag2 are under the MIT License.
 # SPDX-License-Identifier: MIT
 
 #!/usr/bin/env python
diff --git a/website/src/components/ExploreContent/ExploreContent.js b/website/src/components/ExploreContent/ExploreContent.js
index 655a829338..35c8074b4b 100644
--- a/website/src/components/ExploreContent/ExploreContent.js
+++ b/website/src/components/ExploreContent/ExploreContent.js
@@ -10,7 +10,7 @@ const firstDataRow = [
     link: "/docs/Getting-Started",
     description: (
       <>
-        Learn how to get start with AutoGen. Follow the instruction to quickly build-up your first AutoGen application.
+        Learn how to get start with AG2. Follow the instruction to quickly build-up your first AG2 application.
       </>
     ),
   },
@@ -19,7 +19,7 @@ const firstDataRow = [
     link: "docs/tutorial/introduction",
     description: (
       <>
-        This tutorial introduces basic concepts and building blocks of AutoGen.
+        This tutorial introduces basic concepts and building blocks of AG2.
       </>
     ),
   },
@@ -28,7 +28,7 @@ const firstDataRow = [
     link: "docs/topics",
     description: (
       <>
-        Users' guide to different functionalities of AutoGen, including CodeExecution, GroupChat, and more.
+        Users' guide to different functionalities of AG2, including CodeExecution, GroupChat, and more.
       </>
     ),
   },
@@ -40,7 +40,7 @@ const secondDataRow = [
     link: "docs/Examples",
     description: (
       <>
-        Learn different examples demonstrating the usage of AutoGen in various scenarios.
+        Learn different examples demonstrating the usage of AG2 in various scenarios.
       </>
     ),
   },
@@ -49,7 +49,7 @@ const secondDataRow = [
     link: "docs/Gallery",
     description: (
       <>
-        A collection of different applications built using AutoGen.
+        A collection of different applications built using AG2.
       </>
     ),
   },
@@ -58,7 +58,7 @@ const secondDataRow = [
     link: "docs/contributor-guide/contributing",
     description: (
       <>
-        Learn about how you can contribute to AutoGen and this documentation, including pushing patches, code review and more.
+        Learn about how you can contribute to AG2 and this documentation, including pushing patches, code review and more.
       </>
     ),
   },
diff --git a/website/src/components/HomepageFeatures/HomepageFeatures.js b/website/src/components/HomepageFeatures/HomepageFeatures.js
index 475b78be84..4ad64f27e0 100644
--- a/website/src/components/HomepageFeatures/HomepageFeatures.js
+++ b/website/src/components/HomepageFeatures/HomepageFeatures.js
@@ -10,7 +10,7 @@ const FeatureList = [
     docLink: './docs/Use-Cases/agent_chat',
     description: (
       <>
-        AutoGen provides multi-agent conversation framework as a high-level abstraction. With this framework, one can conveniently build LLM workflows.
+        AG2 provides multi-agent conversation framework as a high-level abstraction. With this framework, one can conveniently build LLM workflows.
       </>
     ),
   },
@@ -20,7 +20,7 @@ const FeatureList = [
     docLink: './docs/Use-Cases/agent_chat#diverse-applications-implemented-with-autogen',
     description: (
       <>
-        AutoGen offers a collection of working systems spanning a wide range of applications from various domains and complexities.
+        AG2 offers a collection of working systems spanning a wide range of applications from various domains and complexities.
       </>
     ),
   },
@@ -30,7 +30,7 @@ const FeatureList = [
     docLink: './docs/Use-Cases/enhanced_inference',
     description: (
       <>
-        AutoGen supports enhanced LLM inference APIs, which can be used to improve inference performance and reduce cost.
+        AG2 supports enhanced LLM inference APIs, which can be used to improve inference performance and reduce cost.
       </>
     ),
   },
diff --git a/website/src/components/PopularResource/PopularResource.js b/website/src/components/PopularResource/PopularResource.js
index d051f46bd7..0d2e48da08 100644
--- a/website/src/components/PopularResource/PopularResource.js
+++ b/website/src/components/PopularResource/PopularResource.js
@@ -19,7 +19,7 @@ function PopularResources() {