diff --git a/.github/workflows/deploy-website.yml b/.github/workflows/deploy-website.yml
index cf0f89e6a7..0d5cd637dc 100644
--- a/.github/workflows/deploy-website.yml
+++ b/.github/workflows/deploy-website.yml
@@ -25,12 +25,12 @@ jobs:
-
-
+
+
Page Redirection
- If you are not redirected automatically, follow this link to the new documentation.
+ If you are not redirected automatically, follow this link to the new documentation.
EOF
diff --git a/website/README.md b/website/README.md
index 22a4e10d6a..8bf386b0f2 100644
--- a/website/README.md
+++ b/website/README.md
@@ -9,7 +9,7 @@ To build and test documentation locally, begin by downloading and installing [No
## Installation
```console
-pip install pydoc-markdown pyyaml colored
+pip install pydoc-markdown pyyaml termcolor nbconvert
```
### Install Quarto
@@ -25,7 +25,7 @@ Install it [here](https://github.com/quarto-dev/quarto-cli/releases).
Navigate to the `website` folder and run:
```console
-pydoc-markdown
+python ./process_api_reference.py
python ./process_notebooks.py render
npm install
```
diff --git a/website/blog/2023-04-21-LLM-tuning-math/index.mdx b/website/blog/2023-04-21-LLM-tuning-math/index.mdx
index a5378af6cf..c8c97f9308 100644
--- a/website/blog/2023-04-21-LLM-tuning-math/index.mdx
+++ b/website/blog/2023-04-21-LLM-tuning-math/index.mdx
@@ -32,7 +32,7 @@ We adapt the models using 20 examples in the train set, using the problem statem
- top_p: The parameter that controls the probability mass of the output tokens. Only tokens with a cumulative probability less than or equal to top-p are considered. A lower top-p means more diversity but less coherence. We search for the optimal top-p in the range of [0, 1].
- max_tokens: The maximum number of tokens that can be generated for each output. We search for the optimal max length in the range of [50, 1000].
- n: The number of responses to generate. We search for the optimal n in the range of [1, 100].
-- prompt: We use the template: "{problem} Solve the problem carefully. Simplify your answer as much as possible. Put the final answer in \\boxed{{}}." where {problem} will be replaced by the math problem instance.
+- prompt: We use the template: "\{problem} Solve the problem carefully. Simplify your answer as much as possible. Put the final answer in \\boxed\{{}}." where \{problem} will be replaced by the math problem instance.
In this experiment, when n > 1, we find the answer with highest votes among all the responses and then select it as the final answer to compare with the ground truth. For example, if n = 5 and 3 of the responses contain a final answer 301 while 2 of the responses contain a final answer 159, we choose 301 as the final answer. This can help with resolving potential errors due to randomness. We use the average accuracy and average inference cost as the metric to evaluate the performance over a dataset. The inference cost of a particular instance is measured by the price per 1K tokens and the number of tokens consumed.
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/1_service_running.png b/website/blog/2024-12-20-RealtimeAgent/img/1_service_running.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/1_service_running.png
rename to website/blog/2024-12-20-RealtimeAgent/img/1_service_running.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/2_incoming_call.png b/website/blog/2024-12-20-RealtimeAgent/img/2_incoming_call.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/2_incoming_call.png
rename to website/blog/2024-12-20-RealtimeAgent/img/2_incoming_call.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/3_request_for_flight_cancellation.png b/website/blog/2024-12-20-RealtimeAgent/img/3_request_for_flight_cancellation.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/3_request_for_flight_cancellation.png
rename to website/blog/2024-12-20-RealtimeAgent/img/3_request_for_flight_cancellation.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/4_flight_number_name.png b/website/blog/2024-12-20-RealtimeAgent/img/4_flight_number_name.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/4_flight_number_name.png
rename to website/blog/2024-12-20-RealtimeAgent/img/4_flight_number_name.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/5_refund_policy.png b/website/blog/2024-12-20-RealtimeAgent/img/5_refund_policy.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/5_refund_policy.png
rename to website/blog/2024-12-20-RealtimeAgent/img/5_refund_policy.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/6_flight_refunded.png b/website/blog/2024-12-20-RealtimeAgent/img/6_flight_refunded.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/6_flight_refunded.png
rename to website/blog/2024-12-20-RealtimeAgent/img/6_flight_refunded.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/img/realtime_agent_swarm.png b/website/blog/2024-12-20-RealtimeAgent/img/realtime_agent_swarm.png
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/img/realtime_agent_swarm.png
rename to website/blog/2024-12-20-RealtimeAgent/img/realtime_agent_swarm.png
diff --git a/website/blog/2024-12-18-RealtimeAgent/index.mdx b/website/blog/2024-12-20-RealtimeAgent/index.mdx
similarity index 100%
rename from website/blog/2024-12-18-RealtimeAgent/index.mdx
rename to website/blog/2024-12-20-RealtimeAgent/index.mdx
diff --git a/website/blog/2024-12-18-Reasoning-Update/img/reasoningagent_1.png b/website/blog/2024-12-20-Reasoning-Update/img/reasoningagent_1.png
similarity index 100%
rename from website/blog/2024-12-18-Reasoning-Update/img/reasoningagent_1.png
rename to website/blog/2024-12-20-Reasoning-Update/img/reasoningagent_1.png
diff --git a/website/blog/2024-12-18-Reasoning-Update/index.mdx b/website/blog/2024-12-20-Reasoning-Update/index.mdx
similarity index 100%
rename from website/blog/2024-12-18-Reasoning-Update/index.mdx
rename to website/blog/2024-12-20-Reasoning-Update/index.mdx
diff --git a/website/blog/2024-12-18-Tools-interoperability/index.mdx b/website/blog/2024-12-20-Tools-interoperability/index.mdx
similarity index 100%
rename from website/blog/2024-12-18-Tools-interoperability/index.mdx
rename to website/blog/2024-12-20-Tools-interoperability/index.mdx
diff --git a/website/docs/Examples.mdx b/website/docs/Examples.mdx
index aa92f534a7..f693a6e584 100644
--- a/website/docs/Examples.mdx
+++ b/website/docs/Examples.mdx
@@ -11,111 +11,111 @@ Links to notebook examples:
### Code Generation, Execution, and Debugging
-- Automated Task Solving with Code Generation, Execution & Debugging - [View Notebook](/docs/notebooks/agentchat_auto_feedback_from_code_execution)
-- Automated Code Generation and Question Answering with Retrieval Augmented Agents - [View Notebook](/docs/notebooks/agentchat_RetrieveChat)
-- Automated Code Generation and Question Answering with [Qdrant](https://qdrant.tech/) based Retrieval Augmented Agents - [View Notebook](/docs/notebooks/agentchat_RetrieveChat_qdrant)
+- Automated Task Solving with Code Generation, Execution & Debugging - [View Notebook](/notebooks/agentchat_auto_feedback_from_code_execution)
+- Automated Code Generation and Question Answering with Retrieval Augmented Agents - [View Notebook](/notebooks/agentchat_RetrieveChat)
+- Automated Code Generation and Question Answering with [Qdrant](https://qdrant.tech/) based Retrieval Augmented Agents - [View Notebook](/notebooks/agentchat_RetrieveChat_qdrant)
### Multi-Agent Collaboration (>3 Agents)
-- Automated Task Solving by Group Chat (with 3 group member agents and 1 manager agent) - [View Notebook](/docs/notebooks/agentchat_groupchat)
-- Automated Data Visualization by Group Chat (with 3 group member agents and 1 manager agent) - [View Notebook](/docs/notebooks/agentchat_groupchat_vis)
-- Automated Complex Task Solving by Group Chat (with 6 group member agents and 1 manager agent) - [View Notebook](/docs/notebooks/agentchat_groupchat_research)
-- Automated Task Solving with Coding & Planning Agents - [View Notebook](/docs/notebooks/agentchat_planning)
-- Automated Task Solving with transition paths specified in a graph - [View Notebook](/docs/notebooks/agentchat_groupchat_finite_state_machine)
-- Running a group chat as an inner-monolgue via the SocietyOfMindAgent - [View Notebook](/docs/notebooks/agentchat_society_of_mind)
-- Running a group chat with custom speaker selection function - [View Notebook](/docs/notebooks/agentchat_groupchat_customized)
+- Automated Task Solving by Group Chat (with 3 group member agents and 1 manager agent) - [View Notebook](/notebooks/agentchat_groupchat)
+- Automated Data Visualization by Group Chat (with 3 group member agents and 1 manager agent) - [View Notebook](/notebooks/agentchat_groupchat_vis)
+- Automated Complex Task Solving by Group Chat (with 6 group member agents and 1 manager agent) - [View Notebook](/notebooks/agentchat_groupchat_research)
+- Automated Task Solving with Coding & Planning Agents - [View Notebook](/notebooks/agentchat_planning)
+- Automated Task Solving with transition paths specified in a graph - [View Notebook](/notebooks/agentchat_groupchat_finite_state_machine)
+- Running a group chat as an inner-monologue via the SocietyOfMindAgent - [View Notebook](/notebooks/agentchat_society_of_mind)
+- Running a group chat with custom speaker selection function - [View Notebook](/notebooks/agentchat_groupchat_customized)
### Sequential Multi-Agent Chats
-- Solving Multiple Tasks in a Sequence of Chats Initiated by a Single Agent - [View Notebook](/docs/notebooks/agentchat_multi_task_chats)
-- Async-solving Multiple Tasks in a Sequence of Chats Initiated by a Single Agent - [View Notebook](/docs/notebooks/agentchat_multi_task_async_chats)
-- Solving Multiple Tasks in a Sequence of Chats Initiated by Different Agents - [View Notebook](/docs/notebooks/agentchats_sequential_chats)
+- Solving Multiple Tasks in a Sequence of Chats Initiated by a Single Agent - [View Notebook](/notebooks/agentchat_multi_task_chats)
+- Async-solving Multiple Tasks in a Sequence of Chats Initiated by a Single Agent - [View Notebook](/notebooks/agentchat_multi_task_async_chats)
+- Solving Multiple Tasks in a Sequence of Chats Initiated by Different Agents - [View Notebook](/notebooks/agentchats_sequential_chats)
### Nested Chats
-- Solving Complex Tasks with Nested Chats - [View Notebook](/docs/notebooks/agentchat_nestedchat)
-- Solving Complex Tasks with A Sequence of Nested Chats - [View Notebook](/docs/notebooks/agentchat_nested_sequential_chats)
-- OptiGuide for Solving a Supply Chain Optimization Problem with Nested Chats with a Coding Agent and a Safeguard Agent - [View Notebook](/docs/notebooks/agentchat_nestedchat_optiguide)
-- Conversational Chess with Nested Chats and Tool Use - [View Notebook](/docs/notebooks/agentchat_nested_chats_chess)
+- Solving Complex Tasks with Nested Chats - [View Notebook](/notebooks/agentchat_nestedchat)
+- Solving Complex Tasks with A Sequence of Nested Chats - [View Notebook](/notebooks/agentchat_nested_sequential_chats)
+- OptiGuide for Solving a Supply Chain Optimization Problem with Nested Chats with a Coding Agent and a Safeguard Agent - [View Notebook](/notebooks/agentchat_nestedchat_optiguide)
+- Conversational Chess with Nested Chats and Tool Use - [View Notebook](/notebooks/agentchat_nested_chats_chess)
### Swarms
-- Orchestrating agents in a Swarm - [View Notebook](/docs/notebooks/agentchat_swarm)
-- Orchestrating agents in a Swarm (Enhanced) - [View Notebook](/docs/notebooks/agentchat_swarm_enhanced)
+- Orchestrating agents in a Swarm - [View Notebook](/notebooks/agentchat_swarm)
+- Orchestrating agents in a Swarm (Enhanced) - [View Notebook](/notebooks/agentchat_swarm_enhanced)
### Applications
-- Automated Continual Learning from New Data - [View Notebook](/docs/notebooks/agentchat_stream)
+- Automated Continual Learning from New Data - [View Notebook](/notebooks/agentchat_stream)
{/* - [OptiGuide](https://github.com/microsoft/optiguide) - Coding, Tool Using, Safeguarding & Question Answering for Supply Chain Optimization */}
- [AutoAnny](https://github.com/ag2ai/build-with-ag2/tree/main/samples/apps/auto-anny) - A Discord bot built using AutoGen
### RAG
-- GraphRAG agent using FalkorDB (feat. swarms and Google Maps API) - [View Notebook](/docs/notebooks/agentchat_swarm_graphrag_trip_planner)
+- GraphRAG agent using FalkorDB (feat. swarms and Google Maps API) - [View Notebook](/notebooks/agentchat_swarm_graphrag_trip_planner)
### Tool Use
-- **Web Search**: Solve Tasks Requiring Web Info - [View Notebook](/docs/notebooks/agentchat_web_info)
-- Use Provided Tools as Functions - [View Notebook](/docs/notebooks/agentchat_function_call_currency_calculator)
-- Use Tools via Sync and Async Function Calling - [View Notebook](/docs/notebooks/agentchat_function_call_async)
-- Task Solving with Langchain Provided Tools as Functions - [View Notebook](/docs/notebooks/agentchat_langchain)
-- **RAG**: Group Chat with Retrieval Augmented Generation (with 5 group member agents and 1 manager agent) - [View Notebook](/docs/notebooks/agentchat_groupchat_RAG)
-- Function Inception: Enable AutoGen agents to update/remove functions during conversations. - [View Notebook](/docs/notebooks/agentchat_inception_function)
-- Agent Chat with Whisper - [View Notebook](/docs/notebooks/agentchat_video_transcript_translate_with_whisper)
-- Constrained Responses via Guidance - [View Notebook](/docs/notebooks/agentchat_guidance)
-- Browse the Web with Agents - [View Notebook](/docs/notebooks/agentchat_surfer)
-- **SQL**: Natural Language Text to SQL Query using the [Spider](https://yale-lily.github.io/spider) Text-to-SQL Benchmark - [View Notebook](/docs/notebooks/agentchat_sql_spider)
-- **Web Scraping**: Web Scraping with Apify - [View Notebook](/docs/notebooks/agentchat_webscraping_with_apify)
-- **Write a software app, task by task, with specially designed functions.** - [View Notebook](/docs/notebooks/agentchat_function_call_code_writing).
+- **Web Search**: Solve Tasks Requiring Web Info - [View Notebook](/notebooks/agentchat_web_info)
+- Use Provided Tools as Functions - [View Notebook](/notebooks/agentchat_function_call_currency_calculator)
+- Use Tools via Sync and Async Function Calling - [View Notebook](/notebooks/agentchat_function_call_async)
+- Task Solving with Langchain Provided Tools as Functions - [View Notebook](/notebooks/agentchat_langchain)
+- **RAG**: Group Chat with Retrieval Augmented Generation (with 5 group member agents and 1 manager agent) - [View Notebook](/notebooks/agentchat_groupchat_RAG)
+- Function Inception: Enable AutoGen agents to update/remove functions during conversations. - [View Notebook](/notebooks/agentchat_inception_function)
+- Agent Chat with Whisper - [View Notebook](/notebooks/agentchat_video_transcript_translate_with_whisper)
+- Constrained Responses via Guidance - [View Notebook](/notebooks/agentchat_guidance)
+- Browse the Web with Agents - [View Notebook](/notebooks/agentchat_surfer)
+- **SQL**: Natural Language Text to SQL Query using the [Spider](https://yale-lily.github.io/spider) Text-to-SQL Benchmark - [View Notebook](/notebooks/agentchat_sql_spider)
+- **Web Scraping**: Web Scraping with Apify - [View Notebook](/notebooks/agentchat_webscraping_with_apify)
+- **Write a software app, task by task, with specially designed functions.** - [View Notebook](/notebooks/agentchat_function_call_code_writing).
### Human Involvement
- Simple example in ChatGPT style [View example](https://github.com/ag2ai/build-with-ag2/blob/main/samples/simple_chat.py)
-- Auto Code Generation, Execution, Debugging and **Human Feedback** - [View Notebook](/docs/notebooks/agentchat_human_feedback)
-- Automated Task Solving with GPT-4 + **Multiple Human Users** - [View Notebook](/docs/notebooks/agentchat_two_users)
-- Agent Chat with **Async Human Inputs** - [View Notebook](/docs/notebooks/async_human_input)
+- Auto Code Generation, Execution, Debugging and **Human Feedback** - [View Notebook](/notebooks/agentchat_human_feedback)
+- Automated Task Solving with GPT-4 + **Multiple Human Users** - [View Notebook](/notebooks/agentchat_two_users)
+- Agent Chat with **Async Human Inputs** - [View Notebook](/notebooks/async_human_input)
### Agent Teaching and Learning
-- Teach Agents New Skills & Reuse via Automated Chat - [View Notebook](/docs/notebooks/agentchat_teaching)
-- Teach Agents New Facts, User Preferences and Skills Beyond Coding - [View Notebook](/docs/notebooks/agentchat_teachability)
-- Teach OpenAI Assistants Through GPTAssistantAgent - [View Notebook](/docs/notebooks/agentchat_teachable_oai_assistants)
-- Agent Optimizer: Train Agents in an Agentic Way - [View Notebook](/docs/notebooks/agentchat_agentoptimizer)
+- Teach Agents New Skills & Reuse via Automated Chat - [View Notebook](/notebooks/agentchat_teaching)
+- Teach Agents New Facts, User Preferences and Skills Beyond Coding - [View Notebook](/notebooks/agentchat_teachability)
+- Teach OpenAI Assistants Through GPTAssistantAgent - [View Notebook](/notebooks/agentchat_teachable_oai_assistants)
+- Agent Optimizer: Train Agents in an Agentic Way - [View Notebook](/notebooks/agentchat_agentoptimizer)
### Multi-Agent Chat with OpenAI Assistants in the loop
-- Hello-World Chat with OpenAi Assistant in AutoGen - [View Notebook](/docs/notebooks/agentchat_oai_assistant_twoagents_basic)
-- Chat with OpenAI Assistant using Function Call - [View Notebook](/docs/notebooks/agentchat_oai_assistant_function_call)
-- Chat with OpenAI Assistant with Code Interpreter - [View Notebook](/docs/notebooks/agentchat_oai_code_interpreter)
-- Chat with OpenAI Assistant with Retrieval Augmentation - [View Notebook](/docs/notebooks/agentchat_oai_assistant_retrieval)
-- OpenAI Assistant in a Group Chat - [View Notebook](/docs/notebooks/agentchat_oai_assistant_groupchat)
-- GPTAssistantAgent based Multi-Agent Tool Use - [View Notebook](/docs/notebooks/gpt_assistant_agent_function_call)
+- Hello-World Chat with OpenAI Assistant in AutoGen - [View Notebook](/notebooks/agentchat_oai_assistant_twoagents_basic)
+- Chat with OpenAI Assistant using Function Call - [View Notebook](/notebooks/agentchat_oai_assistant_function_call)
+- Chat with OpenAI Assistant with Code Interpreter - [View Notebook](/notebooks/agentchat_oai_code_interpreter)
+- Chat with OpenAI Assistant with Retrieval Augmentation - [View Notebook](/notebooks/agentchat_oai_assistant_retrieval)
+- OpenAI Assistant in a Group Chat - [View Notebook](/notebooks/agentchat_oai_assistant_groupchat)
+- GPTAssistantAgent based Multi-Agent Tool Use - [View Notebook](/notebooks/gpt_assistant_agent_function_call)
### Non-OpenAI Models
-- Conversational Chess using non-OpenAI Models - [View Notebook](/docs/notebooks/agentchat_nested_chats_chess_altmodels)
+- Conversational Chess using non-OpenAI Models - [View Notebook](/notebooks/agentchat_nested_chats_chess_altmodels)
### Multimodal Agent
-- Multimodal Agent Chat with DALLE and GPT-4V - [View Notebook](/docs/notebooks/agentchat_dalle_and_gpt4v)
-- Multimodal Agent Chat with Llava - [View Notebook](/docs/notebooks/agentchat_lmm_llava)
-- Multimodal Agent Chat with GPT-4V - [View Notebook](/docs/notebooks/agentchat_lmm_gpt-4v)
+- Multimodal Agent Chat with DALLE and GPT-4V - [View Notebook](/notebooks/agentchat_dalle_and_gpt4v)
+- Multimodal Agent Chat with Llava - [View Notebook](/notebooks/agentchat_lmm_llava)
+- Multimodal Agent Chat with GPT-4V - [View Notebook](/notebooks/agentchat_lmm_gpt-4v)
### Long Context Handling
{/* - Conversations with Chat History Compression Enabled - [View Notebook](https://github.com/ag2ai/ag2/blob/main/notebook/agentchat_compression.ipynb) */}
-- Long Context Handling as A Capability - [View Notebook](/docs/notebooks/agentchat_transform_messages)
+- Long Context Handling as A Capability - [View Notebook](/notebooks/agentchat_transform_messages)
### Evaluation and Assessment
-- AgentEval: A Multi-Agent System for Assess Utility of LLM-powered Applications - [View Notebook](/docs/notebooks/agenteval_cq_math)
+- AgentEval: A Multi-Agent System for Assessing the Utility of LLM-powered Applications - [View Notebook](/notebooks/agenteval_cq_math)
### Automatic Agent Building
-- Automatically Build Multi-agent System with AgentBuilder - [View Notebook](/docs/notebooks/autobuild_basic)
-- Automatically Build Multi-agent System from Agent Library - [View Notebook](/docs/notebooks/autobuild_agent_library)
+- Automatically Build Multi-agent System with AgentBuilder - [View Notebook](/notebooks/autobuild_basic)
+- Automatically Build Multi-agent System from Agent Library - [View Notebook](/notebooks/autobuild_agent_library)
### Observability
-- Track LLM calls, tool usage, actions and errors using AgentOps - [View Notebook](/docs/notebooks/agentchat_agentops)
-- Cost Calculation - [View Notebook](/docs/notebooks/agentchat_cost_token_tracking)
+- Track LLM calls, tool usage, actions and errors using AgentOps - [View Notebook](/notebooks/agentchat_agentops)
+- Cost Calculation - [View Notebook](/notebooks/agentchat_cost_token_tracking)
## Enhanced Inferences
diff --git a/website/docs/Use-Cases/enhanced_inference.mdx b/website/docs/Use-Cases/enhanced_inference.mdx
index 0b838a92f9..f2bba10f01 100644
--- a/website/docs/Use-Cases/enhanced_inference.mdx
+++ b/website/docs/Use-Cases/enhanced_inference.mdx
@@ -241,7 +241,7 @@ response = client.create(
The example above will try to use text-ada-001, gpt-3.5-turbo-instruct, and text-davinci-003 iteratively, until a valid json string is returned or the last config is used. One can also repeat the same model in the list for multiple times (with different seeds) to try one model multiple times for increasing the robustness of the final response.
-*Advanced use case: Check this [blogpost](/blog/2023/05/18/GPT-adaptive-humaneval) to find how to improve GPT-4's coding performance from 68% to 90% while reducing the inference cost.*
+*Advanced use case: Check this [blogpost](/blog/2023-05-18-GPT-adaptive-humaneval/index) to learn how to improve GPT-4's coding performance from 68% to 90% while reducing the inference cost.*
## Templating
diff --git a/website/docs/installation/Optional-Dependencies.mdx b/website/docs/installation/Optional-Dependencies.mdx
index fd91cf7ecf..33c6891d4c 100644
--- a/website/docs/installation/Optional-Dependencies.mdx
+++ b/website/docs/installation/Optional-Dependencies.mdx
@@ -6,8 +6,8 @@ AG2 installs OpenAI package by default. To use LLMs by other providers, you can
pip install autogen[gemini,anthropic,mistral,together,groq,cohere]
```
-Check out the [notebook](/docs/notebooks/autogen_uniformed_api_calling) and
-[blogpost](/blog/2024/06/24/AltModels-Classes) for more details.
+Check out the [notebook](/notebooks/autogen_uniformed_api_calling) and
+[blogpost](/blog/2024-06-24-AltModels-Classes/index) for more details.
## LLM Caching
@@ -90,7 +90,7 @@ To use Teachability, please install AG2 with the [teachable] option.
pip install "autogen[teachable]"
```
-Example notebook: [Chatting with a teachable agent](/docs/notebooks/agentchat_teachability)
+Example notebook: [Chatting with a teachable agent](/notebooks/agentchat_teachability)
## Large Multimodal Model (LMM) Agents
@@ -100,7 +100,7 @@ We offered Multimodal Conversable Agent and LLaVA Agent. Please install with the
pip install "autogen[lmm]"
```
-Example notebook: [LLaVA Agent](/docs/notebooks/agentchat_lmm_llava)
+Example notebook: [LLaVA Agent](/notebooks/agentchat_lmm_llava)
## mathchat
@@ -120,7 +120,7 @@ To use a graph in `GroupChat`, particularly for graph visualization, please inst
pip install "autogen[graph]"
```
-Example notebook: [Finite State Machine graphs to set speaker transition constraints](/docs/notebooks/agentchat_groupchat_finite_state_machine)
+Example notebook: [Finite State Machine graphs to set speaker transition constraints](/notebooks/agentchat_groupchat_finite_state_machine)
## Long Context Handling
diff --git a/website/docs/topics/non-openai-models/about-using-nonopenai-models.mdx b/website/docs/topics/non-openai-models/about-using-nonopenai-models.mdx
index c71f11183b..7c966973b7 100644
--- a/website/docs/topics/non-openai-models/about-using-nonopenai-models.mdx
+++ b/website/docs/topics/non-openai-models/about-using-nonopenai-models.mdx
@@ -77,5 +77,5 @@ to assign specific models to agents.
For more advanced users, you can create your own custom model client class, enabling
you to define and load your own models.
-See the [AutoGen with Custom Models: Empowering Users to Use Their Own Inference Mechanism](/blog/2024/01/26/Custom-Models)
-blog post and [this notebook](/docs/notebooks/agentchat_custom_model/) for a guide to creating custom model client classes.
+See the [AutoGen with Custom Models: Empowering Users to Use Their Own Inference Mechanism](/blog/2024-01-26-Custom-Models/index)
+blog post and [this notebook](/notebooks/agentchat_custom_model/) for a guide to creating custom model client classes.
diff --git a/website/docs/topics/openai-assistant/gpt_assistant_agent.mdx b/website/docs/topics/openai-assistant/gpt_assistant_agent.mdx
index 595765e4ab..c41efd5092 100644
--- a/website/docs/topics/openai-assistant/gpt_assistant_agent.mdx
+++ b/website/docs/topics/openai-assistant/gpt_assistant_agent.mdx
@@ -19,9 +19,9 @@ Key Features of the GPTAssistantAgent:
For a practical illustration, here are some examples:
-- [Chat with OpenAI Assistant using function call](/docs/notebooks/agentchat_oai_assistant_function_call) demonstrates how to leverage function calling to enable intelligent function selection.
-- [GPTAssistant with Code Interpreter](/docs/notebooks/agentchat_oai_code_interpreter) showcases the integration of the Code Interpreter tool which executes Python code dynamically within applications.
-- [Group Chat with GPTAssistantAgent](/docs/notebooks/agentchat_oai_assistant_groupchat) demonstrates how to use the GPTAssistantAgent in AutoGen's group chat mode, enabling collaborative task performance through automated chat with agents powered by LLMs, tools, or humans.
+- [Chat with OpenAI Assistant using function call](/notebooks/agentchat_oai_assistant_function_call) demonstrates how to leverage function calling to enable intelligent function selection.
+- [GPTAssistant with Code Interpreter](/notebooks/agentchat_oai_code_interpreter) showcases the integration of the Code Interpreter tool which executes Python code dynamically within applications.
+- [Group Chat with GPTAssistantAgent](/notebooks/agentchat_oai_assistant_groupchat) demonstrates how to use the GPTAssistantAgent in AutoGen's group chat mode, enabling collaborative task performance through automated chat with agents powered by LLMs, tools, or humans.
## Create a OpenAI Assistant in Autogen
diff --git a/website/docs/tutorial/conversation-patterns.ipynb b/website/docs/tutorial/conversation-patterns.ipynb
index 56004e3b3b..e624ed29ca 100644
--- a/website/docs/tutorial/conversation-patterns.ipynb
+++ b/website/docs/tutorial/conversation-patterns.ipynb
@@ -1568,7 +1568,7 @@
"You can hide [tool usages](/docs/tutorial/tool-use) within a single agent by having the tool-caller agent \n",
"starts a nested chat with a tool-executor agent and then use the result\n",
"of the nested chat to generate a response.\n",
- "See the [nested chats for tool use notebook](/docs/notebooks/agentchat_nested_chats_chess) for an example."
+ "See the [nested chats for tool use notebook](/notebooks/agentchat_nested_chats_chess) for an example."
]
},
{
diff --git a/website/docs/tutorial/introduction.ipynb b/website/docs/tutorial/introduction.ipynb
index 92876d2554..cb95acf1d4 100644
--- a/website/docs/tutorial/introduction.ipynb
+++ b/website/docs/tutorial/introduction.ipynb
@@ -65,7 +65,7 @@
"\n",
"You can switch each component on or off and customize it to suit the need of \n",
"your application. For advanced users, you can add additional components to the agent\n",
- "by using [`registered_reply`](../reference/agentchat/conversable_agent/#register_reply). "
+ "by using [`registered_reply`](../reference/agentchat/conversable_agent/#register-reply). "
]
},
{
diff --git a/website/docs/tutorial/tool-use.ipynb b/website/docs/tutorial/tool-use.ipynb
index 792bfefde8..fc1c4ee39a 100644
--- a/website/docs/tutorial/tool-use.ipynb
+++ b/website/docs/tutorial/tool-use.ipynb
@@ -163,10 +163,10 @@
"for it to be useful in conversation. \n",
"The agent registered with the tool's signature\n",
"through \n",
- "[`register_for_llm`](/docs/reference/agentchat/conversable_agent#register_for_llm)\n",
+ "[`register_for_llm`](/docs/reference/agentchat/conversable_agent#register-for-llm)\n",
"can call the tool;\n",
"the agent registered with the tool's function object through \n",
- "[`register_for_execution`](/docs/reference/agentchat/conversable_agent#register_for_execution)\n",
+ "[`register_for_execution`](/docs/reference/agentchat/conversable_agent#register-for-execution)\n",
"can execute the tool's function."
]
},
@@ -175,7 +175,7 @@
"metadata": {},
"source": [
"Alternatively, you can use \n",
- "[`autogen.register_function`](/docs/reference/agentchat/conversable_agent#register_function-1)\n",
+ "[`autogen.register_function`](/docs/reference/agentchat/conversable_agent#register-function)\n",
"function to register a tool with both agents at once."
]
},
@@ -227,114 +227,114 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
"What is (44232 + 13312 / (232 - 32)) * 5?\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_4rElPoLggOYJmkUutbGaSTX1): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_4rElPoLggOYJmkUutbGaSTX1): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"a\": 232,\n",
" \"b\": 32,\n",
" \"operator\": \"-\"\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_4rElPoLggOYJmkUutbGaSTX1) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_4rElPoLggOYJmkUutbGaSTX1) *****\u001b[0m\n",
"200\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_SGtr8tK9A4iOCJGdCqkKR2Ov): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_SGtr8tK9A4iOCJGdCqkKR2Ov): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"a\": 13312,\n",
" \"b\": 200,\n",
" \"operator\": \"/\"\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_SGtr8tK9A4iOCJGdCqkKR2Ov) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_SGtr8tK9A4iOCJGdCqkKR2Ov) *****\u001b[0m\n",
"66\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_YsR95CM1Ice2GZ7ZoStYXI6M): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_YsR95CM1Ice2GZ7ZoStYXI6M): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"a\": 44232,\n",
" \"b\": 66,\n",
" \"operator\": \"+\"\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_YsR95CM1Ice2GZ7ZoStYXI6M) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_YsR95CM1Ice2GZ7ZoStYXI6M) *****\u001b[0m\n",
"44298\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_oqZn4rTjyvXYcmjAXkvVaJm1): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_oqZn4rTjyvXYcmjAXkvVaJm1): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"a\": 44298,\n",
" \"b\": 5,\n",
" \"operator\": \"*\"\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_oqZn4rTjyvXYcmjAXkvVaJm1) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_oqZn4rTjyvXYcmjAXkvVaJm1) *****\u001b[0m\n",
"221490\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
"The result of the calculation is 221490. TERMINATE\n",
"\n",
@@ -600,16 +600,16 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
"What is (1423 - 123) / 3 + (32 + 23) * 5?\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_Uu4diKtxlTfkwXuY6MmJEb4E): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_Uu4diKtxlTfkwXuY6MmJEb4E): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"input\": {\n",
@@ -618,27 +618,27 @@
" \"operator\": \"+\"\n",
" }\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_Uu4diKtxlTfkwXuY6MmJEb4E) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_Uu4diKtxlTfkwXuY6MmJEb4E) *****\u001b[0m\n",
"Error: Expecting value: line 1 column 29 (char 28)\n",
" You argument should follow json format.\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
"I apologize for the confusion, I seem to have made a mistake. Let me recalculate the expression properly.\n",
"\n",
"First, we need to do the calculations within the brackets. So, calculating (1423 - 123), (32 + 23), and then performing remaining operations.\n",
- "\u001B[32m***** Suggested tool call (call_mx3M3fNOwikFNoqSojDH1jIr): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_mx3M3fNOwikFNoqSojDH1jIr): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"input\": {\n",
@@ -647,25 +647,25 @@
" \"operator\": \"-\"\n",
" }\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_mx3M3fNOwikFNoqSojDH1jIr) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_mx3M3fNOwikFNoqSojDH1jIr) *****\u001b[0m\n",
"1300\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_hBAL2sYi6Y5ZtTHCNPCmxdN3): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_hBAL2sYi6Y5ZtTHCNPCmxdN3): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"input\": {\n",
@@ -674,25 +674,25 @@
" \"operator\": \"+\"\n",
" }\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_hBAL2sYi6Y5ZtTHCNPCmxdN3) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_hBAL2sYi6Y5ZtTHCNPCmxdN3) *****\u001b[0m\n",
"55\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_wO3AP7EDeJvsVLCpvv5LohUa): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_wO3AP7EDeJvsVLCpvv5LohUa): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"input\": {\n",
@@ -701,25 +701,25 @@
" \"operator\": \"/\"\n",
" }\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_wO3AP7EDeJvsVLCpvv5LohUa) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_wO3AP7EDeJvsVLCpvv5LohUa) *****\u001b[0m\n",
"433\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_kQ2hDhqem8BHNlaHaE9ezvvQ): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_kQ2hDhqem8BHNlaHaE9ezvvQ): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"input\": {\n",
@@ -728,25 +728,25 @@
" \"operator\": \"*\"\n",
" }\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_kQ2hDhqem8BHNlaHaE9ezvvQ) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_kQ2hDhqem8BHNlaHaE9ezvvQ) *****\u001b[0m\n",
"275\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
- "\u001B[32m***** Suggested tool call (call_1FLDUdvAZmjlSD7g5GFFJOpO): calculator *****\u001B[0m\n",
+ "\u001b[32m***** Suggested tool call (call_1FLDUdvAZmjlSD7g5GFFJOpO): calculator *****\u001b[0m\n",
"Arguments: \n",
"{\n",
" \"input\": {\n",
@@ -755,23 +755,23 @@
" \"operator\": \"+\"\n",
" }\n",
"}\n",
- "\u001B[32m***************************************************************************\u001B[0m\n",
+ "\u001b[32m***************************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[35m\n",
- ">>>>>>>> EXECUTING FUNCTION calculator...\u001B[0m\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION calculator...\u001b[0m\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[33mUser\u001B[0m (to Assistant):\n",
+ "\u001b[33mUser\u001b[0m (to Assistant):\n",
"\n",
- "\u001B[32m***** Response from calling tool (call_1FLDUdvAZmjlSD7g5GFFJOpO) *****\u001B[0m\n",
+ "\u001b[32m***** Response from calling tool (call_1FLDUdvAZmjlSD7g5GFFJOpO) *****\u001b[0m\n",
"708\n",
- "\u001B[32m**********************************************************************\u001B[0m\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
"\n",
"--------------------------------------------------------------------------------\n",
- "\u001B[31m\n",
- ">>>>>>>> USING AUTO REPLY...\u001B[0m\n",
- "\u001B[33mAssistant\u001B[0m (to User):\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mAssistant\u001b[0m (to User):\n",
"\n",
"The calculation result of the expression (1423 - 123) / 3 + (32 + 23) * 5 is 708. Let's proceed to the next task.\n",
"TERMINATE\n",
diff --git a/website/docs/tutorial/what-next.mdx b/website/docs/tutorial/what-next.mdx
index d73fc615db..194e507fa6 100644
--- a/website/docs/tutorial/what-next.mdx
+++ b/website/docs/tutorial/what-next.mdx
@@ -28,7 +28,7 @@ topics:
## Dig Deeper
- Read the [user guide](/docs/topics) to learn more
-- Read the examples and guides in the [notebooks section](/docs/notebooks)
+- Read the examples and guides in the [notebooks section](/notebooks)
- Check [research](/docs/Research) and [blog](/blog)
## Get Help
diff --git a/website/mint-style.css b/website/mint-style.css
index 25e8388dce..7cc309c825 100644
--- a/website/mint-style.css
+++ b/website/mint-style.css
@@ -38,7 +38,7 @@ h4 {
h1 {
font-size: 3rem !important;
-
+ word-break: break-all;
code {
font-size: 2rem !important;
}
diff --git a/website/mint.json b/website/mint.json
index 5ee80c8390..8513025af2 100644
--- a/website/mint.json
+++ b/website/mint.json
@@ -300,6 +300,17 @@
"docs/reference/agentchat/contrib/web_surfer"
]
},
+ {
+ "group": "agentchat.realtime_agent",
+ "pages": [
+ "docs/reference/agentchat/realtime_agent/client",
+ "docs/reference/agentchat/realtime_agent/function_observer",
+ "docs/reference/agentchat/realtime_agent/realtime_agent",
+ "docs/reference/agentchat/realtime_agent/realtime_observer",
+ "docs/reference/agentchat/realtime_agent/twilio_observer",
+ "docs/reference/agentchat/realtime_agent/websocket_observer"
+ ]
+ },
"docs/reference/agentchat/agent",
"docs/reference/agentchat/assistant_agent",
"docs/reference/agentchat/chat",
@@ -344,6 +355,33 @@
"docs/reference/coding/utils"
]
},
+ {
+ "group": "interop",
+ "pages": [
+ {
+ "group": "interop.crewai",
+ "pages": [
+ "docs/reference/interop/crewai/crewai"
+ ]
+ },
+ {
+ "group": "interop.langchain",
+ "pages": [
+ "docs/reference/interop/langchain/langchain"
+ ]
+ },
+ {
+ "group": "interop.pydantic_ai",
+ "pages": [
+ "docs/reference/interop/pydantic_ai/pydantic_ai",
+ "docs/reference/interop/pydantic_ai/pydantic_ai_tool"
+ ]
+ },
+ "docs/reference/interop/interoperability",
+ "docs/reference/interop/interoperable",
+ "docs/reference/interop/registry"
+ ]
+ },
{
"group": "io",
"pages": [
@@ -377,6 +415,12 @@
"docs/reference/oai/together"
]
},
+ {
+ "group": "tools",
+ "pages": [
+ "docs/reference/tools/tool"
+ ]
+ },
"docs/reference/browser_utils",
"docs/reference/code_utils",
"docs/reference/exception_utils",
@@ -450,6 +494,9 @@
{
"group": "Recent posts",
"pages": [
+ "blog/2024-12-20-Tools-interoperability/index",
+ "blog/2024-12-20-Reasoning-Update/index",
+ "blog/2024-12-20-RealtimeAgent/index",
"blog/2024-12-06-FalkorDB-Structured/index",
"blog/2024-12-02-ReasoningAgent2/index",
"blog/2024-11-27-Prompt-Leakage-Probing/index",
@@ -571,7 +618,9 @@
"notebooks/JSON_mode_example",
"notebooks/agentchat_RetrieveChat",
"notebooks/agentchat_graph_rag_neo4j",
- "notebooks/agentchat_swarm_enhanced"
+ "notebooks/agentchat_swarm_enhanced",
+ "notebooks/agentchat_realtime_swarm",
+ "notebooks/agentchat_reasoning_agent"
]
},
"notebooks/Gallery"
diff --git a/website/snippets/data/NotebooksMetadata.mdx b/website/snippets/data/NotebooksMetadata.mdx
index d5c69dcf3a..51783730a4 100644
--- a/website/snippets/data/NotebooksMetadata.mdx
+++ b/website/snippets/data/NotebooksMetadata.mdx
@@ -868,7 +868,7 @@ export const notebooksMetadata = [
},
{
"title": "Tool Use",
- "link": "/docs/notebooks/tool-use",
+ "link": "/notebooks/tool-use",
"description": "",
"image": null,
"tags": [],
@@ -1003,5 +1003,212 @@ export const notebooksMetadata = [
"image": null,
"tags": [],
"source": "/website/docs/topics/non-openai-models/cloud-cerebras.ipynb"
+ },
+ {
+ "title": "RealtimeAgent in a Swarm Orchestration",
+ "link": "/notebooks/agentchat_realtime_swarm",
+ "description": "Swarm Ochestration",
+ "image": null,
+ "tags": [
+ "orchestration",
+ "group chat",
+ "swarm"
+ ],
+ "source": "/notebook/agentchat_realtime_swarm.ipynb"
+ },
+ {
+ "title": "ReasoningAgent - Advanced LLM Reasoning with Multiple Search Strategies",
+ "link": "/notebooks/agentchat_reasoning_agent",
+ "description": "Use ReasoningAgent for o1 style reasoning in Agentic workflows with LLMs using AG2",
+ "image": null,
+ "tags": [
+ "reasoning agent",
+ "tree of thoughts"
+ ],
+ "source": "/notebook/agentchat_reasoning_agent.ipynb"
+ },
+ {
+ "title": "LLM Configuration",
+ "link": "/notebooks/llm_configuration",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/llm_configuration.ipynb"
+ },
+ {
+ "title": "Command Line Code Executor",
+ "link": "/notebooks/cli-code-executor",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/code-execution/cli-code-executor.ipynb"
+ },
+ {
+ "title": "Custom Code Executor",
+ "link": "/notebooks/custom-executor",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/code-execution/custom-executor.ipynb"
+ },
+ {
+ "title": "Jupyter Code Executor",
+ "link": "/notebooks/jupyter-code-executor",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/code-execution/jupyter-code-executor.ipynb"
+ },
+ {
+ "title": "User Defined Functions",
+ "link": "/notebooks/user-defined-functions",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/code-execution/user-defined-functions.ipynb"
+ },
+ {
+ "title": "Customize Speaker Selection",
+ "link": "/notebooks/customized_speaker_selection",
+ "description": "Custom Speaker Selection Function",
+ "image": null,
+ "tags": [
+ "orchestration",
+ "group chat"
+ ],
+ "source": "/website/docs/topics/groupchat/customized_speaker_selection.ipynb"
+ },
+ {
+ "title": "Resuming a GroupChat",
+ "link": "/notebooks/resuming_groupchat",
+ "description": "Resume Group Chat",
+ "image": null,
+ "tags": [
+ "resume",
+ "orchestration",
+ "group chat"
+ ],
+ "source": "/website/docs/topics/groupchat/resuming_groupchat.ipynb"
+ },
+ {
+ "title": "Using Transform Messages during Speaker Selection",
+ "link": "/notebooks/transform_messages_speaker_selection",
+ "description": "Custom Speaker Selection Function",
+ "image": null,
+ "tags": [
+ "orchestration",
+ "long context handling",
+ "group chat"
+ ],
+ "source": "/website/docs/topics/groupchat/transform_messages_speaker_selection.ipynb"
+ },
+ {
+ "title": "Using Custom Model Client classes with Auto Speaker Selection",
+ "link": "/notebooks/using_custom_model_client_classes",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/groupchat/using_custom_model_client_classes.ipynb"
+ },
+ {
+ "title": "Cohere",
+ "link": "/notebooks/cloud-cohere",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/cloud-cohere.ipynb"
+ },
+ {
+ "title": "Using Gemini in AutoGen with Other LLMs",
+ "link": "/notebooks/cloud-gemini",
+ "description": "Using Gemini with AutoGen",
+ "image": null,
+ "tags": [
+ "gemini"
+ ],
+ "source": "/website/docs/topics/non-openai-models/cloud-gemini.ipynb"
+ },
+ {
+ "title": "Use AutoGen with Gemini via VertexAI",
+ "link": "/notebooks/cloud-gemini_vertexai",
+ "description": "Using Gemini with AutoGen via VertexAI",
+ "image": null,
+ "tags": [
+ "gemini",
+ "vertexai"
+ ],
+ "source": "/website/docs/topics/non-openai-models/cloud-gemini_vertexai.ipynb"
+ },
+ {
+ "title": "Groq",
+ "link": "/notebooks/cloud-groq",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/cloud-groq.ipynb"
+ },
+ {
+ "title": "Mistral AI",
+ "link": "/notebooks/cloud-mistralai",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/cloud-mistralai.ipynb"
+ },
+ {
+ "title": "Together.AI",
+ "link": "/notebooks/cloud-togetherai",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/cloud-togetherai.ipynb"
+ },
+ {
+ "title": "LiteLLM with Ollama",
+ "link": "/notebooks/local-litellm-ollama",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/local-litellm-ollama.ipynb"
+ },
+ {
+ "title": "LM Studio",
+ "link": "/notebooks/local-lm-studio",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/local-lm-studio.ipynb"
+ },
+ {
+ "title": "Ollama",
+ "link": "/notebooks/local-ollama",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/non-openai-models/local-ollama.ipynb"
+ },
+ {
+ "title": "LLM Reflection",
+ "link": "/notebooks/reflection",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/topics/prompting-and-reasoning/reflection.ipynb"
+ },
+ {
+ "title": "Terminating Conversations Between Agents",
+ "link": "/notebooks/chat-termination",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/tutorial/chat-termination.ipynb"
+ },
+ {
+ "title": "Code Executors",
+ "link": "/notebooks/code-executors",
+ "description": "",
+ "image": null,
+ "tags": [],
+ "source": "/website/docs/tutorial/code-executors.ipynb"
}
];