Fixed agent&awel examples (eosphoros-ai#2256)
csunny authored Jan 9, 2025
1 parent d521300 commit 523ad32
Showing 14 changed files with 156 additions and 334 deletions.
9 changes: 5 additions & 4 deletions dbgpt/rag/embedding/embeddings.py
@@ -803,9 +803,10 @@ def embed_query(self, text: str) -> List[float]:
                 "Please install ollama by command `pip install ollama"
             ) from e
         try:
-            return (
-                Client(self.api_url).embeddings(model=self.model_name, prompt=text)
-            )["embedding"]
+            embedding = Client(self.api_url).embeddings(
+                model=self.model_name, prompt=text
+            )
+            return list(embedding["embedding"])
         except ollama.ResponseError as e:
             raise ValueError(f"**Ollama Response Error, Please CheckErrorInfo.**: {e}")

@@ -839,7 +840,7 @@ async def aembed_query(self, text: str) -> List[float]:
             embedding = await AsyncClient(host=self.api_url).embeddings(
                 model=self.model_name, prompt=text
             )
-            return embedding["embedding"]
+            return list(embedding["embedding"])
         except ollama.ResponseError as e:
             raise ValueError(f"**Ollama Response Error, Please CheckErrorInfo.**: {e}")

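The list(...) wrapper makes these methods honor their declared List[float] return type even when the ollama client hands back a non-list sequence. A minimal sketch of the fixed call path, assuming a local Ollama server at its default URL and `pip install ollama`; the model name is illustrative:

.. code-block:: python

    from ollama import Client

    # Assumptions: Ollama is running locally and the model has already been
    # pulled (e.g. `ollama pull nomic-embed-text`).
    client = Client("http://localhost:11434")
    resp = client.embeddings(model="nomic-embed-text", prompt="hello world")
    vector = list(resp["embedding"])  # coerce to a plain list of floats
    print(len(vector))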
32 changes: 32 additions & 0 deletions examples/__main__.py
@@ -0,0 +1,32 @@
+# TODO add example run code here
+
+import asyncio
+
+# Agents examples
+from .agents.auto_plan_agent_dialogue_example import main as auto_plan_main
+from .agents.awel_layout_agents_chat_examples import main as awel_layout_main
+from .agents.custom_tool_agent_example import main as custom_tool_main
+from .agents.plugin_agent_dialogue_example import main as plugin_main
+from .agents.retrieve_summary_agent_dialogue_example import (
+    main as retrieve_summary_main,
+)
+from .agents.sandbox_code_agent_example import main as sandbox_code_main
+from .agents.single_agent_dialogue_example import main as single_agent_main
+from .agents.sql_agent_dialogue_example import main as sql_main
+
+if __name__ == "__main__":
+
+    # Run the examples
+
+    ## Agent examples
+    asyncio.run(auto_plan_main())
+    asyncio.run(awel_layout_main())
+    asyncio.run(custom_tool_main())
+    asyncio.run(retrieve_summary_main())
+    asyncio.run(plugin_main())
+    asyncio.run(sandbox_code_main())
+    asyncio.run(single_agent_main())
+    asyncio.run(sql_main())
+
+    ## awel examples
+    print("hello world!")
15 changes: 8 additions & 7 deletions examples/agents/auto_plan_agent_dialogue_example.py
@@ -5,9 +5,9 @@
     Execute the following command in the terminal:
     Set env params.
     .. code-block:: shell
-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1
     run example.
     ..code-block:: shell
@@ -33,13 +33,14 @@


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

     agent_memory = AgentMemory()
-    from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient

-    llm_client = TongyiLLMClient(
-        model_alias="qwen2-72b-instruct",
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
     )

     context: AgentContext = AgentContext(
14 changes: 7 additions & 7 deletions examples/agents/awel_layout_agents_chat_examples.py
@@ -6,14 +6,13 @@
     Set env params.
     .. code-block:: shell
-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1
     run example.
     ..code-block:: shell
         python examples/agents/awel_layout_agents_chat_examples.py
 """
-
 import asyncio
 import os

@@ -34,15 +33,16 @@


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient

     agent_memory = AgentMemory()
     agent_memory.gpts_memory.init(conv_id="test456")
     try:
-        from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient
+        from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-        llm_client = TongyiLLMClient(
-            model_alias="qwen2-72b-instruct",
+        llm_client = SiliconFlowLLMClient(
+            model_alias=os.getenv(
+                "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+            ),
         )

         context: AgentContext = AgentContext(conv_id="test456", gpts_app_name="信息析助手")
15 changes: 10 additions & 5 deletions examples/agents/custom_tool_agent_example.py
@@ -44,12 +44,17 @@ def count_directory_files(path: Annotated[str, Doc("The directory path")]) -> int:


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
-
-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
-    context: AgentContext = AgentContext(conv_id="test456")
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient
+
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="test456")
+
+    context: AgentContext = AgentContext(conv_id="test456", gpts_app_name="工具助手")

     tools = ToolPack([simple_calculator, count_directory_files])

Expand Down Expand Up @@ -77,7 +82,7 @@ async def main():
)

# dbgpt-vis message infos
print(await agent_memory.gpts_memory.one_chat_completions("test456"))
print(await agent_memory.gpts_memory.app_link_chat_message("test456"))


if __name__ == "__main__":
23 changes: 14 additions & 9 deletions examples/agents/plugin_agent_dialogue_example.py
@@ -6,8 +6,8 @@
     Set env params.
     .. code-block:: shell
-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1
     run example.
     ..code-block:: shell
@@ -20,19 +20,24 @@
 from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
 from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
 from dbgpt.agent.resource import AutoGPTPluginToolPack
+from dbgpt.configs.model_config import ROOT_PATH

-current_dir = os.getcwd()
-parent_dir = os.path.dirname(current_dir)
-test_plugin_dir = os.path.join(parent_dir, "test_files/plugins")
+test_plugin_dir = os.path.join(ROOT_PATH, "examples/test_files/plugins")


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
-    context: AgentContext = AgentContext(conv_id="test456")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
+
     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="test456")
+
+    context: AgentContext = AgentContext(conv_id="test456", gpts_app_name="插件对话助手")

     tools = AutoGPTPluginToolPack(test_plugin_dir)

@@ -54,7 +59,7 @@ async def main():
     )

     # dbgpt-vis message infos
-    print(await agent_memory.gpts_memory.one_chat_completions("test456"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))


 if __name__ == "__main__":
35 changes: 20 additions & 15 deletions examples/agents/retrieve_summary_agent_dialogue_example.py
@@ -6,8 +6,8 @@
     Set env params.
     .. code-block:: shell
-        export OPENAI_API_KEY=sk-xx
-        export OPENAI_API_BASE=https://xx:80/v1
+        export SILICONFLOW_API_KEY=sk-xx
+        export SILICONFLOW_API_BASE=https://xx:80/v1
     run example.
     ..code-block:: shell
@@ -18,20 +18,27 @@
 import os

 from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
-from dbgpt.agent.expand.retrieve_summary_assistant_agent import (
-    RetrieveSummaryAssistantAgent,
-)
+from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent
 from dbgpt.configs.model_config import ROOT_PATH


-async def summary_example_with_success():
-    from dbgpt.model.proxy import OpenAILLMClient
+async def main():
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo-16k")
-    context: AgentContext = AgentContext(conv_id="retrieve_summarize")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
+    context: AgentContext = AgentContext(
+        conv_id="retrieve_summarize", gpts_app_name="Summary Assistant"
+    )
+
     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="retrieve_summarize")

     summarizer = (
-        await RetrieveSummaryAssistantAgent()
+        await SummaryAssistantAgent()
         .bind(context)
         .bind(LLMConfig(llm_client=llm_client))
         .bind(agent_memory)
@@ -47,6 +54,7 @@ async def summary_example_with_success():
         "https://en.wikipedia.org/wiki/Chernobyl_disaster",
     ]

+    # TODO add a tool to load the pdf and internet files
     await user_proxy.initiate_chat(
         recipient=summarizer,
         reviewer=user_proxy,
@@ -55,11 +63,8 @@ async def summary_example_with_success():
     )

     # dbgpt-vis message infos
-    print(await agent_memory.gpts_memory.one_chat_completions("retrieve_summarize"))
+    print(await agent_memory.gpts_memory.app_link_chat_message("retrieve_summarize"))


 if __name__ == "__main__":
-    asyncio.run(summary_example_with_success())
-    print(
-        "\033[92m=======================The Summary Assistant with Successful Results Ended==================\n\n\033[91m"
-    )
+    asyncio.run(main())
25 changes: 21 additions & 4 deletions examples/agents/sandbox_code_agent_example.py
@@ -10,9 +10,9 @@
 environment. The code execution environment is isolated from the host system,
 preventing access to the internet and other external resources.
 """
-
 import asyncio
 import logging
+import os
 from typing import Optional, Tuple

 from dbgpt.agent import (
@@ -270,11 +270,28 @@ async def correctness_check(


 async def main():
-    from dbgpt.model.proxy import OpenAILLMClient
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    llm_client = OpenAILLMClient(model_alias="gpt-4o-mini")
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
+    )
     context: AgentContext = AgentContext(conv_id="test123")
-    agent_memory = AgentMemory(HybridMemory[AgentMemoryFragment].from_chroma())
+
+    # TODO Embedding and Rerank model refactor
+    from dbgpt.rag.embedding import OpenAPIEmbeddings
+
+    silicon_embeddings = OpenAPIEmbeddings(
+        api_url=os.getenv("SILICONFLOW_API_BASE") + "/embeddings",
+        api_key=os.getenv("SILICONFLOW_API_KEY"),
+        model_name="BAAI/bge-large-zh-v1.5",
+    )
+    agent_memory = AgentMemory(
+        HybridMemory[AgentMemoryFragment].from_chroma(
+            embeddings=silicon_embeddings,
+        )
+    )
     agent_memory.gpts_memory.init("test123")

     coder = (
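Because from_chroma now receives explicit embeddings, the same wiring can be exercised on its own. A sketch under the assumption that the SILICONFLOW_* variables are exported and the endpoint serves an OpenAI-compatible embeddings API:

.. code-block:: python

    import os

    from dbgpt.rag.embedding import OpenAPIEmbeddings

    # Endpoint path and model name mirror the diff above; both assume an
    # OpenAI-compatible /embeddings API behind SILICONFLOW_API_BASE.
    embeddings = OpenAPIEmbeddings(
        api_url=os.getenv("SILICONFLOW_API_BASE") + "/embeddings",
        api_key=os.getenv("SILICONFLOW_API_KEY"),
        model_name="BAAI/bge-large-zh-v1.5",
    )
    print(len(embeddings.embed_query("sandboxed code execution")))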
23 changes: 8 additions & 15 deletions examples/agents/single_agent_dialogue_example.py
@@ -17,28 +17,21 @@
 import asyncio
 import os

-from dbgpt.agent import (
-    AgentContext,
-    AgentMemory,
-    AgentMemoryFragment,
-    HybridMemory,
-    LLMConfig,
-    UserProxyAgent,
-)
+from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
 from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent


 async def main():
+    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

-    # llm_client = OpenAILLMClient(model_alias="gpt-3.5-turbo")
-    from dbgpt.model.proxy.llms.tongyi import TongyiLLMClient
-
-    llm_client = TongyiLLMClient(
-        model_alias="qwen2-72b-instruct",
+    llm_client = SiliconFlowLLMClient(
+        model_alias=os.getenv(
+            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
+        ),
     )

-    context: AgentContext = AgentContext(conv_id="test123")
+    context: AgentContext = AgentContext(conv_id="test123", gpts_app_name="代码助手")

     agent_memory = AgentMemory()
+    agent_memory.gpts_memory.init(conv_id="test123")
     try: