Commit 0012835
feat: Added neoagent for testing and as a base-agent
Alternative, simple agent for chatting using the ReAct template
WilliamMRS committed Nov 11, 2024
1 parent 13c6023 commit 0012835
Showing 4 changed files with 106 additions and 34 deletions.
118 changes: 101 additions & 17 deletions core/Agents/neoagent.py
@@ -6,7 +6,8 @@
 from langchain_core.tools import tool
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.graph import MessagesState, StateGraph, START, END
-from langgraph.prebuilt import ToolNode
+from langchain_core.messages import BaseMessage, AIMessageChunk, HumanMessage, AIMessage, ToolMessage
+from langgraph.prebuilt import ToolNode, tools_condition

 from models import Model # Models for chatGPT

@@ -24,28 +25,111 @@
 ReAct is a simple multi-step agent architecture.
 Smaller graphs are often better understood by the LLMs.
 """
-# Defining the model TODO: Make this configurable with Llama, Grok, Gemini, Claude
-model = ChatOpenAI(
-    model=Model.gpt_4o,
-    temperature=0,
-    max_tokens=16384, # Max tokens for mini. For gpt-4o it's 128k
-) # Using ChatGPT hardcoded (TODO: Make this dynamic)
-# Defining the checkpoint memory saver.
-memory = MemorySaver()
+class Neoagent:
+    def __init__(self):
+        print("""
+------------------------------
+Instantiated NeoAgent....
+------------------------------
+        """)
+        system_prompt = "You are Jarvis, an AI assistant here to help the human accomplish tasks. Respond in a conversational, natural style that sounds good when spoken aloud. Keep responses short and to the point, using clear, engaging language. When explaining your thought process, be concise and only describe essential steps to maintain a conversational flow."
+        # Defining the model. TODO: Make this configurable with Llama, Grok, Gemini, Claude
+        model = ChatOpenAI(
+            model=Model.gpt_4o,
+            temperature=0,
+            max_tokens=16384, # Max tokens for mini. For gpt-4o it's 128k
+        ) # Using ChatGPT hardcoded (TODO: Make this dynamic)
+        # Defining the checkpoint memory saver.
+        memory = MemorySaver()

-# Defining the tavily web-search tool
-tavily = TavilySearchResults(max_results=2)
+        # Defining the Tavily web-search tool
+        tavily = TavilySearchResults(max_results=2)

-tools = [add, tavily]
-tool_node = ToolNode(tools)
+        # Adding tools and creating the tool node.
+        tools = [add, tavily]
+        tool_node = ToolNode(tools)
+        llm_with_tools = model.bind_tools(tools)

-bound_model = model.bind_tools(tools)
-
-class State(TypedDict):
-    messages: Annotated[list, add_messages]
+        class State(TypedDict):
+            messages: Annotated[list, add_messages]

-graph_builder = StateGraph(State)
+        graph_builder = StateGraph(State)
+
+        # Executive node that thinks about the problem or query at hand.
+        def executive_node(state: State):
+            if not state["messages"]:
+                state["messages"] = [("system", system_prompt)]
+            return {"messages": [llm_with_tools.invoke(state["messages"])]}
+
+        graph_builder.add_node("executive_node", executive_node)
+        graph_builder.add_node("tools", tool_node) # The prebuilt tool node added as "tools"
+
+        graph_builder.add_conditional_edges(
+            "executive_node",
+            tools_condition,
+        )
+
+        # Add the remaining edge and entry point, then compile the graph. The exit path is taken when the executive node produces no tool call.
+        graph_builder.add_edge("tools", "executive_node")
+        graph_builder.set_entry_point("executive_node")
+        self.graph = graph_builder.compile(checkpointer=memory)
+
+        # Draws the graph visually
+        with open("neoagent.png", 'wb') as f:
+            f.write(self.graph.get_graph().draw_mermaid_png())
+
+    # Streams graph updates to the terminal.
+    def stream_graph_updates(self, user_input: str):
+        config = {"configurable": {"thread_id": "1"}} # TODO: Remove. This is just a placeholder
+        for event in self.graph.stream({"messages": [("user", user_input)]}, config):
+            for value in event.values():
+                print("Assistant:", value["messages"][-1].content)
+
+    async def run(self, user_prompt: str, socketio):
+        """
+        Run the agent with a user prompt and emit the response and total tokens via socket.
+        """
+        # TODO: Make the chats saved and restored, using the thread ID as the guiding value.
+        try:
+            input = {"messages": [("human", user_prompt)]}
+            socketio.emit("start_message", " ")
+            # The thread ID is hardcoded for now. The config keys the memory checkpoint that saves chat state. Only in-memory, not persistent yet.
+            config = {"configurable": {"thread_id": "1"}}
+            async for event in self.graph.astream_events(input, config, version='v2'):
+                event_type = event.get('event')
+                # Focuses on the 'on_chain_end' and 'on_chain_stream' events.
+                # There may be better events to base the response on.
+                if event_type == 'on_chain_end' and event['name'] == 'LangGraph':
+                    ai_message = event['data']['output']['messages'][-1]
+
+                    if isinstance(ai_message, AIMessage):
+                        print(ai_message)
+                        if 'tool_calls' in ai_message.additional_kwargs:
+                            try:
+                                tool_call = ai_message.additional_kwargs['tool_calls'][0]['function']
+                                #tool_call_id = ai_message.additional_kwargs['tool_calls'][0]['id']
+                                socketio.emit("tool_call", tool_call)
+                                continue
+                            except Exception as e:
+                                return e
+
+                        socketio.emit("chunk", ai_message.content)
+                        socketio.emit("tokens", ai_message.usage_metadata['total_tokens'])
+                        continue
+
+                if event_type == 'on_chain_stream' and event['name'] == 'tools':
+                    tool_response = event['data']['chunk']['messages'][-1]
+                    if isinstance(tool_response, ToolMessage):
+                        socketio.emit("tool_response", tool_response.content)
+                        continue
+
+            return "success"
+        except Exception as e:
+            print(e)
+            return e
 """
 # Updating the state requires creating a new state (following state immutability for history and checkpoints)
… (diff truncated)
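Because the compiled graph checkpoints state with MemorySaver under the hardcoded thread_id "1", consecutive calls on the same Neoagent instance share chat history. A minimal sketch of driving the new agent from a terminal (not part of the commit; it assumes OPENAI_API_KEY and TAVILY_API_KEY are set and that it runs from the core/ directory):

# Sketch only: exercises the committed Neoagent class as-is.
from Agents.neoagent import Neoagent

agent = Neoagent()

# Both calls use the hardcoded thread_id "1", so the MemorySaver checkpointer
# replays the first exchange before the second one runs.
agent.stream_graph_updates("What is 21 + 21? Use the add tool.")
agent.stream_graph_updates("Double that result.")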
18 changes: 1 addition & 17 deletions core/graphAgent.py
@@ -83,28 +83,12 @@ def run_stream_only(self, user_prompt: str):
         for chunk in self.llm.stream(user_prompt):
             yield chunk.content

-    # For running the agent. Comment out for testing in the terminal.
     async def run(self, user_prompt: str, socketio):
         """
        Run the agent with a user prompt and emit the response and total tokens via socket
         """
         try:
-            if 'system:' in user_prompt:
-                user_prompt = user_prompt.replace('system:', 'suggested prompt for my ai:')
-
-            # TODO: Link to the current chatID
-            # Graph.trim_history to remove excess tokens above the limit.
-            chat_history = [("human", "How many planets are there in the solar system?"),
-                            ("ai", "There are eight planets in our solar system. They are Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune.")]
-
-            input = {"messages": [
-                ("system", """
-                You are Jarvis, an AI assistant here to help the human accomplish tasks.
-                Respond in a conversational, natural style that sounds good when spoken aloud.
-                Keep responses short and to the point, using clear, engaging language.
-                When explaining your thought process, be concise and only describe essential steps to maintain a conversational flow.
-                """)
-            ] + chat_history + [("human", user_prompt)]}
+            input = {"messages": [("human", user_prompt)]}
             socketio.emit("start_message", " ")
             config = {"configurable": {"thread_id": "1"}} # The thread ID is hardcoded for now.
             async for event in self.graph.astream_events(input, config, version='v2'): # The config uses the memory checkpoint to save chat state. Only in-memory, not persistent yet.
… (diff truncated)
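The deleted block injected a hardcoded system prompt and a canned chat history into every request. With the graph compiled on a MemorySaver checkpointer, history is replayed per thread_id, so the caller now sends only the newest human message (in neoagent.py the system prompt is seeded inside executive_node instead). A short sketch of that behavior (an assumption-level illustration: `graph` is any LangGraph graph compiled with a checkpointer, as above):

# Sketch only: two turns on one thread; the second sees the first from the checkpoint.
config = {"configurable": {"thread_id": "1"}}

graph.invoke({"messages": [("human", "How many planets are in the solar system?")]}, config)
result = graph.invoke({"messages": [("human", "Name the largest one.")]}, config)
print(result["messages"][-1].content)  # The model answers with the prior turn as context.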
4 changes: 4 additions & 0 deletions core/main.py
@@ -1,5 +1,6 @@
 from flask import Flask, request, url_for, jsonify
 from graphAgent import Graph
+from Agents.neoagent import Neoagent
 from models import Model
 from summarize_chat import summarize_chat
 from rag import embed_and_store

@@ -29,7 +30,10 @@
 socketio = SocketIO(app, cors_allowed_origins="*") # Enable CORS for WebSocket

 # Agent instantiation
+# Graph() contains all the complex tools.
+# Neoagent() is a simple ReAct agent that only has web search and the add tool, for testing purposes.
 jarvis = Graph() # API key is configured in agent.py
+#jarvis = Neoagent()

 # Initialize active_chats with the correct format
 active_chats = defaultdict(lambda: {"chat_history": []})
… (diff truncated)
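Swapping agents currently means editing main.py and moving the comment marker between the two jarvis lines. A hypothetical tweak, not in the commit (JARVIS_AGENT is an invented variable name), that would make the choice configurable:

import os

from graphAgent import Graph
from Agents.neoagent import Neoagent

# Hypothetical: select the agent via an environment variable instead of editing code.
jarvis = Neoagent() if os.getenv("JARVIS_AGENT") == "neo" else Graph()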
Binary file added core/neoagent.png