
Commit ce6df74: Use local thread

hinthornw committed Oct 3, 2024
1 parent e778bd0 commit ce6df74
Showing 3 changed files with 13 additions and 20 deletions.

src/chatbot/configuration.py (2 changes: 0 additions & 2 deletions)

```diff
@@ -20,8 +20,6 @@ class ChatConfigurable:
     model: str = "anthropic/claude-3-5-sonnet-20240620"
     delay_seconds: int = 10  # For debouncing memory creation
     system_prompt: str = SYSTEM_PROMPT
-    # None will default to connecting to the local deployment
-    memory_service_url: str | None = None
 
     @classmethod
     def from_runnable_config(
```
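The `from_runnable_config` classmethod is truncated in the hunk above. For orientation, here is a minimal sketch of the conventional LangGraph-template implementation, assuming the standard pattern of hydrating the dataclass from `config["configurable"]`; the field list is abbreviated and the method body is an assumption, not the literal contents of this file:

```python
from dataclasses import dataclass, fields
from typing import Any, Optional

from langchain_core.runnables import RunnableConfig


@dataclass(kw_only=True)
class ChatConfigurable:
    """Abbreviated stand-in for the template's configuration class."""

    user_id: str = "default-user"
    model: str = "anthropic/claude-3-5-sonnet-20240620"
    delay_seconds: int = 10  # For debouncing memory creation

    @classmethod
    def from_runnable_config(
        cls, config: Optional[RunnableConfig] = None
    ) -> "ChatConfigurable":
        """Populate fields from the `configurable` dict, ignoring unknown keys."""
        configurable: dict[str, Any] = (config or {}).get("configurable") or {}
        known = {f.name for f in fields(cls) if f.init}
        return cls(**{k: v for k, v in configurable.items() if k in known})
```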

src/chatbot/graph.py (24 changes: 9 additions & 15 deletions)

```diff
@@ -1,12 +1,12 @@
 """Example chatbot that incorporates user memories."""
 
-import uuid
 from dataclasses import dataclass
 from datetime import datetime, timezone
 
 from langchain_core.runnables import RunnableConfig
 from langgraph.graph import StateGraph
 from langgraph.graph.message import Messages, add_messages
+from langgraph.store.base import BaseStore
 from langgraph_sdk import get_client
 from typing_extensions import Annotated
```

```diff
@@ -21,18 +21,19 @@ class ChatState:
     messages: Annotated[list[Messages], add_messages]
 
 
-async def bot(state: ChatState, config: RunnableConfig) -> dict[str, list[Messages]]:
+async def bot(
+    state: ChatState, config: RunnableConfig, store: BaseStore
+) -> dict[str, list[Messages]]:
     """Prompt the bot to respond to the user, incorporating memories (if provided)."""
     configurable = ChatConfigurable.from_runnable_config(config)
-    memory_client = get_client(url=configurable.memory_service_url)
     namespace = (configurable.user_id,)
     # This lists ALL user memories in the provided namespace (up to the `limit`)
     # you can also filter by content.
-    user_memory = await memory_client.store.search_items(namespace)
+    items = await store.asearch(namespace)
 
     model = init_model(configurable.model)
     prompt = configurable.system_prompt.format(
-        user_info=format_memories([item for item in user_memory["items"]]),
+        user_info=format_memories(items),
         time=datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S"),
     )
     m = await model.ainvoke(
```
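This hunk is the heart of the commit: instead of reaching the memory deployment through an SDK client configured with `memory_service_url`, the `bot` node now declares a `store: BaseStore` parameter and LangGraph injects the deployment's store at runtime. A hedged sketch of the same read path using the in-process `InMemoryStore`; the namespace, key, and value are made up for illustration:

```python
import asyncio

from langgraph.store.memory import InMemoryStore


async def main() -> None:
    store = InMemoryStore()  # stands in for the injected BaseStore
    namespace = ("user-123",)  # hypothetical user id, mirroring (configurable.user_id,)
    await store.aput(namespace, "mem-1", {"food_preference": "likes pizza"})

    # The same call the updated `bot` node makes: list items under the namespace.
    items = await store.asearch(namespace)
    for item in items:
        print(item.value, "| last updated:", item.updated_at)


asyncio.run(main())
```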

```diff
@@ -45,16 +46,9 @@
 async def schedule_memories(state: ChatState, config: RunnableConfig) -> None:
     """Schedule a run of the memory graph to process the conversation."""
     configurable = ChatConfigurable.from_runnable_config(config)
-    memory_client = get_client(url=configurable.memory_service_url)
-    memory_thread = str(
-        uuid.uuid5(
-            uuid.NAMESPACE_DNS,
-            configurable.user_id + config["configurable"]["thread_id"],
-        )
-    )
-    await memory_client.threads.create(thread_id=memory_thread, if_exists="do_nothing")
+    memory_client = get_client()
     await memory_client.runs.create(
-        thread_id=memory_thread,
+        thread_id=config["configurable"]["thread_id"],
         assistant_id=configurable.mem_assistant_id,
         input={
             # the service dedupes messages by ID, so we can send the full convo each time
```

```diff
@@ -66,7 +60,7 @@ async def schedule_memories(state: ChatState, config: RunnableConfig) -> None:
                 "user_id": configurable.user_id,
             },
         },
-        multitask_strategy="rollback",
+        multitask_strategy="enqueue",
         # This lets us "debounce" repeated requests to the memory graph
         # if the user is actively engaging in a conversation
         after_seconds=configurable.delay_seconds,
```
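These two hunks are where the commit title "Use local thread" comes from: the memory run is now created on the conversation's own thread (`config["configurable"]["thread_id"]`) through a default `get_client()` connection, rather than on a separate uuid5-derived thread of a remote service. The debounce behavior still comes from the scheduling arguments: `after_seconds` delays execution, and `multitask_strategy="enqueue"` queues overlapping runs instead of rolling them back. A sketch of the pattern in isolation; the thread and assistant ids are placeholders, and running it requires a live deployment:

```python
import asyncio

from langgraph_sdk import get_client


async def schedule_memory_run(thread_id: str, user_id: str) -> None:
    client = get_client()  # defaults to the local deployment, as in the commit
    await client.runs.create(
        thread_id=thread_id,
        assistant_id="memory_graph",  # placeholder for configurable.mem_assistant_id
        input={"messages": []},  # the service dedupes messages by ID
        config={"configurable": {"user_id": user_id}},
        multitask_strategy="enqueue",  # queue overlapping runs rather than roll back
        after_seconds=10,  # debounce window (configurable.delay_seconds)
    )


asyncio.run(schedule_memory_run("thread-abc", "user-123"))
```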

src/chatbot/utils.py (7 changes: 4 additions & 3 deletions)

```diff
@@ -1,18 +1,19 @@
 """Define utility functions for your graph."""
 
-from typing import Any, Mapping, Optional
+from typing import Optional
 
 from langchain.chat_models import init_chat_model
 from langchain_core.language_models import BaseChatModel
+from langgraph.store.base import Item
 
 
-def format_memories(memories: Optional[list[Mapping[str, Any]]]) -> str:
+def format_memories(memories: Optional[list[Item]]) -> str:
     """Format the user's memories."""
     if not memories:
         return ""
     # Nota bene: You can format better than this....
     formatted_memories = "\n".join(
-        f'{str(m["value"])}\tLast updated: {m["updated_at"]}' for m in memories
+        f"{str(m.value)}\tLast updated: {m.updated_at}" for m in memories
     )
     return f"""
```
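Since `format_memories` now receives `langgraph.store.base.Item` objects (attribute access) instead of raw mappings (key access), here is a quick usage sketch, with a hand-built `Item` and a made-up payload standing in for a store result:

```python
from datetime import datetime, timezone

from langgraph.store.base import Item

item = Item(
    value={"food_preference": "likes pizza"},  # made-up memory payload
    key="mem-1",
    namespace=("user-123",),
    created_at=datetime.now(timezone.utc),
    updated_at=datetime.now(timezone.utc),
)

# Attribute access, as in the updated f-string:
print(f"{str(item.value)}\tLast updated: {item.updated_at}")
```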
