Skip to content

Commit

Permalink
cr
Browse files Browse the repository at this point in the history
  • Loading branch information
hwchase17 committed Aug 23, 2024
1 parent 04dce92 commit 64ac68f
Show file tree
Hide file tree
Showing 3 changed files with 380 additions and 211 deletions.
140 changes: 140 additions & 0 deletions backend/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -386,3 +386,143 @@ class InputSchema(TypedDict):
workflow.add_edge("response_synthesizer_cohere", END)

graph = workflow.compile()

from langgraph.graph import MessagesState

Check failure on line 390 in backend/graph.py

View workflow job for this annotation

GitHub Actions / build (3.11)

Ruff (E402)

backend/graph.py:390:1: E402 Module level import not at top of file


class ResearcherState(TypedDict):
    """State for the researcher subgraph: one sub-question per run."""
    # The single research question this subgraph invocation is answering.
    sub_question: str
    # Search queries produced by generate_queries for this sub-question.
    queries: list[str]
    # Retrieved documents; update_documents is the reducer that merges
    # writes from the parallel "query" branches.
    documents: Annotated[list[Document], update_documents]


class AgentState(MessagesState):
    """Top-level agent state: chat messages plus research bookkeeping."""
    # Outstanding research questions; processed one at a time and popped
    # by remove_question after each researcher run.
    sub_questions: list[str]
    # Documents accumulated across researcher runs; update_documents is
    # the reducer that merges concurrent writes.
    documents: Annotated[list[Document], update_documents]


# System prompt for generate_queries: asks the model for 3 diverse search
# queries for a sub-question. Runtime prompt text — do not edit casually.
generate_queries_prompt = """Generate 3 search queries to search for \
to answer the user's question. These search queries should be diverse in nature - do not generate \
repetitive ones."""

def generate_queries(state: ResearcherState):
    """Produce search queries for the current sub-question.

    Uses structured output so the model's reply is parsed directly into
    the ``queries`` key expected by ResearcherState.
    """

    # Schema for the structured output; the model fills in `queries`.
    class Response(TypedDict):
        queries: list[str]

    conversation = [
        {"role": "system", "content": generate_queries_prompt},
        {"role": "human", "content": state["sub_question"]},
    ]
    structured_llm = gpt_4o_mini.with_structured_output(Response)
    return structured_llm.invoke(conversation)

class QueryState(TypedDict):
    """Per-branch state for one fanned-out search query."""
    # The search query string to run against the retriever.
    query: str

def query(state: QueryState):
    """Run one search query against the retriever and return its hits.

    The returned documents are merged into the subgraph state by the
    update_documents reducer on the `documents` key.
    """
    docs = get_retriever().invoke(state["query"])
    return {"documents": docs}


from langgraph.constants import Send

Check failure on line 429 in backend/graph.py

View workflow job for this annotation

GitHub Actions / build (3.11)

Ruff (E402)

backend/graph.py:429:1: E402 Module level import not at top of file
from langgraph.graph import END, START

Check failure on line 430 in backend/graph.py

View workflow job for this annotation

GitHub Actions / build (3.11)

Ruff (E402)

backend/graph.py:430:1: E402 Module level import not at top of file
def do_queries(state: ResearcherState) -> Literal['query']:
    """Fan out one Send packet per generated query to the 'query' node.

    The Literal annotation tells LangGraph which node the conditional
    edge can target; the actual return value is a list of Send objects
    executed in parallel.
    """
    sends = []
    for search_query in state['queries']:
        sends.append(Send("query", {"query": search_query}))
    return sends

# Researcher subgraph: START -> generate_queries, which fans out (via Send
# in do_queries) to parallel "query" nodes; each query's documents are
# merged by the update_documents reducer, then the branch ends.
researcher = StateGraph(ResearcherState)
researcher.add_node(query)
researcher.add_node(generate_queries)
researcher.add_edge(START, "generate_queries")
researcher.add_conditional_edges("generate_queries", do_queries)
researcher.add_edge("query", END)
researcher = researcher.compile()


# System prompt for generate_questions: instructs the model to either ask
# the user for missing details or emit research sub-questions via the
# ResearchQuestions tool. Runtime prompt text — do not edit casually.
generate_questions_prompt = """You are a LangChain expert, here to assist with any and all questions or issues with LangChain, LangGraph, LangSmith, or any related functionality. Users may come to you with questions or issues.
You are world class researcher. Based on the conversation below, you have the option to generate 3 research questions to research in the LangChain documentation to resolve the users question. These questions should be diverse and cover a spectrum of possibilities. If the question references multiple concepts, or seems like understanding multiple concepts may be needed in order to answer, you should generate a sub question for each of those concepts. Do not answer directly about any LangChain questions without calling the research tool
Because you will research these questions in the LangChain documentation, if you need any more information from the user ask them for that BEFORE calling this tool. You should respond asking for more information only when:
- The user complains about an error but doesnt provide the error
- The user says something isn't working but doesnt explain why/how it's not working.
Otherwise, go ahead and research their question!"""
def generate_questions(state: AgentState):
    """Either reply directly or emit research sub-questions.

    Binds the ResearchQuestions tool so the model can choose to call it.
    If the model makes a tool call, its arguments (the sub_questions list)
    are returned as a state update; otherwise the raw reply is appended to
    the message history.
    """

    # Tool schema exposed to the model; name and docstring are part of the
    # model-visible contract.
    class ResearchQuestions(TypedDict):
        """Ask research questions."""
        sub_questions: list[str]

    conversation = [{"role": "system", "content": generate_questions_prompt}] + state['messages']
    response = gpt_4o_mini.bind_tools([ResearchQuestions]).invoke(conversation)
    if response.tool_calls:
        return response.tool_calls[0]['args']
    return {"messages": response}

def route_question(state: AgentState) -> Literal['generate', 'researcher', END]:
    """Decide the next hop after question generation or a researcher run.

    While sub_questions remain, dispatch the first one (only one at a time)
    to the researcher subgraph via Send. Once the list is empty: synthesize
    an answer if the last message is from the user, otherwise end (the
    model already replied, e.g. asking for more information).
    """
    pending = state.get("sub_questions", [])
    if pending:
        return [Send("researcher", {"sub_question": q}) for q in pending[:1]]
    last_message = state['messages'][-1]
    return "generate" if isinstance(last_message, HumanMessage) else END

def remove_question(state):
    """Pop the sub-question that was just researched off the queue."""
    remaining = state["sub_questions"][1:]
    return {"sub_questions": remaining}


# System prompt template for the final answer synthesizer; `{context}` is
# filled with formatted retrieved documents via .format(). Note `{{...}}`
# renders as literal braces in the citation-notation example.
# Fixes: closed the context block properly (`</context>` instead of the
# malformed `<context/>`) and repaired the garbled "ALL THAT END" phrase.
RESPONSE_TEMPLATE1 = """\
You are an expert programmer and problem-solver, tasked with answering any question \
about Langchain.
Generate a comprehensive and informative answer for the \
given question based solely on the provided search results (URL and content). \
Do NOT ramble, and adjust your response length based on the question. If they ask \
a question that can be answered in one sentence, do that. If 5 paragraphs of detail is needed, \
do that. You must \
only use information from the provided search results. Use an unbiased and \
journalistic tone. Combine search results together into a coherent answer. Do not \
repeat text. Cite search results using [${{number}}] notation. Only cite the most \
relevant results that answer the question accurately. Place these citations at the end \
of the individual sentence or paragraph that reference them. \
Do not put them all at the end, but rather sprinkle them throughout. If \
different results refer to different entities within the same name, write separate \
answers for each entity.
You should use bullet points in your answer for readability. Put citations where they apply
rather than putting them all at the end. DO NOT PUT THEM ALL AT THE END, PUT THEM IN THE BULLET POINTS.
If there is nothing in the context relevant to the question at hand, do NOT make up an answer. \
Rather, tell them why you're unsure and ask for any additional information that may help you answer better.
Anything between the following `context` html blocks is retrieved from a knowledge \
bank, not part of the conversation with the user.
<context>
{context}
</context>
"""


def generate(state: AgentState):
    """Synthesize the final answer from the accumulated documents.

    Formats the retrieved documents into the response template, then asks
    the model to answer using that context plus the conversation so far.
    """
    system_prompt = RESPONSE_TEMPLATE1.format(context=format_docs(state['documents']))
    conversation = [{"role": "system", "content": system_prompt}] + state['messages']
    return {"messages": gpt_4o_mini.invoke(conversation)}

# Top-level agent graph. generate_questions either replies directly or
# emits sub_questions; route_question dispatches the first pending
# sub-question to the researcher subgraph, remove_question pops it, and
# the remove_question -> route_question loop repeats until the queue is
# empty, after which generate synthesizes the final answer. Input/output
# are restricted to plain message state for callers.
agent = StateGraph(AgentState, input=MessagesState, output=MessagesState)
agent.add_node("researcher", researcher)
agent.add_node(generate_questions)
agent.add_node(generate)
agent.add_node(remove_question)
agent.add_conditional_edges("generate_questions", route_question)
agent.add_edge("researcher", "remove_question")
agent.add_conditional_edges("remove_question", route_question)
agent.add_edge("generate", END)
agent.add_edge(START, "generate_questions")
agent = agent.compile()
22 changes: 16 additions & 6 deletions langgraph.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,17 @@
{
"dependencies": ["."],
"graphs": {
"chat": "./backend/graph.py:graph"
},
"env": ".env"
}
"python_version": "3.11",
"dockerfile_lines": [],
"dependencies": [
"."
],
"graphs": {
"chat": "./backend/graph.py:graph",
"researcher": "./backend/graph.py:researcher",
"agent": "./backend/graph.py:agent"
},
"env": [
"OPENAI_API_KEY",
"WEAVIATE_API_KEY",
"WEAVIATE_URL"
]
}
Loading

0 comments on commit 64ac68f

Please sign in to comment.