Commit

signals bot v0.1
beingkk committed Dec 6, 2023
1 parent a0ab8a7 commit 6d20a91
Showing 17 changed files with 610 additions and 83 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -169,3 +169,4 @@ src/scraping/data/*
src/evals/parenting_chatbot/*
src/genai/parenting_chatbot/prodigy_eval/_scrap/*
!src/genai/parenting_chatbot/prodigy_eval/data/
!src/genai/sandbox/signals/data/
401 changes: 401 additions & 0 deletions signals_app.py

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions src/genai/sandbox/signals/data/00_system.jsonl
@@ -0,0 +1 @@
{"role": "user", "content": "###Instructions###\nYou are a helpful, kind, intelligent and polite futurist. You work for the UK innovation agency Nesta, and your task is to engage the user about the future signals and trends that Nesta has researched, by helping the user imagine and appreciate how the signals will impact their life. You will personalise the user experience by taking the information provided by the user and tailoring your explanation to the user background. Here are the future signals that you can talk about: {signals}. Do not discuss other future signals as this is not part of this year's Nesta's Signals edition."}
3 changes: 3 additions & 0 deletions src/genai/sandbox/signals/data/01_intro.jsonl
@@ -0,0 +1,3 @@
{"role": "assistant", "content": "Hi, I’m Scout, Discovery Hub’s experimental AI assistant which helps people explore and interpret signals about the future. ✨"}
{"role": "assistant", "content": "This year we have collected signals about a variety of topics, from green energy to education, to health and even sleep."}
{"role": "assistant", "content": "Tell me one or two things about you and your interests, so that I can suggest which future signals might be the most relevant to you!"}
2 changes: 2 additions & 0 deletions src/genai/sandbox/signals/data/02_signal_impact.jsonl
@@ -0,0 +1,2 @@
{"role": "user", "content": "Start your answer by explaining in one clear sentence how the selected future signal might be relevant to the user, given the user information and conversation history. Then describe three ways how the selected future signal might impact them. Keep these descriptions short, two-three sentences at most. Finish your answer by encouraging the user to ask questions about this signal (note that you will try your best to answer them) or suggest to ask about the other future signals. Remember that you must be patient and never offend or be aggressive. \n\n###Future signal###{signal}\n\n###User information### Here is what the user told you about themselves: {user_input}.\n\n###Answer###"
}
2 changes: 2 additions & 0 deletions src/genai/sandbox/signals/data/03_signal_choice.jsonl
@@ -0,0 +1,2 @@
{"role": "user", "content": "Start your answer by explaining each of the signals in one clear sentence (use similar language to the signals descriptions). If possible, indicate how a signal might be relevant to the user, given the user information and conversation history. Finish your answer by asking the user to choose one of the signals to hear more about it. Remember that you must be patient and never offend or be aggressive. \n\n###Future signals###{signals}\n\n###User information### Here is what the user told you about themselves: {user_input}.\n\n###Answer###"
}
2 changes: 2 additions & 0 deletions src/genai/sandbox/signals/data/04_follow_up.jsonl
@@ -0,0 +1,2 @@
{"role": "user", "content": "Answer to the user's most recent message. Be as concise or detailed as necessary. Use the information from the future signal description when relevant. Keep your answers conversational and three to four sentences long at most. \n\n###Future signal###{signal}\n\n###User information### Here is what the user told you: {user_input}.\n\n###Answer###"
}
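The three instruction templates above (02_signal_impact, 03_signal_choice, 04_follow_up) share one pattern: each is appended to the conversation history as a final user message with its {signal}/{signals} and {user_input} slots filled for the current turn. A rough sketch of that pattern, with fill_template standing in for the repo's MessageTemplate class and the argument values invented for illustration:

```python
import json

def fill_template(path: str, **kwargs) -> dict:
    """Load a one-record JSONL prompt template and fill its placeholders."""
    with open(path) as f:
        message = json.loads(f.readline())
    message["content"] = message["content"].format(**kwargs)
    return message

# e.g. the follow-up instruction, filled with the active signal's text
# and the user's latest message before being sent to the model.
instruction = fill_template(
    "src/genai/sandbox/signals/data/04_follow_up.jsonl",
    signal="<full text of the active signal>",
    user_input="How could this affect my job?",
)
```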
15 changes: 15 additions & 0 deletions src/genai/sandbox/signals/data/func_intent.json
@@ -0,0 +1,15 @@
{
"name": "predict_intent",
"description": "Predict what is the user's intent",
"parameters": {
"type": "object",
"properties": {
"prediction": {
"type": "string",
"enum": ["new_signal", "more_signals", "following_up"],
"description": "The predicted intent"
}
},
"required": ["prediction"]
}
}
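This schema is sent with a forced function call, so the model must return exactly one of the three enum values; the app routes this through its TextGenerator wrapper, whose response is parsed as shown later in the signals_app.py diff. A direct sketch against the 2023-era OpenAI API that the wrapper appears to sit on (the model name and user message are assumptions):

```python
import json
import openai

with open("src/genai/sandbox/signals/data/func_intent.json") as f:
    func_intent = json.load(f)

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # assumed; the app uses selected_model
    messages=[{"role": "user", "content": "Can we talk about a different signal?"}],
    functions=[func_intent],
    function_call={"name": "predict_intent"},  # force this function to be called
)

# The prediction comes back as a JSON string in the function_call arguments.
arguments = json.loads(response["choices"][0]["message"]["function_call"]["arguments"])
intent = arguments["prediction"]  # "new_signal", "more_signals", or "following_up"
```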
15 changes: 15 additions & 0 deletions src/genai/sandbox/signals/data/func_top_signal.json
@@ -0,0 +1,15 @@
{
"name": "predict_top_signal",
"description": "Predict which one of the signal is the most relevant to user input",
"parameters": {
"type": "object",
"properties": {
"prediction": {
"type": "string",
"enum": [],
"description": "The predicted most relevant signal"
}
},
"required": ["prediction"]
}
}
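Note the empty enum: the allowed values are injected at runtime, so the model can only pick from signals the user has not discussed yet, just as signals_app.py does below for the three-signal variant. A small sketch, with illustrative signal names:

```python
import json

with open("src/genai/sandbox/signals/data/func_top_signal.json") as f:
    func_top_signal = json.load(f)

# Restrict the choice to signals not yet discussed (mirrors allowed_signals
# in signals_app.py); the names here are illustrative.
allowed_signals = ["green_skills", "sleep_tech", "future_of_education"]
func_top_signal["parameters"]["properties"]["prediction"]["enum"] = allowed_signals
```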
18 changes: 18 additions & 0 deletions src/genai/sandbox/signals/data/func_top_three_signals.json
@@ -0,0 +1,18 @@
{
"name": "predict_top_signals",
"description": "Predict which three signals are the most relevant to user input",
"parameters": {
"type": "object",
"properties": {
"prediction": {
"type": "array",
"items": {
"type": "string",
"enum": []
},
"description": "The predicted most relevant signals"
}
},
"required": ["prediction"]
}
}
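Same idea as func_top_signal.json, but because prediction is an array the empty enum sits one level deeper, under items; predict_top_three_signals in the diff below fills it accordingly. A minimal sketch:

```python
import json

with open("src/genai/sandbox/signals/data/func_top_three_signals.json") as f:
    func_top_signals = json.load(f)

# For the array-valued schema the allowed values go under "items".
func_top_signals["parameters"]["properties"]["prediction"]["items"]["enum"] = [
    "green_skills", "sleep_tech", "future_of_education",  # illustrative names
]
```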
14 changes: 14 additions & 0 deletions src/genai/sandbox/signals/data/intent_actions.json
@@ -0,0 +1,14 @@
[
{
"name": "new_signal",
"description": "User wishes to change the topic and talk about an new future signal. Alternatively, the user has been just presented with a set of future signal options by the assistant, and the user has now chosen which signal to talk about more."
},
{
"name": "more_signals",
"description": "User has asked to hear more about other future signals"
},
{
"name": "following_up",
"description": "User is following up with another question about the signal that's being discussed just now."
}
]
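These three records are what the {intents} slot of prompt_intent.jsonl (next file) is filled with. How they are rendered into the prompt is not shown in this commit; a plausible sketch:

```python
import json

with open("src/genai/sandbox/signals/data/intent_actions.json") as f:
    intent_actions = json.load(f)

# One "name: description" line per intent, spliced into the {intents} slot
# of prompt_intent.jsonl; the exact rendering is an assumption.
intents = "\n".join(f"{a['name']}: {a['description']}" for a in intent_actions)
```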
1 change: 1 addition & 0 deletions src/genai/sandbox/signals/data/prompt_intent.jsonl
@@ -0,0 +1 @@
{"role": "user", "content": "You are a helpful chatbot talking with the user about the articles of future signals that we wrote. ###Instructions### Predict the intended action of the user, what the user wishes you to carry out based on the conversation history. Pay attention to the most recent messages\n\n###Possible intents###\n{intents}\n\n"}
1 change: 1 addition & 0 deletions src/genai/sandbox/signals/data/prompt_top_signal.jsonl
@@ -0,0 +1 @@
{"role": "user", "content": "###Instructions### Predict which of the following future signals is the most relevant to user input. You have to choose one of these signals. \n\n###Future signal summaries###\n{signals}\n\n###User input:\n{user_input}"}
1 change: 1 addition & 0 deletions src/genai/sandbox/signals/data/prompt_top_three_signals.jsonl
@@ -0,0 +1 @@
{"role": "user", "content": "###Instructions### Predict which three of the following future signals are the most relevant to user input. You have to choose three of these signals. \n\n###Future signal summaries###\n{signals}\n\n###User input:\n{user_input}"}
37 changes: 37 additions & 0 deletions src/genai/sandbox/signals/data/signals_2023.json

Large diffs are not rendered by default.

161 changes: 87 additions & 74 deletions src/genai/sandbox/signals/signals_app.py
@@ -23,8 +23,8 @@
path_func_top_signal = PROMPT_PATH + "func_top_signal.json"
path_prompt_top_signal = PROMPT_PATH + "prompt_top_signal.jsonl"
# Top three signals function
path_func_top_signals = PROMPT_PATH + "func_top_signals.json"
path_prompt_top_signals = PROMPT_PATH + "prompt_top_signals.jsonl"
path_func_top_three_signals = PROMPT_PATH + "func_top_three_signals.json"
path_prompt_top_three_signals = PROMPT_PATH + "prompt_top_three_signals.jsonl"
# Intent detection function
path_func_intent = PROMPT_PATH + "func_intent.json"
path_prompt_intent = PROMPT_PATH + "prompt_intent.jsonl"
@@ -147,7 +147,7 @@ def predict_top_signal(user_message: str, signals: list) -> str:
return top_signal['prediction']


def predict_top_three_signals(user_message: str, signals: list) -> str:
def predict_top_three_signals(user_message: str, signals: list) -> list:
"""Predict the top signal from the user's message.
Args:
@@ -157,22 +157,25 @@ def predict_top_three_signals(user_message: str, signals: list) -> str:
str: The top signal from the user's message.
"""
# Function call
func_top_signals = json.loads(open(path_func_top_signals).read())
func_top_signals['parameters']['properties']['prediction']['enum'] = signals

message = MessageTemplate.load(path_prompt_top_signals)
function = FunctionTemplate.load(func_top_signals)
func_top_signals = json.loads(open(path_func_top_three_signals).read())
func_top_signals['parameters']['properties']['prediction']['items']['enum'] = signals
print(func_top_signals)
message = MessageTemplate.load(path_prompt_top_three_signals)
function_top_three = FunctionTemplate.load(func_top_signals)

response = TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=[message],
message_kwargs={"signals": signals_descriptions, "user_input": user_message},
stream=False,
functions=[function.to_prompt()],
functions=[function_top_three.to_prompt()],
function_call={"name": "predict_top_signals"},
)
top_signals = json.loads(response['choices'][0]['message']['function_call']['arguments'])
print(message)
print(f"Prediction: {top_signals}")
print(response)
return top_signals['prediction']

def signals_bot(sidebar: bool = True) -> None:
@@ -199,10 +202,7 @@ def signals_bot(sidebar: bool = True) -> None:
st.title("Signals chatbot")
st.write("Let's discuss the future!")

# Keep track of discussed signals
st.session_state.signals = []

# First time runnig the app
# First time running the app
if "messages" not in st.session_state:
# Record of messages to display on the app
st.session_state.messages = []
@@ -211,16 +211,20 @@
# Keep track of which state we're in
st.session_state.state = "start"
# Fetch system and introduction messages
with st.chat_message("assistant"):
# Add system message to the history
system_message = read_jsonl(PATH_SYSTEM)
st.session_state.history.append(system_message)
# Add the intro messages
intro_messages = read_jsonl(PATH_INTRO)
print(intro_messages)
for m in intro_messages:
st.session_state.messages.append(m)
st.session_state.history.append(m)
st.session_state.signals = []

# Add system message to the history
system_message = read_jsonl(PATH_SYSTEM)[0]
system_message = MessageTemplate.load(system_message)
system_message.format_message(**{"signals": signals_descriptions})
st.session_state.history.append(system_message.to_prompt())
print(system_message.to_prompt())
# Add the intro messages
intro_messages = read_jsonl(PATH_INTRO)
print(intro_messages)
for m in intro_messages:
st.session_state.messages.append(m)
st.session_state.history.append(m)

# Display chat messages on app rerun
for message in st.session_state.messages:
@@ -241,34 +245,38 @@
st.session_state.user_info = user_message
st.session_state.state = "chatting"
else:
intent = predict_intent(user_message, st.session_state.messages)
intent = predict_intent(user_message, st.session_state.history)
print(intent)
# intent = "following_up"

if intent == "new_signal":
# Predict the signal to explain
allowed_signals = [s for s in signals if s not in st.session_state.signals]
signal_to_explain = predict_top_signal(user_message, allowed_signals)
st.session_state.signals += signal_to_explain
st.session_state.signals.append(signal_to_explain)
st.session_state.active_signal = signal_to_explain
print(signal_to_explain)
print(f"I have these signals in memory: {st.session_state.signals}")
# Explain the signal
instruction = MessageTemplate.load(path_prompt_impact)
message_history = [MessageTemplate.load(m) for m in st.session_state.messages]
message_history = [MessageTemplate.load(m) for m in st.session_state.history]
message_history += [instruction]
message_placeholder = st.empty()
full_response = ""
for response in TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=message_history,
message_kwargs={
"signal": signals_dict[signal_to_explain]['full_text'],
"user_input": st.session_state.user_info
},
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=message_history,
message_kwargs={
"signal": signals_dict[signal_to_explain]['full_text'],
"user_input": st.session_state.user_info
},
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
st.session_state.history.append({"role": "assistant", "content": full_response})

@@ -278,50 +286,55 @@
# Provide an overview of the impacts of signal on the reader
# Ask which one the bot should elaborate on
allowed_signals = [s for s in signals if s not in st.session_state.signals]
top_signals = predict_top_three_signals(user_message, allowed_signals)[0:3]
top_signals = predict_top_three_signals(st.session_state.user_info, allowed_signals)
print(allowed_signals)
print(top_signals)
print(top_signals[0:3])
# Explain the signal
instruction = MessageTemplate.load(path_prompt_choice)
top_signals_text = generate_signals_texts(signals_data, top_signals)
message_history = [MessageTemplate.load(m) for m in st.session_state.messages]
message_history = [MessageTemplate.load(m) for m in st.session_state.history]
message_history += [instruction]
message_placeholder = st.empty()
full_response = ""
for response in TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=message_history,
message_kwargs={
"signals": top_signals_text,
"user_input": st.session_state.user_info
},
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=message_history,
message_kwargs={
"signals": top_signals_text,
"user_input": st.session_state.user_info
},
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
st.session_state.history.append({"role": "assistant", "content": full_response})

elif intent == "following_up":
#Follow up the user's message
instruction = MessageTemplate.load(path_prompt_following_up)
message_history = [MessageTemplate.load(m) for m in st.session_state.messages]
message_history = [MessageTemplate.load(m) for m in st.session_state.history]
message_history += [instruction]
message_placeholder = st.empty()
full_response = ""
for response in TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=message_history,
message_kwargs={
"signal": signals_dict[st.session_state.active_signal]['full_text'],
"user_input": user_message
},
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in TextGenerator.generate(
model=selected_model,
temperature=temperature,
messages=message_history,
message_kwargs={
"signal": signals_dict[st.session_state.active_signal]['full_text'],
"user_input": user_message
},
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)

st.session_state.messages.append({"role": "assistant", "content": full_response})
st.session_state.history.append({"role": "assistant", "content": full_response})

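One design choice worth noting in the diff above: the app keeps two parallel records, st.session_state.messages for what the Streamlit UI renders and st.session_state.history for what is sent to the model; the formatted system prompt lives only in history. A minimal sketch of that split (the assistant reply is illustrative):

```python
import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = []  # rendered in the UI on each rerun
    st.session_state.history = []   # sent to the model on each turn
    # The system prompt goes into history only, so it is never displayed
    # (this commit stores it with role "user", per 00_system.jsonl).
    st.session_state.history.append(
        {"role": "user", "content": "<formatted 00_system.jsonl prompt>"}
    )

# Each completed turn is appended to both records to keep them in sync.
reply = {"role": "assistant", "content": "Here is a signal you might find relevant..."}
st.session_state.messages.append(reply)
st.session_state.history.append(reply)
```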