feat(opentrons-ai-client): send chat history instead of user prompt #15226

Closed · wants to merge 10 commits
@@ -27,8 +27,8 @@
"share_your_thoughts": "Share your thoughts here",
"side_panel_body": "Write a prompt in natural language to generate a Reagent Transfer or a PCR protocol for the OT-2 or Opentrons Flex using the Opentrons Python Protocol API.",
"side_panel_header": "Use natural language to generate protocols with OpentronsAI powered by OpenAI",
"try_example_prompts": "Stuck? Try these example prompts to get started.",
"simulate_description": "Once OpentronsAI has written your protocol, type `simulate` in the prompt box to try it out.",
"try_example_prompts": "Stuck? Try these example prompts to get started.",
"type_your_prompt": "Type your prompt...",
"well_allocations": "Well allocations: Describe where liquids should go in labware.",
"what_if_you": "<span>What if you don’t provide all of those pieces of information? <bold>OpentronsAI asks you to provide it!</bold></span>",
3 changes: 0 additions & 3 deletions opentrons-ai-client/src/molecules/ChatFooter/index.tsx
@@ -5,7 +5,6 @@ import {
COLORS,
DIRECTION_COLUMN,
Flex,
-  // POSITION_STICKY,
SPACING,
StyledText,
TYPOGRAPHY,
@@ -17,8 +16,6 @@ export function ChatFooter(): JSX.Element {

return (
<Flex
-      // position={POSITION_STICKY}
-      bottom="0"
width="100%"
gridGap={SPACING.spacing24}
flexDirection={DIRECTION_COLUMN}
20 changes: 17 additions & 3 deletions opentrons-ai-client/src/molecules/InputPrompt/index.tsx
@@ -17,9 +17,13 @@ import {
TYPOGRAPHY,
} from '@opentrons/components'
import { SendButton } from '../../atoms/SendButton'
-import { preparedPromptAtom, chatDataAtom } from '../../resources/atoms'
+import {
+  preparedPromptAtom,
+  chatDataAtom,
+  chatHistoryAtom,
+} from '../../resources/atoms'

-import type { ChatData } from '../../resources/types'
+import type { Chat, ChatData } from '../../resources/types'

const url =
'https://fk0py9eu3e.execute-api.us-east-2.amazonaws.com/sandbox/chat/completion'
@@ -37,6 +41,7 @@ export function InputPrompt(): JSX.Element {
})
const [preparedPrompt] = useAtom(preparedPromptAtom)
const [, setChatData] = useAtom(chatDataAtom)
+  const [chatHistory, setChatHistory] = useAtom(chatHistoryAtom)
const [submitted, setSubmitted] = React.useState<boolean>(false)

const [data, setData] = React.useState<any>(null)
@@ -63,8 +68,12 @@
audience: 'sandbox-ai-api',
},
})
+    const message: Chat = {
+      role: 'user',
+      content: prompt,
+    }
const postData = {
-      message: prompt,
+      messages: [...chatHistory, message],
fake: false,
}
const headers = {
@@ -87,6 +96,10 @@
role: 'user',
reply: userPrompt,
}
+    setChatHistory(chatHistory => [
+      ...chatHistory,
+      { role: 'user', content: userPrompt },
+    ])
setChatData(chatData => [...chatData, userInput])
void fetchData(userPrompt)
setSubmitted(true)
@@ -104,6 +117,7 @@
role,
reply,
}
+      setChatHistory(chatHistory => [...chatHistory, { role, content: reply }])
setChatData(chatData => [...chatData, assistantResponse])
setSubmitted(false)
}
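Taken together, the InputPrompt changes make each request carry the whole conversation rather than a single prompt: the user's message is appended to chatHistoryAtom on submit, the POST body sends the accumulated turns, and the assistant's reply is appended when it arrives. Below is a minimal sketch of that round trip using the names from this diff; the standalone jotai store and the example strings are illustrative only (the component itself uses useAtom, and jotai v2 is assumed for createStore).

```typescript
// Sketch of the round trip this diff implements. `Chat` and
// `chatHistoryAtom` come from the diff; `createStore` is used only so
// the example runs outside React -- the component uses useAtom instead.
import { atom, createStore } from 'jotai'

interface Chat {
  role: 'assistant' | 'user'
  content: string
}

const chatHistoryAtom = atom<Chat[]>([])
const store = createStore()

// 1. On submit, append the user's prompt to the history.
const prompt = 'Transfer 10 uL from A1 to B1 on the Flex' // example only
store.set(chatHistoryAtom, history => [
  ...history,
  { role: 'user', content: prompt },
])

// 2. The POST body now carries every prior turn, not just one prompt.
const postData = {
  messages: store.get(chatHistoryAtom),
  fake: false,
}
console.log(JSON.stringify(postData))

// 3. When the reply arrives, append it so the next request includes it.
store.set(chatHistoryAtom, history => [
  ...history,
  { role: 'assistant', content: '...generated protocol...' },
])
```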
@@ -56,7 +56,7 @@ export function ChatContainer(): JSX.Element {
</ChatDataContainer>
</Flex>
<span ref={scrollRef} />
-      <Flex position={POSITION_STICKY}>
+      <Flex position={POSITION_STICKY} bottom="0">
<ChatFooter />
</Flex>
</Flex>
4 changes: 3 additions & 1 deletion opentrons-ai-client/src/resources/atoms.ts
@@ -1,9 +1,11 @@
// jotai's atoms
import { atom } from 'jotai'
-import type { ChatData } from './types'
+import type { Chat, ChatData } from './types'

/** preparedPromptAtom is for PromptButton */
export const preparedPromptAtom = atom<string>('')

/** ChatDataAtom is for chat data (user prompt and response from OpenAI API) */
export const chatDataAtom = atom<ChatData[]>([])

+export const chatHistoryAtom = atom<Chat[]>([])
11 changes: 10 additions & 1 deletion opentrons-ai-client/src/resources/types.ts
@@ -1,8 +1,17 @@
+/** assistant: ChatGPT API, user: user */
+type Role = 'assistant' | 'user'
export interface ChatData {
/** assistant: ChatGPT API, user: user */
-  role: 'assistant' | 'user'
+  role: Role
/** content ChatGPT API return or user prompt */
reply: string
/** for testing purpose will be removed and this is not used in the app */
fake?: boolean
}

+export interface Chat {
+  /** assistant: ChatGPT API, user: user */
+  role: Role
+  /** content ChatGPT API return or user prompt */
+  content: string
+}
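The two shapes are deliberately parallel: ChatData keeps the UI's `reply` field (plus the test-only `fake` flag), while Chat mirrors the OpenAI-style wire format with `content`. A hypothetical converter, not part of this PR, makes that mapping explicit:

```typescript
// Hypothetical helper, NOT in this PR: maps the UI shape (ChatData,
// text under `reply`) to the wire shape (Chat, text under `content`).
type Role = 'assistant' | 'user'

interface ChatData {
  role: Role
  reply: string
  fake?: boolean
}

interface Chat {
  role: Role
  content: string
}

const toChat = ({ role, reply }: ChatData): Chat => ({ role, content: reply })

console.log(toChat({ role: 'user', reply: 'simulate' }))
// -> { role: 'user', content: 'simulate' }
```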
6 changes: 3 additions & 3 deletions opentrons-ai-server/api/domain/openai_predict.py
@@ -94,15 +94,15 @@ class atomic_descr(BaseModel):
descriptions.append(x)
return descriptions

-    def refine_response(self, assitant_message: str) -> str:
-        if assitant_message is None:
+    def refine_response(self, assistant_message: str) -> str:
+        if assistant_message is None:
return ""
system_message: ChatCompletionMessageParam = {
"role": "system",
"content": f"{general_rules_1}\n Please leave useful comments for each command.",
}

-        user_message: ChatCompletionMessageParam = {"role": "user", "content": assitant_message}
+        user_message: ChatCompletionMessageParam = {"role": "user", "content": assistant_message}

response = self.client.chat.completions.create(
model=self.settings.OPENAI_MODEL_NAME,
2 changes: 1 addition & 1 deletion opentrons-ai-server/api/handler/function.py
@@ -29,7 +29,7 @@ def create_chat_completion(event: Dict[str, Any]) -> Dict[str, Any]:

settings: Settings = Settings.build()
openai: OpenAIPredict = OpenAIPredict(settings=settings)
-    response: Union[str, None] = openai.predict(prompt=body.message)
+    response: Union[str, None] = openai.predict(prompt=body.message, chat_completion_message_params=body.history)

if response is None or response == "":
return create_response(HTTPStatus.NO_CONTENT, ChatResponse(reply="No response was generated", fake=body.fake).model_dump())
9 changes: 9 additions & 0 deletions opentrons-ai-server/api/models/chat_request.py
@@ -1,6 +1,15 @@
+from typing import List, Optional

+from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel


+class Chat(BaseModel):
+    role: str
+    content: str


class ChatRequest(BaseModel):
message: str
+    history: Optional[List[ChatCompletionMessageParam]] = None
fake: bool
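For reference, a request body that this model validates is sketched below as a TypeScript literal, for consistency with the client code above; the values are invented. Note that the client-side diff posts a `messages` array, while this model reads `message` plus optional `history`, so the two halves of the PR do not yet agree on a field name.

```typescript
// Hypothetical request body matching the new ChatRequest model
// (field names from chat_request.py; values are made up).
const exampleChatRequest = {
  message: 'Add a 1-minute incubation after the transfer',
  history: [
    { role: 'user', content: 'Write a PCR protocol for the Flex' },
    { role: 'assistant', content: '...generated protocol...' },
  ],
  fake: false,
}

console.log(JSON.stringify(exampleChatRequest, null, 2))
```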
4 changes: 2 additions & 2 deletions opentrons-ai-server/tests/test_chat_models.py
@@ -7,7 +7,7 @@
@pytest.mark.unit
def test_chat_request_model() -> None:
# Test valid data
request_data = {"message": "Hello", "fake": False}
request_data = {"message": "Hello", "chat_history": [], "fake": False}
request = ChatRequest(**request_data)
assert request.message == "Hello"
assert request.fake is False
@@ -28,5 +28,5 @@ def test_chat_response_model() -> None:

# Test invalid data
with pytest.raises(ValidationError):
invalid_response_data = {"reply": 123, "fake": "false"}
invalid_response_data = {"reply": 123, "history": "history", "fake": "false"}
ChatResponse(**invalid_response_data)