forked from Mozilla-Ocho/streamlit-lwl-demo
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgenerators.py
58 lines (54 loc) · 2.27 KB
/
generators.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
import streamlit as st
from openai import OpenAI
import json
from prompts.key_topics import key_topics_prompt
from prompts.learning_context import learning_context_prompt
from prompts.quiz_topic_questions import questions_prompt
@st.cache_data(persist="disk")
def generate_key_topics(*, source_material, model, _openai_api_key):
    """Extract a list of key topics from *source_material* via the chat API.

    Returns ``[topics, prompt, response]`` where ``topics`` is the parsed
    JSON list with "Overview" prepended as the first entry. Results are
    disk-cached by Streamlit; the API key's leading underscore excludes it
    from the cache key.
    """
    topic_prompt = key_topics_prompt(source_material=source_material)
    api_client = OpenAI(api_key=_openai_api_key)
    chat_messages = [
        {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
        {"role": "user", "content": topic_prompt},
    ]
    completion = api_client.chat.completions.create(
        model=model,
        response_format={"type": "json_object"},
        messages=chat_messages,
        temperature=0,  # deterministic extraction
    )
    raw_json = completion.choices[0].message.content
    topic_list = json.loads(raw_json)["topics"]  # prompt schema asks for this key
    # Prepend the synthetic "Overview" topic ahead of the model's topics.
    return [["Overview"] + topic_list, topic_prompt, completion]
@st.cache_data(persist="disk")
def generate_learning_context(*, goals, skills, model, _openai_api_key):
    """Generate a free-text learning context from the user's goals and skills.

    Returns ``[learning_context, prompt, response]``. Disk-cached by
    Streamlit; ``_openai_api_key`` is excluded from the cache key by its
    leading underscore.
    """
    context_prompt = learning_context_prompt(goals=goals, skills=skills)
    completion = OpenAI(api_key=_openai_api_key).chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": context_prompt}],
        temperature=0.4,  # some variety is fine for prose output
    )
    return [completion.choices[0].message.content, context_prompt, completion]
@st.cache_data(persist="disk")
def generate_questions(*, for_key_topic, learning_context, source_material, model, _openai_api_key):
    """Produce quiz questions for a single key topic as parsed JSON.

    Returns ``[questions, prompt, response]`` where ``questions`` is the
    value under the "questions" key of the model's JSON reply. Disk-cached
    by Streamlit; the underscored API key stays out of the cache key.
    """
    quiz_prompt = questions_prompt(
        for_key_topic=for_key_topic,
        learning_context=learning_context,
        source_material=source_material,
    )
    api_client = OpenAI(api_key=_openai_api_key)
    completion = api_client.chat.completions.create(
        model=model,
        response_format={"type": "json_object"},
        messages=[
            {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
            {"role": "user", "content": quiz_prompt},
        ],
        temperature=0.3,  # mostly deterministic, slight variation allowed
    )
    parsed_reply = json.loads(completion.choices[0].message.content)
    return [parsed_reply["questions"], quiz_prompt, completion]