-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathllmis.py
70 lines (66 loc) · 3.06 KB
/
llmis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from langchain.chains import LLMChain
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from whisperiss import listening,speak
import warnings
from yolois import eyes
from dotenv import load_dotenv
from clip import generate_captions
load_dotenv()

# Silence known-noisy Whisper warnings (FP16 fallback on CPU).
warnings.filterwarnings("ignore", category=UserWarning, module='whisper.transcribe', lineno=114)
warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")

listis = "Listening"

# Create the chat model once; the original rebuilt it on every loop iteration.
llm = ChatOpenAI(temperature=0.3, model_name="gpt-3.5-turbo")


def _respond(response):
    """Print *response* highlighted in blue, then speak it aloud."""
    print("\033[34m" + "Generated Response: " + response + "\033[0m")
    speak(response)


def _answer(query):
    """Return the LLM's answer to *query*.

    If the user said "use google", route the query through a Google-Serper
    search agent; otherwise send the text straight to the chat model.
    """
    if "use google" in query.lower().strip():
        tools = load_tools(["google-serper"])
        agent = initialize_agent(
            tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
        )
        return agent.run(query)
    # Escape literal braces: spoken text containing { or } would otherwise be
    # parsed by PromptTemplate as template variables and raise.
    template = query.replace("{", "{{").replace("}", "}}")
    prompt = PromptTemplate(input_variables=[], template=template)
    chain = LLMChain(llm=llm, prompt=prompt)
    # The prompt declares no input variables, so run with an empty mapping
    # (the original passed a spurious `name=` keyword).
    return chain.run({})


while True:
    lis = "Do you wanna see or listen? "
    print(lis)
    speak(lis)
    propis = listening()
    print(propis)
    if "listen" in propis.lower().strip():
        # Audio-only mode: transcribe a spoken question and answer it.
        print("Listening...")
        speak(listis)
        prop = listening()
        print("Transcribed Audio:", prop)
        _respond(_answer(prop))
    elif "exit" in propis.lower().strip():
        break
    else:
        # Vision mode: capture an image, caption it, then answer a spoken
        # question grounded in that caption.
        speak("Seeing")
        eyes()
        txt1 = generate_captions(r'captured_image.jpg')
        print("Listening...")
        speak(listis)
        prop = listening()
        promptis = f" Given one description of the scene as {txt1} . Treat all the answers for the same scene but with different details. Use all the details and answer the question using the above information as detailed and as accurately according to the information provided as possible. Donot make any assumptions from your side. {prop}"
        print("Transcribed Audio:", prop)
        _respond(_answer(promptis))