Merge pull request #1 from pmohun/feature/malcolm
Refactor to simpler base with ability to call predefined or custom prompts
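In rough terms, the refactor replaces the Calvin class with module-level functions. A minimal usage sketch based on the diff below (the API key value and the "summarize" prompt key are placeholders, not part of this commit):

import calvin  # the package __init__ re-exports the module via `from .calvin import *`

calvin.initialize("sk-...", _engine="davinci")          # stores auth headers and loads ./calvin/prompts.json
calvin.complete_prompt("Write a haiku about the sea")    # custom, free-form prompt
calvin.complete_predefined_prompt("summarize")           # predefined prompt looked up in prompts.json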
Showing 6 changed files with 145 additions and 457 deletions.
@@ -0,0 +1 @@
__pycache__

@@ -1,13 +1 @@
api_key = None
engine = 'davinci'
model = None
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}"
}

# Model Resources
from calvin.model import *



from .calvin import *

@@ -1,151 +1,146 @@
import openai
import requests
import os
import json
import argparse
import sys
import tqdm
import requests

# # # #
# Calvin - abstraction module over openai's API #
# # # #

import json
import requests

# calvin - abstraction layer over openai's API
class Calvin:

    # Constructor
    def __init__(self, api_key, engine="davinci"):
        self.engine = engine
        self.headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {api_key}"
        }
        self.prompts = json.load(open("./calvin/prompts.json"))

    # Change engine setting
    def set_engine(self, engine):
        self.engine = engine

    # List Engines GET
    # Lists the currently available engines, and provides basic information about each option such as the owner and availability.
    def list_engines(self):
        url = "https://api.openai.com/v1/engines"
        response = requests.get(url, headers=self.headers)
        print(response.text)

        return response.text

    # Retrieve Engine GET
    # Retrieves an engine instance, providing basic information about the engine such as the owner and availability.
    def retrieve_engine(self):
        url = f"https://api.openai.com/v1/engines/{self.engine}"
        response = requests.get(url, headers=self.headers)
        print(response.text)

        return response.text

    # Create Completion POST
    # Create a completion. This is the main endpoint of the API.
    # Returns new text as well as, if requested, the probabilities over each alternative token at each position.
    def create_completion(self, payload):
        url = f"https://api.openai.com/v1/engines/{self.engine}/completions"
        response = requests.post(url, headers=self.headers, json=payload)
        print(response.text)

        return response

    # Search POST
    # Perform a semantic search over a list of documents.
    def search(self, payload):
        url = f"https://api.openai.com/v1/engines/{self.engine}/search"
        response = requests.post(url, headers=self.headers, json=payload)
        print(response.text)

        return response.text

    # Test function to confirm it works
    def test_create_completion(self):
        payload = {
            "prompt": "Once upon a time",
            "max_tokens": 5,
            "temperature": 1,
            "top_p": 1,
            "n": 1
        }

        self.create_completion(payload)

    # Import premade prompts from json
    def load_json(self):
        f = open("./calvin/prompts.json")
        data = json.load(f)
        return data

    # List available prompts
    def list_prompts(self):
        data = self.prompts
        print(list(data.keys()))
        return data


def create(prompt, data):
    #text = input()
    response = openai.Completion.create(engine="davinci", prompt=data[prompt][0]['prompt'], max_tokens=150)
    output = response["choices"][0]['text']

stop = "\n-"

def main(prompt):

    # TODO: add check for API key from class 'Calvin'
    openai.api_key = Calvin.headers['Authorization']
    data = json.load(open("prompts.json"))
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        dest="verbosity",
        default=0,
        help="Set verbosity.",
    )
    parser.add_argument("-n", "--number", type=int, default=10)
    parser.add_argument("-e", "--engine", default="davinci")
    parser.add_argument("-b", "--best_of", type=int)
    parser.add_argument("-m", "--max_batch", type=int, default=25)
    args = parser.parse_args()

    n = args.number
    batches = []
    while n > 0:
        take = min(n, args.max_batch)
        batches.append(take)
        n -= take

    choices = []
    for b in tqdm.tqdm(batches):
        completion = openai.Completion.create(
            prompt=prompt,
            n=b,
            engine=args.engine,
            max_tokens=300,
            stop=stop,
            temperature=data[prompt][0]['temperature'],
            logprobs=0,
        )
        choices += completion.choices

    def score(a):
        return sum(a.logprobs.token_logprobs) / len(a.logprobs.token_logprobs)

    choices = sorted(choices, key=lambda a: -score(a))
    for i, choice in enumerate(choices):
        print(f"======")
        print(choice.text.strip())

    return 0

if __name__ == "__main__":
    prompt = input('Enter prompt: ')
    sys.exit(main(prompt))
# Define module-scoped variables
engine = "davinci"
headers = {}
prompts = {}


# Initialize module
def initialize(_api_key, _engine=engine):
    global engine, headers, prompts

    if _engine:
        engine = _engine

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {_api_key}"
    }

    prompts = json.load(open("./calvin/prompts.json"))

# Print the current module-scoped settings (debug helper)
def get_globals():
    global engine, headers, prompts

    print({
        "engine": engine,
        "headers": headers,
        "prompts": prompts  # keys must be strings; the dict values are unhashable and can't be keys
    })

# Change engine setting
def set_engine(_engine):
    global engine
    engine = _engine

# List Engines GET
# Lists the currently available engines, and provides basic information about each option such as the owner and availability.
def list_engines():
    global headers

    url = "https://api.openai.com/v1/engines"
    response = requests.get(url, headers=headers)

    if response.ok:
        print(f"JSON: {response.json()}")
        return response.json()
    else:
        return response.raise_for_status()

# Retrieve Engine GET
# Retrieves an engine instance, providing basic information about the engine such as the owner and availability.
def retrieve_engine():
    global headers

    url = f"https://api.openai.com/v1/engines/{engine}"
    response = requests.get(url, headers=headers)

    if response.ok:
        print(f"JSON: {response.json()}")
        return response.json()
    else:
        return response.raise_for_status()

# Create Completion POST
# Returns new text as well as, if requested, the probabilities over each alternative token at each position.
def create_completion(payload):
    global engine, headers

    url = f"https://api.openai.com/v1/engines/{engine}/completions"
    response = requests.post(url, headers=headers, json=payload)

    if response.ok:
        print(f"JSON: {response.json()}")
        return response.json()
    else:
        return response.raise_for_status()

# Complete a custom, free-form prompt
def complete_prompt(prompt):
    payload = {
        "prompt": prompt,
        "max_tokens": 150,
        "temperature": 0.9,
        "top_p": 1,
        "n": 1
    }

    return create_completion(payload)

# Complete a predefined prompt loaded from prompts.json
def complete_predefined_prompt(prompt_key, index=0):
    global prompts

    if prompt_key in prompts:
        return create_completion(prompts[prompt_key][index])

    return "Unable to access predefined prompt."


# Search POST
# Perform a semantic search over a list of documents.
def search(payload):
    global engine, headers

    url = f"https://api.openai.com/v1/engines/{engine}/search"
    response = requests.post(url, headers=headers, json=payload)

    if response.ok:
        print(f"JSON: {response.json()}")
        return response.json()
    else:
        return response.raise_for_status()

# Test function to confirm it works
def test_create_completion():
    payload = {
        "prompt": "Once upon a time",
        "max_tokens": 5,
        "temperature": 1,
        "top_p": 1,
        "n": 1
    }

    create_completion(payload)


"""
Example completion request payload
{
    "prompt": "Once upon a time",
    "max_tokens": 5,
    "temperature": 1,
    "top_p": 1,
    "n": 1,
    "stream": false,
    "logprobs": null,
    "stop": "\n"
}
"""