Skip to content

Commit

Permalink
watch yo code mf
Browse files Browse the repository at this point in the history
  • Loading branch information
Erisfiregamer1 committed Dec 3, 2023
1 parent 0e662e6 commit 032876e
Show file tree
Hide file tree
Showing 7 changed files with 366 additions and 28 deletions.
12 changes: 5 additions & 7 deletions bots/gpt_4.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ export async function send(
messages: OpenAI.Chat.ChatCompletionMessage[],
prompt: string,
userid: string,
images: string[]
): Promise<response> {
// here we go

Expand All @@ -51,7 +50,7 @@ export async function send(
});
}

const content_arr = []
/*const content_arr = []
content_arr.push({
type: "text",
Expand All @@ -67,15 +66,13 @@ export async function send(
image_url: imgurl
})
})
}
}*/

messages.push({
role: "user",
content: content_arr, // how do I force update type definitions again?!
content: prompt,
});



const res = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
Expand All @@ -84,7 +81,7 @@ export async function send(
},
body: JSON.stringify({
max_tokens: 4096,
model: "gpt-4-vision-preview",
model: "gpt-4-1106-preview",
messages: messages,
user: userid,
}),
Expand All @@ -95,6 +92,7 @@ export async function send(

if (isError(resp)) {
// Fuck.
console.log(resp.error.message)
throw resp.error.message; // well at least they know why the fuck it crashed??
}

Expand Down
Empty file removed bots/llama2.ts
Empty file.
81 changes: 81 additions & 0 deletions bots/openrouter.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
import OpenAI from "npm:openai";

import { config } from "npm:dotenv";
config();

// Feature flag for this backend. NOTE(review): exported *mutable* binding —
// presumably flipped elsewhere to disable the OpenRouter bot at runtime;
// confirm against callers before relying on it.
export let isEnabled = true;

/**
 * Error payload the OpenAI-compatible API returns in place of a chat
 * completion when a request fails.
 */
type ChatCompletionError = {
  error: {
    message: string;
    type: string;
    param: null; // NOTE(review): unsure this is always null — verify against real error payloads
    code: string;
  };
};

/** What send() resolves with: the raw completion plus the updated history. */
type response = {
  oaires: OpenAI.Chat.Completions.ChatCompletion;
  messages: OpenAI.Chat.ChatCompletionMessage[];
};

/**
 * Narrows an API reply to the error variant. A top-level "error" key only
 * appears on the error payload, so its presence is the discriminator.
 */
function isError(
  reply: ChatCompletionError | OpenAI.Chat.Completions.ChatCompletion,
): reply is ChatCompletionError {
  const looksLikeError = "error" in reply;
  return looksLikeError;
}

// Key-value store backing the bot (Deno KV over a local SQLite file), opened
// once at module load.
// NOTE(review): `db` is not referenced anywhere in this module yet —
// presumably groundwork for the VDB integration mentioned in the changelog;
// confirm before removing. (Semicolon added for consistency with the file.)
const db = await Deno.openKv("./db.sqlite");

/**
 * Sends `prompt` to an OpenRouter-hosted model and returns the completion
 * together with the updated conversation history.
 *
 * Mutates and returns `messages`: seeds a system prompt when the history is
 * empty, appends the user prompt, then appends the assistant's reply.
 *
 * @param messages Conversation so far; mutated in place.
 * @param prompt   The user's new message.
 * @param userid   End-user identifier, forwarded as `user` for abuse
 *                 tracking (matches how gpt_4.ts passes it).
 * @param model    OpenRouter model slug.
 * @param api_key  OpenRouter API key.
 * @throws The string "not_enabled" when the backend is switched off, or the
 *         upstream error message string when the API reports a failure.
 */
export async function send(
  messages: OpenAI.Chat.ChatCompletionMessage[],
  prompt: string,
  userid: string,
  model: string,
  api_key: string
): Promise<response> {
  if (!isEnabled) {
    throw "not_enabled";
  }

  // Fresh conversation: give the model its identity first.
  if (messages.length === 0) {
    messages.push({
      role: "system",
      content: `You are ${model}, an LLM hosted by OpenRouter.`,
    });
  }

  messages.push({
    role: "user",
    content: prompt,
  });

  const res = await fetch("https://openrouter.ai/api/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${api_key}`,
    },
    body: JSON.stringify({
      model,
      messages: messages,
      // Fix: `userid` was accepted but silently dropped; forward it like the
      // gpt_4.ts bot does so OpenRouter can attribute abuse per end user.
      user: userid,
    }),
  });

  const resp: OpenAI.Chat.Completions.ChatCompletion | ChatCompletionError =
    await res.json();

  if (isError(resp)) {
    // Surface the upstream failure reason to the caller.
    throw resp.error.message;
  }

  messages.push(resp.choices[0].message);

  return {
    oaires: resp,
    messages,
  };
}
132 changes: 132 additions & 0 deletions bots/types.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
// Definitions of subtypes are below

// Request body accepted by OpenRouter's /api/v1/chat/completions endpoint
// (appears transcribed from OpenRouter's API reference).
// NOTE(review): this name shadows the global fetch `Request` type inside this
// module — consider renaming if that ever causes confusion.
type Request = {
  // Either "messages" or "prompt" is required
  messages?: Message[];
  prompt?: string;

  // If "model" is unspecified, uses the user's default
  model?: string; // See "Supported Models" section

  // Additional optional parameters
  frequency_penalty?: number;
  logit_bias?: { [key: number]: number }; // Only available for OpenAI models
  max_tokens?: number; // Required for some models, so defaults to 512
  n?: number;
  presence_penalty?: number;
  response_format?: { type: 'text' | 'json_object' };
  seed?: number; // Only available for OpenAI models
  stop?: string | string[];
  stream?: boolean; // Enable streaming
  temperature?: number;
  top_p?: number;

  // Function-calling
  tools?: Tool[];
  tool_choice?: ToolChoice;

  // OpenRouter-only parameters
  // (member semicolons added below for consistency with the rest of the file)
  transforms?: string[]; // See "Prompt Transforms" section
  models?: string[]; // See "Fallback Models" section
  route?: 'fallback'; // See "Fallback Models" section
};

// Subtypes:

// ---- Request subtypes (appear transcribed from OpenRouter's API docs) ----

// Plain-text part of a multimodal user message.
type TextContent = {
  type: 'text';
  text: string;
};

// Image part of a multimodal user message.
type ImageContentPart = {
  type: 'image_url';
  image_url: {
    url: string; // URL or base64 encoded image data
    detail?: string; // Optional, defaults to 'auto'
  };
};

// Discriminated on `type`: 'text' | 'image_url'.
type ContentPart = TextContent | ImageContentPart;

// One entry in the conversation history sent to the API.
type Message = {
  role: 'user' | 'assistant' | 'system' | 'tool';
  content: string
    | ContentPart[]; // Only for the 'user' role
  name?: string;
};

// Description of a callable function, in the OpenAI tool format.
type FunctionDescription = {
  description?: string;
  name: string;
  parameters: object; // JSON Schema object
};

type Tool = {
  type: 'function';
  function: FunctionDescription;
};

// 'none' | 'auto', or force a specific function by name.
type ToolChoice = 'none' | 'auto' | {
  type: 'function';
  function: {
    name: string;
  };
};

// Definitions of subtypes are below

// Response shape returned by /api/v1/chat/completions.
type Response = {
  id: string;
  // Depending on whether you set "stream" to "true" and
  // whether you passed in "messages" or a "prompt", you
  // will get a different output shape
  choices: (NonStreamingChoice | StreamingChoice | NonChatChoice | Error)[];
  created: number; // Unix timestamp
  model: string;
  object: 'chat.completion';
};

// Subtypes:

// Choice returned for "prompt" (non-chat) requests.
type NonChatChoice = {
  finish_reason: string | null;
  text: string;
}

// Choice returned for non-streaming chat requests.
type NonStreamingChoice = {
  finish_reason: string | null; // Depends on the model. Ex: 'stop' | 'length' | 'content_filter' | 'tool_calls' | 'function_call'
  message: {
    content: string | null;
    role: string;
    tool_calls?: ToolCall[];
    // Deprecated, replaced by tool_calls
    function_call?: FunctionCall;
  };
};

// Incremental delta emitted when "stream" is true.
type StreamingChoice = {
  finish_reason: string | null;
  delta: {
    content: string | null;
    role?: string;
    tool_calls?: ToolCall[];
    // Deprecated, replaced by tool_calls
    function_call?: FunctionCall;
  };
};

// NOTE(review): this shadows the global `Error` type inside this module —
// a type annotation like `e: Error` here means THIS shape, not the built-in.
// Consider renaming (e.g. ApiError).
type Error = {
  code: number; // See "Error Handling" section
  message: string;
}

type FunctionCall = {
  name: string;
  arguments: string; // JSON format arguments
};

// Tool invocation emitted by the model.
type ToolCall = {
  id: string;
  type: 'function';
  function: FunctionCall;
};
8 changes: 8 additions & 0 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# 11/2/2023

- Scrapped the llama2.ts file in favor of OpenRouter
- Added OpenRouter support
- Worked on the basis for model swapping
- Rethought a bit of code I had in place
- Switched GPT-4 back to turbo so I can use functions
- Began integration of VDB (GPT-4 only for now while I get my shit together)
Loading

0 comments on commit 032876e

Please sign in to comment.