From 032876ef879d0518283c58eac9616e9bfa1ef075 Mon Sep 17 00:00:00 2001
From: ErisWS
Date: Sat, 2 Dec 2023 21:50:56 -0500
Subject: [PATCH] watch yo code mf

---
 bots/gpt_4.ts      |  12 ++---
 bots/llama2.ts     |   0
 bots/openrouter.ts |  81 ++++++++++++++++++++++++++++
 bots/types.ts      | 132 +++++++++++++++++++++++++++++++++++++++++++++
 changelog.md       |   8 +++
 main.ts            |  92 +++++++++++++++++++++++++------
 slashcode.ts       |  69 ++++++++++++++++++++++--
 7 files changed, 366 insertions(+), 28 deletions(-)
 delete mode 100644 bots/llama2.ts
 create mode 100644 bots/openrouter.ts
 create mode 100644 bots/types.ts
 create mode 100644 changelog.md

diff --git a/bots/gpt_4.ts b/bots/gpt_4.ts
index 4ee56df..c247d03 100644
--- a/bots/gpt_4.ts
+++ b/bots/gpt_4.ts
@@ -36,7 +36,6 @@ export async function send(
   messages: OpenAI.Chat.ChatCompletionMessage[],
   prompt: string,
   userid: string,
-  images: string[]
 ): Promise<response> {
   // here we go
 
@@ -51,7 +50,7 @@ export async function send(
     });
   }
 
-  const content_arr = []
+  /*const content_arr = []
 
   content_arr.push({
     type: "text",
@@ -67,15 +66,13 @@ export async function send(
         image_url: imgurl
       })
     })
-  }
+  }*/
 
   messages.push({
     role: "user",
-    content: content_arr, // how do I force update type definitions again?!
+    content: prompt,
   });
 
-
-
   const res = await fetch("https://api.openai.com/v1/chat/completions", {
     method: "POST",
     headers: {
@@ -84,7 +81,7 @@ export async function send(
     },
     body: JSON.stringify({
       max_tokens: 4096,
-      model: "gpt-4-vision-preview",
+      model: "gpt-4-1106-preview",
       messages: messages,
       user: userid,
     }),
@@ -95,6 +92,7 @@ export async function send(
 
   if (isError(resp)) {
     // Fuck.
+    console.log(resp.error.message)
     throw resp.error.message; // well at least they know why the fuck it crashed??
   }
 
diff --git a/bots/llama2.ts b/bots/llama2.ts
deleted file mode 100644
index e69de29..0000000
diff --git a/bots/openrouter.ts b/bots/openrouter.ts
new file mode 100644
index 0000000..051e646
--- /dev/null
+++ b/bots/openrouter.ts
@@ -0,0 +1,81 @@
+import OpenAI from "npm:openai";
+
+import { config } from "npm:dotenv";
+config();
+
+export let isEnabled = true;
+
+type ChatCompletionError = {
+  error: {
+    message: string;
+    type: string;
+    param: null; // Not sure about this one tbh,
+    code: string;
+  };
+};
+
+type response = {
+  oaires: OpenAI.Chat.Completions.ChatCompletion;
+  messages: OpenAI.Chat.ChatCompletionMessage[];
+};
+
+function isError(
+  value: ChatCompletionError | OpenAI.Chat.Completions.ChatCompletion,
+): value is ChatCompletionError {
+  return "error" in value;
+}
+
+const db = await Deno.openKv("./db.sqlite")
+
+export async function send(
+  messages: OpenAI.Chat.ChatCompletionMessage[],
+  prompt: string,
+  userid: string,
+  model: string,
+  api_key: string
+): Promise<response> {
+  // here we go
+
+  if (!isEnabled) {
+    throw "not_enabled";
+  }
+
+  if (messages.length === 0) {
+    messages.push({
+      role: "system",
+      content: `You are ${model}, an LLM hosted by OpenRouter.`,
+    });
+  }
+
+  messages.push({
+    role: "user",
+    content: prompt,
+  });
+
+  const res = await fetch("https://openrouter.ai/api/v1/chat/completions", {
+    method: "POST",
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${api_key}`,
+    },
+    body: JSON.stringify({
+      model,
+      messages: messages,
+    }),
+  });
+
+  const resp: OpenAI.Chat.Completions.ChatCompletion | ChatCompletionError =
+    await res.json();
+
+  if (isError(resp)) {
+    // Fuck.
+    throw resp.error.message; // well at least they know why the fuck it crashed??
+  }
+
+  messages.push(resp.choices[0].message);
+
+  return {
+    oaires: resp,
+    messages,
+  };
+}
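For reference (not part of the patch), a minimal sketch of how the new `bots/openrouter.ts` module might be driven; the model slug, the user id, and the `OPENROUTER_KEY` env var are placeholder assumptions:

```ts
// Hypothetical caller for bots/openrouter.ts (Deno, top-level await).
import * as openrouter from "./bots/openrouter.ts";

const api_key = Deno.env.get("OPENROUTER_KEY")!; // assumed env var, not in the patch

const { oaires, messages } = await openrouter.send(
  [], // empty history, so send() injects the system prompt
  "Summarize the plot of Hamlet.", // user prompt
  "user-id-123", // Discord user id; accepted but currently unused by send()
  "mistralai/mistral-7b-instruct", // any OpenRouter model slug
  api_key,
);

console.log(oaires.choices[0].message.content);
// `messages` now ends with the assistant reply, ready to persist back to KV.
```

Note that unlike the gpt_4.ts version, the request body here sends no `user` field, so `userid` is dead weight for now.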
diff --git a/bots/types.ts b/bots/types.ts
new file mode 100644
index 0000000..f14e493
--- /dev/null
+++ b/bots/types.ts
@@ -0,0 +1,132 @@
+// Definitions of subtypes are below
+
+type Request = {
+  // Either "messages" or "prompt" is required
+  messages?: Message[];
+  prompt?: string;
+
+  // If "model" is unspecified, uses the user's default
+  model?: string; // See "Supported Models" section
+
+  // Additional optional parameters
+  frequency_penalty?: number;
+  logit_bias?: { [key: number]: number }; // Only available for OpenAI models
+  max_tokens?: number; // Required for some models, so defaults to 512
+  n?: number;
+  presence_penalty?: number;
+  response_format?: { type: 'text' | 'json_object' };
+  seed?: number; // Only available for OpenAI models
+  stop?: string | string[];
+  stream?: boolean; // Enable streaming
+  temperature?: number;
+  top_p?: number;
+
+  // Function-calling
+  tools?: Tool[];
+  tool_choice?: ToolChoice;
+
+  // OpenRouter-only parameters
+  transforms?: string[] // See "Prompt Transforms" section
+  models?: string[] // See "Fallback Models" section
+  route?: 'fallback' // See "Fallback Models" section
+};
+
+// Subtypes:
+
+type TextContent = {
+  type: 'text';
+  text: string;
+};
+
+type ImageContentPart = {
+  type: 'image_url';
+  image_url: {
+    url: string; // URL or base64 encoded image data
+    detail?: string; // Optional, defaults to 'auto'
+  };
+};
+
+type ContentPart = TextContent | ImageContentPart;
+
+type Message = {
+  role: 'user' | 'assistant' | 'system' | 'tool';
+  content: string | ContentPart[]; // ContentPart[] is only for the 'user' role
+  name?: string;
+};
+
+type FunctionDescription = {
+  description?: string;
+  name: string;
+  parameters: object; // JSON Schema object
+};
+
+type Tool = {
+  type: 'function';
+  function: FunctionDescription;
+};
+
+type ToolChoice = 'none' | 'auto' | {
+  type: 'function';
+  function: {
+    name: string;
+  };
+};
+
+// Definitions of subtypes are below
+
+type Response = {
+  id: string;
+  // Depending on whether you set "stream" to "true" and
+  // whether you passed in "messages" or a "prompt", you
+  // will get a different output shape
+  choices: (NonStreamingChoice | StreamingChoice | NonChatChoice | Error)[];
+  created: number; // Unix timestamp
+  model: string;
+  object: 'chat.completion';
+};
+
+// Subtypes:
+
+type NonChatChoice = {
+  finish_reason: string | null;
+  text: string;
+}
+
+type NonStreamingChoice = {
+  finish_reason: string | null; // Depends on the model. Ex: 'stop' | 'length' | 'content_filter' | 'tool_calls' | 'function_call'
+  message: {
+    content: string | null;
+    role: string;
+    tool_calls?: ToolCall[];
+    // Deprecated, replaced by tool_calls
+    function_call?: FunctionCall;
+  };
+};
+
+type StreamingChoice = {
+  finish_reason: string | null;
+  delta: {
+    content: string | null;
+    role?: string;
+    tool_calls?: ToolCall[];
+    // Deprecated, replaced by tool_calls
+    function_call?: FunctionCall;
+  };
+};
+
+type Error = {
+  code: number; // See "Error Handling" section
+  message: string;
+}
+
+type FunctionCall = {
+  name: string;
+  arguments: string; // JSON format arguments
+};
+
+type ToolCall = {
+  id: string;
+  type: 'function';
+  function: FunctionCall;
+};
\ No newline at end of file
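These types mirror OpenRouter's request/response documentation. A quick illustration (mine, not the patch's) of values conforming to `Message` and `ContentPart`; only the `user` role may carry a content-part array:

```ts
// Hypothetical example values for the types above.
const textOnly: Message = {
  role: "assistant",
  content: "Hello!",
};

const withImage: Message = {
  role: "user", // content-part arrays are only valid for the 'user' role
  content: [
    { type: "text", text: "What is in this image?" },
    {
      type: "image_url",
      image_url: {
        url: "https://example.com/cat.png", // URL or base64-encoded image data
        detail: "auto", // optional; 'auto' is the default
      },
    },
  ],
};
```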
diff --git a/changelog.md b/changelog.md
new file mode 100644
index 0000000..32cc7ce
--- /dev/null
+++ b/changelog.md
@@ -0,0 +1,8 @@
+# 12/2/2023
+
+- Scrapped the llama2.ts file in favor of OpenRouter
+- Added OpenRouter support
+- Worked on the basis for model swapping
+- Rethought a bit of code I had in place
+- Switched GPT-4 back to turbo so I can use functions
+- Began integration of VDB (GPT-4 only for now while I get my shit together)
\ No newline at end of file
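The "model swapping" entry above rides on the `current_bot` string: OpenRouter models are stored as `openrouter^<model>` and split apart again at use time, as the main.ts diff below shows. A sketch of that round trip (the helper names are mine, not the repo's):

```ts
// Hypothetical helpers mirroring the split("^") logic in main.ts.
function encodeBot(model: string): string {
  return `openrouter^${model}`;
}

function decodeBot(llm: string): string {
  // main.ts takes the last "^"-separated segment as the model slug
  const parts = llm.split("^");
  return parts[parts.length - 1];
}

encodeBot("gryphe/mythomax-l2-13b"); // "openrouter^gryphe/mythomax-l2-13b"
decodeBot("openrouter^gryphe/mythomax-l2-13b"); // "gryphe/mythomax-l2-13b"
```

This works because OpenRouter slugs use `/`, not `^`, as their separator.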
diff --git a/main.ts b/main.ts
index c90ceaa..65a1af6 100644
--- a/main.ts
+++ b/main.ts
@@ -2,6 +2,8 @@ import * as chatgpt from "./bots/chatgpt.ts";
 // import * as bing_chat from "./bots/bing_chat.ts";
 import * as gpt4 from "./bots/gpt_4.ts";
 // import * as palm from "./bots/palm.ts";
+import * as openrouter from "./bots/openrouter.ts";
+
 
 import OpenAI from "npm:openai";
 
@@ -61,7 +63,16 @@ New database example:
       id: "completion-37",
       messages: [{}] // Alan, insert message object every time you finish here. Wait, Alan, are you still on the team?
     }
-  ]
+  ],
+  openrouter: {
+    api_key: "whatever_the_fuck_it_is",
+    llama2: [
+      {
+        id: "completion-37",
+        messages: [{}] // Alan, insert message object every time you finish here. Wait, Alan, are you still on the team?
+      }
+    ]
+  }
   }
 }
 },
@@ -87,7 +98,7 @@ client.on("messageCreate", async (message) => {
       "Looks like this is your first time using this bot! Run /info to learn how to use the full potential of this bot.",
     );
     error = true;
-  } else if (!llm.match(/^(chatgpt|bing|bard|gpt4|llama2)$/g)) {
+  } else if (!llm.match(/^(chatgpt|bing|bard|gpt4|llama2)$/g) && !llm.startsWith("openrouter^")) {
     // current LLM is corrupt. notify user and reset
     llm = "gpt4";
     await db.set(["users", message.author.id, "current_bot"], llm);
@@ -118,12 +129,26 @@ client.on("messageCreate", async (message) => {
     }
   }
 
-  let messages = (await db.get([
-    "users",
-    message.author.id,
-    "conversations",
-    llm,
-  ])).value;
+  let messages;
+
+  if (llm.startsWith("openrouter^")) {
+    const llm_real = llm.split("^")
+
+    messages = (await db.get([
+      "users",
+      message.author.id,
+      "conversations",
+      "openrouter",
+      llm_real[llm_real.length - 1]
+    ])).value;
+  } else {
+    messages = (await db.get([
+      "users",
+      message.author.id,
+      "conversations",
+      llm,
+    ])).value;
+  }
 
   if (messages === null) {
     // No conversations for this LLM.
     messages = [{
@@ -131,10 +156,6 @@ client.on("messageCreate", async (message) => {
       id: "New Conversation",
       messages: [],
     }];
-    await db.set(
-      ["users", message.author.id, "conversations", llm],
-      messages,
-    );
 
     if (error === false) {
       await message.reply(
@@ -148,8 +169,48 @@ client.on("messageCreate", async (message) => {
   const msg = await message.reply("Sending message...");
 
   let resp: gptresponse;
 
+  if (llm.startsWith("openrouter^")) {
+    const llm_real = llm.split("^")
+
+    const api_key = (await db.get([
+      "users",
+      message.author.id,
+      "conversations",
+      "openrouter",
+      "api_key"
+    ])).value!;
+
+    resp = await openrouter.send(
+      curmsgs,
+      message.content,
+      message.author.id,
+      llm_real[llm_real.length - 1],
+      api_key
+    )
+
+    messages[curconv].messages = resp.messages;
+
+    await db.set(
+      ["users", message.author.id, "conversations", "openrouter", llm_real[llm_real.length - 1]],
+      messages,
+    );
+
+    const messagechunks = splitStringIntoChunks(
+      resp.oaires.choices[0].message.content,
+      2000,
+    );
-  if (llm === "chatgpt") {
+
+    let i = 0;
+
+    messagechunks.forEach(async (chunk) => {
+      if (i <= 0) {
+        await msg.edit(chunk);
+        i++;
+      } else {
+        await message.reply(chunk);
+      }
+    });
+  } else if (llm === "chatgpt") {
     if (!chatgpt.isEnabled) {
       msg.edit(
         "This LLM isn't enabled! Please switch to a different LLM to use this bot.",
@@ -204,7 +265,7 @@ client.on("messageCreate", async (message) => {
       return;
     }
 
-    const images: string[] = []
+    /*const images: string[] = []
 
     message.attachments.forEach((image) => {
       images.push(image.url)
@@ -212,14 +273,13 @@ client.on("messageCreate", async (message) => {
 
     message.stickers.forEach((image) => {
       images.push(image.url)
-    })
+    })*/
 
     try {
       resp = await gpt4.send(
        curmsgs,
         message.content,
         message.author.id,
-        images
       );
 
       messages[curconv].messages = resp.messages;
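main.ts calls `splitStringIntoChunks(text, 2000)` to stay under Discord's 2,000-character message cap, but the helper itself is defined elsewhere in the repo and not shown in this patch. A plausible fixed-width implementation, purely as an assumption of its shape:

```ts
// Hypothetical implementation; the real helper may split on word boundaries.
function splitStringIntoChunks(str: string, chunkSize: number): string[] {
  const chunks: string[] = [];
  for (let i = 0; i < str.length; i += chunkSize) {
    chunks.push(str.slice(i, i + chunkSize));
  }
  return chunks;
}

splitStringIntoChunks("a".repeat(4500), 2000); // -> chunks of 2000, 2000, 500 chars
```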
diff --git a/slashcode.ts b/slashcode.ts
index 50b086f..4dae76a 100644
--- a/slashcode.ts
+++ b/slashcode.ts
@@ -32,7 +32,11 @@ import {
   EmbedBuilder,
   PermissionFlagsBits,
   SlashCommandBuilder,
+  ModalBuilder,
   StringSelectMenuBuilder,
+  TextInputStyle,
+  ModalActionRowComponentBuilder,
+  TextInputBuilder,
 } from "npm:discord.js";
 
 const commands: SlashCommandBuilder[] = [];
@@ -156,7 +160,11 @@ command16.addStringOption((option) =>
     .setRequired(true)
 );
 
-const botamt = 16;
+const command17 = new SlashCommandBuilder();
+command17.setName("set-ai-openrouter");
+command17.setDescription("Select an OpenRouter AI");
+
+const botamt = 17;
 for (let i = 1; i - 1 < botamt; i++) {
   const commandname = "command" + i;
   commands.push(eval(commandname));
@@ -187,10 +195,33 @@ client.on("ready", async () => {
 client.on("interactionCreate", async (interaction) => {
   if (interaction.isStringSelectMenu()) {
     if (interaction.customId === "set-ai") {
-      await interaction.reply({
-        content: "Command not implemented",
-        ephemeral: true,
-      });
+      const llm = interaction.values[0]
+
+      console.log(llm)
+
+      await db.set(["users", interaction.user.id, "current_bot"], llm);
+
+      await interaction.reply({ content: `Set your LLM to \`${llm}\`!`, ephemeral: true });
+    }
+  } else if (interaction.isModalSubmit()) {
+    if (interaction.customId === "set-ai-openrouter") {
+
+      const or_llm = interaction.fields.getTextInputValue('modelName')
+      const api_key = interaction.fields.getTextInputValue('apiKey')
+
+      let llm = `openrouter^${or_llm}`;
+
+      await db.set([
+        "users",
+        interaction.user.id,
+        "conversations",
+        "openrouter",
+        "api_key"
+      ], api_key);
+      await db.set(["users", interaction.user.id, "current_bot"], llm);
+
+
+      await interaction.reply({ content: `Set your LLM to \`${llm}\`!`, ephemeral: true });
     }
   }
 
   if (!interaction.isChatInputCommand()) return;
@@ -246,6 +277,8 @@ client.on("interactionCreate", async (interaction) => {
       messages: [],
     };
 
+
+
     await db.set(
       ["users", interaction.user.id, "conversations", llm],
       messages,
@@ -401,5 +434,31 @@ client.on("interactionCreate", async (interaction) => {
         `Something went wrong making the images! All I know is the error was "${err}".`,
       );
     }
+  } else if (interaction.commandName === "set-ai-openrouter") {
+    const modal = new ModalBuilder()
+      .setCustomId('set-ai-openrouter')
+      .setTitle('Set your OpenRouter model');
+
+
+    const modelNameInput = new TextInputBuilder()
+      .setCustomId('modelName')
+      .setLabel("OpenRouter model")
+      .setStyle(TextInputStyle.Short);
+
+    const apiKeyInput = new TextInputBuilder()
+      .setCustomId('apiKey')
+      .setLabel("OpenRouter API key")
+      .setStyle(TextInputStyle.Short);
+
+    // An action row only holds one text input,
+    // so you need one action row per text input.
+    const firstActionRow = new ActionRowBuilder<ModalActionRowComponentBuilder>().addComponents(modelNameInput);
+    const secondActionRow = new ActionRowBuilder<ModalActionRowComponentBuilder>().addComponents(apiKeyInput);
+
+    // Add inputs to the modal
+    modal.addComponents(firstActionRow, secondActionRow);
+
+    // Show the modal to the user
+    await interaction.showModal(modal);
   }
 });
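One thing worth checking in review: the KV paths written by slashcode.ts must line up exactly with the reads in main.ts. A sketch of the layout as this patch leaves it (the user id and model slug are placeholders):

```ts
// Hypothetical read-back of the keys this patch writes (Deno KV).
const db = await Deno.openKv("./db.sqlite");
const userId = "user-id-123"; // placeholder

// Written by slashcode.ts on modal submit; read by main.ts before send():
const apiKey = (await db.get(["users", userId, "conversations", "openrouter", "api_key"])).value;

// Per-model conversation history, keyed by the raw model slug:
const history = (await db.get(["users", userId, "conversations", "openrouter", "gryphe/mythomax-l2-13b"])).value;

// The active bot selector, e.g. "gpt4" or "openrouter^gryphe/mythomax-l2-13b":
const current = (await db.get(["users", userId, "current_bot"])).value;
```

Note that `api_key` shares a namespace with the per-model histories, so a model literally named `api_key` would collide; in practice OpenRouter slugs contain a `/`, so this stays theoretical.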