Skip to content

Commit

Permalink
default message
Browse files Browse the repository at this point in the history
  • Loading branch information
Stan370 committed Mar 1, 2024
1 parent fe94fbd commit 849e723
Show file tree
Hide file tree
Showing 3 changed files with 220 additions and 3 deletions.
3 changes: 1 addition & 2 deletions app/api/openai/route.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import { getServerConfig } from "@/config/server";
import { ServerRuntime } from "next";
import OpenAI from "openai";
import { ChatCompletionCreateParamsBase } from "openai/resources/chat/completions.mjs";

export const runtime: ServerRuntime = "edge";

Expand All @@ -23,7 +22,7 @@ export async function POST(request: Request) {

const response = await openai.chat.completions.create({
model: "gpt-3.5-turbo",
messages: messages as ChatCompletionCreateParamsBase["messages"],
messages: [{ role: 'user', content: 'Say this is a test' }],
stream: true,
}
,{headers:{ Accept: '*/*' } });
Expand Down
18 changes: 17 additions & 1 deletion app/chat/page.tsx
Original file line number Diff line number Diff line change
@@ -1,10 +1,26 @@
"use client";

import { useState } from "react";
import Siderbar from "../components/Siderbar";
import { OpenAIChatMessage } from "@/lib/ModelSetting";

const Chat = () => {
const [message, setMessage] = useState("");
const [conversations, setConversations] = useState(0);

const agents: OpenAIChatMessage[] = [
{
role: "system",
name:"GPT Prompt builder",
content:
'Read all of the instructions below and once you understand them say "Shall we begin:" I want you to become my Prompt Creator. Your goal is to help me craft the best possible prompt for my needs. The prompt will be used by you, ChatGPT. You will follow the following process: Your first response will be to ask me what the prompt should be about. I will provide my answer, but we will need to improve it through continual iterations by going through the next steps. Based on my input, you will generate 3 sections. Revised Prompt (provide your rewritten prompt. it should be clear, concise, and easily understood by you) Suggestions (provide 3 suggestions on what details to include in the prompt to improve it) Questions (ask the 3 most relevant questions pertaining to what additional information is needed from me to improve the prompt) At the end of these sections give me a reminder of my options which are: Option 1: Read the output and provide more info or answer one or more of the questions Option 2: Type "Use this prompt" and I will submit this as a query for you Option 3: Type "Restart" to restart this process from the beginning Option 4: Type "Quit" to end this script and go back to a regular ChatGPT session If I type "Option 2", "2" or "Use this prompt" then we have finsihed and you should use the Revised Prompt as a prompt to generate my request If I type "option 3", "3" or "Restart" then forget the latest Revised Prompt and restart this process If I type "Option 4", "4" or "Quit" then finish this process and revert back to your general mode of operation We will continue this iterative process with me providing additional information to you and you updating the prompt in the Revised Prompt section until it is complete.',
},
{
role: "user",
content:
"I really enjoyed reading To Kill a Mockingbird, could you recommend me a book that is similar and tell me why?",
},
];

return (
<div className="relative min-h-screen flex flex-row bg-gray-50 dark:bg-[#17171a] dark:text-red-50 ">
<Siderbar></Siderbar>
Expand Down
202 changes: 202 additions & 0 deletions lib/ModelSetting.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
/**
 * Known LLM model identifiers (OpenAI chat models).
 */
export enum LanguageModel {
  /**
   * GPT 3.5 Turbo family
   */
  GPT3_5 = "gpt-3.5-turbo",
  GPT3_5_1106 = "gpt-3.5-turbo-1106",
  GPT3_5_16K = "gpt-3.5-turbo-16k",
  /**
   * GPT 4 family
   */
  GPT4 = "gpt-4",
  GPT4_32K = "gpt-4-32k",
  GPT4_PREVIEW = "gpt-4-0125-preview",
  GPT4_VISION_PREVIEW = "gpt-4-vision-preview",
}

/**
 * Metadata describing a single chat model (capabilities and identity).
 */
export interface ChatModelCard {
  /** Human-readable description of the model. */
  description?: string;
  /** Display name shown in the UI. */
  displayName?: string;
  /**
   * whether model supports file upload
   */
  files?: boolean;
  /**
   * whether model supports function call
   */
  functionCall?: boolean;
  /** Whether the model is hidden — presumably from selection UI; confirm against consumers. */
  hidden?: boolean;
  /** Model identifier, e.g. "gpt-3.5-turbo". */
  id: string;
  /**
   * whether model is custom
   */
  isCustom?: boolean;
  /**
   * whether model is legacy (deprecated but not removed yet)
   */
  legacy?: boolean;
  /** Context window size in tokens — TODO confirm (could also mean max output tokens). */
  tokens?: number;
  /**
   * whether model supports vision
   */
  vision?: boolean;
}

/**
 * A model provider together with the chat models it offers.
 */
export interface ModelProviderCard {
  /** Models offered by this provider. */
  chatModels: ChatModelCard[];
  /** Whether this provider is enabled. */
  enabled?: boolean;
  /** Provider identifier. */
  id: string;
}

// Tunable sampling parameters for a language-model request.
export interface LLMParams {
  /**
   * Penalty coefficient applied to already-generated tokens, used to reduce
   * verbatim repetition
   * @default 0
   */
  frequency_penalty?: number;
  /**
   * Maximum length (in tokens) of the generated text
   */
  max_tokens?: number;
  /**
   * Penalty coefficient applied to tokens already present, used to encourage
   * the model to move on to new topics
   * @default 0
   */
  presence_penalty?: number;
  /**
   * Sampling randomness, controlling creativity and diversity of the output
   * @default 0.6
   */
  temperature?: number;
  /**
   * Nucleus-sampling cutoff: only the highest-probability tokens whose
   * cumulative mass reaches this value are considered
   * @default 1
   */
  top_p?: number;
}

/** Roles a chat message can carry. */
export type LLMRoleType = "user" | "system" | "assistant" | "function";

/** A minimal chat message: textual content plus the sender's role. */
export interface LLMMessage {
  content: string;
  role: LLMRoleType;
}

/** Plain-text part of a multi-part user message. */
interface UserMessageContentPartText {
  text: string;
  type: "text";
}
/** Image part of a multi-part user message, referenced by URL. */
interface UserMessageContentPartImage {
  image_url: {
    /** Requested image-processing detail level. */
    detail?: "auto" | "low" | "high";
    url: string;
  };
  type: "image_url";
}

/** One part (text or image) of a multi-part user message content. */
export type UserMessageContentPart =
  | UserMessageContentPartText
  | UserMessageContentPartImage;

/**
 * A single message in OpenAI chat-completion request format.
 */
export interface OpenAIChatMessage {
  /**
   * @title Content
   * @description Message content — plain text, or a list of text/image parts
   */
  content: string | UserMessageContentPart[];

  /** Function invocation: serialized arguments plus the function name. */
  function_call?: {
    arguments: string;
    name: string;
  };
  /** Optional name of the message author. */
  name?: string;
  /**
   * Role
   * @description Role of the message sender
   */
  role: LLMRoleType;
}

/**
 * A tool the model may call. Mirrors the OpenAI Chat Completions `tools`
 * entry shape (only function tools). Defined here because `ChatStreamPayload`
 * previously referenced `ChatCompletionTool` without any declaration or
 * import, which does not compile.
 */
export interface ChatCompletionTool {
  /** Tool discriminator; OpenAI currently only supports "function". */
  type: "function";
  /** The function the model may invoke. */
  function: {
    /** What the function does; used by the model to decide when to call it. */
    description?: string;
    /** Function name: a-z, A-Z, 0-9, underscores and dashes, max length 64. */
    name: string;
    /** JSON Schema object describing the accepted arguments. */
    parameters?: {
      [key: string]: any;
    };
  };
}

/**
 * @title Chat Stream Payload
 * Request body for a (streaming) chat-completion call.
 */
export interface ChatStreamPayload {
  /**
   * @title Penalty coefficient applied to repeated tokens, used to reduce
   * verbatim repetition
   * @default 0
   */
  frequency_penalty?: number;
  /**
   * @title Maximum length (in tokens) of the generated text
   */
  max_tokens?: number;
  /**
   * @title List of chat messages
   */
  messages: OpenAIChatMessage[];
  /**
   * @title Model name
   */
  model: string;
  /**
   * @title Number of completions to return
   */
  n?: number;
  /**
   * List of enabled plugins
   */
  plugins?: string[];
  /**
   * @title Penalty coefficient applied to existing tokens, used to encourage
   * new topics
   * @default 0
   */
  presence_penalty?: number;
  /**
   * @default openai
   */
  provider?: string;
  /**
   * @title Whether to use a streaming request
   * @default true
   */
  stream?: boolean;
  /**
   * @title Sampling randomness, controlling creativity and diversity
   * @default 0.5
   * NOTE(review): LLMParams documents the default as 0.6 — confirm which is
   * intended and align the two.
   */
  temperature: number;
  /** How the model chooses tools (e.g. "auto" / "none" / a specific tool). */
  tool_choice?: string;
  /** Tools (function definitions) the model may call. */
  tools?: ChatCompletionTool[];
  /**
   * @title Nucleus-sampling cutoff: only the highest-probability tokens whose
   * cumulative mass reaches this value are considered
   * @default 1
   */
  top_p?: number;
}

/**
 * Definition of a single callable function exposed to the model
 * (OpenAI function-calling schema).
 */
export interface ChatCompletionFunctions {
  /**
   * The description of what the function does.
   * @type {string}
   * @memberof ChatCompletionFunctions
   */
  description?: string;
  /**
   * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
   * @type {string}
   * @memberof ChatCompletionFunctions
   */
  name: string;
  /**
   * The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/gpt/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.
   * @type {{ [key: string]: any }}
   * @memberof ChatCompletionFunctions
   */
  parameters?: {
    [key: string]: any;
  };
}

0 comments on commit 849e723

Please sign in to comment.