diff --git a/README.md b/README.md
index c23d38e..5722205 100644
--- a/README.md
+++ b/README.md
@@ -3,10 +3,6 @@
   <img … alt="logo">
-
-
-  <img … alt="logo">
-
@@ -15,7 +11,7 @@
-  …
+  …
   <img … alt="Contributor Covenant">
@@ -23,7 +19,7 @@
   <img … alt="License">
-  …
+  …
@@ -52,18 +48,18 @@ npm i unillm
 ```
 
 ```ts
 import { UniLLM } from 'unillm';
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // OpenAI
-const response = await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", { messages: ... });
-const response = await uniLLM.createChatCompletion("openai/gpt-4", { messages: ... });
+const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", { messages: ... });
+const response = await unillm.createChatCompletion("openai/gpt-4", { messages: ... });
 
 // Anthropic
-const response = await uniLLM.createChatCompletion("anthropic/claude-2", { messages: ... });
-const response = await uniLLM.createChatCompletion("anthropic/claude-1-instant", { messages: ... });
+const response = await unillm.createChatCompletion("anthropic/claude-2", { messages: ... });
+const response = await unillm.createChatCompletion("anthropic/claude-1-instant", { messages: ... });
 
 // Azure OpenAI
-const response = await uniLLM.createChatCompletion("azure/openai/<deployment-name>", { messages: ... });
+const response = await unillm.createChatCompletion("azure/openai/<deployment-name>", { messages: ... });
 
 // More coming soon!
 ```
@@ -75,7 +71,7 @@ Want to see more examples? Check out the **[interactive docs](https://docs.unill
 To enable streaming, simply provide `stream: true` in the options object. Here is an example:
 
 ```ts
-const response = await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", {
+const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", {
   messages: ...,
   stream: true
 });
@@ -87,7 +83,7 @@ Want to see more examples? Check out the **[interactive docs](https://docs.unill
 We welcome contributions from the community! Please feel free to submit pull requests or create issues for bugs or feature suggestions.
 
-If you want to contribute but are not sure how, join our [Discord](https://pezzo.cc/discord) and we'll be happy to help you out!
+If you want to contribute but are not sure how, join our [Discord](https://discord.gg/XcEVPePwn2) and we'll be happy to help you out!
 
 Please check out [CONTRIBUTING.md](CONTRIBUTING.md) before contributing.
diff --git a/apps/demo/app/api/chat/route.ts b/apps/demo/app/api/chat/route.ts
index 7a9e64d..8232ce1 100644
--- a/apps/demo/app/api/chat/route.ts
+++ b/apps/demo/app/api/chat/route.ts
@@ -4,9 +4,9 @@ import { UniLLM } from "unillm";
 export async function POST(req: Request) {
   const { messages, llm } = await req.json();
 
-  const uniLLM = new UniLLM();
+  const unillm = new UniLLM();
 
-  const response = await uniLLM.createChatCompletion(llm, {
+  const response = await unillm.createChatCompletion(llm, {
     temperature: 0,
     max_tokens: 500,
     messages: [...messages],
diff --git a/apps/docs/components/DynamicCodeExample.tsx b/apps/docs/components/DynamicCodeExample.tsx
index 19e0b02..dd3b55d 100644
--- a/apps/docs/components/DynamicCodeExample.tsx
+++ b/apps/docs/components/DynamicCodeExample.tsx
@@ -47,7 +47,7 @@ export const getSelectContent = (allowedProvider?) => {
       {models.map(({ name, value }) => (
-        …
+        …
           {providerName}
diff --git a/apps/docs/pages/index.mdx b/apps/docs/pages/index.mdx
index 3aa8fee..8eb8dd9 100644
--- a/apps/docs/pages/index.mdx
+++ b/apps/docs/pages/index.mdx
@@ -33,10 +33,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/apps/docs/pages/providers-and-models/anthropic.mdx b/apps/docs/pages/providers-and-models/anthropic.mdx
index fec5571..ae4729f 100644
--- a/apps/docs/pages/providers-and-models/anthropic.mdx
+++ b/apps/docs/pages/providers-and-models/anthropic.mdx
@@ -18,10 +18,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/apps/docs/pages/providers-and-models/azure-openai.mdx b/apps/docs/pages/providers-and-models/azure-openai.mdx
index 256e1fb..9e23799 100644
--- a/apps/docs/pages/providers-and-models/azure-openai.mdx
+++ b/apps/docs/pages/providers-and-models/azure-openai.mdx
@@ -21,10 +21,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/apps/docs/pages/providers-and-models/openai.mdx b/apps/docs/pages/providers-and-models/openai.mdx
index b38d407..816323c 100644
--- a/apps/docs/pages/providers-and-models/openai.mdx
+++ b/apps/docs/pages/providers-and-models/openai.mdx
@@ -18,10 +18,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/packages/unillm-node/tests/anthropic.test.ts b/packages/unillm-node/tests/anthropic.test.ts
index 9209768..7148812 100644
--- a/packages/unillm-node/tests/anthropic.test.ts
+++ b/packages/unillm-node/tests/anthropic.test.ts
@@ -4,14 +4,14 @@ import * as utils from "./utils/validation.util";
 import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testParams } from "./utils/test-data.util";
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - Anthropic", () => {
   const model = "anthropic/claude-2";
 
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
       });
@@ -23,7 +23,7 @@ describe("#createChatCompletion - Anthropic", () => {
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(model, {
+        await unillm.createChatCompletion(model, {
           ...testParams,
           stream: false,
           messages: [],
@@ -40,7 +40,7 @@ describe("#createChatCompletion - Anthropic", () => {
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: true,
       });
diff --git a/packages/unillm-node/tests/azure-openai.test.ts b/packages/unillm-node/tests/azure-openai.test.ts
index 0b59c8b..fa7a3e6 100644
--- a/packages/unillm-node/tests/azure-openai.test.ts
+++ b/packages/unillm-node/tests/azure-openai.test.ts
@@ -5,12 +5,12 @@ import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testFunctions, testParams } from "./utils/test-data.util";
 
 const deployment = process.env.AZURE_OPENAI_DEPLOYMENT;
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - Azure OpenAI", () => {
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(
+      const response = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
@@ -23,7 +23,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
     });
 
     it("Should return a valid function calling response", async () => {
-      const response = await uniLLM.createChatCompletion(
+      const response = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
@@ -39,7 +39,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(`azure/openai/${deployment}`, {
+        await unillm.createChatCompletion(`azure/openai/${deployment}`, {
           ...testParams,
           stream: false,
           messages: [],
@@ -56,7 +56,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(
+      const stream = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
diff --git a/packages/unillm-node/tests/openai.test.ts b/packages/unillm-node/tests/openai.test.ts
index fde72b9..f53a8c4 100644
--- a/packages/unillm-node/tests/openai.test.ts
+++ b/packages/unillm-node/tests/openai.test.ts
@@ -4,14 +4,14 @@ import * as utils from "./utils/validation.util";
 import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testParams, testFunctions } from "./utils/test-data.util";
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - OpenAI", () => {
   const model = "openai/gpt-3.5-turbo";
 
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
       });
@@ -21,7 +21,7 @@ describe("#createChatCompletion - OpenAI", () => {
     });
 
     it("Should return a valid function calling response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
         functions: testFunctions,
@@ -34,7 +34,7 @@ describe("#createChatCompletion - OpenAI", () => {
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(model, {
+        await unillm.createChatCompletion(model, {
           ...testParams,
           stream: false,
           messages: [],
@@ -51,7 +51,7 @@ describe("#createChatCompletion - OpenAI", () => {
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(model, {
+      const stream = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: true,
       });