diff --git a/apps/docs/pages/index.mdx b/apps/docs/pages/index.mdx
index 3aa8fee..8eb8dd9 100644
--- a/apps/docs/pages/index.mdx
+++ b/apps/docs/pages/index.mdx
@@ -33,10 +33,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/apps/docs/pages/providers-and-models/anthropic.mdx b/apps/docs/pages/providers-and-models/anthropic.mdx
index fec5571..ae4729f 100644
--- a/apps/docs/pages/providers-and-models/anthropic.mdx
+++ b/apps/docs/pages/providers-and-models/anthropic.mdx
@@ -18,10 +18,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/apps/docs/pages/providers-and-models/azure-openai.mdx b/apps/docs/pages/providers-and-models/azure-openai.mdx
index 256e1fb..9e23799 100644
--- a/apps/docs/pages/providers-and-models/azure-openai.mdx
+++ b/apps/docs/pages/providers-and-models/azure-openai.mdx
@@ -21,10 +21,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/apps/docs/pages/providers-and-models/openai.mdx b/apps/docs/pages/providers-and-models/openai.mdx
index b38d407..816323c 100644
--- a/apps/docs/pages/providers-and-models/openai.mdx
+++ b/apps/docs/pages/providers-and-models/openai.mdx
@@ -18,10 +18,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
diff --git a/packages/unillm-node/tests/anthropic.test.ts b/packages/unillm-node/tests/anthropic.test.ts
index 9209768..7148812 100644
--- a/packages/unillm-node/tests/anthropic.test.ts
+++ b/packages/unillm-node/tests/anthropic.test.ts
@@ -4,14 +4,14 @@ import * as utils from "./utils/validation.util";
 import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testParams } from "./utils/test-data.util";
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - Anthropic", () => {
   const model = "anthropic/claude-2";
 
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
       });
@@ -23,7 +23,7 @@ describe("#createChatCompletion - Anthropic", () => {
it("Should throw an error and return a unified error response", async () => {
let errorOccurred = false;
try {
- await uniLLM.createChatCompletion(model, {
+ await unillm.createChatCompletion(model, {
...testParams,
stream: false,
messages: [],
@@ -40,7 +40,7 @@ describe("#createChatCompletion - Anthropic", () => {
 
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: true,
       });
diff --git a/packages/unillm-node/tests/azure-openai.test.ts b/packages/unillm-node/tests/azure-openai.test.ts
index 0b59c8b..fa7a3e6 100644
--- a/packages/unillm-node/tests/azure-openai.test.ts
+++ b/packages/unillm-node/tests/azure-openai.test.ts
@@ -5,12 +5,12 @@ import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testFunctions, testParams } from "./utils/test-data.util";
 
 const deployment = process.env.AZURE_OPENAI_DEPLOYMENT;
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - Azure OpenAI", () => {
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(
+      const response = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
@@ -23,7 +23,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
     });
 
     it("Should return a valid function calling response", async () => {
-      const response = await uniLLM.createChatCompletion(
+      const response = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
@@ -39,7 +39,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
it("Should throw an error and return a unified error response", async () => {
let errorOccurred = false;
try {
- await uniLLM.createChatCompletion(`azure/openai/${deployment}`, {
+ await unillm.createChatCompletion(`azure/openai/${deployment}`, {
...testParams,
stream: false,
messages: [],
@@ -56,7 +56,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
 
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(
+      const stream = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
diff --git a/packages/unillm-node/tests/openai.test.ts b/packages/unillm-node/tests/openai.test.ts
index fde72b9..f53a8c4 100644
--- a/packages/unillm-node/tests/openai.test.ts
+++ b/packages/unillm-node/tests/openai.test.ts
@@ -4,14 +4,14 @@ import * as utils from "./utils/validation.util";
 import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testParams, testFunctions } from "./utils/test-data.util";
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - OpenAI", () => {
   const model = "openai/gpt-3.5-turbo";
 
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
       });
@@ -21,7 +21,7 @@ describe("#createChatCompletion - OpenAI", () => {
     });
 
     it("Should return a valid function calling response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
         functions: testFunctions,
@@ -34,7 +34,7 @@ describe("#createChatCompletion - OpenAI", () => {
it("Should throw an error and return a unified error response", async () => {
let errorOccurred = false;
try {
- await uniLLM.createChatCompletion(model, {
+ await unillm.createChatCompletion(model, {
...testParams,
stream: false,
messages: [],
@@ -51,7 +51,7 @@ describe("#createChatCompletion - OpenAI", () => {
 
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(model, {
+      const stream = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: true,
       });