
Commit

fix(docs): use lowercase
arielweinberger committed Oct 22, 2023
1 parent 4963d8f commit 9251929
Showing 10 changed files with 35 additions and 39 deletions.
README.md (24 changes: 10 additions & 14 deletions)
@@ -3,10 +3,6 @@
     <a href="https://docs.unillm.ai/#gh-light-mode-only" target="_blank">
       <img src=".github/assets/logo-light-mode.svg" alt="logo" width="280">
     </a>
-
-    <a href="https://docs.unillm.ai/#gh-dark-mode-only" target="_blank">
-      <img src=".github/assets/logo-dark-mode.svg" alt="logo" width="280">
-    </a>
   </h1>
 </p>
 
@@ -15,15 +11,15 @@
 </p>
 
 <p align="center">
-  <img src="https://github.com/pezzolabs/pezzo/actions/workflows/ci.yaml/badge.svg" />
+  <img src="https://github.com/pezzolabs/unillm/actions/workflows/ci.yaml/badge.svg" />
   <a href="CODE_OF_CONDUCT.md">
     <img src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg" alt="Contributor Covenant">
   </a>
   <a href="https://opensource.org/licenses/MIT">
     <img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License">
   </a>
   <a href="https://www.npmjs.com/package/unillm" target="_blank">
-    <img src="https://img.shields.io/badge/npm-@pezzo/client-green">
+    <img src="https://img.shields.io/badge/npm-unillm-green">
   </a>
 </p>

@@ -52,18 +48,18 @@ npm i unillm
 ```ts
 import { UniLLM } from 'unillm';
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // OpenAI
-const response = await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", { messages: ... });
-const response = await uniLLM.createChatCompletion("openai/gpt-4", { messages: ... });
+const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", { messages: ... });
+const response = await unillm.createChatCompletion("openai/gpt-4", { messages: ... });
 
 // Anthropic
-const response = await uniLLM.createChatCompletion("anthropic/claude-2", { messages: ... });
-const response = await uniLLM.createChatCompletion("anthropic/claude-1-instant", { messages: ... });
+const response = await unillm.createChatCompletion("anthropic/claude-2", { messages: ... });
+const response = await unillm.createChatCompletion("anthropic/claude-1-instant", { messages: ... });
 
 // Azure OpenAI
-const response = await uniLLM.createChatCompletion("azure/openai/<deployment-name>", { messages: ... });
+const response = await unillm.createChatCompletion("azure/openai/<deployment-name>", { messages: ... });
 
 // More coming soon!
 ```
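
For readers skimming the diff: the `messages: ...` placeholders above elide an OpenAI-style chat history. Here is a minimal sketch of a complete call after this commit's rename; the message array is illustrative, and the response is assumed to mirror OpenAI's `ChatCompletion` shape, as the package's test files below suggest:

```ts
import { UniLLM } from 'unillm';

const unillm = new UniLLM();

const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", {
  // Illustrative messages array — any OpenAI-style chat history works here.
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "Summarize UniLLM in one sentence." },
  ],
});

// Assumption: the response follows the OpenAI-compatible shape used in the tests below.
console.log(response.choices[0].message.content);
```
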
@@ -75,7 +71,7 @@ Want to see more examples? Check out the **[interactive docs](https://docs.unill
 To enable streaming, simply provide `stream: true` in the options object. Here is an example:
 
 ```ts
-const response = await uniLLM.createChatCompletion("openai/gpt-3.5-turbo", {
+const response = await unillm.createChatCompletion("openai/gpt-3.5-turbo", {
   messages: ...,
   stream: true
 });
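
The hunk above ends before the stream is consumed. A minimal sketch of iterating over it, assuming the returned stream is async-iterable and yields OpenAI-style `ChatCompletionChunk` objects (the type the test files below import):

```ts
// Assumption: the stream yields OpenAI-compatible ChatCompletionChunk objects,
// as the `ChatCompletionChunk` import in the tests below suggests.
for await (const chunk of response) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```
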
@@ -87,7 +83,7 @@ Want to see more examples? Check out the **[interactive docs](https://docs.unill
 
 We welcome contributions from the community! Please feel free to submit pull requests or create issues for bugs or feature suggestions.
 
-If you want to contribute but not sure how, join our [Discord](https://pezzo.cc/discord) and we'll be happy to help you out!
+If you want to contribute but not sure how, join our [Discord](https://discord.gg/XcEVPePwn2) and we'll be happy to help you out!
 
 Please check out [CONTRIBUTING.md](CONTRIBUTING.md) before contributing.
 
apps/demo/app/api/chat/route.ts (4 changes: 2 additions & 2 deletions)
@@ -4,9 +4,9 @@ import { UniLLM } from "unillm";
 export async function POST(req: Request) {
   const { messages, llm } = await req.json();
 
-  const uniLLM = new UniLLM();
+  const unillm = new UniLLM();
 
-  const response = await uniLLM.createChatCompletion(llm, {
+  const response = await unillm.createChatCompletion(llm, {
     temperature: 0,
     max_tokens: 500,
     messages: [...messages],
apps/docs/components/DynamicCodeExample.tsx (2 changes: 1 addition & 1 deletion)
@@ -47,7 +47,7 @@ export const getSelectContent = (allowedProvider?) => {
       </Select.Label>
 
       {models.map(({ name, value }) => (
-        <Select.Item value={value}>
+        <Select.Item key={value} value={value}>
           <div className="flex items-center justify-center">
 
             <Image className="mr-2 rounded-sm" src={logo} width={20} height={20} alt={providerName} />
apps/docs/pages/index.mdx (4 changes: 2 additions & 2 deletions)
@@ -33,10 +33,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
apps/docs/pages/providers-and-models/anthropic.mdx (4 changes: 2 additions & 2 deletions)
@@ -18,10 +18,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
apps/docs/pages/providers-and-models/azure-openai.mdx (4 changes: 2 additions & 2 deletions)
@@ -21,10 +21,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
apps/docs/pages/providers-and-models/openai.mdx (4 changes: 2 additions & 2 deletions)
@@ -18,10 +18,10 @@ import { UniLLM } from 'unillm';
 */
 
 // Setup UniLLM
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 // Use any LLM provider and model
-const response = await uniLLM.createChatCompletion("#MODEL#", {
+const response = await unillm.createChatCompletion("#MODEL#", {
   temperature: 0,
   messages: [
     {
packages/unillm-node/tests/anthropic.test.ts (8 changes: 4 additions & 4 deletions)
@@ -4,14 +4,14 @@ import * as utils from "./utils/validation.util";
 import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testParams } from "./utils/test-data.util";
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - Anthropic", () => {
   const model = "anthropic/claude-2";
 
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
       });
@@ -23,7 +23,7 @@ describe("#createChatCompletion - Anthropic", () => {
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(model, {
+        await unillm.createChatCompletion(model, {
           ...testParams,
           stream: false,
           messages: [],
@@ -40,7 +40,7 @@ describe("#createChatCompletion - Anthropic", () => {
 
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: true,
       });
packages/unillm-node/tests/azure-openai.test.ts (10 changes: 5 additions & 5 deletions)
@@ -5,12 +5,12 @@ import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testFunctions, testParams } from "./utils/test-data.util";
 
 const deployment = process.env.AZURE_OPENAI_DEPLOYMENT;
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - Azure OpenAI", () => {
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(
+      const response = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
@@ -23,7 +23,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
     });
 
     it("Should return a valid function calling response", async () => {
-      const response = await uniLLM.createChatCompletion(
+      const response = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
@@ -39,7 +39,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(`azure/openai/${deployment}`, {
+        await unillm.createChatCompletion(`azure/openai/${deployment}`, {
           ...testParams,
           stream: false,
           messages: [],
@@ -56,7 +56,7 @@ describe("#createChatCompletion - Azure OpenAI", () => {
 
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(
+      const stream = await unillm.createChatCompletion(
         `azure/openai/${deployment}`,
         {
           ...testParams,
packages/unillm-node/tests/openai.test.ts (10 changes: 5 additions & 5 deletions)
@@ -4,14 +4,14 @@ import * as utils from "./utils/validation.util";
 import type { ChatCompletionChunk } from "openai/resources/chat";
 import { testParams, testFunctions } from "./utils/test-data.util";
 
-const uniLLM = new UniLLM();
+const unillm = new UniLLM();
 
 describe("#createChatCompletion - OpenAI", () => {
   const model = "openai/gpt-3.5-turbo";
 
   describe("Non streaming", () => {
     it("Should return a valid chat completion response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
       });
@@ -21,7 +21,7 @@ describe("#createChatCompletion - OpenAI", () => {
     });
 
     it("Should return a valid function calling response", async () => {
-      const response = await uniLLM.createChatCompletion(model, {
+      const response = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: false,
         functions: testFunctions,
@@ -34,7 +34,7 @@ describe("#createChatCompletion - OpenAI", () => {
     it("Should throw an error and return a unified error response", async () => {
       let errorOccurred = false;
       try {
-        await uniLLM.createChatCompletion(model, {
+        await unillm.createChatCompletion(model, {
           ...testParams,
           stream: false,
           messages: [],
@@ -51,7 +51,7 @@ describe("#createChatCompletion - OpenAI", () => {
 
   describe("Streaming", () => {
     it("Should return a valid iterable chat completion stream", async () => {
-      const stream = await uniLLM.createChatCompletion(model, {
+      const stream = await unillm.createChatCompletion(model, {
         ...testParams,
         stream: true,
       });
