
Commit

Merge pull request #45 from rellfy/credentials
Use Credentials struct for auth
rellfy authored Dec 18, 2024
2 parents 0dc71b4 + 8db387b commit 39dd271
Showing 14 changed files with 410 additions and 201 deletions.
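
In short: authentication moves from the process-global `set_key` / `set_base_url` setters to a `Credentials` value attached to each request through the builders. A condensed before/after sketch, assembled from the diffs below (the `messages` setup is elided):

```rust
// Before this PR: authentication was global, mutable state.
//     set_key(env::var("OPENAI_KEY").unwrap());
//     set_base_url(env::var("OPENAI_BASE_URL").unwrap_or_default());

// After: credentials are a plain value, read once from OPENAI_KEY and
// (optionally) OPENAI_BASE_URL, then passed per request.
let credentials = Credentials::from_env();
let chat_completion = ChatCompletion::builder("gpt-3.5-turbo", messages.clone())
    .credentials(credentials.clone())
    .create()
    .await
    .unwrap();
```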
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "openai"
version = "1.0.0-alpha.16"
version = "1.0.0-alpha.17"
authors = ["Lorenzo Fontoura <[email protected]>", "valentinegb"]
edition = "2021"
description = "An unofficial Rust library for the OpenAI API."
48 changes: 37 additions & 11 deletions README.md
@@ -10,24 +10,50 @@ An unofficial Rust library for the OpenAI API.
> There may be breaking changes between versions while in alpha.
> See [Implementation Progress](#implementation-progress).
## Core Principles

- Modularity
- Library, not a wrapper
- Idiomatic Rust
- Environment variables should be the prioritized method of authentication,
  but not the only option

## Examples

Examples can be found in the `examples` directory.

-As the package is still a work in progress and there may be breaking changes,
-examples are not available for all the crate's functionality.
+Please note that examples are not available for all the crate's functionality;
+PRs to expand the coverage are appreciated.

-Currently, there are examples for the `completions` module and the `chat` module.
+Currently, there are examples for the `completions` module and the `chat`
+module.
For other modules, see the `tests` submodules for reference.

### Chat Example

```rust
// Relies on OPENAI_KEY and optionally OPENAI_BASE_URL.
let credentials = Credentials::from_env();
let messages = vec![
    ChatCompletionMessage {
        role: ChatCompletionMessageRole::System,
        content: Some("You are a helpful assistant.".to_string()),
        name: None,
        function_call: None,
    },
    ChatCompletionMessage {
        role: ChatCompletionMessageRole::User,
        content: Some("Tell me a random crab fact".to_string()),
        name: None,
        function_call: None,
    },
];
let chat_completion = ChatCompletion::builder("gpt-4o", messages.clone())
    .credentials(credentials.clone())
    .create()
    .await
    .unwrap();
let returned_message = chat_completion.choices.first().unwrap().message.clone();
// Assistant: Sure! Here's a random crab fact: ...
println!(
    "{:#?}: {}",
    returned_message.role,
    returned_message.content.unwrap().trim()
);
```
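
For completeness: the snippet above assumes an async context and imports along these lines, matching `examples/chat_simple.rs` added later in this diff:

```rust
use openai::{
    chat::{ChatCompletion, ChatCompletionMessage, ChatCompletionMessageRole},
    Credentials,
};
```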

## Implementation Progress

`██████████` Models
13 changes: 4 additions & 9 deletions examples/chat_cli.rs
@@ -1,21 +1,15 @@
-use std::{
-    env,
-    io::{stdin, stdout, Write},
-};

use dotenvy::dotenv;

use openai::{
    chat::{ChatCompletion, ChatCompletionMessage, ChatCompletionMessageRole},
-    set_base_url, set_key,
+    Credentials,
};
+use std::io::{stdin, stdout, Write};

#[tokio::main]
async fn main() {
// Make sure you have a file named `.env` with the `OPENAI_KEY` environment variable defined!
dotenv().unwrap();
-    set_key(env::var("OPENAI_KEY").unwrap());
-    set_base_url(env::var("OPENAI_BASE_URL").unwrap_or_default());
+    let credentials = Credentials::from_env();

let mut messages = vec![ChatCompletionMessage {
role: ChatCompletionMessageRole::System,
@@ -39,6 +33,7 @@ async fn main() {
});

let chat_completion = ChatCompletion::builder("gpt-3.5-turbo", messages.clone())
+        .credentials(credentials.clone())
.create()
.await
.unwrap();
39 changes: 39 additions & 0 deletions examples/chat_simple.rs
@@ -0,0 +1,39 @@
use dotenvy::dotenv;
use openai::{
    chat::{ChatCompletion, ChatCompletionMessage, ChatCompletionMessageRole},
    Credentials,
};

#[tokio::main]
async fn main() {
    // Make sure you have a file named `.env` with the `OPENAI_KEY` environment variable defined!
    dotenv().unwrap();
    // Relies on OPENAI_KEY and optionally OPENAI_BASE_URL.
    let credentials = Credentials::from_env();
    let messages = vec![
        ChatCompletionMessage {
            role: ChatCompletionMessageRole::System,
            content: Some("You are a helpful assistant.".to_string()),
            name: None,
            function_call: None,
        },
        ChatCompletionMessage {
            role: ChatCompletionMessageRole::User,
            content: Some("Tell me a random crab fact".to_string()),
            name: None,
            function_call: None,
        },
    ];
    let chat_completion = ChatCompletion::builder("gpt-4o", messages.clone())
        .credentials(credentials.clone())
        .create()
        .await
        .unwrap();
    let returned_message = chat_completion.choices.first().unwrap().message.clone();
    // Assistant: Sure! Here's a random crab fact: Crabs communicate with each other by drumming or waving their pincers.
    println!(
        "{:#?}: {}",
        returned_message.role,
        returned_message.content.unwrap().trim()
    );
}
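
To try this example from a checkout of the repository, put `OPENAI_KEY` (and optionally `OPENAI_BASE_URL`) in a `.env` file and run `cargo run --example chat_simple`; Cargo resolves the example name from the file under `examples/`.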
10 changes: 4 additions & 6 deletions examples/chat_stream_cli.rs
@@ -2,19 +2,16 @@ use dotenvy::dotenv;
use openai::chat::{ChatCompletion, ChatCompletionDelta};
use openai::{
    chat::{ChatCompletionMessage, ChatCompletionMessageRole},
-    set_key,
-};
-use std::{
-    env,
-    io::{stdin, stdout, Write},
+    Credentials,
};
+use std::io::{stdin, stdout, Write};
use tokio::sync::mpsc::Receiver;

#[tokio::main]
async fn main() {
// Make sure you have a file named `.env` with the `OPENAI_KEY` environment variable defined!
dotenv().unwrap();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();

let mut messages = vec![ChatCompletionMessage {
role: ChatCompletionMessageRole::System,
@@ -38,6 +35,7 @@ });
});

let chat_stream = ChatCompletionDelta::builder("gpt-3.5-turbo", messages.clone())
+        .credentials(credentials.clone())
.create_stream()
.await
.unwrap();
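
The collapsed remainder of the example consumes the stream. For reference, a minimal consumption loop, assuming (per the `src/chat.rs` changes below) that `create_stream` returns a `tokio::sync::mpsc::Receiver<ChatCompletionDelta>` and that `ChatCompletionDelta` implements `Debug`:

```rust
// Receive deltas until the sending task closes the channel.
let mut chat_stream = chat_stream;
while let Some(delta) = chat_stream.recv().await {
    // Each delta carries one incremental chunk of the completion.
    println!("{:?}", delta);
}
```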
7 changes: 4 additions & 3 deletions examples/completions_cli.rs
@@ -1,12 +1,12 @@
use dotenvy::dotenv;
-use openai::{completions::Completion, set_key};
-use std::{env, io::stdin};
+use openai::{completions::Completion, Credentials};
+use std::io::stdin;

#[tokio::main]
async fn main() {
// Make sure you have a file named `.env` with the `OPENAI_KEY` environment variable defined!
dotenv().unwrap();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();

loop {
println!("Prompt:");
@@ -18,6 +18,7 @@ async fn main() {
let completion = Completion::builder("gpt-3.5-turbo-instruct")
.prompt(&prompt)
.max_tokens(1024)
+        .credentials(credentials.clone())
.create()
.await
.unwrap();
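
All the examples now start with `Credentials::from_env()`. Its implementation lives in `src/lib.rs`, which is not part of this diff; based on the comments above ("Relies on OPENAI_KEY and optionally OPENAI_BASE_URL"), a hypothetical sketch of what it plausibly does:

```rust
use std::env;

// Hypothetical reconstruction, not the crate's actual code: read the API key
// (required) and the base URL (optional, defaulting to the public endpoint).
#[derive(Clone, Debug)]
pub struct Credentials {
    api_key: String,
    base_url: String,
}

impl Credentials {
    pub fn from_env() -> Self {
        let api_key = env::var("OPENAI_KEY").expect("OPENAI_KEY must be set");
        let base_url = env::var("OPENAI_BASE_URL")
            .unwrap_or_else(|_| "https://api.openai.com/v1/".to_string());
        Credentials { api_key, base_url }
    }
}
```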
44 changes: 29 additions & 15 deletions src/chat.rs
@@ -1,6 +1,6 @@
//! Given a chat conversation, the model will return a chat completion response.
-use super::{openai_post, ApiResponseOrError, Usage};
+use super::{openai_post, ApiResponseOrError, Credentials, Usage};
use crate::openai_request_stream;
use derive_builder::Builder;
use futures_util::StreamExt;
@@ -210,6 +210,10 @@ pub struct ChatCompletionRequest {
#[builder(default)]
#[serde(skip_serializing_if = "Option::is_none")]
response_format: Option<ChatCompletionResponseFormat>,
+    /// The credentials to use for this request.
+    #[serde(skip_serializing)]
+    #[builder(default)]
+    credentials: Option<Credentials>,
}
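
The `#[serde(skip_serializing)]` attribute on the new field is what keeps credentials out of the JSON body sent to the API. A standalone illustration of that serde behavior, using simplified stand-in types rather than the crate's actual structs:

```rust
use serde::Serialize;

#[derive(Serialize)]
struct Request {
    model: String,
    // Never serialized, so it cannot leak into the request payload.
    #[serde(skip_serializing)]
    credentials: Option<String>,
}

fn main() {
    let request = Request {
        model: "gpt-4o".to_string(),
        credentials: Some("secret".to_string()),
    };
    assert_eq!(
        serde_json::to_string(&request).unwrap(),
        r#"{"model":"gpt-4o"}"#
    );
}
```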

#[derive(Serialize, Debug, Clone, Eq, PartialEq)]
@@ -245,17 +249,24 @@ impl<C> ChatCompletionGeneric<C> {
}

impl ChatCompletion {
-    pub async fn create(request: &ChatCompletionRequest) -> ApiResponseOrError<Self> {
-        openai_post("chat/completions", request).await
+    pub async fn create(request: ChatCompletionRequest) -> ApiResponseOrError<Self> {
+        let credentials_opt = request.credentials.clone();
+        openai_post("chat/completions", &request, credentials_opt).await
}
}

impl ChatCompletionDelta {
    pub async fn create(
-        request: &ChatCompletionRequest,
+        request: ChatCompletionRequest,
    ) -> Result<Receiver<Self>, CannotCloneRequestError> {
-        let stream =
-            openai_request_stream(Method::POST, "chat/completions", |r| r.json(request)).await?;
+        let credentials_opt = request.credentials.clone();
+        let stream = openai_request_stream(
+            Method::POST,
+            "chat/completions",
+            |r| r.json(&request),
+            credentials_opt,
+        )
+        .await?;
let (tx, rx) = channel::<Self>(32);
tokio::spawn(forward_deserialized_chat_response_stream(stream, tx));
Ok(rx)
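
`create_stream` keeps the spawn-and-forward shape it had before: the HTTP response stream is moved into a background task that pushes items into a bounded channel, and the caller gets the receiving end. A generic sketch of that pattern (simplified; the crate's `forward_deserialized_chat_response_stream` additionally deserializes the raw chunks):

```rust
use futures_util::{Stream, StreamExt};
use tokio::sync::mpsc::{channel, Receiver};

// Forward every item of a stream into an mpsc channel; return the receiver.
fn forward<T, S>(mut items: S) -> Receiver<T>
where
    T: Send + 'static,
    S: Stream<Item = T> + Send + Unpin + 'static,
{
    let (tx, rx) = channel(32);
    tokio::spawn(async move {
        while let Some(item) = items.next().await {
            // Stop early if the receiver has been dropped.
            if tx.send(item).await.is_err() {
                break;
            }
        }
    });
    rx
}
```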
@@ -440,14 +451,14 @@ async fn forward_deserialized_chat_response_stream(

impl ChatCompletionBuilder {
pub async fn create(self) -> ApiResponseOrError<ChatCompletion> {
-        ChatCompletion::create(&self.build().unwrap()).await
+        ChatCompletion::create(self.build().unwrap()).await
}

pub async fn create_stream(
mut self,
) -> Result<Receiver<ChatCompletionDelta>, CannotCloneRequestError> {
self.stream = Some(Some(true));
-        ChatCompletionDelta::create(&self.build().unwrap()).await
+        ChatCompletionDelta::create(self.build().unwrap()).await
}
}

@@ -461,14 +472,12 @@ fn clone_default_unwrapped_option_string(string: &Option<String>) -> String {
#[cfg(test)]
mod tests {
use super::*;
-    use crate::set_key;
    use dotenvy::dotenv;
-    use std::env;

#[tokio::test]
async fn chat() {
dotenv().ok();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();

let chat_completion = ChatCompletion::builder(
"gpt-3.5-turbo",
@@ -481,6 +490,7 @@ mod tests {
)
.temperature(0.0)
.response_format(ChatCompletionResponseFormat::text())
+    .credentials(credentials)
.create()
.await
.unwrap();
@@ -503,7 +513,7 @@
#[tokio::test]
async fn chat_seed() {
dotenv().ok();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();

let chat_completion = ChatCompletion::builder(
"gpt-3.5-turbo",
@@ -520,6 +530,7 @@
// Determinism currently comes from temperature 0, not seed.
.temperature(0.0)
.seed(1337u64)
+    .credentials(credentials)
.create()
.await
.unwrap();
@@ -540,7 +551,7 @@
#[tokio::test]
async fn chat_stream() {
dotenv().ok();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();

let chat_stream = ChatCompletion::builder(
"gpt-3.5-turbo",
@@ -552,6 +563,7 @@
}],
)
.temperature(0.0)
+    .credentials(credentials)
.create_stream()
.await
.unwrap();
@@ -574,7 +586,7 @@
#[tokio::test]
async fn chat_function() {
dotenv().ok();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();

let chat_stream = ChatCompletion::builder(
"gpt-4o",
@@ -601,6 +613,7 @@
})),
}])
.temperature(0.2)
+    .credentials(credentials)
.create_stream()
.await
.unwrap();
@@ -642,7 +655,7 @@
#[tokio::test]
async fn chat_response_format_json() {
dotenv().ok();
-    set_key(env::var("OPENAI_KEY").unwrap());
+    let credentials = Credentials::from_env();
let chat_completion = ChatCompletion::builder(
"gpt-3.5-turbo",
[ChatCompletionMessage {
@@ -655,6 +668,7 @@
.temperature(0.0)
.seed(1337u64)
.response_format(ChatCompletionResponseFormat::json_object())
+    .credentials(credentials)
.create()
.await
.unwrap();
(Diffs for the remaining changed files were not loaded in this view.)
