fix(perplexity): allow to specify chat messages

roushou committed Sep 18, 2024
1 parent ad2df49 · commit dc2da8b
Showing 8 changed files with 25 additions and 10 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,9 @@
+## What's Changed in perplexity-v0.1.1
+* fix(perplexity): allow to specify chat messages
+* chore: clean-up
+
+**Full Changelog**: https://github.com///compare/perplexity-v0.1.0...perplexity-v0.1.1
+
 ## What's Changed in opai-v0.3.1
 * refactor(openai): use struct instead of enum for `O1`

4 changes: 2 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion examples/perplexity-chat-completion/README.md

This file was deleted.

2 changes: 1 addition & 1 deletion examples/{perplexity-chat-completion → perplexity-chat}/Cargo.toml
@@ -1,5 +1,5 @@
 [package]
-name = "perplexity-chat-completion"
+name = "perplexity-chat"
 version = "0.1.0"
 edition = "2021"
 publish = false
1 change: 1 addition & 0 deletions examples/perplexity-chat/README.md
@@ -0,0 +1 @@
+# Perplexity Chat Example
8 changes: 6 additions & 2 deletions examples/{perplexity-chat-completion → perplexity-chat}/src/main.rs
@@ -1,5 +1,5 @@
 use perplexity::{
-    client::{Client, CreateChatCompletion, Model},
+    client::{Client, CreateChatCompletion, Message, Model, Role},
     config::Config,
 };

@@ -11,7 +11,11 @@ async fn main() {
     let config = Config::new(api_key);
     let client = Client::new(config).unwrap();
 
-    let message = CreateChatCompletion::new(Model::Llama31SonarLargeOnline);
+    let messages: Vec<Message> = vec![Message {
+        role: Role::User,
+        content: "Find me the best pad thai restaurant in Bangkok".to_string(),
+    }];
+    let message = CreateChatCompletion::new(Model::Llama31SonarLargeOnline, messages);
     let result = client.create_completion(message).await.unwrap();
     println!("{:?}", result);
 }
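
Pulled together, the updated example reads as below. The first ten lines of the file fall outside these hunks, so the runtime attribute and the source of `api_key` are assumptions (tokio and an environment variable are guesses, not taken from the repository):

```rust
use perplexity::{
    client::{Client, CreateChatCompletion, Message, Model, Role},
    config::Config,
};

// Assumed: an async runtime attribute; the diff never shows which one.
#[tokio::main]
async fn main() {
    // Assumed: where `api_key` comes from is outside the hunk; an
    // environment variable named PERPLEXITY_API_KEY is a guess.
    let api_key = std::env::var("PERPLEXITY_API_KEY").expect("missing API key");
    let config = Config::new(api_key);
    let client = Client::new(config).unwrap();

    let messages: Vec<Message> = vec![Message {
        role: Role::User,
        content: "Find me the best pad thai restaurant in Bangkok".to_string(),
    }];
    let message = CreateChatCompletion::new(Model::Llama31SonarLargeOnline, messages);
    let result = client.create_completion(message).await.unwrap();
    println!("{:?}", result);
}
```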
2 changes: 1 addition & 1 deletion perplexity/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "perplexity"
-version = "0.1.0"
+version = "0.1.1"
 edition.workspace = true
 authors.workspace = true
 description = "Perplexity Rust SDK"
11 changes: 8 additions & 3 deletions perplexity/src/client.rs
@@ -99,7 +99,7 @@ pub enum FinishReason {
 }
 
 /// The message generated by the model.
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct Message {
     /// The contents of the message in this turn of conversation.
     pub content: String,
@@ -118,7 +118,7 @@ pub struct CompletionDelta {
     pub role: Role,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
 pub enum Role {
     System,
@@ -143,6 +143,9 @@ pub struct CreateChatCompletion {
     /// The name of the model that will complete your prompt.
     pub model: Model,
 
+    /// The conversation messages.
+    pub messages: Vec<Message>,
+
     /// The maximum number of completion tokens returned by the API. The total number of tokens requested in **max_tokens** plus the number of prompt tokens sent in messages must not exceed the context window token limit of model requested. If left unspecified, then the model will generate tokens until either it reaches its stop token or the end of its context window.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub max_tokens: Option<u64>,
@@ -200,9 +203,10 @@ pub struct CreateChatCompletion {
 }
 
 impl CreateChatCompletion {
-    pub fn new(model: Model) -> Self {
+    pub fn new(model: Model, messages: Vec<Message>) -> Self {
         Self {
             model,
+            messages,
             ..Default::default()
         }
     }
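
This is the breaking piece of the fix: the constructor now takes the conversation up front, so existing callers must be updated. A minimal before/after sketch, assuming the imports from the example above:

```rust
// Before this commit there was no way to supply messages:
// let request = CreateChatCompletion::new(Model::Llama31SonarLargeOnline);

// After it, the conversation is passed to the constructor:
let request = CreateChatCompletion::new(
    Model::Llama31SonarLargeOnline,
    vec![Message {
        role: Role::User,
        content: "Hello".to_string(),
    }],
);
```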
@@ -277,6 +281,7 @@ impl Default for CreateChatCompletion {
     fn default() -> Self {
         Self {
             model: Model::Llama31SonarLargeOnline,
+            messages: Vec::new(),
             stream: false,
             max_tokens: None,
             temperature: 0.2,
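
A side effect worth noting: `Message` and `Role` now derive `Clone`, so a conversation history can seed several requests without being rebuilt. A small sketch under the same assumed imports:

```rust
let history = vec![Message {
    role: Role::User,
    content: "Find me the best pad thai restaurant in Bangkok".to_string(),
}];

// `history.clone()` compiles only because of the Clone derive added here.
let first = CreateChatCompletion::new(Model::Llama31SonarLargeOnline, history.clone());
let second = CreateChatCompletion::new(Model::Llama31SonarLargeOnline, history);
```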
