Skip to content

Commit

Permalink
Modified the code to include all the latest models and changed OpenAI…
Browse files Browse the repository at this point in the history
… core DLL to use the one provided by OpenAI instead of a third-party library
  • Loading branch information
Tomasz Juszczak committed Oct 7, 2024
1 parent 150da25 commit cd8cc3d
Show file tree
Hide file tree
Showing 17 changed files with 155 additions and 99 deletions.
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,7 @@ riderModule.iml
/_ReSharper.Caches/
/Slack-GPT-Socket/app-log.db
/Slack-GPT-Socket/app.db

*appsettings.json
*appsettings.Development.json
/Slack-GPT-Socket/db/*
1 change: 1 addition & 0 deletions .idea/.idea.Slack-GPT-Socket/.idea/.name

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

26 changes: 16 additions & 10 deletions Slack-GPT-Socket/GptApi/GptClient.cs
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
using Microsoft.Extensions.Options;
using System.Diagnostics;
using Microsoft.Extensions.Options;
using Newtonsoft.Json;
using OpenAI;
using OpenAI.Chat;
using Slack_GPT_Socket.Settings;
using Slack_GPT_Socket.Utilities.LiteDB;

Expand Down Expand Up @@ -32,7 +35,7 @@ public GptClient(
{
Timeout = TimeSpan.FromMinutes(10)
};
_api = new OpenAIClient(settings.Value.OpenAIKey, OpenAIClientSettings.Default, httpClient);
_api = new OpenAIClient(settings.Value.OpenAIKey);
_log = log;
_gptDefaults = gptDefaults.Value;
_resolver = new GptClientResolver(customCommands, _gptDefaults, userCommandDb);
Expand All @@ -44,10 +47,10 @@ public GptClient(
/// <param name="chatPrompts">The list of chat prompts.</param>
/// <param name="userId">The user identifier.</param>
/// <returns>A task representing the asynchronous operation, with a result of the generated response.</returns>
public async Task<GptResponse> GeneratePrompt(List<WritableChatPrompt> chatPrompts, string userId)
public async Task<GptResponse> GeneratePrompt(List<WritableMessage> chatPrompts, string userId)
{
// get the last prompt
var userPrompt = chatPrompts.Last(chatPrompt => chatPrompt.Role == "user");
var userPrompt = chatPrompts.Last(chatPrompt => chatPrompt.Role == Role.User);
var prompt = GptRequest.Default(_gptDefaults);
prompt.UserId = userId;
prompt.Prompt = userPrompt.Content;
Expand All @@ -56,15 +59,18 @@ public async Task<GptResponse> GeneratePrompt(List<WritableChatPrompt> chatPromp

try
{
var result = await _api.ChatEndpoint.GetCompletionAsync(chatRequest);
_log.LogInformation("GPT response: {Response}", result.FirstChoice);
var sw = Stopwatch.StartNew();
var model = _api.GetChatClient(chatRequest.Model);
var result = await model.CompleteChatAsync(chatRequest.Messages, chatRequest.Options);
var chatCompletion = result.Value;
_log.LogInformation("GPT response: {Response}", JsonConvert.SerializeObject(chatCompletion));

return new GptResponse
{
Message = result.FirstChoice,
Model = prompt.Model,
Usage = result.Usage,
ProcessingTime = result.ProcessingTime
Message = chatCompletion.Content.Last().Text,
Model = chatCompletion.Model,
Usage = chatCompletion.Usage,
ProcessingTime = sw.Elapsed
};
}
catch (Exception e)
Expand Down
68 changes: 36 additions & 32 deletions Slack-GPT-Socket/GptApi/GptClientResolver.cs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
using System.Text.RegularExpressions;
using OpenAI;
using OpenAI.Chat;
using Slack_GPT_Socket.GptApi.ParameterResolvers;
using Slack_GPT_Socket.Settings;
Expand Down Expand Up @@ -31,9 +32,8 @@ public GptClientResolver(GptCustomCommands customCommands, GptDefaults gptDefaul
/// <param name="chatPrompts">The list of chat prompts.</param>
/// <param name="request">The GPT request.</param>
/// <returns>A ChatRequest instance.</returns>
public ChatRequest ParseRequest(List<WritableChatPrompt> chatPrompts, GptRequest request)
public (IEnumerable<ChatMessage> Messages, ChatCompletionOptions Options, string Model) ParseRequest(List<WritableMessage> chatPrompts, GptRequest request)
{
GptSystemMessageBuilder? contextMessage = null;
foreach (var chatPrompt in chatPrompts)
{
var content = GptRequest.Default(_gptDefaults);
Expand All @@ -42,43 +42,47 @@ public ChatRequest ParseRequest(List<WritableChatPrompt> chatPrompts, GptRequest
ResolveModel(ref content);
ResolveParameters(ref content);
chatPrompt.Content = content.Prompt;

// TODO Refactor this into a separate resolver.
if (content.System.IsContextMessage == ContextMessageStatus.Set) contextMessage = content.System;
else if (content.System.IsContextMessage == ContextMessageStatus.Cleared) contextMessage = null;

}

ResolveModel(ref request);
ResolveParameters(ref request);

var requestPrompts = new List<WritableMessage>();
requestPrompts.AddRange(chatPrompts);

if (contextMessage != null && !request.System.IsModified)
request.System = contextMessage;

WritableChatPrompt system;
if (request.System.ShouldReplace)
system = new WritableChatPrompt("system", "__system__", request.System.Build());
else
var messages = new List<ChatMessage>();
var options = new ChatCompletionOptions
{
system = new WritableChatPrompt("system", "__system__",
$"You are a helpful assistant. Today is {DateTime.Now:yyyy-MM-ddTHH:mm:ssZ} " + request.System.Build());
MaxOutputTokenCount = request.MaxTokens,
Temperature = request.Temperature,
TopP = request.TopP,
PresencePenalty = request.PresencePenalty,
FrequencyPenalty = request.FrequencyPenalty,
EndUserId = request.UserId,
};
foreach (var chatPrompt in chatPrompts)
{
switch (chatPrompt.Role)
{
case Role.User:
messages.Add(new UserChatMessage(chatPrompt.Content));
break;
case Role.Assistant:
messages.Add(new AssistantChatMessage(chatPrompt.Content));
break;
case Role.System:
messages.Add(new SystemChatMessage(chatPrompt.Content));
break;
case Role.Tool:
messages.Add(new ToolChatMessage(chatPrompt.Content));
break;
default:
throw new ArgumentOutOfRangeException();
}
}

var requestPrompts = new List<WritableChatPrompt>();
requestPrompts.Add(system);
requestPrompts.AddRange(chatPrompts);

var chatRequest = new ChatRequest(
requestPrompts.Select(p => new ChatPrompt(p.Role, p.Content)).ToList(),
maxTokens: request.MaxTokens,
temperature: request.Temperature,
topP: request.TopP,
presencePenalty: request.PresencePenalty,
frequencyPenalty: request.FrequencyPenalty,
model: request.Model,
user: request.UserId
);

return chatRequest;
return (messages, options, request.Model);
}

/// <summary>
Expand Down Expand Up @@ -107,7 +111,7 @@ private void ResolveModel(ref GptRequest input)
}
}

if (modelFound) return;
if (!modelFound) return;

var inputModel = input.Model;
// check if current model is valid
Expand Down
13 changes: 7 additions & 6 deletions Slack-GPT-Socket/GptApi/GptRequest.cs
Original file line number Diff line number Diff line change
Expand Up @@ -50,35 +50,36 @@ private GptRequest(){}
/// <summary>
/// Gets or sets the maximum number of tokens in the generated response.
/// </summary>
public int MaxTokens { get; set; } = 4000;
public int? MaxTokens { get; set; }

/// <summary>
/// Gets or sets the temperature for randomness in the response.
/// </summary>
public float Temperature { get; set; } = 0.7f;
public float? Temperature { get; set; }

/// <summary>
/// Gets or sets the Top-P sampling value for the response.
/// </summary>
public float TopP { get; set; } = 1f;
public float? TopP { get; set; }

/// <summary>
/// Gets or sets the presence penalty for the generated response.
/// </summary>
public float PresencePenalty { get; set; } = 0f;
public float? PresencePenalty { get; set; }

/// <summary>
/// Gets or sets the frequency penalty for the generated response.
/// </summary>
public float FrequencyPenalty { get; set; } = 0f;
public float? FrequencyPenalty { get; set; }

/// <summary>
/// Gets or sets the model used for generating the response.
/// </summary>
public string Model { get; set; } = "gpt-4";
public string Model { get; set; } = "gpt-4o";

/// <summary>
/// Gets or sets the system identifier (optional).
/// </summary>
public GptSystemMessageBuilder System { get; set; } = new();

}
3 changes: 2 additions & 1 deletion Slack-GPT-Socket/GptApi/GptResponse.cs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
using OpenAI;
using OpenAI.Chat;

namespace Slack_GPT_Socket.GptApi;

Expand All @@ -25,7 +26,7 @@ public class GptResponse
/// <summary>
/// Gets or sets the usage information (optional).
/// </summary>
public Usage? Usage { get; set; }
public ChatTokenUsage? Usage { get; set; }

/// <summary>
/// Gets or sets the processing time of the response (optional).
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,15 @@ public class MaxTokenResolver : IParameterResolver
public string BuildShortHelpText(GptDefaults gptDefaults, string userId)
{
return
$"{Name}: limits tokens in output, default {gptDefaults.MaxTokens?.ToString() ?? "4000"} (GPT-3.5: 4000, GPT-4: 8000)";
$"{Name}: limits tokens in output, default {gptDefaults.MaxTokens?.ToString() ?? "4000"} (refer to https://openai.com/api/pricing/ for more information)";
}

/// <inheritdoc />
public string BuildHelpText(GptDefaults gptDefaults, string commandName, string userId)
{
var names = string.Join("\n", Names);
return
$"{names}\n\t: limits tokens in output, default {gptDefaults.MaxTokens?.ToString() ?? "4000"} (GPT-3.5: 4000, GPT-4: 8000)\n";
$"{names}\n\t: limits tokens in output, default {gptDefaults.MaxTokens?.ToString() ?? "4000"} (refer to https://openai.com/api/pricing/ for more information)\n";
}

/// <inheritdoc />
Expand Down
4 changes: 2 additions & 2 deletions Slack-GPT-Socket/GptApi/ParameterResolvers/ModelResolver.cs
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ public void Resolve(GptRequest input, ParameterEventArgs args)
return;
}

// if no model was found, set args as has value to false
args.HasValue = false;
// because we can own a model that is not hardcoded, use input value at face value
input.Model = testValue;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,12 @@ public class ParameterManager : IEnumerable<IParameterResolver>
/// </summary>
private ModelInfo[] _models = new ModelInfo[]
{
new("o1-preview", "o1"),
new("o1-mini", "o1-mini"),
new("gpt-4o", "gpt4o"),
new("gpt-4", "gpt4"),
new("gpt-4o-mini", "gpt4o-mini"),
new("gpt-4-turbo", "gpt4-turbo"),
new("gpt-3.5-turbo", "chatgpt", "gpt-3", "gpt3", "turbo")
};

Expand Down
Original file line number Diff line number Diff line change
@@ -1,17 +1,27 @@
using OpenAI;

namespace Slack_GPT_Socket.GptApi;

/// <summary>
///     Identifies the author of a chat message, mirroring the role categories
///     used by the OpenAI chat completion API (user, assistant, system, tool).
/// </summary>
public enum Role
{
    /// <summary>A message written by the end user.</summary>
    User,
    /// <summary>A message produced by the AI assistant.</summary>
    Assistant,
    /// <summary>A system instruction that steers the assistant's behavior.</summary>
    System,
    /// <summary>A message carrying the output of a tool/function call.</summary>
    Tool,
}

/// <summary>
/// Represents a writable chat prompt used for generating AI responses.
/// </summary>
public sealed class WritableChatPrompt
public sealed class WritableMessage
{
/// <summary>
/// Initializes a new instance of the <see cref="WritableChatPrompt" /> class.
/// Initializes a new instance of the <see cref="WritableMessage" /> class.
/// </summary>
/// <param name="role">The role of the chat prompt (e.g., "user" or "system").</param>
/// <param name="userId">The userID that sent the message</param>
/// <param name="content">The content of the chat prompt.</param>
public WritableChatPrompt(string role, string userId, string content)
public WritableMessage(Role role, string userId, string content)
{
UserId = userId;
Role = role;
Expand All @@ -21,7 +31,7 @@ public WritableChatPrompt(string role, string userId, string content)
/// <summary>
/// Gets or sets the role of the chat prompt.
/// </summary>
public string Role { get; set; }
public Role Role { get; set; }

/// <summary>
/// Gets or sets the user identifier, that sent the message.
Expand Down
20 changes: 18 additions & 2 deletions Slack-GPT-Socket/Program.cs
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,24 @@

builder.Services.AddSingleton<GptClient>();
builder.Services.AddSingleton<GptCustomCommands>();
builder.Services.AddSingleton<ILiteDatabase>(x =>
new LiteDatabase(builder.Configuration.GetConnectionString("LiteDB") ?? "Filename=:memory:;Mode=Memory;Cache=Shared")
builder.Services.AddSingleton<ILiteDatabase>(x =>
{
var connectionStringRaw = builder.Configuration.GetConnectionString("LiteDB") ??
"Filename=:memory:;Mode=Memory;Cache=Shared";
var connectionString = new ConnectionString(connectionStringRaw);
if (connectionString.Filename != ":memory:")
{
var directory = Path.GetDirectoryName(connectionString.Filename);
if (!string.IsNullOrEmpty(directory))
{
Directory.CreateDirectory(directory);
}
}
var db = new LiteDatabase(connectionString);
return db;
}
);

builder.Services.AddSingleton<IUserCommandDb, UserCommandDb>();
Expand Down
7 changes: 4 additions & 3 deletions Slack-GPT-Socket/Slack-GPT-Socket.csproj
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
<Project Sdk="Microsoft.NET.Sdk.Web">

<PropertyGroup>
<TargetFramework>net7.0</TargetFramework>
<TargetFramework>net8.0</TargetFramework>
<Nullable>enable</Nullable>
<ImplicitUsings>enable</ImplicitUsings>
<RootNamespace>Slack_GPT_Socket</RootNamespace>
<DockerDefaultTargetOS>Linux</DockerDefaultTargetOS>
<LangVersion>12</LangVersion>
</PropertyGroup>

<ItemGroup>
Expand All @@ -15,9 +16,9 @@
</ItemGroup>

<ItemGroup>
<PackageReference Include="LiteDB" Version="5.0.16" />
<PackageReference Include="LiteDB" Version="5.0.21" />
<PackageReference Include="Octokit" Version="5.0.4" />
<PackageReference Include="OpenAI-DotNet" Version="6.3.2" />
<PackageReference Include="OpenAI" Version="2.0.0" />
<PackageReference Include="SlackNet" Version="0.10.22" />
<PackageReference Include="SlackNet.AspNetCore" Version="0.10.22" />
</ItemGroup>
Expand Down
Loading

0 comments on commit cd8cc3d

Please sign in to comment.