
Rename 'client' to 'provider' and LlmProvider to AIProvider for better readability
brichet committed Oct 31, 2024
1 parent 6e11c72 commit 9329b59
Showing 8 changed files with 51 additions and 51 deletions.
6 changes: 3 additions & 3 deletions schema/llm-provider.json → schema/ai-provider.json
@@ -1,12 +1,12 @@
 {
-  "title": "LLM provider",
+  "title": "AI provider",
   "description": "Provider settings",
   "type": "object",
   "properties": {
     "provider": {
       "type": "string",
-      "title": "The LLM provider",
-      "description": "The LLM provider to use for chat and completion",
+      "title": "The AI provider",
+      "description": "The AI provider to use for chat and completion",
       "default": "None",
       "enum": ["None", "MistralAI"]
     },
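For context, the composite settings object this schema produces (and that src/index.ts below passes to `setModels`) would look roughly like the following sketch; only `provider` appears in the hunk above, so the extra field is an assumption:

```ts
// Hypothetical shape of the loaded composite settings after the rename.
// "provider" comes from the schema hunk above; "apiKey" is an assumed
// additional property, shown only for illustration.
const composite = {
  provider: 'MistralAI', // one of "None" | "MistralAI"
  apiKey: '<your-api-key>' // placeholder
};
```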
20 changes: 10 additions & 10 deletions src/chat-handler.ts
@@ -25,14 +25,14 @@ export type ConnectionMessage = {
 export class ChatHandler extends ChatModel {
   constructor(options: ChatHandler.IOptions) {
     super(options);
-    this._llmClient = options.llmClient;
+    this._provider = options.provider;
   }

-  get llmClient(): BaseChatModel | null {
-    return this._llmClient;
+  get provider(): BaseChatModel | null {
+    return this._provider;
   }
-  set llmClient(client: BaseChatModel | null) {
-    this._llmClient = client;
+  set provider(provider: BaseChatModel | null) {
+    this._provider = provider;
   }

   async sendMessage(message: INewMessage): Promise<boolean> {
@@ -46,10 +46,10 @@ export class ChatHandler extends ChatModel {
     };
     this.messageAdded(msg);

-    if (this._llmClient === null) {
+    if (this._provider === null) {
       const botMsg: IChatMessage = {
         id: UUID.uuid4(),
-        body: '**Chat client not configured**',
+        body: '**AI provider not configured for the chat**',
         sender: { username: 'ERROR' },
         time: Date.now(),
         type: 'msg'
@@ -69,7 +69,7 @@ export class ChatHandler extends ChatModel {
       })
     );

-    const response = await this._llmClient.invoke(messages);
+    const response = await this._provider.invoke(messages);
     // TODO: fix deprecated response.text
     const content = response.text;
     const botMsg: IChatMessage = {
@@ -96,12 +96,12 @@ export class ChatHandler extends ChatModel {
     super.messageAdded(message);
   }

-  private _llmClient: BaseChatModel | null;
+  private _provider: BaseChatModel | null;
   private _history: IChatHistory = { messages: [] };
 }

 export namespace ChatHandler {
   export interface IOptions extends ChatModel.IOptions {
-    llmClient: BaseChatModel | null;
+    provider: BaseChatModel | null;
   }
 }
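A minimal usage sketch of the renamed chat surface, assuming `ChatMistralAI` from @langchain/mistralai as the `BaseChatModel` and eliding the other `ChatModel.IOptions` fields that src/index.ts passes:

```ts
import { ChatMistralAI } from '@langchain/mistralai';

import { ChatHandler } from './chat-handler';

// `provider` replaces the old `llmClient` constructor option; the cast
// elides the remaining ChatModel.IOptions fields for brevity.
const handler = new ChatHandler({
  provider: new ChatMistralAI({ apiKey: 'TMP', model: 'codestral-latest' })
} as ChatHandler.IOptions);

// The getter/setter pair is renamed the same way; clearing it makes the
// chat reply with '**AI provider not configured for the chat**'.
handler.provider = null;
```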
2 changes: 1 addition & 1 deletion src/completion-provider.ts
@@ -40,7 +40,7 @@ export class CompletionProvider implements IInlineCompletionProvider {
    * Get the LLM completer.
    */
   get llmCompleter(): LLM | null {
-    return this._completer?.client || null;
+    return this._completer?.provider || null;
   }

   async fetch(
34 changes: 17 additions & 17 deletions src/index.ts
@@ -15,18 +15,18 @@ import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
 import { ISettingRegistry } from '@jupyterlab/settingregistry';

 import { ChatHandler } from './chat-handler';
-import { LlmProvider } from './provider';
-import { ILlmProvider } from './token';
+import { AIProvider } from './provider';
+import { IAIProvider } from './token';

 const chatPlugin: JupyterFrontEndPlugin<void> = {
   id: 'jupyterlab-codestral:chat',
   description: 'LLM chat extension',
   autoStart: true,
   optional: [INotebookTracker, ISettingRegistry, IThemeManager],
-  requires: [ILlmProvider, IRenderMimeRegistry],
+  requires: [IAIProvider, IRenderMimeRegistry],
   activate: async (
     app: JupyterFrontEnd,
-    llmProvider: ILlmProvider,
+    aiProvider: IAIProvider,
     rmRegistry: IRenderMimeRegistry,
     notebookTracker: INotebookTracker | null,
     settingsRegistry: ISettingRegistry | null,
@@ -41,12 +41,12 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
     }

     const chatHandler = new ChatHandler({
-      llmClient: llmProvider.chatModel,
+      provider: aiProvider.chatModel,
       activeCellManager: activeCellManager
     });

-    llmProvider.modelChange.connect(() => {
-      chatHandler.llmClient = llmProvider.chatModel;
+    aiProvider.modelChange.connect(() => {
+      chatHandler.provider = aiProvider.chatModel;
     });

     let sendWithShiftEnter = false;
@@ -94,38 +94,38 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
   }
 };

-const llmProviderPlugin: JupyterFrontEndPlugin<ILlmProvider> = {
-  id: 'jupyterlab-codestral:llm-provider',
+const aiProviderPlugin: JupyterFrontEndPlugin<IAIProvider> = {
+  id: 'jupyterlab-codestral:ai-provider',
   autoStart: true,
   requires: [ICompletionProviderManager, ISettingRegistry],
-  provides: ILlmProvider,
+  provides: IAIProvider,
   activate: (
     app: JupyterFrontEnd,
     manager: ICompletionProviderManager,
     settingRegistry: ISettingRegistry
-  ): ILlmProvider => {
-    const llmProvider = new LlmProvider({ completionProviderManager: manager });
+  ): IAIProvider => {
+    const aiProvider = new AIProvider({ completionProviderManager: manager });

     settingRegistry
-      .load(llmProviderPlugin.id)
+      .load(aiProviderPlugin.id)
       .then(settings => {
         const updateProvider = () => {
           const provider = settings.get('provider').composite as string;
-          llmProvider.setModels(provider, settings.composite);
+          aiProvider.setModels(provider, settings.composite);
         };

         settings.changed.connect(() => updateProvider());
         updateProvider();
       })
       .catch(reason => {
         console.error(
-          `Failed to load settings for ${llmProviderPlugin.id}`,
+          `Failed to load settings for ${aiProviderPlugin.id}`,
           reason
         );
       });

-    return llmProvider;
+    return aiProvider;
   }
 };

-export default [chatPlugin, llmProviderPlugin];
+export default [chatPlugin, aiProviderPlugin];
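Downstream plugins now request the renamed token; a minimal consumer sketch (the plugin id is hypothetical):

```ts
import {
  JupyterFrontEnd,
  JupyterFrontEndPlugin
} from '@jupyterlab/application';

import { IAIProvider } from './token';

const consumerPlugin: JupyterFrontEndPlugin<void> = {
  id: 'my-extension:ai-consumer', // hypothetical id
  autoStart: true,
  requires: [IAIProvider],
  activate: (app: JupyterFrontEnd, aiProvider: IAIProvider): void => {
    // React to model changes the same way the chat plugin above does.
    aiProvider.modelChange.connect(() => {
      console.log(`AI provider changed to: ${aiProvider.name}`);
    });
  }
};

export default consumerPlugin;
```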
2 changes: 1 addition & 1 deletion src/llm-models/base-completer.ts
@@ -8,7 +8,7 @@ export interface IBaseCompleter {
   /**
    * The LLM completer.
    */
-  client: LLM;
+  provider: LLM;

   /**
    * The fetch request for the LLM completer.
12 changes: 6 additions & 6 deletions src/llm-models/codestral-completer.ts
@@ -16,12 +16,12 @@ const INTERVAL = 1000;

 export class CodestralCompleter implements IBaseCompleter {
   constructor() {
-    this._mistralClient = new MistralAI({
+    this._mistralProvider = new MistralAI({
       apiKey: 'TMP',
       model: 'codestral-latest'
     });
     this._throttler = new Throttler(async (data: CompletionRequest) => {
-      const response = await this._mistralClient.completionWithRetry(
+      const response = await this._mistralProvider.completionWithRetry(
         data,
         {},
         false
@@ -36,8 +36,8 @@ export class CodestralCompleter implements IBaseCompleter {
     }, INTERVAL);
   }

-  get client(): LLM {
-    return this._mistralClient;
+  get provider(): LLM {
+    return this._mistralProvider;
   }

   async fetch(
@@ -51,7 +51,7 @@ export class CodestralCompleter implements IBaseCompleter {
     const data = {
       prompt,
       suffix,
-      model: this._mistralClient.model,
+      model: this._mistralProvider.model,
       // temperature: 0,
       // top_p: 1,
       // max_tokens: 1024,
@@ -70,5 +70,5 @@ export class CodestralCompleter implements IBaseCompleter {
   }

   private _throttler: Throttler;
-  private _mistralClient: MistralAI;
+  private _mistralProvider: MistralAI;
 }
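The throttling pattern used above, shown in isolation: a sketch with @lumino/polling's `Throttler`, where a stand-in function replaces `completionWithRetry` so rapid keystrokes collapse into at most one provider call per interval:

```ts
import { Throttler } from '@lumino/polling';

const INTERVAL = 1000;

// Stand-in for this._mistralProvider.completionWithRetry(...).
const complete = async (prompt: string): Promise<string> =>
  `completion for: ${prompt}`;

const throttler = new Throttler(complete, INTERVAL);

// Invocations arriving faster than INTERVAL are coalesced, so the
// provider is not hit on every keystroke.
void throttler.invoke('def fib(n):');
```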
16 changes: 8 additions & 8 deletions src/provider.ts
@@ -6,10 +6,10 @@ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';

 import { CompletionProvider } from './completion-provider';
 import { getChatModel, IBaseCompleter } from './llm-models';
-import { ILlmProvider } from './token';
+import { IAIProvider } from './token';

-export class LlmProvider implements ILlmProvider {
-  constructor(options: LlmProvider.IOptions) {
+export class AIProvider implements IAIProvider {
+  constructor(options: AIProvider.IOptions) {
     this._completionProvider = new CompletionProvider({ name: 'None' });
     options.completionProviderManager.registerInlineProvider(
       this._completionProvider
@@ -57,26 +57,26 @@

     // Update the inline completion provider settings.
     if (this._completionProvider.llmCompleter) {
-      LlmProvider.updateConfig(this._completionProvider.llmCompleter, settings);
+      AIProvider.updateConfig(this._completionProvider.llmCompleter, settings);
     }

     // Update the chat LLM settings.
     if (this._llmChatModel) {
-      LlmProvider.updateConfig(this._llmChatModel, settings);
+      AIProvider.updateConfig(this._llmChatModel, settings);
     }
   }

-  get modelChange(): ISignal<ILlmProvider, void> {
+  get modelChange(): ISignal<IAIProvider, void> {
     return this._modelChange;
   }

   private _completionProvider: CompletionProvider;
   private _llmChatModel: BaseChatModel | null = null;
   private _name: string = 'None';
-  private _modelChange = new Signal<ILlmProvider, void>(this);
+  private _modelChange = new Signal<IAIProvider, void>(this);
 }

-export namespace LlmProvider {
+export namespace AIProvider {
   /**
    * The options for the LLM provider.
    */
10 changes: 5 additions & 5 deletions src/token.ts
@@ -4,14 +4,14 @@ import { ISignal } from '@lumino/signaling';

 import { IBaseCompleter } from './llm-models';

-export interface ILlmProvider {
+export interface IAIProvider {
   name: string | null;
   completer: IBaseCompleter | null;
   chatModel: BaseChatModel | null;
-  modelChange: ISignal<ILlmProvider, void>;
+  modelChange: ISignal<IAIProvider, void>;
 }

-export const ILlmProvider = new Token<ILlmProvider>(
-  'jupyterlab-codestral:LlmProvider',
-  'Provider for chat and completion LLM client'
+export const IAIProvider = new Token<IAIProvider>(
+  'jupyterlab-codestral:AIProvider',
+  'Provider for chat and completion LLM provider'
 );
