diff --git a/schema/llm-provider.json b/schema/ai-provider.json
similarity index 74%
rename from schema/llm-provider.json
rename to schema/ai-provider.json
index c85bf43..d4b9a04 100644
--- a/schema/llm-provider.json
+++ b/schema/ai-provider.json
@@ -1,12 +1,12 @@
 {
-  "title": "LLM provider",
+  "title": "AI provider",
   "description": "Provider settings",
   "type": "object",
   "properties": {
     "provider": {
       "type": "string",
-      "title": "The LLM provider",
-      "description": "The LLM provider to use for chat and completion",
+      "title": "The AI provider",
+      "description": "The AI provider to use for chat and completion",
       "default": "None",
       "enum": ["None", "MistralAI"]
     },
diff --git a/src/chat-handler.ts b/src/chat-handler.ts
index 0191302..18417f6 100644
--- a/src/chat-handler.ts
+++ b/src/chat-handler.ts
@@ -25,14 +25,14 @@ export type ConnectionMessage = {
 export class ChatHandler extends ChatModel {
   constructor(options: ChatHandler.IOptions) {
     super(options);
-    this._llmClient = options.llmClient;
+    this._provider = options.provider;
   }
 
-  get llmClient(): BaseChatModel | null {
-    return this._llmClient;
+  get provider(): BaseChatModel | null {
+    return this._provider;
   }
-  set llmClient(client: BaseChatModel | null) {
-    this._llmClient = client;
+  set provider(provider: BaseChatModel | null) {
+    this._provider = provider;
   }
 
   async sendMessage(message: INewMessage): Promise<boolean> {
@@ -46,10 +46,10 @@ export class ChatHandler extends ChatModel {
     };
     this.messageAdded(msg);
 
-    if (this._llmClient === null) {
+    if (this._provider === null) {
       const botMsg: IChatMessage = {
         id: UUID.uuid4(),
-        body: '**Chat client not configured**',
+        body: '**AI provider not configured for the chat**',
         sender: { username: 'ERROR' },
         time: Date.now(),
         type: 'msg'
@@ -69,7 +69,7 @@ export class ChatHandler extends ChatModel {
       })
     );
 
-    const response = await this._llmClient.invoke(messages);
+    const response = await this._provider.invoke(messages);
     // TODO: fix deprecated response.text
     const content = response.text;
     const botMsg: IChatMessage = {
@@ -96,12 +96,12 @@ export class ChatHandler extends ChatModel {
     super.messageAdded(message);
   }
 
-  private _llmClient: BaseChatModel | null;
+  private _provider: BaseChatModel | null;
   private _history: IChatHistory = { messages: [] };
 }
 
 export namespace ChatHandler {
   export interface IOptions extends ChatModel.IOptions {
-    llmClient: BaseChatModel | null;
+    provider: BaseChatModel | null;
   }
 }
diff --git a/src/completion-provider.ts b/src/completion-provider.ts
index 53b2051..b2ac0b1 100644
--- a/src/completion-provider.ts
+++ b/src/completion-provider.ts
@@ -40,7 +40,7 @@ export class CompletionProvider implements IInlineCompletionProvider {
    * Get the LLM completer.
    */
   get llmCompleter(): LLM | null {
-    return this._completer?.client || null;
+    return this._completer?.provider || null;
   }
 
   async fetch(
diff --git a/src/index.ts b/src/index.ts
index fa939a3..2cc8bdc 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -15,18 +15,18 @@ import { IRenderMimeRegistry } from '@jupyterlab/rendermime';
 import { ISettingRegistry } from '@jupyterlab/settingregistry';
 
 import { ChatHandler } from './chat-handler';
-import { LlmProvider } from './provider';
-import { ILlmProvider } from './token';
+import { AIProvider } from './provider';
+import { IAIProvider } from './token';
 
 const chatPlugin: JupyterFrontEndPlugin<void> = {
   id: 'jupyterlab-codestral:chat',
   description: 'LLM chat extension',
   autoStart: true,
   optional: [INotebookTracker, ISettingRegistry, IThemeManager],
-  requires: [ILlmProvider, IRenderMimeRegistry],
+  requires: [IAIProvider, IRenderMimeRegistry],
   activate: async (
     app: JupyterFrontEnd,
-    llmProvider: ILlmProvider,
+    aiProvider: IAIProvider,
     rmRegistry: IRenderMimeRegistry,
     notebookTracker: INotebookTracker | null,
     settingsRegistry: ISettingRegistry | null,
@@ -41,12 +41,12 @@ const chatPlugin: JupyterFrontEndPlugin<void> = {
     }
 
     const chatHandler = new ChatHandler({
-      llmClient: llmProvider.chatModel,
+      provider: aiProvider.chatModel,
       activeCellManager: activeCellManager
     });
 
-    llmProvider.modelChange.connect(() => {
-      chatHandler.llmClient = llmProvider.chatModel;
+    aiProvider.modelChange.connect(() => {
+      chatHandler.provider = aiProvider.chatModel;
     });
 
     let sendWithShiftEnter = false;
@@ -94,24 +94,24 @@
   }
 };
 
-const llmProviderPlugin: JupyterFrontEndPlugin<ILlmProvider> = {
-  id: 'jupyterlab-codestral:llm-provider',
+const aiProviderPlugin: JupyterFrontEndPlugin<IAIProvider> = {
+  id: 'jupyterlab-codestral:ai-provider',
   autoStart: true,
   requires: [ICompletionProviderManager, ISettingRegistry],
-  provides: ILlmProvider,
+  provides: IAIProvider,
   activate: (
     app: JupyterFrontEnd,
     manager: ICompletionProviderManager,
     settingRegistry: ISettingRegistry
-  ): ILlmProvider => {
-    const llmProvider = new LlmProvider({ completionProviderManager: manager });
+  ): IAIProvider => {
+    const aiProvider = new AIProvider({ completionProviderManager: manager });
 
     settingRegistry
-      .load(llmProviderPlugin.id)
+      .load(aiProviderPlugin.id)
       .then(settings => {
         const updateProvider = () => {
           const provider = settings.get('provider').composite as string;
-          llmProvider.setModels(provider, settings.composite);
+          aiProvider.setModels(provider, settings.composite);
         };
 
         settings.changed.connect(() => updateProvider());
@@ -119,13 +119,13 @@ const llmProviderPlugin: JupyterFrontEndPlugin<ILlmProvider> = {
       })
       .catch(reason => {
         console.error(
-          `Failed to load settings for ${llmProviderPlugin.id}`,
+          `Failed to load settings for ${aiProviderPlugin.id}`,
           reason
         );
       });
 
-    return llmProvider;
+    return aiProvider;
   }
 };
 
-export default [chatPlugin, llmProviderPlugin];
+export default [chatPlugin, aiProviderPlugin];
diff --git a/src/llm-models/base-completer.ts b/src/llm-models/base-completer.ts
index 8374db4..498abf6 100644
--- a/src/llm-models/base-completer.ts
+++ b/src/llm-models/base-completer.ts
@@ -8,7 +8,7 @@ export interface IBaseCompleter {
   /**
    * The LLM completer.
    */
-  client: LLM;
+  provider: LLM;
 
   /**
    * The fetch request for the LLM completer.
diff --git a/src/llm-models/codestral-completer.ts b/src/llm-models/codestral-completer.ts
index 8f3e6ee..f1168c8 100644
--- a/src/llm-models/codestral-completer.ts
+++ b/src/llm-models/codestral-completer.ts
@@ -16,12 +16,12 @@ const INTERVAL = 1000;
 
 export class CodestralCompleter implements IBaseCompleter {
   constructor() {
-    this._mistralClient = new MistralAI({
+    this._mistralProvider = new MistralAI({
       apiKey: 'TMP',
       model: 'codestral-latest'
     });
     this._throttler = new Throttler(async (data: CompletionRequest) => {
-      const response = await this._mistralClient.completionWithRetry(
+      const response = await this._mistralProvider.completionWithRetry(
         data,
         {},
         false
@@ -36,8 +36,8 @@ export class CodestralCompleter implements IBaseCompleter {
     }, INTERVAL);
   }
 
-  get client(): LLM {
-    return this._mistralClient;
+  get provider(): LLM {
+    return this._mistralProvider;
   }
 
   async fetch(
@@ -51,7 +51,7 @@ export class CodestralCompleter implements IBaseCompleter {
     const data = {
       prompt,
       suffix,
-      model: this._mistralClient.model,
+      model: this._mistralProvider.model,
       // temperature: 0,
       // top_p: 1,
       // max_tokens: 1024,
@@ -70,5 +70,5 @@ export class CodestralCompleter implements IBaseCompleter {
   }
 
   private _throttler: Throttler;
-  private _mistralClient: MistralAI;
+  private _mistralProvider: MistralAI;
 }
diff --git a/src/provider.ts b/src/provider.ts
index 1eed586..de88ba3 100644
--- a/src/provider.ts
+++ b/src/provider.ts
@@ -6,10 +6,10 @@ import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
 
 import { CompletionProvider } from './completion-provider';
 import { getChatModel, IBaseCompleter } from './llm-models';
-import { ILlmProvider } from './token';
+import { IAIProvider } from './token';
 
-export class LlmProvider implements ILlmProvider {
-  constructor(options: LlmProvider.IOptions) {
+export class AIProvider implements IAIProvider {
+  constructor(options: AIProvider.IOptions) {
     this._completionProvider = new CompletionProvider({ name: 'None' });
     options.completionProviderManager.registerInlineProvider(
       this._completionProvider
@@ -57,26 +57,26 @@
 
     // Update the inline completion provider settings.
     if (this._completionProvider.llmCompleter) {
-      LlmProvider.updateConfig(this._completionProvider.llmCompleter, settings);
+      AIProvider.updateConfig(this._completionProvider.llmCompleter, settings);
     }
 
     // Update the chat LLM settings.
     if (this._llmChatModel) {
-      LlmProvider.updateConfig(this._llmChatModel, settings);
+      AIProvider.updateConfig(this._llmChatModel, settings);
     }
   }
 
-  get modelChange(): ISignal<ILlmProvider, void> {
+  get modelChange(): ISignal<IAIProvider, void> {
     return this._modelChange;
   }
 
   private _completionProvider: CompletionProvider;
   private _llmChatModel: BaseChatModel | null = null;
   private _name: string = 'None';
-  private _modelChange = new Signal<ILlmProvider, void>(this);
+  private _modelChange = new Signal<IAIProvider, void>(this);
 }
 
-export namespace LlmProvider {
+export namespace AIProvider {
   /**
    * The options for the LLM provider.
    */
diff --git a/src/token.ts b/src/token.ts
index 3148938..626be4a 100644
--- a/src/token.ts
+++ b/src/token.ts
@@ -4,14 +4,14 @@ import { ISignal } from '@lumino/signaling';
 
 import { IBaseCompleter } from './llm-models';
 
-export interface ILlmProvider {
+export interface IAIProvider {
   name: string | null;
   completer: IBaseCompleter | null;
   chatModel: BaseChatModel | null;
-  modelChange: ISignal<ILlmProvider, void>;
+  modelChange: ISignal<IAIProvider, void>;
 }
 
-export const ILlmProvider = new Token<ILlmProvider>(
-  'jupyterlab-codestral:LlmProvider',
-  'Provider for chat and completion LLM client'
+export const IAIProvider = new Token<IAIProvider>(
+  'jupyterlab-codestral:AIProvider',
+  'Provider for chat and completion LLM provider'
 );
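
For reference, a minimal sketch of how a downstream extension could consume the renamed IAIProvider token after this change. The plugin id and the package import path are illustrative assumptions; only the token and its IAIProvider interface (name, completer, chatModel, modelChange) come from the diff above.

import {
  JupyterFrontEnd,
  JupyterFrontEndPlugin
} from '@jupyterlab/application';

// Assumption: the token is re-exported from the package entry point.
import { IAIProvider } from 'jupyterlab-codestral';

const consumerPlugin: JupyterFrontEndPlugin<void> = {
  // Hypothetical plugin id, for illustration only.
  id: 'my-extension:ai-provider-consumer',
  autoStart: true,
  requires: [IAIProvider],
  activate: (app: JupyterFrontEnd, aiProvider: IAIProvider) => {
    // React whenever setModels() swaps the chat/completion models.
    aiProvider.modelChange.connect(provider => {
      console.log(`AI provider is now: ${provider.name}`);
    });
  }
};

export default consumerPlugin;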