Merge pull request #372 from reorproject/use-vercel-ai-in-renderer
Use vercel ai in renderer
samlhuillier authored Aug 25, 2024
2 parents 51aecbc + bb5a559 commit e2159ab
Showing 37 changed files with 1,480 additions and 1,541 deletions.
2 changes: 1 addition & 1 deletion .husky/pre-commit
@@ -1,2 +1,2 @@
echo "\nRunning some lint checks on your (hopefully) beautiful code 😅\n"
echo "\nRunning some lint checks on your (hopefully) beautiful code\n"
npm run lint:fix && npm run type-check
10 changes: 2 additions & 8 deletions electron/main/electron-store/ipcHandlers.ts
@@ -14,7 +14,7 @@ import {
import WindowsManager from '../common/windowManager'

import { initializeAndMaybeMigrateStore } from './storeSchemaMigrator'
-import { ChatHistory } from '@/components/Chat/chatUtils'
+import { Chat } from '@/components/Chat/chatUtils'

export const registerStoreHandlers = (store: Store<StoreSchema>, windowsManager: WindowsManager) => {
  initializeAndMaybeMigrateStore(store)
@@ -87,12 +87,6 @@ export const registerStoreHandlers = (store: Store<StoreSchema>, windowsManager:

  ipcMain.handle('get-default-embedding-model', () => store.get(StoreKeys.DefaultEmbeddingModelAlias))

-  ipcMain.handle('get-hardware-config', () => store.get(StoreKeys.Hardware))
-
-  ipcMain.handle('set-hardware-config', (event, hardwareConfig) => {
-    store.set(StoreKeys.Hardware, hardwareConfig)
-  })

  ipcMain.handle('set-llm-generation-params', (event, generationParams) => {
    store.set(StoreKeys.LLMGenerationParameters, generationParams)
  })
@@ -156,7 +150,7 @@ export const registerStoreHandlers = (store: Store<StoreSchema>, windowsManager:
    return chatHistoriesCorrespondingToVault
  })

-  ipcMain.handle('update-chat-history', (event, newChat: ChatHistory) => {
+  ipcMain.handle('update-chat-history', (event, newChat: Chat) => {
    const vaultDir = windowsManager.getVaultDirectoryForWinContents(event.sender)
    const allChatHistories = store.get(StoreKeys.ChatHistories)
    if (!vaultDir) {
45 changes: 14 additions & 31 deletions electron/main/electron-store/storeConfig.ts
@@ -1,27 +1,20 @@
-import { ChatHistory } from '@/components/Chat/chatUtils'
+import { Chat } from '@/components/Chat/chatUtils'

-export interface BaseLLMConfig {
-  modelName: string
-  contextLength: number
-  errorMsg?: string
-}
+export type APIInterface = 'openai' | 'anthropic'

-export interface OpenAILLMConfig extends BaseLLMConfig {
-  type: 'openai'
-  engine: 'openai'
-  apiURL: string
-  apiKey: string
+export interface LLMAPIConfig {
+  name: string
+  apiInterface: APIInterface
+  apiURL?: string
+  apiKey?: string
}

-export interface AnthropicLLMConfig extends BaseLLMConfig {
-  type: 'anthropic'
-  engine: 'anthropic'
-  apiURL: string
-  apiKey: string
+export interface LLMConfig {
+  modelName: string
+  apiName: string
+  contextLength?: number
}

-export type LLMConfig = OpenAILLMConfig | AnthropicLLMConfig
-
export type LLMGenerationParameters = {
  maxTokens?: number
  temperature?: number
@@ -38,15 +31,6 @@ export interface EmbeddingModelWithLocalPath {
  type: 'local'
  localPath: string
}
-export type RAGConfig = {
-  maxRAGExamples: number
-}
-
-export type HardwareConfig = {
-  useGPU: boolean
-  useCUDA: boolean
-  useVulkan: boolean
-}

export type Tab = {
  id: string // Unique ID for the tab, useful for operations
@@ -65,16 +49,15 @@ export interface StoreSchema {
    directoryFromPreviousSession?: string
  }
  LLMs: LLMConfig[]
+  LLMAPIs: LLMAPIConfig[]
  embeddingModels: {
    [modelAlias: string]: EmbeddingModelConfig
  }
  defaultLLM: string
  defaultEmbedFuncRepo: string
-  RAG?: RAGConfig
-  hardware: HardwareConfig
  llmGenerationParameters: LLMGenerationParameters
  chatHistories: {
-    [vaultDir: string]: ChatHistory[]
+    [vaultDir: string]: Chat[]
  }
  analytics?: boolean
  chunkSize: number
@@ -91,11 +74,11 @@ export enum StoreKeys {
  SchemaVersion = 'schemaVersion',
  DirectoryFromPreviousSession = 'user.directoryFromPreviousSession',
  LLMs = 'LLMs',
+  LLMAPIs = 'LLMAPIs',
  EmbeddingModels = 'embeddingModels',
  DefaultLLM = 'defaultLLM',
  DefaultEmbeddingModelAlias = 'defaultEmbeddingModelAlias',
  MaxRAGExamples = 'RAG.maxRAGExamples',
-  Hardware = 'hardware',
  LLMGenerationParameters = 'llmGenerationParameters',
  ChatHistories = 'chatHistories',
  ChunkSize = 'chunkSize',
53 changes: 38 additions & 15 deletions electron/main/electron-store/storeSchemaMigrator.ts
@@ -2,8 +2,9 @@ import Store from 'electron-store'

import { StoreKeys, StoreSchema } from './storeConfig'
import { defaultEmbeddingModelRepos } from '../vector-database/embeddings'
+import { defaultOllamaAPI } from '../llm/models/Ollama'

-const currentSchemaVersion = 1
+const currentSchemaVersion = 2

const setupDefaultAnalyticsValue = (store: Store<StoreSchema>) => {
  if (store.get(StoreKeys.Analytics) === undefined) {
@@ -17,18 +18,6 @@ const setupDefaultSpellCheckValue = (store: Store<StoreSchema>) => {
  }
}

-const setupDefaultHardwareConfig = (store: Store<StoreSchema>) => {
-  const hardwareConfig = store.get(StoreKeys.Hardware)
-
-  if (!hardwareConfig) {
-    store.set(StoreKeys.Hardware, {
-      useGPU: process.platform === 'darwin' && process.arch === 'arm64',
-      useCUDA: false,
-      useVulkan: false,
-    })
-  }
-}
-
const setupDefaultEmbeddingModels = (store: Store<StoreSchema>) => {
  const embeddingModels = store.get(StoreKeys.EmbeddingModels)

@@ -46,6 +35,15 @@ const setupDefaultEmbeddingModels = (store: Store<StoreSchema>) => {
  }
}

+export const setupDefaultLLMAPIs = (store: Store<StoreSchema>) => {
+  const llmAPIs = store.get(StoreKeys.LLMAPIs)
+
+  const existingOllamaAPI = llmAPIs?.find((api) => api.name === defaultOllamaAPI.name)
+  if (!existingOllamaAPI) {
+    store.set(StoreKeys.LLMAPIs, [defaultOllamaAPI])
+  }
+}
+
export function setupDefaultStoreValues(store: Store<StoreSchema>) {
  if (!store.get(StoreKeys.MaxRAGExamples)) {
    store.set(StoreKeys.MaxRAGExamples, 15)
@@ -61,15 +59,40 @@ export function setupDefaultStoreValues(store: Store<StoreSchema>) {

  setupDefaultEmbeddingModels(store)

-  setupDefaultHardwareConfig(store)
+  setupDefaultLLMAPIs(store)
}

+function ensureChatHistoryIsCorrectProperty(store: Store<StoreSchema>) {
+  const chatHistories = store.get(StoreKeys.ChatHistories)
+  if (!chatHistories) {
+    return
+  }
+
+  Object.keys(chatHistories).forEach((vaultDir) => {
+    const chats = chatHistories[vaultDir]
+    chats.map((chat) => {
+      const outputChat = chat
+      if (chat.displayableChatHistory) {
+        outputChat.messages = chat.displayableChatHistory
+        delete outputChat.displayableChatHistory
+      }
+      return outputChat
+    })
+    chatHistories[vaultDir] = chats
+  })
+
+  store.set(StoreKeys.ChatHistories, chatHistories)
+}
+
export const initializeAndMaybeMigrateStore = (store: Store<StoreSchema>) => {
  const storeSchemaVersion = store.get(StoreKeys.SchemaVersion)
  if (storeSchemaVersion !== currentSchemaVersion) {
    store.set(StoreKeys.SchemaVersion, currentSchemaVersion)
    store.set(StoreKeys.LLMs, [])
+    store.set(StoreKeys.LLMAPIs, [])
    store.set(StoreKeys.DefaultLLM, '')
  }

+  ensureChatHistoryIsCorrectProperty(store)
+
  setupDefaultStoreValues(store)
}
117 changes: 2 additions & 115 deletions electron/main/filesystem/ipcHandlers.ts
@@ -5,11 +5,7 @@ import { ipcMain, BrowserWindow, dialog } from 'electron'
import Store from 'electron-store'

import WindowsManager from '../common/windowManager'
-import { StoreKeys, StoreSchema } from '../electron-store/storeConfig'
-import { createPromptWithContextLimitFromContent, PromptWithContextLimit } from '../llm/contextLimit'
-import { ollamaService, openAISession } from '../llm/ipcHandlers'
-import { getLLMConfig } from '../llm/llmConfig'
-import addExtensionToFilenameIfNoExtensionPresent from '../path/path'
+import { StoreSchema } from '../electron-store/storeConfig'
import { DBEntry } from '../vector-database/schema'
import {
  convertFileInfoListToDBItems,
@@ -22,12 +18,10 @@ import {
  createFileRecursive,
  isHidden,
  GetFilesInfoListForListOfPaths,
-  GetFilesInfoList,
-  markdownExtensions,
  startWatchingDirectory,
  updateFileListForRenderer,
} from './filesystem'
-import { FileInfoTree, AugmentPromptWithFileProps, WriteFileProps, RenameFileProps } from './types'
+import { FileInfoTree, WriteFileProps, RenameFileProps } from './types'

const registerFileHandlers = (store: Store<StoreSchema>, _windowsManager: WindowsManager) => {
  const windowsManager = _windowsManager
@@ -170,28 +164,6 @@ const registerFileHandlers = (store: Store<StoreSchema>, _windowsManager: Window
    orchestrateEntryMove(windowInfo.dbTableClient, sourcePath, destinationPath)
  })

-  ipcMain.handle(
-    'augment-prompt-with-file',
-    async (_event, { prompt, llmName, filePath }: AugmentPromptWithFileProps): Promise<PromptWithContextLimit> => {
-      const content = fs.readFileSync(filePath, 'utf-8')
-
-      const llmSession = openAISession
-      const llmConfig = await getLLMConfig(store, ollamaService, llmName)
-      if (!llmConfig) {
-        throw new Error(`LLM ${llmName} not configured.`)
-      }
-      const systemPrompt = 'Based on the following information:\n'
-      const { prompt: filePrompt, contextCutoffAt } = createPromptWithContextLimitFromContent(
-        content,
-        systemPrompt,
-        prompt,
-        llmSession.getTokenizer(llmName),
-        llmConfig.contextLength,
-      )
-      return { prompt: filePrompt, contextCutoffAt }
-    },
-  )
-
  ipcMain.handle('get-filesystem-paths-as-db-items', async (_event, filePaths: string[]): Promise<DBEntry[]> => {
    const fileItems = GetFilesInfoListForListOfPaths(filePaths)

@@ -200,96 +172,11 @@ const registerFileHandlers = (store: Store<StoreSchema>, _windowsManager: Window
    return dbItems.flat()
  })

-  ipcMain.handle(
-    'generate-flashcards-from-file',
-    async (event, { prompt, llmName, filePath }: AugmentPromptWithFileProps): Promise<string> => {
-      // actual response required { question: string, answer: string} []
-      const llmSession = openAISession
-
-      const llmConfig = await getLLMConfig(store, ollamaService, llmName)
-
-      if (!llmConfig) {
-        throw new Error(`LLM ${llmName} not configured.`)
-      }
-      if (!filePath) {
-        throw new Error('Current file path is not provided for flashcard agent.')
-      }
-      const fileResults = fs.readFileSync(filePath, 'utf-8')
-      const { prompt: promptToCreateAtomicFacts } = createPromptWithContextLimitFromContent(
-        fileResults,
-        '',
-        `Extract atomic facts that can be used for students to study, based on this query: ${prompt}`,
-        llmSession.getTokenizer(llmName),
-        llmConfig.contextLength,
-      )
-      const llmGeneratedFacts = await llmSession.response(
-        llmName,
-        llmConfig,
-        [
-          {
-            role: 'system',
-            content: `You are an experienced teacher reading through some notes a student has made and extracting atomic facts. You never come up with your own facts. You generate atomic facts directly from what you read.
-An atomic fact is a fact that relates to a single piece of knowledge and makes it easy to create a question for which the atomic fact is the answer"`,
-          },
-          {
-            role: 'user',
-            content: promptToCreateAtomicFacts,
-          },
-        ],
-        false,
-        store.get(StoreKeys.LLMGenerationParameters),
-      )
-
-      const basePrompt = 'Given the following atomic facts:\n'
-      const flashcardQuery =
-        'Create useful FLASHCARDS that can be used for students to study using ONLY the context. Format is Q: <insert question> A: <insert answer>.'
-      const { prompt: promptToCreateFlashcardsWithAtomicFacts } = createPromptWithContextLimitFromContent(
-        llmGeneratedFacts.choices[0].message.content || '',
-        basePrompt,
-        flashcardQuery,
-        llmSession.getTokenizer(llmName),
-        llmConfig.contextLength,
-      )
-
-      // call the query to respond
-      const llmGeneratedFlashcards = await llmSession.response(
-        llmName,
-        llmConfig,
-        [
-          {
-            role: 'system',
-            content: `You are an experienced teacher that is reading some facts given to you so that you can generate flashcards as JSON for your student for review.
-You never come up with your own facts. You will generate flashcards using the atomic facts given.
-An atomic fact is a fact that relates to a single piece of knowledge and makes it easy to create a question for which the atomic fact is the answer"`,
-          },
-          {
-            role: 'user',
-            content: promptToCreateFlashcardsWithAtomicFacts,
-          },
-        ],
-        true,
-        store.get(StoreKeys.LLMGenerationParameters),
-      )
-      const content = llmGeneratedFlashcards.choices[0].message.content || ''
-      return content
-    },
-  )
-
  ipcMain.handle('get-files-in-directory', (event, dirName: string) => {
    const itemsInDir = fs.readdirSync(dirName).filter((item) => !isHidden(item))
    return itemsInDir
  })

-  ipcMain.handle('get-files-in-directory-recursive', (event, dirName: string) => {
-    const fileNameSet = new Set<string>()
-
-    const fileList = GetFilesInfoList(dirName)
-    fileList.forEach((file) => {
-      fileNameSet.add(addExtensionToFilenameIfNoExtensionPresent(file.path, markdownExtensions, '.md'))
-    })
-    return Array.from(fileNameSet)
-  })
-
  ipcMain.handle('open-directory-dialog', async () => {
    const result = await dialog.showOpenDialog({
      properties: ['openDirectory', 'createDirectory'],
