diff --git a/vscode-extension/src/extension.ts b/vscode-extension/src/extension.ts
index a2bddca6b..f2a9c06e4 100644
--- a/vscode-extension/src/extension.ts
+++ b/vscode-extension/src/extension.ts
@@ -59,7 +59,7 @@ export function activate(context: vscode.ExtensionContext) {
     vscode.commands.registerCommand(
       COMMANDS.SETUP_ENVIRONMENT_VARIABLES,
       () => {
-        setupEnvironmentVariables();
+        setupEnvironmentVariables(context);
       }
     )
   );
@@ -723,7 +723,7 @@ async function checkPip() {
  * 2) If .env file doesn't exist
  *    b) Add template file containing helper lines from 1a above
  */
-async function setupEnvironmentVariables() {
+async function setupEnvironmentVariables(context: vscode.ExtensionContext) {
   // Use home dir because env variables should be global. I get the argument
   // for having in the workspace dir. I personally feel this is more
   // annoying to setup every time you create a new project when using the
@@ -741,6 +741,8 @@
       } else if (!text.endsWith(".env")) {
         return "File path must end in .env file";
       }
+      // TODO: Check that file path is a "/.env" file (linux) or "\.env" (Windows)
+
       // TODO: Check that env path is contained within workspace hierarchy
       // (Ex: can't have .env file in a sibling dir otherwise AIConfig
       // loadenv can't read it)
@@ -756,53 +758,43 @@
   }
 
   if (fs.existsSync(envPath)) {
-    // Append some lines in code to help guide users
-    // https://ai.google.dev/tutorials/setup
-    // https://platform.openai.com/api-keys
-    // Also add HF one
     vscode.window.showInformationMessage(
       "Env file already exists, will implement next PR"
     );
   } else {
-    // File doesn't exist in path
-    vscode.window.showInformationMessage(
-      "Env file does not already, will implement next PR"
+    // Create the .env file from the sample
+    const envTemplatePath = vscode.Uri.joinPath(
+      context.extensionUri,
+      "static",
+      "env_template.env"
     );
-  }
 
-  // // Create the model registry file from the sample
-  // const sampleModelRegistryPath = vscode.Uri.joinPath(
-  //   context.extensionUri,
-  //   "static",
-  //   "example_aiconfig_model_registry.py"
-  // );
+    try {
+      await vscode.workspace.fs.copy(
+        envTemplatePath,
+        vscode.Uri.file(envPath),
+        { overwrite: false }
+      );
+    } catch (err) {
+      vscode.window.showErrorMessage(
+        `Error creating new file ${envTemplatePath}: ${err}`
+      );
+    }
 
-  // try {
-  //   await vscode.workspace.fs.copy(
-  //     sampleModelRegistryPath,
-  //     vscode.Uri.file(modelRegistryPath),
-  //     { overwrite: false }
-  //   );
-  // } catch (err) {
-  //   vscode.window.showErrorMessage(
-  //     `Error creating new file ${modelRegistryPath}. Error is ${err}`
-  //   );
-  // }
-
-  // const doc = await vscode.workspace.openTextDocument(modelRegistryPath);
-  // if (doc) {
-  //   vscode.window.showTextDocument(doc);
-  //   vscode.window.showInformationMessage(
-  //     "Please customize your new model registry."
-  //   );
-  // }
-
-  // let config = vscode.workspace.getConfiguration(EXTENSION_NAME);
-  // await handleCustomModelRegistryUpdate(
-  //   config,
-  //   aiconfigEditorManager,
-  //   modelRegistryPath
-  // );
+    // TODO: Figure out how to ensure that walkthrough tab doesn't get lost
+    // when running this command and opening new doc
+    const doc = await vscode.workspace.openTextDocument(envPath);
+    if (doc) {
+      vscode.window.showTextDocument(doc, {
+        preview: false,
+        // Tried using vscode.ViewColumn.Beside but that creates a split view
+        viewColumn: vscode.ViewColumn.Active,
+      });
+      vscode.window.showInformationMessage(
+        "Please define your environment variables."
+      );
+    }
+  }
 }
 
 async function shareAIConfig(
diff --git a/vscode-extension/static/env_template.env b/vscode-extension/static/env_template.env
new file mode 100644
index 000000000..d75aee355
--- /dev/null
+++ b/vscode-extension/static/env_template.env
@@ -0,0 +1,15 @@
+# Uncomment the following lines to set these common env variables for
+# accessing generative AI models. If your model requires other env variables
+# you can also set those in this file
+
+## Used for models from OpenAI such as Chat GPT and Dall-E
+## You can get your api key from https://platform.openai.com/api-keys
+# OPENAI_API_KEY=
+
+## Used for models from Google such as Gemini and Gemma
+## You can get your api key from https://ai.google.dev/tutorials/setup
+# GOOGLE_API_KEY=
+
+## Used for models hosted on Hugging Face: https://huggingface.co/models
+## You can get your token from https://huggingface.co/settings/tokens
+# HUGGING_FACE_API_TOKEN=