From 896a02297a68d350ce815d39a399a9e1fe7b8650 Mon Sep 17 00:00:00 2001 From: Justin Merrell Date: Tue, 30 Jan 2024 14:14:48 -0500 Subject: [PATCH] feat: prompt cleanup and new starter examples --- cmd/project/functions.go | 4 +- cmd/project/project.go | 68 +++++++++++++------ .../Hello World}/.runpodignore | 0 .../Hello World}/builder/requirements.txt | 1 - .../Hello World}/src/handler.py | 0 .../LLM}/.runpodignore | 0 .../LLM/builder/requirements.txt | 14 ++++ .../starter_examples/LLM/src/handler.py | 36 ++++++++++ .../Stable Diffusion/.runpodignore | 10 +++ .../Stable Diffusion/builder/requirements.txt | 13 ++++ .../Stable Diffusion/src/handler.py | 42 ++++++++++++ .../llama2/builder/requirements.txt | 4 -- .../starter_templates/llama2/src/handler.py | 36 ---------- 13 files changed, 163 insertions(+), 65 deletions(-) rename cmd/project/{starter_templates/default => starter_examples/Hello World}/.runpodignore (100%) rename cmd/project/{starter_templates/default => starter_examples/Hello World}/builder/requirements.txt (96%) rename cmd/project/{starter_templates/default => starter_examples/Hello World}/src/handler.py (100%) rename cmd/project/{starter_templates/llama2 => starter_examples/LLM}/.runpodignore (100%) create mode 100644 cmd/project/starter_examples/LLM/builder/requirements.txt create mode 100644 cmd/project/starter_examples/LLM/src/handler.py create mode 100644 cmd/project/starter_examples/Stable Diffusion/.runpodignore create mode 100644 cmd/project/starter_examples/Stable Diffusion/builder/requirements.txt create mode 100644 cmd/project/starter_examples/Stable Diffusion/src/handler.py delete mode 100644 cmd/project/starter_templates/llama2/builder/requirements.txt delete mode 100644 cmd/project/starter_templates/llama2/src/handler.py diff --git a/cmd/project/functions.go b/cmd/project/functions.go index 8d34fe5..c0ae2e0 100644 --- a/cmd/project/functions.go +++ b/cmd/project/functions.go @@ -18,7 +18,7 @@ import ( // TODO: embed all hidden files even 
those not at top level // -//go:embed starter_templates/* starter_templates/*/.* +//go:embed starter_examples/* starter_examples/*/.* var starterTemplates embed.FS //go:embed example.toml @@ -27,7 +27,7 @@ var tomlTemplate embed.FS //go:embed exampleDockerfile var dockerfileTemplate embed.FS -const basePath string = "starter_templates" +const basePath string = "starter_examples" func baseDockerImage(cudaVersion string) string { return fmt.Sprintf("runpod/base:0.4.4-cuda%s", cudaVersion) diff --git a/cmd/project/project.go b/cmd/project/project.go index 6139833..2c6b7f6 100644 --- a/cmd/project/project.go +++ b/cmd/project/project.go @@ -3,6 +3,7 @@ package project import ( "cli/api" "fmt" + "os" "strings" "github.com/manifoldco/promptui" @@ -28,6 +29,7 @@ func prompt(message string) string { } return s } + func contains(input string, choices []string) bool { for _, choice := range choices { if input == choice { @@ -36,6 +38,7 @@ func contains(input string, choices []string) bool { } return false } + func promptChoice(message string, choices []string, defaultChoice string) string { var s string = "" for !contains(s, choices) { @@ -84,12 +87,13 @@ func selectNetworkVolume() (networkVolumeId string, err error) { networkVolumeId = options[i].Value return networkVolumeId, nil } + func selectStarterTemplate() (template string, err error) { type StarterTemplateOption struct { Name string // The string to display Value string // The actual value to use } - templates, err := starterTemplates.ReadDir("starter_templates") + templates, err := starterTemplates.ReadDir("starter_examples") if err != nil { fmt.Println("Something went wrong trying to fetch starter templates") fmt.Println(err) @@ -129,39 +133,59 @@ var NewProjectCmd = &cobra.Command{ Use: "new", Args: cobra.ExactArgs(0), Short: "create a new project", - Long: "create a new Runpod project folder", + Long: "create a new RunPod project folder", Run: func(cmd *cobra.Command, args []string) { fmt.Println("Creating a new 
project...") + + // Project Name if projectName == "" { projectName = prompt("Enter the project name") - } else { - fmt.Println("Project name: " + projectName) } + fmt.Println("Project name: " + projectName) + + // Starter Example if modelType == "" { - template, err := selectStarterTemplate() - modelType = template + starterExample, err := selectStarterTemplate() + modelType = starterExample if err != nil { modelType = "" } } - cudaVersion := promptChoice("Select a CUDA version, or press enter to use the default", + + // CUDA Version + cudaVersion := promptChoice("Select CUDA Version [default: 11.8.0]: ", []string{"11.1.1", "11.8.0", "12.1.0"}, "11.8.0") - pythonVersion := promptChoice("Select a Python version, or press enter to use the default", + + // Python Version + pythonVersion := promptChoice("Select Python Version [default: 3.10]: ", []string{"3.8", "3.9", "3.10", "3.11"}, "3.10") - fmt.Printf(` -Project Summary: - - Project Name: %s - - Starter Template: %s - - CUDA Version: %s - - Python Version: %s - `, projectName, modelType, cudaVersion, pythonVersion) - fmt.Println() - fmt.Println("The project will be created in the current directory.") - //TODO confirm y/n - createNewProject(projectName, cudaVersion, - pythonVersion, modelType, modelName, initCurrentDir) - fmt.Printf("Project %s created successfully!", projectName) - fmt.Println() + + // Project Summary + fmt.Println("\nProject Summary:") + fmt.Println("------------------------------------------------") + fmt.Printf("Project Name : %s\n", projectName) + fmt.Printf("Starter Example : %s\n", modelType) + fmt.Printf("CUDA Version : %s\n", cudaVersion) + fmt.Printf("Python Version : %s\n", pythonVersion) + fmt.Println("------------------------------------------------") + + // Confirm + currentDir, err := os.Getwd() + if err != nil { + fmt.Println("Error getting current directory:", err) + return + } + + fmt.Printf("\nThe project will be created in the current directory: %s\n", currentDir) + confirm := 
promptChoice("Proceed with creation? [yes/no, default: yes]: ", []string{"yes", "no"}, "yes") + if confirm != "yes" { + fmt.Println("Project creation cancelled.") + return + } + + // Create Project + createNewProject(projectName, cudaVersion, pythonVersion, modelType, modelName, initCurrentDir) + fmt.Printf("\nProject %s created successfully!\n", projectName) fmt.Println("From your project root run `runpodctl project dev` to start a development pod.") }, } diff --git a/cmd/project/starter_templates/default/.runpodignore b/cmd/project/starter_examples/Hello World/.runpodignore similarity index 100% rename from cmd/project/starter_templates/default/.runpodignore rename to cmd/project/starter_examples/Hello World/.runpodignore diff --git a/cmd/project/starter_templates/default/builder/requirements.txt b/cmd/project/starter_examples/Hello World/builder/requirements.txt similarity index 96% rename from cmd/project/starter_templates/default/builder/requirements.txt rename to cmd/project/starter_examples/Hello World/builder/requirements.txt index 5a3f4c6..b752a48 100644 --- a/cmd/project/starter_templates/default/builder/requirements.txt +++ b/cmd/project/starter_examples/Hello World/builder/requirements.txt @@ -6,4 +6,3 @@ # To learn more, see https://pip.pypa.io/en/stable/reference/requirements-file-format/ <> -hf_transfer diff --git a/cmd/project/starter_templates/default/src/handler.py b/cmd/project/starter_examples/Hello World/src/handler.py similarity index 100% rename from cmd/project/starter_templates/default/src/handler.py rename to cmd/project/starter_examples/Hello World/src/handler.py diff --git a/cmd/project/starter_templates/llama2/.runpodignore b/cmd/project/starter_examples/LLM/.runpodignore similarity index 100% rename from cmd/project/starter_templates/llama2/.runpodignore rename to cmd/project/starter_examples/LLM/.runpodignore diff --git a/cmd/project/starter_examples/LLM/builder/requirements.txt 
b/cmd/project/starter_examples/LLM/builder/requirements.txt new file mode 100644 index 0000000..f19b619 --- /dev/null +++ b/cmd/project/starter_examples/LLM/builder/requirements.txt @@ -0,0 +1,14 @@ +# Required Python packages get listed here, one per line. +# Recommended to lock the version number to avoid unexpected changes. + +# You can also install packages from a git repository, e.g.: +# git+https://github.com/runpod/runpod-python.git +# To learn more, see https://pip.pypa.io/en/stable/reference/requirements-file-format/ + +<> +hf_transfer + +torch +accelerate +transformers +sentencepiece diff --git a/cmd/project/starter_examples/LLM/src/handler.py b/cmd/project/starter_examples/LLM/src/handler.py new file mode 100644 index 0000000..8278d53 --- /dev/null +++ b/cmd/project/starter_examples/LLM/src/handler.py @@ -0,0 +1,36 @@ +''' A starter example for a handler file using RunPod and a large language model for text generation. ''' + +import io +import base64 +from typing import Dict, Any + +import runpod +from transformers import T5Tokenizer, T5ForConditionalGeneration + +# Initialize the tokenizer and model +tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base") +model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-base", device_map="auto").to("cuda") + + +def handler(job: Dict[str, Any]) -> str: + """ + Handler function for processing a job. + + Args: + job (dict): A dictionary containing the job input. + + Returns: + str: The generated text response.
+ """ + + job_input = job['input'] + input_text = job_input['text'] + + input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to("cuda") + outputs = model.generate(input_ids) + response = tokenizer.decode(outputs[0]) + + return response + + +runpod.serverless.start({"handler": handler}) diff --git a/cmd/project/starter_examples/Stable Diffusion/.runpodignore b/cmd/project/starter_examples/Stable Diffusion/.runpodignore new file mode 100644 index 0000000..55823af --- /dev/null +++ b/cmd/project/starter_examples/Stable Diffusion/.runpodignore @@ -0,0 +1,10 @@ +# Similar to .gitignore +# Matches will not be synced to the development pod or cause the development pod to reload. + +Dockerfile +__pycache__/ +*.pyc +.*.swp +.git/ +*.tmp +*.log \ No newline at end of file diff --git a/cmd/project/starter_examples/Stable Diffusion/builder/requirements.txt b/cmd/project/starter_examples/Stable Diffusion/builder/requirements.txt new file mode 100644 index 0000000..2536652 --- /dev/null +++ b/cmd/project/starter_examples/Stable Diffusion/builder/requirements.txt @@ -0,0 +1,13 @@ +# Required Python packages get listed here, one per line. +# Recommended to lock the version number to avoid unexpected changes. + +# You can also install packages from a git repository, e.g.: +# git+https://github.com/runpod/runpod-python.git +# To learn more, see https://pip.pypa.io/en/stable/reference/requirements-file-format/ + +<> +hf_transfer + +accelerate +diffusers +transformers diff --git a/cmd/project/starter_examples/Stable Diffusion/src/handler.py b/cmd/project/starter_examples/Stable Diffusion/src/handler.py new file mode 100644 index 0000000..c816cff --- /dev/null +++ b/cmd/project/starter_examples/Stable Diffusion/src/handler.py @@ -0,0 +1,42 @@ +''' A starter example for a handler file using RunPod and diffusers for image generation. 
''' + +import io +import base64 +from typing import Dict, Any + +import runpod +from diffusers import AutoPipelineForText2Image +import torch + +# Initialize the pipeline +pipe = AutoPipelineForText2Image.from_pretrained( + "stabilityai/sdxl-turbo", # model name + torch_dtype=torch.float16, variant="fp16" + ).to("cuda") + + +def handler(job: Dict[str, Any]) -> str: + """ + Handler function for processing a job. + + Args: + job (dict): A dictionary containing the job input. + + Returns: + str: A base64 encoded string of the generated image. + """ + + job_input = job['input'] + prompt = job_input['prompt'] + + image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0] + + with io.BytesIO() as buffer: + image.save(buffer, format="PNG") + image_bytes = buffer.getvalue() + base64_image = base64.b64encode(image_bytes).decode('utf-8') + + return f"data:image/png;base64,{base64_image}" + + +runpod.serverless.start({"handler": handler}) diff --git a/cmd/project/starter_templates/llama2/builder/requirements.txt b/cmd/project/starter_templates/llama2/builder/requirements.txt deleted file mode 100644 index 5a94046..0000000 --- a/cmd/project/starter_templates/llama2/builder/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -# List your python dependencies here. See https://pip.pypa.io/en/stable/user_guide/#requirements-files - -runpod==1.2.0 -transformers>=4.33.0 diff --git a/cmd/project/starter_templates/llama2/src/handler.py b/cmd/project/starter_templates/llama2/src/handler.py deleted file mode 100644 index 3e4e679..0000000 --- a/cmd/project/starter_templates/llama2/src/handler.py +++ /dev/null @@ -1,36 +0,0 @@ -''' A template for a Llama2 handler file.
''' -# pylint: skip-file - -import runpod -import inspect -from transformers import HfApi - -SELECTED_MODEL = "<>" - -def get_model_framework(model_name): - api = HfApi() - model_files = api.model_info(model_name).files - - # Check the files associated with the model - if "pytorch_model.bin" in model_files: - return "PyTorch" - elif "tf_model.h5" in model_files: - return "TensorFlow" - else: - return "Unknown" - -def prepare_inputs(text, **kwargs): - # Filter kwargs based on what the tokenizer accepts - filtered_args = {k: v for k, v in kwargs.items() if k in valid_args} - - inputs = tokenizer(text, return_tensors="pt", **filtered_args) - return inputs - - -def handle_request(text, **input_args): - inputs = prepare_inputs(text, **input_args) - with torch.no_grad(): - outputs = model(**inputs) - return process_outputs(outputs) - -runpod.serverless.start({"handler": handle_request})