From c9f3d4fd8d232d124818d7f95986588f474a0652 Mon Sep 17 00:00:00 2001
From: Yu xing
Date: Wed, 21 Aug 2024 13:59:30 -0700
Subject: [PATCH] move import position

---
 nexa/gguf/nexa_inference_image.py | 4 ++--
 nexa/gguf/nexa_inference_text.py  | 3 ++-
 nexa/gguf/nexa_inference_vlm.py   | 4 ++--
 nexa/gguf/nexa_inference_voice.py | 3 +--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/nexa/gguf/nexa_inference_image.py b/nexa/gguf/nexa_inference_image.py
index 473b9f2f..3e2123ad 100644
--- a/nexa/gguf/nexa_inference_image.py
+++ b/nexa/gguf/nexa_inference_image.py
@@ -6,7 +6,6 @@
 import time
 from pathlib import Path
 
-from nexa.gguf.sd.stable_diffusion import StableDiffusion
 from nexa.general import pull_model
 from nexa.constants import (
     DEFAULT_IMG_GEN_PARAMS,
@@ -44,7 +43,7 @@ class NexaImageInference:
     streamlit (bool): Run the inference in Streamlit UI.
     """
 
-    from nexa.gguf.sd.stable_diffusion import StableDiffusion
+
 
     def __init__(self, model_path, **kwargs):
         self.model_path = None
@@ -85,6 +84,7 @@ def __init__(self, model_path, **kwargs):
     @SpinningCursorAnimation()
     def _load_model(self, model_path: str):
         with suppress_stdout_stderr():
+            from nexa.gguf.sd.stable_diffusion import StableDiffusion
             self.model = StableDiffusion(
                 model_path=self.downloaded_path,
                 lora_model_dir=self.params.get("lora_dir", ""),
diff --git a/nexa/gguf/nexa_inference_text.py b/nexa/gguf/nexa_inference_text.py
index 23804abf..9c9a3c32 100644
--- a/nexa/gguf/nexa_inference_text.py
+++ b/nexa/gguf/nexa_inference_text.py
@@ -39,7 +39,7 @@ class NexaTextInference:
     top_k (int): Top-k sampling parameter.
     top_p (float): Top-p sampling parameter
     """
-    from nexa.gguf.llama.llama import Llama
+
     def __init__(self, model_path, stop_words=None, **kwargs):
         self.params = DEFAULT_TEXT_GEN_PARAMS
         self.params.update(kwargs)
@@ -110,6 +110,7 @@ def _load_model(self):
         logging.debug(f"Loading model from {self.downloaded_path}")
         start_time = time.time()
         with suppress_stdout_stderr():
+            from nexa.gguf.llama.llama import Llama
             self.model = Llama(
                 model_path=self.downloaded_path,
                 verbose=self.profiling,
diff --git a/nexa/gguf/nexa_inference_vlm.py b/nexa/gguf/nexa_inference_vlm.py
index 157bf28e..1e2ab005 100644
--- a/nexa/gguf/nexa_inference_vlm.py
+++ b/nexa/gguf/nexa_inference_vlm.py
@@ -19,7 +19,6 @@
 )
 from nexa.general import pull_model
 from nexa.gguf.lib_utils import is_gpu_available
-from nexa.gguf.llama.llama import Llama
 from nexa.gguf.llama.llama_chat_format import (
     Llava15ChatHandler,
     Llava16ChatHandler,
@@ -79,7 +78,7 @@ class NexaVLMInference:
     top_k (int): Top-k sampling parameter.
     top_p (float): Top-p sampling parameter
    """
-    from nexa.gguf.llama.llama import Llama
+
 
    def __init__(self, model_path, stop_words=None, **kwargs):
        self.params = DEFAULT_TEXT_GEN_PARAMS
@@ -151,6 +150,7 @@ def _load_model(self):
             if self.projector_downloaded_path
             else None
         )
+        from nexa.gguf.llama.llama import Llama
         self.model = Llama(
             model_path=self.downloaded_path,
             chat_handler=self.projector,
diff --git a/nexa/gguf/nexa_inference_voice.py b/nexa/gguf/nexa_inference_voice.py
index 3ab103d6..372a72f6 100644
--- a/nexa/gguf/nexa_inference_voice.py
+++ b/nexa/gguf/nexa_inference_voice.py
@@ -7,9 +7,8 @@
 
 from nexa.constants import EXIT_REMINDER, NEXA_RUN_MODEL_MAP_VOICE, DEFAULT_VOICE_GEN_PARAMS
 from nexa.general import pull_model
-from nexa.utils import nexa_prompt
 from faster_whisper import WhisperModel
-from nexaai.utils import nexa_prompt, SpinningCursorAnimation, suppress_stdout_stderr
+from nexa.utils import nexa_prompt, SpinningCursorAnimation, suppress_stdout_stderr
 
 logging.basicConfig(level=logging.INFO)
 