From fb46d8d4f9d1b1cfbada952e5287674db6424790 Mon Sep 17 00:00:00 2001
From: qiqiWav
Date: Fri, 15 Nov 2024 22:06:34 +0000
Subject: [PATCH] terminal loading

---
 nexa/gguf/llama/_utils_spinner.py    | 40 ++++++++++++++++++++++++++++
 nexa/gguf/nexa_inference_audio_lm.py | 16 +++++++++--
 nexa/gguf/nexa_inference_image.py    | 22 +++++++++++++++
 nexa/gguf/nexa_inference_text.py     | 11 ++++++++
 nexa/gguf/nexa_inference_tts.py      | 11 ++++++++
 nexa/gguf/nexa_inference_vlm.py      | 12 ++++++++-
 nexa/gguf/nexa_inference_vlm_omni.py | 11 ++++++++
 nexa/gguf/nexa_inference_voice.py    | 11 ++++++++
 8 files changed, 131 insertions(+), 3 deletions(-)
 create mode 100644 nexa/gguf/llama/_utils_spinner.py

diff --git a/nexa/gguf/llama/_utils_spinner.py b/nexa/gguf/llama/_utils_spinner.py
new file mode 100644
index 00000000..5ccafed8
--- /dev/null
+++ b/nexa/gguf/llama/_utils_spinner.py
@@ -0,0 +1,40 @@
+import sys
+import threading
+import time
+import os
+
+def get_spinner_style(style="default"):
+    spinners = {
+        "default": '⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'
+    }
+    return spinners.get(style, spinners["default"])
+
+def spinning_cursor(style="default"):
+    while True:
+        for cursor in get_spinner_style(style):
+            yield cursor
+
+def show_spinner(stop_event, style="default", message=""):
+    spinner = spinning_cursor(style)
+
+    fd = os.open('/dev/tty', os.O_WRONLY)
+
+    while not stop_event.is_set():
+        display = f"\r{message} {next(spinner)}" if message else f"\r{next(spinner)}"
+        os.write(fd, display.encode())
+        time.sleep(0.1)
+
+    os.write(fd, b"\r" + b" " * (len(message) + 2))
+    os.write(fd, b"\r")
+    os.close(fd)
+
+def start_spinner(style="default", message=""):
+    stop_event = threading.Event()
+    spinner_thread = threading.Thread(target=show_spinner, args=(stop_event, style, message))
+    spinner_thread.daemon = True
+    spinner_thread.start()
+    return stop_event, spinner_thread
+
+def stop_spinner(stop_event, spinner_thread):
+    stop_event.set()
+    spinner_thread.join()
\ No newline at end of file
diff --git a/nexa/gguf/nexa_inference_audio_lm.py b/nexa/gguf/nexa_inference_audio_lm.py
index f93153fc..8fb8134a 100644
--- a/nexa/gguf/nexa_inference_audio_lm.py
+++ b/nexa/gguf/nexa_inference_audio_lm.py
@@ -147,12 +147,24 @@ def run(self):
         """
         Run the audio language model inference loop.
         """
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         try:
             while True:
                 audio_path = self._get_valid_audio_path()
                 user_input = nexa_prompt("Enter text (leave empty if no prompt): ")
-                with suppress_stdout_stderr():
-                    response = self.inference(audio_path, user_input)
+
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
+                try:
+                    with suppress_stdout_stderr():
+                        response = self.inference(audio_path, user_input)
+                finally:
+                    stop_spinner(stop_event, spinner_thread)
+
                 print(f"{response}")
 
         except KeyboardInterrupt:
diff --git a/nexa/gguf/nexa_inference_image.py b/nexa/gguf/nexa_inference_image.py
index 0813f725..c1bdb7ac 100644
--- a/nexa/gguf/nexa_inference_image.py
+++ b/nexa/gguf/nexa_inference_image.py
@@ -203,12 +203,20 @@ def txt2img(
         return images
 
     def run_txt2img(self):
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         while True:
             try:
                 prompt = nexa_prompt("Enter your prompt: ")
                 negative_prompt = nexa_prompt(
                     "Enter your negative prompt (press Enter to skip): "
                 )
+
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 try:
                     images = self.txt2img(
                         prompt,
@@ -225,6 +233,9 @@ def run_txt2img(self):
                     self._save_images(images)
                 except Exception as e:
                     logging.error(f"Error during text to image generation: {e}")
+                finally:
+                    stop_spinner(stop_event, spinner_thread)
+
             except KeyboardInterrupt:
                 print(EXIT_REMINDER)
             except Exception as e:
@@ -270,6 +281,8 @@ def img2img(
         return images
 
     def run_img2img(self):
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         while True:
             try:
                 image_path = nexa_prompt("Enter the path to your image: ")
@@ -277,6 +290,12 @@ def run_img2img(self):
                 negative_prompt = nexa_prompt(
                     "Enter your negative prompt (press Enter to skip): "
                 )
+
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 images = self.img2img(
                     image_path,
                     prompt,
@@ -292,6 +311,9 @@ def run_img2img(self):
 
                 if images:
                     self._save_images(images)
+
+                stop_spinner(stop_event, spinner_thread)
+
             except KeyboardInterrupt:
                 print(EXIT_REMINDER)
             except Exception as e:
diff --git a/nexa/gguf/nexa_inference_text.py b/nexa/gguf/nexa_inference_text.py
index e8be232b..1daeb174 100644
--- a/nexa/gguf/nexa_inference_text.py
+++ b/nexa/gguf/nexa_inference_text.py
@@ -152,6 +152,8 @@ def run(self):
         """
         CLI interactive session. Not for SDK.
         """
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         while True:
             generated_text = ""
             try:
@@ -160,9 +162,16 @@ def run(self):
 
                 generation_start_time = time.time()
 
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 if self.chat_format:
                     output = self._chat(user_input)
                     first_token = True
+                    stop_spinner(stop_event, spinner_thread)
+
                     for chunk in output:
                         if first_token:
                             decoding_start_time = time.time()
@@ -179,6 +188,8 @@ def run(self):
                 else:
                     output = self._complete(user_input)
                     first_token = True
+                    stop_spinner(stop_event, spinner_thread)
+
                     for chunk in output:
                         if first_token:
                             decoding_start_time = time.time()
diff --git a/nexa/gguf/nexa_inference_tts.py b/nexa/gguf/nexa_inference_tts.py
index 04488637..b0dafc8e 100644
--- a/nexa/gguf/nexa_inference_tts.py
+++ b/nexa/gguf/nexa_inference_tts.py
@@ -130,12 +130,23 @@ def _load_model(self):
 
 
     def run(self):
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         while True:
             try:
                 user_input = input("Enter text to generate audio: ")
+
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 audio_data = self.audio_generation(user_input)
+
                 self._save_audio(audio_data, self.sampling_rate, self.params["output_path"])
                 logging.info(f"Audio saved to {self.params['output_path']}")
+
+                stop_spinner(stop_event, spinner_thread)
             except KeyboardInterrupt:
                 print("Exiting...")
                 break
diff --git a/nexa/gguf/nexa_inference_vlm.py b/nexa/gguf/nexa_inference_vlm.py
index 214462f8..9b170b6e 100644
--- a/nexa/gguf/nexa_inference_vlm.py
+++ b/nexa/gguf/nexa_inference_vlm.py
@@ -221,6 +221,8 @@ def embed(
         return self.model.embed(input, normalize, truncate, return_count)
 
     def run(self):
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         # I just use completion, no conversation history
         while True:
             try:
@@ -239,14 +241,22 @@ def run(self):
                     print("Please provide an image or text input.")
                     continue
 
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 output = self._chat(user_input, image_path)
+                stop_spinner(stop_event, spinner_thread)
+
                 for chunk in output:
                     delta = chunk["choices"][0]["delta"]
                     if "role" in delta:
                         print(delta["role"], end=": ", flush=True)
                     elif "content" in delta:
                         print(delta["content"], end="", flush=True)
-                        generated_text += delta["content"]
+                        generated_text += delta["content"]
+
             except KeyboardInterrupt:
                 pass
             except Exception as e:
diff --git a/nexa/gguf/nexa_inference_vlm_omni.py b/nexa/gguf/nexa_inference_vlm_omni.py
index 87e8db48..83876569 100644
--- a/nexa/gguf/nexa_inference_vlm_omni.py
+++ b/nexa/gguf/nexa_inference_vlm_omni.py
@@ -105,6 +105,8 @@ def _load_model(self):
             raise
 
     def run(self):
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         while True:
             try:
                 image_path = nexa_prompt("Image Path (required): ")
@@ -112,7 +114,16 @@ def run(self):
                     print(f"Image path: {image_path} not found, running omni VLM without image input.")
                 # Skip user input for OCR version
                 user_input = "" if self.omni_vlm_version == "vlm-81-ocr" else nexa_prompt()
+
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 response = self.inference(user_input, image_path)
+
+                stop_spinner(stop_event, spinner_thread)
+
                 print(f"\nResponse: {response}")
             except KeyboardInterrupt:
                 print("\nExiting...")
diff --git a/nexa/gguf/nexa_inference_voice.py b/nexa/gguf/nexa_inference_voice.py
index 84864713..3bdb1d25 100644
--- a/nexa/gguf/nexa_inference_voice.py
+++ b/nexa/gguf/nexa_inference_voice.py
@@ -81,10 +81,21 @@ def _load_model(self):
         logging.debug("Model loaded successfully")
 
     def run(self):
+        from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner
+
         while True:
             try:
                 audio_path = nexa_prompt("Enter the path to your audio file: ")
+
+                stop_event, spinner_thread = start_spinner(
+                    style="default",
+                    message=""
+                )
+
                 self._transcribe_audio(audio_path)
+
+                stop_spinner(stop_event, spinner_thread)
+
             except KeyboardInterrupt:
                 print(EXIT_REMINDER)
             except Exception as e:
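
Usage note (illustration only, not part of the diff above): the helpers added
in nexa/gguf/llama/_utils_spinner.py are meant to bracket a blocking call,
with stop_spinner() in a finally block so the spinner is cleared even when
inference raises, as nexa_inference_audio_lm.py does. A minimal sketch, where
run_inference() is a hypothetical placeholder for any slow call:

    from nexa.gguf.llama._utils_spinner import start_spinner, stop_spinner

    stop_event, spinner_thread = start_spinner(style="default", message="Generating")
    try:
        result = run_inference()  # hypothetical long-running call
    finally:
        # Always stop the spinner thread and clear the line, even on error.
        stop_spinner(stop_event, spinner_thread)
    print(result)

Because show_spinner() writes to /dev/tty rather than stdout, the spinner
stays visible even while stdout/stderr are suppressed during inference.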