Version 1.2.0
New:
- Uses the official ollama Python package instead of the openai client
- Asks for confirmation before resetting settings
- Added an option to list all pulled Ollama models instead of a fixed set (you can now choose from any model downloaded to your Ollama library)

Improvements:
Most models should now work smoothly, with no extra text after generation, and you can choose from already-pulled models instead of a fixed list.
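For reference, here is a minimal sketch of the ollama-python calls this release switches to — not the app's exact code (see src/backend.py below); the host URL and model choice are placeholders for whatever is configured locally:

# Minimal sketch of the ollama-python usage adopted in this commit.
# Assumes a local Ollama server on the default port; model names vary.
from ollama import Client, RequestError, ResponseError

client = Client(host="http://localhost:11434")

# List every model already pulled into the local Ollama library.
model_names = [m["name"] for m in client.list()["models"]]

try:
    # Stop sequences keep models from emitting extra text after the answer.
    response = client.chat(
        model=model_names[0],
        messages=[{"role": "user", "content": '"💧 Water + 🔥 Fire"'}],
        stream=False,
        options={"temperature": 0, "stop": ["\n"]},
    )
    print(response["message"]["content"])
except (RequestError, ResponseError) as err:
    print(f"Ollama error: {err}")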
Fus3n committed Apr 3, 2024
1 parent caac570 commit f698c9e
Showing 6 changed files with 141 additions and 40 deletions.
1 change: 1 addition & 0 deletions icons/refresh-icon.svg
(SVG asset; no text preview shown.)
24 changes: 22 additions & 2 deletions requirements.txt
@@ -1,3 +1,23 @@
+annotated-types==0.6.0
+anyio==4.3.0
+certifi==2024.2.2
+colorama==0.4.6
+distro==1.9.0
+exceptiongroup==1.2.0
+h11==0.14.0
+httpcore==1.0.4
+httpx==0.27.0
+idna==3.6
+ollama==0.1.8
+packaging==23.2
+pydantic==2.6.2
+pydantic_core==2.16.3
 PySide6==6.6.2
-openai==1.12.0
-qdarkstyle==3.2.3
+PySide6_Addons==6.6.2
+PySide6_Essentials==6.6.2
+QDarkStyle==3.2.3
+QtPy==2.4.1
+shiboken6==6.6.2
+sniffio==1.3.0
+tqdm==4.66.2
+typing_extensions==4.9.0
43 changes: 21 additions & 22 deletions src/backend.py
@@ -1,4 +1,5 @@
-from openai import OpenAI, NotFoundError, APIConnectionError
+# from openai import OpenAI, NotFoundError, APIConnectionError
+from ollama import Client, RequestError, ChatResponse, ResponseError
 from consts import DEFAULT_EXAMPLES
 from configmanager import ConfigManger
 
@@ -15,19 +16,17 @@ def __init__(self) -> None:
         conf = self.conf_manager.get_config()
         self.base_url: str = conf["base_url"]
         self.model: str = conf["model"]
-        self.simple_check()
+        self.check_base_url()
 
         self.examples = DEFAULT_EXAMPLES
         self.system_msg = conf["system_msg"]
 
-        self.__client = OpenAI(
-            base_url = self.base_url,
-            api_key="ollama",
+        self.__client = Client(
+            host=self.base_url,
         )
 
         self.final_examples = []
 
-    def simple_check(self):
+    def check_base_url(self):
         if not self.base_url.endswith("/v1"):
             if self.base_url[-1] == "/":
                 self.base_url += "v1"
@@ -39,10 +38,9 @@ def reload_settings(self):
         conf = self.conf_manager.get_config()
         if self.base_url != (new_base_url := conf["base_url"]):
             self.base_url = new_base_url
-            self.simple_check()
-            self.__client = OpenAI(
-                base_url = self.base_url,
-                api_key="ollama",
+            self.check_base_url()
+            self.__client = Client(
+                host=self.base_url,
             )
 
         self.system_msg = conf["system_msg"]
@@ -63,7 +61,6 @@ def generate_result(self, first: str, second: str) -> tuple[str | None, str | None]:
         if not first or not second:
             return None, "Invalid Input"
 
-        # TODO: don't check it every generation.
         self.reload_settings()
 
         result = f'"{first} + {second}"'
@@ -75,20 +72,22 @@ def generate_result(self, first: str, second: str) -> tuple[str | None, str | None]:
         messages.extend(self.final_examples)
         messages.append({"role": "user", "content": result})
         try:
-            response = self.__client.chat.completions.create(
+            response: ChatResponse = self.__client.chat(
                 model=self.model,
                 messages=messages,
-                max_tokens=15,
-                n=1,
-                temperature=0,
-                top_p=1,
+                stream=False,
+                options={
+                    "top_p": 1,
+                    "temperature": 0,
+                    "stop": ["[\n", "\r\n", "\n", "\"", "<|im_end|>"]
+                }
             )
-        except NotFoundError:
-            return None, "Invalid Base Url"
-        except APIConnectionError:
-            return None, "Connection Error, make sure ollama is running in background and try again"
+        except RequestError as e:
+            return None, str(e)
+        except ResponseError as e:
+            return None, str(e)
 
-        return response.choices[0].message.content, None
+        return response['message']['content'], None
 
 if __name__ == '__main__':
     llm = BackendLLM()
6 changes: 0 additions & 6 deletions src/consts.py
@@ -47,10 +47,4 @@ class ExampleEntry(TypedDict):
     "🔥 Fire",
 ]
 
-MODELS = [
-    "llama2",
-    "mistral",
-    "llava"
-]
-
 DEFAULT_MODEL = "llama2"
20 changes: 20 additions & 0 deletions src/fetch_ollama_models.py
@@ -0,0 +1,20 @@
+from PySide6.QtCore import Signal, QThread
+import httpx
+
+class FetchModels(QThread):
+    fetched = Signal(list)
+    error = Signal(str)
+
+    def __init__(self, client):
+        super().__init__(None)
+        self.client = client
+
+    def run(self):
+        try:
+            ollama_models = self.client.list()
+            ollama_models = [model["name"] for model in ollama_models['models']]
+            self.fetched.emit(ollama_models)
+        except httpx.ConnectError as e:
+            self.error.emit("Ollama is not running in the background, please make sure it is, and try again.\nIf it is running and you are still getting this, make sure your base url is correct.")
+        except Exception as e:
+            self.error.emit(str(e))
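In isolation, the worker above is wired up the same way src/settings.py does below — a usage sketch, with the host URL as a placeholder:

import sys
from PySide6.QtCore import QCoreApplication
from ollama import Client
from fetch_ollama_models import FetchModels

app = QCoreApplication(sys.argv)
client = Client(host="http://localhost:11434")

fetcher = FetchModels(client)  # QThread: runs client.list() off the UI thread
fetcher.fetched.connect(lambda models: (print(models), app.quit()))  # list[str] of model names
fetcher.error.connect(lambda msg: (print(msg), app.quit()))
fetcher.start()

sys.exit(app.exec())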
87 changes: 77 additions & 10 deletions src/settings.py
@@ -10,20 +10,32 @@
     QComboBox,
     QListWidget,
     QListWidgetItem,
-    QMessageBox
+    QMessageBox,
+    QProgressBar,
 )
 from PySide6.QtCore import Qt
+from PySide6.QtGui import QIcon
+from fetch_ollama_models import FetchModels
 from configmanager import ConfigManger
-from consts import MODELS, DEFAULT_SYSTEM_MSG, DEFAULT_BASE_URL, DEFAULT_MODEL, DEFAULT_EXAMPLES, DEFAULT_CHIPS
+from consts import DEFAULT_SYSTEM_MSG, DEFAULT_BASE_URL, DEFAULT_MODEL, DEFAULT_EXAMPLES, DEFAULT_CHIPS
 from example_entry_widget import ExampleEntry, ChipEntry
+from ollama import Client
 
 class Settings(QDialog):
 
     def __init__(self, parent: QWidget | None = None) -> None:
         super().__init__(parent)
         self.conf_manager = ConfigManger()
+        conf = self.conf_manager.get_config()
 
         self.setWindowTitle("Settings")
         self.setMinimumSize(980, 650)
+        self.__client = Client(host=conf["base_url"])
+
+        self.model_fetcher = FetchModels(self.__client)
+        self.model_fetcher.started.connect(self.fetch_models_started)
+        self.model_fetcher.fetched.connect(self.fetch_models_finished)
+        self.model_fetcher.error.connect(self.fetch_models_error)
 
         self.heading_layout = QHBoxLayout()
         self.heading_layout.setContentsMargins(0, 0, 0, 0)
@@ -65,8 +77,6 @@ def __init__(self, parent: QWidget | None = None) -> None:
         self.chips_lay.addWidget(add_chip_btn)
         self.chips_lay.addWidget(self.chips_list)
 
-        conf = self.conf_manager.get_config()
-
         # populate list
         for example in conf["examples"]:
             self.add_example_entry(example["from_str"], example["result_str"])
@@ -78,12 +88,32 @@ def __init__(self, parent: QWidget | None = None) -> None:
         self.lay.setSpacing(5)
 
         self.lay.addLayout(self.heading_layout)  # Add heading_layout to the main layout
+        self.lay.addWidget(QLabel("Model"))
+
+        self.models_lay = QHBoxLayout()
+        self.lay.addLayout(self.models_lay)
 
         self.models_choice = QComboBox()
-        self.models_choice.addItems(MODELS)
-        self.models_choice.setCurrentText(conf["model"])
-        self.lay.addWidget(QLabel("Model"))
-        self.lay.addWidget(self.models_choice)
+        self.refresh_btn = QLabel()
+        self.loading_pb = QProgressBar()
+        self.loading_pb.setRange(0, 0)
+        self.loading_pb.setMaximumHeight(25)
+        self.models_lay.addWidget(self.models_choice, stretch=1)
+
+        # check if the conf dict has the key "ollama_models" and, if so, whether the list is non-empty
+        if "ollama_models" in conf and conf["ollama_models"]:
+            self.models_choice.addItems(conf["ollama_models"])
+            self.models_choice.setCurrentText(conf["model"])
+        else:
+            self.model_fetcher.start()
+
+        self.refresh_btn.setCursor(Qt.PointingHandCursor)
+        # on click
+        self.refresh_btn.mousePressEvent = lambda ev: self.model_fetcher.start()
+        icon = QIcon("icons/refresh-icon.svg")
+        self.refresh_btn.setPixmap(icon.pixmap(18))  # TODO: update to have file path constants
+        self.refresh_btn.setStyleSheet("background-color: transparent; color: red;")
+        self.models_lay.addWidget(self.refresh_btn)
 
         self.lay.addWidget(QLabel("Base URL"))
         self.base_url_input = QLineEdit()
@@ -101,7 +131,6 @@ def __init__(self, parent: QWidget | None = None) -> None:
         self.system_prompt_input.setText(conf["system_msg"])
         self.lay.addWidget(self.system_prompt_input)
 
-
         buttons_layout = QHBoxLayout()
         buttons_layout.setAlignment(Qt.AlignmentFlag.AlignTop)
         ok_button = QPushButton("Save")
@@ -126,9 +155,36 @@ def __init__(self, parent: QWidget | None = None) -> None:
 
         self.setLayout(self.dialog_lay)
 
+    def fetch_models_started(self):
+        self.models_lay.removeWidget(self.models_choice)
+        self.models_lay.insertWidget(0, self.loading_pb, stretch=1)
+        self.refresh_btn.setEnabled(False)
+
+    def reset_models_layout(self):
+        self.loading_pb.setParent(None)
+        self.models_lay.insertWidget(0, self.models_choice, stretch=1)
+
+    def fetch_models_finished(self, ollama_models: list[str]):
+        self.models_choice.clear()
+        self.models_choice.addItems(ollama_models)
+        self.reset_models_layout()
+
+        current_model = self.conf_manager.get_value("model")
+        if current_model in ollama_models:
+            self.models_choice.setCurrentText(current_model)
+
+        self.refresh_btn.setEnabled(True)
+        self.conf_manager.set_key_value("ollama_models", ollama_models)
+
+    def fetch_models_error(self, error: str):
+        self.reset_models_layout()
+        self.refresh_btn.setEnabled(True)
+        QMessageBox.critical(self, "Error", error)
+
     def save_settings(self):
         conf = self.conf_manager.get_config()
         conf["model"] = self.models_choice.currentText()
+        conf["ollama_models"] = [self.models_choice.itemText(i) for i in range(self.models_choice.count())]
         conf["base_url"] = self.base_url_input.text()
         conf["system_msg"] = self.system_prompt_input.toPlainText()
 
@@ -153,6 +209,17 @@ def save_settings(self):
         self.accept()
 
     def reset_settings(self):
+        # ask for confirmation in a dialog
+        dialog = QMessageBox()
+        dialog.setWindowTitle("Reset Settings")
+        dialog.setText("Are you sure you want to reset all settings?")
+        dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
+        dialog.setDefaultButton(QMessageBox.No)
+
+        result = dialog.exec()
+        if result == QMessageBox.No:
+            return
+
         self.models_choice.setCurrentText(DEFAULT_MODEL)
         self.base_url_input.setText(DEFAULT_BASE_URL)
         self.system_prompt_input.setText(DEFAULT_SYSTEM_MSG)
