Merge pull request #104 from yukiarimo/dev
Release V6.5.4
yukiarimo authored Aug 28, 2024
2 parents d3226c5 + 369de78 commit 632609d
Showing 11 changed files with 331 additions and 237 deletions.
4 changes: 2 additions & 2 deletions lib/audio.py
@@ -46,7 +46,7 @@ def create_speaker_embedding(waveform):
speaker_embeddings = speaker_embeddings.squeeze().cpu().numpy()
return speaker_embeddings

audio_array, sampling_rate = librosa.load("/Users/yuki/Documents/Github/yuna-ai/static/audio/" + config["server"]["yuna_audio_name"], sr=16000)
audio_array, sampling_rate = librosa.load("/Users/yuki/Documents/Github/yuna-ai/static/audio/" + config["server"]["yuna_reference_audio"], sr=16000)

# Create a dictionary to mimic the dataset structure
custom_audio = {
@@ -157,7 +157,7 @@ def speak_text(text, reference_audio=config['server']['yuna_reference_audio'], o
audio = AudioSegment.from_file("/Users/yuki/Documents/Github/yuna-ai/static/audio/audio.aiff")
audio.export("/Users/yuki/Documents/Github/yuna-ai/static/audio/audio.mp3", format='mp3')
elif mode == "siri-pv":
command = f'say -v {config["server"]["yuna_audio_name"]} -o /Users/yuki/Documents/Github/yuna-ai/static/audio/audio.aiff {repr(text)}'
command = f'say -v {config["server"]["yuna_reference_audio"]} -o /Users/yuki/Documents/Github/yuna-ai/static/audio/audio.aiff {repr(text)}'
print(command)
exit_status = os.system(command)

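Taken together, the rename means the same yuna_reference_audio key now feeds both code paths above. A minimal sketch of that usage (paths shortened; the config dict and text below are placeholders standing in for the real static/config.json values):

import os
import librosa

config = {"server": {"yuna_reference_audio": "1.wav"}}  # placeholder; the real value comes from static/config.json

# Reference audio loaded at 16 kHz for speaker-embedding extraction
audio_array, sampling_rate = librosa.load(
    "static/audio/" + config["server"]["yuna_reference_audio"], sr=16000
)

# "siri-pv" mode shells out to the macOS `say` CLI; there the same key is expected to hold a voice name
text = "Hello from Yuna"
command = f'say -v {config["server"]["yuna_reference_audio"]} -o static/audio/audio.aiff {repr(text)}'
exit_status = os.system(command)
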
19 changes: 10 additions & 9 deletions lib/generate.py
@@ -21,7 +21,7 @@ def get_config(config=None):
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain_community.llms import LlamaCpp

@@ -31,12 +31,15 @@ def __init__(self, config):
self.model = Llama(
model_path="lib/models/yuna/" + config["server"]["yuna_default_model"],
n_ctx=config["ai"]["context_length"],
last_n_tokens_size=config["ai"]["last_n_tokens_size"],
seed=config["ai"]["seed"],
n_batch=config["ai"]["batch_size"],
n_gpu_layers=config["ai"]["gpu_layers"],
n_threads=config["ai"]["threads"],
use_mmap=config["ai"]["use_mmap"],
use_mlock=config["ai"]["use_mlock"],
flash_attn=config["ai"]["flash_attn"],
offload_kqv=config["ai"]["offload_kqv"],
verbose=False
) if config["server"]["yuna_text_mode"] == "native" else ""

@@ -173,7 +176,7 @@ def generate(self, chat_id, speech=False, text="", template=None, chat_history_m
return ''.join(response) if isinstance(response, (list, type((lambda: (yield))()))) else response
return response

def processTextFile(self, text_file, question, temperature=0.8, max_new_tokens=128, context_window=256):
def processTextFile(self, text_file, question, temperature):
# Load text file data
loader = TextLoader(text_file)
data = loader.load()
@@ -182,16 +185,14 @@ def processTextFile(self, text_file, question, temperature=0.8, max_new_tokens=1
text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
docs = text_splitter.split_documents(data)

# Generate embeddings locally using GPT4All
gpt4all_embeddings = GPT4AllEmbeddings()
vectorstore = Chroma.from_documents(documents=docs, embedding=gpt4all_embeddings)
# Generate embeddings using HuggingFaceEmbeddings
huggingface_embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vectorstore = Chroma.from_documents(documents=docs, embedding=huggingface_embeddings)

# Load GPT4All model for inference
llm = LlamaCpp(
model_path="lib/models/yuna/" + self.config["server"]["yuna_default_model"],
model_path="lib/models/yuna/yuna-ai-v3-q5_k_m.gguf",
temperature=temperature,
max_new_tokens=max_new_tokens,
context_window=context_window,
verbose=False,
)

@@ -200,7 +201,7 @@ def processTextFile(self, text_file, question, temperature=0.8, max_new_tokens=1

# Ask a question
result = qa.invoke(question)
return result
return result['result']

def get_history_text(self, chat_history, text, useHistory, yunaConfig):
history = ''
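Pieced together, the updated processTextFile flow now looks roughly like the sketch below: HuggingFaceEmbeddings replaces GPT4AllEmbeddings, the GGUF model path is hard-coded, and the result dict is unwrapped to its 'result' field. The file path, question, and temperature of 0.6 mirror the diffs here and in lib/router.py; treat this as an illustrative sketch, not the exact repository code.

from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.llms import LlamaCpp
from langchain.chains import RetrievalQA

# Load and chunk the uploaded text file, as processTextFile does
docs = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0).split_documents(
    TextLoader("static/text/content.txt").load()
)

# Embed locally with the MiniLM sentence-transformer and index the chunks in Chroma
vectorstore = Chroma.from_documents(
    documents=docs,
    embedding=HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2"),
)

# Answer retrieval-augmented questions with the local GGUF model via llama.cpp
llm = LlamaCpp(
    model_path="lib/models/yuna/yuna-ai-v3-q5_k_m.gguf",
    temperature=0.6,  # the router now passes 0.6 explicitly
    verbose=False,
)
qa = RetrievalQA.from_chain_type(llm, retriever=vectorstore.as_retriever())
print(qa.invoke("What is this file about?")["result"])  # placeholder question
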
2 changes: 1 addition & 1 deletion lib/router.py
@@ -215,7 +215,7 @@ def handle_textfile_request(chat_generator, self):
query = request.form['query']
text_file.save('static/text/content.txt')

result = chat_generator.processTextFile('static/text/content.txt', query)
result = chat_generator.processTextFile('static/text/content.txt', query, 0.6)

return jsonify({'response': result})

21 changes: 12 additions & 9 deletions lib/vision.py
@@ -10,13 +10,16 @@
model_path="lib/models/agi/miru/" + config["server"]["miru_default_model"],
chat_handler=MoondreamChatHandler(clip_model_path="lib/models/agi/miru/" + config["server"]["eyes_default_model"]),
n_ctx=4096,
last_n_tokens_size=config["ai"]["last_n_tokens_size"],
seed=config["ai"]["seed"],
n_batch=config["ai"]["batch_size"],
n_gpu_layers=config["ai"]["gpu_layers"],
n_threads=config["ai"]["threads"],
use_mmap=config["ai"]["use_mmap"],
use_mlock=config["ai"]["use_mlock"],
flash_attn=config["ai"]["flash_attn"],
verbose=False,
offload_kqv=config["ai"]["offload_kqv"],
verbose=False
) if config["ai"]["miru"] == True else ""

if config["ai"]["art"] == True:
@@ -50,14 +53,14 @@ def capture_image(image_path=None, prompt=None, use_cpu=False, speech=False):

result = llm.create_chat_completion(
messages = [
{"role": "system", "content": "You are an assistant who perfectly describes images and answers questions about them."},
{
"role": "user",
"content": [
{"type" : "text", "text": prompt},
{"type": "image_url", "image_url": {"url": "file://" + abs_image_path } }
]
}
]
)

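For context, a minimal sketch of the multimodal call this constructor feeds, using llama-cpp-python's MoondreamChatHandler as above (model filenames come from static/config.json below; the prompt and image path are placeholders):

from llama_cpp import Llama
from llama_cpp.llama_chat_format import MoondreamChatHandler

# Vision model plus CLIP projector, mirroring the constructor in lib/vision.py
llm = Llama(
    model_path="lib/models/agi/miru/yuna-ai-miru-v0.gguf",
    chat_handler=MoondreamChatHandler(clip_model_path="lib/models/agi/miru/yuna-ai-miru-eye-v0.gguf"),
    n_ctx=4096,
    n_gpu_layers=-1,
    verbose=False,
)

result = llm.create_chat_completion(messages=[
    {"role": "system", "content": "You are an assistant who perfectly describes images and answers questions about them."},
    {"role": "user", "content": [
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": "file:///tmp/photo.png"}},
    ]},
])
print(result["choices"][0]["message"]["content"])
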
27 changes: 18 additions & 9 deletions static/config.json
@@ -4,17 +4,17 @@
"Yuki",
"Yuna"
],
"hinmitsu": false,
"himitsu": false,
"agi": false,
"emotions": false,
"art": false,
"miru": false,
"search": false,
"max_new_tokens": 512,
"context_length": 2048,
"max_new_tokens": 256,
"context_length": 1536,
"temperature": 0.7,
"repetition_penalty": 1.1,
"last_n_tokens": 128,
"last_n_tokens_size": 128,
"seed": -1,
"top_k": 100,
"top_p": 0.92,
@@ -33,14 +33,14 @@
"batch_size": 512,
"threads": 8,
"gpu_layers": -1,
"use_mmap": true,
"flash_attn": true,
"use_mlock": true
"use_mlock": true,
"offload_kqv": true
},
"server": {
"port": "",
"url": "",
"default_history_file": "history_template:general.json",
"images": "images/",
"yuna_default_model": "yuna-ai-v3-q5_k_m.gguf",
"miru_default_model": "yuna-ai-miru-v0.gguf",
"eyes_default_model": "yuna-ai-miru-eye-v0.gguf",
@@ -49,10 +49,19 @@
"device": "mps",
"yuna_text_mode": "native",
"yuna_audio_mode": "siri",
"yuna_audio_name": "1.wav",
"yuna_reference_audio": "audio.mp3",
"yuna_reference_audio": "1.wav",
"output_audio_format": "audio.aiff"
},
"settings": {
"streaming": true,
"customConfig": true,
"default_history_file": "history_template:general.json",
"default_kanojo": "Yuna",
"default_prompt_template": "dialog",
"background_call": false,
"nsfw_filter": false,
"dark_mode": true
},
"security": {
"secret_key": "YourSecretKeyHere123!",
"encryption_key": "zWZnu-lxHCTgY_EqlH4raJjxNJIgPlvXFbdk45bca_I=",
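The renamed last_n_tokens_size key and the new flash_attn/offload_kqv keys line up one-to-one with the llama-cpp-python Llama arguments used in lib/generate.py and lib/vision.py above. A minimal sketch of that wiring, assuming the config is read from its path in this repository:

import json
from llama_cpp import Llama

with open("static/config.json") as f:
    config = json.load(f)

ai = config["ai"]

# Each renamed or newly added "ai" key maps directly onto a Llama constructor argument
llm = Llama(
    model_path="lib/models/yuna/" + config["server"]["yuna_default_model"],
    n_ctx=ai["context_length"],
    last_n_tokens_size=ai["last_n_tokens_size"],
    seed=ai["seed"],
    n_batch=ai["batch_size"],
    n_gpu_layers=ai["gpu_layers"],
    n_threads=ai["threads"],
    use_mmap=ai["use_mmap"],
    use_mlock=ai["use_mlock"],
    flash_attn=ai["flash_attn"],
    offload_kqv=ai["offload_kqv"],
    verbose=False,
)
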
3 changes: 1 addition & 2 deletions static/css/index.css
@@ -33,7 +33,7 @@ body {
height: 100vh !important;
}

#profile-card {
.profile-card {
overflow-y: scroll;
height: 100vh;
}
@@ -185,7 +185,6 @@ button {
.settings-container {
margin: 50px auto;
background-color: #1f1f1f;
padding: 20px;
border-radius: 8px;
margin-top: 50px !important;
}
40 changes: 24 additions & 16 deletions static/js/creator.js
@@ -1,28 +1,26 @@
// Get the necessary elements
const promptTemplateTextarea = document.querySelector('#article-prompt-template');
const bodyTextTextarea = document.querySelector('#body-text-article-container');
const resultTextarea = document.querySelector('#result-create-article');
const submitButton = document.getElementById('send-create-article');
const promptTemplateTextareaArticle = document.querySelector('#article-prompt-template');
const bodyTextTextareaArticle = document.querySelector('#body-text-article-container');
const resultTextareaArticle = document.querySelector('#result-create-article');
const submitButtonArticle = document.getElementById('send-create-article');

// Set the default text for the Prompt Template block
const defaultPromptTemplate = promptTemplateManager.buildPrompt('himitsuAssistant');
promptTemplateTextarea.value = defaultPromptTemplate.replace('### Instruction:\n', '### Instruction:\n{body_text}');
promptTemplateTextareaArticle.value = defaultPromptTemplate.replace('### Instruction:\n', '### Instruction:\n{body_text}');

// Function to send the request to the server
async function sendRequest() {
activeElement = document.getElementById('body-text-article-container');

const bodyText = bodyTextTextarea.value;
const promptTemplate = promptTemplateTextarea.value.replace('{body_text}', bodyText);
async function sendRequestArticle() {
const bodyText = bodyTextTextareaArticle.value;
const promptTemplate = promptTemplateTextareaArticle.value.replace('{body_text}', bodyText);

// Clear the result textarea before starting
resultTextarea.value = '';
resultTextareaArticle.value = '';

messageManagerInstance.sendMessage(promptTemplate, null, imageData = '', url = '/message', naked = false, stream = true, outputElement = resultTextarea);
messageManagerInstance.sendMessage(promptTemplate, null, imageData = '', url = '/message', naked = false, stream = true, outputElement = resultTextareaArticle);
}

// Add an event listener to the submit button
submitButton.addEventListener('click', sendRequest);
submitButtonArticle.addEventListener('click', sendRequestArticle);

// Get the necessary elements for the Presentation tab
const presentationPromptTemplateTextarea = document.getElementById('presentation-prompt-template');
@@ -38,8 +36,6 @@ presentationPromptTemplateTextarea.value = defaultPresentationPromptTemplate;

// Function to send the request to the server for the Presentation tab
async function sendPresentationRequest() {
activeElement = document.getElementById('user-input-presentation-container');

const userInput = presentationUserInputTextarea.value;
const promptTemplate = presentationPromptTemplateTextarea.value.replace('{user_input}', userInput);

@@ -56,4 +52,16 @@ function copyToDraft() {

// Add event listeners to the buttons
generateButton.addEventListener('click', sendPresentationRequest);
copyToDraftButton.addEventListener('click', copyToDraft);

// Function for naked mode
const nakedWorkArea = document.querySelector('#naked-work-area');
const submitButtonNaked = document.getElementById('send-create-naked');

// Function to send the request to the server
async function sendRequestNaked() {
messageManagerInstance.sendMessage(nakedWorkArea.value, null, imageData = '', url = '/message', naked = false, stream = true, outputElement = nakedWorkArea);
}

// Add an event listener to the submit button
submitButtonNaked.addEventListener('click', sendRequestNaked);
