From 7c0a9b6e6cbb9fde5970961a8711aea0d8c5f697 Mon Sep 17 00:00:00 2001
From: ScottVR
Date: Mon, 20 Mar 2023 12:28:15 -0500
Subject: [PATCH 1/2] fixed various syntax errors keeping the code from
 functioning: missing "scripts." on line 13, (un)indent on lines 611 and 618
 of inspector. various ] and ) issues on ln 245, 328, 445, 486, and 599,
 change += to + on ln 259 of solver.

---
 scripts/embedding_inspector.py |  6 +++---
 scripts/embedding_solver.py    | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/scripts/embedding_inspector.py b/scripts/embedding_inspector.py
index 92fcfff..359c2ba 100644
--- a/scripts/embedding_inspector.py
+++ b/scripts/embedding_inspector.py
@@ -10,7 +10,7 @@
 import torch, os
 from modules.textual_inversion.textual_inversion import Embedding
 import math, random
-from embedding_solver import EmbeddingGroupFinder
+from scripts.embedding_solver import EmbeddingGroupFinder
 
 MAX_TABS = 20 # max number of tokens to save per embedding.
 MAX_NUM_MIX = 100 # number of tokens that can be mixed to make a new token.
@@ -608,14 +608,14 @@ def add_tab():
                     mix_inputs.append(gr.Textbox(label="Name "+str(n), lines=1, placeholder="Enter name of token/embedding or ID"))
                 with gr.Column():
                     mix_sliders.append(gr.Slider(label="Multiplier",value=1.0,minimum=-1.0, maximum=1.0, step=0.1))
-            if MAX_NUM_MIX>SHOW_NUM_MIX:
+        if MAX_NUM_MIX>SHOW_NUM_MIX:
             with gr.Accordion('',open=False):
                 for n in range(SHOW_NUM_MIX,MAX_NUM_MIX):
                     tab.add_row(gr.Row())
                     with tab.rows[-1]:
                         with gr.Column():
                             mix_inputs.append(gr.Textbox(label="Name "+str(n), lines=1, placeholder="Enter name of token/embedding or ID"))
-                            with gr.Column():
+                        with gr.Column():
                             mix_sliders.append(gr.Slider(label="Multiplier",value=1.0,minimum=-1.0, maximum=1.0, step=0.1))
 
     with gr.Row():
diff --git a/scripts/embedding_solver.py b/scripts/embedding_solver.py
index 28b1d32..4bb0c39 100644
--- a/scripts/embedding_solver.py
+++ b/scripts/embedding_solver.py
@@ -242,7 +242,7 @@ def print_best(self, best_emb_groups_list, partial=False):
         for best_emb_groups, iterations in best_emb_groups_list:
             if best_emb_groups is None:
                 continue
-            self.textbox += f"\n\n%0.1f :: Iteration: {iterations}\n" % (time.time() - self.start_time))
+            self.textbox += f"\n\n%0.1f :: Iteration: {iterations}\n" % (time.time() - self.start_time)
             for subset_id in [len(best_emb_groups) - 1]:
                 subset = best_emb_groups[subset_id]
                 if len(subset) == 0:
@@ -256,7 +256,7 @@ def print_best(self, best_emb_groups_list, partial=False):
                 self.mix_sliders.append(weight)
                 self.textbox += f" {weight} * {mapped_emb_id}: {emb_id_to_name(int(mapped_emb_id), self.tokenizer)}\n\n"
         if partial == True:
-            return self.textbox += "---------------------------------\n\n"
+            return self.textbox = "---------------------------------\n\n"
         s = sorted(self.target_emb)
         rel_diff = s[int(0.75 * len(self.target_emb))] - s[int(0.25 * len(self.target_emb) )]
         self.textbox += f"this_emb: {group_vec.detach().numpy()}\n"
@@ -325,7 +325,7 @@ def encode_token(self, t):
         t = t.lower()
         try:
             if t[-1] == "*":
-                return self.orig_to_sorted[text_to_emb_ids([t[:-1]]]
+                return self.orig_to_sorted[text_to_emb_ids([t[:-1]])]
             else:
                 return self.orig_to_sorted[text_to_emb_ids(t + "", self.tokenizer)]
         except KeyError:
@@ -442,7 +442,7 @@ def evaluate_optimization_methodology(self, last_time, score_improvement):
         best_optimization_records = [ (self.optimization_records[key], key) for key in self.optimization_records ]
         best_optimization_records.sort()
         for rec, key in best_optimization_records:
-            self.textbox += "*/t=%0.7f (%s), #=%d, t=%0.1f\n" % (rec[0], key, rec[1], rec[2]))
+            self.textbox += "*/t=%0.7f (%s), #=%d, t=%0.1f\n" % (rec[0], key, rec[1], rec[2])
         self.textbox += "\n"
 
         self.last_printed_optimization_time = cur_time
@@ -483,7 +483,7 @@ def solve(self):
         while self.emb_id < len(self.target_embs):
             t = self.target_embs[self.emb_id]
 
-            self.textbox += f"\n === Embedding # {self.emb_id+1} / {len(self.target_embs)} === \n\n")
+            self.textbox += f"\n === Embedding # {self.emb_id+1} / {len(self.target_embs)} === \n\n"
 
             # if not self.do_resume:
             self.set_target(t)
@@ -596,7 +596,7 @@ def solve_one(self):
                 self.save_near_emb_cache()
 
         if len(to_precache) > 0:
-            self.textbox += self.time_str() + f" {emb_id_to_name(to_precache[0, self.tokenizer)]}> \r"
+            self.textbox += self.time_str() + f" {emb_id_to_name(to_precache[0, self.tokenizer])}> \r"
             self.near_emb_cache[to_precache[0]] = self.find_near_embs(self.orig_all_embs[to_precache[0]])
             to_precache = to_precache[1:]
             if len(to_precache) == 0:

From 5a60dca4a0e8bfb7231a3d8374a68c4c76cac257 Mon Sep 17 00:00:00 2001
From: ScottVR
Date: Mon, 20 Mar 2023 12:46:03 -0500
Subject: [PATCH 2/2] fixed a fatfinger-on-exit issue I didn't notice before I
 pushed

---
 scripts/embedding_solver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/embedding_solver.py b/scripts/embedding_solver.py
index 4bb0c39..a14bbf0 100644
--- a/scripts/embedding_solver.py
+++ b/scripts/embedding_solver.py
@@ -256,7 +256,7 @@ def print_best(self, best_emb_groups_list, partial=False):
                 self.mix_sliders.append(weight)
                 self.textbox += f" {weight} * {mapped_emb_id}: {emb_id_to_name(int(mapped_emb_id), self.tokenizer)}\n\n"
         if partial == True:
-            return self.textbox = "---------------------------------\n\n"
+            return self.textbox + "---------------------------------\n\n"
         s = sorted(self.target_emb)
         rel_diff = s[int(0.75 * len(self.target_emb))] - s[int(0.25 * len(self.target_emb) )]
         self.textbox += f"this_emb: {group_vec.detach().numpy()}\n"
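
Note on the PATCH 2/2 one-liner: in Python, plain assignment ("=") and
augmented assignment ("+=") are statements, not expressions, so neither the
pre-patch "return self.textbox += ..." nor the PATCH 1/2 fatfinger
"return self.textbox = ..." will parse at all. PATCH 2/2 settles on
"return self.textbox + ...", which evaluates the concatenation and returns it
while leaving self.textbox unchanged. A minimal standalone sketch (Reporter is
an illustrative stand-in, not a class from this repo):

    class Reporter:
        def __init__(self):
            self.textbox = ""

        def finish(self):
            # SyntaxError: "+=" is a statement and cannot follow "return".
            #   return self.textbox += "---\n"
            # SyntaxError: plain "=" is a statement too (the PATCH 1/2 typo).
            #   return self.textbox = "---\n"
            # Valid: evaluate the concatenation and return it; self.textbox
            # itself is not mutated.
            return self.textbox + "---\n"

    r = Reporter()
    r.textbox = "partial results"
    print(r.finish())  # -> "partial results---"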