
Commit d666a16

fix

dayesouza committed Dec 5, 2024
1 parent bf8170e
Showing 3 changed files with 11 additions and 15 deletions.
app/util/ui_components.py (2 changes: 1 addition & 1 deletion)
@@ -804,7 +804,7 @@ def build_validation_ui(

 def check_ai_configuration(enforce_structured_output=False):
     ai_configuration = UIOpenAIConfiguration().get_configuration()
-    if ai_configuration.api_key == "":
+    if ai_configuration.api_key == "" and ai_configuration.api_type == "Open AI":
         st.warning("Please set your OpenAI API key in the Settings page.")
     if ai_configuration.model == "":
         st.warning("Please set your OpenAI model in the Settings page.")
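The effect of the ui_components.py change, as a minimal standalone sketch (not code from the repository): the missing-key warning now fires only when the configured api_type is "Open AI", so a configuration with an empty api_key but a different api_type is no longer flagged. FakeConfig, needs_api_key_warning, and the "Azure OpenAI" value below are illustrative assumptions.

from dataclasses import dataclass

# Illustrative stand-in for the object returned by
# UIOpenAIConfiguration().get_configuration(); only the fields named in the
# diff are modeled, and the defaults are assumed.
@dataclass
class FakeConfig:
    api_key: str = ""
    api_type: str = "Open AI"
    model: str = ""

def needs_api_key_warning(cfg: FakeConfig) -> bool:
    # Post-commit condition: warn about a missing key only for the "Open AI" api_type.
    return cfg.api_key == "" and cfg.api_type == "Open AI"

assert needs_api_key_warning(FakeConfig(api_key="", api_type="Open AI"))
assert not needs_api_key_warning(FakeConfig(api_key="", api_type="Azure OpenAI"))  # assumed alternative api_type

In the app itself the same condition drives st.warning rather than returning a boolean.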
app/workflows/query_text_data/workflow.py (22 changes: 9 additions & 13 deletions)
@@ -64,11 +64,7 @@ async def create(sv: SessionVariables, workflow=None):
     with uploader_tab:
         st.markdown("##### Upload data for processing")
 
-        upload_type = st.radio(
-            "Upload type",
-            options=["Raw files", "Processed data"],
-            key=f"{workflow}_data_source",
-        )
+        upload_type = "Raw files"
         files = None
         file_chunks = None
         if upload_type == "Raw files":
@@ -135,14 +131,14 @@ async def create(sv: SessionVariables, workflow=None):
             message = message.replace("**1** periods", "**1** period")
             st.success(message)
 
-        if qtd.label_to_chunks and upload_type == "Raw files":
-            st.download_button(
-                label="Download chunk data",
-                help="Export chunk data as CSV",
-                data=qtd.get_chunks_as_df().to_csv(),
-                file_name=f"processed_data_{len(qtd.label_to_chunks)}_query_text.csv",
-                mime="text/csv",
-            )
+        # if qtd.label_to_chunks and upload_type == "Raw files":
+        #     st.download_button(
+        #         label="Download chunk data",
+        #         help="Export chunk data as CSV",
+        #         data=qtd.get_chunks_as_df().to_csv(),
+        #         file_name=f"processed_data_{len(qtd.label_to_chunks)}_query_text.csv",
+        #         mime="text/csv",
+        #     )
 
     with graph_tab:
         if qtd.stage.value < QueryTextDataStage.CHUNKS_PROCESSED.value:
intelligence_toolkit/AI/base_embedder.py (2 changes: 1 addition & 1 deletion)
@@ -144,7 +144,7 @@ async def embed_store_many(
         hash_all_texts = [hash_text(item["text"]) for item in batch_data]
         existing = self.vector_store.search_by_column(hash_all_texts, "hash")
 
-        if len(existing.get("vector")) > 0 and cache_data:
+        if len(existing.get("vector")) > 0:
             existing_texts = existing.sort_values("text")
             for item in existing_texts.to_numpy():
                 all_data.append(
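For the base_embedder.py change, here is a minimal sketch of the hash-based cache lookup that the edited condition guards, using a pandas DataFrame as a stand-in for the vector store. hash_text, the store layout, and the behavior of search_by_column are assumptions inferred from the visible lines rather than the repository's actual implementation; the point is that cached rows are now reused whenever matching hashes exist, with no dependence on the cache_data flag that previously gated the branch.

import hashlib

import pandas as pd

def hash_text(text: str) -> str:
    # Stand-in for the project's hash_text helper.
    return hashlib.sha256(text.encode("utf-8")).hexdigest()

# Toy "vector store" with the columns referenced in the diff: text, hash, vector.
store = pd.DataFrame(
    {
        "text": ["alpha", "beta"],
        "hash": [hash_text("alpha"), hash_text("beta")],
        "vector": [[0.1, 0.2], [0.3, 0.4]],
    }
)

batch_texts = ["beta", "gamma"]
hash_all_texts = [hash_text(t) for t in batch_texts]

# Rough equivalent of vector_store.search_by_column(hash_all_texts, "hash"):
# return the rows whose hash matches one of the requested hashes.
existing = store[store["hash"].isin(hash_all_texts)]

all_data = []
if len(existing.get("vector")) > 0:  # post-commit: no cache_data guard
    for item in existing.sort_values("text").to_numpy():
        all_data.append(item)  # reuse the cached row instead of re-embedding

print(f"{len(all_data)} cached row(s) reused, {len(batch_texts) - len(all_data)} text(s) still to embed")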
