From c3b3709b70b3b643f9d6c834faccf0e21f58e4df Mon Sep 17 00:00:00 2001
From: Tom Zayats
Date: Tue, 10 Dec 2024 10:25:59 -0800
Subject: [PATCH] update msg

---
 journeys/evaluation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/journeys/evaluation.py b/journeys/evaluation.py
index e97776bb..ec01ffac 100644
--- a/journeys/evaluation.py
+++ b/journeys/evaluation.py
@@ -580,7 +580,7 @@ def evaluation_mode_show() -> None:
     evaluation_data_dialog()
 
     st.write(
-        "Welcome to evaluation mode 🧪! Here you can evaluate your semantic model against multiple golden queries where the expected SQL is known from the **Evaluation Table**. The accuracy metric will be computed, and the results of the evaluation run will be stored in the **Evaluation Results Table**."
+        "Welcome! 🧪 In evaluation mode you can evaluate your semantic model using pairs of golden questions and their expected SQL statements. These pairs should be captured in an **Evaluation Table**. Accuracy metrics will be shown, and the results will be stored in an **Evaluation Results Table**."
     )
 
     # TODO: find a less awkward way of specifying this.
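
Note: the **Evaluation Table** this message refers to pairs each golden question with the SQL it is expected to produce. As a minimal sketch of that shape (the names GoldenQuery, question, expected_sql, and accuracy below are illustrative assumptions, not the actual schema used by journeys/evaluation.py):

from dataclasses import dataclass

@dataclass
class GoldenQuery:
    # Natural-language question posed to the semantic model (assumed field name).
    question: str
    # SQL statement the generated query is compared against (assumed field name).
    expected_sql: str

def accuracy(matches: list[bool]) -> float:
    # Fraction of golden queries whose generated SQL matched the expectation.
    return sum(matches) / len(matches) if matches else 0.0

# Example: one golden pair and a trivial accuracy computation.
golden = [GoldenQuery("Total sales by region?",
                      "SELECT region, SUM(amount) FROM sales GROUP BY region")]
print(accuracy([True]))  # 1.0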