From 59c36226b7a5ad2ae2f62442c9ea2614f6f9981d Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Wed, 8 May 2024 11:07:54 +0000 Subject: [PATCH 01/18] Change message format --- backend/scripts/send_slack_report/send_slack_report.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/backend/scripts/send_slack_report/send_slack_report.py b/backend/scripts/send_slack_report/send_slack_report.py index 6c6d68e8e51..4db31f8effc 100755 --- a/backend/scripts/send_slack_report/send_slack_report.py +++ b/backend/scripts/send_slack_report/send_slack_report.py @@ -97,8 +97,7 @@ def classify_initial_queries(): total_initial_queries = sum(tally_json.values()) for k, v in tally_json.items(): percentage = v / total_initial_queries * 100 - classifications += f"There were {v} queries (representing {percentage:.1f}% of\ - all initial queries) about {k}\n" + classifications += f"{k}: {v} queries ({percentage:.1f}%\n" return classifications From 26b154e1498e8fdd5cf6133d53dfc7168262c88b Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Wed, 8 May 2024 11:19:39 +0000 Subject: [PATCH 02/18] Mirror prod --- backend/scripts/send_slack_report/send_slack_report.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/scripts/send_slack_report/send_slack_report.py b/backend/scripts/send_slack_report/send_slack_report.py index 4db31f8effc..f08be3c1959 100755 --- a/backend/scripts/send_slack_report/send_slack_report.py +++ b/backend/scripts/send_slack_report/send_slack_report.py @@ -97,7 +97,7 @@ def classify_initial_queries(): total_initial_queries = sum(tally_json.values()) for k, v in tally_json.items(): percentage = v / total_initial_queries * 100 - classifications += f"{k}: {v} queries ({percentage:.1f}%\n" + classifications += f"{k}: {v} queries ({percentage:.1f}%)\n" return classifications From 967038ac9e651e1923e658fa8f93276a25a89d34 Mon Sep 17 00:00:00 2001 From: Mark Botterill <97025274+markbotterill@users.noreply.github.com> Date: Fri, 10 May 2024 13:13:38 +0100 Subject: [PATCH 03/18] Only send weekly --- .github/workflows/send-slack-metrics.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/send-slack-metrics.yml b/.github/workflows/send-slack-metrics.yml index 0b571aa2f95..4cdd81e8654 100644 --- a/.github/workflows/send-slack-metrics.yml +++ b/.github/workflows/send-slack-metrics.yml @@ -2,7 +2,7 @@ name: Send Slack Metrics # NB This only works on the default (prod) branch on: schedule: - - cron: '0 12 * * *' + - cron: '0 9 * * 5' jobs: deploy: From 5f5f554e12ae1c83cb25209afe4c516db57ada0c Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 13:38:26 +0000 Subject: [PATCH 04/18] Add automated response generation to HubGPT --- Makefile | 3 + backend/scripts/hubgpt_eval_automation.py | 99 +++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 backend/scripts/hubgpt_eval_automation.py diff --git a/Makefile b/Makefile index e6b7d456eb3..bb17d0f8a4d 100644 --- a/Makefile +++ b/Makefile @@ -34,3 +34,6 @@ re-deploy: send-slack-metrics: docker exec danswer-stack-background-1 python /app/scripts/send_slack_report/send_slack_report.py + +send-hubgpt-eval: + docker exec danswer-stack-background-1 python /app/scripts/send_slack_report/hubgpt_eval_automation.py diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py new file mode 100644 index 00000000000..6008c67edfc --- /dev/null +++ b/backend/scripts/hubgpt_eval_automation.py @@ -0,0 +1,99 @@ +# This file is 
used to demonstrate how to use the backend APIs directly +# In this case, the equivalent of asking a question in Danswer Chat in a new chat session +import datetime +import json +import os + +import pandas as pd +import requests +from slack_sdk import WebClient + + +def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: + headers = {"Authorization": f"Bearer {api_key}"} if api_key else None + session_endpoint = danswer_url + "/api/chat/create-chat-session" + + response = requests.post(session_endpoint, headers=headers, json={"persona_id": 0}) + response.raise_for_status() + + new_session_id = response.json()["chat_session_id"] + return new_session_id + + +def process_question(danswer_url: str, question: str, api_key: str | None) -> None: + message_endpoint = danswer_url + "/api/chat/send-message" + + chat_session_id = create_new_chat_session(danswer_url, api_key) + + headers = {"Authorization": f"Bearer {api_key}"} if api_key else None + + data = { + "message": question, + "chat_session_id": chat_session_id, + "parent_message_id": None, + # Default Question Answer prompt + "prompt_id": 0, + # Not specifying any specific docs to chat to, we want to run a search + "search_doc_ids": None, + "retrieval_options": { + "run_search": "always", + "real_time": True, + "enable_auto_detect_filters": False, + # No filters applied, check all sources, document-sets, time ranges, etc. + "filters": {}, + }, + } + + with requests.post(message_endpoint, headers=headers, json=data) as response: + response.raise_for_status() + response_str = "" + for packet in response.iter_lines(): + response_text = json.loads(packet.decode()) + # Can also check "top_documents" to capture the streamed search results + # that include the highest matching documents to the query + # or check "message_id" to get the message_id used as parent_message_id + # to create follow-up messages + new_token = response_text.get("answer_piece") + if new_token: + response_str += new_token + return response_str + + +def upload_to_slack(filename, channel_id): + slack_client = WebClient(token=os.environ.get("SLACK_BOT_TOKEN")) + size = os.stat(filename).st_size + response = slack_client.files_getUploadURLExternal(filename=filename, length=size) + upload_url = response.data["upload_url"] + file_id = response.data["file_id"] + post_response = requests.post(url=upload_url, data=open(filename, "rb")) + if post_response.status_code == 200: + upload_response = slack_client.files_completeUploadExternal( + files=[{"id": file_id, "title": "Monthly Performance Evaluation"}], + channel_id=channel_id, + ) + return upload_response.status_code + + +if __name__ == "__main__": + data = pd.read_csv("hubgpt_eval_automated.csv") + + queries_list = data.Query.tolist() + + responses = [] + + for num, query in enumerate(queries_list): + print(f"Query {num+1}/{len(queries_list)}: {query}") + response = process_question( + danswer_url=os.getenv("WEB_DOMAIN"), question=query, api_key=None + ) + responses.append(response) + print("\n ------------------- \n") + + today_str = str(datetime.date.today()) + data[today_str] = responses + + # Record + send info + data.to_csv("hubgpt_eval_automated.csv", index=False) + print("Complete") + CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") + upload_to_slack("hubgpt_eval_automated.csv", CHANNEL_ID) From 3793d5bcba1575d537c3ed6927f726add2d9d74e Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 16:29:19 +0000 Subject: [PATCH 05/18] Finalize automation of query testing --- 
.github/workflows/send_performance_demo.yml | 39 +++++++++++++++++ .gitignore | 1 + backend/scripts/hubgpt_eval_automation.py | 48 +++++++++++++++++++++ 3 files changed, 88 insertions(+) create mode 100644 .github/workflows/send_performance_demo.yml diff --git a/.github/workflows/send_performance_demo.yml b/.github/workflows/send_performance_demo.yml new file mode 100644 index 00000000000..bc072e20bb7 --- /dev/null +++ b/.github/workflows/send_performance_demo.yml @@ -0,0 +1,39 @@ +name: Run Performance Test +# NB This only works on the default (prod) branch +on: + workflow_dispatch: + # schedule: + # - cron: '0 9 1-7 * 5' + +jobs: + deploy: + runs-on: ubuntu-latest + environment: + name: production + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Configure SSH + run: | + mkdir -p ~/.ssh/ + echo "$PROD_SSH_KEY" > ~/.ssh/hubgpt_prod.key + chmod 600 ~/.ssh/hubgpt_prod.key + cat >>~/.ssh/config <>>>>>> Stashed changes from slack_sdk import WebClient @@ -13,7 +19,15 @@ def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: headers = {"Authorization": f"Bearer {api_key}"} if api_key else None session_endpoint = danswer_url + "/api/chat/create-chat-session" +<<<<<<< Updated upstream response = requests.post(session_endpoint, headers=headers, json={"persona_id": 0}) +======= + response = requests.post( + session_endpoint, + headers=headers, + json={"persona_id": 0} + ) +>>>>>>> Stashed changes response.raise_for_status() new_session_id = response.json()["chat_session_id"] @@ -57,8 +71,12 @@ def process_question(danswer_url: str, question: str, api_key: str | None) -> No if new_token: response_str += new_token return response_str +<<<<<<< Updated upstream +======= + +>>>>>>> Stashed changes def upload_to_slack(filename, channel_id): slack_client = WebClient(token=os.environ.get("SLACK_BOT_TOKEN")) size = os.stat(filename).st_size @@ -68,6 +86,7 @@ def upload_to_slack(filename, channel_id): post_response = requests.post(url=upload_url, data=open(filename, "rb")) if post_response.status_code == 200: upload_response = slack_client.files_completeUploadExternal( +<<<<<<< Updated upstream files=[{"id": file_id, "title": "Monthly Performance Evaluation"}], channel_id=channel_id, ) @@ -94,6 +113,35 @@ def upload_to_slack(filename, channel_id): # Record + send info data.to_csv("hubgpt_eval_automated.csv", index=False) +======= + files=[{"id": file_id, "title": "Monthly Performance Evaluation"}], channel_id=channel_id + ) + return upload_response.status_code + +if __name__ == "__main__": + + data = pd.read_csv("hubgpt_eval_automated.csv") + + queries_list = data.Query.tolist() + + responses = [] + + for num, query in enumerate(queries_list): + print(f"Query {num+1}/{len(queries_list)}: {query}") + # response = process_question(danswer_url="https://hubgpt-staging.idinsight.io", + # question=query, + # api_key=None) + response = 1 + print(response) + responses.append(response) + print("\n ------------------- \n") + + today_str = str(datetime.date.today()) + data[today_str] = responses + + # Record + send info + data.to_csv("hubgpt_eval_automated.csv", index = False) +>>>>>>> Stashed changes print("Complete") CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") upload_to_slack("hubgpt_eval_automated.csv", CHANNEL_ID) From 7281e7d3982ff99a86d10c895c6331ccb0418c38 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 16:34:12 +0000 Subject: [PATCH 06/18] Fix typo --- .github/workflows/send_performance_demo.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/.github/workflows/send_performance_demo.yml b/.github/workflows/send_performance_demo.yml index bc072e20bb7..224fe350f1b 100644 --- a/.github/workflows/send_performance_demo.yml +++ b/.github/workflows/send_performance_demo.yml @@ -31,9 +31,9 @@ jobs: PROD_SSH_KEY: ${{ secrets.PROD_SSH_KEY }} PROD_EC2_HOST_IP: ${{ secrets.PROD_EC2_HOST_IP }} - - name: Trigger metric dispatch + - name: Trigger sample query run run: | ssh prod << 'EOF' cd danswer - make send-slack-metrics + make send-hubgpt-eval EOF From 061d60535e4c1371107f16d7b7539eb919a15934 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 16:35:21 +0000 Subject: [PATCH 07/18] Fix merge --- backend/scripts/hubgpt_eval_automation.py | 47 ----------------------- 1 file changed, 47 deletions(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index e602532a4ce..b1601f492d1 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -3,15 +3,9 @@ import datetime import json import os -<<<<<<< Updated upstream import pandas as pd import requests -======= -import pandas as pd -import requests - ->>>>>>> Stashed changes from slack_sdk import WebClient @@ -19,15 +13,7 @@ def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: headers = {"Authorization": f"Bearer {api_key}"} if api_key else None session_endpoint = danswer_url + "/api/chat/create-chat-session" -<<<<<<< Updated upstream response = requests.post(session_endpoint, headers=headers, json={"persona_id": 0}) -======= - response = requests.post( - session_endpoint, - headers=headers, - json={"persona_id": 0} - ) ->>>>>>> Stashed changes response.raise_for_status() new_session_id = response.json()["chat_session_id"] @@ -71,12 +57,8 @@ def process_question(danswer_url: str, question: str, api_key: str | None) -> No if new_token: response_str += new_token return response_str -<<<<<<< Updated upstream -======= - ->>>>>>> Stashed changes def upload_to_slack(filename, channel_id): slack_client = WebClient(token=os.environ.get("SLACK_BOT_TOKEN")) size = os.stat(filename).st_size @@ -86,34 +68,6 @@ def upload_to_slack(filename, channel_id): post_response = requests.post(url=upload_url, data=open(filename, "rb")) if post_response.status_code == 200: upload_response = slack_client.files_completeUploadExternal( -<<<<<<< Updated upstream - files=[{"id": file_id, "title": "Monthly Performance Evaluation"}], - channel_id=channel_id, - ) - return upload_response.status_code - - -if __name__ == "__main__": - data = pd.read_csv("hubgpt_eval_automated.csv") - - queries_list = data.Query.tolist() - - responses = [] - - for num, query in enumerate(queries_list): - print(f"Query {num+1}/{len(queries_list)}: {query}") - response = process_question( - danswer_url=os.getenv("WEB_DOMAIN"), question=query, api_key=None - ) - responses.append(response) - print("\n ------------------- \n") - - today_str = str(datetime.date.today()) - data[today_str] = responses - - # Record + send info - data.to_csv("hubgpt_eval_automated.csv", index=False) -======= files=[{"id": file_id, "title": "Monthly Performance Evaluation"}], channel_id=channel_id ) return upload_response.status_code @@ -141,7 +95,6 @@ def upload_to_slack(filename, channel_id): # Record + send info data.to_csv("hubgpt_eval_automated.csv", index = False) ->>>>>>> Stashed changes print("Complete") CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") upload_to_slack("hubgpt_eval_automated.csv", CHANNEL_ID) 
From 397b5f19ddaa3efc66149c3c03aef8621aae64c2 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 16:38:00 +0000 Subject: [PATCH 08/18] Removed incorrect filename --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index bb17d0f8a4d..8d2db05b767 100644 --- a/Makefile +++ b/Makefile @@ -36,4 +36,4 @@ send-slack-metrics: docker exec danswer-stack-background-1 python /app/scripts/send_slack_report/send_slack_report.py send-hubgpt-eval: - docker exec danswer-stack-background-1 python /app/scripts/send_slack_report/hubgpt_eval_automation.py + docker exec danswer-stack-background-1 python /app/scripts/hubgpt_eval_automation.py From e2a848e978c95c3f3ef29d88fe1af921a0ec259c Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 16:40:20 +0000 Subject: [PATCH 09/18] Fix CSV issue --- backend/scripts/hubgpt_eval_automation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index b1601f492d1..8d8eea5b926 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -74,7 +74,7 @@ def upload_to_slack(filename, channel_id): if __name__ == "__main__": - data = pd.read_csv("hubgpt_eval_automated.csv") + data = pd.read_csv("hubgpt_eval.csv") queries_list = data.Query.tolist() @@ -94,7 +94,7 @@ def upload_to_slack(filename, channel_id): data[today_str] = responses # Record + send info - data.to_csv("hubgpt_eval_automated.csv", index = False) + data.to_csv("hubgpt_eval.csv", index = False) print("Complete") CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") - upload_to_slack("hubgpt_eval_automated.csv", CHANNEL_ID) + upload_to_slack("hubgpt_eval.csv", CHANNEL_ID) From cfe8b9b3aeeadac8a6dbd15d21cbd5f10a828a71 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 16:44:47 +0000 Subject: [PATCH 10/18] Fix path --- backend/scripts/hubgpt_eval_automation.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 8d8eea5b926..3e3578d9682 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -8,6 +8,7 @@ import requests from slack_sdk import WebClient +CSV_PATH = "/app/scripts/hubgpt_eval.csv" def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: headers = {"Authorization": f"Bearer {api_key}"} if api_key else None @@ -74,7 +75,7 @@ def upload_to_slack(filename, channel_id): if __name__ == "__main__": - data = pd.read_csv("hubgpt_eval.csv") + data = pd.read_csv(CSV_PATH) queries_list = data.Query.tolist() @@ -82,7 +83,7 @@ def upload_to_slack(filename, channel_id): for num, query in enumerate(queries_list): print(f"Query {num+1}/{len(queries_list)}: {query}") - # response = process_question(danswer_url="https://hubgpt-staging.idinsight.io", + # response = process_question(danswer_url="https:/CSV_PATHdinsight.io", # question=query, # api_key=None) response = 1 @@ -94,7 +95,7 @@ def upload_to_slack(filename, channel_id): data[today_str] = responses # Record + send info - data.to_csv("hubgpt_eval.csv", index = False) + data.to_csv(CSV_PATH, index = False) print("Complete") CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") - upload_to_slack("hubgpt_eval.csv", CHANNEL_ID) + upload_to_slack(CSV_PATH, CHANNEL_ID) From 90df74e0d31d79e9a27f41f9b8d62b43e6bd02c7 Mon Sep 17 00:00:00 2001 From: Mark 
Botterill Date: Fri, 24 May 2024 16:48:11 +0000 Subject: [PATCH 11/18] Ready for prod --- backend/scripts/hubgpt_eval_automation.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 3e3578d9682..49b505b5708 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -83,11 +83,9 @@ def upload_to_slack(filename, channel_id): for num, query in enumerate(queries_list): print(f"Query {num+1}/{len(queries_list)}: {query}") - # response = process_question(danswer_url="https:/CSV_PATHdinsight.io", - # question=query, - # api_key=None) - response = 1 - print(response) + response = process_question( + danswer_url=os.getenv("WEB_DOMAIN"), question=query, api_key=None + ) responses.append(response) print("\n ------------------- \n") From b70033242904af1b725b8520bf65e2eaacc91702 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 17:01:07 +0000 Subject: [PATCH 12/18] Final tweaks --- backend/scripts/hubgpt_eval_automation.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 49b505b5708..988ca199fe4 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -8,7 +8,7 @@ import requests from slack_sdk import WebClient -CSV_PATH = "/app/scripts/hubgpt_eval.csv" +CSV_PATH = "hubgpt_eval.csv" def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: headers = {"Authorization": f"Bearer {api_key}"} if api_key else None @@ -84,9 +84,10 @@ def upload_to_slack(filename, channel_id): for num, query in enumerate(queries_list): print(f"Query {num+1}/{len(queries_list)}: {query}") response = process_question( - danswer_url=os.getenv("WEB_DOMAIN"), question=query, api_key=None + danswer_url="https://hubgpt.idinsight.io", question=query, api_key=None ) responses.append(response) + print(response) print("\n ------------------- \n") today_str = str(datetime.date.today()) From 955b21de17d63fd4a27e6dccf9a42f01b47acf14 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 17:04:46 +0000 Subject: [PATCH 13/18] CSV path --- backend/scripts/hubgpt_eval_automation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 988ca199fe4..015eb358b09 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -8,7 +8,7 @@ import requests from slack_sdk import WebClient -CSV_PATH = "hubgpt_eval.csv" +CSV_PATH = "/app/scipts/hubgpt_eval.csv" def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: headers = {"Authorization": f"Bearer {api_key}"} if api_key else None From 9bd779efccaf749b9f332ac2b861db2eb53ab303 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 24 May 2024 17:06:40 +0000 Subject: [PATCH 14/18] Fix CSV path typo --- backend/scripts/hubgpt_eval_automation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 015eb358b09..2e5018687b3 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -8,7 +8,7 @@ import requests from slack_sdk import WebClient -CSV_PATH = "/app/scipts/hubgpt_eval.csv" +CSV_PATH = 
"/app/scripts/hubgpt_eval.csv" def create_new_chat_session(danswer_url: str, api_key: str | None) -> int: headers = {"Authorization": f"Bearer {api_key}"} if api_key else None From f69ea131a2aae7da78c87781fae10462b6de7a95 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Tue, 28 May 2024 10:45:27 +0000 Subject: [PATCH 15/18] Add oauth toggling to Make command --- Makefile | 5 + backend/scripts/hubgpt_eval_automation.py | 3 +- .../docker-compose.analytics.yml | 226 ++++++++++++++++++ 3 files changed, 233 insertions(+), 1 deletion(-) create mode 100644 deployment/docker_compose/docker-compose.analytics.yml diff --git a/Makefile b/Makefile index 8d2db05b767..adf7fc2798d 100644 --- a/Makefile +++ b/Makefile @@ -36,4 +36,9 @@ send-slack-metrics: docker exec danswer-stack-background-1 python /app/scripts/send_slack_report/send_slack_report.py send-hubgpt-eval: + cd /home/ec2-user/danswer/deployment/docker_compose && \ + docker compose -p danswer-stack -f docker-compose.analytics.yml up -d --build + sleep 150 docker exec danswer-stack-background-1 python /app/scripts/hubgpt_eval_automation.py + docker compose -p danswer-stack -f docker-compose.prod.yml up -d --build + diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 2e5018687b3..0f199c6868e 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -97,4 +97,5 @@ def upload_to_slack(filename, channel_id): data.to_csv(CSV_PATH, index = False) print("Complete") CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") - upload_to_slack(CSV_PATH, CHANNEL_ID) + # upload_to_slack(CSV_PATH, CHANNEL_ID) + print("Bing bong") \ No newline at end of file diff --git a/deployment/docker_compose/docker-compose.analytics.yml b/deployment/docker_compose/docker-compose.analytics.yml new file mode 100644 index 00000000000..db2ac4b845a --- /dev/null +++ b/deployment/docker_compose/docker-compose.analytics.yml @@ -0,0 +1,226 @@ +version: '3' +services: + api_server: + image: danswer/danswer-backend:latest + build: + context: ../../backend + dockerfile: Dockerfile + command: > + /bin/sh -c "alembic upgrade head && echo \"Starting Danswer Api Server\" && uvicorn danswer.main:app --host 0.0.0.0 --port 8080" + depends_on: + - relational_db + - index + restart: always + env_file: + - .env.analytics + environment: + - AUTH_TYPE=${AUTH_TYPE:-google_oauth} + - POSTGRES_HOST=relational_db + - VESPA_HOST=index + - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server} + volumes: + - local_dynamic_storage:/home/storage + - file_connector_tmp_storage:/home/file_connector_storage + - model_cache_nltk:/root/nltk_data/ + - model_cache_huggingface:/root/.cache/huggingface/ + extra_hosts: + - "host.docker.internal:host-gateway" + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + + + background: + image: danswer/danswer-backend:latest + build: + context: ../../backend + dockerfile: Dockerfile + command: /usr/bin/supervisord + depends_on: + - relational_db + - index + restart: always + env_file: + - .env.analytics + environment: + - AUTH_TYPE=${AUTH_TYPE:-google_oauth} + - POSTGRES_HOST=relational_db + - VESPA_HOST=index + - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server} + - INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server} + volumes: + - local_dynamic_storage:/home/storage + - file_connector_tmp_storage:/home/file_connector_storage + - model_cache_nltk:/root/nltk_data/ + - 
model_cache_huggingface:/root/.cache/huggingface/ + extra_hosts: + - "host.docker.internal:host-gateway" + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + + + web_server: + image: danswer/danswer-web-server:latest + build: + context: ../../web + dockerfile: Dockerfile + args: + - NEXT_PUBLIC_DISABLE_STREAMING=${NEXT_PUBLIC_DISABLE_STREAMING:-false} + depends_on: + - api_server + restart: always + env_file: + - .env.analytics + environment: + - INTERNAL_URL=http://api_server:8080 + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + relational_db: + image: postgres:15.2-alpine + ports: + - "5432:5432" + restart: always + # POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file + env_file: + - .env.analytics + volumes: + - db_volume:/var/lib/postgresql/data + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + + + inference_model_server: + image: danswer/danswer-model-server:latest + build: + context: ../../backend + dockerfile: Dockerfile.model_server + command: > + /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-false}\" = \"True\" ]; then + echo 'Skipping service...'; + exit 0; + else + exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000; + fi" + restart: on-failure + environment: + - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-} + # Set to debug to get more fine-grained logs + - LOG_LEVEL=${LOG_LEVEL:-info} + volumes: + - model_cache_torch:/root/.cache/torch/ + - model_cache_huggingface:/root/.cache/huggingface/ + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + + + indexing_model_server: + image: danswer/danswer-model-server:latest + build: + context: ../../backend + dockerfile: Dockerfile.model_server + command: > + /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-false}\" = \"True\" ]; then + echo 'Skipping service...'; + exit 0; + else + exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000; + fi" + restart: on-failure + environment: + - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-} + - INDEXING_ONLY=True + # Set to debug to get more fine-grained logs + - LOG_LEVEL=${LOG_LEVEL:-info} + volumes: + - model_cache_torch:/root/.cache/torch/ + - model_cache_huggingface:/root/.cache/huggingface/ + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + + + # This container name cannot have an underscore in it due to Vespa expectations of the URL + index: + image: vespaengine/vespa:8.277.17 + restart: always + ports: + - "19071:19071" + - "8081:8081" + volumes: + - vespa_volume:/opt/vespa/var + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + + caddy: + image: caddy:2-alpine + restart: always + ports: + - "80:80" + - "443:443" + volumes: + - ../data/caddy/Caddyfile:/etc/caddy/Caddyfile + - caddy_data:/data + - caddy_config:/config + env_file: + - .env.analytics + depends_on: + - api_server + - web_server + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" + # Run with --profile model-server to bring up the danswer-model-server container + model_server: + image: danswer/danswer-model-server:latest + build: + context: ../../backend + dockerfile: Dockerfile.model_server + profiles: + - "model-server" + command: uvicorn model_server.main:app --host 0.0.0.0 --port 9000 + restart: always + environment: + - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-} + - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-} + - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-} + # Set to debug to get more fine-grained logs + 
- LOG_LEVEL=${LOG_LEVEL:-info} + volumes: + - model_cache_torch:/root/.cache/torch/ + - model_cache_huggingface:/root/.cache/huggingface/ + logging: + driver: json-file + options: + max-size: "50m" + max-file: "6" +volumes: + local_dynamic_storage: + file_connector_tmp_storage: # used to store files uploaded by the user temporarily while we are indexing them + db_volume: + vespa_volume: + model_cache_torch: + model_cache_nltk: + model_cache_huggingface: + caddy_data: + caddy_config: \ No newline at end of file From c0896e12dfce76678ddebcd24bb070222ed3bd5a Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Tue, 28 May 2024 12:48:04 +0000 Subject: [PATCH 16/18] Correct Makefile + add compose up/ down commands --- Makefile | 3 +++ backend/scripts/hubgpt_eval_automation.py | 5 ++--- .../docker_compose/docker-compose.analytics.yml | 14 +++++++------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index adf7fc2798d..a12a9bd4ae3 100644 --- a/Makefile +++ b/Makefile @@ -37,8 +37,11 @@ send-slack-metrics: send-hubgpt-eval: cd /home/ec2-user/danswer/deployment/docker_compose && \ + docker compose -p danswer-stack down && \ docker compose -p danswer-stack -f docker-compose.analytics.yml up -d --build sleep 150 docker exec danswer-stack-background-1 python /app/scripts/hubgpt_eval_automation.py + cd /home/ec2-user/danswer/deployment/docker_compose && \ + docker compose -p danswer-stack down && \ docker compose -p danswer-stack -f docker-compose.prod.yml up -d --build diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 0f199c6868e..446be261445 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -74,7 +74,7 @@ def upload_to_slack(filename, channel_id): return upload_response.status_code if __name__ == "__main__": - + print("Starting query run") data = pd.read_csv(CSV_PATH) queries_list = data.Query.tolist() @@ -97,5 +97,4 @@ def upload_to_slack(filename, channel_id): data.to_csv(CSV_PATH, index = False) print("Complete") CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") - # upload_to_slack(CSV_PATH, CHANNEL_ID) - print("Bing bong") \ No newline at end of file + upload_to_slack(CSV_PATH, CHANNEL_ID) diff --git a/deployment/docker_compose/docker-compose.analytics.yml b/deployment/docker_compose/docker-compose.analytics.yml index db2ac4b845a..4b12ff0e4c4 100644 --- a/deployment/docker_compose/docker-compose.analytics.yml +++ b/deployment/docker_compose/docker-compose.analytics.yml @@ -12,9 +12,9 @@ services: - index restart: always env_file: - - .env.analytics + - .env_analytics environment: - - AUTH_TYPE=${AUTH_TYPE:-google_oauth} + - AUTH_TYPE=disabled - POSTGRES_HOST=relational_db - VESPA_HOST=index - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server} @@ -43,9 +43,9 @@ services: - index restart: always env_file: - - .env.analytics + - .env_analytics environment: - - AUTH_TYPE=${AUTH_TYPE:-google_oauth} + - AUTH_TYPE=disabled - POSTGRES_HOST=relational_db - VESPA_HOST=index - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server} @@ -75,7 +75,7 @@ services: - api_server restart: always env_file: - - .env.analytics + - .env_analytics environment: - INTERNAL_URL=http://api_server:8080 logging: @@ -90,7 +90,7 @@ services: restart: always # POSTGRES_USER and POSTGRES_PASSWORD should be set in .env file env_file: - - .env.analytics + - .env_analytics volumes: - db_volume:/var/lib/postgresql/data logging: @@ -181,7 +181,7 @@ services: - 
caddy_data:/data - caddy_config:/config env_file: - - .env.analytics + - .env_analytics depends_on: - api_server - web_server From 5ef5e8190f88f2b75964c615b66785d980489955 Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Tue, 28 May 2024 13:04:29 +0000 Subject: [PATCH 17/18] Add note on URL mods for staging test --- backend/scripts/hubgpt_eval_automation.py | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/scripts/hubgpt_eval_automation.py b/backend/scripts/hubgpt_eval_automation.py index 446be261445..3b32a247fcc 100644 --- a/backend/scripts/hubgpt_eval_automation.py +++ b/backend/scripts/hubgpt_eval_automation.py @@ -84,6 +84,7 @@ def upload_to_slack(filename, channel_id): for num, query in enumerate(queries_list): print(f"Query {num+1}/{len(queries_list)}: {query}") response = process_question( + # Change to staging for staging testing danswer_url="https://hubgpt.idinsight.io", question=query, api_key=None ) responses.append(response) From 6199735691395d4b6a6be65a131009f75e8563ec Mon Sep 17 00:00:00 2001 From: Mark Botterill Date: Fri, 31 May 2024 12:38:57 +0000 Subject: [PATCH 18/18] Expand Slack report to attach historic data --- backend/scripts/send_slack_report/queries.py | 23 +- .../send_slack_report/send_slack_report.py | 203 +++++++++++------- 2 files changed, 141 insertions(+), 85 deletions(-) diff --git a/backend/scripts/send_slack_report/queries.py b/backend/scripts/send_slack_report/queries.py index f25b7b15e3f..4a761ac85be 100644 --- a/backend/scripts/send_slack_report/queries.py +++ b/backend/scripts/send_slack_report/queries.py @@ -73,12 +73,17 @@ }, } INITIAL_MESSAGES_QUERY = """ - SELECT message as initial_query FROM ( - SELECT *, - ROW_NUMBER() OVER (PARTITION BY chat_session_id ORDER BY time_sent ASC) as rn - FROM chat_message - WHERE (time_sent >= (NOW() AT TIME ZONE 'UTC') - INTERVAL '7 days') - AND (message_type = 'USER') - ) sub - WHERE sub.rn = 1 - ORDER BY sub.time_sent ASC;""" +WITH subquery AS ( + SELECT cm.time_sent, cs.user_id, cm.message, cm.id, cf.is_positive, cf.feedback_text, + ROW_NUMBER() OVER (PARTITION BY cm.chat_session_id ORDER BY cm.time_sent ASC) AS rn + FROM chat_message cm + LEFT JOIN chat_session cs ON cs.id = cm.chat_session_id + LEFT JOIN chat_feedback cf ON cf.chat_message_id = cm.id + WHERE cm.time_sent >= (NOW() AT TIME ZONE 'UTC') - INTERVAL '7 days' + AND cm.message_type = 'USER' +) +SELECT time_sent, user_id, message, id, is_positive, feedback_text +FROM subquery +WHERE rn = 1 +ORDER BY time_sent ASC; +""" diff --git a/backend/scripts/send_slack_report/send_slack_report.py b/backend/scripts/send_slack_report/send_slack_report.py index f08be3c1959..a663484abf5 100755 --- a/backend/scripts/send_slack_report/send_slack_report.py +++ b/backend/scripts/send_slack_report/send_slack_report.py @@ -1,5 +1,6 @@ import json import os +import re import pandas as pd import plotly.express as px @@ -15,47 +16,56 @@ from danswer.utils.logger import setup_logger - +# Global Variables and Paths +CSV_PATH = "/app/scripts/send_slack_report/all_data.csv" +POSTGRES_USER = os.environ.get("POSTGRES_USER", "postgres") +POSTGRES_PASSWORD = os.environ.get("POSTGRES_PASSWORD", "password") +POSTGRES_HOST = os.environ.get("POSTGRES_HOST", "localhost") +POSTGRES_PORT = os.environ.get("POSTGRES_PORT", "5432") +POSTGRES_DB = os.environ.get("POSTGRES_DB", "postgres") +SLACK_BOT_TOKEN = os.environ.get("SLACK_BOT_TOKEN") +GEN_AI_API_KEY = os.environ.get("GEN_AI_API_KEY") +METRICS_CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") + +# Setup Logger logger = 
setup_logger() def get_engine(): - POSTGRES_USER = os.environ.get("POSTGRES_USER") or "postgres" - POSTGRES_PASSWORD = os.environ.get("POSTGRES_PASSWORD") or "password" - POSTGRES_HOST = os.environ.get("POSTGRES_HOST") or "localhost" - POSTGRES_PORT = os.environ.get("POSTGRES_PORT") or "5432" - POSTGRES_DB = os.environ.get("POSTGRES_DB") or "postgres" - + """Create and return a SQLAlchemy engine.""" engine = create_engine( f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}" ) - return engine +def execute_numerical_query(engine, query): + """Execute a SQL query and return the resulting number.""" + with engine.connect() as connection: + result = connection.execute(text(query.replace("\n", ""))) + return result.scalar() + + def get_counts(): - """Fetches counts based on the specified period from the global queries dictionary.""" + """Fetch usage counts based on the specified period from the global queries dictionary.""" results = {"medium": [], "time_period": [], "count": []} - engine = get_engine() - with engine.connect() as connection: - for period in USAGE_QUERIES.keys(): - for key, query in USAGE_QUERIES[period].items(): - result = connection.execute(text(query.replace("\n", ""))) - results["count"].append(result.scalar()) - results["medium"].append(key) - results["time_period"].append(period) + for period in USAGE_QUERIES.keys(): + for key, query in USAGE_QUERIES[period].items(): + count = execute_numerical_query(engine, query) + results["count"].append(count) + results["medium"].append(key) + results["time_period"].append(period) return pd.DataFrame(results) def get_last_week_counts(df): - """Take a DataFrame and returns a dictionary of counts ofr users - from the last 7 days across Slack, Web and unique users""" + """Return counts for the last 7 days across different mediums.""" last_week_count = {} for medium in MEDIUMS: - count = df.query(f"time_period =='last_7_days' and medium == '{medium}'")[ + count = df.query(f"time_period == 'last_7_days' and medium == '{medium}'")[ "count" ].iloc[0] last_week_count[medium] = count @@ -63,99 +73,140 @@ def get_last_week_counts(df): def save_bar_plot(df, filename): + """Save a bar plot of the data and return the filename.""" fig = px.bar(df, x="medium", y="count", color="time_period", barmode="group") fig.write_image(file=filename, format="jpg") return filename -def upload_to_slack_and_delete(filename, channel_id): - slack_client = WebClient(token=os.environ.get("SLACK_BOT_TOKEN")) +def upload_file_to_slack(filename, channel_id, title, delete_after_upload=False): + """Upload a file to Slack and optionally delete it locally.""" + slack_client = WebClient(token=SLACK_BOT_TOKEN) size = os.stat(filename).st_size response = slack_client.files_getUploadURLExternal(filename=filename, length=size) - upload_url = response.data["upload_url"] - file_id = response.data["file_id"] - post_response = requests.post(url=upload_url, data=open(filename, "rb")) + upload_url = response["upload_url"] + file_id = response["file_id"] + + with open(filename, "rb") as file: + post_response = requests.post(url=upload_url, data=file) + if post_response.status_code == 200: - upload_response = slack_client.files_completeUploadExternal( - files=[{"id": file_id, "title": "Metrics graph"}], channel_id=channel_id + slack_client.files_completeUploadExternal( + files=[{"id": file_id, "title": title}], channel_id=channel_id ) - # Clean up - os.remove(filename) - return upload_response.status_code + if delete_after_upload: + 
os.remove(filename) + return 200 + else: + logger.error(f"Failed to upload {filename} to Slack.") + return post_response.status_code + +def categorize(text): + """Categorize the given text based on predefined categories.""" + categories = ["PROJECTS", "POLICIES", "RESOURCES", "TEAMS", "MISCELLANEOUS"] + regex_pattern = r"\b(" + "|".join(categories) + r")\b" + match = re.search(regex_pattern, text, re.IGNORECASE) + return match.group(1).upper() if match else "MISCELLANEOUS" -def classify_initial_queries(): + +def gather_and_combine_data(): + """Gather past week's data, concatenate with existing data, and dispatch as a CSV.""" engine = get_engine() with engine.connect() as connection: df = pd.read_sql_query(INITIAL_MESSAGES_QUERY, connection) - logger.info("Initial queries recieved") - client = OpenAI(api_key=os.environ.get("GEN_AI_API_KEY")) - label_series = df["initial_query"].map(lambda x: label_question(x, client)) - logger.info("Labelling complete") - tally_json = json.loads(label_series.value_counts().to_json()) - classifications = "" - total_initial_queries = sum(tally_json.values()) - for k, v in tally_json.items(): - percentage = v / total_initial_queries * 100 - classifications += f"{k}: {v} queries ({percentage:.1f}%)\n" - return classifications + logger.info("Initial queries received") + + # Fill missing user IDs with 'SLACK' + df["user_id"] = df["user_id"].fillna("SLACK") + clean_weekly = df.drop_duplicates(subset="id").copy() + clean_weekly["time_sent"] = clean_weekly["time_sent"].dt.date + + # Combine with historic data + overlap_ids = clean_weekly["id"] + full_df = pd.read_csv(CSV_PATH) + clean_all_time_df = full_df[~full_df["id"].isin(overlap_ids)] + combined_df = ( + pd.concat([clean_all_time_df, clean_weekly]) + .sort_values(by="time_sent") + .reset_index(drop=True) + ) + combined_df.to_csv(CSV_PATH, index=False) + logger.info("Combined with historic data and saved to CSV") + + return clean_weekly + + +def classify_initial_queries(clean_weekly): + """Classify the initial queries and prepare a summary.""" + # Label data using OpenAI + client = OpenAI(api_key=GEN_AI_API_KEY) + clean_weekly["labels"] = clean_weekly["message"].apply( + lambda x: label_question(x, client) + ) + clean_weekly["labels"] = clean_weekly["labels"].apply(categorize) + logger.info("Labelling complete") + + # Prepare classification summary + tally_json = json.loads(clean_weekly["labels"].value_counts().to_json()) + total_initial_queries = sum(tally_json.values()) + classifications = "\n".join( + f"{k}: {v} queries ({v / total_initial_queries * 100:.1f}%)" + for k, v in tally_json.items() + ) + return classifications def create_message(last_week_count, classifications): - message = ( + """Create a summary message to send to Slack.""" + return ( f"Hello Users!\n\n" f"Here are some updates from HubGPT regarding the last 7 days:\n" - f"- {last_week_count['slack_messages']}: Slack messages in the last 7 days.\n" - f"- {last_week_count['web_messages']}: Web App messages in the last 7 days.\n" - f"- {last_week_count['distinct_web_users']}: Unique users on the Web App.\n" + f"- {last_week_count.get('slack_messages', 0)} Slack messages in the last 7 days.\n" + f"- {last_week_count.get('web_messages', 0)} Web App messages in the last 7 days.\n" + f"- {last_week_count.get('distinct_web_users', 0)} Unique users on the Web App.\n" "Usage breakdown:\n" f"{classifications}" ) - return message -def send_message(user_id, message): - SLACK_BOT_TOKEN = os.environ.get("SLACK_BOT_TOKEN") - if not SLACK_BOT_TOKEN: - 
logger.debug( - "Slack OAuth token not provided. Check env prod template for guidance" - ) - return None - logger.info("Initializing Slack client") - - slack_client = WebClient(token=SLACK_BOT_TOKEN) - - logger.info("Sending Slack message") - # Send a message to the user - slack_client.chat_postMessage(channel=user_id, text=message) - logger.info("Message sent") - return None +def send_message(channel_id, message): + """Send a message to the specified Slack channel.""" + try: + slack_client = WebClient(token=SLACK_BOT_TOKEN) + slack_client.chat_postMessage(channel=channel_id, text=message) + logger.info("Message sent to Slack channel") + except Exception as e: + logger.error(f"Failed to send message to Slack channel {channel_id}: {e}") def send_usage_report_to_slack(channel_id): + """Generate and send the usage report to Slack.""" counts_df = get_counts() - classifications = classify_initial_queries() - + clean_weekly = gather_and_combine_data() + classifications = classify_initial_queries(clean_weekly) last_week_counts = get_last_week_counts(counts_df) - - file = save_bar_plot(counts_df, "metrics.jpg") - + plot_filename = save_bar_plot(counts_df, "metrics.jpg") message = create_message(last_week_counts, classifications) send_message(channel_id, message) - upload_status = upload_to_slack_and_delete(file, channel_id) - return upload_status + upload_file_to_slack( + plot_filename, channel_id, "Metrics graph", delete_after_upload=True + ) + upload_file_to_slack(CSV_PATH, channel_id, "Historic data") if __name__ == "__main__": try: - CHANNEL_ID = os.environ.get("METRICS_CHANNEL_ID") - if CHANNEL_ID: + if METRICS_CHANNEL_ID: logger.info("Starting Slack usage report") - send_usage_report_to_slack(CHANNEL_ID) + send_usage_report_to_slack(METRICS_CHANNEL_ID) else: - logger.warning("Slack Metrics Channel ID token not provided.") - logger.warning("Check env prod template for guidance.") + logger.warning( + "Slack Metrics Channel ID token not provided. Check env prod template for guidance." + ) except Exception as e: - logger.exception("An error occurred while sending usage report to Slack: %s", e) + logger.exception( + "An error occurred while sending usage report to Slack", exc_info=e + )
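For reference, the evaluation script added in this series boils down to two backend calls per query: create a chat session, then stream a message into it and collect the "answer_piece" tokens. The condensed sketch below is illustrative only and is not part of the patches: the base URL and sample question are placeholders, auth is assumed disabled (api_key=None, as in the script), and stream=True is added here for clarity. Endpoint paths and payload fields are taken from backend/scripts/hubgpt_eval_automation.py as introduced above.

# Illustrative sketch, not part of the patch series: condensed version of the
# create-session + send-message flow implemented by hubgpt_eval_automation.py.
# DANSWER_URL and QUESTION are placeholder values; auth is assumed disabled.
import json

import requests

DANSWER_URL = "http://localhost:8080"  # assumption: locally reachable API server
QUESTION = "What projects are active this quarter?"  # hypothetical sample query

# 1) Create a chat session (persona_id 0, matching the script).
session_resp = requests.post(
    f"{DANSWER_URL}/api/chat/create-chat-session", json={"persona_id": 0}
)
session_resp.raise_for_status()
chat_session_id = session_resp.json()["chat_session_id"]

# 2) Send the question and accumulate the streamed "answer_piece" tokens.
payload = {
    "message": QUESTION,
    "chat_session_id": chat_session_id,
    "parent_message_id": None,
    "prompt_id": 0,
    "search_doc_ids": None,
    "retrieval_options": {
        "run_search": "always",
        "real_time": True,
        "enable_auto_detect_filters": False,
        "filters": {},
    },
}
answer = ""
with requests.post(
    f"{DANSWER_URL}/api/chat/send-message", json=payload, stream=True
) as resp:
    resp.raise_for_status()
    for packet in resp.iter_lines():
        if packet:
            answer += json.loads(packet.decode()).get("answer_piece") or ""
print(answer)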