From 96d767228a5b33b61c92330d5bb0e184c0d768b0 Mon Sep 17 00:00:00 2001 From: Maram Hasanain Date: Mon, 28 Aug 2023 15:06:42 +0300 Subject: [PATCH] Update ArSarcasm_BLOOMZ_ZeroShot.py --- .../sarcasm/ArSarcasm_BLOOMZ_ZeroShot.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/assets/ar/sentiment_emotion_others/sarcasm/ArSarcasm_BLOOMZ_ZeroShot.py b/assets/ar/sentiment_emotion_others/sarcasm/ArSarcasm_BLOOMZ_ZeroShot.py index 7cef9f1c..5b1dc138 100644 --- a/assets/ar/sentiment_emotion_others/sarcasm/ArSarcasm_BLOOMZ_ZeroShot.py +++ b/assets/ar/sentiment_emotion_others/sarcasm/ArSarcasm_BLOOMZ_ZeroShot.py @@ -1,7 +1,5 @@ from llmebench.datasets import ArSarcasmDataset - from llmebench.models import PetalsModel - from llmebench.tasks import SarcasmTask @@ -28,9 +26,10 @@ def config(): def prompt(input_sample): prompt_string = ( - f"Predict whether the tweet is sarcastic or not. If it is sarcastic, respond with 'TRUE'. If it is not sarcastic, respond with 'FALSE'.\n\n" - f"text: {input_sample}\n" - f"label: " + 'Predict whether the following "tweet" is sarcastic. Return "yes" if the tweet is sarcastic and "no" if the tweet is not sarcastic. Provide only label.\n\ntweet: ' + input_sample + + "\n" + "label: \n" ) return { "prompt": prompt_string, @@ -41,9 +40,9 @@ def post_process(response): label = response["outputs"].strip().lower() label = label.replace("", "").replace("", "") - if label == "true": + if label == "yes": return "TRUE" - elif label == "false": + elif label == "no": return "FALSE" else: return None