diff --git a/assets/ar/factuality_disinformation_harmful_content/factuality/COVID19Factuality_GPT35_ZeroShot.py b/assets/ar/factuality_disinformation_harmful_content/factuality/COVID19Factuality_GPT35_ZeroShot.py
new file mode 100644
index 00000000..e6e17bfa
--- /dev/null
+++ b/assets/ar/factuality_disinformation_harmful_content/factuality/COVID19Factuality_GPT35_ZeroShot.py
@@ -0,0 +1,56 @@
+from llmebench.datasets import COVID19FactualityDataset
+from llmebench.models import LegacyOpenAIModel
+from llmebench.tasks import FactualityTask
+
+
+def config():
+    return {
+        "dataset": COVID19FactualityDataset,
+        "dataset_args": {},
+        "task": FactualityTask,
+        "task_args": {},
+        "model": LegacyOpenAIModel,
+        "model_args": {
+            "class_labels": ["yes", "no"],
+            "max_tries": 30,
+        },
+        "general_args": {
+            "data_path": "data/factuality_disinformation_harmful_content/factuality_covid19/covid19_infodemic_arabic_data_factuality_binary_test.tsv"
+        },
+    }
+
+
+def prompt(input_sample):
+    prompt_string = (
+        f"Detect the information in the sentence as correct or incorrect. Use label as yes or no.\n\n"
+        f"text: {input_sample}\n"
+        f"label: \n"
+    )
+    return {
+        "system_message": "You are an AI assistant that helps people find information.",
+        "messages": [
+            {
+                "sender": "user",
+                "text": prompt_string,
+            }
+        ],
+    }
+
+
+def post_process(response):
+    label = response["choices"][0]["text"].lower().replace(".", "")
+
+    if label.startswith("i am unable to verify") or label.startswith(
+        "i am unable to categorize"
+    ):
+        label_fixed = None
+    elif "incorrect" in label or "label: no" in label:
+        label_fixed = "no"
+    elif "correct" in label or "label: yes" in label:
+        label_fixed = "yes"
+    elif label == "no" or label == "yes":
+        label_fixed = label
+    else:
+        label_fixed = None
+
+    return label_fixed
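
For reference, a minimal sanity check of the label mapping is sketched below. It is not part of the submitted asset; the example completions are invented, but they follow the {"choices": [{"text": ...}]} shape that post_process indexes into, and the expected outputs are noted in the comments. It could be run by temporarily appending it to the asset file above (or by importing post_process from it).

# Hypothetical sanity check for post_process(); not part of the submitted asset.
# The responses below are made-up examples in the same shape as the
# LegacyOpenAI-style completions this asset expects.
if __name__ == "__main__":
    examples = [
        {"choices": [{"text": "label: yes"}]},                        # expected: "yes"
        {"choices": [{"text": "The information is incorrect."}]},     # expected: "no"
        {"choices": [{"text": "No."}]},                                # expected: "no"
        {"choices": [{"text": "I am unable to verify this claim."}]},  # expected: None
    ]
    for response in examples:
        print(post_process(response))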