Commit: Add factuality, disinformation, harmful content assets for JAIS
Showing 13 changed files with 847 additions and 0 deletions.
...ctuality_disinformation_harmful_content/adult_content_detection/Adult_JAIS13b_ZeroShot.py (46 additions)
from llmebench.datasets import AdultDataset
from llmebench.models import FastChatModel
from llmebench.tasks import AdultTask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "JAIS-13b",
        "description": "Locally hosted JAIS-13b-chat model using FastChat.",
        "scores": {"Macro-F1": ""},
    }


def config():
    return {
        "dataset": AdultDataset,
        "task": AdultTask,
        "model": FastChatModel,
        "model_args": {
            "class_labels": ["ADULT", "NOT_ADULT"],
            "max_tries": 3,
        },
    }


def prompt(input_sample):
    base_prompt = (
        f'Given the following tweet, label it as "ADULT" or "NOT_ADULT" based on the content of the tweet.\n\n'
        f"tweet: {input_sample}\n"
        f"label: \n"
    )
    return [
        {
            "role": "user",
            "content": base_prompt,
        },
    ]


def post_process(response):
    out = response["choices"][0]["message"]["content"].replace("label: ", "")
    j = out.find(".")
    if j > 0:
        out = out[0:j]
    return out
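To sanity-check the parsing above, a minimal sketch: the response dict mimics the OpenAI-style payload that the FastChat server returns, and the content string is invented.

# Hypothetical response; only the fields post_process actually reads.
response = {
    "choices": [
        {"message": {"content": "label: ADULT. The tweet contains explicit content."}}
    ]
}
print(post_process(response))  # -> "ADULT" (prefix stripped, text after the first "." dropped)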
...ty_disinformation_harmful_content/attentionworthy/CT22Attentionworthy_JAIS13b_ZeroShot.py (73 additions)
from llmebench.datasets import CT22AttentionworthyDataset
from llmebench.models import FastChatModel
from llmebench.tasks import AttentionworthyTask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "JAIS-13b",
        "description": "Locally hosted JAIS-13b-chat model using FastChat.",
        "scores": {"Macro-F1": ""},
    }


def config():
    return {
        "dataset": CT22AttentionworthyDataset,
        "task": AttentionworthyTask,
        "model": FastChatModel,
        "model_args": {
            "class_labels": [
                "yes_discusses_action_taken",
                "harmful",
                "yes_discusses_cure",
                "yes_asks_question",
                "no_not_interesting",
                "yes_other",
                "yes_blame_authorities",
                "yes_contains_advice",
                "yes_calls_for_action",
            ],
            "max_tries": 30,
        },
        "general_args": {"test_split": "ar"},
    }


def prompt(input_sample):
    base_prompt = (
        f'Annotate "tweet" into one of the following categories: yes_discusses_action_taken, harmful, yes_discusses_cure, yes_asks_question, no_not_interesting, yes_other, yes_blame_authorities, yes_contains_advice, yes_calls_for_action\n\n'
        f"tweet: {input_sample}\n"
        f"label: \n"
    )
    return [
        {
            "role": "user",
            "content": base_prompt,
        },
    ]


def post_process(response):
    label = response["choices"][0]["message"]["content"]

    # Normalize the completion: lowercase, strip punctuation and any
    # echoed "label:" prefix.
    label = (
        label.lower()
        .replace(" - ", ", ")
        .replace(",", "")
        .replace(".", "")
        .replace("label:", "")
    )
    label = label.strip()

    if label.startswith("no"):
        label_fixed = "no_not_interesting"
    elif label == "yes_discusses_covid-19_vaccine_side_effects":
        label_fixed = "yes_discusses_cure"
    elif label == "yes_harmful":
        label_fixed = "harmful"
    elif label.startswith("yes"):
        label_fixed = label
    else:
        # Fall back to None for any output that matches none of the classes.
        label_fixed = None

    return label_fixed
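A minimal sketch of the normalization chain on a plausible completion; the content string is invented.

response = {"choices": [{"message": {"content": "Label: Yes_contains_advice."}}]}
print(post_process(response))  # -> "yes_contains_advice"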
...ty_disinformation_harmful_content/checkworthyness/CT22Checkworthiness_JAIS13b_ZeroShot.py (64 additions)
from llmebench.datasets import CT22CheckworthinessDataset
from llmebench.models import FastChatModel
from llmebench.tasks import CheckworthinessTask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "JAIS-13b",
        "description": "Locally hosted JAIS-13b-chat model using FastChat.",
        "scores": {"Macro-F1": ""},
    }


def config():
    return {
        "dataset": CT22CheckworthinessDataset,
        "task": CheckworthinessTask,
        "model": FastChatModel,
        "model_args": {
            "class_labels": ["0", "1"],
            "max_tries": 30,
        },
        "general_args": {"test_split": "ar"},
    }


def prompt(input_sample):
    base_prompt = (
        f'Annotate the "tweet" into "one" of the following categories: checkworthy or not_checkworthy\n\n'
        f"tweet: {input_sample}\n"
        f"label: \n"
    )
    return [
        {
            "role": "user",
            "content": base_prompt,
        },
    ]


def post_process(response):
    label = response["choices"][0]["message"]["content"]
    label = label.replace("label:", "").strip()

    # Exact matches first, then substring checks; "not_checkworthy" must be
    # tested before "checkworthy", which it contains.
    if label in ("checkworthy", "Checkworthy"):
        label_fixed = "1"
    elif label in ("Not_checkworthy.", "not_checkworthy"):
        label_fixed = "0"
    elif "not_checkworthy" in label:
        label_fixed = "0"
    elif "checkworthy" in label:
        label_fixed = "1"
    else:
        label_fixed = None

    return label_fixed
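For illustration, prompt() builds a single-turn chat in the message-list format these assets hand to FastChatModel; the Arabic tweet below is an invented placeholder.

messages = prompt("مثال على تغريدة")  # hypothetical input sample
print(messages[0]["role"])     # -> "user"
print(messages[0]["content"])  # instruction text, the tweet, and a trailing "label: " slot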
...r/factuality_disinformation_harmful_content/claim_detection/CT22Claim_JAIS13b_ZeroShot.py (56 additions)
from llmebench.datasets import CT22ClaimDataset
from llmebench.models import FastChatModel
from llmebench.tasks import ClaimDetectionTask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "JAIS-13b",
        "description": "Locally hosted JAIS-13b-chat model using FastChat.",
        "scores": {"Macro-F1": ""},
    }


def config():
    return {
        "dataset": CT22ClaimDataset,
        "task": ClaimDetectionTask,
        "model": FastChatModel,
        "model_args": {
            "class_labels": ["0", "1"],
            "max_tries": 30,
        },
        "general_args": {"test_split": "ar"},
    }


def prompt(input_sample):
    base_prompt = (
        f"Given the following tweet, please identify if it contains a claim. If it does, annotate 'yes', if it does not, annotate 'no'\n\n"
        f"tweet: {input_sample}\n"
        f"label: \n"
    )
    return [
        {
            "role": "user",
            "content": base_prompt,
        },
    ]


def post_process(response):
    label = response["choices"][0]["message"]["content"]
    label = label.replace("label:", "").strip()

    if label == "yes" or label == "the sentence contains a factual claim":
        label_fixed = "1"
    elif label == "no":
        label_fixed = "0"
    else:
        # Any other output is treated as unparseable.
        label_fixed = None

    return label_fixed
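With the fallback in place, the yes/no to "1"/"0" mapping behaves as below; all three contents are invented.

for content in ("yes", "no", "maybe"):
    response = {"choices": [{"message": {"content": content}}]}
    print(post_process(response))  # -> "1", "0", None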
...ar/factuality_disinformation_harmful_content/factuality/ANSFactuality_JAIS13b_ZeroShot.py (63 additions)
from llmebench.datasets import ANSFactualityDataset
from llmebench.models import FastChatModel
from llmebench.tasks import FactualityTask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "JAIS-13b",
        "description": "Locally hosted JAIS-13b-chat model using FastChat.",
        "scores": {"Macro-F1": ""},
    }


def config():
    return {
        "dataset": ANSFactualityDataset,
        "task": FactualityTask,
        "model": FastChatModel,
        "model_args": {
            "max_tries": 3,
        },
    }


def prompt(input_sample):
    base_prompt = (
        "Detect whether the information in the sentence is factually true or false. "
        "Answer only by true or false.\n\n"
        + "Sentence: "
        + input_sample
        + "\nlabel: \n"
    )

    return [
        {
            "role": "user",
            "content": base_prompt,
        },
    ]


def post_process(response):
    input_label = response["choices"][0]["message"]["content"]
    input_label = input_label.replace(".", "").strip().lower()

    if (
        "true" in input_label
        or "label: 1" in input_label
        or "label: yes" in input_label
    ):
        pred_label = "true"
    elif (
        "false" in input_label
        or "label: 0" in input_label
        or "label: no" in input_label
    ):
        pred_label = "false"
    else:
        print("label problem!! " + input_label)
        pred_label = None

    return pred_label
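A quick check of the true/false mapping; both payloads are invented.

for content in ("True.", "This is false"):
    response = {"choices": [{"message": {"content": content}}]}
    print(post_process(response))  # -> "true", then "false"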
...actuality_disinformation_harmful_content/factuality/COVID19Factuality_JAIS13b_ZeroShot.py (55 additions)
from llmebench.datasets import COVID19FactualityDataset
from llmebench.models import FastChatModel
from llmebench.tasks import FactualityTask


def metadata():
    return {
        "author": "Arabic Language Technologies, QCRI, HBKU",
        "model": "JAIS-13b",
        "description": "Locally hosted JAIS-13b-chat model using FastChat.",
        "scores": {"Macro-F1": ""},
    }


def config():
    return {
        "dataset": COVID19FactualityDataset,
        "task": FactualityTask,
        "model": FastChatModel,
        "model_args": {
            "class_labels": ["yes", "no"],
            "max_tries": 30,
        },
    }


def prompt(input_sample):
    base_prompt = (
        f'Annotate the "tweet" into one of the following categories: correct or incorrect\n\n'
        f"tweet: {input_sample}\n"
        f"label: \n"
    )
    return [
        {
            "role": "user",
            "content": base_prompt,
        },
    ]


def post_process(response):
    # Lowercase the completion so the refusal prefixes and labels below
    # match regardless of the model's casing.
    label = response["choices"][0]["message"]["content"].lower()

    if label.startswith("i am unable to verify") or label.startswith(
        "i am unable to categorize"
    ):
        label_fixed = None
    # Check "incorrect" before "correct", which it contains as a substring.
    elif "incorrect" in label:
        label_fixed = "no"
    elif "correct" in label:
        label_fixed = "yes"
    else:
        label_fixed = None

    return label_fixed
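And a sketch of the correct/incorrect to yes/no mapping, including the refusal path; all contents are invented.

for content in ("label: Incorrect", "Correct", "I am unable to verify this claim."):
    response = {"choices": [{"message": {"content": content}}]}
    print(post_process(response))  # -> "no", "yes", None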