eval_sentemb.py (forked from uds-lsv/AAdaM)
import os
import argparse
import json

import torch
from scipy.stats import spearmanr
import pytablewriter as ptw
from sentence_transformers import SentenceTransformer

from utils import read_file, cosine_sim

# number of dev-set folds evaluated per language
NUM_FOLD = 10
class STEmbedder:
    """Thin wrapper around a Sentence-Transformers model for computing sentence embeddings."""

    def __init__(self, model_name_or_path, device='cuda', cache_folder=None):
        self.model = SentenceTransformer(model_name_or_path, cache_folder=cache_folder).to(device)
        self.device = device

    def compute_embeddings(self, text, batch_size=128, normalize=True):
        # encode a list of sentences; embeddings are L2-normalized by default
        embeddings_np = self.model.encode(text,
                                          batch_size=batch_size,
                                          show_progress_bar=False,
                                          device=self.device,
                                          normalize_embeddings=normalize)
        return embeddings_np
def predict(s1_embeddings, s2_embeddings):
    """Score each sentence pair by the cosine similarity of its two embeddings."""
    scores = []
    for e1, e2 in zip(s1_embeddings, s2_embeddings):
        scores.append(cosine_sim(e1, e2))
    return scores
def main(args):
    # load model
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    embedder = STEmbedder(args.model_name_or_path, device=device, cache_folder=args.cache_dir)

    # evaluation
    all_results = {}
    for l in args.languages:
        print(f"Language: {l}")
        results = {}
        for k in range(NUM_FOLD):
            _, sentence_pairs, true_scores = read_file(f"{args.data_dir}/{l}/{l}_dev_{k}.csv")

            ################## predict the scores ##################
            # compute embeddings for each side of the sentence pairs
            s1 = [s[0] for s in sentence_pairs]
            s2 = [s[1] for s in sentence_pairs]
            s1_embeddings = embedder.compute_embeddings(s1)
            s2_embeddings = embedder.compute_embeddings(s2)
            pred_scores = predict(s1_embeddings, s2_embeddings)
            #########################################################

            # Spearman correlation between gold and predicted scores, as a percentage
            spearman = spearmanr(true_scores, pred_scores)[0]
            results[f"{k}"] = spearman * 100
        results['avg'] = round(sum(results.values()) / len(results.values()), 2)
        all_results[l] = results

        # write per-language results to disk
        os.makedirs(f"{args.output_dir}/{l}", exist_ok=True)
        with open(f"{args.output_dir}/{l}/result.json", 'w') as fp:
            json.dump(results, fp, indent=2)

    # print a markdown summary table (fold-by-fold and average, per language)
    writer = ptw.MarkdownTableWriter()
    writer.headers = [""] + args.languages
    writer.value_matrix = [[f"Fold {k}"] + [round(all_results[l][str(k)], 2) for l in args.languages]
                           for k in range(NUM_FOLD)]
    writer.value_matrix.append(["avg"] + [round(all_results[l]['avg'], 2) for l in args.languages])
    writer.write_table()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str, default='./data/')
    parser.add_argument('--output_dir', type=str, default='./results/sentence_transformer')
    parser.add_argument('--language', type=str, default='all')
    parser.add_argument('--model_name_or_path', type=str,
                        default='sentence-transformers/paraphrase-multilingual-mpnet-base-v2')
    parser.add_argument('--cache_dir', type=str, default='')
    args = parser.parse_args()

    args.output_dir = f"{args.output_dir}/{args.model_name_or_path}"
    if args.language == 'all':
        args.languages = ['eng', 'amh', 'arq', 'ary', 'esp', 'hau', 'mar', 'tel']
    else:
        args.languages = [args.language]

    main(args)
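Note: read_file and cosine_sim are imported from the repository's local utils module, which is not part of this file. A minimal sketch of what they might look like is given below; the CSV column names (PairID, Text, Score) and the "\n"-separated sentence pair are assumptions for illustration, not the repository's actual code.

# Hypothetical sketch of the helpers imported from utils (assumed, not the repo's code).
import csv
import numpy as np

def read_file(path):
    """Read a fold CSV and return ids, sentence pairs, and gold relatedness scores."""
    ids, sentence_pairs, scores = [], [], []
    with open(path, newline='', encoding='utf-8') as f:
        for row in csv.DictReader(f):
            ids.append(row['PairID'])                # assumed column name
            s1, s2 = row['Text'].split('\n', 1)      # assumed pair separator
            sentence_pairs.append((s1, s2))
            scores.append(float(row['Score']))       # assumed column name
    return ids, sentence_pairs, scores

def cosine_sim(e1, e2):
    """Cosine similarity between two 1-D embedding vectors."""
    e1, e2 = np.asarray(e1), np.asarray(e2)
    return float(np.dot(e1, e2) / (np.linalg.norm(e1) * np.linalg.norm(e2)))

With the defaults above, the script can be run as, for example, python eval_sentemb.py --language amh to evaluate a single language, or left at --language all to evaluate every language in the built-in list.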