glue-xnli.py

# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
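#
# Example invocation (illustrative sketch only: the paths, experiment name and
# hyper-parameter values below are placeholders, not values prescribed by this
# repository; the flags themselves correspond to the options defined further down):
#
#   python glue-xnli.py \
#       --exp_name xnli_eval \
#       --dump_path ./dumped/ \
#       --model_path ./models/pretrained_model.pth \
#       --data_path ./data/processed/ \
#       --transfer_tasks XNLI,MNLI \
#       --batch_size 8 \
#       --n_epochs 250 \
#       --epoch_size 20000 \
#       --optimizer adam,lr=0.000005
#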
import os
import argparse
from src.utils import bool_flag, initialize_exp
from src.evaluation.glue import GLUE
from src.evaluation.xnli import XNLI
from src.model.embedder import SentenceEmbedder
GLUE_TASKS = ['MNLI', 'QQP', 'QNLI', 'MRPC', 'RTE', 'STS-B', 'SST-2', 'CoLA']
XNLI_TASKS = ['XNLI']
TASKS = GLUE_TASKS + XNLI_TASKS
# parse parameters
parser = argparse.ArgumentParser(description='Train on GLUE or XNLI')

# main parameters
parser.add_argument("--exp_name", type=str, default="",
                    help="Experiment name")
parser.add_argument("--dump_path", type=str, default="",
                    help="Experiment dump path")
parser.add_argument("--exp_id", type=str, default="",
                    help="Experiment ID")

# float16
parser.add_argument("--fp16", type=bool_flag, default=False,
                    help="Run model with float16")

# evaluation task / pretrained model
parser.add_argument("--transfer_tasks", type=str, default="",
                    help="Transfer tasks, example: 'MNLI,RTE,XNLI'")
parser.add_argument("--model_path", type=str, default="",
                    help="Model location")

# data
parser.add_argument("--data_path", type=str, default="",
                    help="Data path")
parser.add_argument("--max_vocab", type=int, default=-1,
                    help="Maximum vocabulary size (-1 to disable)")
parser.add_argument("--min_count", type=int, default=0,
                    help="Minimum vocabulary count")

# batch parameters
parser.add_argument("--max_len", type=int, default=256,
                    help="Maximum length of sentences (after BPE)")
parser.add_argument("--group_by_size", type=bool_flag, default=False,
                    help="Sort sentences by size during training")
parser.add_argument("--batch_size", type=int, default=32,
                    help="Number of sentences per batch")
parser.add_argument("--max_batch_size", type=int, default=0,
                    help="Maximum number of sentences per batch (used in combination with tokens_per_batch, 0 to disable)")
parser.add_argument("--tokens_per_batch", type=int, default=-1,
                    help="Number of tokens per batch")

# model / optimization
parser.add_argument("--finetune_layers", type=str, default='0:_1',
                    help="Layers to finetune. 0 = embeddings, _1 = last encoder layer")
parser.add_argument("--weighted_training", type=bool_flag, default=False,
                    help="Use a weighted loss during training")
parser.add_argument("--dropout", type=float, default=0,
                    help="Fine-tuning dropout")
parser.add_argument("--optimizer", type=str, default="adam,lr=0.0001",
                    help="Optimizer")
parser.add_argument("--n_epochs", type=int, default=100,
                    help="Maximum number of epochs")
parser.add_argument("--epoch_size", type=int, default=-1,
                    help="Epoch size (-1 for full pass over the dataset)")

# debug
parser.add_argument("--debug_train", type=bool_flag, default=False,
                    help="Use valid sets for train sets (faster loading)")
parser.add_argument("--debug_slurm", type=bool_flag, default=False,
                    help="Debug multi-GPU / multi-node within a SLURM job")

# parse parameters
params = parser.parse_args()
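# A positive token budget implies length-sorted batching (presumably so that
# batches of similar-length sentences fill the budget with little padding).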
if params.tokens_per_batch > -1:
    params.group_by_size = True
# check parameters
assert os.path.isdir(params.data_path)
assert os.path.isfile(params.model_path)
# tasks
params.transfer_tasks = params.transfer_tasks.split(',')
assert len(params.transfer_tasks) > 0
assert all([task in TASKS for task in params.transfer_tasks])
# reload pretrained model
embedder = SentenceEmbedder.reload(params.model_path, params)
# reload langs from pretrained model
params.n_langs = embedder.pretrain_params['n_langs']
params.id2lang = embedder.pretrain_params['id2lang']
params.lang2id = embedder.pretrain_params['lang2id']
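# The language set is inherited from the pretrained checkpoint so that the
# evaluators below see the same language ids the encoder was trained with.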
# initialize the experiment / build sentence embedder
logger = initialize_exp(params)
scores = {}
# prepare trainers / evaluators
glue = GLUE(embedder, scores, params)
xnli = XNLI(embedder, scores, params)
# run
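# GLUE groups several tasks, so the task name is passed through; XNLI is a
# single (multilingual) task, so its evaluator takes no task argument.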
for task in params.transfer_tasks:
    if task in GLUE_TASKS:
        glue.run(task)
    if task in XNLI_TASKS:
        xnli.run()