# encoding=utf8
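"""Train a character-based NER model or tag sentences interactively.

A minimal usage sketch (see the flag definitions below for all options):

    python main.py --clean=True --train=True   # clean old artifacts, then train
    python main.py                             # load a checkpoint and tag stdin
"""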
import os
import itertools
import pickle
from collections import OrderedDict

import numpy as np
import tensorflow as tf

from model import Model
from loader import load_sentences, update_tag_scheme
from loader import char_mapping, tag_mapping
from loader import augment_with_pretrained, prepare_dataset
from utils import get_logger, make_path, clean, create_model, save_model
from utils import print_config, save_config, load_config, test_ner
from data_utils import load_word2vec, input_from_line, BatchManager
flags = tf.app.flags
flags.DEFINE_boolean("clean", False, "clean train folder")
flags.DEFINE_boolean("train", False, "Wither train the model")
# configurations for the model
flags.DEFINE_integer("seg_dim", 20, "Embedding size for segmentation, 0 if not used")
flags.DEFINE_integer("char_dim", 100, "Embedding size for characters")
flags.DEFINE_integer("lstm_dim", 100, "Num of hidden units in LSTM")
flags.DEFINE_string("tag_schema", "iobes", "tagging schema iobes or iob")
# configurations for training
flags.DEFINE_float("clip", 5, "Gradient clip")
flags.DEFINE_float("dropout", 0.5, "Dropout rate")
flags.DEFINE_float("batch_size", 20, "batch size")
flags.DEFINE_float("lr", 0.001, "Initial learning rate")
flags.DEFINE_string("optimizer", "adam", "Optimizer for training")
flags.DEFINE_boolean("pre_emb", True, "Wither use pre-trained embedding")
flags.DEFINE_boolean("zeros", False, "Wither replace digits with zero")
flags.DEFINE_boolean("lower", True, "Wither lower case")
flags.DEFINE_integer("max_epoch", 100, "maximum training epochs")
flags.DEFINE_integer("steps_check", 100, "steps per checkpoint")
flags.DEFINE_string("ckpt_path", "ckpt", "Path to save model")
flags.DEFINE_string("summary_path", "summary", "Path to store summaries")
flags.DEFINE_string("log_file", "train.log", "File for log")
flags.DEFINE_string("map_file", "maps.pkl", "file for maps")
flags.DEFINE_string("vocab_file", "vocab.json", "File for vocab")
flags.DEFINE_string("config_file", "config_file", "File for config")
flags.DEFINE_string("script", "conlleval", "evaluation script")
flags.DEFINE_string("result_path", "result", "Path for results")
flags.DEFINE_string("emb_file", "wiki_100.utf8", "Path for pre_trained embedding")
flags.DEFINE_string("train_file", os.path.join("data", "example.train"), "Path for train data")
flags.DEFINE_string("dev_file", os.path.join("data", "example.dev"), "Path for dev data")
flags.DEFINE_string("test_file", os.path.join("data", "example.test"), "Path for test data")
FLAGS = tf.app.flags.FLAGS
assert FLAGS.clip < 5.1, "gradient clip shouldn't be too large"
assert 0 <= FLAGS.dropout < 1, "dropout rate should be between 0 and 1"
assert FLAGS.lr > 0, "learning rate must be larger than zero"
assert FLAGS.optimizer in ["adam", "sgd", "adagrad"]


# config for the model
def config_model(char_to_id, tag_to_id):
    config = OrderedDict()
    config["num_chars"] = len(char_to_id)
    config["char_dim"] = FLAGS.char_dim
    config["num_tags"] = len(tag_to_id)
    config["seg_dim"] = FLAGS.seg_dim
    config["lstm_dim"] = FLAGS.lstm_dim
    config["batch_size"] = FLAGS.batch_size
    config["emb_file"] = FLAGS.emb_file
    config["clip"] = FLAGS.clip
    config["dropout_keep"] = 1.0 - FLAGS.dropout
    config["optimizer"] = FLAGS.optimizer
    config["lr"] = FLAGS.lr
    config["tag_schema"] = FLAGS.tag_schema
    config["pre_emb"] = FLAGS.pre_emb
    config["zeros"] = FLAGS.zeros
    config["lower"] = FLAGS.lower
    return config
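

# evaluate() runs the model over one data split, scores the predictions with
# the external evaluation script via test_ner, and keeps the best dev/test F1
# in TensorFlow variables on the model (model.best_dev_f1 / model.best_test_f1),
# so the scores are saved and restored together with the checkpoint.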
def evaluate(sess, model, name, data, id_to_tag, logger):
    logger.info("evaluate:{}".format(name))
    ner_results = model.evaluate(sess, data, id_to_tag)
    eval_lines = test_ner(ner_results, FLAGS.result_path)
    for line in eval_lines:
        logger.info(line)
    f1 = float(eval_lines[1].strip().split()[-1])
    if name == "dev":
        best_dev_f1 = model.best_dev_f1.eval()
        if f1 > best_dev_f1:
            tf.assign(model.best_dev_f1, f1).eval()
            logger.info("new best dev f1 score:{:>.3f}".format(f1))
        return f1 > best_dev_f1
    elif name == "test":
        best_test_f1 = model.best_test_f1.eval()
        if f1 > best_test_f1:
            tf.assign(model.best_test_f1, f1).eval()
            logger.info("new best test f1 score:{:>.3f}".format(f1))
        return f1 > best_test_f1
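

# train() wires the full pipeline together: load the three data splits, build
# (or reload) the char/tag maps, convert sentences to index features, then run
# the epoch loop, saving a checkpoint whenever the dev F1 improves.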
def train():
    # load data sets
    train_sentences = load_sentences(FLAGS.train_file, FLAGS.lower, FLAGS.zeros)
    dev_sentences = load_sentences(FLAGS.dev_file, FLAGS.lower, FLAGS.zeros)
    test_sentences = load_sentences(FLAGS.test_file, FLAGS.lower, FLAGS.zeros)

    # Use selected tagging scheme (IOB / IOBES) on all three splits
    update_tag_scheme(train_sentences, FLAGS.tag_schema)
    update_tag_scheme(dev_sentences, FLAGS.tag_schema)
    update_tag_scheme(test_sentences, FLAGS.tag_schema)
    # create maps if they do not exist
    if not os.path.isfile(FLAGS.map_file):
        # create dictionary for characters
        if FLAGS.pre_emb:
            dico_chars_train = char_mapping(train_sentences, FLAGS.lower)[0]
            dico_chars, char_to_id, id_to_char = augment_with_pretrained(
                dico_chars_train.copy(),
                FLAGS.emb_file,
                list(itertools.chain.from_iterable(
                    [[w[0] for w in s] for s in test_sentences])
                )
            )
        else:
            _c, char_to_id, id_to_char = char_mapping(train_sentences, FLAGS.lower)

        # create a dictionary and a mapping for tags
        _t, tag_to_id, id_to_tag = tag_mapping(train_sentences)
        with open(FLAGS.map_file, "wb") as f:
            pickle.dump([char_to_id, id_to_char, tag_to_id, id_to_tag], f)
    else:
        with open(FLAGS.map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    # prepare data: get a collection of lists containing indices
    train_data = prepare_dataset(
        train_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    dev_data = prepare_dataset(
        dev_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    test_data = prepare_dataset(
        test_sentences, char_to_id, tag_to_id, FLAGS.lower
    )
    print("%i / %i / %i sentences in train / dev / test." % (
        len(train_data), len(dev_data), len(test_data)))

    train_manager = BatchManager(train_data, FLAGS.batch_size)
    dev_manager = BatchManager(dev_data, 100)
    test_manager = BatchManager(test_data, 100)
    # make paths for storing logs and models if they do not exist
    make_path(FLAGS)
    if os.path.isfile(FLAGS.config_file):
        config = load_config(FLAGS.config_file)
    else:
        config = config_model(char_to_id, tag_to_id)
        save_config(config, FLAGS.config_file)

    log_path = os.path.join("log", FLAGS.log_file)
    logger = get_logger(log_path)
    print_config(config, logger)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        logger.info("start training")
        loss = []
        for i in range(FLAGS.max_epoch):
            for batch in train_manager.iter_batch(shuffle=True):
                step, batch_loss = model.run_step(sess, True, batch)
                loss.append(batch_loss)
                if step % FLAGS.steps_check == 0:
                    iteration = step // steps_per_epoch + 1
                    logger.info("iteration:{} step:{}/{}, "
                                "NER loss:{:>9.6f}".format(
                                    iteration, step % steps_per_epoch,
                                    steps_per_epoch, np.mean(loss)))
                    loss = []

            # evaluate on dev after each epoch; save whenever dev F1 improves
            best = evaluate(sess, model, "dev", dev_manager, id_to_tag, logger)
            if best:
                save_model(sess, model, FLAGS.ckpt_path, logger)
            evaluate(sess, model, "test", test_manager, id_to_tag, logger)
def evaluate_line():
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        while True:
            try:
                # prompt reads "Please enter a test sentence:"
                line = input("请输入测试句子:")
                result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
                print(result)
            except Exception as e:
                logger.info(e)


def main(_):
    if FLAGS.train:
        if FLAGS.clean:
            clean(FLAGS)
        train()
    else:
        evaluate_line()


if __name__ == "__main__":
    tf.app.run(main)