preprocess.py
from multiprocessing import Pool
from collections import Counter
import argparse

from google_bert import BasicTokenizer

# Read the source file in ~40 MB chunks so very large corpora never have to
# fit in memory at once.
BUFSIZE = 40960000


def parse_config():
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_file', type=str, default='toy/sample_from_zhwiki')
    parser.add_argument('--tgt_file', type=str, default='data/toy')
    parser.add_argument('--nprocessors', type=int, default=8)
    return parser.parse_args()
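# With no command-line flags this yields
# Namespace(src_file='toy/sample_from_zhwiki', tgt_file='data/toy',
# nprocessors=8); src_file is the raw corpus and tgt_file the output prefix
# (the vocabulary is written to tgt_file + '_vocab').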


_split_set = {'!', '?', '。'}


def _is_split_point(ch):
    return ch in _split_set
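# A quick illustration: _is_split_point('。') is True, while the Chinese
# comma '，' is not a split point, so clauses stay inside one sentence.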


def work_news_char(line):
    """Split one raw news line into character-level sentences.

    Only used for news-style corpora; wiki dumps go through work_wiki_char.
    """
    tokenizer = BasicTokenizer()
    line = line.strip()
    if line == "":
        return [[]]
    char_seq = tokenizer.tokenize(line)
    res = []
    sent = []
    for ch in char_seq:
        sent.append(ch)
        # Only split on sentence-final punctuation once the running sentence
        # is at least 20 characters long.
        if len(sent) >= 20 and _is_split_point(ch):
            res.append(sent)
            sent = []
    if sent:
        # A very short tail (<= 3 characters) is merged into the previous
        # sentence instead of standing alone.
        if len(sent) <= 3 and len(res) > 0:
            res[-1].extend(sent)
        else:
            res.append(sent)
    return res
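# A hypothetical sketch of the behaviour above (the actual tokens depend on
# google_bert's BasicTokenizer): for a 40-character line with a '。' at
# position 10 and another at position 30, the first '。' is ignored because
# the running sentence is still under 20 characters, so the line splits once,
# into a 30-character sentence followed by a 10-character one.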


def work_wiki_char(line):
    """Split one line of a zhwiki dump into character-level sentences.

    Returns [] for blank lines and [[]] for the <doc ...>/</doc> markers
    found in extracted wiki dumps; the empty sentence acts as a document
    separator in the main loop below.
    """
    tokenizer = BasicTokenizer()
    line = line.strip()
    if line == "":
        return []
    if line.startswith("</doc>"):
        return [[]]
    if line.startswith("<doc id="):
        return [[]]
    char_seq = tokenizer.tokenize(line)
    res = []
    sent = []
    for ch in char_seq:
        sent.append(ch)
        if len(sent) >= 20 and _is_split_point(ch):
            res.append(sent)
            sent = []
    if sent:
        if len(sent) <= 3 and len(res) > 0:
            res[-1].extend(sent)
        else:
            res.append(sent)
    return res
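# Note (assuming the writer loop below): [[]] holds a single empty sentence,
# which is falsy, so a </doc> marker followed by the next <doc id=...> marker
# collapses into one blank line in the output rather than producing text.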


if __name__ == "__main__":
    args = parse_config()
    pool = Pool(args.nprocessors)
    stream = open(args.src_file, encoding='utf8')
    cnt = Counter()
    with open(args.tgt_file, 'w', encoding='utf8') as fo:
        while True:
            lines = stream.readlines(BUFSIZE)
            if not lines:
                break
            # Guard against a zero chunksize when a chunk holds fewer lines
            # than there are worker processes.
            chunksize = max(1, len(lines) // args.nprocessors)
            res = pool.map(work_wiki_char, lines, chunksize)
            all_sents = []
            for sents in res:
                all_sents.extend(sents)
            empty = True
            for sent in all_sents:
                if sent:
                    cnt.update(sent)
                    fo.write(' '.join(sent) + '\n')
                    empty = False
                elif not empty:
                    # An empty sentence marks a document boundary: emit one
                    # blank line, collapsing repeated markers.
                    fo.write('\n')
                    empty = True
    stream.close()
    pool.close()
    pool.join()
    with open(args.tgt_file + '_vocab', 'w', encoding='utf8') as fo:
        for x, y in cnt.most_common():
            fo.write(x + '\t' + str(y) + '\n')
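
# Usage sketch (with the repository defaults):
#   python preprocess.py --src_file toy/sample_from_zhwiki --tgt_file data/toy
# writes one sentence per line to data/toy (space-separated characters, with a
# blank line between documents) and a tab-separated character-frequency
# vocabulary to data/toy_vocab. The main loop calls work_wiki_char; switch to
# work_news_char for news-style corpora without <doc> markers.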