subset_topicmodels.py
"""
Generate topic models for the philoso corpus, the NB1 subset, and the NB2 corpus.
"""
import sys
import os
import glob
import re
import logging
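# Configure logging before any training so gensim's progress messages are
# written to lda_models.log.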
logging.basicConfig(
    filename='lda_models.log',
    format='%(asctime)s : %(levelname)s : %(message)s',
    level=logging.INFO
)
from smart_open import open
import pandas as pd
from gensim.models import LdaMulticore
from gensim import corpora
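# Local project modules: NL_helpers supplies tokenise_and_stop, and
# NL_topicmodels supplies the corpus wrappers and topics_and_keywords.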
import NL_helpers
import NL_topicmodels
def fit_and_save_models(corpus, corpus_name, topic_counts):
    """Fit and save one LDA model per entry in topic_counts (iterable).

    The naming convention ignores that the number of topics is *also* a
    hyperparameter.
    """
    for num_topics in topic_counts:
        # Fit model
        lda_model = LdaMulticore(
            corpus,
            num_topics=num_topics,
            workers=15,  # real cores - 1
            chunksize=500,
            id2word=corpus.dictionary,
            iterations=500,
            passes=25,
            eval_every=100
        )
        # Save model
        lda_model.save(f'lda_models/{corpus_name}_{num_topics}.ldamodel')
        # Extract keywords for each topic and save them as a CSV via a
        # pandas DataFrame.
        topic_words = NL_topicmodels.topics_and_keywords(lda_model)
        topic_words_df = pd.DataFrame.from_dict(topic_words, orient='index')
        topic_words_df.to_csv(
            f'lda_models/{corpus_name}_{num_topics}.csv'
        )
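# Example call (hypothetical names): with an NL_corpus nb_corpus already
# built, fit_and_save_models(nb_corpus, 'philoso', [10, 50]) writes
# lda_models/philoso_10.ldamodel, lda_models/philoso_10.csv, and the
# corresponding files for 50 topics.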
def get_dictionary(corpus_df, corpus_name):
    """Given the name of a corpus, attempt to load a pre-generated dictionary.

    If no dictionary is present, generate, filter, and save one.
    """
    try:
        dictionary = corpora.Dictionary.load(
            f'dictionaries/{corpus_name}.dict'
        )
    except FileNotFoundError:
        minimum_in_docs = 10
        dictionary = corpora.Dictionary(corpus_df['Tokenised'])
        # Drop tokens appearing in fewer than 10 documents or in more
        # than 40% of them, then reassign contiguous token ids.
        dictionary.filter_extremes(no_below=minimum_in_docs, no_above=0.4)
        dictionary.compactify()
        dictionary.save(f'dictionaries/{corpus_name}.dict')
    return dictionary
def main():
    """Build each corpus and fit topic models over a range of topic counts."""
    corpus_names = ['nb2_v2_philoso']
    num_topics = [10, 50, 100, 500]
    for corpus_name in corpus_names:
        if corpus_name in ['philoso', 'nb2_philoso', 'nb2_v2_philoso']:
            # In-memory corpora: tokenise the raw text, then wrap the
            # DataFrame and dictionary in an NL_corpus.
            corpus = pd.read_pickle(f'pickles/{corpus_name}_df.tar.gz')
            corpus['Tokenised'] = corpus['Text'].apply(
                NL_helpers.tokenise_and_stop
            )
            dictionary = get_dictionary(corpus, corpus_name)
            corpus = NL_topicmodels.NL_corpus(corpus, dictionary)
            fit_and_save_models(corpus, corpus_name, num_topics)
        elif corpus_name == 'nb1_philoso':
            # Stream pre-computed bags of words from disk rather than
            # building the corpus in memory.
            dictionary = corpora.Dictionary.load(
                f'dictionaries/{corpus_name}.dict'
            )
            corpus = NL_topicmodels.NL_streamed_corpus(
                '/home/joshua/Documents/philoso_nb1/bags.csv',
                dictionary
            )
            fit_and_save_models(corpus, corpus_name, num_topics)


if __name__ == '__main__':
    main()
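# Scratch code (kept commented out): builds bags.csv for the streamed NB1
# corpus and checks that each serialised bag-of-words line parses back into
# (token_id, count) pairs.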
# df = pd.read_pickle('pickles/nb1_philoso_df.tar.gz')
# dictionary = corpora.Dictionary.load('dictionaries/nb1_philoso.dict')
# df['Tokenised'] = df['Text'].apply(NL_helpers.tokenise_and_stop)
# corpus = NL_topicmodels.NL_corpus(df, dictionary)
# corpus.items['BOW'].to_csv('/home/joshua/Documents/philoso_nb1/bags.csv')
# file_stream = open('/home/joshua/Documents/philoso_nb1/bags.csv', 'r')
#
# counter = 0
# for line in file_stream:
#     if counter < 10:
#         doc_id, bag_string = line.split(',"')
#         pairs = bag_string[2:-4].split('), (')
#         bow = []
#         for pair in pairs:
#             i, j = pair.split(', ')
#             bow.append((int(i), int(j)))
#         counter += 1
#         print(bow)
#
# line = file_stream.readline()
# doc_id, bag_string = line.split(',"')
# pairs = bag_string[2:-4].split('), (')
# pairs
# bag_string
# corpus_name = 'nb1_philoso'
# dictionary = corpora.Dictionary.load(
#     f'dictionaries/{corpus_name}.dict'
# )
# corpus = NL_topicmodels.NL_streamed_corpus(
#     '/home/joshua/Documents/philoso_nb1/bags.csv',
#     dictionary
# )
# len(corpus)