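"""Cluster questions with k-means on character-count features.

Reads data/question_info.txt, builds bag-of-character-id count features with
CountVectorizer, fits a 20-cluster KMeans model, writes each question's
cluster assignment to data/question-clusters.csv, and plots the cluster
centers.
"""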
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Build question features
question_info = pd.read_csv("data/question_info.txt", sep="\t", header=None, names=[
    "question_id", "tag", "word_id", "char_id", "upvotes", "answers", "top_answers"
])
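# Min-max-scale answers, top answers, upvotes and tag ids to the [0, 1] range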
question_info['ease'] = (question_info['answers'] - question_info['answers'].min()) / (question_info['answers'].max() - question_info['answers'].min())
question_info['popularity'] = (question_info['top_answers'] - question_info['top_answers'].min()) / (question_info['top_answers'].max() - question_info['top_answers'].min())
question_info['votability'] = (question_info['upvotes'] - question_info['upvotes'].min()) / (question_info['upvotes'].max() - question_info['upvotes'].min())
question_info['nTag'] = (question_info['tag'] - question_info['tag'].min()) / (question_info['tag'].max() - question_info['tag'].min())
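# Fraction of a question's answers that became top answers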
question_info['answerability'] = question_info['top_answers'] / question_info['answers']
question_info['answerability'] = question_info['answerability'].replace([np.inf, -np.inf], np.nan).fillna(0)
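# Turn the '/'-separated id strings into bag-of-id count features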
vectorizer = CountVectorizer(tokenizer=lambda s: s.split('/'))
# qWords = pd.DataFrame(vectorizer.fit_transform(question_info['word_id']).toarray())
# qWords.columns = ["w" + str(i) for i in range(qWords.shape[1])]
qChars = pd.DataFrame(vectorizer.fit_transform(question_info['char_id']).toarray())
qChars.columns = ["c" + str(i) for i in range(qChars.shape[1])]
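# Append the count columns to the question table; both frames share the default RangeIndex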
# question_info = pd.concat([question_info, qWords, qChars], axis=1)
question_info = pd.concat([question_info, qChars], axis=1)
# FEATURES = [ "tag", "upvotes", "answers", "top_answers", "answerability" ]
# FEATURES = [ "ease", "popularity", "votability", "answerability" ]
FEATURES = qChars.columns.tolist()
# + qWords.columns.tolist() + qChars.columns.tolist()
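# Cluster the questions into 20 groups with k-means on the selected features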
km = KMeans(n_clusters=20,
            init='k-means++',
            n_init=10,
            max_iter=100,
            tol=1e-4,
            random_state=None)
km.fit(question_info[FEATURES])
question_info['clusters'] = km.labels_
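# Save each question's cluster assignment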
res = question_info[['question_id', 'clusters']]
res.to_csv('data/question-clusters.csv', sep=",", index=False)
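# Plot each cluster center as a curve over the feature dimensions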
for c in km.cluster_centers_:
    plt.plot(c)
plt.show()
# Alternative: per-cluster tag histograms
# for c in range(5):
#     plt.hist(question_info[question_info.clusters == c]['tag'], range(21), stacked=True, density=True)
#     plt.show()