-
Notifications
You must be signed in to change notification settings - Fork 0
/
cat0.5.py
94 lines (73 loc) · 2.12 KB
/
cat0.5.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# -*- coding: utf-8 -*-
import os
import io
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from textblob import Word
import string
from textblob.classifiers import NaiveBayesClassifier
from textblob import TextBlob
from stemming.porter2 import stem
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import time
# Labelled training pairs: (stemmed keyword, complaint category).
# Several tokens are Hindi (Devanagari) synonyms for the same categories.
_CATEGORY_KEYWORDS = [
    ('water', ['water', 'water', 'water', 'log', 'jal', 'drain', 'sewag']),
    ('police', ['burgler', 'मौत', 'thief', 'robbery', 'murder']),
    ('doctor', ['medicin', 'ill', 'sick', 'दवा', 'बीमार']),
]
train = [(word, category)
         for category, words in _CATEGORY_KEYWORDS
         for word in words]
def dataFrame(pairs=None):
    """Build a pandas DataFrame of labelled training messages.

    Parameters
    ----------
    pairs : iterable of (message, category) tuples, optional
        Defaults to the module-level ``train`` data, preserving the
        original no-argument behaviour.

    Returns
    -------
    DataFrame
        Columns 'message' and 'class', one row per pair, in input order.
    """
    if pairs is None:
        pairs = train  # module-level training data defined above
    rows = [{'message': message, 'class': cat} for message, cat in pairs]
    return DataFrame(rows)
# Build the training table. dataFrame() already returns every
# (message, class) row, so the original empty-frame + append dance is
# unnecessary — and DataFrame.append was removed in pandas 2.0.
data = dataFrame()

# Bag-of-words features over the training messages.
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['message'].values)
print(counts)

# Multinomial Naive Bayes: word counts -> complaint category.
classifier = MultinomialNB()
targets = data['class'].values
classifier.fit(counts, targets)
# --- Read a complaint, normalise it, and predict its category. ---

# Portable line input: raw_input was renamed to input in Python 3.
try:
    _read_line = raw_input
except NameError:
    _read_line = input
e = _read_line("Enter\n")
e = e.lower()

# Replace every punctuation character with a space so tokens split cleanly.
# (string.maketrans/str.translate differ between Python 2 and 3, so use
# portable per-character replacement instead.)
for _ch in string.punctuation:
    e = e.replace(_ch, ' ')

# Drop English stopwords, then stem the surviving tokens.
# NOTE: the original also built a throwaway stopword-filtered list here and
# immediately discarded it — that dead code is removed.
stop_words = set(stopwords.words('english'))
word_tokens = word_tokenize(e)
filtered_sentence = []
for w in word_tokens:
    if w not in stop_words:
        b = Word(w)
        # use porter stemming for speed (lemmatisation is slower)
        filtered_sentence.append(stem(b))

e = ' '.join(filtered_sentence)
print("lemantized sentence=" + e)

# Vectorize the cleaned sentence with the trained vocabulary and predict.
e = [e]
example_counts = vectorizer.transform(e)
print("\n")
print(example_counts)
predictions = classifier.predict(example_counts)
print(predictions[0])