###############################################################################
# Author: Md Rizwan Parvez
# Project: LanModeledProgramGeneration
# Date Created: 3/27/2017
# Much of the code is adapted from Wasi Ahmad's QuestionClassifier.
# File Description: This is the main script from where all experimental
# execution begins.
###############################################################################
import torch, helper
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import util
from embedding_layer import Embedding_Drop_Layer
from encoder import EncoderDropoutRNN
from decoder import DecoderLinear


class LanguageModel(nn.Module):
    def __init__(self, dictionary, args):
        """Constructor of the class."""
        super(LanguageModel, self).__init__()
        self.dictionary = dictionary
        self.config = args
        self.vocab_size = len(self.dictionary)
        self.embedding_drop = Embedding_Drop_Layer(self.vocab_size, self.config.emsize, self.config.dropout)
        self.encoder_drop = EncoderDropoutRNN(args)
        self.decoder = DecoderLinear(self.config.nhid, self.vocab_size)
        # Weight tying (sharing embedding and decoder weights) requires
        # nhid == emsize; kept disabled for now.
        # if tie_weights:
        #     if nhid != ninp:
        #         raise ValueError('When using the tied flag, nhid must be equal to emsize')
        #     self.decoder.weight = self.encoder.weight
        self.init_weights()
        # self.rnn_type = rnn_type
        # self.nhid = nhid
        # self.nlayers = nlayers

    def init_weights(self):
        initrange = 0.1
        # self.embedding_drop.init_embedding_weights(self.dictionary, self.embeddings_index, self.config.emsize)
        # (the call above would instead initialize the embedding layer with GloVe vectors)
        self.embedding_drop.init_weight(initrange)
        # Note: the encoder's weights are left at their defaults; only its hidden
        # and cell states are initialized explicitly (see init_hidden below).
        self.decoder.init_weights(initrange)

    def forward(self, input, hidden):
        # Accepts a tensor or Variable; util.repackage_hidden wraps it as a
        # Variable, honoring config.cuda.
        input = util.repackage_hidden(input, self.config.cuda)
        hidden = util.repackage_hidden(hidden, self.config.cuda)
        emb_drop = self.embedding_drop(input)
        output, hidden = self.encoder_drop(emb_drop, hidden)
        # Decode every time step; the result is shaped
        # (batch x seq_len x vocab_size) or (seq_len x batch x vocab_size),
        # matching the encoder output layout.
        decoded = self.decoder(output)
        return decoded, hidden

    def init_hidden(self, bsz):
        return self.encoder_drop.init_weights(bsz)
        # Equivalent manual initialization, kept for reference:
        # weight = next(self.parameters()).data
        # if self.rnn_type == 'LSTM':
        #     return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
        #             Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
        # else:
        #     return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
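

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only; not part of the original module).
# It assumes `args` is an argparse-style namespace exposing at least the fields
# read above (emsize, nhid, dropout, cuda) plus whatever EncoderDropoutRNN
# consumes, that `dictionary` supports len(), and that the model takes a
# LongTensor of token ids shaped (seq_len, batch). All values below are made up.
# ---------------------------------------------------------------------------
# from argparse import Namespace
#
# args = Namespace(emsize=200, nhid=200, dropout=0.5, cuda=False)
# dictionary = {'<pad>': 0, '<unk>': 1, 'the': 2}  # any object with len()
# model = LanguageModel(dictionary, args)
# hidden = model.init_hidden(bsz=4)
# input = torch.LongTensor(10, 4).random_(0, len(dictionary))  # 10 steps x 4 sequences
# logits, hidden = model(input, hidden)  # logits span the vocabulary at every step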