Skip to content

Commit

Permalink
0.1.3: fix a bug in GPU integration for DGL models
Browse files Browse the repository at this point in the history
  • Loading branch information
kexinhuang12345 committed May 14, 2021
1 parent fc750f7 commit b77535a
Show file tree
Hide file tree
Showing 2 changed files with 5 additions and 1 deletion.
4 changes: 4 additions & 0 deletions DeepPurpose/encoders.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,7 @@ def __init__(self, in_feats, hidden_feats=None, max_degree = None, activation=No
self.transform = nn.Linear(predictor_hidden_size * 2, predictor_dim)

def forward(self, bg):
bg = bg.to(device)
feats = bg.ndata.pop('h')
node_feats = self.gnn(bg, feats)
node_feats = self.node_to_graph(node_feats)
Expand All @@ -373,6 +374,7 @@ def __init__(self, predictor_dim=None):
self.transform = nn.Linear(300, predictor_dim)

def forward(self, bg):
bg = bg.to(device)
node_feats = [
bg.ndata.pop('atomic_number'),
bg.ndata.pop('chirality_type')
Expand Down Expand Up @@ -400,6 +402,7 @@ def __init__(self, predictor_dim=None):
self.transform = nn.Linear(300, predictor_dim)

def forward(self, bg):
bg = bg.to(device)
node_feats = [
bg.ndata.pop('atomic_number'),
bg.ndata.pop('chirality_type')
Expand Down Expand Up @@ -432,6 +435,7 @@ def __init__(self, node_feat_size, edge_feat_size, num_layers = 2, num_timesteps
self.transform = nn.Linear(graph_feat_size, predictor_dim)

def forward(self, bg):
bg = bg.to(device)
node_feats = bg.ndata.pop('h')
edge_feats = bg.edata.pop('e')

Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def readme():
name="DeepPurpose",
packages = ['DeepPurpose'],
package_data={'DeepPurpose': ['ESPF/*']},
version="0.1.2",
version="0.1.3",
author="Kexin Huang, Tianfan Fu",
license="BSD-3-Clause",
author_email="[email protected]",
Expand Down

0 comments on commit b77535a

Please sign in to comment.