diff --git a/examples/mxnet/gat/gat_batch.py b/examples/mxnet/gat/gat_batch.py
index ecd36e31a4a7..44545ced910d 100644
--- a/examples/mxnet/gat/gat_batch.py
+++ b/examples/mxnet/gat/gat_batch.py
@@ -178,10 +178,11 @@ def main(args):
     test_mask = test_mask.as_in_context(ctx)
     val_mask = val_mask.as_in_context(ctx)
     # create graph
-    g = DGLGraph(data.graph)
+    g = data.graph
     # add self-loop
+    g.remove_edges_from(g.selfloop_edges())
+    g = DGLGraph(g)
     g.add_edges(g.nodes(), g.nodes())
-
     # create model
     model = GAT(g,
                 args.num_layers,
diff --git a/examples/mxnet/gcn/train.py b/examples/mxnet/gcn/train.py
index dd574d9150a1..1c90099bee3f 100644
--- a/examples/mxnet/gcn/train.py
+++ b/examples/mxnet/gcn/train.py
@@ -52,9 +52,11 @@ def main(args):
     test_mask = test_mask.as_in_context(ctx)
 
     # create GCN model
-    g = DGLGraph(data.graph)
+    g = data.graph
     if args.self_loop:
-        g.add_edges(g.nodes(), g.nodes())
+        g.remove_edges_from(g.selfloop_edges())
+        g.add_edges_from(zip(g.nodes(), g.nodes()))
+    g = DGLGraph(g)
     # normalization
     degs = g.in_degrees().astype('float32')
     norm = mx.nd.power(degs, -0.5)
diff --git a/examples/pytorch/gat/train.py b/examples/pytorch/gat/train.py
index 5ff3d9efca3c..e45c334074e1 100644
--- a/examples/pytorch/gat/train.py
+++ b/examples/pytorch/gat/train.py
@@ -65,11 +65,12 @@ def main(args):
         val_mask = val_mask.cuda()
         test_mask = test_mask.cuda()
 
-    # create DGL graph
-    g = DGLGraph(data.graph)
-    n_edges = g.number_of_edges()
+    g = data.graph
     # add self loop
+    g.remove_edges_from(g.selfloop_edges())
+    g = DGLGraph(g)
     g.add_edges(g.nodes(), g.nodes())
+    n_edges = g.number_of_edges()
     # create model
     heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]
     model = GAT(g,
diff --git a/examples/pytorch/gcn/gcn_mp.py b/examples/pytorch/gcn/gcn_mp.py
index b138636a8ff3..24affad94840 100644
--- a/examples/pytorch/gcn/gcn_mp.py
+++ b/examples/pytorch/gcn/gcn_mp.py
@@ -147,10 +147,12 @@ def main(args):
         test_mask = test_mask.cuda()
 
     # graph preprocess and calculate normalization factor
-    g = DGLGraph(data.graph)
-    n_edges = g.number_of_edges()
+    g = data.graph
+    g.remove_edges_from(g.selfloop_edges())
+    g = DGLGraph(g)
     # add self loop
     g.add_edges(g.nodes(), g.nodes())
+    n_edges = g.number_of_edges()
     # normalization
     degs = g.in_degrees().float()
     norm = torch.pow(degs, -0.5)
diff --git a/examples/pytorch/gcn/train.py b/examples/pytorch/gcn/train.py
index 153679db968b..bbfeb57accfb 100644
--- a/examples/pytorch/gcn/train.py
+++ b/examples/pytorch/gcn/train.py
@@ -54,11 +54,13 @@ def main(args):
         test_mask = test_mask.cuda()
 
     # graph preprocess and calculate normalization factor
-    g = DGLGraph(data.graph)
-    n_edges = g.number_of_edges()
+    g = data.graph
     # add self loop
     if args.self_loop:
-        g.add_edges(g.nodes(), g.nodes())
+        g.remove_edges_from(g.selfloop_edges())
+        g.add_edges_from(zip(g.nodes(), g.nodes()))
+    g = DGLGraph(g)
+    n_edges = g.number_of_edges()
     # normalization
     degs = g.in_degrees().float()
     norm = torch.pow(degs, -0.5)
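All five hunks apply the same pattern: strip any self-loops already present in the NetworkX graph returned by the data loader, then add exactly one self-loop per node, so that nodes which already carried a self-loop do not end up with a duplicate (u, u) edge after construction. Below is a minimal sketch of that pattern, assuming the legacy DGLGraph constructor that accepts a NetworkX graph and a NetworkX version that still provides Graph.selfloop_edges() (later versions move it to nx.selfloop_edges(G)); the helper name build_graph_with_self_loops is illustrative and not part of the patch.

```python
import networkx as nx
from dgl import DGLGraph


def build_graph_with_self_loops(nx_graph):
    # Hypothetical helper mirroring the pattern used in the hunks above.
    # Drop self-loops that already exist in the source graph so the explicit
    # addition below does not create duplicate (u, u) edges.
    nx_graph.remove_edges_from(nx_graph.selfloop_edges())
    g = DGLGraph(nx_graph)
    # Add exactly one self-loop per node.
    g.add_edges(g.nodes(), g.nodes())
    return g


# Usage sketch, e.g. with the data.graph object used in the examples:
# g = build_graph_with_self_loops(data.graph)
# n_edges = g.number_of_edges()
```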