11
11
import torch
12
12
import torch .nn as nn
13
13
import torch .nn .functional as F
14
+ import dgl
14
15
from dgl import DGLGraph
15
16
from dgl .data import register_data_args , load_data
16
17
from dgl .nn .pytorch .conv import SAGEConv
@@ -48,31 +49,27 @@ def forward(self, graph, inputs):
48
49
return h
49
50
50
51
51
def evaluate(model, graph, features, labels, nid):
    """Return the classification accuracy of `model` on the nodes `nid`.

    Runs a full-graph forward pass in inference mode, restricts the
    predictions and ground-truth labels to the node ids in `nid`, and
    reports the fraction of correct predictions as a float.
    """
    model.eval()
    # Inference only: disable autograd bookkeeping for the forward pass.
    with torch.no_grad():
        scores = model(graph, features)[nid]
        truth = labels[nid]
        # Predicted class = index of the max logit per node.
        preds = scores.argmax(dim=1)
        n_correct = (preds == truth).sum()
        return n_correct.item() * 1.0 / len(truth)
60
61
61
62
def main (args ):
62
63
# load and preprocess dataset
63
64
data = load_data (args )
64
- features = torch .FloatTensor (data .features )
65
- labels = torch .LongTensor (data .labels )
66
- if hasattr (torch , 'BoolTensor' ):
67
- train_mask = torch .BoolTensor (data .train_mask )
68
- val_mask = torch .BoolTensor (data .val_mask )
69
- test_mask = torch .BoolTensor (data .test_mask )
70
- else :
71
- train_mask = torch .ByteTensor (data .train_mask )
72
- val_mask = torch .ByteTensor (data .val_mask )
73
- test_mask = torch .ByteTensor (data .test_mask )
65
+ g = data [0 ]
66
+ features = g .ndata ['feat' ]
67
+ labels = g .ndata ['label' ]
68
+ train_mask = g .ndata ['train_mask' ]
69
+ val_mask = g .ndata ['val_mask' ]
70
+ test_mask = g .ndata ['test_mask' ]
74
71
in_feats = features .shape [1 ]
75
- n_classes = data .num_labels
72
+ n_classes = data .num_classes
76
73
n_edges = data .graph .number_of_edges ()
77
74
print ("""----Data statistics------'
78
75
#Edges %d
@@ -97,11 +94,15 @@ def main(args):
97
94
test_mask = test_mask .cuda ()
98
95
print ("use cuda:" , args .gpu )
99
96
97
+ train_nid = train_mask .nonzero ().squeeze ()
98
+ val_nid = val_mask .nonzero ().squeeze ()
99
+ test_nid = test_mask .nonzero ().squeeze ()
100
+
100
101
# graph preprocess and calculate normalization factor
101
- g = data .graph
102
- g .remove_edges_from (nx .selfloop_edges (g ))
103
- g = DGLGraph (g )
102
+ g = dgl .remove_self_loop (g )
104
103
n_edges = g .number_of_edges ()
104
+ if cuda :
105
+ g = g .int ().to (args .gpu )
105
106
106
107
# create GraphSAGE model
107
108
model = GraphSAGE (in_feats ,
@@ -126,7 +127,7 @@ def main(args):
126
127
t0 = time .time ()
127
128
# forward
128
129
logits = model (g , features )
129
- loss = F .cross_entropy (logits [train_mask ], labels [train_mask ])
130
+ loss = F .cross_entropy (logits [train_nid ], labels [train_nid ])
130
131
131
132
optimizer .zero_grad ()
132
133
loss .backward ()
@@ -135,13 +136,13 @@ def main(args):
135
136
if epoch >= 3 :
136
137
dur .append (time .time () - t0 )
137
138
138
- acc = evaluate (model , g , features , labels , val_mask )
139
+ acc = evaluate (model , g , features , labels , val_nid )
139
140
print ("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | "
140
141
"ETputs(KTEPS) {:.2f}" .format (epoch , np .mean (dur ), loss .item (),
141
142
acc , n_edges / np .mean (dur ) / 1000 ))
142
143
143
144
print ()
144
- acc = evaluate (model , g , features , labels , test_mask )
145
+ acc = evaluate (model , g , features , labels , test_nid )
145
146
print ("Test Accuracy {:.4f}" .format (acc ))
146
147
147
148
0 commit comments