#!/usr/bin/env python

# testNN.py, forked from wangsl/python-embedding

import sys
from math import sin

import numpy
import torch

import subgrid

# Load the paper's neural network on the CPU.
nn = subgrid.load_paper_net('cpu')


def my_testNN(x, y):
    # Reshape the flat input x to (1, 2, 25, 25) in Fortran order, run it
    # through the network, and write the flattened float64 output into the
    # preallocated buffer y in place.
    print(x[:10])
    x = x.reshape((1, 2, 25, 25), order='F')
    x = x.astype(numpy.float32)
    x = torch.tensor(x)
    with torch.no_grad():
        out = nn(x)
    out = out.numpy().astype(numpy.float64)
    out = out.flatten(order='F')
    print(out[:10])
    y[:] = out[:]
    sys.stdout.flush()


def my_test(x):
    # Increment every element of x in place, then wrap the data in torch
    # tensors: a shares memory with x, b is a float32 copy (moved to the GPU
    # if one is available).
    print(' From Python test: {}'.format(x.size))
    for i in range(x.size):
        x[i] += 1.0
    a = torch.from_numpy(x)
    print(a)
    b = torch.from_numpy(x.astype(numpy.float32))
    print(b)
    if torch.cuda.is_available():
        b_dev = b.cuda()
        print(b_dev)
    my_test2()
    sys.stdout.flush()
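

# Editor's sketch, not part of the original script: a minimal way to exercise
# my_test from pure Python. my_test expects a writable float64 NumPy array;
# it increments every element in place and prints tensor views of the result.
def _demo_my_test():
    data = numpy.zeros(5)   # small writable float64 array
    my_test(data)           # prints the tensors; data becomes all ones
    return data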


def my_test2():
    print(' **** From my_test2 ****')
    return


if __name__ == '__main__':
    import numpy as np

    # Build a 1250-element test input (0.01 .. 12.50), reshape it to
    # (1, 2, 25, 25) in Fortran order, and run it through the network.
    x = np.arange(1, 1251).astype(np.float32)
    x = x / 100
    print(x[:10])
    x = x.reshape((1, 2, 25, 25), order='F')
    x = torch.tensor(x)
    with torch.no_grad():
        out = nn(x)
    out = out.numpy()
    out = out.flatten(order='F')
    print("BEGINNING OF PYTHON")
    print(out[:10])
    print("END OF PYTHON")