Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fixes some issues #9

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
2 changes: 1 addition & 1 deletion cascadenet/network/layers/conv.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import theano
import lasagne
from lasagne.layers import Layer, prelu
from helper import ensure_set_name
from .helper import ensure_set_name

if theano.config.device == 'cuda':
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
Expand Down
2 changes: 1 addition & 1 deletion cascadenet/network/layers/data_consistency.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import theano.tensor as T
from lasagne.layers import MergeLayer, get_output
from fourier import FFT2Layer, FFTCLayer
from .fourier import FFT2Layer, FFTCLayer
from cascadenet.network.theano_extensions.fft_helper import fftshift, ifftshift
from cascadenet.network.theano_extensions.fft import fft, ifft

Expand Down
6 changes: 3 additions & 3 deletions cascadenet/network/layers/fourier.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@
from cascadenet.network.theano_extensions.gpuarray.fft2 import cufft2 as fft2g
from cascadenet.network.theano_extensions.gpuarray.fft2 import cuifft2 as ifft2g
use_cuda = True
print "Using GPU version of fft layers"
print ("Using GPU version of fft layers")
else:
from cascadenet.network.theano_extensions.fft2_lasagne import fft2, ifft2
use_cuda = False
print "Using CPU version of fft layers"
print ("Using CPU version of fft layers")


from cascadenet.network.theano_extensions.fft_helper import fftshift, ifftshift
Expand Down Expand Up @@ -215,7 +215,7 @@ def get_output_for(self, input, **kwargs):
# nt = self.data_shape[-1]
# out, updates = theano.scan(loop_over_n,
# non_sequences=input,
# sequences=xrange(nt))
# sequences=range(nt))
# return out

out, updates = theano.scan(self.transform, sequences=input)
Expand Down
2 changes: 1 addition & 1 deletion cascadenet/network/layers/pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ def get_output_shape_for(self, input_shape):

tr = len(output_shape) - self.n

for i in xrange(self.n):
for i in range(self.n):
output_shape[tr+i] = pool.pool_output_length(input_shape[tr+i],
pool_size=self.pool_size[i],
stride=self.stride[i],
Expand Down
10 changes: 5 additions & 5 deletions cascadenet/network/layers/shape.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,9 @@ def get_output_for(self, input, deterministic=False, **kwargs):
out = T.zeros((input.shape[0], self.output_shape[1],
self.output_shape[2], self.output_shape[3]))

for x in xrange(self.r):
for x in range(self.r):
# loop across all feature maps belonging to this channel
for y in xrange(self.r):
for y in range(self.r):
out = T.inc_subtensor(out[:, :, x::self.r, y::self.r],
input[:, self.r*x+y::self.r*self.r, :, :])
return out
Expand All @@ -51,10 +51,10 @@ def get_output_for(self, input, deterministic=False, **kwargs):
out = T.zeros((input.shape[0], self.output_shape[1],
self.output_shape[2], self.output_shape[3], self.output_shape[4]))

for x in xrange(r):
for x in range(r):
# loop across all feature maps belonging to this channel
for y in xrange(r):
for z in xrange(r):
for y in range(r):
for z in range(r):
out = T.inc_subtensor(out[..., x::r, y::r, z::r],
input[:, (r**2)*x+(r*y)+z::r*r*r, ...])
return out
Expand Down
2 changes: 1 addition & 1 deletion cascadenet/network/layers/simple.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import theano.tensor as T
import lasagne
from lasagne.layers import Layer
from helper import ensure_set_name
from .helper import ensure_set_name


class IdLayer(Layer):
Expand Down
6 changes: 3 additions & 3 deletions cascadenet/network/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ def cascade_resnet(pr, net, input_layer, n=5, nf=64, b=lasagne.init.Constant, **
n_channel = shape[1]
net[pr+'conv1'] = l.Conv(input_layer, nf, 3, b=b(), name=pr+'conv1')

for i in xrange(2, n):
for i in range(2, n):
net[pr+'conv%d'%i] = l.Conv(net[pr+'conv%d'%(i-1)], nf, 3, b=b(),
name=pr+'conv%d'%i)

Expand Down Expand Up @@ -37,7 +37,7 @@ def cascade_resnet_3d_avg(pr, net, input_layer, n=5, nf=64,
# Conv layers
net[pr+'conv1'] = l.Conv3D(net[pr+'kavg'], nf, k, b=b(), name=pr+'conv1')

for i in xrange(2, n):
for i in range(2, n):
net[pr+'conv%d'%i] = l.Conv3D(net[pr+'conv%d'%(i-1)], nf, k, b=b(),
name=pr+'conv%d'%i)

Expand Down Expand Up @@ -67,7 +67,7 @@ def build_cascade_cnn_from_list(shape, net_meta, lmda=None):
j = 0
for cascade_net, cascade_n in net_meta:
# Cascade layer
for i in xrange(cascade_n):
for i in range(cascade_n):
pr = 'c%d_' % j
net, output_layer = cascade_net(pr, net, input_layer,
**{'cascade_i': j})
Expand Down
4 changes: 2 additions & 2 deletions main_2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def iterate_minibatch(data, batch_size, shuffle=True):
if shuffle:
data = np.random.permutation(data)

for i in xrange(0, n, batch_size):
for i in range(0, n, batch_size):
yield data[i:i+batch_size]


Expand Down Expand Up @@ -171,7 +171,7 @@ def compile_fn(network, net_config, args):
train, validate, test = create_dummy_data()

print('Start Training...')
for epoch in xrange(num_epoch):
for epoch in range(num_epoch):
t_start = time.time()
# Training
train_err = 0
Expand Down
4 changes: 2 additions & 2 deletions main_3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def iterate_minibatch(data, batch_size, shuffle=True):
if shuffle:
data = np.random.permutation(data)

for i in xrange(0, n, batch_size):
for i in range(0, n, batch_size):
yield data[i:i+batch_size]


Expand Down Expand Up @@ -173,7 +173,7 @@ def compile_fn(network, net_config, args):
# Compile function
train_fn, val_fn = compile_fn(net, net_config, args)

for epoch in xrange(num_epoch):
for epoch in range(num_epoch):
t_start = time.time()
# Training
train_err = 0
Expand Down
8 changes: 4 additions & 4 deletions utils/compressed_sensing.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def shear_grid_mask(shape, acceleration_rate, sample_low_freq=True,
Nt, Nx, Ny = shape
start = np.random.randint(0, acceleration_rate)
mask = np.zeros((Nt, Nx))
for t in xrange(Nt):
for t in range(Nt):
mask[t, (start+t)%acceleration_rate::acceleration_rate] = 1

xc = Nx / 2
Expand Down Expand Up @@ -135,15 +135,15 @@ def perturbed_shear_grid_mask(shape, acceleration_rate, sample_low_freq=True,
Nt, Nx, Ny = shape
start = np.random.randint(0, acceleration_rate)
mask = np.zeros((Nt, Nx))
for t in xrange(Nt):
for t in range(Nt):
mask[t, (start+t)%acceleration_rate::acceleration_rate] = 1

# brute force
rand_code = np.random.randint(0, 3, size=Nt*Nx)
shift = np.array([-1, 0, 1])[rand_code]
new_mask = np.zeros_like(mask)
for t in xrange(Nt):
for x in xrange(Nx):
for t in range(Nt):
for x in range(Nx):
if mask[t, x]:
new_mask[t, (x + shift[t*x])%Nx] = 1

Expand Down