diff --git a/ptypy/accelerate/base/engines/ML_serial.py b/ptypy/accelerate/base/engines/ML_serial.py index c468755b8..1e953d26a 100644 --- a/ptypy/accelerate/base/engines/ML_serial.py +++ b/ptypy/accelerate/base/engines/ML_serial.py @@ -34,7 +34,7 @@ class ML_serial(ML): """ Defaults: - [smooth_gradient.method] + [smooth_gradient_method] default = convolution type = str help = Method to be used for smoothing the gradient, choose between ```convolution``` or ```fft```. @@ -153,12 +153,12 @@ def engine_prepare(self): self.ML_model.prepare() def _get_smooth_gradient(self, data, sigma): - if self.p.smooth_gradient.method == "convolution": + if self.p.smooth_gradient_method == "convolution": return complex_gaussian_filter(data, sigma) elif self.p.smooth_gradient_method == "fft": return complex_gaussian_filter_fft(data, sigma) else: - raise NotImplementedError("smooth_gradient.method can only be ```convolution``` or ```fft```.") + raise NotImplementedError("smooth_gradient_method should be ```convolution``` or ```fft```.") def _replace_ob_grad(self): new_ob_grad = self.ob_grad_new @@ -246,8 +246,7 @@ def engine_iterate(self, num=1): # Smoothing preconditioner if self.smooth_gradient: for name, s in self.ob_h.storages.items(): - # s.data[:] -= self._get_smooth_gradient(self.ob_grad.storages[name].data, self.smooth_gradient.sigma) - s.data[:] -= self._get_smooth_gradient_fft(self.ob_grad.storages[name].data, self.smooth_gradient.sigma) + s.data[:] -= self._get_smooth_gradient(self.ob_grad.storages[name].data, self.smooth_gradient.sigma) else: self.ob_h -= self.ob_grad diff --git a/ptypy/accelerate/cuda_pycuda/array_utils.py b/ptypy/accelerate/cuda_pycuda/array_utils.py index 8c4c8efd0..2845a8b93 100644 --- a/ptypy/accelerate/cuda_pycuda/array_utils.py +++ b/ptypy/accelerate/cuda_pycuda/array_utils.py @@ -356,13 +356,12 @@ def allocate(self, shape, sigma=1.): def _compute_kernel(self, shape, sigma): # Create kernel - self.sigma = sigma - if len(sigma) == 1: + sigma = 
np.array(sigma) + if sigma.size == 1: sigma = np.array([sigma, sigma]) - elif len(sigma) == 2: - sigma = np.array(sigma) - else: - raise NotImplementedError("Only batches of 2D arrays allowed!") + if sigma.size != 2: + raise NotImplementedError("Only batches of 2D arrays allowed!") + self.sigma = sigma return gaussian_kernel_2d(shape, sigma[0], sigma[1]).astype(self.dtype) def filter(self, data, sigma=None): diff --git a/ptypy/accelerate/cuda_pycuda/engines/ML_pycuda.py b/ptypy/accelerate/cuda_pycuda/engines/ML_pycuda.py index 0f7097357..b712f8974 100644 --- a/ptypy/accelerate/cuda_pycuda/engines/ML_pycuda.py +++ b/ptypy/accelerate/cuda_pycuda/engines/ML_pycuda.py @@ -90,11 +90,11 @@ def engine_initialize(self): self.qu_htod = cuda.Stream() self.qu_dtoh = cuda.Stream() - if self.p.smooth_gradient.method == "convolution": + if self.p.smooth_gradient_method == "convolution": self.GSK = GaussianSmoothingKernel(queue=self.queue) self.GSK.tmp = None - if self.p.smooth_gradient.method == "fft": + if self.p.smooth_gradient_method == "fft": self.FGSK = FFTGaussianSmoothingKernel(queue=self.queue) # Real/Fourier Support Kernel @@ -259,14 +259,14 @@ def _set_pr_ob_ref_for_data(self, dev='gpu', container=None, sync_copy=False): self._set_pr_ob_ref_for_data(dev=dev, container=container, sync_copy=sync_copy) def _get_smooth_gradient(self, data, sigma): - if self.p.smooth_gradient.method == "convolution": + if self.p.smooth_gradient_method == "convolution": if self.GSK.tmp is None: self.GSK.tmp = gpuarray.empty(data.shape, dtype=np.complex64) self.GSK.convolution(data, [sigma, sigma], tmp=self.GSK.tmp) - elif self.p.smooth_gradient.method == "fft": + elif self.p.smooth_gradient_method == "fft": self.FGSK.filter(data, sigma) else: - raise NotImplementedError("smooth_gradient.method can only be ```convolution``` or ```fft```.") + raise NotImplementedError("smooth_gradient_method should be ```convolution``` or ```fft```.") return data def _replace_ob_grad(self): @@ -275,8 +275,7 @@ 
def _replace_ob_grad(self): if self.smooth_gradient: self.smooth_gradient.sigma *= (1. - self.p.smooth_gradient_decay) for name, s in new_ob_grad.storages.items(): - #s.gpu = self._get_smooth_gradient(s.gpu, self.smooth_gradient.sigma) - s.gpu = self._get_smooth_gradient_fft(s.gpu, self.smooth_gradient.sigma) + s.gpu = self._get_smooth_gradient(s.gpu, self.smooth_gradient.sigma) return self._replace_grad(self.ob_grad, new_ob_grad)