Skip to content

Commit

Permalink
remove multiple exit from prange (#1901)
Browse files Browse the repository at this point in the history
* remove multiple exit from prange
* closes #1900 
* skips computation of KL divergence if inf was already computed; adds a unit test
Signed-off-by: Edoardo Pasca <[email protected]>
  • Loading branch information
paskino authored Oct 16, 2024
1 parent a475974 commit 6485259
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 20 deletions.
4 changes: 3 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
* 24.x.x
- Bug fixes:
- Fix bug with 'median' and 'mean' methods in Masker averaging over the wrong axes.
- Enhancements:
- Removed multiple exits from numba implementation of KullbackLeibler divergence (#1901)

* 24.2.0
- New Features:
Expand All @@ -21,7 +23,7 @@
- Internal refactor: Separate framework into multiple files (#1692)
- Allow the SIRT algorithm to take `initial=None` (#1906)
- Add checks on equality method of `AcquisitionData` and `ImageData` for equality of data type and geometry (#1919)
- Add check on equality method of `AcquisitionGeometry` for equality of dimension labels (#1919)
- Add check on equality method of `AcquisitionGeometry` for equality of dimension labels (#1919)
- Testing:
- New unit tests for operators and functions to check for in place errors and the behaviour of `out` (#1805)
- Updates in SPDHG vs PDHG unit test to reduce test time and adjustments to parameters (#1898)
Expand Down
44 changes: 25 additions & 19 deletions Wrappers/Python/cil/optimisation/functions/KullbackLeibler.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,25 +343,9 @@ def kl_gradient_mask(x, b, out, eta, mask):
@njit(parallel=True)
def kl_div(x, y, eta):
    """Kullback-Leibler divergence of x from (y + eta), summed over all elements.

    Runs in parallel with numba's prange; each thread accumulates into its own
    slot of `partial` to avoid write contention, and the slots are summed at
    the end. Returns numpy.inf as soon as any element falls in the invalid
    region (x < 0, or y + eta < 0).
    """
    # One accumulator slot per thread; summed after the parallel loop.
    partial = numpy.zeros(get_num_threads(), dtype=numpy.float64)
    # Cross-thread flag: once an infinite term is found, remaining iterations
    # skip their work. prange forbids early returns, hence the flag instead
    # of `return numpy.inf` inside the loop.
    has_inf = 0
    for idx in prange(x.size):
        if has_inf == 0:
            xv = x.flat[idx]
            yv = y.flat[idx] + eta.flat[idx]
            tid = get_thread_id()
            if xv > 0 and yv > 0:
                # Standard KL term: x*log(x/y) - x + y
                partial[tid] += xv * numpy.log(xv / yv) - xv + yv
            elif xv == 0 and yv >= 0:
                # lim_{x->0} x*log(x/y) = 0, so the term reduces to y.
                partial[tid] += yv
            else:
                # Invalid region: divergence is +inf. Overwriting (not adding)
                # is fine — inf dominates the final sum regardless.
                partial[tid] = numpy.inf
                has_inf = 1
    return sum(partial)

@njit(parallel=True)
def kl_div_mask(x, y, eta, mask):
    """Masked Kullback-Leibler divergence of x from (y + eta).

    Identical to kl_div except that elements where mask.flat[i] <= 0 are
    skipped entirely. Parallelised with prange using per-thread accumulator
    slots; returns numpy.inf if any unmasked element is in the invalid
    region (x < 0, or y + eta < 0).
    """
    # One accumulator slot per thread, summed after the parallel loop.
    partial = numpy.zeros(get_num_threads(), dtype=numpy.float64)
    # Flag replacing an early `return numpy.inf`, which prange disallows:
    # once set, remaining iterations do no work.
    has_inf = 0
    for idx in prange(x.size):
        if has_inf == 0:
            if mask.flat[idx] > 0:
                xv = x.flat[idx]
                yv = y.flat[idx] + eta.flat[idx]
                tid = get_thread_id()
                if xv > 0 and yv > 0:
                    # Standard KL term: x*log(x/y) - x + y
                    partial[tid] += xv * numpy.log(xv / yv) - xv + yv
                elif xv == 0 and yv >= 0:
                    # x == 0 contributes just y (x*log(x/y) -> 0).
                    partial[tid] += yv
                else:
                    # Invalid region: result is +inf; overwriting the slot is
                    # safe since inf dominates the final sum.
                    partial[tid] = numpy.inf
                    has_inf = 1
    return sum(partial)

# convex conjugate
Expand Down
16 changes: 16 additions & 0 deletions Wrappers/Python/test/test_function_KullbackLeibler.py
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,22 @@ def test_KullbackLeibler_numba_call(self):

numpy.testing.assert_allclose(f(u1), f_np(u1), rtol=1e-5)

def test_KullbackLeibler_numba_kl_div_has_inf(self):
    """kl_div returns inf when the data contains invalid (negative) entries."""
    from cil.optimisation.functions.KullbackLeibler import kl_div

    # Negate the masked data so some entries hit kl_div's invalid branch
    # (assumes self.u1 has positive entries under the mask — set up in setUp).
    negated = self.u1 * self.mask
    negated *= -1

    result = kl_div(negated.array, self.b1.array, self.eta.array)
    numpy.testing.assert_equal(result, numpy.inf)

def test_KullbackLeibler_numba_kl_div_mask_has_inf(self):
    """kl_div_mask returns inf when unmasked entries are invalid (negative)."""
    from cil.optimisation.functions.KullbackLeibler import kl_div_mask

    # Negate the masked data so unmasked entries hit the invalid branch
    # (assumes self.u1 has positive entries under the mask — set up in setUp).
    negated = self.u1 * self.mask
    negated *= -1

    result = kl_div_mask(
        negated.array, self.b1.array, self.eta.array, self.mask.array)
    numpy.testing.assert_equal(result, numpy.inf)

def test_KullbackLeibler_numba_call_mask(self):
f = self.f
f_np = self.f_np
Expand Down

0 comments on commit 6485259

Please sign in to comment.