
Commit

Create tests for two functions.
3nthusia5t committed May 7, 2024
1 parent b973aa8 commit 1658bfc
Showing 4 changed files with 105 additions and 5 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/python-app.yml
@@ -26,7 +26,6 @@ jobs:
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install flake8 pytest
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
    - name: Lint with flake8
      run: |
@@ -36,4 +35,4 @@ jobs:
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
    - name: Test with pytest
      run: |
        pytest
        python test_entro.py
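For reference, the same suite can be run locally in either of these ways (assuming a CUDA-capable machine, since the kernels need a GPU):

    python test_entro.py
    python -m unittest -v test_entro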
2 changes: 2 additions & 0 deletions .gitignore
@@ -0,0 +1,2 @@
venv/
__pycache__
10 changes: 7 additions & 3 deletions entro.py
@@ -10,6 +10,8 @@
def calculate_histogram(data, hist_out):
    # Initialize shared memory for the block-local histogram
    local_hist = cuda.shared.array(256, dtype=np.uint32)

    # The thread id runs from 0 to 255 (256 threads per block), one per histogram bin
    tx = cuda.threadIdx.x

    local_hist[tx] = 0
@@ -19,10 +21,11 @@ def calculate_histogram(data, hist_out):
    idx = cuda.grid(1)
    stride = cuda.gridsize(1)
    for i in range(idx, data.shape[0], stride):
        # Count this byte value in the block-local histogram
        cuda.atomic.add(local_hist, data[i], 1)
    cuda.syncthreads()

    # local_hist is shared memory; each thread folds its own bin into the global histogram
    cuda.atomic.add(hist_out, tx, local_hist[tx])

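As a host-side usage sketch (not part of this commit; d_data and d_hist are hypothetical names), the kernel could be launched with 256-thread blocks so that tx covers every bin; the input is assumed to be bytes in 0-255:

    import numpy as np
    from numba import cuda
    from entro import calculate_histogram

    data = np.frombuffer(b"some sample bytes", dtype=np.uint8)
    d_data = cuda.to_device(data)                      # copy input to the GPU
    d_hist = cuda.to_device(np.zeros(256, dtype=np.uint32))

    threads_per_block = 256                            # one thread per histogram bin
    blocks = (data.size + threads_per_block - 1) // threads_per_block
    calculate_histogram[blocks, threads_per_block](d_data, d_hist)
    hist = d_hist.copy_to_host()                       # hist.sum() == data.size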
Expand All @@ -35,7 +38,7 @@ def calculate_entropy(hist, total_pixels, entropy_out):
    if prob != 0:
        entropy_out[i] = -prob * math.log2(prob)
    else:
        entropy_out[i] = -0.00001 * math.log2(0.00001)
        entropy_out[i] = -0.000001 * math.log2(0.000001)

@cuda.jit
def sum_array(arr, result):
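For scale, the new constant shrinks the spurious entropy contributed by an empty bin: -1e-6 * log2(1e-6) is about 2.0e-5 bits, versus about 1.7e-4 with the old 1e-5 value. A quick host-side check (a sketch, not part of the commit):

    import math
    for eps in (0.00001, 0.000001):
        print(eps, -eps * math.log2(eps))   # per-empty-bin entropy floor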
@@ -110,7 +113,8 @@ def entropy_with_cuda(data):
    cuda.synchronize()

    # TODO: replace the host-side sum() with a parallel reduction
    return entropy_sum.sum()

def is_supported_cuda():
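The TODO in entropy_with_cuda asks for a parallel reduction in place of the host-side sum(). A minimal shared-memory tree reduction in numba.cuda might look like this (a sketch assuming 256-thread blocks, not the project's actual sum_array kernel):

    import numpy as np
    from numba import cuda

    @cuda.jit
    def reduce_sum(arr, result):
        # Each block reduces 256 elements in shared memory, then one
        # atomic add per block folds the partial sum into result[0].
        tmp = cuda.shared.array(256, dtype=np.float32)
        tx = cuda.threadIdx.x
        i = cuda.grid(1)
        tmp[tx] = arr[i] if i < arr.shape[0] else 0.0
        cuda.syncthreads()
        stride = 128
        while stride > 0:
            if tx < stride:
                tmp[tx] += tmp[tx + stride]
            cuda.syncthreads()
            stride //= 2
        if tx == 0:
            cuda.atomic.add(result, 0, tmp[0])

    # Usage: result = cuda.to_device(np.zeros(1, dtype=np.float32))
    #        reduce_sum[(n + 255) // 256, 256](d_arr, result)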
95 changes: 95 additions & 0 deletions test_entro.py
@@ -0,0 +1,95 @@
import numpy as np
from numba import cuda
import unittest
import math
from scipy.stats import entropy
from numba.core.errors import NumbaPerformanceWarning

# Functions to test
from entro import calculate_histogram, calculate_entropy

class TestCalculateHistogram(unittest.TestCase):

    def test_histogram_calculation(self):
        import warnings
        warnings.simplefilter('ignore', category=NumbaPerformanceWarning)

        data = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        hist_out = np.zeros(256, dtype=np.uint32)

        expected_hist = np.histogram(data, bins=256, range=(0, 255))[0]

        # Run the function: one block of 256 threads, one thread per bin
        calculate_histogram[1, 256](data, hist_out)

        np.testing.assert_array_equal(hist_out[:10], expected_hist[:10], "Histograms do not match")

def entropy_from_histogram(hist):
    """
    Calculate entropy from a histogram.

    Parameters:
        hist (array_like): 1-D array representing the histogram.

    Returns:
        float: Entropy value in bits.
    """
    # Normalize the histogram to obtain a probability distribution
    prob_dist = hist / np.sum(hist)

    # Remove zero probabilities to avoid taking the logarithm of zero
    prob_dist = prob_dist[prob_dist != 0]

    # Shannon entropy; a local name is used so the imported scipy entropy() is not shadowed
    entropy_value = -np.sum(prob_dist * np.log2(prob_dist))

    return entropy_value

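# Usage sketch (not part of this commit): with no empty bins the helper above
# should agree with scipy.stats.entropy, e.g.
#     hist = np.array([10, 20, 30, 40])
#     entropy_from_histogram(hist)   # ~= entropy(hist, base=2)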
class TestCalculateEntropy(unittest.TestCase):

    def test_entropy_calculation(self):
        import warnings
        warnings.simplefilter('ignore', category=NumbaPerformanceWarning)

        # Test data
        hist = np.array([10, 20, 30, 40, 50, 20, 30, 20, 30])

        # Expected value
        expected_entropy = entropy(hist, base=2)

        # Actual value
        cuda.synchronize()
        entropy_out_gpu = cuda.device_array(hist.size, dtype=np.float32)
        calculate_entropy[1, 1](hist, hist.sum(), entropy_out_gpu)
        result = entropy_out_gpu.copy_to_host().sum()

        # Assert within a small tolerance
        np.testing.assert_allclose(result, expected_entropy, rtol=1e-6, atol=1e-6, err_msg="Entropies do not match")

    def test_random_histograms(self):
        import warnings
        warnings.simplefilter('ignore', category=NumbaPerformanceWarning)

        for _ in range(1000):
            # Test data
            hist = np.random.randint(1, 1000, size=np.random.randint(1, 2000))

            # Expected value
            expected_entropy = entropy(hist, base=2)

            # Actual value
            cuda.synchronize()
            entropy_out_gpu = cuda.device_array(hist.size, dtype=np.float32)
            # Numba launch syntax is kernel[blocks_per_grid, threads_per_block];
            # one single-thread block per bin keeps every histogram index covered
            blockspergrid_entropy = hist.size
            calculate_entropy[blockspergrid_entropy, 1](hist, hist.sum(), entropy_out_gpu)
            result = entropy_out_gpu.copy_to_host().sum()

            # Assert within a small tolerance
            np.testing.assert_allclose(result, expected_entropy, rtol=1e-4, atol=1e-4, err_msg="Entropies do not match")
            del result
            del entropy_out_gpu


if __name__ == '__main__':
    unittest.main()
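End to end, these kernels are driven by entropy_with_cuda in entro.py. A usage sketch (an assumption, not from the repository's docs; the file name is hypothetical):

    import numpy as np
    from entro import entropy_with_cuda, is_supported_cuda

    if is_supported_cuda():
        data = np.frombuffer(open('some_file.bin', 'rb').read(), dtype=np.uint8)
        print(entropy_with_cuda(data))   # Shannon entropy estimate in bits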
