Skip to content

Commit

Permalink
Round simple stats to avoid minute differences
Browse files Browse the repository at this point in the history
- The regression tests appear to be failing due to extremely small
  differences in the statistics (on the order of 1e-9). The root cause
  of these differences is not yet known.

Signed-off-by: Fabrice Normandin <[email protected]>
  • Loading branch information
lebrice committed May 31, 2024
1 parent 76eb22f commit f37b503
Show file tree
Hide file tree
Showing 5 changed files with 24 additions and 25 deletions.
3 changes: 0 additions & 3 deletions project/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,9 +183,6 @@ def accelerator(request: pytest.FixtureRequest):
return accelerator


_cuda_available = torch.cuda.is_available()


@pytest.fixture(
scope="session",
params=None,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
'0':
device: cpu
hash: 1082905456378942323
max: 2.125603675842285
mean: -0.007423439994454384
min: -1.9888888597488403
max: 2.1256
mean: -0.00742
min: -1.98889
shape:
- 128
- 3
- 32
- 32
sum: -2919.015380859375
sum: -2919.01538
'1':
device: cpu
hash: 3692171093056153318
max: 9
mean: 4.5546875
mean: 4.55469
min: 0
shape:
- 128
Expand Down
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
'0':
device: cpu
hash: -3706536913713083016
max: 2.821486711502075
mean: 0.47488248348236084
min: -0.4242129623889923
max: 2.82149
mean: 0.47488
min: -0.42421
shape:
- 128
- 1
Expand All @@ -14,7 +14,7 @@
device: cpu
hash: -4023601292826392021
max: 9
mean: 4.5546875
mean: 4.55469
min: 0
shape:
- 128
Expand Down
10 changes: 5 additions & 5 deletions project/datamodules/datamodules_test/test_first_batch/mnist.yaml
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
'0':
device: cpu
hash: 4338584025941619046
max: 2.821486711502075
mean: 0.014241953380405903
min: -0.4242129623889923
max: 2.82149
mean: 0.01424
min: -0.42421
shape:
- 128
- 1
- 28
- 28
sum: 1429.20849609375
sum: 1429.2085
'1':
device: cpu
hash: 1596942422053415325
max: 9
mean: 4.2421875
mean: 4.24219
min: 0
shape:
- 128
Expand Down
18 changes: 10 additions & 8 deletions project/utils/tensor_regression.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@

logger = get_logger(__name__)

PRECISION = 5


@functools.singledispatch
def to_ndarray(v: Any) -> np.ndarray | None:
def ndarray_simple_attributes(array: np.ndarray) -> dict:
    """Return a small dict of summary statistics for ``array``.

    Used by the tensor-regression checks: statistics are rounded to
    ``PRECISION`` decimal places so that tiny floating-point differences
    between runs do not cause spurious regression-test failures.

    Parameters
    ----------
    array: the array to summarize.

    Returns
    -------
    A dict with the array's shape, a hash of its contents, and rounded
    min / max / sum / mean values.
    """
    return {
        "shape": tuple(array.shape),
        "hash": _hash(array),
        "min": round(array.min().item(), PRECISION),
        "max": round(array.max().item(), PRECISION),
        "sum": round(array.sum().item(), PRECISION),
        # Fix: call .item() before rounding, like the other entries (and like
        # tensor_simple_attributes), so the value is a plain Python float
        # rather than a numpy scalar — numpy scalars can serialize differently.
        "mean": round(array.mean().item(), PRECISION),
    }


Expand All @@ -378,10 +380,10 @@ def tensor_simple_attributes(tensor: Tensor) -> dict:
return {
"shape": tuple(tensor.shape) if not tensor.is_nested else get_shape_ish(tensor),
"hash": _hash(tensor),
"min": tensor.min().item(),
"max": tensor.max().item(),
"sum": tensor.sum().item(),
"mean": tensor.float().mean().item(),
"min": round(tensor.min().item(), PRECISION),
"max": round(tensor.max().item(), PRECISION),
"sum": round(tensor.sum().item(), PRECISION),
"mean": round(tensor.float().mean().item(), PRECISION),
"device": (
"cpu" if tensor.device.type == "cpu" else f"{tensor.device.type}:{tensor.device.index}"
),
Expand Down

0 comments on commit f37b503

Please sign in to comment.