Skip to content

Commit

Permalink
Merge pull request #1 from sanjaymjoshi/fin2
Browse files Browse the repository at this point in the history
Stats for finite number of samples
  • Loading branch information
sanjaymjoshi authored May 14, 2023
2 parents b03f95c + b0dc322 commit d339991
Show file tree
Hide file tree
Showing 3 changed files with 230 additions and 0 deletions.
3 changes: 3 additions & 0 deletions relistats/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,5 @@
""" See submodules
"""
import logging

logger = logging.getLogger(__name__)
136 changes: 136 additions & 0 deletions relistats/binom_fin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
""" Reliability Engineering Statistics for Binomial Distributions
Also known as Bernoulli Trials.
Reference:
S.M. Joshi, "Computation of Reliability Statistics for
Success-Failure Experiments," arXiv:2303.03167 [stat.ME], March 2023.
https://doi.org/10.48550/arXiv.2303.03167
"""
from relistats import logger
from relistats.binomial import confidence


def conf_fin(n: int, f: int, m: int, d: int) -> tuple:
    """Confidence [0, 1] in reliability r for finite population size.
    Returns tuple with second value as actual reliability used for computations.
    :param n: number of samples tested
    :type n: int, >0
    :param f: number of failures in n samples
    :type f: int, >=0 and <=n
    :param m: remaining samples in population
    :type m: int, >= 0
    :param d: maximum number of defects in remaining m samples
    :type d: int, >=0
    :return: Tuple of (confidence, actual reliability), or (None, None)
        for invalid arguments
    :rtype: tuple
    """
    # Reject invalid arguments. The f > n check matters: without it,
    # e.g. conf_fin(4, 5, 0, 0) would report full confidence in a
    # negative "reliability" 1 - f/n.
    if n <= 0 or f < 0 or f > n or m < 0 or d < 0:
        return (None, None)

    if m == 0:
        # No more samples remaining. We have full confidence in current level of reliability
        return (1, 1 - f / n)

    total_samples = n + m
    total_failures = f + d
    if total_failures > total_samples:
        # More defects postulated than samples available.
        return (None, None)

    actual_r = 1 - total_failures / total_samples
    if d >= m:
        # even if all remaining samples fail, we are still ok. Full confidence.
        return (1, actual_r)

    if d == 0:
        # Cannot calculate probability of zero failures, hence bump up the
        # remaining samples by 1 and calculate probability that there is exactly
        # 1 failure
        d += 1
        m += 1
        total_samples += 1
        total_failures += 1
        actual_r = 1 - total_failures / total_samples

    # Reliability level at which to evaluate the (infinite-population)
    # confidence of the n-sample experiment.
    r_needed = 1 - d / m

    actual_c = confidence(n, f, r_needed)
    logger.debug(f"Confidence at r={r_needed} = {actual_c}, with actual_r={actual_r}")

    return (actual_c, actual_r)


def reli_fin(n: int, f: int, c: float, m: int) -> tuple:
    """Minimum reliability at confidence level c for finite population size.
    Returns tuple with second value as actual confidence used for computations.
    :param n: number of samples
    :type n: int, >0
    :param f: number of failures
    :type f: int, >=0 and <=n
    :param c: confidence level
    :type c: float, [0, 1]
    :param m: remaining samples in population
    :type m: int, >= 0
    :return: (reliability, actual confidence), or (None, None) for
        invalid arguments
    """
    # Reject invalid arguments. f > n or m < 0 must be caught here:
    # with f > n + m, conf_fin returns (None, None) inside the loop and
    # `None >= c` would raise TypeError.
    if n <= 0 or f < 0 or f > n or m < 0 or c < 0 or c > 1:
        return (None, None)

    # Calculate confidence for each case of remaining failures
    # Start with 0 failures, i.e., highest reliability possible.
    # The confidence will be lowest at this level. If the
    # desired confidence is higher than this, keep increasing
    # failures, i.e., keep reducing reliability until the
    # desired confidence level is met or exceeded.
    # Return that reliability (or 0 if it is not possible to
    # achieve the desired level of confidence)

    for d in range(m + 1):
        c2, r2 = conf_fin(n, f, m, d)
        if c2 >= c:
            return (r2, c2)
    return (0, c)  # pragma: no cover
    # At d == m, conf_fin reports full confidence (1 >= c), so
    # this line is never reached in pytest!


def assur_fin(n: int, f: int, m: int, tol=0.001) -> tuple:
    """Assurance [0, 1], i.e., confidence = reliability, for finite
    population size.
    Returns tuple with other values as reliability and confidence
    used for computations.
    :param n: number of samples
    :type n: int, >0
    :param f: number of failures
    :type f: int, >=0 and <=n
    :param m: remaining samples in population
    :type m: int, >= 0
    :param tol: accuracy tolerance (currently unused; kept for
        backward-compatible interface — TODO: use or remove)
    :type tol: float, optional
    :return: (Assurance, reliability, confidence), or (None, 0, 0) for
        invalid arguments
    :rtype: tuple
    """
    # Reject invalid arguments. f > n would feed a nonsense failure count
    # into conf_fin; m < 0 would silently skip the loop and return (0, 0, 0).
    if n <= 0 or f < 0 or f > n or m < 0:
        return (None, 0, 0)

    # Calculate confidence for each case of remaining failures
    # Start with 0 failures, i.e., highest reliability possible.
    # The confidence will be lowest at this level. Set assurance
    # as the minimum of reliability and confidence. Keep increasing
    # failures, i.e., keep reducing reliability which will increase
    # the confidence. Keep doing this while the assurance keeps
    # increasing.
    # Return that assurance

    max_assurance = 0
    max_reli = 0
    max_conf = 0
    for d in range(m + 1):
        c2, r2 = conf_fin(n, f, m, d)
        assurance = min(r2, c2)
        if assurance > max_assurance:
            max_assurance = assurance
            max_reli = r2
            max_conf = c2
    return (max_assurance, max_reli, max_conf)
91 changes: 91 additions & 0 deletions test/test_binom_fin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
import pytest

from relistats.binom_fin import assur_fin, conf_fin, reli_fin


def test_conf_fin() -> None:
    ABS_TOL_CONFIDENCE = 0.001

    # m == 0: no samples remain, confidence is always 1 and reliability
    # is exactly 1 - f/n.
    for failures, expected_r in [(0, 1), (1, 0.75), (2, 0.5), (3, 0.25), (4, 0)]:
        assert conf_fin(4, failures, 0, 2) == (1, expected_r)

    # 4 samples tested, 4 remaining in the population.
    cases_n4 = [
        ((4, 0, 4, 2), (0.938, 0.75)),
        ((4, 1, 4, 2), (0.688, 0.625)),
        ((4, 2, 4, 2), (0.313, 0.5)),
        ((4, 3, 4, 2), (0.063, 0.375)),
        ((4, 3, 4, 3), (0.316, 0.25)),
        ((4, 3, 4, 4), (1, 0.125)),
    ]
    for args, expected in cases_n4:
        assert conf_fin(*args) == pytest.approx(expected, abs=ABS_TOL_CONFIDENCE)

    # 10 samples tested, 10 remaining in the population.
    cases_n10 = [
        ((10, 0, 10, 0), (0.614, 0.952)),
        ((10, 0, 10, 1), (0.651, 0.95)),
        ((10, 0, 10, 2), (0.893, 0.9)),
        ((10, 0, 10, 5), (0.999, 0.75)),
        ((10, 1, 10, 5), (0.989, 0.7)),
        ((10, 2, 10, 5), (0.945, 0.65)),
        ((10, 3, 10, 5), (0.828, 0.6)),
        ((10, 4, 10, 5), (0.623, 0.55)),
        ((10, 5, 10, 5), (0.377, 0.5)),
        ((10, 6, 10, 5), (0.172, 0.45)),
    ]
    for args, expected in cases_n10:
        assert conf_fin(*args) == pytest.approx(expected, abs=ABS_TOL_CONFIDENCE)

    # Invalid arguments yield (None, None).
    bad_args = [
        (2, 0, 2, 5),
        (2, -2, 2, 0),
        (-2, 0, 2, 0),
        (2, 0, 2, -2),
        (2, 0, -2, -2),
    ]
    for args in bad_args:
        assert conf_fin(*args) == (None, None)


def test_reli_fin() -> None:
    ABS_TOL_RELIABILITY = 0.001

    # m == 0: no samples remain, confidence is always 1 and reliability
    # is exactly 1 - f/n.
    for failures, expected_r in [(0, 1), (1, 0.75), (2, 0.5), (3, 0.25), (4, 0)]:
        assert reli_fin(4, failures, 0.5, 0) == (expected_r, 1)

    # Finite remaining population: compare against tabulated values.
    approx_cases = [
        ((4, 0, 0.5, 4), (0.889, 0.590)),
        ((4, 1, 0.94, 4), (0.5, 0.949)),
        ((4, 2, 0.31, 4), (0.5, 0.313)),
        ((4, 3, 0.0039, 4), (0.5, 0.004)),
        ((4, 3, 0.315, 4), (0.25, 0.316)),
    ]
    for args, expected in approx_cases:
        assert reli_fin(*args) == pytest.approx(expected, abs=ABS_TOL_RELIABILITY)

    # Invalid arguments yield (None, None).
    bad_args = [
        (2, 0, 2, 2),
        (2, -2, 0.5, 2),
        (-2, 0, 0.5, 2),
        (2, 0, -0.5, 2),
    ]
    for args in bad_args:
        assert reli_fin(*args) == (None, None)


def test_assurance() -> None:
    ABS_TOL = 0.001

    # Tabulated (assurance, reliability, confidence) triples.
    approx_cases = [
        ((4, 1, 0), (0.75, 0.75, 1)),
        ((4, 2, 0), (0.5, 0.5, 1)),
        ((4, 3, 0), (0.25, 0.25, 1)),
        ((4, 0, 4), (0.75, 0.75, 0.938)),
        ((4, 1, 4), (0.625, 0.625, 0.688)),
        ((4, 1, 8), (0.583, 0.583, 0.688)),
    ]
    for args, expected in approx_cases:
        assert assur_fin(*args) == pytest.approx(expected, abs=ABS_TOL)

    # Invalid arguments yield (None, 0, 0).
    for args in [(2, -2, 2), (-2, 0, 2)]:
        assert assur_fin(*args) == (None, 0, 0)

0 comments on commit d339991

Please sign in to comment.