Add min_snr() #329

Merged 6 commits on May 12, 2024
28 changes: 21 additions & 7 deletions src/sdr/_detection/_approximation.py
@@ -13,7 +13,7 @@
@export
def albersheim(p_d: npt.ArrayLike, p_fa: npt.ArrayLike, n_nc: npt.ArrayLike = 1) -> npt.NDArray[np.float64]:
r"""
Estimates the minimum required single-sample SNR.
Estimates the minimum signal-to-noise ratio (SNR) required to achieve the desired probability of detection $P_{D}$.

Arguments:
p_d: The desired probability of detection $P_D$ in $(0, 1)$.
@@ -23,6 +23,9 @@ def albersheim(p_d: npt.ArrayLike, p_fa: npt.ArrayLike, n_nc: npt.ArrayLike = 1)
Returns:
The minimum required single-sample SNR $\gamma$ in dB.

See Also:
sdr.min_snr

Notes:
This function implements Albersheim's equation, given by

@@ -42,27 +45,38 @@ def albersheim(p_d: npt.ArrayLike, p_fa: npt.ArrayLike, n_nc: npt.ArrayLike = 1)
$$0.1 \leq P_D \leq 0.9$$
$$1 \le N_{NC} \le 8096 .$$

Albersheim's equation approximates a linear detector. However, the difference between linear and square-law
detectors is minimal, so Albersheim's equation finds wide use.
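
For reference, a minimal standalone sketch of Albersheim's approximation (the textbook form of the equation; the helper name is illustrative and this is not necessarily the library's exact implementation):

    import numpy as np

    def albersheim_snr_db(p_d: float, p_fa: float, n_nc: int = 1) -> float:
        # Albersheim's approximation of the minimum required single-sample SNR in dB.
        # Commonly quoted as accurate to within about 0.2 dB inside the stated ranges.
        A = np.log(0.62 / p_fa)
        B = np.log(p_d / (1 - p_d))
        return -5 * np.log10(n_nc) + (6.2 + 4.54 / np.sqrt(n_nc + 0.44)) * np.log10(A + 0.12 * A * B + 1.7 * B)

    albersheim_snr_db(0.9, 1e-6)  # ≈ 13.1 dB for a single sample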

References:
- https://radarsp.weebly.com/uploads/2/1/4/7/21471216/albersheim_alternative_forms.pdf
- https://bpb-us-w2.wpmucdn.com/sites.gatech.edu/dist/5/462/files/2016/12/Noncoherent-Integration-Gain-Approximations.pdf
- https://www.mathworks.com/help/phased/ref/albersheim.html

Examples:
Compare the theoretical minimum required SNR using a linear detector in :func:`sdr.min_snr` with the
estimated minimum required SNR using Albersheim's approximation in :func:`sdr.albersheim`.

.. ipython:: python

p_d = 0.9; \
p_fa = np.logspace(-7, -3, 100)
p_fa = np.logspace(-12, -1, 21)

@savefig sdr_albersheim_1.png
plt.figure(); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=1), label="$N_{NC}$ = 1"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=2), label="$N_{NC}$ = 2"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=10), label="$N_{NC}$ = 10"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=20), label="$N_{NC}$ = 20"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=1), linestyle="--"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=2), linestyle="--"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=10), linestyle="--"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=20), linestyle="--"); \
plt.gca().set_prop_cycle(None); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=1, detector="linear"), label="$N_{NC}$ = 1"); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=2, detector="linear"), label="$N_{NC}$ = 2"); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=10, detector="linear"), label="$N_{NC}$ = 10"); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=20, detector="linear"), label="$N_{NC}$ = 20"); \
plt.legend(); \
plt.xlabel("Probability of false alarm, $P_{FA}$"); \
plt.ylabel("Minimum required SNR (dB)"); \
plt.title(f"Estimated minimum required SNR across non-coherent combinations for $P_D = 0.9$\nusing Albersheim's approximation");
plt.title("Minimum required SNR across non-coherent combinations for $P_D = 0.9$\nfrom theory (solid) and Albersheim's approximation (dashed)");

Group:
detection-approximation
153 changes: 143 additions & 10 deletions src/sdr/_detection/_theory.py
@@ -16,7 +16,6 @@


@export
@lru_cache
def h0_theory(
sigma2: float = 1.0,
detector: Literal["coherent", "linear", "square-law"] = "square-law",
@@ -197,6 +196,17 @@ def h0_theory(
Group:
detection-theory
"""
return _h0_theory(sigma2, detector, complex, n_c, n_nc)


@lru_cache
def _h0_theory(
sigma2: float = 1.0,
detector: Literal["coherent", "linear", "square-law"] = "square-law",
complex: bool = True,
n_c: int = 1,
n_nc: int | None = None,
) -> scipy.stats.rv_continuous:
sigma2 = float(sigma2)
if sigma2 <= 0:
raise ValueError(f"Argument `sigma2` must be positive, not {sigma2}.")
@@ -230,12 +240,7 @@ def h0_theory(
h0 = scipy.stats.norm(0, np.sqrt(sigma2_per))
elif detector == "linear":
h0 = scipy.stats.chi(nu, scale=np.sqrt(sigma2_per))
if complex:
h0 = _sum_distribution(h0, n_nc)
# h0 = _fit_summed_distribution(h0, n_nc, scipy.stats.norm if n_nc >= 5 else scipy.stats.weibull_min)
else:
h0 = _sum_distribution(h0, n_nc)
# h0 = _fit_summed_distribution(h0, n_nc, scipy.stats.norm if n_nc >= 10 else scipy.stats.weibull_min)
h0 = _sum_distribution(h0, n_nc)
elif detector == "square-law":
h0 = scipy.stats.chi2(nu * n_nc, scale=sigma2_per)
else:
@@ -245,7 +250,6 @@


@export
@lru_cache
def h1_theory(
snr: float,
sigma2: float = 1.0,
@@ -428,6 +432,18 @@ def h1_theory(
Group:
detection-theory
"""
return _h1_theory(snr, sigma2, detector, complex, n_c, n_nc)


@lru_cache
def _h1_theory(
snr: float,
sigma2: float = 1.0,
detector: Literal["coherent", "linear", "square-law"] = "square-law",
complex: bool = True,
n_c: int = 1,
n_nc: int | None = None,
) -> scipy.stats.rv_continuous:
snr = float(snr)
sigma2 = float(sigma2)
if sigma2 <= 0:
@@ -468,11 +484,16 @@ def h1_theory(
if complex:
# Rice distribution has 2 degrees of freedom
h1 = scipy.stats.rice(np.sqrt(lambda_), scale=np.sqrt(sigma2_per))
h1 = _sum_distribution(h1, n_nc)

# Sometimes this distribution has so much SNR that SciPy throws an error when computing the mean, etc.
# We need to check for that condition. If true, we can approximate the distribution by a Gaussian,
# which doesn't suffer from the same error.
if np.isnan(h1.mean()):
h1 = scipy.stats.norm(np.sqrt(lambda_ * sigma2_per), scale=np.sqrt(sigma2_per))
else:
# Folded normal distribution has 1 degree of freedom
h1 = scipy.stats.foldnorm(np.sqrt(lambda_), scale=np.sqrt(sigma2_per))
h1 = _sum_distribution(h1, n_nc)
h1 = _sum_distribution(h1, n_nc)
elif detector == "square-law":
h1 = scipy.stats.ncx2(nu * n_nc, lambda_ * n_nc, scale=sigma2_per)
else:
@@ -893,3 +914,115 @@ def _calculate(p_fa, sigma2):
threshold = threshold.item()

return threshold


@export
def min_snr(
p_d: npt.ArrayLike,
p_fa: npt.ArrayLike,
detector: Literal["coherent", "linear", "square-law"] = "square-law",
complex: bool = True,
n_c: int = 1,
n_nc: int | None = None,
) -> npt.NDArray[np.float64]:
r"""
Computes the minimum signal-to-noise ratio (SNR) required to achieve the desired probability of detection $P_{D}$.

Arguments:
p_d: The desired probability of detection $P_{D}$ in $(0, 1)$.
p_fa: The probability of false alarm $P_{FA}$ in $(0, 1)$.
detector: The detector type.

- `"coherent"`: A coherent detector, $T(x) = \mathrm{Re}\{x[n]\}$.
- `"linear"`: A linear detector, $T(x) = \left| x[n] \right|$.
- `"square-law"`: A square-law detector, $T(x) = \left| x[n] \right|^2$.

complex: Indicates whether the input signal is real or complex. This affects how the SNR is converted
to noise variance.
n_c: The number of samples to coherently integrate $N_C$.
n_nc: The number of samples to non-coherently integrate $N_{NC}$. Non-coherent integration is only allowable
for linear and square-law detectors.

Returns:
The minimum signal-to-noise ratio (SNR) in dB required to achieve the desired $P_{D}$.

See Also:
sdr.albersheim

Examples:
Compute the minimum required SNR to achieve $P_D = 0.9$ and $P_{FA} = 10^{-6}$ with a square-law detector.

.. ipython:: python

sdr.min_snr(0.9, 1e-6, detector="square-law")

Now suppose the signal is non-coherently integrated $N_{NC} = 10$ times. Notice the minimum required SNR
decreases, but by less than 10 dB. This is because non-coherent integration is less efficient than coherent
integration.

.. ipython:: python

sdr.min_snr(0.9, 1e-6, detector="square-law", n_nc=10)

Now suppose the signal is coherently integrated over $N_C = 10$ samples before the square-law detector.
Notice the SNR now decreases by exactly 10 dB.

.. ipython:: python

sdr.min_snr(0.9, 1e-6, detector="square-law", n_c=10, n_nc=10)

Compare the theoretical minimum required SNR using a linear detector in :func:`sdr.min_snr` with the
estimated minimum required SNR using Albersheim's approximation in :func:`sdr.albersheim`.

.. ipython:: python

p_d = 0.9; \
p_fa = np.logspace(-12, -1, 21)

@savefig sdr_min_snr_1.png
plt.figure(); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=1), linestyle="--"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=2), linestyle="--"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=10), linestyle="--"); \
plt.semilogx(p_fa, sdr.albersheim(p_d, p_fa, n_nc=20), linestyle="--"); \
plt.gca().set_prop_cycle(None); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=1, detector="linear"), label="$N_{NC}$ = 1"); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=2, detector="linear"), label="$N_{NC}$ = 2"); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=10, detector="linear"), label="$N_{NC}$ = 10"); \
plt.semilogx(p_fa, sdr.min_snr(p_d, p_fa, n_nc=20, detector="linear"), label="$N_{NC}$ = 20"); \
plt.legend(); \
plt.xlabel("Probability of false alarm, $P_{FA}$"); \
plt.ylabel("Minimum required SNR (dB)"); \
plt.title("Minimum required SNR across non-coherent combinations for $P_D = 0.9$\nfrom theory (solid) and Albersheim's approximation (dashed)");

Group:
detection-theory
"""
p_d = np.asarray(p_d)
p_fa = np.asarray(p_fa)

calc_p_d = globals()["p_d"]

@np.vectorize
def _calculate(p_d, p_fa):
def _objective(snr):
return p_d - calc_p_d(snr, p_fa, detector, complex, n_c, n_nc)

# The max SNR may return p_d = NaN. If so, we need to reduce the max value, or scipy.optimize.brentq() will error.
min_snr = -100 # dB
max_snr = 30 # dB
while True:
if np.isnan(_objective(max_snr)):
max_snr -= 10
else:
break

snr = scipy.optimize.brentq(_objective, min_snr, max_snr)

return snr

snr = _calculate(p_d, p_fa)
if snr.ndim == 0:
snr = snr.item()

return snr
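
The root-finding approach used by min_snr() above can also be illustrated in isolation. Below is a sketch using a toy closed-form P_D for a coherent detector on a real signal in unit-variance noise (an illustrative model, not the library's internals):

    import numpy as np
    import scipy.optimize
    import scipy.stats

    def p_d_coherent(snr_db: float, p_fa: float) -> float:
        # P_D for a coherent detector: threshold set by P_FA, signal amplitude sqrt(SNR).
        snr = 10 ** (snr_db / 10)
        threshold = scipy.stats.norm.isf(p_fa)
        return scipy.stats.norm.sf(threshold - np.sqrt(snr))

    def min_snr_coherent(p_d: float, p_fa: float) -> float:
        # Invert P_D(SNR) with a bracketing root finder, the same idea as sdr.min_snr().
        return scipy.optimize.brentq(lambda snr_db: p_d - p_d_coherent(snr_db, p_fa), -100, 30)

    min_snr_coherent(0.9, 1e-6)  # ≈ 15.6 dB for a single real sample in this toy model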
12 changes: 6 additions & 6 deletions tests/detection/test_h0_h1_theory.py
@@ -66,14 +66,14 @@ def test_pdfs(detector, complex, n_c, n_nc):
if h0.mean() == 0:
assert np.mean(z_h0) == pytest.approx(h0.mean(), abs=0.1)
else:
assert np.mean(z_h0) == pytest.approx(h0.mean(), rel=0.1)
assert np.var(z_h0) == pytest.approx(h0.var(), rel=0.1)
assert np.mean(z_h0) == pytest.approx(h0.mean(), rel=0.2)
assert np.var(z_h0) == pytest.approx(h0.var(), rel=0.2)

assert np.mean(z_h1) == pytest.approx(h1.mean(), rel=0.1)
assert np.var(z_h1) == pytest.approx(h1.var(), rel=0.1)
assert np.mean(z_h1) == pytest.approx(h1.mean(), rel=0.2)
assert np.var(z_h1) == pytest.approx(h1.var(), rel=0.2)

assert p_fa_meas == pytest.approx(p_fa, rel=0.1)
assert p_d_meas == pytest.approx(p_d, rel=0.1)
assert p_fa_meas == pytest.approx(p_fa, rel=0.2)
assert p_d_meas == pytest.approx(p_d, rel=0.2)
except AssertionError as e:
# import matplotlib.pyplot as plt
