Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Extend support to Python 3.11 #258

Merged
merged 12 commits into from
May 11, 2023
2 changes: 1 addition & 1 deletion .github/workflows/unittests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ jobs:
test:
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10"]
python-version: ["3.8", "3.9", "3.10", "3.11"]
fail-fast: false

uses: N3PDF/workflows/.github/workflows/python-poetry-tests.yml@v2
Expand Down
1,552 changes: 765 additions & 787 deletions poetry.lock

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ authors = [
"F. Hekhorn <[email protected]>",
"N. Laurenti <[email protected]>",
"G. Magni <[email protected]>",
"T. Sharma <[email protected]>"
"T. Sharma <[email protected]>",
]
classifiers = [
"Programming Language :: Python",
Expand All @@ -37,12 +37,12 @@ packages = [
]

[tool.poetry.dependencies]
python = "^3.8,<3.11"
numpy = "^1.22"
scipy = "^1.7.3"
python = "^3.8,<3.12"
numpy = "^1.24"
scipy = "^1.10.1"
PyYAML = "^6.0"
lz4 = "^4.0.2"
numba = "^0.55.0"
numba = "^0.57.0"
# docs dependencies (for readthedocs, https://github.com/readthedocs/readthedocs.org/issues/4912#issuecomment-664002569)
Sphinx = { version = "^4.3.2", optional = true }
sphinx-rtd-theme = { version = "^1.0.0", optional = true }
Expand Down
1 change: 1 addition & 0 deletions src/eko/evolution_operator/operator_matrix_element.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ def build_ome(A, matching_order, a_s, backward_method):
# Print;
# .end
ome = np.eye(len(A[0]), dtype=np.complex_)
A = A[:, :, :]
A = np.ascontiguousarray(A)
if backward_method is InversionMethod.EXPANDED:
# expanded inverse
Expand Down
6 changes: 3 additions & 3 deletions src/eko/interpolation.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def log_evaluate_Nx(N, logx, area_list):
logxmax = a[1]
coefs = a[2:]
# skip area completely?
if logx >= logxmax:
if logx >= logxmax or np.abs(logx - logxmax) < _atol_eps:
continue
umax = N * logxmax
umin = N * logxmin
Expand All @@ -118,12 +118,12 @@ def log_evaluate_Nx(N, logx, area_list):
# this condition is actually not necessary in Python, since there
# pow(0, 0) == 1 and this behavior is apparently inherited in
# Numba/C; however, keeping the explicit check is mathematically cleaner
if umax == 0.0 and k == 0:
if np.abs(umax) < _atol_eps and k == 0:
pmax = emax
else:
pmax = pow(-umax, k) * emax
# drop factor by analytics?
if logx >= logxmin:
if logx >= logxmin or np.abs(logx - logxmin) < _atol_eps:
pmin = 0
else:
pmin = pow(-umin, k) * emin
Expand Down
19 changes: 14 additions & 5 deletions src/eko/io/dictlike.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,17 +146,26 @@ def load_field(type_, value):
# valid classes, cf. the module docstring
return load_typing(type_, value)

# FIXME: Python 3.8 does not recognize the origin of `npt.NDArray`; at
# the same time, since `numpy==1.23.2` it is no longer a proper class:
# it is not recognized by `inspect.isclass`, and `type(npt.NDArray)`
# does not inherit from `type`.
# However, this check should be matched by every supported version of
# NumPy on every supported version of Python.
# Once py3.8 is dropped, move this back below the `inspect.isclass`
# assertion
if np.ndarray in type_.__mro__ or issubclass(type_, np.ndarray):
# do not apply array on scalars
if isinstance(value, list):
return np.array(value)
return value

assert inspect.isclass(type_)

if issubclass(type_, DictLike):
return type_.from_dict(value)
if issubclass(type_, enum.Enum):
return load_enum(type_, value)
if issubclass(type_, np.ndarray) or np.ndarray in type_.__mro__:
# do not apply array on scalars
if isinstance(value, list):
return np.array(value)
return value
if isinstance(value, dict):
return type_(**value)

Expand Down
43 changes: 28 additions & 15 deletions tests/eko/test_interpolation.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,12 +205,12 @@ def p1Nref(N, lnx):
assert_almost_equal(p1N(N, lnx), p1Nref(N, lnx))

def test_log_eval_N(self):
xg = [np.exp(-1), 1.0]
xg = interpolation.XGrid([np.exp(-1.0), 1.0], True)
inter_N = interpolation.InterpolatorDispatcher(xg, 1)
# p_0(x) = -ln(x)
p0N = inter_N[0]
assert len(p0N.areas) == 1
p0_cs_ref = [0, -1]
p0_cs_ref = [0.0, -1.0]
for act_c, res_c in zip(p0N.areas[0], p0_cs_ref):
assert_almost_equal(act_c, res_c)

Expand All @@ -219,15 +219,16 @@ def p0Nref_full(N, lnx):
Full -> \tilde p_0(N) = exp(-N)(exp(N)-1-N)/N^2
MMa: Integrate[x^(n-1) (-Log[x]),{x,1/E,1}]
"""
return ((np.exp(N) - 1 - N) / N**2) * np.exp(-N * (lnx + 1))
return ((np.exp(N) - 1.0 - N) / N**2) * np.exp(-N * (lnx + 1.0))

def p0Nref_partial(N, lnx):
"partial = lower bound is neglected"
return (1 / N**2) * np.exp(-N * lnx)
return (1.0 / N**2) * np.exp(-N * lnx)

# p_1(x) = 1 + ln(x)
p1N = inter_N[1]
assert len(p1N.areas) == 1
p1_cs_ref = [1, 1]
p1_cs_ref = [1.0, 1.0]
for act_c, res_c in zip(p1N.areas[0], p1_cs_ref):
assert_almost_equal(act_c, res_c)

Expand All @@ -236,24 +237,36 @@ def p1Nref_full(N, lnx):
p_1(x) = 1+\ln(x) -> \tilde p_1(N) = (exp(-N)-1+N)/N^2
MMa: Integrate[x^(n-1) (1+Log[x]),{x,1/E,1}]
"""
return ((np.exp(-N) - 1 + N) / N**2) * np.exp(-N * lnx)
return ((np.exp(-N) - 1.0 + N) / N**2) * np.exp(-N * lnx)

def p1Nref_partial(N, lnx):
return (1 / N - 1 / N**2) * np.exp(-N * lnx)
return (1.0 / N - 1.0 / N**2) * np.exp(-N * lnx)

# iterate configurations
for N in [1.0, 2.0, complex(1.0, 1.0)]:
# check skip
assert_almost_equal(p0N(N, 0), 0)
assert_almost_equal(p1N(N, 0), 0)
assert_almost_equal(p0N(N, 0.0), 0.0)
assert_almost_equal(p1N(N, 0.0), 0.0)
# check values for partial
for lnx in [-1, -0.5]:
assert_almost_equal(p0N(N, lnx), p0Nref_partial(N, lnx))
assert_almost_equal(p1N(N, lnx), p1Nref_partial(N, lnx))
for lnx in [-1.0, -0.5]:
assert_almost_equal(
p0N(N, lnx),
p0Nref_partial(N, lnx),
err_msg=f"p0N_partial,{N=},{lnx=}",
)
assert_almost_equal(
p1N(N, lnx),
p1Nref_partial(N, lnx),
err_msg=f"p1N_partial,{N=},{lnx=}",
)
# check values for full
for lnx in [-2, -3]:
assert_almost_equal(p0N(N, lnx), p0Nref_full(N, lnx))
assert_almost_equal(p1N(N, lnx), p1Nref_full(N, lnx))
for lnx in [-2.0, -3.0]:
assert_almost_equal(
p0N(N, lnx), p0Nref_full(N, lnx), err_msg=f"p0N_full,{N=},{lnx=}"
)
assert_almost_equal(
p1N(N, lnx), p1Nref_full(N, lnx), err_msg=f"p1N_full,{N=},{lnx=}"
)

def test_is_below_x(self):
for log in [False, True]:
Expand Down