diff --git a/benchmarks/eko/benchmark_evol_to_unity.py b/benchmarks/eko/benchmark_evol_to_unity.py index 0824a39d3..41bc4b4d0 100644 --- a/benchmarks/eko/benchmark_evol_to_unity.py +++ b/benchmarks/eko/benchmark_evol_to_unity.py @@ -8,6 +8,7 @@ from eko.interpolation import XGrid from eko.io import types from eko.io.runcards import OperatorCard, TheoryCard +from eko.matchings import Segment from eko.quantities.couplings import CouplingsInfo from eko.runner.legacy import Runner @@ -53,11 +54,10 @@ def test_operator_grid( path=tmp_path / "eko.tar", ).op_grid - q20 = 30 - q21 = 50 - nf = 4 - o = Operator(g.config, g.managers, nf, q20, q21) - o_back = Operator(g.config, g.managers, nf, q21, q20) + seg = Segment(30, 50, 4) + seg_back = Segment(50, 30, 4) + o = Operator(g.config, g.managers, seg) + o_back = Operator(g.config, g.managers, seg_back) o.compute() o_back.compute() diff --git a/benchmarks/eko/benchmark_msbar_evolution.py b/benchmarks/eko/benchmark_msbar_evolution.py index 26b8873af..6286e15ad 100644 --- a/benchmarks/eko/benchmark_msbar_evolution.py +++ b/benchmarks/eko/benchmark_msbar_evolution.py @@ -112,8 +112,8 @@ def benchmark_APFEL_msbar_evolution( order=theory_card.order, evmeth=couplevmeth, matching=np.power( - list(iter(theory_card.heavy.matching_ratios)), 2.0 - ), + theory_card.heavy.matching_ratios, 2.0 + ).tolist(), xif2=theory_card.xif**2, ).tolist(), thresholds_ratios=np.power( @@ -131,6 +131,9 @@ def benchmark_APFEL_msbar_evolution( qmasses[n].value ** 2, qmasses[n].scale ** 2, strong_coupling=as_VFNS, + thresholds_ratios=np.power( + theory_card.heavy.matching_ratios, 2.0 + ).tolist(), xif2=1.0, q2_to=Q2, nf_ref=n + 3, diff --git a/benchmarks/eko/benchmark_strong_coupling.py b/benchmarks/eko/benchmark_strong_coupling.py index ed186ca67..d13a264bd 100644 --- a/benchmarks/eko/benchmark_strong_coupling.py +++ b/benchmarks/eko/benchmark_strong_coupling.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from eko import thresholds +from eko import matchings from eko.beta import beta_qcd from eko.couplings import Couplings from eko.io.runcards import TheoryCard @@ -138,13 +138,13 @@ def benchmark_APFEL_ffns(self): ), } # collect my values - threshold_holder = thresholds.ThresholdsAtlas.ffns(nf) + threshold_holder = matchings.Atlas.ffns(nf, 0.0) for order in [1, 2, 3]: as_FFNS = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), method=CouplingEvolutionMethod.EXPANDED, - masses=threshold_holder.area_walls[1:-1], + masses=threshold_holder.walls[1:-1], hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) @@ -211,7 +211,7 @@ def benchmark_pegasus_ffns(self): ), } # collect my values - threshold_holder = thresholds.ThresholdsAtlas.ffns(nf) + threshold_holder = matchings.Atlas.ffns(nf, 0.0) couplings = ref_couplings(coupling_ref, scale_ref) couplings.max_num_flavs = 4 for order in [1, 2, 3, 4]: @@ -219,7 +219,7 @@ def benchmark_pegasus_ffns(self): couplings=couplings, order=(order, 0), method=CouplingEvolutionMethod.EXACT, - masses=threshold_holder.area_walls[1:-1], + masses=threshold_holder.walls[1:-1], hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) @@ -584,12 +584,12 @@ def benchmark_lhapdf_ffns_lo(self): scale_ref = 91.0 nf = 4 # collect my values - threshold_holder = thresholds.ThresholdsAtlas.ffns(nf) + threshold_holder = matchings.Atlas.ffns(nf, 0.0) as_FFNS_LO = Couplings( couplings=ref_couplings(coupling_ref, scale_ref), order=(1, 0), method=CouplingEvolutionMethod.EXACT, - 
masses=threshold_holder.area_walls[1:-1], + masses=threshold_holder.walls[1:-1], hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) @@ -630,7 +630,7 @@ def benchmark_apfel_exact(self): coupling_ref = np.array([0.118, 0.007496]) scale_ref = 90 # collect my values - threshold_holder = thresholds.ThresholdsAtlas.ffns(3) + threshold_holder = matchings.Atlas.ffns(3, 0.0) # LHAPDF cache apfel_vals_dict = { 1: np.array( @@ -663,7 +663,7 @@ def benchmark_apfel_exact(self): couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), method=CouplingEvolutionMethod.EXACT, - masses=threshold_holder.area_walls[1:-1], + masses=threshold_holder.walls[1:-1], hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) @@ -697,7 +697,7 @@ def benchmark_lhapdf_exact(self): coupling_ref = np.array([0.118, 0.007496]) scale_ref = 90 # collect my values - threshold_holder = thresholds.ThresholdsAtlas.ffns(3) + threshold_holder = matchings.Atlas.ffns(3, 0.0) # LHAPDF cache lhapdf_vals_dict = { 1: np.array( @@ -738,7 +738,7 @@ def benchmark_lhapdf_exact(self): couplings=ref_couplings(coupling_ref, scale_ref), order=(order, 0), method=CouplingEvolutionMethod.EXACT, - masses=threshold_holder.area_walls[1:-1], + masses=threshold_holder.walls[1:-1], hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) @@ -861,20 +861,18 @@ def benchmark_APFEL_fact_to_ren_lha_settings(self, theory_card: TheoryCard): ) for mu2 in mu2s: my_val = sc.a(mu2 * xif2)[0] - path = sc.thresholds.path(mu2 * xif2) my_val_4 = sc.a(mu2 * xif2, nf_to=4)[0] - path_4 = sc.thresholds.path(mu2 * xif2, 4) + path_4 = sc.atlas.path((mu2 * xif2, 4)) my_val_3 = sc.a(mu2 * xif2, nf_to=3)[0] - path_3 = sc.thresholds.path(mu2 * xif2, 3) + path_3 = sc.atlas.path((mu2 * xif2, 3)) # path_4 it's not matched assert len(path_4) == 1 # path_3 is the same as path backward in nf and in q2. assert len(path_3) == 2 - assert len(path) == 2 assert path_3[1].nf < path_3[0].nf - assert path_3[1].q2_from < path_3[0].q2_from + assert path_3[1].origin < path_3[0].origin apfel_val_ref = 0.03478112968976964 if use_APFEL: diff --git a/benchmarks/lha_paper_bench.py b/benchmarks/lha_paper_bench.py index 71ddf3550..494376afa 100644 --- a/benchmarks/lha_paper_bench.py +++ b/benchmarks/lha_paper_bench.py @@ -1,7 +1,6 @@ """ Benchmark to :cite:`Giele:2002hx` (LO + NLO) and :cite:`Dittmar:2005ed` (NNLO). """ -import argparse import os from math import nan diff --git a/doc/source/overview/tutorials/alpha_s.ipynb b/doc/source/overview/tutorials/alpha_s.ipynb index 4752b596d..a9d75c0ca 100644 --- a/doc/source/overview/tutorials/alpha_s.ipynb +++ b/doc/source/overview/tutorials/alpha_s.ipynb @@ -18,13 +18,10 @@ "\n", "In `eko` the running of $\\alpha_s$ is managed by an independent class `eko.couplings.Couplings`.\n", "\n", - "To instantiate this object you need to specify at least the boundary conditions on $\\alpha_s(\\mu_{R,0})$ (`alpha_s_ref`,`scale_ref`),\n", - "and the masses of the heavy quarks with the relative thresholds ratios (`heavy_quark_masses`, `thresholds_ratios`). \n", - "Note that all scales have to be given squared. 
\n", + "To instantiate this object you need to specify at least the boundary conditions on $\\alpha_s(\\mu_{R,0})$ (`alphas`,`scale`), the masses of the heavy quarks with the relative thresholds ratios (`heavy_quark_masses`, `thresholds_ratios`), and the (QCD,QED) perturbative order (`order`).\n", "\n", - "You can also pass some optional arguments such as the (QCD,QED) perturbative order (`order`), \n", - "see [here](https://eko.readthedocs.io/en/latest/modules/eko/eko.html#eko.couplings.Couplings)\n", - "for the complete list." + "See [here](https://eko.readthedocs.io/en/latest/modules/eko/eko.html#eko.couplings.Couplings)\n", + "for detailed API." ] }, { @@ -36,17 +33,14 @@ }, "outputs": [], "source": [ - "import math\n", "import numpy as np\n", "from eko.couplings import Couplings\n", - "from eko.io import types as ekotypes\n", + "from eko.quantities.couplings import CouplingsInfo, CouplingEvolutionMethod\n", "from eko.quantities.heavy_quarks import MatchingScales, QuarkMassScheme\n", "\n", "# set the (alpha_s, alpha_em) reference values\n", - "alphas_ref = ekotypes.FloatRef(value=0.118, scale=91.0)\n", - "alphaem_ref = ekotypes.FloatRef(value=0.007496252, scale=math.nan)\n", - "couplings_ref = ekotypes.CouplingsRef(\n", - " alphas=alphas_ref, alphaem=alphaem_ref, num_flavs_ref=None, max_num_flavs=5\n", + "couplings_ref = CouplingsInfo(\n", + " alphas=0.118, alphaem=0.007496252, scale=91.0, num_flavs_ref=None, max_num_flavs=5\n", ")\n", "\n", "# set heavy quark masses and their threshold ratios\n", @@ -59,7 +53,7 @@ "sc = Couplings(\n", " couplings_ref,\n", " order,\n", - " ekotypes.CouplingEvolutionMethod.EXACT,\n", + " CouplingEvolutionMethod.EXACT,\n", " heavy_quark_masses,\n", " QuarkMassScheme.POLE,\n", " thresholds_ratios,\n", @@ -81,13 +75,15 @@ "cell_type": "code", "execution_count": 2, "id": "8b908bb3-cbcc-4d7f-b631-27f1f37464f8", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "The value of alpha_s at Q^2=100 GeV^2 is: 0.17805952806666522\n" + "The value of alpha_s at Q^2=100 GeV^2 is: 0.17804304498411003\n" ] } ], @@ -113,9 +109,9 @@ "hash": "0a84ba3ac8703c04e87bc503a7d00188dfd591ad56130da93c406115a1e4a408" }, "kernelspec": { - "display_name": "eko-VHUucTmo-py3.10", + "display_name": "eko-KkPVjVhh-py3.10", "language": "python", - "name": "eko-vhuuctmo-py3.10" + "name": "eko-kkpvjvhh-py3.10" }, "language_info": { "codemirror_mode": { @@ -127,7 +123,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.7" + "version": "3.10.9" } }, "nbformat": 4, diff --git a/doc/source/overview/tutorials/dglap.ipynb b/doc/source/overview/tutorials/dglap.ipynb index d7f75847e..0e271a00c 100644 --- a/doc/source/overview/tutorials/dglap.ipynb +++ b/doc/source/overview/tutorials/dglap.ipynb @@ -34,8 +34,8 @@ } ], "source": [ - "import eko\n", "import pathlib\n", + "import eko\n", "\n", "eko.version.__version__" ] @@ -68,7 +68,7 @@ "th_card = example.theory()\n", "op_card = example.operator()\n", "# here we replace the grid with a very minimal one, to speed up the example\n", - "op_card.rotations.xgrid = [1e-3, 1e-2, 1e-1, 5e-1, 1.]" + "op_card.xgrid = [1e-3, 1e-2, 1e-1, 5e-1, 1.]" ] }, { @@ -89,19 +89,20 @@ "data": { "text/plain": [ "{'order': [1, 0],\n", - " 'couplings': {'alphas': {'value': 0.118, 'scale': 91.2},\n", - " 'alphaem': {'value': 0.007496252, 'scale': nan},\n", + " 'couplings': {'alphas': 0.118,\n", + " 'alphaem': 0.007496252,\n", + " 'scale': 91.2,\n", " 
'max_num_flavs': 6,\n", - " 'num_flavs_ref': None},\n", - " 'num_flavs_init': None,\n", - " 'num_flavs_max_pdf': 6,\n", - " 'intrinsic_flavors': [4],\n", - " 'quark_masses': {'c': {'value': 2.0, 'scale': nan},\n", - " 'b': {'value': 4.5, 'scale': nan},\n", - " 't': {'value': 173.07, 'scale': nan}},\n", - " 'quark_masses_scheme': 'pole',\n", - " 'matching': {'c': 1.0, 'b': 1.0, 't': 1.0},\n", - " 'xif': 1.0}" + " 'num_flavs_ref': 5,\n", + " 'em_running': False},\n", + " 'heavy': {'num_flavs_init': 4,\n", + " 'num_flavs_max_pdf': 6,\n", + " 'intrinsic_flavors': [4],\n", + " 'masses': [[2.0, nan], [4.5, nan], [173.07, nan]],\n", + " 'masses_scheme': 'pole',\n", + " 'matching_ratios': [1.0, 1.0, 1.0]},\n", + " 'xif': 1.0,\n", + " 'n3lo_ad_variation': [0, 0, 0, 0]}" ] }, "execution_count": 3, @@ -123,23 +124,19 @@ "data": { "text/plain": [ "{'mu0': 1.65,\n", + " 'mugrid': [[100.0, 5]],\n", + " 'xgrid': [0.001, 0.01, 0.1, 0.5, 1.0],\n", " 'configs': {'evolution_method': 'iterate-exact',\n", " 'ev_op_max_order': [10, 0],\n", " 'ev_op_iterations': 10,\n", - " 'interpolation_polynomial_degree': 4,\n", - " 'interpolation_is_log': True,\n", " 'scvar_method': None,\n", " 'inversion_method': None,\n", + " 'interpolation_polynomial_degree': 4,\n", + " 'interpolation_is_log': True,\n", + " 'polarized': False,\n", + " 'time_like': False,\n", " 'n_integration_cores': 0},\n", " 'debug': {'skip_singlet': False, 'skip_non_singlet': False},\n", - " 'rotations': {'xgrid': [0.001, 0.01, 0.1, 0.5, 1.0],\n", - " 'pids': [22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6],\n", - " '_targetgrid': None,\n", - " '_inputgrid': None,\n", - " '_targetpids': None,\n", - " '_inputpids': None},\n", - " '_mugrid': [100.0],\n", - " '_mu2grid': None,\n", " 'eko_version': '0.0.0'}" ] }, diff --git a/doc/source/overview/tutorials/output.ipynb b/doc/source/overview/tutorials/output.ipynb index 5387390ad..47345ac35 100644 --- a/doc/source/overview/tutorials/output.ipynb +++ b/doc/source/overview/tutorials/output.ipynb @@ -21,7 +21,9 @@ "cell_type": "code", "execution_count": 1, "id": "51dbf608-ccfb-4fb5-aca3-723a5f280f65", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [], "source": [ "import eko" ] }, @@ -39,7 +41,9 @@ "cell_type": "code", "execution_count": 2, "id": "daae3811-e7a6-4a7c-9f55-eef8ea1564b8", - "metadata": {}, + "metadata": { + "tags": [] + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "<class 'eko.io.struct.EKO'>\n" ] } ], "source": [ - "with eko.EKO.open(\"./myeko.tar\") as evolution_operator:\n", + "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", " print(type(evolution_operator))" ] }, @@ -72,13 +76,13 @@ "name": "stdout", "output_type": "stream", "text": [ - "TheoryCard(order=(1, 0), couplings=CouplingsRef(alphas=reference_running.<locals>.ReferenceRunning(value=0.118, scale=91.2), alphaem=reference_running.<locals>.ReferenceRunning(value=0.007496252, scale=nan), num_flavs_ref=None, max_num_flavs=6), num_flavs_init=None, num_flavs_max_pdf=6, intrinsic_flavors=[4], quark_masses=heavy_quark.<locals>.HeavyQuarks(c=reference_running.<locals>.ReferenceRunning(value=2.0, scale=nan), b=reference_running.<locals>.ReferenceRunning(value=4.5, scale=nan), t=reference_running.<locals>.ReferenceRunning(value=173.07, scale=nan)), quark_masses_scheme=<QuarkMassScheme.POLE: 'pole'>, matching=heavy_quark.<locals>.HeavyQuarks(c=1.0, b=1.0, t=1.0), xif=1.0)\n", - "OperatorCard(mu0=1.65, configs=Configs(evolution_method=<EvolutionMethod.ITERATE_EXACT: 'iterate-exact'>, ev_op_max_order=(10, 0), ev_op_iterations=10, interpolation_polynomial_degree=4, interpolation_is_log=True, scvar_method=None, inversion_method=None, n_integration_cores=0),
debug=Debug(skip_singlet=False, skip_non_singlet=False), rotations=Rotations(xgrid=<eko.interpolation.XGrid object at 0x...>, pids=array([22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]), _targetgrid=None, _inputgrid=None, _targetpids=None, _inputpids=None), _mugrid=array([100.]), _mu2grid=None, eko_version='0.0.0')\n" + "TheoryCard(order=(1, 0), couplings=CouplingsInfo(alphas=0.118, alphaem=0.007496252, scale=91.2, max_num_flavs=6, num_flavs_ref=5, em_running=False), heavy=HeavyInfo(num_flavs_init=4, num_flavs_max_pdf=6, intrinsic_flavors=[4, 5, 6], masses=[[2.0, nan], [4.5, nan], [173.07, nan]], masses_scheme=<QuarkMassScheme.POLE: 'pole'>, matching_ratios=[1.0, 1.0, 1.0]), xif=1.0, n3lo_ad_variation=(0, 0, 0, 0))\n", + "OperatorCard(mu0=1.65, mugrid=[(100.0, 5)], xgrid=<eko.interpolation.XGrid object at 0x...>, configs=Configs(evolution_method=<EvolutionMethod.ITERATE_EXACT: 'iterate-exact'>, ev_op_max_order=(10, 0), ev_op_iterations=10, scvar_method=None, inversion_method=None, interpolation_polynomial_degree=4, interpolation_is_log=True, polarized=False, time_like=False, n_integration_cores=0), debug=Debug(skip_singlet=False, skip_non_singlet=False), eko_version='0.0.0')\n" ] } ], "source": [ - "with eko.EKO.open(\"./myeko.tar\") as evolution_operator:\n", + "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", " # obtain theory card\n", " print(evolution_operator.theory_card)\n", " # or operator card\n", " print(evolution_operator.operator_card)" ] }, @@ -104,13 +108,13 @@ "name": "stdout", "output_type": "stream", "text": [ - "[10000.]\n" + "[(10000.0, 5)]\n" ] } ], "source": [ - "with eko.EKO.open(\"./myeko.tar\") as evolution_operator:\n", - " print(evolution_operator.mu2grid)" + "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", + " print(evolution_operator.evolgrid)" ] }, @@ -138,8 +142,8 @@ } ], "source": [ - "with eko.EKO.open(\"./myeko.tar\") as evolution_operator:\n", - " with evolution_operator.operator(10000.) as op:\n", + "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", + " with evolution_operator.operator((10000.,5)) as op:\n", " print(f\"operator: {op.operator.shape}\")\n", " print(f\"error: {op.error.shape}\")" ] }, diff --git a/doc/source/overview/tutorials/pdf.ipynb b/doc/source/overview/tutorials/pdf.ipynb index c46408d31..4607ef891 100644 --- a/doc/source/overview/tutorials/pdf.ipynb +++ b/doc/source/overview/tutorials/pdf.ipynb @@ -49,6 +49,7 @@ "metadata": {}, "outputs": [], "source": [ + "import pathlib\n", "import eko\n", "from banana import toy\n", "pdf = toy.mkPDF(\"\", 0)" ] }, @@ -70,7 +71,7 @@ "outputs": [], "source": [ "from ekobox.apply import apply_pdf\n", - "with eko.EKO.open(\"./myeko.tar\") as evolution_operator:\n", + "with eko.EKO.read(\"./myeko.tar\") as evolution_operator:\n", " evolved_pdfs = apply_pdf(evolution_operator, pdf)" ] }, @@ -123,7 +124,7 @@ { "data": { "text/plain": [ - "array([ 2.40543555e+04, 3.77471543e+02, 3.33983197e+01, -1.16579864e+01,\n", + "array([ 2.58966998e+04, 4.74768306e+02, 3.57939013e+01, -1.03946292e+01,\n", " 0.00000000e+00])" ] }, @@ -194,8 +195,8 @@ "th_card = example.theory()\n", "op_card = example.operator()\n", "# here we replace the grid with a very minimal one, to speed up the example\n", - "op_card.rotations.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.])\n", - "op_card.mu2grid = [100., 10000.]\n", + "op_card.xgrid = eko.interpolation.XGrid([1e-3, 1e-2, 1e-1, 5e-1, 1.])\n", + "op_card.mugrid = [(10.,5), (100.,5)]\n", "# set QCD LO evolution\n", "th_card.orders = (1,0)" ] }, @@ -218,6 +219,13 @@ "id": "8c18c327", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Overwriting old PDF installation\n" + ] + }, { "name": "stdout",
"output_type": "stream", @@ -228,13 +236,15 @@ ], "source": [ "from ekobox.evol_pdf import evolve_pdfs\n", + "path = pathlib.Path(\"./myeko2.tar\")\n", + "path.unlink(missing_ok=True)\n", "evolve_pdfs(\n", " [pdf],\n", " th_card,\n", " op_card,\n", " install=True,\n", " name=\"Evolved_PDF\",\n", - " store_path=\"myeko2.tar\"\n", + " store_path=path\n", ")" ] }, @@ -285,7 +295,7 @@ "output_type": "stream", "text": [ "has gluon? True\n", - "xg(x=0.01, Q2=89.1) = 4.0502308231988415\n" + "xg(x=0.01, Q2=89.1) = 4.399576390180355\n" ] } ], @@ -337,32 +347,32 @@ } ], "source": [ + "from math import nan\n", "import numpy as np\n", "import lhapdf\n", "from ekobox.cards import example\n", "from eko.interpolation import make_grid\n", + "from eko.quantities.heavy_quarks import QuarkMassRef,HeavyQuarks\n", "\n", "# get the PDF object\n", "ct14llo = lhapdf.mkPDF(\"CT14llo\")\n", "\n", "# setup the operator card\n", "op_card = example.operator()\n", - "op_card.rotations.xgrid = eko.interpolation.XGrid(make_grid(30, 30)) # x grid\n", - "op_card.mu2grid = np.geomspace(10., 1e5, 5).tolist() # Q2 grid\n", + "op_card.xgrid = eko.interpolation.XGrid(make_grid(30, 30)) # x grid\n", + "op_card.mugrid = [(float(q),5) for q in np.geomspace(5., 100, 5)] # Q2 grid\n", "op_card.mu0 = 1.295000 # starting point for the evolution \n", "\n", "# setup the theory card - this can be mostly inferred from the PDF's .info file\n", "\n", "th_card = example.theory()\n", "th_card.orders = (1,0) # QCD LO\n", - "th_card.quark_masses.c.value = 1.3 # charm mass\n", - "th_card.quark_masses.b.value = 4.75 # bottom mass\n", - "th_card.quark_masses.t.value = 172. # top mass\n", - "th_card.couplings.alphas.value = 0.130000 # reference value of alpha_s\n", - "th_card.couplings.alphas.scale = 91.1876 # the reference scale at which alpha_s is provided\n", + "th_card.heavy.masses = HeavyQuarks([QuarkMassRef([1.3,nan]), QuarkMassRef([4.75,nan]), QuarkMassRef([172.,nan])]) # quark mass\n", + "th_card.couplings.alphas = 0.130000 # reference value of alpha_s\n", + "th_card.couplings.scale = 91.1876 # the reference scale at which alpha_s is provided\n", "th_card.couplings.num_flavs_ref = 5 # the number of flavors active at the alpha_s reference scale\n", "th_card.couplings.max_num_flavs = 5 # the maximum number of flavors active in the alpha_s evolution\n", - "th_card.num_flavs_init = 3 # the number of flavors active at the reference scale\n", + "th_card.couplings.num_flavs_init = 3 # the number of flavors active at the reference scale\n", "th_card.num_flavs_max_pdf = 5 # the maximum number of flavors active in the pdf evolution." 
] }, @@ -380,6 +390,13 @@ "id": "05b81dca", "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Overwriting old PDF installation\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -390,14 +407,15 @@ ], "source": [ "from ekobox.evol_pdf import evolve_pdfs\n", - "\n", + "path = pathlib.Path(\"./myeko_ct14llo.tar\")\n", + "path.unlink(missing_ok=True)\n", "evolve_pdfs(\n", " [ct14llo],\n", " th_card,\n", " op_card,\n", " install=True,\n", " name=\"my_ct14llo\",\n", - " store_path=\"myeko_ct14llo.tar\"\n", + " store_path=path\n", ")" ] }, @@ -421,32 +439,32 @@ "output_type": "stream", "text": [ "LHAPDF 6.4.0 loading /home/felix/local/share/LHAPDF/my_ct14llo/my_ct14llo_0000.dat\n", - " x Q2 ct14llo my_ct14llo relative_diff\n", - "0 0.000010 10.0 4.560628e+01 4.559560e+01 0.000234\n", - "1 0.000173 10.0 2.185377e+01 2.184884e+01 0.000225\n", - "2 0.003000 10.0 8.734759e+00 8.732856e+00 0.000218\n", - "3 0.051962 10.0 1.873738e+00 1.874169e+00 -0.000230\n", - "4 0.900000 10.0 4.278472e-05 4.346894e-05 -0.015992\n", - "5 0.000010 100.0 1.288848e+02 1.287965e+02 0.000685\n", - "6 0.000173 100.0 4.661146e+01 4.657867e+01 0.000703\n", - "7 0.003000 100.0 1.323922e+01 1.322967e+01 0.000721\n", - "8 0.051962 100.0 1.978974e+00 1.979055e+00 -0.000041\n", - "9 0.900000 100.0 7.121784e-06 7.245044e-06 -0.017308\n", - "10 0.000010 1000.0 2.272552e+02 2.270758e+02 0.000790\n", - "11 0.000173 1000.0 6.959939e+01 6.954206e+01 0.000824\n", - "12 0.003000 1000.0 1.620144e+01 1.618786e+01 0.000838\n", - "13 0.051962 1000.0 1.924501e+00 1.924600e+00 -0.000051\n", - "14 0.900000 1000.0 2.135864e-06 2.175017e-06 -0.018331\n", - "15 0.000010 10000.0 3.314097e+02 3.311450e+02 0.000799\n", - "16 0.000173 10000.0 9.023010e+01 9.015576e+01 0.000824\n", - "17 0.003000 10000.0 1.825934e+01 1.824477e+01 0.000798\n", - "18 0.051962 10000.0 1.830992e+00 1.831291e+00 -0.000163\n", - "19 0.900000 10000.0 9.288458e-07 9.463689e-07 -0.018866\n", - "20 0.000010 100000.0 4.363142e+02 4.359699e+02 0.000789\n", - "21 0.000173 100000.0 1.085159e+02 1.084286e+02 0.000804\n", - "22 0.003000 100000.0 1.972432e+01 1.970961e+01 0.000746\n", - "23 0.051962 100000.0 1.730267e+00 1.730707e+00 -0.000254\n", - "24 0.900000 100000.0 5.051592e-07 5.156517e-07 -0.020771\n", + " x Q2 ct14llo my_ct14llo relative_diff\n", + "0 0.000010 25.000000 7.635785e+01 7.630461e+01 0.000697\n", + "1 0.000173 25.000000 3.194273e+01 3.192092e+01 0.000683\n", + "2 0.003000 25.000000 1.081843e+01 1.081086e+01 0.000701\n", + "3 0.051962 25.000000 1.958956e+00 1.958632e+00 0.000166\n", + "4 0.900000 25.000000 1.922415e-05 1.955026e-05 -0.016963\n", + "5 0.000010 111.803399 1.333957e+02 1.332985e+02 0.000729\n", + "6 0.000173 111.803399 4.777286e+01 4.773664e+01 0.000758\n", + "7 0.003000 111.803399 1.341028e+01 1.339967e+01 0.000791\n", + "8 0.051962 111.803399 1.978216e+00 1.978130e+00 0.000044\n", + "9 0.900000 111.803399 6.644805e-06 6.753652e-06 -0.016381\n", + "10 0.000010 500.000000 1.967032e+02 1.965456e+02 0.000801\n", + "11 0.000173 500.000000 6.291393e+01 6.286095e+01 0.000842\n", + "12 0.003000 500.000000 1.542347e+01 1.540996e+01 0.000876\n", + "13 0.051962 500.000000 1.947465e+00 1.947391e+00 0.000038\n", + "14 0.900000 500.000000 2.929060e-06 2.977306e-06 -0.016471\n", + "15 0.000010 2236.067977 2.633266e+02 2.631109e+02 0.000819\n", + "16 0.000173 2236.067977 7.708540e+01 7.701938e+01 0.000856\n", + "17 0.003000 2236.067977 1.700410e+01 1.698928e+01 0.000872\n", + "18 0.051962 2236.067977 
1.893923e+00 1.893971e+00 -0.000025\n", + "19 0.900000 2236.067977 1.544450e-06 1.570997e-06 -0.017189\n", + "20 0.000010 10000.000000 3.314097e+02 3.311351e+02 0.000829\n", + "21 0.000173 10000.000000 9.023010e+01 9.015279e+01 0.000857\n", + "22 0.003000 10000.000000 1.825934e+01 1.824402e+01 0.000839\n", + "23 0.051962 10000.000000 1.830992e+00 1.831183e+00 -0.000104\n", + "24 0.900000 10000.000000 9.288458e-07 9.447927e-07 -0.017169\n", "my_ct14llo PDF set, member #0, version 1\n" ] } @@ -461,7 +479,8 @@ "\n", "# collect data\n", "log = {\"x\": [], \"Q2\" : [], \"ct14llo\": [], \"my_ct14llo\": [], \"relative_diff\": []} \n", - "for q2 in np.geomspace(10., 1e5, 5):\n", + "for q in np.geomspace(5., 100, 5):\n", + " q2 = q**2.\n", " for x in np.geomspace(1e-5, 0.9, 5):\n", " value = ct14llo.xfxQ2(pid, x, q2)\n", " my_value = my_ct14llo.xfxQ2(pid, x, q2)\n", diff --git a/poetry.lock b/poetry.lock index 3249915c4..6c76357d1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -204,14 +204,14 @@ css = ["tinycss2 (>=1.1.0,<1.2)"] [[package]] name = "certifi" -version = "2022.12.7" +version = "2023.5.7" description = "Python package for providing Mozilla's CA Bundle." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, + {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, + {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, ] [[package]] @@ -781,14 +781,14 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.2.3)", "diff-cover (>=7.5)", "p [[package]] name = "fonttools" -version = "4.39.3" +version = "4.39.4" description = "Tools to manipulate font files" category = "main" optional = true python-versions = ">=3.8" files = [ - {file = "fonttools-4.39.3-py3-none-any.whl", hash = "sha256:64c0c05c337f826183637570ac5ab49ee220eec66cf50248e8df527edfa95aeb"}, - {file = "fonttools-4.39.3.zip", hash = "sha256:9234b9f57b74e31b192c3fc32ef1a40750a8fbc1cd9837a7b7bfc4ca4a5c51d7"}, + {file = "fonttools-4.39.4-py3-none-any.whl", hash = "sha256:106caf6167c4597556b31a8d9175a3fdc0356fdcd70ab19973c3b0d4c893c461"}, + {file = "fonttools-4.39.4.zip", hash = "sha256:dba8d7cdb8e2bac1b3da28c5ed5960de09e59a2fe7e63bb73f5a59e57b0430d2"}, ] [package.extras] @@ -956,14 +956,14 @@ files = [ [[package]] name = "ipykernel" -version = "6.22.0" +version = "6.23.0" description = "IPython Kernel for Jupyter" category = "dev" optional = false python-versions = ">=3.8" files = [ - {file = "ipykernel-6.22.0-py3-none-any.whl", hash = "sha256:1ae6047c1277508933078163721bbb479c3e7292778a04b4bacf0874550977d6"}, - {file = "ipykernel-6.22.0.tar.gz", hash = "sha256:302558b81f1bc22dc259fb2a0c5c7cf2f4c0bdb21b50484348f7bafe7fb71421"}, + {file = "ipykernel-6.23.0-py3-none-any.whl", hash = "sha256:fc886f1dcdc0ec17f277e4d21fd071c857d381adcb04f3f3735d25325ca323c6"}, + {file = "ipykernel-6.23.0.tar.gz", hash = "sha256:bd6f487d9e2744c84f6e667d46462d7647a4c862e70e08282f05a52b9d4b705f"}, ] [package.dependencies] @@ -990,14 +990,14 @@ test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio" [[package]] name = "ipython" -version = "8.12.1" +version = "8.12.2" description = "IPython: Productive Interactive Computing" category = "main" 
optional = false python-versions = ">=3.8" files = [ - {file = "ipython-8.12.1-py3-none-any.whl", hash = "sha256:e3015a1a4aa09b3984fb81b9cef4f0772af5a549878b81efb094cda8bb121993"}, - {file = "ipython-8.12.1.tar.gz", hash = "sha256:2442915417763b62181009259782975fa50bb5eedb97ae97fb614204bf6ecc21"}, + {file = "ipython-8.12.2-py3-none-any.whl", hash = "sha256:ea8801f15dfe4ffb76dea1b09b847430ffd70d827b41735c64a0638a04103bfc"}, + {file = "ipython-8.12.2.tar.gz", hash = "sha256:c7b80eb7f5a855a88efc971fda506ff7a91c280b42cdae26643e0f601ea281ea"}, ] [package.dependencies] @@ -1573,14 +1573,14 @@ test = ["flaky", "ipykernel", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "p [[package]] name = "nbconvert" -version = "7.3.1" +version = "7.4.0" description = "Converting Jupyter Notebooks" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "nbconvert-7.3.1-py3-none-any.whl", hash = "sha256:d2e95904666f1ff77d36105b9de4e0801726f93b862d5b28f69e93d99ad3b19c"}, - {file = "nbconvert-7.3.1.tar.gz", hash = "sha256:78685362b11d2e8058e70196fe83b09abed8df22d3e599cf271f4d39fdc48b9e"}, + {file = "nbconvert-7.4.0-py3-none-any.whl", hash = "sha256:af5064a9db524f9f12f4e8be7f0799524bd5b14c1adea37e34e83c95127cc818"}, + {file = "nbconvert-7.4.0.tar.gz", hash = "sha256:51b6c77b507b177b73f6729dba15676e42c4e92bcb00edc8cc982ee72e7d89d7"}, ] [package.dependencies] @@ -2171,14 +2171,14 @@ plugins = ["importlib-metadata"] [[package]] name = "pylint" -version = "2.17.3" +version = "2.17.4" description = "python code static checker" category = "dev" optional = false python-versions = ">=3.7.2" files = [ - {file = "pylint-2.17.3-py3-none-any.whl", hash = "sha256:a6cbb4c6e96eab4a3c7de7c6383c512478f58f88d95764507d84c899d656a89a"}, - {file = "pylint-2.17.3.tar.gz", hash = "sha256:761907349e699f8afdcd56c4fe02f3021ab5b3a0fc26d19a9bfdc66c7d0d5cd5"}, + {file = "pylint-2.17.4-py3-none-any.whl", hash = "sha256:7a1145fb08c251bdb5cca11739722ce64a63db479283d10ce718b2460e54123c"}, + {file = "pylint-2.17.4.tar.gz", hash = "sha256:5dcf1d9e19f41f38e4e85d10f511e5b9c35e1aa74251bf95cdd8cb23584e2db1"}, ] [package.dependencies] @@ -3023,14 +3023,14 @@ files = [ [[package]] name = "urllib3" -version = "2.0.1" +version = "2.0.2" description = "HTTP library with thread-safe connection pooling, file post, and more." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "urllib3-2.0.1-py3-none-any.whl", hash = "sha256:d75e5ece05ff170e323303fd924edf29e705f5ae057c489f453a686b639bb68a"}, - {file = "urllib3-2.0.1.tar.gz", hash = "sha256:2ce66a68134be469f5df5d46d724237489b3cd85b2bba2223dbbee1746548826"}, + {file = "urllib3-2.0.2-py3-none-any.whl", hash = "sha256:d055c2f9d38dc53c808f6fdc8eab7360b6fdbbde02340ed25cfbcd817c62469e"}, + {file = "urllib3-2.0.2.tar.gz", hash = "sha256:61717a1095d7e155cdb737ac7bb2f4324a858a1e2e6466f6d03ff630ca68d3cc"}, ] [package.extras] diff --git a/src/eko/beta.py b/src/eko/beta.py index 7eacf0d3a..8ce6658f1 100644 --- a/src/eko/beta.py +++ b/src/eko/beta.py @@ -6,9 +6,8 @@ import numba as nb -from eko.constants import zeta3 - from . import constants +from .constants import zeta3 @nb.njit(cache=True) diff --git a/src/eko/couplings.py b/src/eko/couplings.py index 4f8df06be..b0d39a070 100644 --- a/src/eko/couplings.py +++ b/src/eko/couplings.py @@ -8,16 +8,15 @@ """ import logging -import warnings -from typing import List +from typing import Iterable, List import numba as nb import numpy as np import scipy -from . import constants, thresholds +from . 
import constants, matchings from .beta import b_qcd, b_qed, beta_qcd, beta_qed -from .io.types import EvolutionMethod, Order +from .io.types import EvolutionMethod, Order, SquaredScale from .quantities.couplings import CouplingEvolutionMethod, CouplingsInfo from .quantities.heavy_quarks import QuarkMassScheme @@ -424,9 +423,9 @@ def __init__( couplings: CouplingsInfo, order: Order, method: CouplingEvolutionMethod, - masses: List[float], + masses: List[SquaredScale], hqm_scheme: QuarkMassScheme, - thresholds_ratios: List[float], + thresholds_ratios: Iterable[float], ): # Sanity checks def assert_positive(name, var): @@ -446,24 +445,19 @@ def assert_positive(name, var): self.method = method.value nf_ref = couplings.num_flavs_ref - max_nf = couplings.max_num_flavs scheme_name = hqm_scheme.name self.alphaem_running = couplings.em_running self.decoupled_running = False # create new threshold object self.a_ref = np.array(couplings.values) / 4.0 / np.pi # convert to a_s and a_em - self.thresholds = thresholds.ThresholdsAtlas( - masses, - couplings.scale**2, - nf_ref, - thresholds_ratios=thresholds_ratios, - max_nf=max_nf, - ) + matching_scales = (np.array(masses) * np.array(thresholds_ratios)).tolist() + self.thresholds_ratios = list(thresholds_ratios) + self.atlas = matchings.Atlas(matching_scales, (couplings.scale**2, nf_ref)) self.hqm_scheme = scheme_name logger.info( "Strong Coupling: a_s(µ_R^2=%f)%s=%f=%f/(4π)", - self.q2_ref, + self.mu2_ref, f"^(nf={nf_ref})" if nf_ref else "", self.a_ref[0], self.a_ref[0] * 4 * np.pi, @@ -472,7 +466,7 @@ def assert_positive(name, var): logger.info( "Electromagnetic Coupling: a_em(µ_R^2=%f)%s=%f=%f/(4π)\nalphaem" " running: %r\ndecoupled running: %r", - self.q2_ref, + self.mu2_ref, f"^(nf={nf_ref})" if nf_ref else "", self.a_ref[1], self.a_ref[1] * 4 * np.pi, @@ -483,9 +477,9 @@ def assert_positive(name, var): self.cache = {} @property - def q2_ref(self): + def mu2_ref(self): """Return reference scale.""" - return self.thresholds.q2_ref + return self.atlas.origin[0] def unidimensional_exact(self, beta0, b_vec, u, a_ref, method, rtol): """Compute single coupling via decoupled |RGE|. 
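Aside from the renaming, the hunks above and below replace eko.thresholds.ThresholdsAtlas with eko.matchings.Atlas. A minimal sketch of the new bookkeeping, restricted to calls that appear elsewhere in this diff (the numerical values are illustrative only):

    from eko import matchings

    # squared matching scales for charm, bottom, top, plus the reference point (mu2, nf)
    atlas = matchings.Atlas([2.0**2, 4.5**2, 173.07**2], (91.2**2, 5))

    # an evolution path is a list of Segment(origin, target, nf), composed in order
    for seg in atlas.path((1e4, 5)):
        print(seg.origin, seg.target, seg.nf)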
@@ -734,23 +728,22 @@ def a( numpy.ndarray couplings :math:`a_i(\mu_R^2) = \frac{\alpha_i(\mu_R^2)}{4\pi}` """ - # Set up the path to follow in order to go from q2_0 to q2_ref + # Set up the path to follow in order to go from mu2_0 to mu2_ref final_a = self.a_ref.copy() - path = self.thresholds.path(scale_to, nf_to) - is_downward = thresholds.is_downward_path(path) - shift = thresholds.flavor_shift(is_downward) + path = self.atlas.path((scale_to, nf_to)) + is_downward = matchings.is_downward_path(path) + shift = matchings.flavor_shift(is_downward) for k, seg in enumerate(path): # skip a very short segment, but keep the matching - if not np.isclose(seg.q2_from, seg.q2_to): - new_a = self.compute(final_a, seg.nf, seg.q2_from, seg.q2_to) + if not np.isclose(seg.origin, seg.target): + new_a = self.compute(final_a, seg.nf, seg.origin, seg.target) else: new_a = final_a # apply matching conditions: see hep-ph/9706430 # - if there is yet a step to go if k < len(path) - 1: - # q2_to is the threshold value - L = np.log(self.thresholds.thresholds_ratios[seg.nf - shift]) + L = np.log(self.thresholds_ratios[seg.nf - shift]) m_coeffs = ( compute_matching_coeffs_down(self.hqm_scheme, seg.nf - 1) if is_downward diff --git a/src/eko/evolution_operator/__init__.py b/src/eko/evolution_operator/__init__.py index df79e8c4f..649ffc332 100644 --- a/src/eko/evolution_operator/__init__.py +++ b/src/eko/evolution_operator/__init__.py @@ -26,6 +26,7 @@ from ..kernels import singlet_qed as qed_s from ..kernels import utils from ..kernels import valence_qed as qed_v +from ..matchings import Segment from ..member import OpMember logger = logging.getLogger(__name__) @@ -606,13 +607,13 @@ class Operator(sv.ModeMixin): full_labels_qed = br.full_unified_labels def __init__( - self, config, managers, nf, q2_from, q2_to, mellin_cut=5e-2, is_threshold=False + self, config, managers, segment: Segment, mellin_cut=5e-2, is_threshold=False ): self.config = config self.managers = managers - self.nf = nf - self.q2_from = q2_from - self.q2_to = q2_to + self.nf = segment.nf + self.q2_from = segment.origin + self.q2_to = segment.target # TODO make 'cut' external parameter? self._mellin_cut = mellin_cut self.is_threshold = is_threshold @@ -703,7 +704,7 @@ def compute_aem_list(self): as1 = self.a_s[1] aem0 = self.a_em[0] aem1 = self.a_em[1] - q2ref = self.managers["couplings"].q2_ref + q2ref = self.managers["couplings"].mu2_ref delta_from = abs(self.q2_from - q2ref) delta_to = abs(self.q2_to - q2ref) # I compute the values in aem_list starting from the mu2 diff --git a/src/eko/evolution_operator/grid.py b/src/eko/evolution_operator/grid.py index 6f5520a03..ca8a46d51 100644 --- a/src/eko/evolution_operator/grid.py +++ b/src/eko/evolution_operator/grid.py @@ -4,22 +4,29 @@ previously instantiated information and does the actual computation of the Q2s. """ - import logging -import numbers +from dataclasses import astuple +from typing import Dict, List, Optional import numpy as np import numpy.typing as npt from .. import member from .. import scale_variations as sv +from ..couplings import Couplings +from ..interpolation import InterpolatorDispatcher from ..io.runcards import Configs, Debug -from ..thresholds import flavor_shift, is_downward_path +from ..io.types import EvolutionPoint as EPoint +from ..io.types import Order +from ..matchings import Atlas, Segment, flavor_shift, is_downward_path from . 
import Operator, flavors, matching_condition, physical from .operator_matrix_element import OperatorMatrixElement logger = logging.getLogger(__name__) +OpDict = Dict[str, Optional[npt.NDArray]] +"""In particular, only the ``operator`` and ``error`` fields are expected.""" + class OperatorGrid(sv.ModeMixin): """Collection of evolution operators for several scales. @@ -39,40 +46,20 @@ class OperatorGrid(sv.ModeMixin): def __init__( self, - mu2grid: npt.NDArray, - order: tuple, - masses: tuple, + mu2grid: List[EPoint], + order: Order, + masses: List[float], mass_scheme, + thresholds_ratios: List[float], intrinsic_flavors: list, xif: float, n3lo_ad_variation: tuple, configs: Configs, debug: Debug, - thresholds_config, - couplings, - interpol_dispatcher, + atlas: Atlas, + couplings: Couplings, + interpol_dispatcher: InterpolatorDispatcher, ): - """Initialize `OperatorGrid`. - - Parameters - ---------- - config: dict - configuration dictionary - q2_grid: array - Grid in Q2 on where to to compute the operators - order: tuple(int,int) - orders in perturbation theory - thresholds_config: eko.thresholds.ThresholdsAtlas - Instance of :class:`~eko.thresholds.Threshold` containing information about the - thresholds - couplings: eko.couplings.StrongCoupling - Instance of :class:`~eko.couplings.StrongCoupling` able to generate a_s for - any q - kernel_dispatcher: eko.kernel_generation.KernelDispatcher - Instance of the :class:`~eko.kernel_generation.KernelDispatcher` with the - information about the kernels - - """ # check config = {} config["order"] = order @@ -84,6 +71,7 @@ def __init__( for i, q in enumerate("cbt"): config[f"m{q}"] = masses[i] + config["thresholds_ratios"] = thresholds_ratios method = config["method"] = configs.evolution_method.value config["backward_inversion"] = configs.inversion_method config["ev_op_max_order"] = configs.ev_op_max_order @@ -94,17 +82,6 @@ def __init__( config["polarized"] = configs.polarized config["time_like"] = configs.time_like - if method not in [ - "iterate-exact", - "iterate-expanded", - "truncated", - "ordered-truncated", - "decompose-exact", - "decompose-expanded", - "perturbative-exact", - "perturbative-expanded", - ]: - raise ValueError(f"Unknown evolution mode {method}") if order == (1, 0) and method != "iterate-exact": logger.warning("Evolution: In LO we use the exact solution always!") @@ -114,16 +91,15 @@ def __init__( self.config = config self.q2_grid = mu2grid self.managers = dict( - thresholds_config=thresholds_config, + thresholds_config=atlas, couplings=couplings, interpol_dispatcher=interpol_dispatcher, ) self._threshold_operators = {} self._matching_operators = {} - def get_threshold_operators(self, path): - """ - Generate the threshold operators. + def get_threshold_operators(self, path: List[Segment]) -> List[Operator]: + """Generate the threshold operators. This method is called every time the OperatorGrid is asked for a grid on Q^2 with a list of the relevant areas. @@ -133,15 +109,8 @@ def get_threshold_operators(self, path): The internal dictionary is self._threshold_operators and its structure is: (q2_from, q2_to) -> eko.operators.Operator - It computes and stores the necessary macthing operators - Parameters - ---------- - path: list(`eko.thresholds.PathSegment`) - thresholds path + It computes and stores the necessary matching operators.
- Returns - ------- - thr_ops: list(eko.evolution_operator.Operator) """ # The base area is always that of the reference q thr_ops = [] is_downward = is_downward_path(path) shift = flavor_shift(is_downward) for seg in path[:-1]: - new_op_key = seg.tuple - thr_config = self.managers["thresholds_config"] - kthr = thr_config.thresholds_ratios[seg.nf - shift] + new_op_key = astuple(seg) + kthr = self.config["thresholds_ratios"][seg.nf - shift] ome = OperatorMatrixElement( self.config, self.managers, seg.nf - shift + 3, - seg.q2_to, + seg.target, is_downward, np.log(kthr), self.config["HQ"] == "MSBAR", ) if new_op_key not in self._threshold_operators: # Compute the operator and store it logger.info("Prepare threshold operator") - op_th = Operator( - self.config, - self.managers, - seg.nf, - seg.q2_from, - seg.q2_to, - is_threshold=True, - ) + op_th = Operator(self.config, self.managers, seg, is_threshold=True) op_th.compute() self._threshold_operators[new_op_key] = op_th thr_ops.append(self._threshold_operators[new_op_key]) # Compute the matching conditions and store it - if seg.q2_to not in self._matching_operators: + if seg.target not in self._matching_operators: ome.compute() - self._matching_operators[seg.q2_to] = ome.op_members + self._matching_operators[seg.target] = ome.op_members return thr_ops - def compute(self, q2grid=None): - """Compute all ekos for the `q2grid`. - - Parameters - ---------- - q2grid: list(float) - List of :math:`Q^2` + def compute(self) -> Dict[EPoint, dict]: + """Compute all ekos for the `q2grid`.""" + return {q2: self.generate(q2) for q2 in self.q2_grid} - Returns - ------- - list(dict) - List of ekos for each value of :math:`Q^2` - """ - # use input? - if q2grid is None: - q2grid = self.q2_grid - # normalize input - if isinstance(q2grid, numbers.Number): - q2grid = [q2grid] - # And now return the grid - grid_return = {} - for q2 in q2grid: - grid_return[q2] = self.generate(q2) - return grid_return - - def generate(self, q2): + def generate(self, q2: EPoint) -> OpDict: r"""Compute a single EKO. - Parameters - ---------- - q2: float - Target value of :math:`Q^2` + Return the eko :math:`\mathbf E(Q^2 \leftarrow Q_0^2)` in flavor basis as a numpy array. - Returns - ------- - dict - eko :math:`\mathbf E(Q^2 \leftarrow Q_0^2)` in flavor basis as numpy array """ # The lists of areas as produced by the thresholds path = self.managers["thresholds_config"].path(q2) # Prepare the path for the composition of the operator thr_ops = self.get_threshold_operators(path) # we start composing with the highest operator ...
- operator = Operator( - self.config, self.managers, path[-1].nf, path[-1].q2_from, path[-1].q2_to - ) + operator = Operator(self.config, self.managers, path[-1]) operator.compute() - intrinsic_range = self.config["intrinsic_range"] + is_downward = is_downward_path(path) - if is_downward: - intrinsic_range = [4, 5, 6] + intrinsic_range = [4, 5, 6] if is_downward else self.config["intrinsic_range"] qed = self.config["order"][1] > 0 + final_op = physical.PhysicalOperator.ad_to_evol_map( operator.op_members, operator.nf, operator.q2_to, intrinsic_range, qed ) @@ -244,32 +175,23 @@ def generate(self, q2): ) # join with the basis rotation, since matching requires c+ (or likewise) + nf_match = op.nf - 1 if is_downward else op.nf + matching = matching_condition.MatchingCondition.split_ad_to_evol_map( + self._matching_operators[op.q2_to], + nf_match, + op.q2_to, + intrinsic_range=intrinsic_range, + qed=qed, + ) if is_downward: - matching = matching_condition.MatchingCondition.split_ad_to_evol_map( - self._matching_operators[op.q2_to], - op.nf - 1, - op.q2_to, - intrinsic_range=intrinsic_range, - qed=qed, - ) invrot = member.ScalarOperator.promote_names( flavors.rotate_matching_inverse(op.nf, qed), op.q2_to ) final_op = final_op @ matching @ invrot @ phys_op else: - matching = matching_condition.MatchingCondition.split_ad_to_evol_map( - self._matching_operators[op.q2_to], - op.nf, - op.q2_to, - intrinsic_range=intrinsic_range, - qed=qed, - ) rot = member.ScalarOperator.promote_names( flavors.rotate_matching(op.nf + 1, qed), op.q2_to ) final_op = final_op @ rot @ matching @ phys_op values, errors = final_op.to_flavor_basis_tensor(qed) - return { - "operator": values, - "error": errors, - } + return {"operator": values, "error": errors} diff --git a/src/eko/evolution_operator/operator_matrix_element.py b/src/eko/evolution_operator/operator_matrix_element.py index c073b13cb..a16b3f25e 100644 --- a/src/eko/evolution_operator/operator_matrix_element.py +++ b/src/eko/evolution_operator/operator_matrix_element.py @@ -10,11 +10,11 @@ import ekore.operator_matrix_elements.polarized.space_like as ome_ps import ekore.operator_matrix_elements.unpolarized.space_like as ome_us import ekore.operator_matrix_elements.unpolarized.time_like as ome_ut -from ekore import harmonics from .. import basis_rotation as br from .. import scale_variations as sv from ..io.types import InversionMethod +from ..matchings import Segment from . import Operator, QuadKerBase logger = logging.getLogger(__name__) @@ -185,12 +185,8 @@ class OperatorMatrixElement(Operator): configuration managers : dict managers - nf: int - number of active flavor below threshold - q2: float - matching scale - is_backward: bool - True for backward evolution + segment: Segment + path segment L: float :math:`\ln(\mu_F^2 / m_h^2)` is_msbar: bool @@ -215,7 +211,7 @@ class OperatorMatrixElement(Operator): full_labels_qed = copy.deepcopy(full_labels) def __init__(self, config, managers, nf, q2, is_backward, L, is_msbar): - super().__init__(config, managers, nf, q2, None) + super().__init__(config, managers, Segment(q2, q2, nf)) self.backward_method = config["backward_inversion"] if is_backward else None if is_backward: self.is_intrinsic = True diff --git a/src/eko/evolution_operator/physical.py b/src/eko/evolution_operator/physical.py index 10164fd68..c963e2a26 100644 --- a/src/eko/evolution_operator/physical.py +++ b/src/eko/evolution_operator/physical.py @@ -1,10 +1,6 @@ """Contains the :class:`PhysicalOperator` class.""" - -import numpy as np - from .. 
import basis_rotation as br from .. import member -from . import flavors class PhysicalOperator(member.OperatorBase): @@ -109,51 +105,3 @@ def ad_to_evol_map(cls, op_members, nf, q2_final, intrinsic_range, qed=False): m[f"{hq}-.{hq}-"] = op_id.copy() # map key to MemberName return cls.promote_names(m, q2_final) - - def to_flavor_basis_tensor(self, qed=False): - """ - Convert the computations into an rank 4 tensor over flavor operator space and momentum fraction operator space. - - Parameters - ---------- - qed : bool - activate qed - - Returns - ------- - tensor : numpy.ndarray - EKO - """ - nf_in, nf_out = flavors.get_range(self.op_members.keys(), qed) - len_pids = len(br.flavor_basis_pids) - len_xgrid = list(self.op_members.values())[0].value.shape[0] - # dimension will be pids^2 * xgrid^2 - value_tensor = np.zeros((len_pids, len_xgrid, len_pids, len_xgrid)) - error_tensor = value_tensor.copy() - for name, op in self.op_members.items(): - if not qed: - in_pids = flavors.pids_from_intrinsic_evol(name.input, nf_in, False) - out_pids = flavors.pids_from_intrinsic_evol(name.target, nf_out, True) - else: - in_pids = flavors.pids_from_intrinsic_unified_evol( - name.input, nf_in, False - ) - out_pids = flavors.pids_from_intrinsic_unified_evol( - name.target, nf_out, True - ) - for out_idx, out_weight in enumerate(out_pids): - for in_idx, in_weight in enumerate(in_pids): - # keep the outer index to the left as we're multiplying from the right - value_tensor[ - out_idx, # output pid (position) - :, # output momentum fraction - in_idx, # input pid (position) - :, # input momentum fraction - ] += out_weight * (op.value * in_weight) - error_tensor[ - out_idx, # output pid (position) - :, # output momentum fraction - in_idx, # input pid (position) - :, # input momentum fraction - ] += out_weight * (op.error * in_weight) - return value_tensor, error_tensor diff --git a/src/eko/io/access.py b/src/eko/io/access.py new file mode 100644 index 000000000..b8dec50a8 --- /dev/null +++ b/src/eko/io/access.py @@ -0,0 +1,95 @@ +"""Manage file system resources access.""" + +from dataclasses import dataclass +from pathlib import Path +from typing import Optional + +from . import exceptions + + +class ReadOnlyOperator(RuntimeError, exceptions.OutputError): + """It is not possible to write on a read-only operator. + + In particular, the behavior would be deceitful, since writing is possible + in-memory and even on the temporary folder. + But eventually, no writing will happen on a persistent archive, so any + modification is lost after exiting the program. + + """ + + +class ClosedOperator(RuntimeError, exceptions.OutputError): + """It is not possible to write on nor to read from a closed operator. + + This is a milder issue than :class:`ReadOnlyOperator`, since in this case not + even the writing on the temporary folder would be possible. + + Instead, it will look like you can access some properties, but the operator + is actually closed, so it should not be used any longer in general. + However, for extremely simple properties, like those available in memory + from :class:`eko.io.struct.Metadata` or :class:`eko.io.struct.AccessConfigs`, there + is no need to raise on read, since those properties are actually available, + but they should always raise on writing, since there is no persistence for + the content written, and it can be deceitful. + + Still, the level of protection will be mild, since a thorough protection + would clutter the code a lot, requiring a lot of maintenance. + "We are adults here".
+ + """ + + +@dataclass +class AccessConfigs: + """Configurations specified during opening of an EKO.""" + + path: Path + """The path to the permanent object.""" + readonly: bool + "Read-only flag" + open: bool + "EKO status" + + @property + def read(self): + """Check reading permission. + + Reading access is always granted on open operator. + + """ + return self.open + + @property + def write(self): + """Check writing permission.""" + return self.open and not self.readonly + + def assert_open(self): + """Assert operator is open. + + Raises + ------ + exceptions.ClosedOperator + if operator is closed + + """ + if not self.open: + raise ClosedOperator + + def assert_writeable(self, msg: Optional[str] = None): + """Assert operator is writeable. + + Raises + ------ + exceptions.ClosedOperator + see :meth:`assert_open` + exceptions.ReadOnlyOperator + if operators has been declared read-only + + """ + if msg is None: + msg = "" + + self.assert_open() + if self.readonly: + raise ReadOnlyOperator(msg) diff --git a/src/eko/io/bases.py b/src/eko/io/bases.py new file mode 100644 index 000000000..97a28dc36 --- /dev/null +++ b/src/eko/io/bases.py @@ -0,0 +1,117 @@ +"""Operators bases.""" +from dataclasses import dataclass, fields +from typing import Optional + +import numpy as np +import numpy.typing as npt + +from .. import basis_rotation as br +from .. import interpolation +from .dictlike import DictLike + + +@dataclass +class Bases(DictLike): + """Rotations related configurations. + + Here "Rotation" is intended in a broad sense: it includes both rotations in + flavor space (labeled with suffix `pids`) and in :math:`x`-space (labeled + with suffix `grid`). + Rotations in :math:`x`-space correspond to reinterpolate the result on a + different basis of polynomials. 
+ + """ + + xgrid: interpolation.XGrid + """Internal momentum fraction grid.""" + _targetgrid: Optional[interpolation.XGrid] = None + _inputgrid: Optional[interpolation.XGrid] = None + _targetpids: Optional[npt.NDArray] = None + _inputpids: Optional[npt.NDArray] = None + + def __post_init__(self): + """Adjust types when loaded from serialized object.""" + for attr in ("xgrid", "_inputgrid", "_targetgrid"): + value = getattr(self, attr) + if value is None: + continue + if isinstance(value, (np.ndarray, list)): + setattr(self, attr, interpolation.XGrid(value)) + elif not isinstance(value, interpolation.XGrid): + setattr(self, attr, interpolation.XGrid.load(value)) + + @property + def pids(self): + """Internal flavor basis, used for computation.""" + return np.array(br.flavor_basis_pids) + + @property + def inputpids(self) -> npt.NDArray: + """Provide pids expected on the input PDF.""" + if self._inputpids is None: + return self.pids + return self._inputpids + + @inputpids.setter + def inputpids(self, value): + self._inputpids = value + + @property + def targetpids(self) -> npt.NDArray: + """Provide pids corresponding to the output PDF.""" + if self._targetpids is None: + return self.pids + return self._targetpids + + @targetpids.setter + def targetpids(self, value): + self._targetpids = value + + @property + def inputgrid(self) -> interpolation.XGrid: + """Provide :math:`x`-grid expected on the input PDF.""" + if self._inputgrid is None: + return self.xgrid + return self._inputgrid + + @inputgrid.setter + def inputgrid(self, value: interpolation.XGrid): + self._inputgrid = value + + @property + def targetgrid(self) -> interpolation.XGrid: + """Provide :math:`x`-grid corresponding to the output PDF.""" + if self._targetgrid is None: + return self.xgrid + return self._targetgrid + + @targetgrid.setter + def targetgrid(self, value: interpolation.XGrid): + self._targetgrid = value + + @classmethod + def from_dict(cls, dictionary: dict): + """Deserialize rotation. + + Load from full state, but with public names. + + """ + d = dictionary.copy() + for f in fields(cls): + if f.name.startswith("_"): + d[f.name] = d.pop(f.name[1:]) + return cls._from_dict(d) + + @property + def raw(self): + """Serialize rotation. + + Pass through interfaces, access internal values but with a public name. + + """ + d = self._raw() + for key in d.copy(): + if key.startswith("_"): + d[key[1:]] = d.pop(key) + + return d diff --git a/src/eko/io/exceptions.py b/src/eko/io/exceptions.py index 5411c7401..b924f8057 100644 --- a/src/eko/io/exceptions.py +++ b/src/eko/io/exceptions.py @@ -24,35 +24,3 @@ class OperatorLocationError(ValueError, OutputError): def __init__(self, path: os.PathLike): self.path = path super().__init__(f"Path '{path}' not in operators folder") - - -class ReadOnlyOperator(RuntimeError, OutputError): - """It is not possible to write on a read-only operator. - - In particular, the behavior would be deceitful, since writing is possible - in-memory and even on the temporary folder. - But eventually, no writing will happen on a persistent archive, so any - modification is lost after exiting the program. - - """ - - -class ClosedOperator(RuntimeError, OutputError): - """It is not possible to write on nor to read from a closed operator. - - This is milder issue than :class:`ReadOnlyOperator`, since in this case not - even the writing on the temporary folder would be possible. 
- - Instead, it will look like you can access some properties, but the operator - is actually closed, so it should not be used any longer in general. - However, for extremely simple properties, like those available in memory - from :class:`eko.io.struct.Metadata` or :class:`eko.io.struct.AccessConfigs`, there - is no need to raise on read, since those properties are actually available, - but they should always raise on writing, since there is no persistence for - the content written, and it can be deceitful. - - Still, the level of protection will be mild, since a thoruough protection - would clutter a lot the code, requiring a lot of maintenance. - "We are adult here". - - """ diff --git a/src/eko/io/inventory.py b/src/eko/io/inventory.py new file mode 100644 index 000000000..efdd2e9b0 --- /dev/null +++ b/src/eko/io/inventory.py @@ -0,0 +1,211 @@ +"""Manage assets used during computation.""" +import base64 +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Dict, Generic, Optional, Type, TypeVar + +import yaml + +from .access import AccessConfigs +from .items import Header, Operator + +NBYTES = 8 +ENDIANNESS = "little" + +HEADER_EXT = ".yaml" +ARRAY_EXT = [".npy", ".npz"] +COMPRESSED_EXT = ".lz4" +OPERATOR_EXT = [ext + COMPRESSED_EXT for ext in ARRAY_EXT] + + +class LookupError(ValueError): + """Failure in content retrieval from inventory.""" + + +def encode(header: Header): + """Extract a hash from a header.""" + return base64.urlsafe_b64encode( + abs(hash(header)).to_bytes(NBYTES, byteorder=ENDIANNESS) + ).decode(encoding="utf-8") + + +def header_name(header: Header): + """Determine header file name.""" + stem = encode(header) + return stem + HEADER_EXT + + +def operator_name(header: Header, err: bool): + """Determine operator file name, from the associated header.""" + stem = encode(header) + return stem + OPERATOR_EXT[1 if err else 0] + + +H = TypeVar("H", bound=Header) + + +@dataclass(frozen=True) +class Inventory(Generic[H]): + """Assets manager. + + In particular, manage autosave, autoload, and memory caching. + + """ + + path: Path + access: AccessConfigs + header_type: Type[Header] + cache: Dict[H, Optional[Operator]] = field(default_factory=dict) + contentless: bool = False + name: Optional[str] = None + """Only for logging purposes.""" + + def __str__(self) -> str: + return f"Inventory '{self.name}'" + + def lookup(self, stem: str, header: bool = False) -> Path: + """Look up the content path in the inventory.""" + EXT = OPERATOR_EXT if not header else [HEADER_EXT] + found = [ + path + for path in self.path.iterdir() + if path.name.startswith(stem) + if "".join(path.suffixes) in EXT + ] + + if len(found) == 0: + raise LookupError(f"Item '{stem}' not available in {self}.") + elif len(found) > 1: + raise LookupError( + f"Too many items associated to '{stem}' in {self}:\n{found}" + ) + + return found[0] + + def __getitem__(self, header: H) -> Optional[Operator]: + r"""Retrieve operator for given header. + + If the operator is not already in memory, it will be automatically + loaded.
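+
+        A minimal sketch of the resulting caching behavior (``inv`` and
+        ``header`` are illustrative names)::
+
+            op = inv[header]  # first access: read from disk, then cached
+            op = inv[header]  # second access: served from the in-memory cache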
+
+        """
+        self.access.assert_open()
+
+        try:
+            op = self.cache[header]
+            if op is not None or self.contentless:
+                return op
+        except KeyError:
+            pass
+
+        stem = encode(header)
+
+        # in case of contentless, check header availability instead
+        if self.contentless:
+            self.lookup(stem, header=True)
+            self.cache[header] = None
+            return None
+
+        # for contentful inventories, check operator availability
+        oppath = self.lookup(stem)
+
+        with open(oppath, "rb") as fd:
+            op = Operator.load(fd)
+
+        self.cache[header] = op
+        return op
+
+    def __setitem__(self, header: H, operator: Optional[Operator]):
+        """Set operator for given header.
+
+        Header and operator are automatically dumped on disk.
+
+        """
+        self.access.assert_writeable()
+
+        # always save the header on disk
+        headpath = self.path / header_name(header)
+        headpath.write_text(yaml.dump(asdict(header)), encoding="utf-8")
+
+        # in case of contentless, set empty cache and exit
+        if self.contentless:
+            self.cache[header] = None
+            return
+
+        # otherwise save also the operator, and add to the cache
+        assert operator is not None
+
+        with_err = operator.error is not None
+        oppath = self.path / operator_name(header, err=with_err)
+
+        with open(oppath, "wb") as fd:
+            operator.save(fd)
+
+        self.cache[header] = operator
+
+    def __delitem__(self, header: H):
+        """Drop operator from memory.
+
+        Irrelevant for contentless inventories.
+
+        Note
+        ----
+        This method only drops the operator from memory, and it's not expected
+        to do anything else.
+
+        Autosave is done on set, and explicit saves are performed by the
+        computation functions.
+
+        If a further explicit save is required, repeat explicit assignment::
+
+            inventory[header] = inventory[header]
+
+        This is only useful if the operator has been mutated in place, which in
+        general should be avoided, since the operator should only be the result
+        of a full computation or a library manipulation.
+
+        """
+        self.cache[header] = None
+
+    def __iter__(self):
+        """Iterate over loaded content.
+
+        This iteration is only over the cache, so it might not be faithful with
+        respect to the real content on disk.
+        To iterate the full content of the disk, call :meth:`sync` right
+        before.
+
+        """
+        yield from self.cache
+
+    def __len__(self):
+        """Return the number of elements in the cache."""
+        return len(self.cache)
+
+    def sync(self):
+        """Sync the headers in the cache with the content on disk.
+
+        In particular, headers on disk that are missing in the :attr:`cache`
+        are added to it, without loading actual operators in memory.
+
+        Despite the name, the operation is non-destructive, so, even if the
+        cache has been abused, nothing will be deleted nor unloaded.
+
+        """
+        for path in self.path.iterdir():
+            if path.suffix != HEADER_EXT:
+                continue
+
+            header = self.header_type(
+                **yaml.safe_load(path.read_text(encoding="utf-8"))
+            )
+            self.cache[header] = None
+
+    def __invert__(self):
+        """Alias for :meth:`sync`."""
+        self.sync()
+
+    def empty(self):
+        """Empty the in-memory cache."""
+        for header in self.cache:
+            del self[header]
diff --git a/src/eko/io/items.py b/src/eko/io/items.py
new file mode 100644
index 000000000..1846602c9
--- /dev/null
+++ b/src/eko/io/items.py
@@ -0,0 +1,204 @@
+"""Inventory items definition."""
+import io
+from dataclasses import asdict, dataclass
+from typing import BinaryIO, Optional, Union
+
+import lz4.frame
+import numpy as np
+import numpy.typing as npt
+from numpy.lib import npyio
+
+from .. import matchings
+from . import exceptions
+from .types import EvolutionPoint as EPoint
+from .types import FlavorIndex, FlavorsNumber, SquaredScale
+
+
+@dataclass(frozen=True)
+class Header:
+    """Item header, containing metadata."""
+
+
+@dataclass(frozen=True)
+class Evolution(Header):
+    """Information to compute an evolution operator.
+
+    It describes the evolution with a fixed number of light flavors between two
+    scales.
+
+    """
+
+    origin: SquaredScale
+    """Starting point."""
+    target: SquaredScale
+    """Final point."""
+    nf: FlavorsNumber
+    """Number of active flavors."""
+
+    cliff: bool = False
+    """Whether the operator is reaching a matching scale.
+
+    Cliff operators are the only ones allowed to be intermediate, even though
+    they can also be final segments of an evolution path (see
+    :meth:`eko.matchings.Atlas.path`).
+
+    Intermediate ones always have final scales :attr:`target` corresponding to
+    matching scales, and initial scales :attr:`origin` corresponding to either
+    matching scales or the global initial scale of the |EKO|.
+
+    Note
+    ----
+
+    The name of *cliff* operators stems from the following diagram::
+
+        nf = 3 --------------------------------------------------------
+                    |
+        nf = 4 --------------------------------------------------------
+                                |
+        nf = 5 --------------------------------------------------------
+                                            |
+        nf = 6 --------------------------------------------------------
+
+    where each lane corresponds to |DGLAP| evolution with the relative number
+    of running flavors, and the vertical bridges are the perturbative matchings
+    between two different "adjacent" schemes.
+
+    """
+
+    @classmethod
+    def from_atlas(cls, segment: matchings.Segment, cliff: bool = False):
+        """Create instance from analogous :class:`eko.matchings.Atlas` object."""
+        return cls(**asdict(segment), cliff=cliff)
+
+    @property
+    def as_atlas(self) -> matchings.Segment:
+        """The associated segment."""
+        return matchings.Segment(self.origin, self.target, self.nf)
+
+
+@dataclass(frozen=True)
+class Matching(Header):
+    """Information to compute a matching operator.
+
+    Describe the matching between two different flavor number schemes.
+
+    """
+
+    scale: SquaredScale
+    hq: FlavorIndex
+    inverse: bool = False
+
+    @classmethod
+    def from_atlas(cls, matching: matchings.Matching, inverse: bool = False):
+        """Create instance from analogous :class:`eko.matchings.Atlas` object."""
+        return cls(**asdict(matching), inverse=inverse)
+
+    @property
+    def as_atlas(self) -> matchings.Matching:
+        """The associated matching."""
+        return matchings.Matching(self.scale, self.hq)
+
+
+Recipe = Union[Evolution, Matching]
+
+
+@dataclass(frozen=True)
+class Target(Header):
+    """Target evolution point, labeling evolution from origin to there."""
+
+    scale: SquaredScale
+    nf: FlavorsNumber
+
+    @classmethod
+    def from_ep(cls, ep: EPoint):
+        """Create instance from the :class:`EPoint` analogue."""
+        return cls(*ep)
+
+    @property
+    def ep(self) -> EPoint:
+        """Cast to :class:`EPoint`."""
+        return (self.scale, self.nf)
+
+
+@dataclass(frozen=True)
+class Operator:
+    """Operator representation.
+
+    To be used to hold the result of a computed 4-dim operator (either a raw
+    evolution operator or a matching condition).
+
+    Note
+    ----
+    IO works with streams in memory, in order to avoid intermediate write on
+    disk (keep read from and write to tar file only).
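+
+    A hypothetical round-trip sketch (``arr`` being any :mod:`numpy` array)::
+
+        buf = io.BytesIO()
+        Operator(operator=arr).save(buf)  # save() also rewinds the stream
+        restored = Operator.load(buf)
+        assert restored.error is None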
+
+    """
+
+    operator: npt.NDArray
+    """Content of the evolution operator."""
+    error: Optional[npt.NDArray] = None
+    """Errors on individual operator elements (mainly used for integration
+    error, but it can host any kind of error).
+    """
+
+    def save(self, stream: BinaryIO) -> bool:
+        """Save content of operator to bytes.
+
+        The content is saved on a `stream`, in order to be able to perform the
+        operation both on disk and in memory.
+
+        The returned value tells whether the saved operator contained the
+        error or not (this controls even the format, ``npz`` with errors,
+        ``npy`` otherwise).
+
+        """
+        aux = io.BytesIO()
+        if self.error is None:
+            np.save(aux, self.operator)
+        else:
+            np.savez(aux, operator=self.operator, error=self.error)
+        aux.seek(0)
+
+        # compress the content
+        content = lz4.frame.compress(aux.read())
+
+        # write compressed data
+        stream.write(content)
+        stream.seek(0)
+
+        # return the type of array dumped (i.e. 'npy' or 'npz')
+        return self.error is None
+
+    @classmethod
+    def load(cls, stream: BinaryIO):
+        """Load operator from bytes.
+
+        An input `stream` is used to load the operator from, in order to
+        support the operation both on disk and in memory.
+
+        """
+        extracted_stream = io.BytesIO(lz4.frame.decompress(stream.read()))
+        content = np.load(extracted_stream)
+
+        if isinstance(content, np.ndarray):
+            op = content
+            err = None
+        elif isinstance(content, npyio.NpzFile):
+            op = content["operator"]
+            err = content["error"]
+        else:
+            # TODO: We might consider dropping this exception since np.load will always
+            # return an array (or fail on its own)
+            raise exceptions.OperatorLoadingError(
+                "Not possible to load operator, content format not recognized"
+            )
+
+        return cls(operator=op, error=err)
+
+
+@dataclass(frozen=True)
+class Item:
+    """Inventory item."""
+
+    header: Header
+    content: Operator
diff --git a/src/eko/io/legacy.py b/src/eko/io/legacy.py
index 58899eae4..1df9941b1 100644
--- a/src/eko/io/legacy.py
+++ b/src/eko/io/legacy.py
@@ -6,17 +6,20 @@
 import tarfile
 import tempfile
 import warnings
+from typing import Dict, List
 
 import lz4.frame
 import numpy as np
-import numpy.typing as npt
 import yaml
 
 from eko.interpolation import XGrid
+from eko.io.runcards import flavored_mugrid
+from eko.quantities.heavy_quarks import HeavyInfo, HeavyQuarkMasses, MatchingRatios
 
 from .
import raw from .dictlike import DictLike from .struct import EKO, Operator +from .types import EvolutionPoint as EPoint from .types import RawCard @@ -55,7 +58,9 @@ def load_tar(source: os.PathLike, dest: os.PathLike, errors: bool = False): op5 = metaold.get("Q2grid") if op5 is None: op5 = metaold["mu2grid"] - grid = op5to4(op5, arrays) + grid = op5to4( + flavored_mugrid(op5, theory.heavy.masses, theory.heavy.matching_ratios), arrays + ) with EKO.create(dest) as builder: # here I'm plainly ignoring the static analyzer, the types are faking @@ -63,8 +68,8 @@ def load_tar(source: os.PathLike, dest: os.PathLike, errors: bool = False): # accept also these eko = builder.load_cards(theory, operator).build() # pylint: disable=E1101 - for mu2, op in grid.items(): - eko[mu2] = op + for ep, op in grid.items(): + eko[ep] = op eko.metadata.version = metaold.get("eko_version", "") eko.metadata.data_version = 0 @@ -80,12 +85,20 @@ class PseudoTheory(DictLike): """ - _void: str + heavy: HeavyInfo @classmethod def from_old(cls, old: RawCard): """Load from old metadata.""" - return cls(_void="") + heavy = HeavyInfo( + num_flavs_init=4, + num_flavs_max_pdf=None, + intrinsic_flavors=None, + masses=HeavyQuarkMasses([1.51, 4.92, 172.5]), + masses_scheme=None, + matching_ratios=MatchingRatios([1.0, 1.0, 1.0]), + ) + return cls(heavy=heavy) @dataclasses.dataclass @@ -98,7 +111,7 @@ class PseudoOperator(DictLike): """ mu20: float - mu2grid: npt.NDArray + evolgrid: List[EPoint] xgrid: XGrid configs: dict @@ -110,6 +123,9 @@ def from_old(cls, old: RawCard): if mu2list is None: mu2list = old["mu2grid"] mu2grid = np.array(mu2list) + evolgrid = flavored_mugrid( + np.sqrt(mu2grid).tolist(), [1.51, 4.92, 172.5], [1, 1, 1] + ) xgrid = XGrid(old["interpolation_xgrid"]) @@ -118,7 +134,7 @@ def from_old(cls, old: RawCard): interpolation_is_log=old.get("interpolation_is_log"), ) - return cls(mu20=mu20, mu2grid=mu2grid, xgrid=xgrid, configs=configs) + return cls(mu20=mu20, evolgrid=evolgrid, xgrid=xgrid, configs=configs) ARRAY_SUFFIX = ".npy.lz4" @@ -147,7 +163,7 @@ def load_arrays(dir: pathlib.Path) -> dict: """File name stem for operator errrors.""" -def op5to4(mu2grid: list, arrays: dict) -> dict: +def op5to4(evolgrid: List[EPoint], arrays: dict) -> Dict[EPoint, Operator]: """Load dictionary of 4-dim operators, from a single 5-dim one.""" def plural(name: str) -> list: @@ -169,7 +185,7 @@ def plural(name: str) -> list: err5 = [None] * len(op5) grid = {} - for mu2, op4, err4 in zip(mu2grid, op5, err5): - grid[mu2] = Operator(operator=op4, error=err4) + for ep, op4, err4 in zip(evolgrid, op5, err5): + grid[ep] = Operator(operator=op4, error=err4) return grid diff --git a/src/eko/io/manipulate.py b/src/eko/io/manipulate.py index 15173e800..e9cc89538 100644 --- a/src/eko/io/manipulate.py +++ b/src/eko/io/manipulate.py @@ -1,13 +1,15 @@ """Manipulate output generate by EKO.""" import logging import warnings -from typing import Optional +from typing import Callable, Optional, Union import numpy as np +import numpy.typing as npt from .. import basis_rotation as br from .. import interpolation -from .struct import EKO +from ..interpolation import XGrid +from .struct import EKO, Operator logger = logging.getLogger(__name__) @@ -16,24 +18,58 @@ SIMGRID_ROTATION = "ij,ajbk,kl->aibl" """Simultaneous grid rotation contraction indices.""" +Basis = Union[XGrid, npt.NDArray] + + +def rotation(new: Optional[Basis], old: Basis, check: Callable, compute: Callable): + """Define grid rotation. 
+
+    This function returns the new grid to be assigned and the computed
+    rotation, provided the checks for a non-trivial new grid pass.
+
+    However, the check and the computation are delegated respectively to the
+    callables `check` and `compute`.
+
+    """
+    if new is None:
+        return old, None
+
+    if check(new, old):
+        warnings.warn("The new grid is close to the current one")
+        return old, None
+
+    return new, compute(new, old)
+
+
+def xgrid_check(new: Optional[XGrid], old: XGrid):
+    """Check validity of new xgrid."""
+    return new is not None and len(new) == len(old) and np.allclose(new.raw, old.raw)
+
+
+def xgrid_compute_rotation(new: XGrid, old: XGrid, interpdeg: int, swap: bool = False):
+    """Compute rotation from old to new xgrid.
+
+    By default, the rotation is computed for a target xgrid. If the function
+    is used for an input xgrid, the `swap` argument should be set to `True`,
+    in order to compute it in the other direction (i.e. the transposed).
+
+    """
+    if swap:
+        new, old = old, new
+    b = interpolation.InterpolatorDispatcher(old, interpdeg, False)
+    return b.get_interpolation(new.raw)
+
 
 def xgrid_reshape(
     eko: EKO,
-    targetgrid: Optional[interpolation.XGrid] = None,
-    inputgrid: Optional[interpolation.XGrid] = None,
+    targetgrid: Optional[XGrid] = None,
+    inputgrid: Optional[XGrid] = None,
 ):
     """Reinterpolate operators on output and/or input grids.
 
-    The operation is in-place.
+    Target corresponds to the output PDF.
 
-    Parameters
-    ----------
-    eko :
-        the operator to be rotated
-    targetgrid :
-        xgrid for the target (output PDF)
-    inputgrid :
-        xgrid for the input (input PDF)
+    The operation is in-place.
 
     """
     eko.assert_permissions(write=True)
@@ -41,77 +77,63 @@
     # calling with no arguments is an error
     if targetgrid is None and inputgrid is None:
         raise ValueError("Nor inputgrid nor targetgrid was given")
-    # now check to the current status
-    if (
-        targetgrid is not None
-        and len(targetgrid) == len(eko.rotations.targetgrid)
-        and np.allclose(targetgrid.raw, eko.rotations.targetgrid.raw)
-    ):
-        targetgrid = None
-        warnings.warn("The new targetgrid is close to the current targetgrid")
-    if (
-        inputgrid is not None
-        and len(inputgrid) == len(eko.rotations.inputgrid)
-        and np.allclose(inputgrid.raw, eko.rotations.inputgrid.raw)
-    ):
-        inputgrid = None
-        warnings.warn("The new inputgrid is close to the current inputgrid")
+
+    interpdeg = eko.operator_card.configs.interpolation_polynomial_degree
+    check = xgrid_check
+    crot = xgrid_compute_rotation
+
+    # construct matrices
+    newtarget, targetrot = rotation(
+        targetgrid,
+        eko.bases.targetgrid,
+        check,
+        lambda new, old: crot(new, old, interpdeg),
+    )
+    newinput, inputrot = rotation(
+        inputgrid,
+        eko.bases.inputgrid,
+        check,
+        lambda new, old: crot(new, old, interpdeg, swap=True),
+    )
+
     # after the checks: if there is still nothing to do, skip
-    if targetgrid is None and inputgrid is None:
+    if targetrot is None and inputrot is None:
         logger.debug("Nothing done.")
         return
-
-    # construct matrices
-    if targetgrid is not None:
-        b = interpolation.InterpolatorDispatcher(
-            eko.rotations.targetgrid,
-            eko.operator_card.configs.interpolation_polynomial_degree,
-            False,
-        )
-        target_rot = b.get_interpolation(targetgrid.raw)
-        eko.rotations.targetgrid = targetgrid
-    if inputgrid is not None:
-        b = interpolation.InterpolatorDispatcher(
-            inputgrid,
-            eko.operator_card.configs.interpolation_polynomial_degree,
-            False,
-        )
-        input_rot = b.get_interpolation(eko.rotations.inputgrid.raw)
-        eko.rotations.inputgrid = 
inputgrid + # if no rotation is done, the grids are not modified + if targetrot is not None: + eko.bases.targetgrid = newtarget + if inputrot is not None: + eko.bases.inputgrid = newinput # build new grid - for q2, elem in eko.items(): - ops = elem.operator - errs = elem.error - if targetgrid is not None and inputgrid is None: - ops = np.einsum(TARGETGRID_ROTATION, target_rot, ops, optimize="optimal") - errs = ( - np.einsum(TARGETGRID_ROTATION, target_rot, errs, optimize="optimal") - if errs is not None - else None - ) - elif inputgrid is not None and targetgrid is None: - ops = np.einsum(INPUTGRID_ROTATION, ops, input_rot, optimize="optimal") - errs = ( - np.einsum(INPUTGRID_ROTATION, errs, input_rot, optimize="optimal") - if errs is not None - else None - ) + for ep, elem in eko.items(): + assert elem is not None + + operands = [elem.operator] + operands_errs = [elem.error] + + if targetrot is not None and inputrot is None: + contraction = TARGETGRID_ROTATION + elif inputrot is not None and targetrot is None: + contraction = INPUTGRID_ROTATION else: - ops = np.einsum( - SIMGRID_ROTATION, target_rot, ops, input_rot, optimize="optimal" - ) - errs = ( - np.einsum( - SIMGRID_ROTATION, target_rot, errs, input_rot, optimize="optimal" - ) - if errs is not None - else None - ) - elem.operator = ops - elem.error = errs + contraction = SIMGRID_ROTATION + + if targetrot is not None: + operands.insert(0, targetrot) + operands_errs.insert(0, targetrot) + if inputrot is not None: + operands.append(inputrot) + operands_errs.append(inputrot) + + new_operator = np.einsum(contraction, *operands, optimize="optimal") + if elem.error is not None: + new_error = np.einsum(contraction, *operands_errs, optimize="optimal") + else: + new_error = None - eko[q2] = elem + eko[ep] = Operator(operator=new_operator, error=new_error) eko.update() @@ -124,8 +146,8 @@ def xgrid_reshape( def flavor_reshape( eko: EKO, - targetpids: Optional[np.ndarray] = None, - inputpids: Optional[np.ndarray] = None, + targetpids: Optional[npt.NDArray] = None, + inputpids: Optional[npt.NDArray] = None, update: bool = True, ): """Change the operators to have in the output targetpids and/or in the input inputpids. 
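+
+    A hypothetical sketch, rotating both sides to the evolution basis (the
+    rotation matrix is assumed from :mod:`eko.basis_rotation`)::
+
+        rot = br.rotate_flavor_to_evolution
+        flavor_reshape(eko, targetpids=rot, inputpids=np.linalg.inv(rot))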
@@ -151,12 +173,12 @@ def flavor_reshape(
         raise ValueError("Nor inputpids nor targetpids was given")
     # now check to the current status
     if targetpids is not None and np.allclose(
-        targetpids, np.eye(len(eko.rotations.targetpids))
+        targetpids, np.eye(len(eko.bases.targetpids))
     ):
         targetpids = None
         warnings.warn("The new targetpids is close to current basis")
     if inputpids is not None and np.allclose(
-        inputpids, np.eye(len(eko.rotations.inputpids))
+        inputpids, np.eye(len(eko.bases.inputpids))
    ):
         inputpids = None
         warnings.warn("The new inputpids is close to current basis")
@@ -202,17 +224,15 @@
             if errs is not None
             else None
         )
-        elem.operator = ops
-        elem.error = errs
 
-        eko[q2] = elem
+        eko[q2] = Operator(operator=ops, error=errs)
 
     # drop PIDs - keeping them int nevertheless
     # there is no meaningful way to set them in general, after rotation
     if inputpids is not None:
-        eko.rotations.inputpids = np.array([0] * len(eko.rotations.inputpids))
+        eko.bases.inputpids = np.array([0] * len(eko.bases.inputpids))
     if targetpids is not None:
-        eko.rotations.targetpids = np.array([0] * len(eko.rotations.targetpids))
+        eko.bases.targetpids = np.array([0] * len(eko.bases.targetpids))
 
     if update:
         eko.update()
@@ -221,7 +241,7 @@
 def to_evol(eko: EKO, source: bool = True, target: bool = False):
     """Rotate the operator into evolution basis.
 
-    This also assigns also the pids. The operation is in-place.
+    This also assigns the pids. The operation is in-place.
 
     Parameters
     ----------
@@ -242,9 +262,9 @@
     flavor_reshape(eko, inputpids=inputpids, targetpids=targetpids, update=False)
     # assign pids
     if source:
-        eko.rotations.inputpids = inputpids
+        eko.bases.inputpids = inputpids
     if target:
-        eko.rotations.targetpids = targetpids
+        eko.bases.targetpids = targetpids
 
     eko.update()
 
@@ -252,7 +272,7 @@
 def to_uni_evol(eko: EKO, source: bool = True, target: bool = False):
     """Rotate the operator into evolution basis.
 
-    This also assigns also the pids. The operation is in-place.
+    This also assigns the pids. The operation is in-place.
 
     Parameters
     ----------
@@ -273,8 +293,8 @@
     flavor_reshape(eko, inputpids=inputpids, targetpids=targetpids, update=False)
     # assign pids
     if source:
-        eko.rotations.inputpids = inputpids
+        eko.bases.inputpids = inputpids
     if target:
-        eko.rotations.targetpids = targetpids
+        eko.bases.targetpids = targetpids
 
     eko.update()
diff --git a/src/eko/io/metadata.py b/src/eko/io/metadata.py
new file mode 100644
index 000000000..415b2dfa5
--- /dev/null
+++ b/src/eko/io/metadata.py
@@ -0,0 +1,103 @@
+"""Define `eko.EKO` metadata."""
+import logging
+import os
+import pathlib
+from dataclasses import dataclass
+from typing import Optional
+
+import yaml
+
+from .. import version as vmod
+from .bases import Bases
+from .dictlike import DictLike
+from .paths import InternalPaths
+from .types import EvolutionPoint as EPoint
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class Metadata(DictLike):
+    """Manage metadata, and keep them synced on disk.
+
+    It is possible to have a metadata view, in which the path is not actually
+    connected (i.e. it is set to ``None``). In this case, no update will be
+    possible, of course.
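+
+    A hypothetical usage sketch (``path`` pointing to an open EKO folder)::
+
+        meta = Metadata.load(path)
+        meta.version = "0.0.0"
+        meta.update()  # push the change to the disk copy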
+
+    Note
+    ----
+    Unfortunately, for nested structures it is not possible to detect a change
+    in their attributes, so a call to :meth:`update` has to be performed
+    manually.
+
+    """
+
+    origin: EPoint
+    """Initial scale."""
+    bases: Bases
+    """Manipulation information, describing the current status of the EKO (e.g.
+    `inputgrid` and `targetgrid`).
+    """
+    # tagging information
+    _path: Optional[pathlib.Path] = None
+    """Path to temporary dir."""
+    version: str = vmod.__version__
+    """Library version used to create the corresponding file."""
+    data_version: int = vmod.__data_version__
+    """Specs version, to which the file adheres."""
+
+    @classmethod
+    def load(cls, path: os.PathLike):
+        """Load metadata from open folder.
+
+        Parameters
+        ----------
+        path: os.PathLike
+            the path to the open EKO folder
+
+        Returns
+        -------
+        Metadata
+            loaded metadata
+
+        """
+        path = pathlib.Path(path)
+        content = cls.from_dict(
+            yaml.safe_load(InternalPaths(path).metadata.read_text(encoding="utf-8"))
+        )
+        content._path = path
+        return content
+
+    def update(self):
+        """Update the disk copy of metadata."""
+        if self._path is None:
+            logger.info("Impossible to set metadata, no file attached.")
+        else:
+            with open(InternalPaths(self._path).metadata, "w") as fd:
+                yaml.safe_dump(self.raw, fd)
+
+    @property
+    def path(self):
+        """Access temporary dir path.
+
+        Raises
+        ------
+        RuntimeError
+            if path has not been initialized before
+
+        """
+        if self._path is None:
+            raise RuntimeError(
+                "Access to EKO directory attempted, but no dir has been set."
+            )
+        return self._path
+
+    @path.setter
+    def path(self, value: pathlib.Path):
+        """Set temporary dir path."""
+        self._path = value
+
+    @property
+    def raw(self):
+        """Override default :meth:`DictLike.raw` representation to exclude path."""
+        return self.public_raw
diff --git a/src/eko/io/paths.py b/src/eko/io/paths.py
new file mode 100644
index 000000000..67100fdaa
--- /dev/null
+++ b/src/eko/io/paths.py
@@ -0,0 +1,87 @@
+"""Define paths inside an `eko.EKO` object."""
+import pathlib
+from dataclasses import dataclass
+
+import yaml
+
+THEORYFILE = "theory.yaml"
+OPERATORFILE = "operator.yaml"
+METADATAFILE = "metadata.yaml"
+RECIPESDIR = "recipes"
+PARTSDIR = "parts"
+MATCHINGDIR = "matching"
+OPERATORSDIR = "operators"
+
+
+@dataclass
+class InternalPaths:
+    """Paths inside an EKO folder.
+
+    This structure exists to locate in a single place the internal structure of
+    an EKO folder.
+
+    The only value required is the root path, everything else is computed
+    relative to this root.
+    In case only the relative paths are required, just create this structure
+    with :attr:`root` equal to empty string or ``"."``.
+
+    """
+
+    root: pathlib.Path
+    "The root of the EKO folder (use placeholder if not relevant)"
+
+    @property
+    def metadata(self):
+        """Metadata file."""
+        return self.root / METADATAFILE
+
+    @property
+    def recipes(self):
+        """Recipes folder."""
+        return self.root / RECIPESDIR
+
+    @property
+    def recipes_matching(self):
+        """Matching recipes folder."""
+        return self.root / RECIPESDIR / MATCHINGDIR
+
+    @property
+    def parts(self):
+        """Parts folder."""
+        return self.root / PARTSDIR
+
+    @property
+    def parts_matching(self):
+        """Matching parts folder."""
+        return self.root / PARTSDIR / MATCHINGDIR
+
+    @property
+    def operators(self):
+        """Operators folder.
+
+        This is the one containing the actual EKO components, after
+        computation has been performed.
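+
+        A hypothetical sketch of the relative layout (root as a placeholder,
+        ``PosixPath`` on POSIX systems)::
+
+            >>> InternalPaths(pathlib.Path(".")).operators
+            PosixPath('operators')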
+ + """ + return self.root / OPERATORSDIR + + @property + def theory_card(self): + """Theory card dump.""" + return self.root / THEORYFILE + + @property + def operator_card(self): + """Operator card dump.""" + return self.root / OPERATORFILE + + def bootstrap(self, theory: dict, operator: dict, metadata: dict): + """Create directory structure.""" + self.metadata.write_text(yaml.dump(metadata), encoding="utf-8") + self.theory_card.write_text(yaml.dump(theory), encoding="utf-8") + self.operator_card.write_text(yaml.dump(operator), encoding="utf-8") + self.recipes.mkdir() + self.recipes_matching.mkdir() + self.parts.mkdir() + self.parts_matching.mkdir() + self.operators.mkdir() diff --git a/src/eko/io/runcards.py b/src/eko/io/runcards.py index 6c4bb726d..83c1dae95 100644 --- a/src/eko/io/runcards.py +++ b/src/eko/io/runcards.py @@ -5,35 +5,36 @@ Squares are consistenly taken inside. """ -from dataclasses import dataclass, fields +from dataclasses import dataclass from math import nan from typing import List, Optional, Union import numpy as np import numpy.typing as npt -from eko.thresholds import ThresholdsAtlas - from .. import basis_rotation as br from .. import interpolation, msbar_masses from .. import version as vmod from ..couplings import couplings_mod_ev +from ..matchings import Atlas, nf_default from ..quantities import heavy_quarks as hq from ..quantities.couplings import CouplingsInfo from ..quantities.heavy_quarks import HeavyInfo, QuarkMassScheme from .dictlike import DictLike +from .types import EvolutionMethod +from .types import EvolutionPoint as EPoint from .types import ( - EvolutionMethod, InversionMethod, N3LOAdVariation, Order, RawCard, ScaleVariationsMethod, + SquaredScale, T, - Target, ) +# TODO: add frozen @dataclass class TheoryCard(DictLike): """Represent theory card content.""" @@ -50,23 +51,6 @@ class TheoryCard(DictLike): """|N3LO| anomalous dimension variation: ``(gg_var, gq_var, qg_var, qq_var)``.""" -def masses(theory: TheoryCard, evmeth: EvolutionMethod): - """Compute masses in the chosen scheme.""" - if theory.heavy.masses_scheme is QuarkMassScheme.MSBAR: - return msbar_masses.compute( - theory.heavy.masses, - theory.couplings, - theory.order, - couplings_mod_ev(evmeth), - np.power(theory.heavy.matching_ratios, 2.0), - xif2=theory.xif**2, - ).tolist() - if theory.heavy.masses_scheme is QuarkMassScheme.POLE: - return [mq.value**2 for mq in theory.heavy.masses] - - raise ValueError(f"Unknown mass scheme '{theory.heavy.masses_scheme}'") - - @dataclass class Debug(DictLike): """Debug configurations.""" @@ -107,120 +91,13 @@ class Configs(DictLike): """Number of cores used to parallelize integration.""" -@dataclass -class Rotations(DictLike): - """Rotations related configurations. - - Here "Rotation" is intended in a broad sense: it includes both rotations in - flavor space (labeled with suffix `pids`) and in :math:`x`-space (labeled - with suffix `grid`). - Rotations in :math:`x`-space correspond to reinterpolate the result on a - different basis of polynomials. 
- - """ - - xgrid: interpolation.XGrid - """Internal momentum fraction grid.""" - _targetgrid: Optional[interpolation.XGrid] = None - _inputgrid: Optional[interpolation.XGrid] = None - _targetpids: Optional[npt.NDArray] = None - _inputpids: Optional[npt.NDArray] = None - - def __post_init__(self): - """Adjust types when loaded from serialized object.""" - for attr in ("xgrid", "_inputgrid", "_targetgrid"): - value = getattr(self, attr) - if value is None: - continue - if isinstance(value, (np.ndarray, list)): - setattr(self, attr, interpolation.XGrid(value)) - elif not isinstance(value, interpolation.XGrid): - setattr(self, attr, interpolation.XGrid.load(value)) - - @property - def pids(self): - """Internal flavor basis, used for computation.""" - return np.array(br.flavor_basis_pids) - - @property - def inputpids(self) -> npt.NDArray: - """Provide pids expected on the input PDF.""" - if self._inputpids is None: - return self.pids - return self._inputpids - - @inputpids.setter - def inputpids(self, value): - self._inputpids = value - - @property - def targetpids(self) -> npt.NDArray: - """Provide pids corresponding to the output PDF.""" - if self._targetpids is None: - return self.pids - return self._targetpids - - @targetpids.setter - def targetpids(self, value): - self._targetpids = value - - @property - def inputgrid(self) -> interpolation.XGrid: - """Provide :math:`x`-grid expected on the input PDF.""" - if self._inputgrid is None: - return self.xgrid - return self._inputgrid - - @inputgrid.setter - def inputgrid(self, value: interpolation.XGrid): - self._inputgrid = value - - @property - def targetgrid(self) -> interpolation.XGrid: - """Provide :math:`x`-grid corresponding to the output PDF.""" - if self._targetgrid is None: - return self.xgrid - return self._targetgrid - - @targetgrid.setter - def targetgrid(self, value: interpolation.XGrid): - self._targetgrid = value - - @classmethod - def from_dict(cls, dictionary: dict): - """Deserialize rotation. - - Load from full state, but with public names. - - """ - d = dictionary.copy() - for f in fields(cls): - if f.name.startswith("_"): - d[f.name] = d.pop(f.name[1:]) - return cls._from_dict(d) - - @property - def raw(self): - """Serialize rotation. - - Pass through interfaces, access internal values but with a public name. 
-
-        """
-        d = self._raw()
-        for key in d.copy():
-            if key.startswith("_"):
-                d[key[1:]] = d.pop(key)
-
-        return d
-
-
 @dataclass
 class OperatorCard(DictLike):
     """Operator Card info."""
 
     mu0: float
     """Initial scale."""
-    mugrid: List[Target]
+    mugrid: List[EPoint]
     xgrid: interpolation.XGrid
     """Momentum fraction internal grid."""
@@ -245,6 +122,11 @@ def mu2grid(self) -> npt.NDArray:
         """Grid of squared final scales."""
         return np.array([mu for mu, _ in self.mugrid]) ** 2
 
+    @property
+    def evolgrid(self) -> List[EPoint]:
+        """Grid of final evolution points."""
+        return [(mu**2, nf) for mu, nf in self.mugrid]
+
     @property
     def pids(self):
         """Internal flavor basis, used for computation."""
@@ -291,6 +173,8 @@ def new_theory(self):
         new["order"] = [old["PTO"] + 1, old["QED"]]
         alphaem = self.fallback(old.get("alphaqed"), old.get("alphaem"), default=0.0)
 
+        ms = self.heavies("m%s", old)
+        ks = self.heavies("k%sThr", old)
         new["couplings"] = dict(
             alphas=old["alphas"],
             alphaem=alphaem,
@@ -299,7 +183,9 @@
             max_num_flavs=old["MaxNfAs"],
         )
         new["heavy"] = {
-            "num_flavs_init": old["nf0"],
+            "num_flavs_init": nf_default(old["Qref"] ** 2.0, default_atlas(ms, ks))
+            if old["nf0"] is None
+            else old["nf0"],
             "num_flavs_max_pdf": old["MaxNfPdf"],
             "matching_ratios": self.heavies("k%sThr", old),
             "masses_scheme": old["HQ"],
@@ -309,11 +195,10 @@
             if old.get(f"i{q}".upper()) == 1:
                 intrinsic.append(idx + 4)
         new["heavy"]["intrinsic_flavors"] = intrinsic
-        ms = self.heavies("m%s", old)
-        mus = self.heavies("Qm%s", old)
         if old["HQ"] == "POLE":
             new["heavy"]["masses"] = [[m, nan] for m in ms]
         elif old["HQ"] == "MSBAR":
+            mus = self.heavies("Qm%s", old)
             new["heavy"]["masses"] = [[m, mu] for m, mu in zip(ms, mus)]
         else:
             raise ValueError(f"Unknown mass scheme '{old['HQ']}'")
@@ -394,6 +279,21 @@ def update(theory: Union[RawCard, TheoryCard], operator: Union[RawCard, Operator
     return cards.new_theory, cards.new_operator
 
 
+def default_atlas(masses: list, matching_ratios: list):
+    r"""Create default landscape.
+
+    This method should not be used to write new runcards, but rather to have a
+    consistent default for comparison with other software and existing PDF
+    sets.
+    There is no one-to-one relation between number of running flavors and final
+    scales, unless matchings are all applied. But this is a custom choice,
+    since it is possible to have PDFs in different |FNS| at the same scales.
+
+    """
+    matchings = (np.array(masses) * np.array(matching_ratios)) ** 2
+    return Atlas(matchings.tolist(), (0.0, 0))
+
+
 def flavored_mugrid(mugrid: list, masses: list, matching_ratios: list):
     r"""Upgrade :math:`\mu^2` grid to contain also target number flavors.
 
@@ -408,8 +308,23 @@ def flavored_mugrid(mugrid: list, masses: list, matching_ratios: list):
     since it is possible to have PDFs in different |FNS| at the same scales.
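+
+    A hypothetical sketch (pole masses with unit matching ratios):
+
+    >>> flavored_mugrid([2.0, 10.0], [1.51, 4.92, 172.5], [1, 1, 1])
+    [(2.0, 4), (10.0, 5)]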
""" - tc = ThresholdsAtlas( - masses=(np.array(masses) ** 2).tolist(), - thresholds_ratios=(np.array(matching_ratios) ** 2).tolist(), - ) - return [(mu, int(tc.nf(mu**2))) for mu in mugrid] + atlas = default_atlas(masses, matching_ratios) + return [(mu, nf_default(mu**2, atlas)) for mu in mugrid] + + +# TODO: move to a more suitable place +def masses(theory: TheoryCard, evmeth: EvolutionMethod) -> List[SquaredScale]: + """Compute masses in the chosen scheme.""" + if theory.heavy.masses_scheme is QuarkMassScheme.MSBAR: + return msbar_masses.compute( + theory.heavy.masses, + theory.couplings, + theory.order, + couplings_mod_ev(evmeth), + np.power(theory.heavy.matching_ratios, 2.0).tolist(), + xif2=theory.xif**2, + ).tolist() + if theory.heavy.masses_scheme is QuarkMassScheme.POLE: + return [mq.value**2 for mq in theory.heavy.masses] + + raise ValueError(f"Unknown mass scheme '{theory.heavy.masses_scheme}'") diff --git a/src/eko/io/struct.py b/src/eko/io/struct.py index 956fd3fc5..fecba3c57 100644 --- a/src/eko/io/struct.py +++ b/src/eko/io/struct.py @@ -1,8 +1,6 @@ """Define output representation structures.""" -import base64 import contextlib import copy -import io import logging import os import pathlib @@ -10,471 +8,48 @@ import tarfile import tempfile from dataclasses import dataclass -from typing import BinaryIO, Dict, Optional +from typing import List, Optional -import lz4.frame import numpy as np -import numpy.lib.npyio as npyio -import numpy.typing as npt import yaml from .. import interpolation -from .. import version as vmod from . import exceptions, raw -from .dictlike import DictLike -from .runcards import OperatorCard, Rotations, TheoryCard +from .access import AccessConfigs +from .bases import Bases +from .inventory import Inventory +from .items import Evolution, Matching, Operator, Recipe, Target +from .metadata import Metadata +from .paths import InternalPaths +from .runcards import OperatorCard, TheoryCard +from .types import EvolutionPoint as EPoint +from .types import SquaredScale logger = logging.getLogger(__name__) -THEORYFILE = "theory.yaml" -OPERATORFILE = "operator.yaml" -METADATAFILE = "metadata.yaml" -RECIPESDIR = "recipes" -PARTSDIR = "parts" -OPERATORSDIR = "operators" - -COMPRESSED_SUFFIX = ".lz4" - - -@dataclass -class Operator(DictLike): - """Operator representation. - - To be used to hold the result of a computed 4-dim operator (from a given - scale to another given one). - - Note - ---- - IO works with streams in memory, in order to avoid intermediate write on - disk (keep read from and write to tar file only). - - """ - - operator: npt.NDArray - """Content of the evolution operator.""" - error: Optional[npt.NDArray] = None - """Errors on individual operator elements (mainly used for integration - error, but it can host any kind of error). - """ - - def save(self, stream: BinaryIO, compress: bool = True) -> bool: - """Save content of operator to bytes. 
- - Parameters - ---------- - stream : BinaryIO - a stream where to save the operator content (in order to be able to - perform the operation both on disk and in memory) - compress : bool - flag to control optional compression (default: `True`) - - Returns - ------- - bool - whether the operator saved contained or not the error (this control - even the format, ``npz`` with errors, ``npy`` otherwise) - - """ - aux = io.BytesIO() - if self.error is None: - np.save(aux, self.operator) - else: - np.savez(aux, operator=self.operator, error=self.error) - aux.seek(0) - - # compress if requested - if compress: - content = lz4.frame.compress(aux.read()) - else: - content = aux.read() - - # write compressed data - stream.write(content) - stream.seek(0) - - # return the type of array dumped (i.e. 'npy' or 'npz') - return self.error is None - - @classmethod - def load(cls, stream: BinaryIO, compressed: bool = True): - """Load operator from bytes. - - Parameters - ---------- - stream : BinaryIO - a stream to load the operator from (to support the operation both on - disk and in memory) - compressed : bool - declare whether the stream is or is not compressed (default: `True`) - - Returns - ------- - Operator - the loaded instance - - """ - if compressed: - stream = io.BytesIO(lz4.frame.decompress(stream.read())) - content = np.load(stream) - - if isinstance(content, np.ndarray): - op = content - err = None - elif isinstance(content, npyio.NpzFile): - op = content["operator"] - err = content["error"] - else: - raise exceptions.OperatorLoadingError( - "Not possible to load operator, content format not recognized" - ) - - return cls(operator=op, error=err) - - -@dataclass -class InternalPaths: - """Paths inside an EKO folder. - - This structure exists to locate in a single place the internal structure of - an EKO folder. - - The only value required is the root path, everything else is computed - relative to this root. - In case only the relative paths are required, just create this structure - with :attr:`root` equal to emtpty string or ``"."``. - - """ - - root: pathlib.Path - "The root of the EKO folder (use placeholder if not relevant)" - - @property - def metadata(self): - """Metadata file.""" - return self.root / METADATAFILE - - @property - def recipes(self): - """Recipes folder.""" - return self.root / RECIPESDIR - - @property - def parts(self): - """Parts folder.""" - return self.root / PARTSDIR - - @property - def operators(self): - """Operators folder. - - This is the one containing the actual EKO components, after - computation has been performed. - - """ - return self.root / OPERATORSDIR - - @property - def theory_card(self): - """Theory card dump.""" - return self.root / THEORYFILE - - @property - def operator_card(self): - """Operator card dump.""" - return self.root / OPERATORFILE - - def bootstrap(self, theory: dict, operator: dict, metadata: dict): - """Create directory structure. 
- - Parameters - ---------- - dirpath : os.PathLike - path to create the directory into - theory : dict - theory card to be dumped - operator : dict - operator card to be dumped - metadata : dict - metadata of the operator - - """ - self.metadata.write_text(yaml.dump(metadata), encoding="utf-8") - self.theory_card.write_text(yaml.dump(theory), encoding="utf-8") - self.operator_card.write_text(yaml.dump(operator), encoding="utf-8") - self.recipes.mkdir() - self.parts.mkdir() - self.operators.mkdir() - - @staticmethod - def opname(mu2: float) -> str: - r"""Operator file name from :math:`\mu^2` value.""" - decoded = np.float64(mu2).tobytes() - return base64.urlsafe_b64encode(decoded).decode() - - def oppath(self, mu2: float) -> pathlib.Path: - r"""Retrieve operator file path from :math:`\mu^2` value. - - This method looks for an existing path matching. - - Parameters - ---------- - mu2: float - :math:`\mu2` scale specified - - Returns - ------- - pathlib.Path - the path retrieved, guaranteed to exist - - Raises - ------ - ValueError - if the path is not found, or more than one are matching the - specified value of :math:`\mu2` - - """ - name = self.opname(mu2) - oppaths = list( - filter(lambda path: path.name.startswith(name), self.operators.iterdir()) - ) - - if len(oppaths) == 0: - raise ValueError(f"mu2 value '{mu2}' not available.") - elif len(oppaths) > 1: - raise ValueError(f"Too many operators associated to '{mu2}':\n{oppaths}") - - return oppaths[0] - - def opcompressed(self, path: os.PathLike) -> bool: - """Check if the operator at the path specified is compressed. - - Parameters - ---------- - path: os.PathLike - the path to the operator to check - - Returns - ------- - bool - whether it is compressed - - Raises - ------ - OperatorLocationError - if the path is not inside the operators folder - - """ - path = pathlib.Path(path) - if self.operators not in path.parents: - raise exceptions.OperatorLocationError(path) - - return path.suffix == COMPRESSED_SUFFIX - - def opmu2(self, path: os.PathLike) -> float: - r"""Extract :math:`\mu2` value from operator path. - - Parameters - ---------- - path: os.PathLike - the path to the operator - - Returns - ------- - bool - the :math:`\mu2` value associated to the operator - - Raises - ------ - OperatorLocationError - if the path is not inside the operators folder - - """ - path = pathlib.Path(path) - if self.operators not in path.parents: - raise exceptions.OperatorLocationError(path) - - encoded = path.stem.split(".")[0] - decoded = base64.urlsafe_b64decode(encoded) - return float(np.frombuffer(decoded, dtype=np.float64)[0]) - - def opnewpath( - self, mu2: float, compress: bool = True, without_err: bool = True - ) -> pathlib.Path: - r"""Compute the path associated to :math:`\mu^2` value. 
- - Parameters - ---------- - mu2: float - :math:`\mu2` scale specified - - Returns - ------- - pathlib.Path - the path computed, it might already exists - - """ - suffix = ".npy" if without_err else ".npz" - if compress: - suffix += COMPRESSED_SUFFIX - return self.operators / (self.opname(mu2) + suffix) - - @property - def mu2grid(self) -> npt.NDArray[np.float64]: - r"""Provide the array of :math:`\mu2` values of existing operators.""" - if self.root is None: - raise RuntimeError() - - return np.array( - sorted( - map( - lambda p: self.opmu2(p), - InternalPaths(self.root).operators.iterdir(), - ) - ) - ) - - -@dataclass -class AccessConfigs: - """Configurations specified during opening of an EKO.""" - - path: pathlib.Path - """The path to the permanent object.""" - readonly: bool - "Read-only flag" - open: bool - "EKO status" - - def assert_open(self): - """Assert operator is open. - - Raises - ------ - exceptions.ClosedOperator - if operator is closed - - """ - if not self.open: - raise exceptions.ClosedOperator - - def assert_writeable(self, msg: Optional[str] = None): - """Assert operator is writeable. - - Raises - ------ - exceptions.ClosedOperator - see :meth:`assert_open` - exceptions.ReadOnlyOperator - if operators has been declared read-only - - """ - if msg is None: - msg = "" - - self.assert_open() - if self.readonly: - raise exceptions.ReadOnlyOperator(msg) - - @property - def read(self): - """Check reading permission. - - Reading access is always granted on open operator. - - """ - return self.open - - @property - def write(self): - """Check writing permission.""" - return self.open and not self.readonly - - -@dataclass -class Metadata(DictLike): - """Manage metadata, and keep them synced on disk. - - It is possible to have a metadata view, in which the path is not actually - connected (i.e. it is set to ``None``). In this case, no update will be - possible, of course. - - Note - ---- - Unfortunately, for nested structures it is not possible to detect a change - in their attributes, so a call to :meth:`update` has to be performed - manually. - - """ - - mu20: float - """Inital scale.""" - rotations: Rotations - """Manipulation information, describing the current status of the EKO (e.g. - `inputgrid` and `targetgrid`). - """ - # tagging information - _path: Optional[pathlib.Path] = None - """Path to temporary dir.""" - version: str = vmod.__version__ - """Library version used to create the corresponding file.""" - data_version: int = vmod.__data_version__ - """Specs version, to which the file adheres.""" - - @classmethod - def load(cls, path: os.PathLike): - """Load metadata from open folder. - - Parameters - ---------- - path: os.PathLike - the path to the open EKO folder - - Returns - ------- - bool - loaded metadata - - """ - path = pathlib.Path(path) - content = cls.from_dict( - yaml.safe_load(InternalPaths(path).metadata.read_text(encoding="utf-8")) - ) - content._path = path - return content - - def update(self): - """Update the disk copy of metadata.""" - if self._path is None: - logger.info("Impossible to set metadata, no file attached.") - else: - with open(InternalPaths(self._path).metadata, "w") as fd: - yaml.safe_dump(self.raw, fd) - - @property - def path(self): - """Access temporary dir path. - - Raises - ------ - RuntimeError - if path has not been initialized before - - """ - if self._path is None: - raise RuntimeError( - "Access to EKO directory attempted, but not dir has been set." 
-            )
-        return self._path
-
-    @path.setter
-    def path(self, value: pathlib.Path):
-        """Set temporary dir path."""
-        self._path = value
-
-    @property
-    def raw(self):
-        """Override default :meth:`DictLike.raw` representation to exclude path."""
-        return self.public_raw
+TEMP_PREFIX = "eko-"
+
+
+def inventories(path: pathlib.Path, access: AccessConfigs) -> dict:
+    """Set up empty inventories for object initialization."""
+    paths = InternalPaths(path)
+    return dict(
+        recipes=Inventory(
+            paths.recipes, access, Evolution, contentless=True, name="recipes"
+        ),
+        recipes_matching=Inventory(
+            paths.recipes_matching,
+            access,
+            Matching,
+            contentless=True,
+            name="matching-recipes",
+        ),
+        parts=Inventory(paths.parts, access, Evolution, name="parts"),
+        parts_matching=Inventory(
+            paths.parts_matching, access, Matching, name="matching-parts"
+        ),
+        operators=Inventory(paths.operators, access, Target, name="operators"),
+    )
 
 
 @dataclass
@@ -510,8 +85,11 @@ class EKO:
 
     """
 
-    # operators cache, contains the Q2 grid information
-    _operators: Dict[float, Optional[Operator]]
+    recipes: Inventory[Evolution]
+    recipes_matching: Inventory[Matching]
+    parts: Inventory[Evolution]
+    parts_matching: Inventory[Matching]
+    operators: Inventory[Target]
 
     # public containers
     # -----------------
@@ -528,30 +106,35 @@ def paths(self) -> InternalPaths:
         return InternalPaths(self.metadata.path)
 
     @property
-    def rotations(self) -> Rotations:
-        """Rotations information."""
-        return self.metadata.rotations
+    def bases(self) -> Bases:
+        """Bases information."""
+        return self.metadata.bases
 
     @property
     def xgrid(self) -> interpolation.XGrid:
         """Momentum fraction internal grid."""
-        return self.rotations.xgrid
+        return self.bases.xgrid
 
     @xgrid.setter
     def xgrid(self, value: interpolation.XGrid):
         """Set `xgrid` value."""
-        self.rotations.xgrid = value
+        self.bases.xgrid = value
         self.update()
 
     @property
-    def mu20(self) -> float:
+    def mu20(self) -> SquaredScale:
         """Provide squared initial scale."""
-        return self.metadata.mu20
+        return self.metadata.origin[0]
 
     @property
-    def mu2grid(self) -> npt.NDArray:
+    def mu2grid(self) -> List[SquaredScale]:
         """Provide the list of :math:`Q^2` as an array."""
-        return np.array(list(self._operators))
+        return [ep.scale for ep in self.operators]
+
+    @property
+    def evolgrid(self) -> List[EPoint]:
+        """Provide the list of evolution points."""
+        return list(self)
 
     @property
     def theory_card(self):
@@ -587,126 +170,58 @@ def permissions(self):
         """Provide permissions information."""
         return dict(read=self.access.read, write=self.access.write)
 
-    # operator management
+    # recipes management
     # -------------------
-    def __getitem__(self, mu2: float) -> Operator:
-        r"""Retrieve operator for given :math:`\mu^2`.
-
-        If the operator is not already in memory, it will be automatically
-        loaded.
-
-        Parameters
-        ----------
-        mu2 : float
-            :math:`\mu^2` value labeling the operator to be retrieved
-
-        Returns
-        -------
-        Operator
-            the retrieved operator
-
-        """
-        self.access.assert_open()
-
-        if mu2 in self._operators:
-            op = self._operators[mu2]
-            if op is not None:
-                return op
-
-        oppath = self.paths.oppath(mu2)
-        compressed = self.paths.opcompressed(oppath)
-
-        with open(oppath, "rb") as fd:
-            op = Operator.load(fd, compressed=compressed)
-
-        self._operators[mu2] = op
-        return op
-
-    def __setitem__(self, mu2: float, op: Operator, compress: bool = True):
-        """Set operator for given :math:`Q^2`.
-
-        The operator is automatically dumped on disk.
- - Parameters - ---------- - q2 : float - :math:`Q^2` value labeling the operator to be set - op : Operator - the retrieved operator - compress : bool - whether to save the operator compressed or not (default: `True`) - - """ - self.access.assert_writeable() - - if not isinstance(op, Operator): - raise ValueError("Only an Operator can be added to an EKO") - - without_err = op.error is None - oppath = self.paths.opnewpath(mu2, compress=compress, without_err=without_err) - with open(oppath, "wb") as fd: - without_err2 = op.save(fd, compress) - assert without_err == without_err2 + def load_recipes(self, recipes: List[Recipe]): + """Load recipes in bulk.""" + for recipe in recipes: + # leverage auto-save + if isinstance(recipe, Evolution): + self.recipes[recipe] = None + else: + self.recipes_matching[recipe] = None - self._operators[mu2] = op - - def __delitem__(self, q2: float): - """Drop operator from memory. - - This method only drops the operator from memory, and it's not expected - to do anything else. - - Autosave is done on set, and explicit saves are performed by the - computation functions. - - If a further explicit save is required, repeat explicit assignment:: - - eko[q2] = eko[q2] - - This is only useful if the operator has been mutated in place, that in - general should be avoided, since the operator should only be the result - of a full computation or a library manipulation. - - - Parameters - ---------- - q2 : float - the value of :math:`Q^2` for which the corresponding operator - should be dropped - - """ - self._operators[q2] = None + # operator management + # ------------------- + def __getitem__(self, ep: EPoint) -> Optional[Operator]: + r"""Retrieve operator for given evolution point.""" + return self.operators[Target.from_ep(ep)] + + def __setitem__(self, ep: EPoint, op: Operator): + """Set operator associated to an evolution point.""" + self.operators[Target.from_ep(ep)] = op + + def __delitem__(self, ep: EPoint): + """Drop operator from memory.""" + del self.operators[Target.from_ep(ep)] + + def __delattr__(self, name: str): + """Empty an inventory cache.""" + attr = getattr(self, name) + if isinstance(attr, Inventory): + attr.empty() + else: + super().__delattr__(name) @contextlib.contextmanager - def operator(self, q2: float): + def operator(self, ep: EPoint): """Retrieve an operator and discard it afterwards. To be used as a contextmanager: the operator is automatically loaded as usual, but on the closing of the context manager it is dropped from memory. - Parameters - ---------- - q2 : float - :math:`Q^2` value labeling the operator to be retrieved - """ try: - yield self[q2] + yield self[ep] finally: - del self[q2] + del self[ep] def __iter__(self): - """Iterate over keys (i.e. Q2 values). - - Yields - ------ - float - q2 values - - """ - yield from self._operators + """Iterate over keys (i.e. evolution points).""" + for target in self.operators: + yield target.ep def items(self): """Iterate operators, with minimal load. @@ -725,43 +240,34 @@ def items(self): immediately after """ - for q2 in self.mu2grid: - yield q2, self[q2] - del self[q2] + for target in self.operators: + # recast to evolution point + ep = target.ep - def __contains__(self, q2: float) -> bool: - """Check whether :math:`Q^2` operators are present. + # auto-load + op = self[ep] + assert op is not None + yield ep, op + # auto-unload + del self[ep] - 'Present' means, in this case, they are conceptually part of the - :class:`EKO`. But it is telling nothing about being loaded in memory or - not. 
+    def __contains__(self, ep: EPoint) -> bool:
+        """Check whether the operator related to the evolution point is present.
 
-        Returns
-        -------
-        bool
-            the result of checked condition
+        'Present' means, in this case, they are available in the :class:`EKO`.
+        But it is telling nothing about being loaded in memory or not.
 
         """
-        return q2 in self._operators
+        return Target.from_ep(ep) in self.operators
 
     def approx(
-        self, q2: float, rtol: float = 1e-6, atol: float = 1e-10
-    ) -> Optional[float]:
-        """Look for close enough :math:`Q^2` value in the :class:`EKO`.
+        self, ep: EPoint, rtol: float = 1e-6, atol: float = 1e-10
+    ) -> Optional[EPoint]:
+        r"""Look for close enough evolution point in the :class:`EKO`.
 
-        Parameters
-        ----------
-        q2 : float
-            value of :math:`Q2` in which neighbourhood to look
-        rtol : float
-            relative tolerance
-        atol : float
-            absolute tolerance
-
-        Returns
-        -------
-        float or None
-            retrieved value of :math:`Q^2`, if a single one is found
+        The distance is mostly evaluated along the :math:`\mu^2` dimension,
+        while :math:`n_f` is considered with a discrete distance: if two points
+        do not have the same :math:`n_f`, they are considered far apart.
 
         Raises
         ------
@@ -769,19 +275,20 @@
             if multiple values are find in the neighbourhood
 
         """
-        q2s = self.mu2grid
-        close = q2s[np.isclose(q2, q2s, rtol=rtol, atol=atol)]
+        eps = np.array([ep_ for ep_ in self if ep_[1] == ep[1]])
+        mu2s = np.array([mu2 for mu2, _ in eps])
+        close = eps[np.isclose(ep[0], mu2s, rtol=rtol, atol=atol)]
 
-        if close.size == 1:
-            return close[0]
-        if close.size == 0:
+        if len(close) == 1:
+            return tuple(close[0])
+        if len(close) == 0:
             return None
-        raise ValueError(f"Multiple values of Q2 have been found close to {q2}")
+        raise ValueError(f"Multiple values of Q2 have been found close to {ep}")
 
     def unload(self):
         """Fully unload the operators in memory."""
-        for q2 in self:
-            del self[q2]
+        for ep in self:
+            del self[ep]
 
     # operator management
     # -------------------
@@ -824,7 +331,7 @@
         new.access.readonly = False
         new.access.open = True
 
-        tmpdir = pathlib.Path(tempfile.mkdtemp())
+        tmpdir = pathlib.Path(tempfile.mkdtemp(prefix=TEMP_PREFIX))
         new.metadata.path = tmpdir
         # copy old dir to new dir
         tmpdir.rmdir()
@@ -863,17 +370,18 @@
         elif mode in "a":
             load = True
         else:
-            raise ValueError
+            raise ValueError(f"Unknown file mode: {mode}")
 
-        tmpdir = pathlib.Path(tempfile.mkdtemp())
+        tmpdir = pathlib.Path(tempfile.mkdtemp(prefix=TEMP_PREFIX))
         if load:
             cls.load(path, tmpdir)
             metadata = Metadata.load(tmpdir)
             opened = cls(
-                _operators={mu2: None for mu2 in InternalPaths(metadata.path).mu2grid},
+                **inventories(tmpdir, access),
                 metadata=metadata,
                 access=access,
            )
+            opened.operators.sync()
         else:
             opened = Builder(path=tmpdir, access=access)
 
@@ -979,7 +487,7 @@ def raw(self) -> dict:
             operators themselves
 
         """
-        return dict(mu2grid=self.mu2grid.tolist(), metadata=self.metadata.raw)
+        return dict(mu2grid=self.mu2grid, metadata=self.metadata.raw)
 
 
 @dataclass
@@ -1042,8 +550,8 @@
         self.access.open = True
         metadata = Metadata(
            _path=self.path,
-            mu20=self.operator.mu20,
-            rotations=Rotations(xgrid=self.operator.xgrid),
+            origin=(self.operator.mu20, self.theory.heavy.num_flavs_init),
+            bases=Bases(xgrid=self.operator.xgrid),
         )
         InternalPaths(self.path).bootstrap(
             theory=self.theory.raw,
@@ -1052,7 +560,7 @@
         )
 
         self.eko = EKO(
-            _operators={mu2: None for mu2 in self.operator.mu2grid},
+            **inventories(self.path, self.access),
             metadata=metadata,
             access=self.access,
         )
diff --git a/src/eko/io/types.py b/src/eko/io/types.py
index 4d260e1c4..5573bafbb 100644
--- a/src/eko/io/types.py
+++ b/src/eko/io/types.py
@@ -25,10 +25,10 @@
 IntrinsicFlavors = typing.List[FlavorIndex]
 N3LOAdVariation = typing.Tuple[int, int, int, int]
 
-# Targets
-# -------
+# Evolution coordinates
+# ---------------------
 
-Target = Tuple[LinearScale, FlavorsNumber]
+EvolutionPoint = Tuple[Scale, FlavorsNumber]
 
 # Scale functions
diff --git a/src/eko/kernels/singlet_qed.py b/src/eko/kernels/singlet_qed.py
index e85ad5112..d6e57c537 100644
--- a/src/eko/kernels/singlet_qed.py
+++ b/src/eko/kernels/singlet_qed.py
@@ -5,7 +5,6 @@
 from ekore import anomalous_dimensions as ad
 
 from .. import beta
-from . import utils
 
 
 @nb.njit(cache=True)
diff --git a/src/eko/matchings.py b/src/eko/matchings.py
new file mode 100644
index 000000000..20d552726
--- /dev/null
+++ b/src/eko/matchings.py
@@ -0,0 +1,209 @@
+r"""Holds the classes that define the |FNS|."""
+import logging
+from dataclasses import dataclass
+from typing import List, Union
+
+import numpy as np
+
+from .io.types import EvolutionPoint as EPoint
+from .io.types import FlavorIndex, FlavorsNumber, SquaredScale
+from .quantities.heavy_quarks import MatchingScales
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass(frozen=True)
+class Segment:
+    """Oriented path in the threshold landscape."""
+
+    origin: SquaredScale
+    """Starting point."""
+    target: SquaredScale
+    """Final point."""
+    nf: FlavorsNumber
+    """Number of active flavors."""
+
+    @property
+    def is_downward(self) -> bool:
+        """Return True if ``origin`` is bigger than ``target``."""
+        return self.origin > self.target
+
+    def __str__(self):
+        """Textual representation, mainly for logging purpose."""
+        return f"Segment({self.origin} -> {self.target}, nf={self.nf})"
+
+
+Path = List[Segment]
+
+
+@dataclass(frozen=True)
+class Matching:
+    """Matching between two different segments.
+
+    The meaning of the flavor index `hq` is the PID of the corresponding heavy
+    quark.
+
+    """
+
+    scale: SquaredScale
+    hq: FlavorIndex
+
+
+MatchedPath = List[Union[Segment, Matching]]
+
+
+class Atlas:
+    r"""Holds information about the matching scales.
+
+    These are the scales that :math:`Q^2` has to pass in order to get there
+    from a given :math:`Q^2_{ref}`.
+
+    """
+
+    def __init__(self, matching_scales: MatchingScales, origin: EPoint):
+        """Create basic atlas."""
+        self.walls = [0] + matching_scales + [np.inf]
+        self.origin = self.normalize(origin)
+
+        logger.info(str(self))
+
+    def normalize(self, target: EPoint) -> EPoint:
+        """Fill number of flavors if needed."""
+        if target[1] is not None:
+            return target
+        return (target[0], nf_default(target[0], self))
+
+    def __str__(self):
+        """Textual representation, mainly for logging purpose."""
+        walls = " - ".join([f"{w:.2e}" for w in self.walls])
+        return f"Atlas [{walls}], ref={self.origin[0]} @ {self.origin[1]}"
+
+    @classmethod
+    def ffns(cls, nf: int, mu2: SquaredScale):
+        """Create a |FFNS| setup.
+
+        The function simply creates sufficient thresholds at ``0`` (in the
+        beginning), since the number of flavors is determined by counting
+        from below.
+
+        The origin is set with that number of flavors.
+
+        """
+        matching_scales = MatchingScales([0] * (nf - 3) + [np.inf] * (6 - nf))
+        origin = (mu2, nf)
+        return cls(matching_scales, origin)
+
+    def path(self, target: EPoint) -> Path:
+        """Determine the path to the target evolution point.
+class Atlas:
+    r"""Holds information about the matching scales.
+
+    These are the scales that :math:`Q^2` has to cross in order to get from a
+    given :math:`Q^2_{ref}` to the target scale.
+
+    """
+
+    def __init__(self, matching_scales: MatchingScales, origin: EPoint):
+        """Create basic atlas."""
+        self.walls = [0] + matching_scales + [np.inf]
+        self.origin = self.normalize(origin)
+
+        logger.info(str(self))
+
+    def normalize(self, target: EPoint) -> EPoint:
+        """Fill number of flavors if needed."""
+        if target[1] is not None:
+            return target
+        return (target[0], nf_default(target[0], self))
+
+    def __str__(self):
+        """Textual representation, mainly for logging purposes."""
+        walls = " - ".join([f"{w:.2e}" for w in self.walls])
+        return f"Atlas [{walls}], ref={self.origin[0]} @ {self.origin[1]}"
+
+    @classmethod
+    def ffns(cls, nf: int, mu2: SquaredScale):
+        """Create a |FFNS| setup.
+
+        The function simply creates sufficient thresholds at ``0`` (in the
+        beginning), since the number of flavors is determined by counting
+        from below.
+
+        The origin is set with that number of flavors.
+
+        """
+        matching_scales = MatchingScales([0] * (nf - 3) + [np.inf] * (6 - nf))
+        origin = (mu2, nf)
+        return cls(matching_scales, origin)
+
+    def path(self, target: EPoint) -> Path:
+        """Determine the path to the target evolution point.
+
+        Essentially, the path is always monotonic in the number of flavors,
+        increasing or decreasing the active flavors by one unit every time a
+        matching happens at the suitable scale.
+
+        Examples
+        --------
+        Since this can result in a counter-intuitive behavior, let's walk through some examples.
+
+        Starting with the intuitive one:
+        >>> Atlas([10, 20, 30], (5, 3)).path((25, 5))
+        [Segment(origin=5, target=10, nf=3), Segment(origin=10, target=20, nf=4), Segment(origin=20, target=25, nf=5)]
+
+        If the requested number of flavors has already been reached, it will
+        continue walking without matching again:
+        >>> Atlas([10, 20, 30], (5, 3)).path((25, 4))
+        [Segment(origin=5, target=10, nf=3), Segment(origin=10, target=25, nf=4)]
+
+        The scale you start from is irrelevant: to step from 3 to 4 flavors you
+        have to cross the charm matching scale, whether this means walking
+        upward or downward.
+        >>> Atlas([10, 20, 30], (15, 3)).path((25, 5))
+        [Segment(origin=15, target=10, nf=3), Segment(origin=10, target=20, nf=4), Segment(origin=20, target=25, nf=5)]
+
+        An actual backward evolution is defined by lowering the number of
+        flavors going from origin to target:
+        >>> Atlas([10, 20, 30], (25, 5)).path((5, 3))
+        [Segment(origin=25, target=20, nf=5), Segment(origin=20, target=10, nf=4), Segment(origin=10, target=5, nf=3)]
+
+        But the only difference is in the matching between two segments, since
+        a single segment always happens at a fixed number of flavors, and it is
+        completely analogous for upward or downward evolution.
+
+        Note
+        ----
+        Since the only task required to determine a path is interleaving the
+        correct matching scales, this is done by slicing the walls.
+
+        """
+        mu20, nf0 = self.origin
+        mu2f, nff = self.normalize(target)
+
+        # determine direction and python slice modifier
+        rc, shift = (-1, -3) if nff < nf0 else (1, -2)
+
+        # join all necessary points in one list
+        boundaries = [mu20] + self.walls[nf0 + shift : nff + shift : rc] + [mu2f]
+
+        return [
+            Segment(boundaries[i], mu2, nf0 + i * rc)
+            for i, mu2 in enumerate(boundaries[1:])
+        ]
+
+    def matched_path(self, target: EPoint) -> MatchedPath:
+        """Determine the path to the target, including matchings.
+
+        In practice, just a wrapper around :meth:`path` adding the intermediate
+        matchings.
+
+        """
+        path = self.path(target)
+
+        prev = path[0]
+        matched: MatchedPath = [prev]
+        for seg in path[1:]:
+            matching = Matching(prev.target, max(prev.nf, seg.nf))
+
+            matched.append(matching)
+            matched.append(seg)
+            prev = seg
+
+        return matched
+
+
+def nf_default(mu2: SquaredScale, atlas: Atlas) -> FlavorsNumber:
+    r"""Determine the number of active flavors in the *default flow*.
+
+    The default flow is defined by the natural sorting of the matching scales:
+
+    .. math::
+
+        \mu_c < \mu_b < \mu_t
+
+    So, the flow starts with 3 flavors below the charm matching scale, and
+    increases by one every time a matching scale is passed while increasing
+    the scale.
+
+    """
+    ref_idx = np.digitize(mu2, atlas.walls)
+    return int(2 + ref_idx)
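A quick sanity check of the default flow described above, as a minimal sketch (walls and scales illustrative):

    from eko.matchings import Atlas, nf_default

    atlas = Atlas([10.0, 20.0, 30.0], (5.0, None))  # origin nf is filled in by normalize
    assert nf_default(5.0, atlas) == 3   # below the charm wall
    assert nf_default(15.0, atlas) == 4  # between the charm and bottom walls
    assert nf_default(1e3, atlas) == 6   # above the top wall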
+def is_downward_path(path: Path) -> bool:
+    r"""Determine if a path is downward.
+
+    The criteria are:
+
+    - in the number of active flavors, when the path list contains more than
+      one :class:`Segment`; note this can be different from each
+      :attr:`Segment.is_downward`
+    - in :math:`\mu^2`, when just one single :class:`Segment` is given
+
+    """
+    if len(path) == 1:
+        return path[0].is_downward
+    return path[1].nf < path[0].nf
+
+
+def flavor_shift(is_downward: bool) -> int:
+    """Determine the shift to the number of light flavors."""
+    return 4 if is_downward else 3
diff --git a/src/eko/member.py b/src/eko/member.py
index ad3fb2e19..f03b0e9b4 100644
--- a/src/eko/member.py
+++ b/src/eko/member.py
@@ -5,6 +5,9 @@
 import numpy as np

+from . import basis_rotation as br
+from .evolution_operator import flavors
+

 class OpMember:
     """
@@ -263,6 +266,50 @@ def operator_multiply(left, right, operation):
                 new_oms[new_key] += operation(l_op, r_op)
         return new_oms

+    def to_flavor_basis_tensor(self, qed: bool = False):
+        """Convert the computations into a rank-4 tensor.
+
+        A sparse tensor defined with dot-notation (e.g. ``S.g``) is converted
+        to a plain rank-4 array over flavor operator space and momentum
+        fraction operator space.
+
+        If `qed` is passed, the unified intrinsic basis is used.
+
+        """
+        nf_in, nf_out = flavors.get_range(self.op_members.keys(), qed)
+        len_pids = len(br.flavor_basis_pids)
+        len_xgrid = list(self.op_members.values())[0].value.shape[0]
+        # dimension will be pids^2 * xgrid^2
+        value_tensor = np.zeros((len_pids, len_xgrid, len_pids, len_xgrid))
+        error_tensor = value_tensor.copy()
+        for name, op in self.op_members.items():
+            if not qed:
+                in_pids = flavors.pids_from_intrinsic_evol(name.input, nf_in, False)
+                out_pids = flavors.pids_from_intrinsic_evol(name.target, nf_out, True)
+            else:
+                in_pids = flavors.pids_from_intrinsic_unified_evol(
+                    name.input, nf_in, False
+                )
+                out_pids = flavors.pids_from_intrinsic_unified_evol(
+                    name.target, nf_out, True
+                )
+            for out_idx, out_weight in enumerate(out_pids):
+                for in_idx, in_weight in enumerate(in_pids):
+                    # keep the outer index to the left as we're multiplying from the right
+                    value_tensor[
+                        out_idx,  # output pid (position)
+                        :,  # output momentum fraction
+                        in_idx,  # input pid (position)
+                        :,  # input momentum fraction
+                    ] += out_weight * (op.value * in_weight)
+                    error_tensor[
+                        out_idx,  # output pid (position)
+                        :,  # output momentum fraction
+                        in_idx,  # input pid (position)
+                        :,  # input momentum fraction
+                    ] += out_weight * (op.error * in_weight)
+        return value_tensor, error_tensor
+

 class ScalarOperator(OperatorBase):
     """Operator above space of real numbers."""
diff --git a/src/eko/msbar_masses.py b/src/eko/msbar_masses.py
index 36f966801..356a81019 100644
--- a/src/eko/msbar_masses.py
+++ b/src/eko/msbar_masses.py
@@ -10,9 +10,9 @@
 from .couplings import Couplings, invert_matching_coeffs
 from .gamma import gamma
 from .io.types import FlavorsNumber, Order
+from .matchings import Atlas, flavor_shift, is_downward_path
 from .quantities.couplings import CouplingEvolutionMethod, CouplingsInfo
 from .quantities.heavy_quarks import HeavyQuarkMasses, QuarkMassRef, QuarkMassScheme
-from .thresholds import ThresholdsAtlas, flavor_shift, is_downward_path


 def ker_exact(a0, a1, order, nf):
@@ -268,7 +268,16 @@ def rge(m2, q2m_ref, strong_coupling, xif2, nf_ref):
     return float(msbar_mass)


-def evolve(m2_ref, q2m_ref, strong_coupling, xif2, q2_to, nf_ref=None, nf_to=None):
+def evolve(
+    m2_ref,
+    q2m_ref,
+    strong_coupling,
+    thresholds_ratios,
+    xif2,
+    q2_to,
+    nf_ref=None,
+    nf_to=None,
+):
     r"""Perform the |MSbar| mass
evolution up to given scale. It allows for different number of active flavors. @@ -297,13 +306,11 @@ def evolve(m2_ref, q2m_ref, strong_coupling, xif2, q2_to, nf_ref=None, nf_to=Non :math:`m_{\overline{MS}}(\mu_2)^2` """ - thr_atlas = ThresholdsAtlas( - np.array(strong_coupling.thresholds.area_walls)[1:-1], - q2m_ref, - nf_ref, - strong_coupling.thresholds.thresholds_ratios, + matching_scales = np.array(strong_coupling.atlas.walls)[1:-1] * np.array( + thresholds_ratios ) - path = thr_atlas.path(q2_to, nf_to) + atlas = Atlas(matching_scales.tolist(), (q2m_ref, nf_ref)) + path = atlas.path((q2_to, nf_to)) is_downward = is_downward_path(path) shift = flavor_shift(is_downward) @@ -311,14 +318,14 @@ def evolve(m2_ref, q2m_ref, strong_coupling, xif2, q2_to, nf_ref=None, nf_to=Non for k, seg in enumerate(path): # skip a very short segment, but keep the matching ker_evol = 1.0 - if not np.isclose(seg.q2_from, seg.q2_to): + if not np.isclose(seg.origin, seg.target): ker_evol = ( - ker_dispatcher(seg.q2_to, seg.q2_from, strong_coupling, xif2, seg.nf) + ker_dispatcher(seg.target, seg.origin, strong_coupling, xif2, seg.nf) ** 2 ) # apply matching condition if k < len(path) - 1: - L = np.log(thr_atlas.thresholds_ratios[seg.nf - shift]) + L = np.log(thresholds_ratios[seg.nf - shift]) m_coeffs = ( compute_matching_coeffs_down(seg.nf - 1) if is_downward @@ -328,7 +335,7 @@ def evolve(m2_ref, q2m_ref, strong_coupling, xif2, q2_to, nf_ref=None, nf_to=Non for pto in range(1, strong_coupling.order[0]): # 0**0=1, from NNLO there is a matching also in this case for logpow in range(pto + 1): - as_thr = strong_coupling.a(seg.q2_to * xif2, seg.nf - shift + 4)[0] + as_thr = strong_coupling.a(seg.target * xif2, seg.nf - shift + 4)[0] matching += as_thr**pto * L**logpow * m_coeffs[pto, logpow] ker_evol *= matching ev_mass *= ker_evol @@ -380,7 +387,7 @@ def sc(thr_masses): order=order, method=evmeth, masses=thr_masses, - thresholds_ratios=matching * xif2, + thresholds_ratios=(np.array(matching) * xif2).tolist(), hqm_scheme=QuarkMassScheme.MSBAR, ) @@ -410,12 +417,12 @@ def sc(thr_masses): if q_idx + 4 == nf_ref and q2m_ref > mu2_ref: raise ValueError( f"In MSbar scheme, Qm{hq} should be lower than Qref, " - "if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" + f"if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" ) if q_idx + 4 == nf_ref + 1 and q2m_ref < mu2_ref: raise ValueError( f"In MSbar scheme, Qm{hq} should be greater than Qref, " - "if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" + f"if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" ) # check that for higher patches you do forward running @@ -423,7 +430,7 @@ def sc(thr_masses): if q_idx + 3 >= nf_ref and q2m_ref >= m2_ref: raise ValueError( f"In MSbar scheme, Qm{hq} should be lower than m{hq} " - "if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" + f"if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" ) # check that for lower patches you do backward running @@ -432,7 +439,7 @@ def sc(thr_masses): if q2m_ref < m2_ref: raise ValueError( f"In MSbar scheme, Qm{hq} should be greater than m{hq}" - "if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" + f"if alpha_s is given with nfref={nf_ref} at scale Qref={mu2_ref}" ) nf_target += 1 shift = 1 @@ -448,6 +455,7 @@ def sc(thr_masses): m2_ref, q2m_ref, sc(masses), + matching, xif2, q2_to, nf_ref=nf_ref_cur, diff --git a/src/eko/quantities/couplings.py b/src/eko/quantities/couplings.py index 12511d960..449abab38 100644 
--- a/src/eko/quantities/couplings.py +++ b/src/eko/quantities/couplings.py @@ -31,19 +31,6 @@ class CouplingsInfo(DictLike): """ em_running: bool = False - @property - def alphas_ref(self): - r""":math:`\alpha_s` reference point.""" - return CouplingRef.typed(self.alphas, self.scale) - - @property - def alphaem_ref(self): - r""":math:`\alpha` reference point.""" - if self.em_running: - raise ValueError("Non-running electromagnetic coupling.") - - return CouplingRef.typed(self.alphaem, self.scale) - @property def values(self): """Collect only couplings values.""" diff --git a/src/eko/quantities/heavy_quarks.py b/src/eko/quantities/heavy_quarks.py index 3f061d977..df0285649 100644 --- a/src/eko/quantities/heavy_quarks.py +++ b/src/eko/quantities/heavy_quarks.py @@ -1,7 +1,7 @@ """Heavy quarks related quantities.""" import enum from dataclasses import dataclass -from typing import Generic, Optional, Sequence, TypeVar +from typing import Generic, List, Sequence, TypeVar import numpy as np @@ -65,19 +65,6 @@ def t(self, value: T): MatchingScales = HeavyQuarks[MatchingScale] -def scales_from_ratios( - ratios: MatchingRatios, masses: HeavyQuarkMasses -) -> MatchingScales: - """Convert ratios to squared scales. - - .. todo:: - - make this a method - - """ - return MatchingScales(*(np.power(ratios, 2.0) * np.power(masses, 2.0)).tolist()) - - # TODO: upgrade the following to StrEnum (requires py>=3.11) with that, it is # possible to replace all non-alias right sides with calls to enum.auto() @@ -96,9 +83,14 @@ class HeavyInfo(DictLike): This is meant to be used mainly as a theory card section, and to be passed around when all or a large part of this information is required. + Note + ---- + All scales and ratios in this structure are linear, so you can consider + them as quantities in :math:`GeV` or ratios of them. + """ - num_flavs_init: Optional[FlavorsNumber] + num_flavs_init: FlavorsNumber r"""Number of active flavors at fitting scale. I.e. :math:`n_{f,\text{ref}}(\mu^2_0)`, formerly called ``nf0``. @@ -116,6 +108,6 @@ class HeavyInfo(DictLike): """Matching scale of heavy quark masses""" @property - def matching_scales(self) -> MatchingScales: - """Compute matching scales.""" - return scales_from_ratios(self.matching_ratios, self.masses) + def squared_ratios(self) -> List[float]: + """Squared ratios of matching scales.""" + return np.power(self.matching_ratios, 2.0).tolist() diff --git a/src/eko/runner/__init__.py b/src/eko/runner/__init__.py index c985fb1fd..a487cdbaf 100644 --- a/src/eko/runner/__init__.py +++ b/src/eko/runner/__init__.py @@ -7,6 +7,10 @@ from . import legacy +# TODO: drop this altogether, replacing just with managed.solve +# it is currently kept not to break the interface, but the runcards upgrade and +# path conversion should be done by the caller, here we just clearly declare +# which types we expect def solve( theory_card: Union[RawCard, TheoryCard], operators_card: Union[RawCard, OperatorCard], @@ -26,18 +30,10 @@ def solve( to the solution of the |DGLAP| equation itself, and determine the resulting operator features. 
- Parameters - ---------- - theory_card : - theory parameters and related settings - operator_card : - solution configurations, and further EKO options - path : - path where to store the computed operator - Note ---- For further information about EKO inputs and output see :doc:`/code/IO` """ + # TODO: drop this legacy.Runner(theory_card, operators_card, path).compute() diff --git a/src/eko/runner/commons.py b/src/eko/runner/commons.py index 24eb37da2..c27b4c8a2 100644 --- a/src/eko/runner/commons.py +++ b/src/eko/runner/commons.py @@ -1,4 +1,12 @@ """Runners common utilities.""" +import numpy as np + +from ..couplings import Couplings, couplings_mod_ev +from ..interpolation import InterpolatorDispatcher +from ..io import runcards +from ..io.runcards import OperatorCard, TheoryCard +from ..io.types import ScaleVariationsMethod +from ..matchings import Atlas BANNER = r""" oooooooooooo oooo oooo \\ .oooooo. @@ -9,3 +17,38 @@ 888 o 888 `88b. `88b // d88' o888ooooood8 o888o o888o `Y8bood8P' """ + + +def interpolator(operator: OperatorCard) -> InterpolatorDispatcher: + """Create interpolator from runcards.""" + return InterpolatorDispatcher( + xgrid=operator.xgrid, + polynomial_degree=operator.configs.interpolation_polynomial_degree, + ) + + +def atlas(theory: TheoryCard, operator: OperatorCard) -> Atlas: + """Create thresholds atlas from runcards.""" + # TODO: cache result + masses = runcards.masses(theory, operator.configs.evolution_method) + matching_scales = np.power(theory.heavy.matching_ratios, 2.0) * np.array(masses) + return Atlas(matching_scales.tolist(), (operator.mu20, theory.heavy.num_flavs_init)) + + +def couplings(theory: TheoryCard, operator: OperatorCard) -> Couplings: + """Create couplings from runcards.""" + thresholds_ratios = np.power(theory.heavy.matching_ratios, 2.0) + masses = runcards.masses(theory, operator.configs.evolution_method) + return Couplings( + couplings=theory.couplings, + order=theory.order, + method=couplings_mod_ev(operator.configs.evolution_method), + masses=masses, + hqm_scheme=theory.heavy.masses_scheme, + thresholds_ratios=thresholds_ratios + * ( + theory.xif**2 + if operator.configs.scvar_method == ScaleVariationsMethod.EXPONENTIATED + else 1.0 + ), + ) diff --git a/src/eko/runner/compute.py b/src/eko/runner/compute.py deleted file mode 100644 index acaabe605..000000000 --- a/src/eko/runner/compute.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Compute parts from recipes.""" -from .parts import Part -from .recipes import Recipe - - -def compute(recipe: Recipe) -> Part: - """Compute EKO component in isolation.""" - return Part("ciao") diff --git a/src/eko/runner/legacy.py b/src/eko/runner/legacy.py index a4817c06c..c964c3415 100644 --- a/src/eko/runner/legacy.py +++ b/src/eko/runner/legacy.py @@ -3,14 +3,9 @@ import os from typing import Union -import numpy as np - -from .. import interpolation -from ..couplings import Couplings, couplings_mod_ev from ..evolution_operator.grid import OperatorGrid from ..io import EKO, Operator, runcards -from ..io.types import RawCard, ScaleVariationsMethod -from ..thresholds import ThresholdsAtlas +from ..io.types import RawCard from . 
import commons

 logger = logging.getLogger(__name__)

@@ -49,58 +44,37 @@ def __init__(
         """
         new_theory, new_operator = runcards.update(theory_card, operators_card)
+        new_theory.heavy.intrinsic_flavors = [4, 5, 6]

         # Store inputs
         self.path = path
         self._theory = new_theory

         # setup basis grid
-        bfd = interpolation.InterpolatorDispatcher(
-            xgrid=new_operator.xgrid,
-            polynomial_degree=new_operator.configs.interpolation_polynomial_degree,
-        )
-
-        # setup the Threshold path, compute masses if necessary
-        masses = runcards.masses(new_theory, new_operator.configs.evolution_method)
+        bfd = commons.interpolator(new_operator)

         # call explicitly iter to explain the static analyzer that is an
         # iterable
-        thresholds_ratios = np.power(list(iter(new_theory.heavy.matching_ratios)), 2.0)
-        tc = ThresholdsAtlas(
-            masses=masses,
-            q2_ref=new_operator.mu20,
-            nf_ref=new_theory.heavy.num_flavs_init,
-            thresholds_ratios=thresholds_ratios,
-            max_nf=new_theory.heavy.num_flavs_max_pdf,
-        )
+        tc = commons.atlas(new_theory, new_operator)

         # strong coupling
-        sc = Couplings(
-            couplings=new_theory.couplings,
-            order=new_theory.order,
-            method=couplings_mod_ev(new_operator.configs.evolution_method),
-            masses=masses,
-            hqm_scheme=new_theory.heavy.masses_scheme,
-            thresholds_ratios=thresholds_ratios
-            * (
-                new_theory.xif**2
-                if new_operator.configs.scvar_method
-                == ScaleVariationsMethod.EXPONENTIATED
-                else 1.0
-            ),
-        )
-        # setup operator grid
+        cs = commons.couplings(new_theory, new_operator)
         # setup operator grid
+
+        # compute masses if required
+        masses = runcards.masses(new_theory, new_operator.configs.evolution_method)
+
         self.op_grid = OperatorGrid(
-            mu2grid=new_operator.mu2grid,
+            mu2grid=new_operator.evolgrid,
             order=new_theory.order,
             masses=masses,
             mass_scheme=new_theory.heavy.masses_scheme.value,
+            thresholds_ratios=new_theory.heavy.squared_ratios,
             intrinsic_flavors=new_theory.heavy.intrinsic_flavors,
             xif=new_theory.xif,
             configs=new_operator.configs,
             debug=new_operator.debug,
-            thresholds_config=tc,
-            couplings=sc,
+            atlas=tc,
+            couplings=cs,
             interpol_dispatcher=bfd,
             n3lo_ad_variation=new_theory.n3lo_ad_variation,
         )
@@ -122,5 +96,5 @@ def compute(self):
         """
         with EKO.edit(self.path) as eko:
             # add all operators
-            for final_scale, op in self.op_grid.compute().items():
-                eko[float(final_scale)] = Operator.from_dict(op)
+            for ep, op in self.op_grid.compute().items():
+                eko[ep] = Operator(**op)
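Using the managed runner introduced below amounts to a single call; a minimal sketch, assuming the example runcards shipped with ekobox (any TheoryCard/OperatorCard pair works the same way):

    from pathlib import Path

    from ekobox.cards import example
    from eko.runner import managed

    theory = example.theory()      # default theory runcard, tweak as needed
    operator = example.operator()  # default operator runcard
    managed.solve(theory, operator, Path("eko.tar"))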
diff --git a/src/eko/runner/managed.py b/src/eko/runner/managed.py
new file mode 100644
index 000000000..c353decde
--- /dev/null
+++ b/src/eko/runner/managed.py
@@ -0,0 +1,52 @@
+"""Fully managed runner.
+
+This is an automated runner, mainly suggested for small EKO computations.
+
+The primitives used here to compute the various pieces are part of the public
+interface, and should be used directly to manage a more complex run for a
+considerably large operator.
+
+Thus, parallelization and multi-node execution are possible using EKO
+primitives, but they are not automatically performed.
+
+"""
+from pathlib import Path
+
+from ..io.items import Evolution, Matching, Target
+from ..io.runcards import OperatorCard, TheoryCard
+from ..io.struct import EKO
+from . import commons, operators, parts, recipes
+
+
+def solve(theory: TheoryCard, operator: OperatorCard, path: Path):
+    """Solve DGLAP equations in terms of evolution kernel operators (EKO)."""
+    theory.heavy.intrinsic_flavors = [4, 5, 6]
+
+    with EKO.create(path) as builder:
+        eko = builder.load_cards(theory, operator).build()  # pylint: disable=E1101
+
+        atlas = commons.atlas(eko.theory_card, eko.operator_card)
+
+        recs = recipes.create(eko.operator_card.evolgrid, atlas)
+        eko.load_recipes(recs)
+
+        for recipe in eko.recipes:
+            assert isinstance(recipe, Evolution)
+            eko.parts[recipe] = parts.evolve(eko, recipe)
+            # flush the memory
+            del eko.parts[recipe]
+        for recipe in eko.recipes_matching:
+            assert isinstance(recipe, Matching)
+            eko.parts_matching[recipe] = parts.match(eko, recipe)
+            # flush the memory
+            del eko.parts_matching[recipe]
+
+        for ep in operator.evolgrid:
+            headers = recipes.elements(ep, atlas)
+            parts_ = operators.retrieve(headers, eko.parts, eko.parts_matching)
+            target = Target.from_ep(ep)
+            eko.operators[target] = operators.join(parts_)
+            # flush the memory
+            del eko.parts
+            del eko.parts_matching
+            del eko.operators[target]
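The joining step introduced in the next file relies on a rank-4 contraction that treats (flavor, x) index pairs as single matrix indices; as a sketch, the einsum is equivalent to an ordinary matrix product after flattening the pairs:

    import numpy as np

    a = np.random.rand(3, 4, 3, 4)
    b = np.random.rand(3, 4, 3, 4)

    # pair-linearized product, as in dot4 below
    res = np.einsum("aibj,bjck->aick", a, b)

    # the same result via explicit flattening of the (flavor, x) pairs
    flat = (a.reshape(12, 12) @ b.reshape(12, 12)).reshape(3, 4, 3, 4)
    np.testing.assert_allclose(res, flat)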
diff --git a/src/eko/runner/operators.py b/src/eko/runner/operators.py
new file mode 100644
index 000000000..5a0a219c7
--- /dev/null
+++ b/src/eko/runner/operators.py
@@ -0,0 +1,93 @@
+"""Combine parts into operators."""
+from functools import reduce
+from typing import List
+
+import numpy as np
+import numpy.typing as npt
+
+from ..io.inventory import Inventory
+from ..io.items import Evolution, Operator, Recipe
+
+
+def retrieve(
+    headers: List[Recipe], parts: Inventory, parts_matching: Inventory
+) -> List[Operator]:
+    """Retrieve parts to be joined."""
+    elements = []
+    for head in headers:
+        inv = parts if isinstance(head, Evolution) else parts_matching
+        elements.append(inv[head])
+
+    return elements
+
+
+def dot4(op1: npt.NDArray, op2: npt.NDArray) -> npt.NDArray:
+    """Dot product between rank-4 objects.
+
+    The product is performed considering them as matrices indexed by pairs,
+    i.e. linearizing the indices in pairs.
+
+    """
+    return np.einsum("aibj,bjck->aick", op1, op2)
+
+
+def dotop(op1: Operator, op2: Operator) -> Operator:
+    r"""Dot product between two operators.
+
+    Essentially a wrapper of :func:`dot4`, applying linear error propagation,
+    if applicable.
+
+    Note
+    ----
+    Linear error propagation requires matrices to be positive before taking
+    the product.
+
+    Indeed, for a simple product of two variables :math:`a \cdot b`, the error
+    is propagated in the following way:
+
+    .. math::
+
+        \max_{\operatorname{sgn}(da), \operatorname{sgn}(db)} (a + da)(b + db) - ab =
+        \max_{\operatorname{sgn}(da), \operatorname{sgn}(db)} da \cdot b + a \cdot db + \mathcal{O}(d^2) =
+        |da \cdot b| + |a \cdot db| + \mathcal{O}(d^2) =
+        |da| \cdot |b| + |a| \cdot |db| + \mathcal{O}(d^2)
+
+    Where the second step is a consequence of being able to vary the two
+    variations independently, and the last is just a trivial property of the
+    product.
+
+    But in a matrix multiplication, each element of the two matrices has an
+    independent variation associated. Thus:
+
+    .. math::
+
+        \max_{\operatorname{sgn}(da_i), \operatorname{sgn}(db_i)} (a_i + da_i)(b_i + db_i) - a_i b_i =
+        \max_{\operatorname{sgn}(da_i), \operatorname{sgn}(db_i)} da_i \cdot b_i + a_i \cdot db_i + \mathcal{O}(d^2) =
+        |da_i| \cdot |b_i| + |a_i| \cdot |db_i| + \mathcal{O}(d^2)
+
+    """
+    val = dot4(op1.operator, op2.operator)
+
+    if op1.error is not None and op2.error is not None:
+        err = dot4(np.abs(op1.operator), np.abs(op2.error)) + dot4(
+            np.abs(op1.error), np.abs(op2.operator)
+        )
+    else:
+        err = None
+
+    return Operator(val, err)
+
+
+def join(elements: List[Operator]) -> Operator:
+    """Join the elements into the final operator.
+
+    Note
+    ----
+    Since the matrices composing the path have to be multiplied from the
+    destination to the origin, the input order, coming from the path (which is
+    instead ``origin -> target``), is reversed.
+
+    .. todo::
+
+        consider if reversing the path...
+
+    """
+    return reduce(dotop, reversed(elements))
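A scalar sanity check of the error propagation above, as a minimal sketch with 1x1x1x1 operators (assuming ``Operator`` accepts ``(operator, error)`` positionally, as used in ``dotop``):

    import numpy as np

    from eko.io.items import Operator
    from eko.runner.operators import dotop

    a = Operator(np.full((1, 1, 1, 1), 2.0), np.full((1, 1, 1, 1), 0.1))
    b = Operator(np.full((1, 1, 1, 1), -3.0), np.full((1, 1, 1, 1), 0.2))
    c = dotop(a, b)

    np.testing.assert_allclose(c.operator, -6.0)
    # |da|*|b| + |a|*|db| = 0.1*3.0 + 2.0*0.2 = 0.7
    np.testing.assert_allclose(c.error, 0.7)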
diff --git a/src/eko/runner/parts.py b/src/eko/runner/parts.py
index aed1067aa..e385b8f49 100644
--- a/src/eko/runner/parts.py
+++ b/src/eko/runner/parts.py
@@ -1,38 +1,157 @@
-"""Operator components."""
-from abc import ABC
-from dataclasses import dataclass
+"""Compute operator components.

-import numpy.typing as npt
+.. todo::

-from ..io.dictlike import DictLike
-from ..io.types import SquaredScale
+    This is the only part of the new runner making use of the global context
+    through the :class:`EKO` object.

+    After #242, the goal is to update :class:`Operator` and
+    :class:`OperatorMatrixElement` to simplify the computation and passing
+    down parameters.

-@dataclass
-class Part(DictLike, ABC):
-    """An atomic operator ingredient.
+"""
+import numpy as np

-    The operator is always in flavor basis, and on the x-grid specified for
-    computation, i.e. it is a rank-4 tensor of shape::
+from .. import evolution_operator as evop
+from ..evolution_operator import matching_condition
+from ..evolution_operator import operator_matrix_element as ome
+from ..evolution_operator import physical
+from ..io import EKO
+from ..io.items import Evolution, Matching, Operator
+from ..quantities.heavy_quarks import QuarkMassScheme
+from . import commons

-    (flavor basis) x (x-grid) x (flavor basis) x (x-grid)

+def managers(eko: EKO) -> dict:
+    """Collect managers for operator computation.
+
+    .. todo::
+
+        Legacy interface, avoid managers usage.
+
+    """
+    tcard = eko.theory_card
+    ocard = eko.operator_card
+    return dict(
+        thresholds_config=commons.atlas(tcard, ocard),
+        couplings=commons.couplings(tcard, ocard),
+        interpol_dispatcher=commons.interpolator(ocard),
+    )
+
+
+def blowup_info(eko: EKO) -> dict:
+    """Prepare common information to blow up to flavor basis.
+
+    Note
+    ----
+    ``intrinsic_range`` is a fully deprecated feature, here and anywhere else,
+    since a full range is already always used for backward evolution, and it
+    is not harmful to use it also for forward evolution.
+
+    Indeed, the only feature of non-intrinsic evolution is to absorb a
+    non-trivial boundary condition when an intrinsic PDF is defined.
+    But to achieve this, it is sufficient not to specify any intrinsic
+    boundary condition at all, while if something is there, it is intuitive
+    enough that it will be consistently evolved.
+
+    Moreover, since two different behaviors are applied for the forward and
+    backward evolution, the intrinsic range is a "non-local" function: it does
+    not depend only on the evolution segment, but also on the previous
+    evolution history (to determine whether the evolution is backward in
+    flavor, irrespective of whether it happens for an increasing or decreasing
+    interval in scale at fixed flavor).

     """
+    return dict(intrinsic_range=[4, 5, 6], qed=eko.theory_card.order[1] > 0)
+

-    operator: npt.NDArray
+def evolve_configs(eko: EKO) -> dict:
+    """Create configs for :class:`Operator`.

+    .. todo::
+
+        Legacy interface, make use of a dedicated object.
+
+    """
+    tcard = eko.theory_card
+    ocard = eko.operator_card
+    return dict(
+        order=tcard.order,
+        xif2=tcard.xif**2,
+        method=ocard.configs.evolution_method.value,
+        ev_op_iterations=ocard.configs.ev_op_iterations,
+        ev_op_max_order=ocard.configs.ev_op_max_order,
+        polarized=ocard.configs.polarized,
+        time_like=ocard.configs.time_like,
+        debug_skip_singlet=ocard.debug.skip_singlet,
+        debug_skip_non_singlet=ocard.debug.skip_non_singlet,
+        n_integration_cores=ocard.configs.n_integration_cores,
+        ModSV=ocard.configs.scvar_method,
+        n3lo_ad_variation=tcard.n3lo_ad_variation,
+    )


-@dataclass
-class Evolution(Part):
-    """Evolution in a fixed number of flavors."""

-    final: bool
-    mu20: SquaredScale
-    mu2: SquaredScale
+def evolve(eko: EKO, recipe: Evolution) -> Operator:
+    """Compute evolution in isolation."""
+    op = evop.Operator(
+        evolve_configs(eko), managers(eko), recipe.as_atlas, is_threshold=recipe.cliff
+    )
+    op.compute()

+    binfo = blowup_info(eko)
+    res, err = physical.PhysicalOperator.ad_to_evol_map(
+        op.op_members, op.nf, op.q2_to, **binfo
+    ).to_flavor_basis_tensor(qed=binfo["qed"])
+
+    return Operator(res, err)
+
+
+def matching_configs(eko: EKO) -> dict:
+    """Create configs for :class:`OperatorMatrixElement`.
+
+    .. todo::
+
+        Legacy interface, make use of a dedicated object.
+
+    """
+    tcard = eko.theory_card
+    ocard = eko.operator_card
+    return dict(
+        **evolve_configs(eko),
+        backward_inversion=ocard.configs.inversion_method,
+        intrinsic_range=tcard.heavy.intrinsic_flavors,
+    )
+
+
+def match(eko: EKO, recipe: Matching) -> Operator:
+    """Compute matching in isolation.
+
+    Note
+    ----
+    Compared to the old prescription, a dedicated rotation to a different
+    intrinsic basis is no longer needed.
+
+    All the operators are blown up to flavor basis, and they are saved and
+    joined in that unique basis. So, the only rotation used is towards that
+    basis, and it is encoded in the blowing-up prescription.
+ + """ + kthr = eko.theory_card.heavy.squared_ratios[recipe.hq - 4] + op = ome.OperatorMatrixElement( + matching_configs(eko), + managers(eko), + recipe.hq - 1, + recipe.scale, + recipe.inverse, + np.log(kthr), + eko.theory_card.heavy.masses_scheme is QuarkMassScheme.MSBAR, + ) + op.compute() -@dataclass -class Matching(Part): - """Matching conditions between two different flavor schemes, at a given scale.""" + binfo = blowup_info(eko) + nf_match = op.nf - 1 if recipe.inverse else op.nf + res, err = matching_condition.MatchingCondition.split_ad_to_evol_map( + op.op_members, nf_match, recipe.scale, **binfo + ).to_flavor_basis_tensor(qed=binfo["qed"]) - mu2: SquaredScale + return Operator(res, err) diff --git a/src/eko/runner/recipes.py b/src/eko/runner/recipes.py index 0e6163b66..7a09554cb 100644 --- a/src/eko/runner/recipes.py +++ b/src/eko/runner/recipes.py @@ -1,54 +1,35 @@ """Recipes containing instructions for atomic computations.""" -from abc import ABC -from dataclasses import dataclass +from typing import List -from .. import EKO -from .. import scale_variations as sv -from ..io import runcards -from ..io.dictlike import DictLike -from ..io.types import SquaredScale -from ..thresholds import ThresholdsAtlas +from ..io.items import Evolution, Matching, Recipe +from ..io.types import EvolutionPoint as EPoint +from ..matchings import Atlas, Segment -@dataclass -class Recipe(DictLike, ABC): - """Base recipe structure.""" +def elements(ep: EPoint, atlas: Atlas) -> List[Recipe]: + """Determine recipes to compute a given operator.""" + recipes = [] - name: str + # expanded = eko.operator_card.configs.scvar_method is sv.Modes.expanded + # mu2f = mu2 * eko.theory_card.xif**2 if expanded else mu2 + blocks = atlas.matched_path(ep) + for block in blocks: + if isinstance(block, Segment): + cliff = block.target in atlas.walls + recipe = Evolution.from_atlas(block, cliff=cliff) + else: + recipe = Matching.from_atlas(block) -@dataclass -class EvolutionRecipe(Recipe): - """Recipe compute evolution with a fixed number of light flavors.""" + recipes.append(recipe) - final: bool - mu20: SquaredScale - mu2: SquaredScale + return recipes -@dataclass -class MatchingRecipe(Recipe): - """Recipe to compute the matching between two different flavor number schemes.""" - - mu2: SquaredScale - - -def create(eko: EKO): +def create(evolgrid: List[EPoint], atlas: Atlas) -> List[Recipe]: """Create all associated recipes.""" - _ = eko.theory_card.matching - - masses = runcards.masses( - eko.theory_card, eko.operator_card.configs.evolution_method - ) - - tc = ThresholdsAtlas( - masses=masses, - q2_ref=eko.operator_card.mu20, - nf_ref=eko.theory_card.num_flavs_init, - thresholds_ratios=None, - max_nf=eko.theory_card.num_flavs_max_pdf, - ) - - for mu2 in eko.mu2grid: - expanded = eko.operator_card.configs.scvar_method is sv.Modes.expanded - mu2f = mu2 * eko.theory_card.xif**2 if expanded else mu2 + recipes = [] + for ep in evolgrid: + recipes.extend(elements(ep, atlas)) + + return list(set(recipes)) diff --git a/src/eko/thresholds.py b/src/eko/thresholds.py deleted file mode 100644 index eb0bd7fa2..000000000 --- a/src/eko/thresholds.py +++ /dev/null @@ -1,277 +0,0 @@ -r"""Holds the classes that define the |FNS|.""" -import logging -from dataclasses import astuple, dataclass -from typing import List, Optional - -import numpy as np - -logger = logging.getLogger(__name__) - - -@dataclass -class PathSegment: - """Oriented path in the threshold landscape.""" - - q2_from: float - """Starting point.""" - q2_to: float - 
"""Final point.""" - nf: int - """Number of active flavors.""" - - @property - def is_downward_q2(self) -> bool: - """Return True if ``q2_from`` is bigger than ``q2_to``.""" - return self.q2_from > self.q2_to - - @property - def tuple(self): - """Deprecated: use directly `dataclasses.astuple`.""" - return astuple(self) - - def __str__(self): - """Textual representation, mainly for logging purpose.""" - return f"PathSegment({self.q2_from} -> {self.q2_to}, nf={self.nf})" - - -class ThresholdsAtlas: - r"""Holds information about the matching scales. - - These scales are the :math:`Q^2` has to pass in order to get there from a - given :math:`Q^2_{ref}`. - - """ - - def __init__( - self, - masses: List[float], - q2_ref: Optional[float] = None, - nf_ref: Optional[int] = None, - thresholds_ratios: Optional[List[float]] = None, - max_nf: Optional[int] = None, - ): - """Create basic atlas. - - Parameters - ---------- - masses : - list of quark masses squared - q2_ref : - reference scale - nf_ref : - number of active flavors at the reference scale - thresholds_ratios : - list of ratios between matching scales and masses squared - max_nf : - maximum number of active flavors, if `None` no maximum is set - - """ - sorted_masses = sorted(masses) - if not np.allclose(masses, sorted_masses): - raise ValueError("masses need to be sorted") - # combine them - thresholds = self.build_area_walls(sorted_masses, thresholds_ratios, max_nf) - self.area_walls = [0] + thresholds + [np.inf] - - # check nf_ref - if nf_ref is not None: - if q2_ref is None: - raise ValueError( - "Without a reference Q2 value a reference number of flavors " - "does not make sense!" - ) - # else self.q2_ref is not None - nf_init = 2 + len(list(filter(lambda x: np.isclose(0, x), self.area_walls))) - if nf_ref < nf_init: - raise ValueError( - f"The reference number of flavors is set to {nf_ref}, " - f"but the atlas starts at {nf_init}" - ) - nf_final = 2 + len(list(filter(lambda x: x < np.inf, self.area_walls))) - if nf_ref > nf_final: - raise ValueError( - f"The reference number of flavors is set to {nf_ref}, " - f"but the atlas stops at {nf_final}" - ) - - # Init values - self.q2_ref = q2_ref - self.nf_ref = nf_ref - self.thresholds_ratios = thresholds_ratios - logger.info(str(self)) - - def __str__(self): - """Textual representation, mainly for logging purpose.""" - walls = " - ".join([f"{w:.2e}" for w in self.area_walls]) - return f"ThresholdsAtlas [{walls}], ref={self.q2_ref} @ {self.nf_ref}" - - @classmethod - def ffns(cls, nf: int, q2_ref: Optional[float] = None): - """Create a |FFNS| setup. - - The function creates simply sufficient thresholds at ``0`` (in the - beginning), since the number of flavors is determined by counting - from below. - - Parameters - ---------- - nf : - number of light flavors - q2_ref : - reference scale - - """ - return cls([0] * (nf - 3) + [np.inf] * (6 - nf), q2_ref) - - @staticmethod - def build_area_walls( - masses: List[float], - thresholds_ratios: Optional[List[float]] = None, - max_nf: Optional[int] = None, - ): - r"""Create the object from the informations on the run card. - - The thresholds are computed by :math:`(m_q \cdot k_q^{Thr})`. 
- - Parameters - ---------- - masses : - heavy quark masses squared - thresholds_ratios : - list of ratios between matching scales and masses squared - max_nf : - maximum number of flavors - - Returns - ------- - list - threshold list - - """ - if len(masses) != 3: - raise ValueError("There have to be 3 quark masses") - if thresholds_ratios is None: - thresholds_ratios = [1.0, 1.0, 1.0] - if len(thresholds_ratios) != 3: - raise ValueError("There have to be 3 quark threshold ratios") - if max_nf is None: - max_nf = 6 - - thresholds = [] - for m, k in zip(masses, thresholds_ratios): - thresholds.append(m * k) - # cut array = simply reduce some thresholds - thresholds = thresholds[: max_nf - 3] - return thresholds - - def path( - self, - q2_to: float, - nf_to: Optional[int] = None, - q2_from: Optional[float] = None, - nf_from: Optional[int] = None, - ): - """Get path from ``q2_from`` to ``q2_to``. - - Parameters - ---------- - q2_to: - target value of q2 - q2_from: - starting value of q2 - - Returns - ------- - list(PathSegment) - List of :class:`PathSegment` to go through in order to get from ``q2_from`` - to ``q2_to`` - - """ - # fallback to init config - if q2_from is None: - q2_from = self.q2_ref - if nf_from is None: - nf_from = self.nf_ref - # determine reference thresholds - if nf_from is None: - nf_from = 2 + np.digitize(q2_from, self.area_walls) - if nf_to is None: - nf_to = 2 + np.digitize(q2_to, self.area_walls) - # determine direction and python slice modifier - if nf_to < nf_from: - rc = -1 - shift = -3 - else: - rc = 1 - shift = -2 - # join all necessary points in one list - boundaries = ( - [q2_from] - + self.area_walls[nf_from + shift : int(nf_to) + shift : rc] - + [q2_to] - ) - segs = [ - PathSegment(boundaries[i], q2, nf_from + i * rc) - for i, q2 in enumerate(boundaries[1:]) - ] - return segs - - def nf(self, q2): - """Find the number of flavors active at the given scale. - - Parameters - ---------- - q2 : float - reference scale - - Returns - ------- - int - number of active flavors - - """ - ref_idx = np.digitize(q2, self.area_walls) - return 2 + ref_idx - - -def is_downward_path(path: List[PathSegment]) -> bool: - """Determine if a path is downward. - - Criterias are: - - - in the number of active flavors when the path list contains more than one - :class:`PathSegment`, note this can be different from each - :attr:`PathSegment.is_downward_q2` - - in :math:`Q^2` when just one single :class:`PathSegment` is given - - Parameters - ---------- - path : - path - - Returns - ------- - bool - True for a downward path - - """ - if len(path) == 1: - return path[0].is_downward_q2 - return path[1].nf < path[0].nf - - -def flavor_shift(is_downward: bool) -> int: - """Determine the shift to number of light flavors. 
- - Parameters - ---------- - is_downward : bool - True for a downward path - - Returns - ------- - int - shift to number of light flavors which can be 3 or 4 - - """ - return 4 if is_downward else 3 diff --git a/src/ekobox/apply.py b/src/ekobox/apply.py index f115e8337..edfdc422d 100644 --- a/src/ekobox/apply.py +++ b/src/ekobox/apply.py @@ -74,31 +74,28 @@ def apply_pdf_flavor( output PDFs and their associated errors for the computed mu2grid """ # create pdfs - pdfs = np.zeros((len(eko.rotations.inputpids), len(eko.rotations.inputgrid))) - for j, pid in enumerate(eko.rotations.inputpids): + pdfs = np.zeros((len(eko.bases.inputpids), len(eko.bases.inputgrid))) + for j, pid in enumerate(eko.bases.inputpids): if not lhapdf_like.hasFlavor(pid): continue pdfs[j] = np.array( - [ - lhapdf_like.xfxQ2(pid, x, eko.mu20) / x - for x in eko.rotations.inputgrid.raw - ] + [lhapdf_like.xfxQ2(pid, x, eko.mu20) / x for x in eko.bases.inputgrid.raw] ) # build output out_grid = {} - for mu2, elem in eko.items(): + for (mu2, nf), elem in eko.items(): pdf_final = np.einsum(CONTRACTION, elem.operator, pdfs, optimize="optimal") if elem.error is not None: error_final = np.einsum(CONTRACTION, elem.error, pdfs, optimize="optimal") else: error_final = None out_grid[mu2] = { - "pdfs": dict(zip(eko.rotations.targetpids, pdf_final)), + "pdfs": dict(zip(eko.bases.targetpids, pdf_final)), "errors": None, } if error_final is not None: - out_grid[mu2]["errors"] = dict(zip(eko.rotations.targetpids, error_final)) + out_grid[mu2]["errors"] = dict(zip(eko.bases.targetpids, error_final)) # rotate to evolution basis if flavor_rotation is not None: @@ -120,7 +117,7 @@ def apply_pdf_flavor( # rotate/interpolate to target grid if targetgrid is not None: b = interpolation.InterpolatorDispatcher( - xgrid=eko.rotations.targetgrid, + xgrid=eko.bases.targetgrid, polynomial_degree=eko.operator_card.configs.interpolation_polynomial_degree, mode_N=False, ) diff --git a/src/ekobox/cards.py b/src/ekobox/cards.py index ef7ada109..474e7d931 100644 --- a/src/ekobox/cards.py +++ b/src/ekobox/cards.py @@ -14,11 +14,11 @@ alphas=0.118, alphaem=0.007496252, scale=91.2, - num_flavs_ref=None, + num_flavs_ref=5, max_num_flavs=6, ), heavy=dict( - num_flavs_init=None, + num_flavs_init=4, num_flavs_max_pdf=6, intrinsic_flavors=[4], masses=[ReferenceRunning([mq, nan]) for mq in (2.0, 4.5, 173.07)], diff --git a/src/ekobox/evol_pdf.py b/src/ekobox/evol_pdf.py index 3be7201de..b6dbb5def 100644 --- a/src/ekobox/evol_pdf.py +++ b/src/ekobox/evol_pdf.py @@ -84,7 +84,7 @@ def evolve_pdfs( lambda pid, x, Q2, evolved_PDF=evolved_PDF: targetlist[targetlist.index(x)] * evolved_PDF[Q2]["pdfs"][pid][targetlist.index(x)], xgrid=targetlist, - mu2grid=operators_card.mu2grid, + evolgrid=operators_card.evolgrid, pids=np.array(br.flavor_basis_pids), ) # all_blocks will be useful in case there will be necessity to dump many blocks diff --git a/src/ekobox/genpdf/__init__.py b/src/ekobox/genpdf/__init__.py index a5ed41865..5df318de3 100644 --- a/src/ekobox/genpdf/__init__.py +++ b/src/ekobox/genpdf/__init__.py @@ -3,34 +3,30 @@ import logging import pathlib import shutil +from typing import Callable, List, Optional, Union import numpy as np from eko import basis_rotation as br +from eko.io.types import EvolutionPoint as EPoint from . 
import export, flavors, load logger = logging.getLogger(__name__) -def take_data(parent_pdf_set=None, members=False, xgrid=None, mu2grid=None): +def take_data( + parent_pdf_set: Optional[Union[str, dict]] = None, + members: bool = False, + xgrid: Optional[List[float]] = None, + evolgrid: Optional[List[EPoint]] = None, +): """ Auxiliary function for `generate_pdf`. It provides the info, the heads of the member files and the blocks to be generated to `generate_pdf`. - Parameters - ---------- - parent_pdf_set : None or str or dict - the PDF set to be used as parent set - members : bool - if true every member of the parent is loaded - xgrid : list(float) - produced x grid if given - mu2grid : list(float) - produced Q2 grid if given - Returns ------- info : dict @@ -42,8 +38,8 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, mu2grid=None): """ if xgrid is None: xgrid = np.geomspace(1e-9, 1, 240) - if mu2grid is None: - mu2grid = np.geomspace(1.3, 1e5, 35) + if evolgrid is None: + evolgrid = [(mu2, 0) for mu2 in np.geomspace(1.3, 1e5, 35)] # collect blocks all_blocks = [] info = None @@ -63,7 +59,7 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, mu2grid=None): else: toylh = toy.mkPDF("", 0) all_blocks.append( - [generate_block(toylh.xfxQ2, xgrid, mu2grid, br.flavor_basis_pids)] + [generate_block(toylh.xfxQ2, xgrid, evolgrid, br.flavor_basis_pids)] ) else: @@ -84,7 +80,7 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, mu2grid=None): if pid not in parent_pdf_set else parent_pdf_set[pid](x, Q2), xgrid, - mu2grid, + evolgrid, br.flavor_basis_pids, ) ] @@ -95,14 +91,14 @@ def take_data(parent_pdf_set=None, members=False, xgrid=None, mu2grid=None): def generate_pdf( - name, - labels, + name: str, + labels: List[int], parent_pdf_set=None, members=False, info_update=None, - install=False, - xgrid=None, - mu2grid=None, + install: bool = False, + xgrid: Optional[List[float]] = None, + evolgrid: Optional[List[EPoint]] = None, ): """ Generate a new PDF from a parent PDF with a set of flavors. @@ -133,46 +129,30 @@ def generate_pdf( Turning True the value of the `install` flag, it is possible to automatically install the generated PDF to the lhapdf directory. By default install is False. - Parameters - ---------- - name : str - target name - labels : - list of flavors - parent_pdf_set : - parent PDF - all : bool - iterate on members - install : bool - install on LHAPDF path - xgrid : list(float) - produced x grid if given - mu2grid : list(float) - produced Q2 grid if given - Examples -------- - To generate a PDF with a fixed function `f(x,Q2)` for some flavors - you can use the following snippet: - - >>> # f = lambda x,Q2 ... 
put the desired function here
-    >>> # mask = [list of active PIDs]
-    >>> generate_pdf(name, labels, parent_pdf_set={pid: f for pid in mask})
-
-    The |API| also provides the possibility to extract arbitrary flavor combinations:
-    using the debug PDF settings we can construct a "anti-QED-singlet" combination that
-    is usefull in debugging DIS codes since it does not couple in |LO|, but only
-    through the pure-singlet contributions (starting at |NNLO|)
-
-    >>> from eko import basis_rotation as br
-    >>> from ekobox import genpdf
-    >>> import numpy as np
-    >>> anti_qed_singlet = np.zeros_like(br.flavor_basis_pids, dtype=np.float_)
-    >>> anti_qed_singlet[br.flavor_basis_pids.index(1)] = -4
-    >>> anti_qed_singlet[br.flavor_basis_pids.index(-1)] = -4
-    >>> anti_qed_singlet[br.flavor_basis_pids.index(2)] = 1
-    >>> anti_qed_singlet[br.flavor_basis_pids.index(-2)] = 1
-    >>> genpdf.generate_pdf("anti_qed_singlet", [anti_qed_singlet])
+    To generate a PDF with a fixed function `f(x,Q2)` for some flavors
+    you can use the following snippet:
+
+    >>> # f = lambda x,Q2 ... put the desired function here
+    >>> # mask = [list of active PIDs]
+    >>> generate_pdf(name, labels, parent_pdf_set={pid: f for pid in mask})
+
+    The |API| also provides the possibility to extract arbitrary flavor combinations:
+    using the debug PDF settings we can construct an "anti-QED-singlet" combination that
+    is useful in debugging DIS codes, since it does not couple at |LO|, but only
+    through the pure-singlet contributions (starting at |NNLO|):
+
+    >>> from eko import basis_rotation as br
+    >>> from ekobox import genpdf
+    >>> import numpy as np
+    >>> anti_qed_singlet = np.zeros_like(br.flavor_basis_pids, dtype=np.float_)
+    >>> anti_qed_singlet[br.flavor_basis_pids.index(1)] = -4
+    >>> anti_qed_singlet[br.flavor_basis_pids.index(-1)] = -4
+    >>> anti_qed_singlet[br.flavor_basis_pids.index(2)] = 1
+    >>> anti_qed_singlet[br.flavor_basis_pids.index(-2)] = 1
+    >>> genpdf.generate_pdf("anti_qed_singlet", [anti_qed_singlet])
+
     """
     pathlib.Path(name).mkdir(exist_ok=True)
     # Checking label basis
@@ -187,7 +167,7 @@
     # labels = verify_labels(args.labels)
     info, heads, all_blocks = take_data(
-        parent_pdf_set, members, xgrid=xgrid, mu2grid=mu2grid
+        parent_pdf_set, members, xgrid=xgrid, evolgrid=evolgrid
     )

     # filter the PDF
@@ -236,30 +216,14 @@ def install_pdf(name):
         shutil.move(str(src), str(target))

-def generate_block(xfxQ2, xgrid, mu2grid, pids):
-    """Generate an LHAPDF data block from a callable.
- - Parameters - ---------- - xfxQ2 : callable - LHAPDF like callable - mu2grid : list(float) - Q grid - pids : list(int) - Flavors list - xgrid : list(float) - x grid - - Returns - ------- - dict - PDF block - - """ - block = dict(mu2grid=mu2grid, pids=pids, xgrid=xgrid) +def generate_block( + xfxQ2: Callable, xgrid: List[float], evolgrid: List[EPoint], pids: List[int] +) -> dict: + """Generate an LHAPDF data block from a callable.""" + block: dict = dict(mu2grid=[mu2 for mu2, _ in evolgrid], pids=pids, xgrid=xgrid) data = [] for x in xgrid: - for Q2 in mu2grid: - data.append(np.array([xfxQ2(pid, x, Q2) for pid in pids])) + for mu2, _ in evolgrid: + data.append(np.array([xfxQ2(pid, x, mu2) for pid in pids])) block["data"] = np.array(data) return block diff --git a/src/ekobox/utils.py b/src/ekobox/utils.py index b7fdd995b..095e605e9 100644 --- a/src/ekobox/utils.py +++ b/src/ekobox/utils.py @@ -36,14 +36,21 @@ def ekos_product( # TODO: add a control on the theory (but before we need to implement # another kind of output which includes the theory and operator runcards) - q2match = eko_ini.approx(eko_fin.operator_card.mu0**2, rtol=rtol, atol=atol) - if q2match is None: + ep_match = eko_ini.approx( + (eko_fin.operator_card.mu0**2, eko_fin.theory_card.heavy.num_flavs_init), + rtol=rtol, + atol=atol, + ) + if ep_match is None: raise ValueError( "Initial Q2 of final eko operator does not match any final Q2 in" " the initial eko operator" ) - ope1 = eko_ini[q2match].operator.copy() - ope1_error = eko_ini[q2match].error + ope1 = eko_ini[ep_match] + assert ope1 is not None + + ope1_value = ope1.operator.copy() + ope1_error = ope1.error if ope1_error is not None: ope1_error = ope1_error.copy() @@ -57,10 +64,10 @@ def ekos_product( if q2 in eko_ini: continue - op = np.einsum(CONTRACTION, ope1, op2.operator) + op = np.einsum(CONTRACTION, ope1_value, op2.operator) if ope1_error is not None and op2.error is not None: - error = np.einsum(CONTRACTION, ope1, op2.error) + np.einsum( + error = np.einsum(CONTRACTION, ope1_value, op2.error) + np.einsum( CONTRACTION, ope1_error, op2.operator ) else: diff --git a/src/ekomark/benchmark/runner.py b/src/ekomark/benchmark/runner.py index 7d9f9340a..9be2d5261 100644 --- a/src/ekomark/benchmark/runner.py +++ b/src/ekomark/benchmark/runner.py @@ -237,7 +237,7 @@ def log(self, theory, _, pdf, me, ext): ): rotate_to_evolution[3, :] = [0, 0, 0, 0, 0, -1, -1, 0, 1, 1, 0, 0, 0, 0] - with EKO.open(me) as eko: + with EKO.read(me) as eko: pdf_grid = apply.apply_pdf_flavor( eko, pdf, diff --git a/src/ekomark/plots.py b/src/ekomark/plots.py index eee5ef79b..92a155de3 100644 --- a/src/ekomark/plots.py +++ b/src/ekomark/plots.py @@ -141,24 +141,24 @@ def plot_dist(x, y, yerr, yref, title=None, oMx_min=1e-2, oMx_max=0.5): def plot_operator(var_name, op, op_err, log_operator=True, abs_operator=True): - """ - Plot a single operator as heat map. + """Plot a single operator as heat map. 
Parameters ---------- - ret : dict - DGLAP result - var_name : str - operator name - log_operator : bool, optional - plot on logarithmic scale - abs_operator : bool, optional - plot absolute value (instead of true value) + ret : dict + DGLAP result + var_name : str + operator name + log_operator : bool, optional + plot on logarithmic scale + abs_operator : bool, optional + plot absolute value (instead of true value) Returns ------- - fig : matplotlib.pyplot.figure - created figure + fig : matplotlib.pyplot.figure + created figure + """ # get # op = ret["operators"][var_name] @@ -210,25 +210,25 @@ def plot_operator(var_name, op, op_err, log_operator=True, abs_operator=True): def save_operators_to_pdf(path, theory, ops, me: EKO, skip_pdfs, change_lab=False): - """ - Output all operator heatmaps to PDF. + """Output all operator heatmaps to PDF. Parameters ---------- - path : str - path to the plot - theory : dict - theory card - ops : dict - operator card - me : eko.output.Output - DGLAP result - skip_pdfs : list - PDF to skip - change_lab : bool - set whether to rename the labels + path : str + path to the plot + theory : dict + theory card + ops : dict + operator card + me : eko.output.Output + DGLAP result + skip_pdfs : list + PDF to skip + change_lab : bool + set whether to rename the labels + """ - ops_names = list(me.rotations.targetpids) + ops_names = list(me.bases.targetpids) if np.allclose(ops_names, br.rotate_flavor_to_evolution): ops_names = br.evol_basis_pids else: diff --git a/src/ekore/anomalous_dimensions/unpolarized/space_like/aem1.py b/src/ekore/anomalous_dimensions/unpolarized/space_like/aem1.py index 33cfc751f..9163d722c 100644 --- a/src/ekore/anomalous_dimensions/unpolarized/space_like/aem1.py +++ b/src/ekore/anomalous_dimensions/unpolarized/space_like/aem1.py @@ -5,7 +5,6 @@ from eko import constants -from ....harmonics import cache as c from . 
import as1 diff --git a/src/ekore/harmonics/cache.py b/src/ekore/harmonics/cache.py index fb297fe4a..1bc8f54fe 100644 --- a/src/ekore/harmonics/cache.py +++ b/src/ekore/harmonics/cache.py @@ -108,7 +108,7 @@ def get( """ # Maybe improve error - if key < 0 or key > len(cache): + if key < 0 or key >= len(cache): raise RuntimeError # load the thing s = cache[key] @@ -127,10 +127,8 @@ def get( cache = update(w1.S1, S1mh, cache, (n - 1) / 2) s = recursive_harmonic_sum(cache[S1mh], (n - 1) / 2, 1, 1) elif key == Sm1: - cache = update(w1.S1, S1, cache, n) - cache = update(w1.S1, S1mh, cache, (n - 1) / 2) - cache = update(w1.S1, S1h, cache, n / 2) - s = w1.Sm1(n, cache[S1], cache[S1mh], cache[S1h], is_singlet) + cache = update_Sm1(cache, n, is_singlet) + s = cache[key] elif key == S1p2: cache = update(w1.S1, S1, cache, n) s = recursive_harmonic_sum(cache[S1], n, 2, 1) @@ -145,10 +143,8 @@ def get( cache = update(w2.S2, S2mh, cache, (n - 1) / 2) s = recursive_harmonic_sum(cache[S2mh], (n - 1) / 2, 1, 2) elif key == Sm2: - cache = update(w2.S2, S2, cache, n) - cache = update(w2.S2, S2mh, cache, (n - 1) / 2) - cache = update(w2.S2, S2h, cache, n / 2) - s = w2.Sm2(n, cache[S2], cache[S2mh], cache[S2h], is_singlet) + cache = update_Sm2(cache, n, is_singlet) + s = cache[key] # weight 3 elif key == S3: s = w3.S3(n) diff --git a/src/ekore/harmonics/log_functions.py b/src/ekore/harmonics/log_functions.py index 6f3202a52..c02aa4a2a 100644 --- a/src/ekore/harmonics/log_functions.py +++ b/src/ekore/harmonics/log_functions.py @@ -8,8 +8,6 @@ """ import numba as nb -from eko.constants import zeta3 - @nb.njit(cache=True) def lm11m1(n, S1): diff --git a/tests/conftest.py b/tests/conftest.py index ce91ee8cb..2e54dc171 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,7 @@ from eko import interpolation from eko.io.runcards import OperatorCard, TheoryCard from eko.io.struct import EKO, Operator +from eko.io.types import EvolutionPoint from ekobox import cards @@ -80,7 +81,7 @@ def __init__(self, theory: TheoryCard, operator: OperatorCard, path: os.PathLike self.cache: Optional[EKO] = None @staticmethod - def _operators(mugrid: Iterable[float], shape: Tuple[int, int]): + def _operators(mugrid: Iterable[EvolutionPoint], shape: Tuple[int, int]): ops = {} for mu in mugrid: ops[mu] = Operator(np.random.rand(*shape, *shape)) @@ -94,7 +95,7 @@ def _create(self): lx = len(self.operator.xgrid) lpids = len(self.operator.pids) for mu2, op in self._operators( - mugrid=self.operator.mu2grid, shape=(lpids, lx) + mugrid=self.operator.evolgrid, shape=(lpids, lx) ).items(): self.cache[mu2] = op diff --git a/tests/eko/evolution_operator/test_grid.py b/tests/eko/evolution_operator/test_grid.py index a65bc21e4..804388db6 100644 --- a/tests/eko/evolution_operator/test_grid.py +++ b/tests/eko/evolution_operator/test_grid.py @@ -12,6 +12,7 @@ import pytest import eko.io.types +from eko import couplings from eko.quantities.couplings import CouplingEvolutionMethod from eko.runner import legacy @@ -22,12 +23,12 @@ class FakeEM(enum.Enum): BLUB = "blub" monkeypatch.setattr( - legacy, + couplings, "couplings_mod_ev", lambda *args: CouplingEvolutionMethod.EXACT, ) operator_card.configs.evolution_method = FakeEM.BLUB - with pytest.raises(ValueError, match="blub"): + with pytest.raises(ValueError, match="BLUB"): legacy.Runner(theory_ffns(3), operator_card, path=tmp_path / "eko.tar") # check LO @@ -43,33 +44,26 @@ def test_compute_mu2grid(theory_ffns, operator_card, tmp_path): opgrid = legacy.Runner( theory_ffns(3), 
operator_card, path=tmp_path / "eko.tar" ).op_grid - # q2 has not be precomputed - but should work nevertheless - opgrid.compute(3) - # we can also pass a single number opg = opgrid.compute() assert len(opg) == len(mugrid) assert all(k in op for k in ["operator", "error"] for op in opg.values()) - opg = opgrid.compute(3) - assert len(opg) == 1 - assert all(k in op for k in ["operator", "error"] for op in opg.values()) def test_grid_computation_VFNS(theory_card, operator_card, tmp_path): """Checks that the grid can be computed""" + mugrid = [(3, 4), (5, 5), (5, 4)] + operator_card.mugrid = mugrid opgrid = legacy.Runner( theory_card, operator_card, path=tmp_path / "eko.tar" ).op_grid - qgrid_check = [3, 5, 200**2] - operators = opgrid.compute(qgrid_check) - assert len(operators) == len(qgrid_check) + operators = opgrid.compute() + assert len(operators) == len(mugrid) def test_mod_expanded(theory_card, theory_ffns, operator_card, tmp_path: pathlib.Path): + mugrid = [(3, 4)] + operator_card.mugrid = mugrid operator_card.configs.scvar_method = eko.io.types.ScaleVariationsMethod.EXPANDED - theory_update = { - "order": (1, 0), - "ModSV": "expanded", - } epsilon = 1e-1 path = tmp_path / "eko.tar" for is_ffns, nf0 in zip([False, True], [5, 3]): @@ -81,11 +75,11 @@ def test_mod_expanded(theory_card, theory_ffns, operator_card, tmp_path: pathlib theory.heavy.num_flavs_init = nf0 path.unlink(missing_ok=True) opgrid = legacy.Runner(theory, operator_card, path=path).op_grid - opg = opgrid.compute(3) - theory_update["XIF"] = 1.0 + epsilon + opg = opgrid.compute() + theory.xif = 1.0 + epsilon path.unlink(missing_ok=True) sv_opgrid = legacy.Runner(theory, operator_card, path=path).op_grid - sv_opg = sv_opgrid.compute(3) + sv_opg = sv_opgrid.compute() np.testing.assert_allclose( - opg[3]["operator"], sv_opg[3]["operator"], atol=2.5 * epsilon + opg[(9, 4)]["operator"], sv_opg[(9, 4)]["operator"], atol=2.5 * epsilon ) diff --git a/tests/eko/evolution_operator/test_init.py b/tests/eko/evolution_operator/test_init.py index e0a65b33a..642de5bb7 100644 --- a/tests/eko/evolution_operator/test_init.py +++ b/tests/eko/evolution_operator/test_init.py @@ -14,6 +14,7 @@ from eko.kernels import non_singlet as ns from eko.kernels import non_singlet_qed as qed_ns from eko.kernels import singlet as s +from eko.matchings import Segment def test_quad_ker_errors(): @@ -59,6 +60,7 @@ def test_quad_ker(monkeypatch): monkeypatch.setattr(s, "dispatcher", lambda *args: np.identity(2)) params = [ ((1, 0), br.non_singlet_pids_map["ns+"], 0, "", 0.0, 0.0), + ((1, 0), br.non_singlet_pids_map["ns+"], 0, "", 0.123, 1.0), ((3, 1), br.non_singlet_pids_map["ns+u"], 0, "", 0.0, 0.0), ((1, 0), 100, 100, "", 0.123, 1.0), ((1, 0), 100, 21, "", 0.0, 0.0), @@ -190,7 +192,7 @@ def test_quad_ker(monkeypatch): class FakeCoupling: def __init__(self): self.alphaem_running = None - self.q2_ref = 0.0 + self.mu2_ref = 10.0 def a(self, scale_to=None, nf_to=None): return (0.1, 0.01) @@ -214,9 +216,7 @@ def test_labels(self): ev_op_iterations=1, ), fake_managers, - 3, - 1, - 2, + Segment(1, 2, 3), ) assert sorted(o.labels) == sorted(br.full_labels) o = Operator( @@ -229,9 +229,7 @@ def test_labels(self): ev_op_iterations=1, ), fake_managers, - 3, - 1, - 2, + Segment(1, 2, 3), ) assert sorted(o.labels) == [] @@ -246,9 +244,7 @@ def test_labels_qed(self): ev_op_iterations=1, ), fake_managers, - 3, - 1, - 2, + Segment(1, 2, 3), ) assert sorted(o.labels) == sorted(br.full_unified_labels) o = Operator( @@ -261,9 +257,7 @@ def test_labels_qed(self): 
ev_op_iterations=1, ), fake_managers, - 3, - 1, - 2, + Segment(1, 2, 3), ) assert sorted(o.labels) == [] @@ -282,9 +276,7 @@ def test_n_pools(self): ev_op_iterations=1, ), fake_managers, - 3, - 1, - 10, + Segment(1, 10, 3), ) assert o.n_pools == os.cpu_count() - excluded_cores @@ -296,7 +288,7 @@ def test_exponentiated(self, theory_ffns, operator_card, tmp_path): r = eko.runner.legacy.Runner(tcard, ocard, path=tmp_path / "eko.tar") g = r.op_grid # setup objs - o = Operator(g.config, g.managers, 3, 2.0, 10.0) + o = Operator(g.config, g.managers, Segment(2.0, 10.0, 3)) o.compute() self.check_lo(o) @@ -307,7 +299,7 @@ def test_compute_parallel(self, monkeypatch, theory_ffns, operator_card, tmp_pat r = eko.runner.legacy.Runner(tcard, ocard, path=tmp_path / "eko.tar") g = r.op_grid # setup objs - o = Operator(g.config, g.managers, 3, 2.0, 10.0) + o = Operator(g.config, g.managers, Segment(2.0, 10.0, 3)) # fake quad monkeypatch.setattr( scipy.integrate, "quad", lambda *args, **kwargs: np.random.rand(2) @@ -337,7 +329,7 @@ def test_compute_no_skip_sv( r = eko.runner.legacy.Runner(tcard, ocard, path=tmp_path / "eko.tar") g = r.op_grid # setup objs - o = Operator(g.config, g.managers, 3, 2.0, 2.0) + o = Operator(g.config, g.managers, Segment(2.0, 2.0, 3)) # fake quad v = 0.1234 monkeypatch.setattr( @@ -357,7 +349,7 @@ def test_compute(self, monkeypatch, theory_ffns, operator_card, tmp_path): ocard: OperatorCard = operator_card r = eko.runner.legacy.Runner(tcard, ocard, path=tmp_path / "eko.tar") g = r.op_grid - o = Operator(g.config, g.managers, 3, 2.0, 10.0) + o = Operator(g.config, g.managers, Segment(2.0, 10.0, 3)) # fake quad monkeypatch.setattr( scipy.integrate, "quad", lambda *args, **kwargs: np.random.rand(2) @@ -379,7 +371,7 @@ def test_compute(self, monkeypatch, theory_ffns, operator_card, tmp_path): # unity operators for n in range(1, 3 + 1): - o1 = Operator(g.config, g.managers, 3, 2.0, 2.0) + o1 = Operator(g.config, g.managers, Segment(2.0, 2.0, 3)) o1.config["order"] = (n, 0) o1.compute() for k in br.non_singlet_labels: @@ -393,7 +385,7 @@ def test_compute(self, monkeypatch, theory_ffns, operator_card, tmp_path): for n in range(1, 3 + 1): for qed in range(1, 2 + 1): g.config["order"] = (n, qed) - o1 = Operator(g.config, g.managers, 3, 2.0, 2.0) + o1 = Operator(g.config, g.managers, Segment(2.0, 2.0, 3)) # o1.config["order"] = (n, qed) o1.compute() for k in br.non_singlet_unified_labels: diff --git a/tests/eko/evolution_operator/test_ome.py b/tests/eko/evolution_operator/test_ome.py index 23ab65e9f..d7b1aa1ad 100644 --- a/tests/eko/evolution_operator/test_ome.py +++ b/tests/eko/evolution_operator/test_ome.py @@ -101,13 +101,75 @@ def test_quad_ker_errors(): # Test OME integration def test_quad_ker(monkeypatch): monkeypatch.setattr( - mellin, "Talbot_path", lambda *args: 2 + mellin, "Talbot_path", lambda *args: 3.0 ) # N=2 is a safe evaluation point monkeypatch.setattr( mellin, "Talbot_jac", lambda *args: complex(0, np.pi) ) # negate mellin prefactor monkeypatch.setattr(interpolation, "log_evaluate_Nx", lambda *args: 1) monkeypatch.setattr(interpolation, "evaluate_Nx", lambda *args: 1) + for is_log in [True, False]: + for order, p, t in [((3, 0), False, False), ((2, 0), False, True)]: + for sv_mode in [sv.Modes.expanded, sv.Modes.exponentiated]: + res_ns = quad_ker( + u=0, + order=order, + mode0=200, + mode1=200, + is_log=is_log, + logx=0.123, + areas=np.zeros(3), + backward_method=None, + a_s=0.0, + nf=3, + L=0.0, + sv_mode=sv_mode, + Lsv=0.0, + is_msbar=False, + is_polarized=p, + 
is_time_like=t, + ) + np.testing.assert_allclose(res_ns, 1.0) + res_s = quad_ker( + u=0, + order=order, + mode0=100, + mode1=100, + is_log=is_log, + logx=0.123, + areas=np.zeros(3), + backward_method=None, + a_s=0.0, + nf=3, + L=0.0, + sv_mode=sv_mode, + Lsv=0.0, + is_msbar=False, + is_polarized=p, + is_time_like=t, + ) + np.testing.assert_allclose(res_s, 1.0) + res_s = quad_ker( + u=0, + order=order, + mode0=100, + mode1=21, + is_log=is_log, + logx=0.0, + areas=np.zeros(3), + backward_method=None, + a_s=0.0, + nf=3, + L=0.0, + sv_mode=sv_mode, + Lsv=0.0, + is_msbar=False, + is_polarized=p, + is_time_like=t, + ) + np.testing.assert_allclose(res_s, 0.0) + + # test expanded intrisic inverse kernels zeros = np.zeros((2, 2)) monkeypatch.setattr( "ekore.operator_matrix_elements.unpolarized.space_like.A_non_singlet", @@ -118,91 +180,31 @@ def test_quad_ker(monkeypatch): "ekore.operator_matrix_elements.unpolarized.space_like.A_singlet", lambda *args: np.array([zeros, zeros, zeros]), ) - for is_log in [True, False]: - for order, p, t in [((3, 0), False, False), ((2, 0), False, True)]: + labels = [(200, 200), *br.singlet_labels] + for label in labels: + for sv_mode in [sv.Modes.expanded, sv.Modes.exponentiated]: res_ns = quad_ker( u=0, - order=order, - mode0=200, - mode1=200, - is_log=is_log, - logx=0.123, - areas=np.zeros(3), - backward_method=None, - a_s=0.0, - nf=3, - L=0.0, - sv_mode=sv.Modes.expanded, - Lsv=0.0, - is_msbar=False, - is_polarized=p, - is_time_like=t, - ) - np.testing.assert_allclose(res_ns, 1.0) - res_s = quad_ker( - u=0, - order=order, - mode0=100, - mode1=100, - is_log=is_log, + order=(3, 0), + mode0=label[0], + mode1=label[1], + is_log=True, logx=0.123, areas=np.zeros(3), - backward_method=None, - a_s=0.0, - nf=3, - L=0.0, - sv_mode=sv.Modes.expanded, - Lsv=0.0, - is_msbar=False, - is_polarized=p, - is_time_like=t, - ) - np.testing.assert_allclose(res_s, 1.0) - res_s = quad_ker( - u=0, - order=order, - mode0=100, - mode1=21, - is_log=is_log, - logx=0.0, - areas=np.zeros(3), - backward_method=None, + backward_method=InversionMethod.EXPANDED, a_s=0.0, nf=3, L=0.0, - sv_mode=sv.Modes.expanded, + sv_mode=sv_mode, Lsv=0.0, is_msbar=False, - is_polarized=p, - is_time_like=t, + is_polarized=False, + is_time_like=False, ) - np.testing.assert_allclose(res_s, 0.0) - - # test expanded intrisic inverse kernels - labels = [(200, 200), *br.singlet_labels] - for label in labels: - res_ns = quad_ker( - u=0, - order=(3, 0), - mode0=label[0], - mode1=label[1], - is_log=True, - logx=0.123, - areas=np.zeros(3), - backward_method=InversionMethod.EXPANDED, - a_s=0.0, - nf=3, - L=0.0, - sv_mode=sv.Modes.expanded, - Lsv=0.0, - is_msbar=False, - is_polarized=False, - is_time_like=False, - ) - if label[-1] == label[-2]: - np.testing.assert_allclose(res_ns, 1.0) - else: - np.testing.assert_allclose(res_ns, 0.0) + if label[-1] == label[-2]: + np.testing.assert_allclose(res_ns, 1.0) + else: + np.testing.assert_allclose(res_ns, 0.0) # test exact intrinsic inverse kernel labels.extend( diff --git a/tests/eko/io/test_access.py b/tests/eko/io/test_access.py new file mode 100644 index 000000000..86d24bf1f --- /dev/null +++ b/tests/eko/io/test_access.py @@ -0,0 +1,31 @@ +import pytest + +from eko.io import access + + +def test_writeable(tmp_path): + c = access.AccessConfigs(tmp_path, False, True) + assert c.read + c.assert_open() + assert c.write + c.assert_writeable() + + +def test_readable(tmp_path): + c = access.AccessConfigs(tmp_path, True, True) + assert c.read + c.assert_open() + assert not c.write + 
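The new `test_access.py` pins down the permission model: an `AccessConfigs` is built from a path, a read-only flag, and an open flag, and the two `assert_*` helpers raise the dedicated `ReadOnlyOperator`/`ClosedOperator` errors. A compact sketch of the resulting state table, in the same constructor order as the tests (passing `None` for the unused path is an assumption):

from eko.io import access

def describe(readonly, opened):
    # the path is not consulted by the flag properties (assumption)
    c = access.AccessConfigs(None, readonly, opened)
    return (c.read, c.write)

assert describe(False, True) == (True, True)    # writable
assert describe(True, True) == (True, False)    # read-only
assert describe(True, False) == (False, False)  # closed: nothing allowed
assert describe(False, False) == (False, False)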
with pytest.raises(access.ReadOnlyOperator): + c.assert_writeable() + + +def test_closed(tmp_path): + for ro in [True, False]: + c = access.AccessConfigs(tmp_path, ro, False) + assert not c.read + with pytest.raises(access.ClosedOperator): + c.assert_open() + assert not c.write + with pytest.raises(access.ClosedOperator): + c.assert_writeable() diff --git a/tests/eko/io/test_bases.py b/tests/eko/io/test_bases.py new file mode 100644 index 000000000..a0165243f --- /dev/null +++ b/tests/eko/io/test_bases.py @@ -0,0 +1,83 @@ +from dataclasses import fields + +import numpy as np + +from eko import basis_rotation as br +from eko import interpolation +from eko.io.bases import Bases + + +class TestBases: + XGRID_TEST = [1e-3, 1e-2, 1e-1, 1.0] + + def test_serialization(self): + rot = Bases(interpolation.XGrid(self.XGRID_TEST)) + + d = rot.raw + rot1 = rot.from_dict(d) + + for f in fields(Bases): + assert getattr(rot, f.name) == getattr(rot1, f.name) + + assert d["targetgrid"] is None + assert "_targetgrid" not in d + + def test_pids(self): + rot = Bases(interpolation.XGrid(self.XGRID_TEST)) + + # no check on correctness of value set + rot.inputpids = [0, 1] + # but the internal grid is unmodified + assert len(rot.pids) == 14 + # and fallback implemented for unset external bases + assert np.all(rot.targetpids == rot.pids) + + def test_grids(self): + rot = Bases(interpolation.XGrid(self.XGRID_TEST)) + + # no check on correctness of value set + rot.inputgrid = interpolation.XGrid([0.1, 1]) + # but the internal grid is unmodified + assert len(rot.xgrid) == len(self.XGRID_TEST) + # and fallback implemented for unset external grids + assert np.all(rot.targetgrid == rot.xgrid) + + def test_fallback(self): + xg = interpolation.XGrid([0.1, 1.0]) + r = Bases(xgrid=xg) + np.testing.assert_allclose(r.targetpids, r.pids) + np.testing.assert_allclose(r.inputpids, r.pids) + assert r.xgrid == xg + assert r.targetgrid == xg + assert r.inputgrid == xg + + def test_overwrite(self): + tpids = np.array([3, 4] + list(br.flavor_basis_pids[2:])) + ipids = np.array([5, 6] + list(br.flavor_basis_pids[2:])) + xg = interpolation.XGrid([0.1, 1.0]) + txg = interpolation.XGrid([0.2, 1.0]) + ixg = interpolation.XGrid([0.3, 1.0]) + r = Bases( + xgrid=xg, + _targetgrid=txg, + _inputgrid=ixg, + _targetpids=tpids, + _inputpids=ipids, + ) + np.testing.assert_allclose(r.targetpids, tpids) + np.testing.assert_allclose(r.inputpids, ipids) + assert r.xgrid == xg + assert r.targetgrid == txg + assert r.inputgrid == ixg + + def test_init(self): + xg = interpolation.XGrid([0.1, 1.0]) + txg = np.array([0.2, 1.0]) + ixg = {"grid": [0.3, 1.0], "log": True} + r = Bases(xgrid=xg, _targetgrid=txg, _inputgrid=ixg) + assert isinstance(r.xgrid, interpolation.XGrid) + assert isinstance(r.targetgrid, interpolation.XGrid) + assert isinstance(r.inputgrid, interpolation.XGrid) + assert r.xgrid == xg + assert r.targetgrid == interpolation.XGrid(txg) + assert r.inputgrid == interpolation.XGrid.load(ixg) diff --git a/tests/eko/io/test_init.py b/tests/eko/io/test_init.py index d73d7d97e..e5ca0e139 100644 --- a/tests/eko/io/test_init.py +++ b/tests/eko/io/test_init.py @@ -1,23 +1,9 @@ import pathlib -import numpy as np import pytest -import eko from eko import EKO -from eko import basis_rotation as br -from eko import interpolation -from eko.io import legacy, manipulate, runcards -from ekobox.mock import eko_identity -from tests.conftest import EKOFactory - - -def chk_keys(a, b): - """Check all keys are preserved""" - assert sorted(a.keys()) == 
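`Bases` (moved out of the run cards, see `test_runcards.py` below) keeps the fallback logic these tests exercise: the external target and input grids and pids default to the internal ones until explicitly overwritten, and overwriting never touches the internal grid. A minimal usage sketch, grounded in `test_fallback` and `test_grids`:

import numpy as np
from eko import interpolation
from eko.io.bases import Bases

xg = interpolation.XGrid([0.1, 1.0])
b = Bases(xgrid=xg)
# unset external bases fall back to the internal ones
assert b.targetgrid == xg and b.inputgrid == xg
np.testing.assert_allclose(b.targetpids, b.pids)

# overwriting only changes the external view, not the internal grid
b.inputgrid = interpolation.XGrid([0.5, 1.0])
assert b.xgrid == xg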
sorted(b.keys()) - for key, value in a.items(): - if isinstance(value, dict): - assert sorted(value.keys()) == sorted(b[key].keys()) +from eko.io import legacy class TestLegacy: @@ -27,213 +13,3 @@ def test_load_tar(self, out_v0, tmp_path: pathlib.Path): legacy.load_tar(out_v0, oppath) with EKO.read(oppath) as eko: assert eko.metadata.data_version == 0 - - -class TestManipulate: - def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): - # create object - muout = 10.0 - mu2out = muout**2 - xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) - eko_factory.operator.mugrid = [(muout, 5)] - eko_factory.operator.xgrid = xg - o1 = eko_factory.get() - lpids = 2 - o1[mu2out] = eko.io.Operator( - operator=eko_identity([1, lpids, len(xg), lpids, len(xg)])[0] - ) - xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, 11)) - # only target - otpath = tmp_path / "ot.tar" - o1.deepcopy(otpath) - with EKO.edit(otpath) as ot: - manipulate.xgrid_reshape(ot, xgp) - chk_keys(o1.raw, ot.raw) - assert ot[mu2out].operator.shape == (lpids, len(xgp), lpids, len(xg)) - ottpath = tmp_path / "ott.tar" - o1.deepcopy(ottpath) - with EKO.edit(ottpath) as ott: - with pytest.warns(Warning): - manipulate.xgrid_reshape(ott, xg) - chk_keys(o1.raw, ott.raw) - np.testing.assert_allclose(ott[mu2out].operator, o1[mu2out].operator) - - # only input - oipath = tmp_path / "oi.tar" - o1.deepcopy(oipath) - with EKO.edit(oipath) as oi: - manipulate.xgrid_reshape(oi, inputgrid=xgp) - assert oi[mu2out].operator.shape == (lpids, len(xg), lpids, len(xgp)) - chk_keys(o1.raw, oi.raw) - oiipath = tmp_path / "oii.tar" - o1.deepcopy(oiipath) - with EKO.edit(oiipath) as oii: - with pytest.warns(Warning): - manipulate.xgrid_reshape(oii, inputgrid=xg) - chk_keys(o1.raw, oii.raw) - np.testing.assert_allclose(oii[mu2out].operator, o1[mu2out].operator) - - # both - oitpath = tmp_path / "oit.tar" - o1.deepcopy(oitpath) - with EKO.edit(oitpath) as oit: - manipulate.xgrid_reshape(oit, xgp, xgp) - chk_keys(o1.raw, oit.raw) - op = eko_identity([1, 2, len(xgp), 2, len(xgp)]) - np.testing.assert_allclose(oit[mu2out].operator, op[0], atol=1e-10) - - # error - with pytest.raises(ValueError): - manipulate.xgrid_reshape(o1) - - def test_reshape_io(self, eko_factory: EKOFactory, tmp_path): - eko_factory.path = tmp_path / "eko.tar" - eko_factory.operator.configs.interpolation_polynomial_degree = 1 - # create object - o1 = eko_factory.get() - lpids = len(o1.rotations.pids) - path_copy = tmp_path / "eko_copy.tar" - o1.deepcopy(path_copy) - newxgrid = interpolation.XGrid([0.1, 1.0]) - inputpids = np.eye(lpids) - inputpids[:2, :2] = np.array([[1, -1], [1, 1]]) - with EKO.edit(path_copy) as o2: - manipulate.xgrid_reshape(o2, newxgrid, newxgrid) - manipulate.flavor_reshape(o2, inputpids=inputpids) - # reload - with EKO.read(path_copy) as o3: - chk_keys(o1.raw, o3.raw) - np.testing.assert_allclose(o3.rotations.inputgrid.raw, newxgrid.raw) - np.testing.assert_allclose(o3.rotations.targetgrid.raw, newxgrid.raw) - # since we use a general rotation, the inputpids are erased, - # leaving just as many zeros as PIDs, as placeholders for missing - # values - np.testing.assert_allclose( - o3.rotations.inputpids, [0] * len(o3.rotations.pids) - ) - # these has to be unchanged - np.testing.assert_allclose(o3.rotations.targetpids, o3.rotations.pids) - - def test_flavor_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): - # create object - xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) - muout = 10.0 - mu2out = muout**2 - 
eko_factory.operator.xgrid = xg - eko_factory.operator.mugrid = [(muout, 5)] - o1 = eko_factory.get() - lpids = len(o1.rotations.pids) - lx = len(xg) - o1[mu2out] = eko.io.Operator( - operator=eko_identity([1, lpids, lx, lpids, lx])[0], - error=None, - ) - # only target - target_r = np.eye(lpids) - target_r[:2, :2] = np.array([[1, -1], [1, 1]]) - tpath = tmp_path / "ot.tar" - ttpath = tmp_path / "ott.tar" - o1.deepcopy(tpath) - with EKO.edit(tpath) as ot: - manipulate.flavor_reshape(ot, target_r) - chk_keys(o1.raw, ot.raw) - assert ot[mu2out].operator.shape == (lpids, len(xg), lpids, len(xg)) - ot.deepcopy(ttpath) - with EKO.edit(ttpath) as ott: - manipulate.flavor_reshape(ott, np.linalg.inv(target_r)) - np.testing.assert_allclose(ott[mu2out].operator, o1[mu2out].operator) - with pytest.warns(Warning): - manipulate.flavor_reshape(ott, np.eye(lpids)) - chk_keys(o1.raw, ott.raw) - np.testing.assert_allclose(ott[mu2out].operator, o1[mu2out].operator) - - # only input - input_r = np.eye(lpids) - input_r[:2, :2] = np.array([[1, -1], [1, 1]]) - ipath = tmp_path / "oi.tar" - iipath = tmp_path / "oii.tar" - o1.deepcopy(ipath) - with EKO.edit(ipath) as oi: - manipulate.flavor_reshape(oi, inputpids=input_r) - chk_keys(o1.raw, oi.raw) - assert oi[mu2out].operator.shape == (lpids, len(xg), lpids, len(xg)) - oi.deepcopy(iipath) - with EKO.edit(iipath) as oii: - manipulate.flavor_reshape(oii, inputpids=np.linalg.inv(input_r)) - np.testing.assert_allclose(oii[mu2out].operator, o1[mu2out].operator) - with pytest.warns(Warning): - manipulate.flavor_reshape(oii, inputpids=np.eye(lpids)) - chk_keys(o1.raw, oii.raw) - np.testing.assert_allclose(oii[mu2out].operator, o1[mu2out].operator) - - # both - itpath = tmp_path / "oit.tar" - o1.deepcopy(itpath) - with EKO.edit(itpath) as oit: - manipulate.flavor_reshape(oit, target_r, input_r) - chk_keys(o1.raw, oit.raw) - op = eko_identity([1, lpids, len(xg), lpids, len(xg)]).copy() - np.testing.assert_allclose(oit[mu2out].operator, op[0], atol=1e-10) - # error - fpath = tmp_path / "fail.tar" - o1.deepcopy(fpath) - with pytest.raises(ValueError): - with EKO.edit(fpath) as of: - manipulate.flavor_reshape(of) - - def test_to_evol(self, eko_factory: EKOFactory, tmp_path): - xgrid = interpolation.XGrid([0.5, 1.0]) - mu_out = 2.0 - mu2_out = mu_out**2 - eko_factory.operator.mu0 = float(np.sqrt(1.0)) - eko_factory.operator.mugrid = [(mu_out, 4)] - eko_factory.operator.xgrid = xgrid - eko_factory.operator.configs.interpolation_polynomial_degree = 1 - eko_factory.operator.configs.interpolation_is_log = False - eko_factory.operator.configs.ev_op_max_order = (2, 0) - eko_factory.operator.configs.ev_op_iterations = 1 - eko_factory.operator.configs.inversion_method = runcards.InversionMethod.EXACT - o00 = eko_factory.get() - o01_path = tmp_path / "o01.tar" - o00.deepcopy(o01_path) - with EKO.edit(o01_path) as o01: - manipulate.to_evol(o01) - o10_path = tmp_path / "o10.tar" - o00.deepcopy(o10_path) - with EKO.edit(o10_path) as o10: - manipulate.to_evol(o10, False, True) - o11_path = tmp_path / "o11.tar" - o00.deepcopy(o11_path) - with EKO.edit(o11_path) as o11: - manipulate.to_evol(o11, True, True) - chk_keys(o00.raw, o11.raw) - - with EKO.edit(o01_path) as o01: - with EKO.edit(o10_path) as o10: - with EKO.read(o11_path) as o11: - # check the input rotated one - np.testing.assert_allclose( - o01.rotations.inputpids, br.rotate_flavor_to_evolution - ) - np.testing.assert_allclose( - o01.rotations.targetpids, br.flavor_basis_pids - ) - # rotate also target - manipulate.to_evol(o01, 
False, True)
-                    np.testing.assert_allclose(
-                        o01[mu2_out].operator, o11[mu2_out].operator
-                    )
-                    chk_keys(o00.raw, o01.raw)
-                    # check the target rotated one
-                    np.testing.assert_allclose(
-                        o10.rotations.inputpids, br.flavor_basis_pids
-                    )
-                    np.testing.assert_allclose(
-                        o10.rotations.targetpids, br.rotate_flavor_to_evolution
-                    )
-                    # rotate also input
-                    manipulate.to_evol(o10)
-                    np.testing.assert_allclose(
-                        o10[mu2_out].operator, o11[mu2_out].operator
-                    )
-                    chk_keys(o00.raw, o10.raw)
diff --git a/tests/eko/io/test_inventory.py b/tests/eko/io/test_inventory.py
new file mode 100644
index 000000000..38c5ae7b5
--- /dev/null
+++ b/tests/eko/io/test_inventory.py
@@ -0,0 +1,104 @@
+from dataclasses import dataclass
+
+import numpy as np
+import pytest
+
+from eko.io import access, inventory, items
+
+
+@dataclass(frozen=True)
+class FakeH(items.Header):
+    blub: int
+
+
+def test_contentless(tmp_path):
+    acw = inventory.AccessConfigs(tmp_path, False, True)
+    iw = inventory.Inventory(tmp_path, acw, FakeH, contentless=True, name="Bla")
+    assert "Bla" in str(iw)
+    one = FakeH(1)
+    assert one not in iw
+    assert len(list(tmp_path.glob("*"))) == 0
+    # set an element
+    iw[one] = "blub"
+    assert one in iw
+    assert iw[one] is None
+    ls = list(tmp_path.glob("*"))
+    assert len(iw) == 1
+    assert len(ls) == 1
+    assert ls[0].suffix == inventory.HEADER_EXT
+    # let's make a second read-only instance
+    acr = inventory.AccessConfigs(tmp_path, True, True)
+    ir = inventory.Inventory(tmp_path, acr, FakeH, contentless=True, name="noihaha")
+    assert "Bla" not in str(ir)
+    assert one not in ir
+    assert ir[one] is None
+    # deletion is empty for contentless
+    del iw[one]
+    assert one in iw
+    assert iw[one] is None
+    assert len(list(tmp_path.glob("*"))) == 1
+    iw.empty()
+    assert len(list(tmp_path.glob("*"))) == 1
+    # non-existent
+    two = FakeH(2)
+    assert two not in iw
+    with pytest.raises(inventory.LookupError):
+        iw[two]
+
+
+def test_contentfull(tmp_path):
+    def o():
+        return items.Operator(np.random.rand(2, 2, 2, 2))
+
+    acw = inventory.AccessConfigs(tmp_path, False, True)
+    iw = inventory.Inventory(tmp_path, acw, FakeH, name="Bla")
+    one = FakeH(1)
+    assert one not in iw
+    assert len(list(tmp_path.glob("*"))) == 0
+    # set an element
+    o_one = o()
+    iw[one] = o_one
+    assert one in iw
+    assert iw[one] is not None
+    assert len(iw) == 1
+    assert len(list(tmp_path.glob("*" + inventory.HEADER_EXT))) == 1
+    assert len(list(tmp_path.glob("*" + inventory.COMPRESSED_EXT))) == 1
+    # let's make a second read-only instance
+    acr = inventory.AccessConfigs(tmp_path, True, True)
+    ir = inventory.Inventory(tmp_path, acr, FakeH, name="noihaha")
+    assert "Bla" not in str(ir)
+    assert one not in ir
+    # if we sync we know the header is there
+    ~ir
+    assert one in ir
+    assert len(ir) == 1
+    # do an actual read to load
+    np.testing.assert_allclose(ir[one].operator, o_one.operator)
+    # but we cannot write
+    with pytest.raises(access.ReadOnlyOperator):
+        ir[one] = o()
+    # let's overwrite the op in the writable, so they diverge
+    o_one_p = o()
+    iw[one] = o_one_p
+    assert len(iw) == 1
+    np.testing.assert_allclose(iw.cache[one].operator, o_one_p.operator)
+    np.testing.assert_allclose(ir.cache[one].operator, o_one.operator)
+    # let's actually read again
+    ~ir
+    np.testing.assert_allclose(ir[one].operator, o_one_p.operator)
+    # non-existent
+    two = FakeH(2)
+    assert two not in ir
+    with pytest.raises(inventory.LookupError):
+        ir[two]
+
+
+def test_fs(tmp_path):
+    acw = inventory.AccessConfigs(tmp_path, False, True)
+    iw =
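Read together, these tests describe the `Inventory` disk format: every item gets a small header file (suffix `HEADER_EXT`) and, unless the inventory is `contentless`, a compressed operator file (suffix `COMPRESSED_EXT`) next to it; `~inv` re-syncs the in-memory cache with the folder. A small helper sketch for inspecting such a folder, assuming only the two extension constants exported by `eko.io.inventory`:

from pathlib import Path

from eko.io import inventory

def inventory_files(folder: Path):
    # split a folder into header and content files, as the tests above do
    headers = sorted(folder.glob("*" + inventory.HEADER_EXT))
    contents = sorted(folder.glob("*" + inventory.COMPRESSED_EXT))
    return headers, contents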
inventory.Inventory(tmp_path, acw, FakeH, contentless=True, name="Bla") + aa = "aa" + inventory.HEADER_EXT + ab = "ab" + inventory.HEADER_EXT + (tmp_path / aa).write_text("aa") + (tmp_path / ab).write_text("ab") + with pytest.raises(inventory.LookupError, match="many"): + iw.lookup("a", True) diff --git a/tests/eko/io/test_items.py b/tests/eko/io/test_items.py new file mode 100644 index 000000000..762b10e11 --- /dev/null +++ b/tests/eko/io/test_items.py @@ -0,0 +1,79 @@ +import io + +import lz4.frame +import numpy as np +import pytest + +from eko.io.items import Evolution, Matching, Operator, Target +from eko.matchings import Atlas +from eko.quantities.heavy_quarks import MatchingScales + + +def test_evolution(): + tc = Atlas(MatchingScales([1.5, 5.0, 170.0]), (1.65, 4)) + p = tc.matched_path((30, 5)) + he = Evolution.from_atlas(p[0], True) + assert he.origin == 1.65 + assert he.target == 5 + assert he.nf == 4 + assert he.cliff + assert he.as_atlas == p[0] + + +def test_matching(): + tc = Atlas(MatchingScales([1.5, 5.0, 170.0]), (1.65, 4)) + p = tc.matched_path((30, 5)) + hm = Matching.from_atlas(p[1]) + assert hm.hq == 5 + assert hm.scale == 5.0 + assert hm.as_atlas == p[1] + + +def test_target(): + ep = (10.0, 4) + ht = Target.from_ep(ep) + assert ht.scale == 10.0 + assert ht.nf == 4 + assert ht.ep == ep + + +class TestOperator: + def test_value_only(self): + v = np.random.rand(2, 2) + opv = Operator(operator=v) + assert opv.error is None + stream = io.BytesIO() + opv.save(stream) + stream.seek(0) + opv_ = Operator.load(stream) + np.testing.assert_allclose(opv.operator, opv_.operator) + np.testing.assert_allclose(v, opv_.operator) + assert opv_.error is None + + def test_value_and_error(self): + v, e = np.random.rand(2, 2, 2) + opve = Operator(operator=v, error=e) + stream = io.BytesIO() + opve.save(stream) + stream.seek(0) + opve_ = Operator.load(stream) + np.testing.assert_allclose(opve.operator, opve_.operator) + np.testing.assert_allclose(v, opve_.operator) + np.testing.assert_allclose(opve.error, opve_.error) + np.testing.assert_allclose(e, opve_.error) + + def test_load_error_is_not_lz4(self, monkeypatch): + stream = io.BytesIO() + with pytest.raises(RuntimeError, match="LZ4"): + Operator.load(stream) + + def test_load_error(self, monkeypatch): + # TODO see the other TODO at Operator.load + v, e = np.random.rand(2, 2, 2) + opve = Operator(operator=v, error=e) + stream = io.BytesIO() + opve.save(stream) + stream.seek(0) + monkeypatch.setattr(np, "load", lambda _: None) + with pytest.raises(ValueError): + Operator.load(stream) diff --git a/tests/eko/io/test_manipulate.py b/tests/eko/io/test_manipulate.py new file mode 100644 index 000000000..3431d93ce --- /dev/null +++ b/tests/eko/io/test_manipulate.py @@ -0,0 +1,255 @@ +import pathlib + +import numpy as np +import pytest + +import eko +from eko import EKO +from eko import basis_rotation as br +from eko import interpolation +from eko.io import manipulate, runcards +from ekobox.mock import eko_identity +from tests.conftest import EKOFactory + + +def chk_keys(a, b): + """Check all keys are preserved""" + assert sorted(a.keys()) == sorted(b.keys()) + for key, value in a.items(): + if isinstance(value, dict): + assert sorted(value.keys()) == sorted(b[key].keys()) + + +class TestManipulate: + def test_xgrid_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): + # create object + muout = 10.0 + mu2out = muout**2 + epout = (mu2out, 5) + xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21)) + eko_factory.operator.mugrid = [(muout, 
5)]
+        eko_factory.operator.xgrid = xg
+        o1 = eko_factory.get()
+        lpids = 2
+        o1[epout] = eko.io.Operator(
+            operator=eko_identity([1, lpids, len(xg), lpids, len(xg)])[0]
+        )
+        xgp = interpolation.XGrid(np.geomspace(1e-5, 1.0, 11))
+        # only target
+        otpath = tmp_path / "ot.tar"
+        o1.deepcopy(otpath)
+        with EKO.edit(otpath) as ot:
+            manipulate.xgrid_reshape(ot, xgp)
+            chk_keys(o1.raw, ot.raw)
+            assert ot[epout].operator.shape == (lpids, len(xgp), lpids, len(xg))
+        ottpath = tmp_path / "ott.tar"
+        o1.deepcopy(ottpath)
+        with EKO.edit(ottpath) as ott:
+            with pytest.warns(Warning):
+                manipulate.xgrid_reshape(ott, xg)
+            chk_keys(o1.raw, ott.raw)
+            np.testing.assert_allclose(ott[epout].operator, o1[epout].operator)
+
+        # only input
+        oipath = tmp_path / "oi.tar"
+        o1.deepcopy(oipath)
+        with EKO.edit(oipath) as oi:
+            manipulate.xgrid_reshape(oi, inputgrid=xgp)
+            assert oi[epout].operator.shape == (lpids, len(xg), lpids, len(xgp))
+            chk_keys(o1.raw, oi.raw)
+        oiipath = tmp_path / "oii.tar"
+        o1.deepcopy(oiipath)
+        with EKO.edit(oiipath) as oii:
+            with pytest.warns(Warning):
+                manipulate.xgrid_reshape(oii, inputgrid=xg)
+            chk_keys(o1.raw, oii.raw)
+            np.testing.assert_allclose(oii[epout].operator, o1[epout].operator)
+
+        # both
+        oitpath = tmp_path / "oit.tar"
+        o1.deepcopy(oitpath)
+        with EKO.edit(oitpath) as oit:
+            manipulate.xgrid_reshape(oit, xgp, xgp)
+            chk_keys(o1.raw, oit.raw)
+            op = eko_identity([1, 2, len(xgp), 2, len(xgp)])
+            np.testing.assert_allclose(oit[epout].operator, op[0], atol=1e-10)
+
+        # op error handling
+        ep2 = (25, 5)
+        o1[ep2] = eko.io.Operator(
+            operator=eko_identity([1, lpids, len(xg), lpids, len(xg)])[0],
+            error=0.1 * eko_identity([1, lpids, len(xg), lpids, len(xg)])[0],
+        )
+        ot2path = tmp_path / "ot2.tar"
+        o1.deepcopy(ot2path)
+        with EKO.edit(ot2path) as ot2:
+            manipulate.xgrid_reshape(ot2, xgp)
+            chk_keys(o1.raw, ot2.raw)
+            assert ot2[ep2].operator.shape == (lpids, len(xgp), lpids, len(xg))
+            assert ot2[epout].error is None
+            assert ot2[ep2].error is not None
+
+        # Python error
+        with pytest.raises(ValueError):
+            manipulate.xgrid_reshape(o1)
+
+    def test_reshape_io(self, eko_factory: EKOFactory, tmp_path):
+        eko_factory.path = tmp_path / "eko.tar"
+        eko_factory.operator.configs.interpolation_polynomial_degree = 1
+        # create object
+        o1 = eko_factory.get()
+        lpids = len(o1.bases.pids)
+        path_copy = tmp_path / "eko_copy.tar"
+        o1.deepcopy(path_copy)
+        newxgrid = interpolation.XGrid([0.1, 1.0])
+        inputpids = np.eye(lpids)
+        inputpids[:2, :2] = np.array([[1, -1], [1, 1]])
+        with EKO.edit(path_copy) as o2:
+            manipulate.xgrid_reshape(o2, newxgrid, newxgrid)
+            manipulate.flavor_reshape(o2, inputpids=inputpids)
+        # reload
+        with EKO.read(path_copy) as o3:
+            chk_keys(o1.raw, o3.raw)
+            np.testing.assert_allclose(o3.bases.inputgrid.raw, newxgrid.raw)
+            np.testing.assert_allclose(o3.bases.targetgrid.raw, newxgrid.raw)
+            # since we use a general rotation, the inputpids are erased,
+            # leaving just as many zeros as PIDs, as placeholders for missing
+            # values
+            np.testing.assert_allclose(o3.bases.inputpids, [0] * len(o3.bases.pids))
+            # these have to be unchanged
+            np.testing.assert_allclose(o3.bases.targetpids, o3.bases.pids)
+
+    def test_flavor_reshape(self, eko_factory: EKOFactory, tmp_path: pathlib.Path):
+        # create object
+        xg = interpolation.XGrid(np.geomspace(1e-5, 1.0, 21))
+        muout = 10.0
+        mu2out = muout**2
+        epout = (mu2out, 5)
+        eko_factory.operator.xgrid = xg
+        eko_factory.operator.mugrid = [(muout, 5)]
+        o1 = eko_factory.get()
+        lpids = len(o1.bases.pids)
+        lx = len(xg)
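Throughout this file the operator keys change from bare `mu2` values to evolution points `(mu2, nf)`, matching the `EKO.__getitem__` behavior tested in `test_struct.py` below. The flavor number is part of the key, so the same scale can carry distinct operators on different flavor patches; a trivial sketch of why the pair is needed:

ops = {}
ops[(100.0, 4)] = "op at nf=4"
ops[(100.0, 5)] = "op at nf=5"  # same mu^2, different flavor patch
assert len(ops) == 2            # the pair, not just mu^2, identifies an operator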
+ o1[epout] = eko.io.Operator( + operator=eko_identity([1, lpids, lx, lpids, lx])[0], + error=None, + ) + # only target + target_r = np.eye(lpids) + target_r[:2, :2] = np.array([[1, -1], [1, 1]]) + tpath = tmp_path / "ot.tar" + ttpath = tmp_path / "ott.tar" + o1.deepcopy(tpath) + with EKO.edit(tpath) as ot: + manipulate.flavor_reshape(ot, target_r) + chk_keys(o1.raw, ot.raw) + assert ot[epout].operator.shape == (lpids, len(xg), lpids, len(xg)) + ot.deepcopy(ttpath) + with EKO.edit(ttpath) as ott: + manipulate.flavor_reshape(ott, np.linalg.inv(target_r)) + np.testing.assert_allclose(ott[epout].operator, o1[epout].operator) + with pytest.warns(Warning): + manipulate.flavor_reshape(ott, np.eye(lpids)) + chk_keys(o1.raw, ott.raw) + np.testing.assert_allclose(ott[epout].operator, o1[epout].operator) + + # only input + input_r = np.eye(lpids) + input_r[:2, :2] = np.array([[1, -1], [1, 1]]) + ipath = tmp_path / "oi.tar" + iipath = tmp_path / "oii.tar" + o1.deepcopy(ipath) + with EKO.edit(ipath) as oi: + manipulate.flavor_reshape(oi, inputpids=input_r) + chk_keys(o1.raw, oi.raw) + assert oi[epout].operator.shape == (lpids, len(xg), lpids, len(xg)) + oi.deepcopy(iipath) + with EKO.edit(iipath) as oii: + manipulate.flavor_reshape(oii, inputpids=np.linalg.inv(input_r)) + np.testing.assert_allclose(oii[epout].operator, o1[epout].operator) + with pytest.warns(Warning): + manipulate.flavor_reshape(oii, inputpids=np.eye(lpids)) + chk_keys(o1.raw, oii.raw) + np.testing.assert_allclose(oii[epout].operator, o1[epout].operator) + + # both + itpath = tmp_path / "oit.tar" + o1.deepcopy(itpath) + with EKO.edit(itpath) as oit: + manipulate.flavor_reshape(oit, target_r, input_r) + chk_keys(o1.raw, oit.raw) + op = eko_identity([1, lpids, len(xg), lpids, len(xg)]).copy() + np.testing.assert_allclose(oit[epout].operator, op[0], atol=1e-10) + # error + fpath = tmp_path / "fail.tar" + o1.deepcopy(fpath) + with pytest.raises(ValueError): + with EKO.edit(fpath) as of: + manipulate.flavor_reshape(of) + + def test_to_evol(self, eko_factory: EKOFactory, tmp_path): + self._test_to_all_evol( + eko_factory, + tmp_path, + manipulate.to_evol, + br.rotate_flavor_to_evolution, + br.flavor_basis_pids, + ) + + def test_to_uni_evol(self, eko_factory: EKOFactory, tmp_path): + self._test_to_all_evol( + eko_factory, + tmp_path, + manipulate.to_uni_evol, + br.rotate_flavor_to_unified_evolution, + br.flavor_basis_pids, + ) + + def _test_to_all_evol( + self, eko_factory: EKOFactory, tmp_path, to_evol_fnc, rot_matrix, pids + ): + xgrid = interpolation.XGrid([0.5, 1.0]) + mu_out = 2.0 + mu2_out = mu_out**2 + nfout = 4 + epout = (mu2_out, nfout) + eko_factory.operator.mu0 = float(np.sqrt(1.0)) + eko_factory.operator.mugrid = [(mu_out, nfout)] + eko_factory.operator.xgrid = xgrid + eko_factory.operator.configs.interpolation_polynomial_degree = 1 + eko_factory.operator.configs.interpolation_is_log = False + eko_factory.operator.configs.ev_op_max_order = (2, 0) + eko_factory.operator.configs.ev_op_iterations = 1 + eko_factory.operator.configs.inversion_method = runcards.InversionMethod.EXACT + o00 = eko_factory.get() + o01_path = tmp_path / "o01.tar" + o00.deepcopy(o01_path) + with EKO.edit(o01_path) as o01: + to_evol_fnc(o01) + o10_path = tmp_path / "o10.tar" + o00.deepcopy(o10_path) + with EKO.edit(o10_path) as o10: + to_evol_fnc(o10, False, True) + o11_path = tmp_path / "o11.tar" + o00.deepcopy(o11_path) + with EKO.edit(o11_path) as o11: + to_evol_fnc(o11, True, True) + chk_keys(o00.raw, o11.raw) + + with EKO.edit(o01_path) as o01: + with 
EKO.edit(o10_path) as o10: + with EKO.read(o11_path) as o11: + # check the input rotated one + np.testing.assert_allclose(o01.bases.inputpids, rot_matrix) + np.testing.assert_allclose(o01.bases.targetpids, pids) + # rotate also target + to_evol_fnc(o01, False, True) + np.testing.assert_allclose(o01[epout].operator, o11[epout].operator) + chk_keys(o00.raw, o01.raw) + # check the target rotated one + np.testing.assert_allclose(o10.bases.inputpids, pids) + np.testing.assert_allclose(o10.bases.targetpids, rot_matrix) + # rotate also input + to_evol_fnc(o10) + np.testing.assert_allclose(o10[epout].operator, o11[epout].operator) + chk_keys(o00.raw, o10.raw) diff --git a/tests/eko/io/test_metadata.py b/tests/eko/io/test_metadata.py new file mode 100644 index 000000000..a5651e0c8 --- /dev/null +++ b/tests/eko/io/test_metadata.py @@ -0,0 +1,33 @@ +import logging + +import pytest +import yaml + +from eko.io import bases, metadata, paths + + +def test_metadata(tmp_path, caplog): + m = metadata.Metadata((1.0, 3), bases.Bases([0.1, 1.0])) + # errors + with caplog.at_level(logging.INFO): + m.update() + assert "no file" in caplog.text + with pytest.raises(RuntimeError): + m.path + # now modify + m.path = tmp_path + m.update() + p = paths.InternalPaths(tmp_path) + assert tmp_path.exists() + assert p.metadata.exists() + assert p.metadata.is_file() + assert "version" in p.metadata.read_text() + # change version + m.version = "0.0.0-a1~really1.0.0" + m.update() + # if I read back the thing should be what I set + mn = metadata.Metadata((1.0, 3), bases.Bases([0.1, 1.0])) + mm = metadata.Metadata.load(tmp_path) + assert m.path == tmp_path + assert mm.version != mn.version + assert mm.version == "0.0.0-a1~really1.0.0" diff --git a/tests/eko/io/test_runcards.py b/tests/eko/io/test_runcards.py index 237f814fb..cec067819 100644 --- a/tests/eko/io/test_runcards.py +++ b/tests/eko/io/test_runcards.py @@ -1,5 +1,4 @@ import copy -from dataclasses import fields from io import StringIO import numpy as np @@ -7,67 +6,70 @@ import yaml from banana.data.theories import default_card as theory_card -from eko import interpolation from eko.io import runcards as rc +from eko.io.bases import Bases from ekomark.data.operators import default_card as operator_card -class TestRotations: - XGRID_TEST = [1e-3, 1e-2, 1e-1, 1.0] - - def test_serialization(self): - rot = rc.Rotations(interpolation.XGrid(self.XGRID_TEST)) - - d = rot.raw - rot1 = rot.from_dict(d) - - for f in fields(rc.Rotations): - assert getattr(rot, f.name) == getattr(rot1, f.name) - - assert d["targetgrid"] is None - assert "_targetgrid" not in d - - def test_pids(self): - rot = rc.Rotations(interpolation.XGrid(self.XGRID_TEST)) - - # no check on correctness of value set - rot.inputpids = [0, 1] - # but the internal grid is unmodified - assert len(rot.pids) == 14 - # and fallback implemented for unset external bases - assert np.all(rot.targetpids == rot.pids) - - def test_grids(self): - rot = rc.Rotations(interpolation.XGrid(self.XGRID_TEST)) - - # no check on correctness of value set - rot.inputgrid = interpolation.XGrid([0.1, 1]) - # but the internal grid is unmodified - assert len(rot.xgrid) == len(self.XGRID_TEST) - # and fallback implemented for unset external grids - assert np.all(rot.targetgrid == rot.xgrid) - - def test_flavored_mu2grid(): - mugrid = list(range(0, 40, 5)) + mugrid = list(range(5, 40, 5)) masses = [10, 20, 30] ratios = [1, 1, 1] flavored = rc.flavored_mugrid(mugrid, masses, ratios) - assert pytest.approx([flav for _, flav in flavored]) == [3, 
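`flavored_mugrid` attaches the active flavor number to each scale by comparing it against the ratio-rescaled heavy-quark masses, which is why dropping the mu=0 point in the test below also drops the leading 3 from the expected list. A hedged re-implementation of the rule exactly as these tests pin it down (the boundary convention `mu >= m * r` is inferred from the expected values):

def flavored(mugrid, masses, ratios):
    # sketch: nf at scale mu counts the quarks whose matching scale lies below mu
    out = []
    for mu in mugrid:
        nf = 3 + sum(mu >= m * r for m, r in zip(masses, ratios))
        out.append((mu, nf))
    return out

assert [nf for _, nf in flavored(range(5, 40, 5), [10, 20, 30], [1, 1, 1])] == [
    3, 4, 4, 5, 5, 6, 6,
]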
3, 4, 4, 5, 5, 6, 6] + assert pytest.approx([flav for _, flav in flavored]) == [3, 4, 4, 5, 5, 6, 6] # check we can dump stream = StringIO() - ser = yaml.safe_dump(flavored, stream) + _ = yaml.safe_dump(flavored, stream) stream.seek(0) deser = yaml.safe_load(stream) assert len(flavored) == len(deser) - # after deserialization on is now list instead of tuple + # after deserialization one is now list instead of tuple for t, l in zip(flavored, deser): assert len(t) == len(l) assert t == tuple(l) +def test_runcards_opcard(): + # check conversion + tc = copy.deepcopy(theory_card) + oc = copy.deepcopy(operator_card) + tc["Q0"] = 2.0 + # mugrid + oc["mugrid"] = [2.0, 10.0] + _nt, no = rc.update(tc, oc) + assert isinstance(no, rc.OperatorCard) + assert len(no.evolgrid) == len(oc["mugrid"]) + assert len(no.mu2grid) == len(no.evolgrid) + assert no.evolgrid[0][-1] == 4 + assert no.evolgrid[1][-1] == 5 + np.testing.assert_allclose(no.mu0, tc["Q0"]) + np.testing.assert_allclose(no.mu20, tc["Q0"] ** 2.0) + assert len(no.pids) == 14 + del oc["mugrid"] + # or mu2grid + oc["mu2grid"] = [9.0, 30.0, 32.0] + _nt, no = rc.update(tc, oc) + assert isinstance(no, rc.OperatorCard) + assert len(no.evolgrid) == len(oc["mu2grid"]) + assert len(no.mu2grid) == len(no.evolgrid) + assert no.evolgrid[0][-1] == 4 + assert no.evolgrid[1][-1] == 5 + assert no.evolgrid[2][-1] == 5 + del oc["mu2grid"] + # or Q2grid + oc["Q2grid"] = [15.0, 130.0, 140.0, 1e5] + _nt, no = rc.update(tc, oc) + assert isinstance(no, rc.OperatorCard) + assert len(no.evolgrid) == len(oc["Q2grid"]) + assert len(no.mu2grid) == len(no.evolgrid) + assert no.evolgrid[0][-1] == 4 + assert no.evolgrid[1][-1] == 5 + assert no.evolgrid[2][-1] == 5 + assert no.evolgrid[3][-1] == 6 + + def test_runcards_ekomark(): # check conversion tc = copy.deepcopy(theory_card) @@ -79,3 +81,43 @@ def test_runcards_ekomark(): nnt, nno = rc.update(nt, no) assert nnt == nt assert nno == no + + +def test_runcards_quarkmass(): + tc = copy.deepcopy(theory_card) + tc["nfref"] = 5 + tc["IC"] = 1 + oc = copy.deepcopy(operator_card) + nt, no = rc.update(tc, oc) + assert nt.heavy.intrinsic_flavors == [4] + for _, scale in nt.heavy.masses: + assert np.isnan(scale) + m2s = rc.masses(nt, no.configs.evolution_method) + raw = rc.Legacy.heavies("m%s", tc) + raw2 = np.power(raw, 2.0) + np.testing.assert_allclose(m2s, raw2) + tc["HQ"] = "MSBAR" + tc["Qmc"] = raw[0] * 1.1 + tc["Qmb"] = raw[1] * 1.1 + tc["Qmt"] = raw[2] * 0.9 + nt, no = rc.update(tc, oc) + for _, scale in nt.heavy.masses: + assert not np.isnan(scale) + m2s = rc.masses(nt, no.configs.evolution_method) + for m1, m2 in zip(m2s, raw2): + assert not np.isclose(m1, m2) + tc["HQ"] = "Blub" + with pytest.raises(ValueError, match="mass scheme"): + _nt, _no = rc.update(tc, oc) + nt.heavy.masses_scheme = "Bla" + with pytest.raises(ValueError, match="mass scheme"): + _ms = rc.masses(nt, no.configs.evolution_method) + + +def test_legacy_fallback(): + assert rc.Legacy.fallback(1, 2, 3) == 1 + assert rc.Legacy.fallback(None, 2, 3) == 2 + assert rc.Legacy.fallback(None, 2, 3, default=7) == 2 + assert rc.Legacy.fallback(None, None, 3) == 3 + assert rc.Legacy.fallback(None, None, None) is None + assert rc.Legacy.fallback(None, None, None, default=7) == 7 diff --git a/tests/eko/io/test_struct.py b/tests/eko/io/test_struct.py index 67b4d3347..b7ff9877b 100644 --- a/tests/eko/io/test_struct.py +++ b/tests/eko/io/test_struct.py @@ -6,93 +6,14 @@ import pytest import yaml -from eko import EKO -from eko import basis_rotation as br -from eko import 
interpolation +from eko import EKO, interpolation from eko.io import struct +from eko.io.items import Target from tests.conftest import EKOFactory -class TestOperator: - def test_value_only(self): - v = np.random.rand(2, 2) - opv = struct.Operator(operator=v) - assert opv.error is None - for compress in (True, False): - stream = io.BytesIO() - opv.save(stream, compress) - stream.seek(0) - opv_ = struct.Operator.load(stream, compress) - np.testing.assert_allclose(opv.operator, opv_.operator) - np.testing.assert_allclose(v, opv_.operator) - assert opv_.error is None - - def test_value_and_error(self): - v, e = np.random.rand(2, 2, 2) - opve = struct.Operator(operator=v, error=e) - for compress in (True, False): - stream = io.BytesIO() - opve.save(stream, compress) - stream.seek(0) - opve_ = struct.Operator.load(stream, compress) - np.testing.assert_allclose(opve.operator, opve_.operator) - np.testing.assert_allclose(v, opve_.operator) - np.testing.assert_allclose(opve.error, opve_.error) - np.testing.assert_allclose(e, opve_.error) - - def test_load_error(self, monkeypatch): - # We might consider dropping this exception since np.load will always - # return a array (or fail on it's own) - stream = io.BytesIO() - monkeypatch.setattr(np, "load", lambda _: None) - with pytest.raises(ValueError): - struct.Operator.load(stream, False) - - -class TestRotations: - def test_fallback(self): - xg = interpolation.XGrid([0.1, 1.0]) - r = struct.Rotations(xgrid=xg) - np.testing.assert_allclose(r.targetpids, r.pids) - np.testing.assert_allclose(r.inputpids, r.pids) - assert r.xgrid == xg - assert r.targetgrid == xg - assert r.inputgrid == xg - - def test_overwrite(self): - tpids = np.array([3, 4] + list(br.flavor_basis_pids[2:])) - ipids = np.array([5, 6] + list(br.flavor_basis_pids[2:])) - xg = interpolation.XGrid([0.1, 1.0]) - txg = interpolation.XGrid([0.2, 1.0]) - ixg = interpolation.XGrid([0.3, 1.0]) - r = struct.Rotations( - xgrid=xg, - _targetgrid=txg, - _inputgrid=ixg, - _targetpids=tpids, - _inputpids=ipids, - ) - np.testing.assert_allclose(r.targetpids, tpids) - np.testing.assert_allclose(r.inputpids, ipids) - assert r.xgrid == xg - assert r.targetgrid == txg - assert r.inputgrid == ixg - - def test_init(self): - xg = interpolation.XGrid([0.1, 1.0]) - txg = np.array([0.2, 1.0]) - ixg = {"grid": [0.3, 1.0], "log": True} - r = struct.Rotations(xgrid=xg, _targetgrid=txg, _inputgrid=ixg) - assert isinstance(r.xgrid, interpolation.XGrid) - assert isinstance(r.targetgrid, interpolation.XGrid) - assert isinstance(r.inputgrid, interpolation.XGrid) - assert r.xgrid == xg - assert r.targetgrid == interpolation.XGrid(txg) - assert r.inputgrid == interpolation.XGrid.load(ixg) - - class TestEKO: - def test_new_error(self, tmp_path: pathlib.Path): + def test_new_error(self, tmp_path: pathlib.Path, theory_card, operator_card): # try to write to a file different from bla no_tar_path = tmp_path / "Blub.bla" no_tar_path.touch() @@ -104,23 +25,34 @@ def test_new_error(self, tmp_path: pathlib.Path): tar.add(no_tar_path) with pytest.raises(FileExistsError, match="Blub.tar"): struct.EKO.create(exists_path) + for args in [(None, None), (theory_card, None), (None, operator_card)]: + with pytest.raises(RuntimeError, match="missing"): + with struct.EKO.create(tmp_path / "Blub2.tar") as builder: + eko = builder.load_cards(*args).build() def test_load_error(self, tmp_path): # try to read from a non-tar path no_tar_path = tmp_path / "Blub.tar" no_tar_path.write_text("Blub", encoding="utf-8") - with pytest.raises(ValueError): 
+ with pytest.raises(ValueError, match="tar"): struct.EKO.read(no_tar_path) + with pytest.raises(ValueError, match="file mode"): + struct.EKO.open(no_tar_path, "ü") def test_properties(self, eko_factory: EKOFactory): mu = 10.0 - mugrid = [(mu, 5)] + nf = 5 + mugrid = [(mu, nf)] eko_factory.operator.mugrid = mugrid eko = eko_factory.get() assert hasattr(eko.theory_card.heavy, "masses") assert hasattr(eko.operator_card, "debug") np.testing.assert_allclose(eko.mu2grid, [mu**2]) - assert mu**2 in eko + ep2 = (mu**2, nf) + assert ep2 in eko + assert eko[ep2] is not None + del eko.operators + assert ep2 not in eko.operators.cache default_grid = eko.operator_card.xgrid assert eko.xgrid == default_grid xg = interpolation.XGrid([0.1, 1.0]) @@ -128,68 +60,81 @@ def test_properties(self, eko_factory: EKOFactory): assert eko.xgrid == xg assert "metadata" in eko.raw # check we can dump and reload + eko.assert_permissions(True, True) stream = io.StringIO() yaml.safe_dump(eko.raw, stream) stream.seek(0) raw_eko = yaml.safe_load(stream) assert "metadata" in raw_eko + assert "read" in eko.permissions + assert "write" in eko.permissions + np.testing.assert_allclose(eko.mu20, 1.65**2.0) + assert len(eko.evolgrid) == 1 + np.testing.assert_allclose(eko.evolgrid[0][0], mu**2.0) + assert eko.evolgrid[0][1] == nf + # __delattr__ + eko.blub = "bla" + del eko.blub + with pytest.raises(AttributeError, match="blub"): + eko.blub def test_ops(self, eko_factory: EKOFactory): mu = 10.0 mu2 = mu**2 - mugrid = [(mu, 5)] + nf = 5 + ep = (mu2, nf) + mugrid = [(mu, nf)] eko_factory.operator.mugrid = mugrid eko = eko_factory.get() v = np.random.rand(2, 2) opv = struct.Operator(operator=v) - # try setting not an operator - with pytest.raises(ValueError): - eko[mu2] = "bla" # approx - eko[mu2] = opv - assert eko.approx(2 * mu2) is None - assert eko.approx(mu2 + 1.0, atol=2) == mu2 - eko[mu2 + 1.0] = opv + eko[ep] = opv + assert eko.approx((2 * mu2, nf)) is None + assert eko.approx((mu2 + 1.0, nf), atol=2)[0] == mu2 + eko[(mu2 + 1.0, nf)] = opv with pytest.raises(ValueError): - eko.approx(mu2 + 0.5, atol=2) + eko.approx((mu2 + 0.5, nf), atol=2) # iterate - for q2, q2eko in zip((mu2, mu2 + 1.0), eko): - assert q2 == q2eko - np.testing.assert_allclose(v, eko[q2].operator) - for q2, (q2eko, op) in zip((mu2, mu2 + 1.0), eko.items()): - assert q2 == q2eko + for mu2_, ep in zip((mu2, mu2 + 1.0), eko): + assert mu2_ == ep[0] + np.testing.assert_allclose(v, eko[(mu2, nf)].operator) + for mu2_, (mu2eko, op) in zip((mu2, mu2 + 1.0), eko.items()): + assert mu2_ == mu2eko[0] np.testing.assert_allclose(v, op.operator) # getter with pytest.raises(ValueError): - eko[mu2 + 2.0] - with eko.operator(mu2) as op: + eko[mu2 + 2.0, nf] + with eko.operator(ep) as op: np.testing.assert_allclose(v, op.operator) # overwrite vv = np.random.rand(2, 2) opvv = struct.Operator(operator=vv) - eko[mu2 + 1.0] = opvv - np.testing.assert_allclose(vv, eko[mu2 + 1.0].operator) + eko[mu2 + 1.0, nf] = opvv + np.testing.assert_allclose(vv, eko[mu2 + 1.0, nf].operator) def test_copy(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): mu = 10.0 mu2 = mu**2 - mugrid = [(mu, 5)] + nf = 5 + ep = (mu2, nf) + mugrid = [(mu, nf)] eko_factory.operator.mugrid = mugrid eko1 = eko_factory.get() v = np.random.rand(2, 2) opv = struct.Operator(operator=v) - eko1[mu2] = opv - np.testing.assert_allclose(eko1[mu2].operator, v) + eko1[ep] = opv + np.testing.assert_allclose(eko1[ep].operator, v) p = tmp_path / "eko2.tar" eko1.deepcopy(p) with EKO.edit(p) as eko2: - 
np.testing.assert_allclose(eko1[mu2].operator, v) - np.testing.assert_allclose(eko2[mu2].operator, v) + np.testing.assert_allclose(eko1[ep].operator, v) + np.testing.assert_allclose(eko2[ep].operator, v) vv = np.random.rand(2, 2) opvv = struct.Operator(operator=vv) - eko2[mu2] = opvv - np.testing.assert_allclose(eko1[mu2].operator, v) - np.testing.assert_allclose(eko2[mu2].operator, vv) + eko2[ep] = opvv + np.testing.assert_allclose(eko1[ep].operator, v) + np.testing.assert_allclose(eko2[ep].operator, vv) # dump does not happen before closing, unless explicitly called, and # without a dump the path would be empty eko2.dump() @@ -198,46 +143,44 @@ def test_copy(self, eko_factory: EKOFactory, tmp_path: pathlib.Path): eko2_ = struct.EKO.read(p) assert eko2.raw == eko2_.raw - -class TestLegacy: def test_items(self, eko_factory: EKOFactory): """Test autodump, autoload, and manual unload.""" eko = eko_factory.get() v = np.random.rand(2, 2) opv = struct.Operator(operator=v) - for mu2 in eko.operator_card.mu2grid: - eko[mu2] = opv + for ep in eko.operator_card.evolgrid: + eko[ep] = opv - mu2 = next(iter(eko.mu2grid)) + ep = next(iter(eko)) # unload - eko._operators[mu2] = None + eko.operators.cache[Target.from_ep(ep)] = None # test autoloading - assert isinstance(eko[mu2], struct.Operator) - assert isinstance(eko._operators[mu2], struct.Operator) + assert isinstance(eko[ep], struct.Operator) + assert isinstance(eko.operators[Target.from_ep(ep)], struct.Operator) - del eko[mu2] + del eko[ep] - assert eko._operators[mu2] is None + assert eko.operators.cache[Target.from_ep(ep)] is None - def test_iter(self, eko_factory): + def test_iter(self, eko_factory: EKOFactory): """Test managed iteration.""" eko_factory.operator.mugrid = [(3.0, 4), (20.0, 5), (300.0, 6)] eko = eko_factory.get() - mu2prev = None - for mu2, op in eko.items(): - if mu2prev is not None: - assert eko._operators[mu2prev] is None + epprev = None + for ep, op in eko.items(): + if epprev is not None: + assert eko.operators.cache[Target.from_ep(epprev)] is None assert isinstance(op, struct.Operator) - mu2prev = mu2 + epprev = ep - def test_context_operator(self, eko_factory): + def test_context_operator(self, eko_factory: EKOFactory): """Test automated handling through context.""" eko = eko_factory.get() - mu2 = eko.mu2grid[0] + ep = next(iter(eko)) - with eko.operator(mu2) as op: + with eko.operator(ep) as op: assert isinstance(op, struct.Operator) - assert eko._operators[mu2] is None + assert eko.operators.cache[Target.from_ep(ep)] is None diff --git a/tests/eko/kernels/test_ns.py b/tests/eko/kernels/test_ns.py index 7b3c16a3c..013fc5433 100644 --- a/tests/eko/kernels/test_ns.py +++ b/tests/eko/kernels/test_ns.py @@ -202,8 +202,9 @@ def test_ode_n3lo(): np.testing.assert_allclose(lhs, rhs) -def test_error(): - with pytest.raises(ValueError): +def test_error(monkeypatch): + monkeypatch.setattr("eko.beta.beta_qcd", lambda *_args: 1.0) + with pytest.raises(NotImplementedError, match="order is not implemented"): ns.dispatcher((5, 0), "iterate-exact", np.random.rand(3) + 0j, 0.2, 0.1, 3, 10) with pytest.raises(NotImplementedError): ad.gamma_ns((2, 0), 10202, 1, 3) diff --git a/tests/eko/runner/__init__.py b/tests/eko/runner/__init__.py index e69de29bb..c8ec5b854 100644 --- a/tests/eko/runner/__init__.py +++ b/tests/eko/runner/__init__.py @@ -0,0 +1,24 @@ +import numpy as np + + +def check_shapes(o, txs, ixs, theory_card, operators_card): + tpids = len(o.bases.targetpids) + ipids = len(o.bases.inputpids) + op_shape = (tpids, len(txs), ipids, 
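The bare `~o.operators` line in the rewritten `check_shapes` below (and `~ir` in `test_inventory.py` above) is not a numeric negation: `Inventory` appears to overload `__invert__` as a terse "sync the cache with disk" verb. A sketch of that pattern in isolation, with hypothetical names:

class Synced:
    def __init__(self):
        self.cache = {}

    def sync(self):
        # hypothetical stand-in for scanning header files into the cache
        self.cache["loaded"] = True

    def __invert__(self):
        # make `~obj` read as "refresh from storage"; the return value is unused
        self.sync()

inv = Synced()
~inv
assert inv.cache["loaded"]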
len(ixs)) + + # check output = input + np.testing.assert_allclose(o.xgrid.raw, operators_card.xgrid.raw) + # targetgrid and inputgrid in the opcard are now ignored, we are testing this + np.testing.assert_allclose( + o.bases.targetgrid.raw, + txs.raw, + ) + np.testing.assert_allclose(o.bases.inputgrid.raw, ixs.raw) + np.testing.assert_allclose(o.mu20, operators_card.mu20) + # check available operators + ~o.operators + assert len(o.mu2grid) == len(operators_card.mu2grid) + assert list(o.mu2grid) == operators_card.mu2grid + for _, ops in o.items(): + assert ops.operator.shape == op_shape + assert ops.error.shape == op_shape diff --git a/tests/eko/runner/conftest.py b/tests/eko/runner/conftest.py new file mode 100644 index 000000000..a1516b2a7 --- /dev/null +++ b/tests/eko/runner/conftest.py @@ -0,0 +1,38 @@ +from pathlib import Path + +import numpy as np +import pytest + +from eko import EKO +from eko.io.items import Operator +from eko.io.runcards import OperatorCard, TheoryCard +from eko.runner import commons, recipes + + +@pytest.fixture +def neweko(theory_card: TheoryCard, operator_card: OperatorCard, tmp_path: Path): + path = tmp_path / "eko.tar" + with EKO.create(path) as builder: + yield builder.load_cards(theory_card, operator_card).build() + + path.unlink(missing_ok=True) + + +@pytest.fixture +def identity(neweko: EKO): + xs = len(neweko.xgrid.raw) + flavs = len(neweko.bases.pids) + return Operator(operator=np.eye(xs * flavs).reshape((xs, flavs, xs, flavs))) + + +@pytest.fixture +def ekoparts(neweko: EKO, identity: Operator): + atlas = commons.atlas(neweko.theory_card, neweko.operator_card) + neweko.load_recipes(recipes.create(neweko.operator_card.evolgrid, atlas)) + + for rec in neweko.recipes: + neweko.parts[rec] = identity + for rec in neweko.recipes_matching: + neweko.parts_matching[rec] = identity + + return neweko diff --git a/tests/eko/runner/test_compute.py b/tests/eko/runner/test_compute.py deleted file mode 100644 index b00a3e362..000000000 --- a/tests/eko/runner/test_compute.py +++ /dev/null @@ -1,9 +0,0 @@ -from eko.runner.compute import compute -from eko.runner.recipes import Recipe - - -def test_compute(): - recipe = Recipe("ciao") - part = compute(recipe) - - assert hasattr(part, "operator") diff --git a/tests/eko/runner/test_init.py b/tests/eko/runner/test_init.py index 63037c2a7..ff5985834 100644 --- a/tests/eko/runner/test_init.py +++ b/tests/eko/runner/test_init.py @@ -11,10 +11,14 @@ def compute(self): self.path.write_text("output", encoding="utf-8") +def mock_solve(th, op, path): + return MockRunner(th, op, path).compute() + + def test_run(monkeypatch, tmp_path: pathlib.Path): # just test, that it is a shortcut to 'compute' path = tmp_path / "eko.tar" - monkeypatch.setattr(eko.runner.legacy, "Runner", MockRunner) + monkeypatch.setattr(eko, "solve", mock_solve) eko.solve({}, {}, path=path) out = path.read_text(encoding="utf-8") assert out == "output" diff --git a/tests/eko/runner/test_legacy.py b/tests/eko/runner/test_legacy.py index 6d4f0f88a..60395acc5 100644 --- a/tests/eko/runner/test_legacy.py +++ b/tests/eko/runner/test_legacy.py @@ -9,6 +9,8 @@ from eko.io.runcards import TheoryCard from eko.quantities.heavy_quarks import QuarkMassScheme +from . 
import check_shapes
+

 def test_raw(theory_card, operator_card, tmp_path):
     """we don't check the content here, but only the shape"""
@@ -44,37 +46,13 @@ class FakeEM(enum.Enum):
     check_shapes(eko_, eko_.xgrid, eko_.xgrid, theory_card, operator_card)


-def check_shapes(o, txs, ixs, theory_card, operators_card):
-    tpids = len(o.rotations.targetpids)
-    ipids = len(o.rotations.inputpids)
-    op_shape = (tpids, len(txs), ipids, len(ixs))
-
-    # check output = input
-    np.testing.assert_allclose(o.xgrid.raw, operators_card.xgrid.raw)
-    # targetgrid and inputgrid in the opcard are now ignored, we are testing this
-    np.testing.assert_allclose(
-        o.rotations.targetgrid.raw,
-        txs.raw,
-    )
-    np.testing.assert_allclose(o.rotations.inputgrid.raw, ixs.raw)
-    np.testing.assert_allclose(o.mu20, operators_card.mu20)
-    # check available operators
-    assert len(o.mu2grid) == len(operators_card.mu2grid)
-    assert list(o.mu2grid) == operators_card.mu2grid
-    for _, ops in o.items():
-        assert ops.operator.shape == op_shape
-        assert ops.error.shape == op_shape
-
-
 def test_vfns(theory_ffns, operator_card, tmp_path):
-    # change targetpids
     path = tmp_path / "eko.tar"
     tc: TheoryCard = theory_ffns(3)
     oc = copy.deepcopy(operator_card)
     tc.heavy.matching_ratios.c = 1.0
     tc.heavy.matching_ratios.b = 1.0
     tc.order = (2, 0)
-    oc.debug.skip_non_singlet = False
     r = eko.runner.legacy.Runner(tc, oc, path=path)
     r.compute()
     with EKO.read(path) as eko_:
diff --git a/tests/eko/runner/test_managed.py b/tests/eko/runner/test_managed.py
new file mode 100644
index 000000000..e75fc8676
--- /dev/null
+++ b/tests/eko/runner/test_managed.py
@@ -0,0 +1,31 @@
+import copy
+
+import numpy as np
+
+from eko import EKO
+from eko.io.runcards import TheoryCard
+from eko.runner.managed import solve
+
+from . import check_shapes
+
+
+def test_raw(theory_card, operator_card, tmp_path):
+    """we don't check the content here, but only the shape"""
+    path = tmp_path / "eko.tar"
+    tc = theory_card
+    oc = operator_card
+    solve(tc, oc, path=path)
+    with EKO.read(path) as eko_:
+        check_shapes(eko_, eko_.xgrid, eko_.xgrid, tc, oc)
+
+
+def test_vfns(theory_ffns, operator_card, tmp_path):
+    path = tmp_path / "eko.tar"
+    tc: TheoryCard = theory_ffns(3)
+    oc = copy.deepcopy(operator_card)
+    tc.heavy.matching_ratios.c = 1.0
+    tc.heavy.matching_ratios.b = 1.0
+    tc.order = (2, 0)
+    solve(tc, oc, path=path)
+    with EKO.read(path) as eko_:
+        check_shapes(eko_, eko_.xgrid, eko_.xgrid, tc, oc)
diff --git a/tests/eko/runner/test_operators.py b/tests/eko/runner/test_operators.py
new file mode 100644
index 000000000..8721fca99
--- /dev/null
+++ b/tests/eko/runner/test_operators.py
@@ -0,0 +1,36 @@
+import numpy as np
+
+from eko.io.items import Operator
+from eko.io.struct import EKO
+from eko.runner.operators import join, retrieve
+
+
+def test_retrieve(ekoparts: EKO):
+    evhead, evop = next(iter(ekoparts.parts.cache.items()))
+    matchhead, matchop = next(iter(ekoparts.parts_matching.cache.items()))
+
+    els = retrieve([evhead] * 5, ekoparts.parts, ekoparts.parts_matching)
+    assert len(els) == 5
+    assert all(isinstance(el, Operator) for el in els)
+
+    els = retrieve(
+        [evhead, matchhead, matchhead], ekoparts.parts, ekoparts.parts_matching
+    )
+    assert len(els) == 3
+    assert all(isinstance(el, Operator) for el in els)
+
+
+def test_join(identity: Operator):
+    """Just check a trivial property: product of identities is identity.
+
+    In principle the join operation should respect all the properties of the
+    matrix product, but there are not so many sensible rank-4 operators.
+ + """ + linear_size = np.product(identity.operator.shape[:2]) + for n in range(1, 8, 3): + res = join([identity for _ in range(n)]) + assert res.error is None + np.testing.assert_allclose( + res.operator.reshape(linear_size, linear_size), np.eye(linear_size) + ) diff --git a/tests/eko/runner/test_recipes.py b/tests/eko/runner/test_recipes.py index 581ea3a8d..59b1a8855 100644 --- a/tests/eko/runner/test_recipes.py +++ b/tests/eko/runner/test_recipes.py @@ -1,19 +1,43 @@ -from pathlib import Path +from typing import List -import pytest +from eko.io.items import Evolution, Matching +from eko.io.types import EvolutionPoint +from eko.matchings import Atlas +from eko.quantities.heavy_quarks import MatchingScales +from eko.runner.recipes import create, elements -from eko import EKO -from eko.io.runcards import OperatorCard, TheoryCard +SCALES = MatchingScales([10.0, 20.0, 30.0]) +ATLAS = Atlas(SCALES, (50.0, 5)) -@pytest.fixture -def neweko(theory_card: TheoryCard, operator_card: OperatorCard, tmp_path: Path): - path = tmp_path / "eko.tar" - with EKO.create(path) as builder: - yield builder.load_cards(theory_card, operator_card).build() +def test_elements(): + onestep = elements((60.0, 5), ATLAS) + assert len(onestep) == 1 + assert isinstance(onestep[0], Evolution) + assert not onestep[0].cliff - path.unlink(missing_ok=True) + backandforth = elements((60.0, 6), ATLAS) + assert len(backandforth) == 3 + assert isinstance(backandforth[0], Evolution) + assert backandforth[0].cliff + assert isinstance(backandforth[1], Matching) + assert not backandforth[1].inverse + down = elements((5.0, 3), ATLAS) + assert all([isinstance(el, Evolution) for i, el in enumerate(down) if i % 2 == 0]) + assert all([isinstance(el, Matching) for i, el in enumerate(down) if i % 2 == 1]) -def test_create(neweko: EKO): - pass + +def test_create(): + evolgrid: List[EvolutionPoint] = [(60.0, 5)] + + recs = create(evolgrid, ATLAS) + assert len(recs) == 1 + + evolgrid.append((60.0, 6)) + recs = create(evolgrid, ATLAS) + assert len(recs) == 1 + 3 + + evolgrid.append((70.0, 6)) + recs = create(evolgrid, ATLAS) + assert len(recs) == 1 + 3 + 1 diff --git a/tests/eko/test_couplings.py b/tests/eko/test_couplings.py index 14f543dfe..03e542bed 100644 --- a/tests/eko/test_couplings.py +++ b/tests/eko/test_couplings.py @@ -70,7 +70,7 @@ def test_init(self): hqm_scheme=QuarkMassScheme.POLE, thresholds_ratios=[1.0, 1.0, 1.0], ) - assert sc.q2_ref == muref**2 + assert sc.mu2_ref == muref**2 assert sc.a_ref[0] == alpharef[0] / 4.0 / np.pi assert sc.a(muref**2)[0] == alpharef[0] / (4.0 * np.pi) assert sc.a_ref[1] == alpharef[1] / 4.0 / np.pi @@ -226,7 +226,10 @@ def test_exact(self): for thresh_setup in thresh_setups: for qcd in range(1, 4 + 1): for qed in range(2 + 1): - for qedref in [muref, nan]: # testing both running and fixed alpha + for em_running in [ + True, + False, + ]: # testing both running and fixed alpha pto = (qcd, qed) couplings = CouplingsInfo.from_dict( dict( @@ -235,6 +238,7 @@ def test_exact(self): scale=muref, num_flavs_ref=None, max_num_flavs=6, + em_running=em_running, ) ) sc_expanded = Couplings( @@ -273,14 +277,14 @@ def test_exact(self): sc_exact.a(q2)[1], rtol=precisions[1], ) - if qedref is nan or qed == 0: + if not em_running or qed == 0: np.testing.assert_allclose( - sc_expanded.a(q2)[1], + sc_expanded.a_em(q2), alpharef[1] / (4 * np.pi), rtol=1e-10, ) np.testing.assert_allclose( - sc_exact.a(q2)[1], + sc_exact.a_em(q2), alpharef[1] / (4 * np.pi), rtol=1e-10, ) diff --git a/tests/eko/test_matchings.py 
new file mode 100644
index 000000000..36a540f3f
--- /dev/null
+++ b/tests/eko/test_matchings.py
@@ -0,0 +1,172 @@
+"""Tests for the matchings module."""
+from dataclasses import astuple
+
+import numpy as np
+
+from eko.matchings import (
+    Atlas,
+    Matching,
+    Segment,
+    flavor_shift,
+    is_downward_path,
+    nf_default,
+)
+from eko.quantities.heavy_quarks import MatchingScales
+
+
+class TestSegment:
+    def test_tuple(self):
+        p = Segment(0, 1, 3)
+        assert astuple(p) == (0, 1, 3)
+        # is hashable?
+        d = {}
+        d[p] = 1
+        assert d[p] == 1
+
+    def test_str(self):
+        p = Segment(0, 1, 3)
+        s = str(p)
+        assert s.index("0") > 0
+        assert s.index("1") > 0
+        assert s.index("3") > 0
+
+
+ZERO = (0.0, 0)
+
+
+class TestAtlas:
+    def test_init(self):
+        # 3 thr
+        tc3 = Atlas(MatchingScales([1, 2, 3]), ZERO)
+        assert tc3.walls == [0, 1, 2, 3, np.inf]
+        # 2 thr
+        tc2 = Atlas(MatchingScales([0, 2, 3]), ZERO)
+        assert tc2.walls == [0, 0, 2, 3, np.inf]
+
+    def test_normalize(self):
+        tc = Atlas(MatchingScales([1, 2, 3]), (1.5, None))
+        assert tc.origin[1] == 4
+        assert tc.normalize((2.5, None))[1] == 5
+        assert tc.normalize((2.5, 3))[1] == 3
+
+    def test_nfref(self):
+        # weird but fine
+        tc = Atlas(MatchingScales([1, 2, 3]), (1.5, 3))
+        p = tc.path((1.5, 4))
+        assert len(p) == 2
+        assert astuple(p[0]) == (1.5, 1.0, 3)
+        assert astuple(p[1]) == (1.0, 1.5, 4)
+
+    def test_str(self):
+        walls = MatchingScales([1.23, 9.87, 14.54])
+        stc3 = str(Atlas(walls, ZERO))
+
+        for w in walls:
+            assert f"{w:.2e}" in stc3
+
+    def test_ffns(self):
+        tc3 = Atlas.ffns(3, 0.0)
+        assert tc3.walls == [0] + [np.inf] * 4
+        tc4 = Atlas.ffns(4, 3.0)
+        assert tc4.walls == [0] * 2 + [np.inf] * 3
+        assert len(tc4.path((2.0, 4))) == 1
+
+    def test_path_3thr(self):
+        tc = Atlas(MatchingScales([1, 2, 3]), (0.5, 3))
+        p1 = tc.path((0.7, 3))
+        assert len(p1) == 1
+        assert astuple(p1[0]) == (0.5, 0.7, 3)
+
+    def test_path_3thr_backward(self):
+        tc = Atlas(MatchingScales([1, 2, 3]), (2.5, 5))
+        p1 = tc.path((0.7, 3))
+        assert len(p1) == 3
+        assert astuple(p1[0]) == (2.5, 2.0, 5)
+        assert astuple(p1[1]) == (2.0, 1.0, 4)
+        assert astuple(p1[2]) == (1.0, 0.7, 3)
+
+    def test_path_3thr_on_threshold(self):
+        tc = Atlas(MatchingScales([1, 2, 3]), (0.5, 3))
+        # on the right of mc
+        p3 = tc.path((1.0, 4))
+        assert len(p3) == 2
+        assert p3[0].nf == 3
+        assert astuple(p3[1]) == (1.0, 1.0, 4)
+        # on the left of mc
+        p4 = tc.path((1.0, 3))
+        assert len(p4) == 1
+        assert p4[0].nf == 3
+
+    def test_path_3thr_weird(self):
+        tc = Atlas(MatchingScales([1, 2, 3]), (0.5, 3))
+        # the whole distance underground
+        p6 = tc.path((3.5, 3))
+        assert len(p6) == 1
+        assert astuple(p6[0]) == (0.5, 3.5, 3)
+        mu2_from = 3.5
+        mu2_to = 0.7
+        # 0
+        # 1 <-----------
+        # ---> 2
+        # 3 < -----
+        # |  |  |
+        origin = (mu2_from, 3)
+        target = (mu2_to, 5)
+        p7 = Atlas(MatchingScales([1, 2, 3]), origin).path(target)
+        assert [s.nf for s in p7] == list(range(3, 5 + 1))
+        assert p7[0].origin == mu2_from
+        assert p7[2].target == mu2_to
+        # 0
+        # 1 <-----------
+        # ---> 2 -> 3
+        # 4 < ---------
+        # |  |  |
+        target = (mu2_to, 6)
+        p8 = Atlas(MatchingScales([1, 2, 3]), origin).path(target)
+        assert [s.nf for s in p8] == list(range(3, 6 + 1))
+        assert p8[0].origin == mu2_from
+        assert p8[3].target == mu2_to
+
+    def test_matched_path(self):
+        for kc in [0.3, 0.9, 1.5]:
+            for kb in [0.75, 1.25]:
+                tc = Atlas(MatchingScales([1 * kc, 2 * kb, 300]), (0.5, 3))
+                p = tc.matched_path((200, 5))
+                assert len(p) == 5
+                assert isinstance(p[0], Segment)
+                assert isinstance(p[1], Matching)
+                assert p[1].scale == p[0].target
+                assert p[1].scale == p[2].origin
+                assert p[1].hq == p[0].nf + 1
+                assert p[1].hq == p[2].nf
+                assert isinstance(p[2], Segment)
+                assert isinstance(p[3], Matching)
+                assert p[3].scale == p[2].target
+                assert p[3].scale == p[4].origin
+                assert p[3].hq == p[2].nf + 1
+                assert p[3].hq == p[4].nf
+                assert isinstance(p[4], Segment)
+
+
+def test_nf():
+    nf4 = Atlas.ffns(4, 0.0)
+    for q2 in [1.0, 1e1, 1e2, 1e3, 1e4]:
+        assert nf_default(q2, nf4) == 4
+    at = Atlas(MatchingScales([1, 2, 3]), (0.5, 3))
+    assert nf_default(0.9, at) == 3
+    assert nf_default(1.1, at) == 4
+
+
+def test_downward_path():
+    thr_atlas = Atlas(np.power([2, 3, 4], 2).tolist(), (91**2, 3))
+    mu2_to = 5**2
+    path_3 = thr_atlas.path((mu2_to, 3))
+    # path_3 is downward in q2
+    is_downward = is_downward_path(path_3)
+    assert is_downward is True
+    assert flavor_shift(is_downward) == 4
+    # path_6 is downward in q2, but forward in nf
+    path_6 = thr_atlas.path((mu2_to, 6))
+    is_downward = is_downward_path(path_6)
+    assert is_downward is False
+    assert flavor_shift(is_downward) == 3
diff --git a/tests/eko/test_msbar_masses.py b/tests/eko/test_msbar_masses.py
index c35f4359c..b0c102ef8 100644
--- a/tests/eko/test_msbar_masses.py
+++ b/tests/eko/test_msbar_masses.py
@@ -64,11 +64,14 @@ def test_compute_msbar_mass(self, theory_card: TheoryCard):
                     m2_ref,
                     Q2m_ref,
                     strong_coupling=strong_coupling,
+                    thresholds_ratios=th.heavy.matching_ratios,
                     xif2=th.xif**2,
                     q2_to=m2_computed[nf - 3],
                 )
             )
-        np.testing.assert_allclose(m2_computed, m2_test, rtol=6e-4)
+        np.testing.assert_allclose(
+            m2_computed, m2_test, rtol=6e-4, err_msg=f"{method=},{order=}"
+        )
 
     def test_compute_msbar_mass_VFNS(self, theory_card: TheoryCard):
         # test the solution now with some initial condition
@@ -107,6 +110,7 @@ def test_compute_msbar_mass_VFNS(self, theory_card: TheoryCard):
                     m2_ref,
                     Q2m_ref,
                     strong_coupling=strong_coupling,
+                    thresholds_ratios=th.heavy.matching_ratios,
                     xif2=th.xif**2,
                     q2_to=m2_computed[nf - 3],
                 )
diff --git a/tests/eko/test_quantities.py b/tests/eko/test_quantities.py
new file mode 100644
index 000000000..0c2628b30
--- /dev/null
+++ b/tests/eko/test_quantities.py
@@ -0,0 +1,49 @@
+from math import nan
+
+import numpy as np
+import pytest
+
+from eko.quantities import heavy_quarks as hq
+
+
+def test_HeavyQuarks():
+    with pytest.raises(ValueError):
+        hq.MatchingRatios([1, 2, 3, 4])
+    l = hq.MatchingRatios([0.5, 2.0, 3.0])
+    assert len(l) == 3
+    assert l.c == 0.5
+    assert l.b == 2.0
+    assert l.t == 3.0
+    l.c = 0.7
+    assert len(l) == 3
+    assert l.c == 0.7
+    assert l.b == 2.0
+    assert l.t == 3.0
+    l.b = 2.7
+    assert len(l) == 3
+    assert l.c == 0.7
+    assert l.b == 2.7
+    assert l.t == 3.0
+    l.t = 3.7
+    assert len(l) == 3
+    assert l.c == 0.7
+    assert l.b == 2.7
+    assert l.t == 3.7
+
+
+def test_HeavyInfo():
+    i = hq.HeavyInfo(
+        num_flavs_init=4,
+        num_flavs_max_pdf=6,
+        intrinsic_flavors=[4, 5],
+        masses=hq.HeavyQuarkMasses(
+            [
+                hq.QuarkMassRef([2.0, nan]),
+                hq.QuarkMassRef([5.0, nan]),
+                hq.QuarkMassRef([100.0, nan]),
+            ]
+        ),
+        masses_scheme=hq.QuarkMassScheme.POLE,
+        matching_ratios=hq.MatchingRatios([1.5, 2.0, 3.0]),
+    )
+    np.testing.assert_allclose(i.squared_ratios, [2.25, 4.0, 9.0])
diff --git a/tests/eko/test_thresholds.py b/tests/eko/test_thresholds.py
deleted file mode 100644
index 0e3d9f699..000000000
--- a/tests/eko/test_thresholds.py
+++ /dev/null
@@ -1,183 +0,0 @@
-"""
-    Tests for the threshold class
-"""
-import numpy as np
-import pytest
-
-from eko.thresholds import PathSegment, ThresholdsAtlas, flavor_shift, is_downward_path
-
-
-class TestPathSegment:
-    def test_tuple(self):
-        p = PathSegment(0, 1, 3)
-        assert p.tuple == (0, 1, 3)
-        # is hashable?
-        d = {}
-        d[p.tuple] = 1
-        assert d[p.tuple] == 1
-
-    def test_str(self):
-        p = PathSegment(0, 1, 3)
-        s = str(p)
-        assert s.index("0") > 0
-        assert s.index("3") > 0
-        assert s.index("3") > 0
-
-
-class TestThresholdsAtlas:
-    def test_init(self):
-        # 3 thr
-        tc3 = ThresholdsAtlas([1, 2, 3])
-        assert tc3.area_walls == [0, 1, 2, 3, np.inf]
-        # 2 thr
-        tc2 = ThresholdsAtlas([0, 2, 3])
-        assert tc2.area_walls == [0, 0, 2, 3, np.inf]
-
-        # errors
-        with pytest.raises(ValueError):
-            ThresholdsAtlas([1.0, 0.0])
-
-    def test_nfref(self):
-        # errors
-        with pytest.raises(ValueError, match="Without"):
-            ThresholdsAtlas([0, 2, 3], nf_ref=3)
-        with pytest.raises(ValueError, match="starts"):
-            ThresholdsAtlas([0, 2, 3], 1, 3)
-        with pytest.raises(ValueError, match="stops"):
-            ThresholdsAtlas([0, np.inf, np.inf], 1, 5)
-        # weird but fine
-        tc = ThresholdsAtlas([1, 2, 3], 1.5, 3)
-        p = tc.path(1.5)
-        assert len(p) == 2
-        assert p[0].q2_from == 1.5
-        assert p[0].q2_to == 1.0
-        assert p[0].nf == 3
-        assert p[1].q2_from == 1.0
-        assert p[1].q2_to == 1.5
-        assert p[1].nf == 4
-
-    def test_str(self):
-        walls = [1.23, 9.87, 14.54]
-        stc3 = str(ThresholdsAtlas(walls))
-
-        for w in walls:
-            assert f"{w:.2e}" in stc3
-
-    def test_build_area_walls(self):
-        for k in range(3, 6 + 1):
-            walls = ThresholdsAtlas.build_area_walls([1, 2, 3], [1, 2, 3], k)
-            assert len(walls) == k - 3
-
-        with pytest.raises(ValueError):
-            ThresholdsAtlas.build_area_walls([1, 2], [1, 2, 3], 4)
-        with pytest.raises(ValueError):
-            ThresholdsAtlas.build_area_walls([1, 2, 3], [1, 2], 4)
-        with pytest.raises(ValueError):
-            ThresholdsAtlas.build_area_walls([1, 2], [1, 2], 4)
-
-    def test_ffns(self):
-        tc3 = ThresholdsAtlas.ffns(3)
-        assert tc3.area_walls == [0] + [np.inf] * 4
-        tc4 = ThresholdsAtlas.ffns(4)
-        assert tc4.area_walls == [0] * 2 + [np.inf] * 3
-        assert len(tc4.path(q2_to=2.0, q2_from=3.0)) == 1
-
-    def test_path_3thr(self):
-        tc = ThresholdsAtlas([1, 2, 3], 0.5)
-        p1 = tc.path(0.7)
-        assert len(p1) == 1
-        assert p1[0].q2_from == 0.5
-        assert p1[0].q2_to == 0.7
-        assert p1[0].nf == 3
-
-        p2 = tc.path(1.5, q2_from=2.5)
-        assert len(p2) == 2
-        assert p2[0].nf == 5
-        assert p2[1].nf == 4
-
-    def test_path_3thr_backward(self):
-        tc = ThresholdsAtlas([1, 2, 3], 2.5)
-        p1 = tc.path(0.7)
-        assert len(p1) == 3
-        assert p1[0].tuple == (2.5, 2.0, 5)
-        assert p1[1].tuple == (2.0, 1.0, 4)
-        assert p1[2].tuple == (1.0, 0.7, 3)
-
-    def test_path_3thr_on_threshold(self):
-        tc = ThresholdsAtlas([1, 2, 3], 0.5)
-        # on the right of mc
-        p3 = tc.path(1.0, nf_to=4)
-        assert len(p3) == 2
-        assert p3[0].nf == 3
-        assert p3[1].tuple == (1.0, 1.0, 4)
-        # on the left of mc
-        p4 = tc.path(1.0, nf_to=3)
-        assert len(p4) == 1
-        assert p4[0].nf == 3
-        # on the left of mt, across mb
-        p5 = tc.path(1.5, q2_from=3.0, nf_from=5)
-        assert len(p5) == 2
-        assert p5[0].nf == 5
-        assert p5[1].nf == 4
-
-    def test_path_3thr_weird(self):
-        tc = ThresholdsAtlas([1, 2, 3], 0.5)
-        # the whole distance underground
-        p6 = tc.path(3.5, nf_to=3)
-        assert len(p6) == 1
-        assert p6[0].tuple == (0.5, 3.5, 3)
-        q2_from = 3.5
-        q2_to = 0.7
-        # 0
-        # 1 <-----------
-        # ---> 2
-        # 3 < -----
-        # |  |  |
-        p7 = tc.path(q2_to=q2_to, nf_to=5, q2_from=q2_from, nf_from=3)
-        assert len(p7) == 3
-        assert p7[0].nf == 3
-        assert p7[1].nf == 4
-        assert p7[2].nf == 5
-        assert p7[0].q2_from == q2_from
-        assert p7[2].q2_to == q2_to
-        # 0
-        # 1 <-----------
-        # ---> 2 -> 3
-        # 4 < ---------
-        # |  |  |
-        p8 = tc.path(q2_to=q2_to, nf_to=6, q2_from=q2_from, nf_from=3)
-        assert len(p8) == 4
-        assert p8[0].nf == 3
-        assert p8[1].nf == 4
-        assert p8[2].nf == 5
-        assert p8[3].nf == 6
-        assert p8[0].q2_from == q2_from
-        assert p8[3].q2_to == q2_to
-
-    def test_nf(self):
-        nf4 = ThresholdsAtlas.ffns(4)
-        for q2 in [1.0, 1e1, 1e2, 1e3, 1e4]:
-            assert nf4.nf(q2) == 4
-        ta = ThresholdsAtlas([1, 2, 3], 0.5)
-        assert ta.nf(0.9) == 3
-        assert ta.nf(1.1) == 4
-
-
-def test_downward_path():
-    thr_atlas = ThresholdsAtlas(
-        masses=np.power([2, 3, 4], 2),
-        q2_ref=91**2,
-        nf_ref=3,
-        thresholds_ratios=[1, 1, 1],
-    )
-    q2_to = 5**2
-    path_3 = thr_atlas.path(q2_to, nf_to=3)
-    # path_3 is downward in q2
-    is_downward = is_downward_path(path_3)
-    assert is_downward is True
-    assert flavor_shift(is_downward) == 4
-    # path_6 is downward in q2, but forward in nf
-    path_6 = thr_atlas.path(q2_to, nf_to=6)
-    is_downward = is_downward_path(path_6)
-    assert is_downward is False
-    assert flavor_shift(is_downward) == 3
diff --git a/tests/ekobox/test_apply.py b/tests/ekobox/test_apply.py
index 8e980818f..70e4b1435 100644
--- a/tests/ekobox/test_apply.py
+++ b/tests/ekobox/test_apply.py
@@ -7,35 +7,35 @@
 class TestApply:
     def test_apply(self, eko_factory: EKOFactory, fake_pdf):
         eko = eko_factory.get()
-        mu2_out = eko.mu2grid[0]
+        ep_out = eko.evolgrid[0]
         # fake pdfs
         pdf_grid = apply.apply_pdf(eko, fake_pdf)
-        assert len(pdf_grid) == len(eko.mu2grid)
-        pdfs = pdf_grid[mu2_out]["pdfs"]
-        assert list(pdfs.keys()) == list(eko.rotations.targetpids)
+        assert len(pdf_grid) == len(eko.evolgrid)
+        pdfs = pdf_grid[ep_out[0]]["pdfs"]
+        assert list(pdfs.keys()) == list(eko.bases.targetpids)
         # rotate to target_grid
         target_grid = [0.75]
         pdf_grid = apply.apply_pdf(eko, fake_pdf, target_grid)
         assert len(pdf_grid) == 1
-        pdfs = pdf_grid[mu2_out]["pdfs"]
-        assert list(pdfs.keys()) == list(eko.rotations.targetpids)
+        pdfs = pdf_grid[ep_out[0]]["pdfs"]
+        assert list(pdfs.keys()) == list(eko.bases.targetpids)
 
     def test_apply_flavor(self, eko_factory: EKOFactory, fake_pdf, monkeypatch):
         eko = eko_factory.get()
-        mu2_out = eko.mu2grid[0]
+        ep_out = eko.evolgrid[0]
         # fake pdfs
         monkeypatch.setattr(
             "eko.basis_rotation.rotate_flavor_to_evolution", np.ones((14, 14))
         )
         monkeypatch.setattr(
             "eko.basis_rotation.flavor_basis_pids",
-            eko.rotations.targetpids,
+            eko.bases.targetpids,
         )
         fake_evol_basis = tuple(
-            ["a", "b"] + [str(x) for x in range(len(eko.rotations.pids) - 2)]
+            ["a", "b"] + [str(x) for x in range(len(eko.bases.pids) - 2)]
         )
         monkeypatch.setattr("eko.basis_rotation.evol_basis", fake_evol_basis)
         pdf_grid = apply.apply_pdf(eko, fake_pdf, rotate_to_evolution_basis=True)
-        assert len(pdf_grid) == len(eko.mu2grid)
-        pdfs = pdf_grid[mu2_out]["pdfs"]
+        assert len(pdf_grid) == len(eko.evolgrid)
+        pdfs = pdf_grid[ep_out[0]]["pdfs"]
         assert list(pdfs.keys()) == list(fake_evol_basis)
diff --git a/tests/ekobox/test_genpdf.py b/tests/ekobox/test_genpdf.py
index d14c5dfe6..0565db701 100644
--- a/tests/ekobox/test_genpdf.py
+++ b/tests/ekobox/test_genpdf.py
@@ -2,8 +2,11 @@
 import pytest
 
 from eko import basis_rotation as br
+from eko.io.runcards import flavored_mugrid
 from ekobox import genpdf
 
+MASSES = [1.5, 4.0, 170.0]
+
 
 def test_genpdf_exceptions(tmp_path, cd):
     with cd(tmp_path):
@@ -33,13 +36,14 @@ def test_genpdf_exceptions(tmp_path, cd):
 
 def test_generate_block():
     xg = np.linspace(0.0, 1.0, 5)
-    q2s = np.geomspace(1.0, 1e3, 5)
+    mu2s = np.geomspace(1.0, 1e3, 5)
+    evolgrid = flavored_mugrid(mu2s.tolist(), MASSES, [1.0, 1.0, 1.0])
     pids = np.arange(3)
-    b = genpdf.generate_block(lambda pid, x, q2: pid * x * q2, xg, q2s, pids)
+    b = genpdf.generate_block(lambda pid, x, q2: pid * x * q2, xg, evolgrid, pids)
     assert isinstance(b, dict)
     assert sorted(b.keys()) == sorted(["data", "mu2grid", "xgrid", "pids"])
     assert isinstance(b["data"], np.ndarray)
-    assert b["data"].shape == (len(xg) * len(q2s), len(pids))
+    assert b["data"].shape == (len(xg) * len(mu2s), len(pids))
 
 
 def test_install_pdf(fake_lhapdf, cd):
@@ -68,7 +72,8 @@ def test_generate_pdf_debug_pid(fake_lhapdf, cd):
     mytmp.mkdir()
     n = "test_generate_pdf_debug_pid"
     xg = np.linspace(0.0, 1.0, 5)
-    q2s = np.geomspace(1.0, 1e3, 7)
+    mu2s = np.geomspace(1.0, 1e3, 7)
+    evolgrid = flavored_mugrid(mu2s.tolist(), MASSES, [1.0, 1.0, 1.0])
     p = mytmp / n
     i = f"{n}.info"
     with cd(mytmp):
@@ -79,7 +84,7 @@ def test_generate_pdf_debug_pid(fake_lhapdf, cd):
         info_update={"Debug": "debug"},
         install=True,
         xgrid=xg,
-        mu2grid=q2s,
+        evolgrid=evolgrid,
     )
     pp = fake_lhapdf / n
     assert not p.exists()
@@ -164,7 +169,8 @@ def test_generate_pdf_toy_antiqed(fake_lhapdf, cd):
     # iterate pdfs with their error type and number of blocks
     n = "test_generate_pdf_toy_antiqed"
     xg = np.linspace(1e-5, 1.0, 5)
-    q2s = np.geomspace(1.0, 1e3, 7)
+    mu2s = np.geomspace(1.0, 1e3, 7)
+    evolgrid = flavored_mugrid(mu2s.tolist(), MASSES, [1.0, 1.0, 1.0])
     anti_qed_singlet = np.zeros_like(br.flavor_basis_pids, dtype=np.float_)
     anti_qed_singlet[br.flavor_basis_pids.index(1)] = -4
     anti_qed_singlet[br.flavor_basis_pids.index(-1)] = -4
@@ -179,7 +185,7 @@ def test_generate_pdf_toy_antiqed(fake_lhapdf, cd):
         [anti_qed_singlet],
         "toy",
         xgrid=xg,
-        mu2grid=q2s,
+        evolgrid=evolgrid,
     )
     assert p.exists()
     # check info file
diff --git a/tests/ekobox/test_utils.py b/tests/ekobox/test_utils.py
index d4b896927..557db679c 100644
--- a/tests/ekobox/test_utils.py
+++ b/tests/ekobox/test_utils.py
@@ -16,6 +16,7 @@ def test_ekos_product(tmp_path):
 
     theory = cards.example.theory()
     theory.order = (1, 0)
+    theory.heavy.num_flavs_init = 5
 
     op1 = cards.example.operator()
     op1.mu0 = mu01
@@ -35,8 +36,7 @@ def test_ekos_product(tmp_path):
     op_err = copy.deepcopy(op2)
     op_err.mu0 = mu01
 
-    mu2first = mugrid2[0][0] ** 2
-    mu2last = mugrid2[-1][0] ** 2
+    mu2first = (mugrid2[0][0] ** 2, mugrid2[0][1])
 
     ini_path = tmp_path / "ini.tar"
     eko.solve(theory, op1, path=ini_path)
@@ -52,12 +52,13 @@ def test_ekos_product(tmp_path):
     _ = utils.ekos_product(eko_ini, eko_fin_err)
     # product is copied
     res_path = tmp_path / "eko_res.tar"
-    eko_fin[mu2last].error = None  # drop one set of errors
     utils.ekos_product(eko_ini, eko_fin, path=res_path)
     with EKO.read(res_path) as eko_res:
         assert eko_res.operator_card.mu20 == eko_ini.operator_card.mu20
-        np.testing.assert_allclose(eko_res.mu2grid[1:], eko_fin.mu2grid)
+        np.testing.assert_allclose(
+            sorted(eko_res.mu2grid)[1:], sorted(eko_fin.mu2grid)
+        )
         np.testing.assert_allclose(
             eko_ini[mu2first].operator, eko_res[mu2first].operator
         )
@@ -65,10 +66,10 @@ def test_ekos_product(tmp_path):
 
     # product overwrites initial
     utils.ekos_product(eko_ini, eko_fin)
-    np.testing.assert_allclose(eko_ini.mu2grid[1:], eko_fin.mu2grid)
+    np.testing.assert_allclose(
+        sorted(eko_ini.mu2grid)[1:], sorted(eko_fin.mu2grid)
+    )
     np.testing.assert_allclose(
         eko_res[mu2first].operator, eko_ini[mu2first].operator
     )
     utils.ekos_product(eko_ini, eko_fin)
-
-    assert eko_res[mu2last].error is None
diff --git a/tests/ekore/anomalous_dimensions/unpolarized/space_like/test_init.py b/tests/ekore/anomalous_dimensions/unpolarized/space_like/test_init.py
index 3e82fbb52..2175eddcd 100644
--- a/tests/ekore/anomalous_dimensions/unpolarized/space_like/test_init.py
+++ b/tests/ekore/anomalous_dimensions/unpolarized/space_like/test_init.py
@@ -123,6 +123,8 @@ def test_gamma_ns():
         ad_us.gamma_ns((4, 0), br.non_singlet_pids_map["ns+"], 1, nf),
         np.zeros(4),
     )
+    with pytest.raises(NotImplementedError):
+        ad_us.gamma_ns((2, 0), 10106, 2.0, nf)
 
 
 def test_gamma_ns_qed():
@@ -194,6 +196,14 @@ def test_gamma_ns_qed():
     )
 
 
+def test_errors():
+    cache = h.cache.reset()
+    with pytest.raises(NotImplementedError):
+        ad_us.choose_ns_ad_as1aem1(10106, 2.0, cache)
+    with pytest.raises(NotImplementedError):
+        ad_us.choose_ns_ad_aem2(10106, 2.0, 4, cache)
+
+
 def test_dim_singlet():
     nf = 3
     N = 2
diff --git a/tests/ekore/anomalous_dimensions/unpolarized/time_like/test_init.py b/tests/ekore/anomalous_dimensions/unpolarized/time_like/test_init.py
index 7ad42d825..88f83a980 100644
--- a/tests/ekore/anomalous_dimensions/unpolarized/time_like/test_init.py
+++ b/tests/ekore/anomalous_dimensions/unpolarized/time_like/test_init.py
@@ -1,7 +1,11 @@
+import eko.basis_rotation as br
 import ekore.anomalous_dimensions.unpolarized.time_like as ad
 
 
 def test_shapes():
     for k in range(1, 3 + 1):
-        assert ad.gamma_ns((k, 0), 10101, 2.0, 5).shape == (k,)
+        for s in ["+", "-", "V"]:
+            assert ad.gamma_ns(
+                (k, 0), br.non_singlet_pids_map[f"ns{s}"], 2.0, 5
+            ).shape == (k,)
         assert ad.gamma_singlet((k, 0), 2.0, 5).shape == (k, 2, 2)
diff --git a/tests/ekore/harmonics/test_cache.py b/tests/ekore/harmonics/test_cache.py
index ab756da11..30d68d759 100644
--- a/tests/ekore/harmonics/test_cache.py
+++ b/tests/ekore/harmonics/test_cache.py
@@ -1,4 +1,5 @@
 import numpy as np
+import pytest
 
 from ekore import harmonics as h
 
@@ -12,6 +13,15 @@ def test_reset():
         np.sum([isinstance(val, int) for val in h.cache.__dict__.values()]) - 1
     )
     assert c.shape == (available_items,)
+    assert c.shape == (h.cache.CACHE_SIZE,)
+
+
+def test_error():
+    c = h.cache.reset()
+    with pytest.raises(RuntimeError):
+        h.cache.get(-1, c, 1.0)
+    with pytest.raises(RuntimeError):
+        h.cache.get(h.cache.CACHE_SIZE, c, 1.0)
 
 
 def test_get():
diff --git a/tests/ekore/harmonics/test_log_functions.py b/tests/ekore/harmonics/test_log_functions.py
index a6f0dc0fe..205bc6445 100644
--- a/tests/ekore/harmonics/test_log_functions.py
+++ b/tests/ekore/harmonics/test_log_functions.py
@@ -7,10 +7,10 @@ def test_lm1pm2():
-    def mellin_lm1pm2(x, k):
+    def mellin_lm1pm2(x, k, N):
         return x ** (N - 1) * (1 - x) ** 2 * np.log(1 - x) ** k
 
-    Ns = 100 * np.random.rand(3)
+    Ns = [1.0, 1.5, 2.0, 2.34, 56.789]
     for N in Ns:
         sx = hsx(N, 4)
 
@@ -22,16 +22,16 @@ def mellin_lm1pm2(x, k):
         }
 
         for k, ref in ref_values.items():
-            test_value = quad(mellin_lm1pm2, 0, 1, args=(k))[0]
+            test_value = quad(mellin_lm1pm2, 0, 1, args=(k, N))[0]
             np.testing.assert_allclose(test_value, ref)
 
 
 def test_lm1pm1():
     # test the Mellin transformation with some fixed N values
-    def mellin_lm1pm1(x, k):
+    def mellin_lm1pm1(x, k, N):
         return x ** (N - 1) * (1 - x) * np.log(1 - x) ** k
 
-    Ns = 100 * np.random.rand(3)
+    Ns = [1.0, 1.5, 2.0, 2.34, 56.789]
     for N in Ns:
         sx = hsx(N, 4)
 
@@ -43,16 +43,16 @@ def mellin_lm1pm1(x, k):
         }
 
         for k in [1, 2, 3, 4]:
-            test_value = quad(mellin_lm1pm1, 0, 1, args=(k))[0]
+            test_value = quad(mellin_lm1pm1, 0, 1, args=(k, N))[0]
             np.testing.assert_allclose(test_value, ref_values[k])
 
 
 def test_lm1p():
     # test the Mellin transformation with some fixed N values
-    def mellin_lm1p(x, k):
+    def mellin_lm1p(x, k, N):
         return x ** (N - 1) * np.log(1 - x) ** k
 
-    Ns = 100 * np.random.rand(3)
+    Ns = [1.0, 1.5, 2.0, 2.34, 56.789]
     for N in Ns:
         sx = hsx(N, 5)
 
@@ -65,5 +65,5 @@ def mellin_lm1p(x, k):
         }
 
         for k in [1, 3, 4, 5]:
-            test_value = quad(mellin_lm1p, 0, 1, args=(k))[0]
+            test_value = quad(mellin_lm1p, 0, 1, args=(k, N))[0]
             np.testing.assert_allclose(test_value, ref_values[k])
diff --git a/tests/ekore/operator_matrix_elements/unpolarized/space_like/test_init.py b/tests/ekore/operator_matrix_elements/unpolarized/space_like/test_init.py
new file mode 100644
index 000000000..d1db339bd
--- /dev/null
+++ b/tests/ekore/operator_matrix_elements/unpolarized/space_like/test_init.py
@@ -0,0 +1,9 @@
+r"""Test |OME|."""
+
+import ekore.operator_matrix_elements.unpolarized.space_like as ome
+
+
+def test_shapes():
+    for k in range(1, 3 + 1):
+        assert ome.A_singlet((k, 0), complex(0, 1), 4, 0.0, False).shape == (k, 3, 3)
+        assert ome.A_non_singlet((k, 0), complex(0, 1), 4, 0.0).shape == (k, 2, 2)
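
Reviewer note: the central change in this patch is the replacement of `eko.thresholds.ThresholdsAtlas` by `eko.matchings.Atlas`, which operates on evolution points `(mu2, nf)` instead of bare scales. Below is a minimal usage sketch of the new API; the signatures are inferred from tests/eko/test_matchings.py in this patch and the snippet is illustrative only, not verified against the implementation.

    # Sketch of the new matchings API; signatures inferred from the tests above.
    from eko.matchings import Atlas, Matching, Segment, is_downward_path, nf_default
    from eko.quantities.heavy_quarks import MatchingScales

    # An atlas is built from the three squared matching scales and an origin
    # evolution point (mu2, nf); passing nf=None lets the atlas infer it
    # from the walls, as exercised by test_normalize.
    atlas = Atlas(MatchingScales([2.0**2, 4.5**2, 175.0**2]), (1.65**2, None))

    # path() returns a list of Segment(origin, target, nf) connecting the
    # origin to the requested evolution point, crossing thresholds as needed.
    path = atlas.path((100.0**2, 5))
    assert all(isinstance(seg, Segment) for seg in path)

    # matched_path() interleaves a Matching at each crossed threshold,
    # so consecutive Segments share the Matching scale.
    full = atlas.matched_path((100.0**2, 5))
    assert all(isinstance(el, (Segment, Matching)) for el in full)

    # Module-level helpers used by the runner: the default nf at a given
    # scale, and whether a path runs downward in mu2.
    assert nf_default(100.0**2, atlas) == 5
    downward = is_downward_path(path)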