Merge pull request #227 from NNPDF/lha_bench_workflow
alecandido authored Mar 31, 2023
2 parents b5deebe + b5af1f0 commit 0a82abb
Showing 4 changed files with 159 additions and 53 deletions.
44 changes: 44 additions & 0 deletions .github/workflows/lha_bot.yml
@@ -0,0 +1,44 @@
# A single CI script with github workflow.
name: LHA Benchmarks

on:
  push:
    branches-ignore:
      - "*"
    tags:
  pull_request:
    types:
      - closed
      - ready_for_review
      - review_requested
  workflow_dispatch:

jobs:
  lhabench:
    name: LHA paper Benchmarks
    runs-on: ubuntu-latest
    container:
      image: ghcr.io/nnpdf/bench-evol:v2
      credentials:
        username: ${{ github.repository_owner }}
        password: ${{ secrets.GITHUB_TOKEN }}

    steps:
      - uses: actions/checkout@v2
        with:
          # tags needed for dynamic versioning
          fetch-depth: 0
      - name: Install and configure Poetry
        uses: snok/install-poetry@v1
        with:
          virtualenvs-create: false
          installer-parallel: true
      - name: Install project
        run: |
          poetry install --no-interaction --with test -E mark -E box
      - name: Install task runner
        run: pip install poethepoet
      - name: Run benchmark
        run: |
          poe lha -m "nnlo and sv"
          poe lha -m "ffns_pol and sv"
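For reference, the two `poe lha -m ...` commands rely on the `lha` task redefined in pyproject.toml below: it wraps pytest, and (assuming poethepoet forwards the extra arguments unchanged) the `-m` expressions are ordinary pytest marker filters over the markers declared further down. A minimal local sketch of the same selection, not part of this diff; the pytest.main() calls are only an illustration:

import pytest

# Select only benchmarks carrying both markers, mirroring the workflow step.
pytest.main(["benchmarks/lha_paper_bench.py", "-s", "-m", "nnlo and sv"])
pytest.main(["benchmarks/lha_paper_bench.py", "-s", "-m", "ffns_pol and sv"])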
140 changes: 95 additions & 45 deletions benchmarks/lha_paper_bench.py
@@ -1,9 +1,12 @@
"""
Benchmark to :cite:`Giele:2002hx` (LO + NLO) and :cite:`Dittmar:2005ed` (NNLO).
"""
import argparse
import os
from math import nan

import numpy as np
import pytest
from banana import register

from eko.interpolation import lambertgrid
@@ -36,7 +39,7 @@
# ffns_skip_pdfs.extend([-5, 5, "T24"])


class LHABenchmark(Runner):
class LHA(Runner):
"""Globally set the external program to LHA."""

def __init__(self):
@@ -121,16 +124,52 @@ def run_lha(self, theory_updates):
["ToyLH"],
)

def benchmark_plain(self, pto):
def run_plain(self, pto):
"""Run plain configuration."""
self.run_lha(self.plain_theory(pto))

def benchmark_sv(self, pto):
def run_sv(self, pto):
"""Run scale variations."""
self.run_lha(self.sv_theories(pto))


class BenchmarkVFNS(LHABenchmark):
class BaseBenchmark:
"""Abstract common benchmark tasks."""

def runner(self) -> LHA:
"""Runner to run."""
raise NotImplementedError("runner method has to be overwritten!")

def transformed_runner(self):
"""Prepare runner for benchmark setup"""
r = self.runner()
r.log_to_stdout = os.environ.get("EKO_LOG_STDOUT", False)
return r

@pytest.mark.lo
def benchmark_plain_lo(self):
self.transformed_runner().run_plain(0)

@pytest.mark.nlo
def benchmark_plain_nlo(self):
self.transformed_runner().run_plain(1)

@pytest.mark.nnlo
def benchmark_plain_nnlo(self):
self.transformed_runner().run_plain(2)

@pytest.mark.nlo
@pytest.mark.sv
def benchmark_sv_nlo(self):
self.transformed_runner().run_sv(1)

@pytest.mark.nnlo
@pytest.mark.sv
def benchmark_sv_nnlo(self):
self.transformed_runner().run_sv(2)


class VFNS(LHA):
"""Provide |VFNS| settings."""

def __init__(self):
@@ -148,7 +187,13 @@ def __init__(self):
        )


class BenchmarkFFNS(LHABenchmark):
@pytest.mark.vfns
class BenchmarkVFNS(BaseBenchmark):
    def runner(self):
        return VFNS()


class FFNS(LHA):
    """Provide |FFNS| settings."""

    def __init__(self):
@@ -188,7 +233,51 @@ def skip_pdfs(theory):
        return ffns_skip_pdfs


class BenchmarkRunner(BenchmarkVFNS):
@pytest.mark.ffns
class BenchmarkFFNS(BaseBenchmark):
    def runner(self):
        return FFNS()


class FFNS_polarized(FFNS):
    def run_lha(self, theory_updates):
        """Enforce operator grid and PDF.
        Parameters
        ----------
        theory_updates : list(dict)
            theory updates
        """
        self.run(
            theory_updates,
            [
                {
                    "Q2grid": [1e4],
                    "ev_op_iterations": 10,
                    "interpolation_xgrid": lambertgrid(60).tolist(),
                    "polarized": True,
                }
            ],
            ["ToyLH_polarized"],
        )


@pytest.mark.ffns_pol
class BenchmarkFFNS_polarized(BaseBenchmark):
    def runner(self):
        return FFNS_polarized()

    @pytest.mark.nnlo
    def benchmark_plain_nnlo(self):
        pass

    @pytest.mark.nnlo
    @pytest.mark.sv
    def benchmark_sv_nnlo(self):
        pass


class CommonRunner(VFNS):
    """Generic benchmark runner using the LHA |VFNS| settings."""

    def __init__(self, external):
@@ -218,42 +307,3 @@ def benchmark_sv(self, pto):
high["XIR"] = np.sqrt(0.5)

self.run_lha([low, high])


class BenchmarkFFNS_polarized(BenchmarkFFNS):
    def run_lha(self, theory_updates):
        """Enforce operator grid and PDF.
        Parameters
        ----------
        theory_updates : list(dict)
            theory updates
        """
        self.run(
            theory_updates,
            [
                {
                    "mugrid": [100],
                    "ev_op_iterations": 10,
                    "interpolation_xgrid": lambertgrid(60).tolist(),
                    "polarized": True,
                }
            ],
            ["ToyLH_polarized"],
        )


if __name__ == "__main__":
    # Benchmark to LHA
    # obj = BenchmarkFFNS_polarized()
    # obj = BenchmarkFFNS()
    obj = BenchmarkVFNS()
    # obj.benchmark_plain(1)
    obj.benchmark_sv(2)

    # # VFNS benchmarks with LHA settings
    # programs = ["LHA", "pegasus", "apfel"]
    # for p in programs:
    #     obj = BenchmarkRunner(p)
    #     # obj.benchmark_plain(2)
    #     obj.benchmark_sv(2)
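With the executable `__main__` block above removed, single configurations are now selected through pytest markers rather than by editing the script. For a quick interactive run, something along the following lines should still work — a sketch only, assuming the new classes are importable from benchmarks/lha_paper_bench.py:

from lha_paper_bench import VFNS  # hypothetical import path, adjust to your setup

# Mirror the old `obj = BenchmarkVFNS(); obj.benchmark_sv(2)` driver with the
# new split between the LHA settings class (VFNS) and the pytest-facing wrappers.
runner = VFNS()
runner.run_sv(2)      # NNLO scale variations
# runner.run_plain(1) # NLO, central scale only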
14 changes: 12 additions & 2 deletions pyproject.toml
@@ -115,7 +115,8 @@ bench-run.env.NUMBA_DISABLE_JIT.default = "0"
lint = "pylint src/**/*.py -E"
lint-warnings = "pylint src/**/*.py --exit-zero"
sandbox = "python benchmarks/sandbox.py"
lha = "python benchmarks/lha_paper_bench.py"
lha.cmd = "pytest benchmarks/lha_paper_bench.py -s"
lha.env.NUMBA_DISABLE_JIT.default = "0"
nav = "ekonav --config benchmarks/banana.yaml"
navigator = "ekonav --config benchmarks/banana.yaml"
docs = { "shell" = "cd doc; make html" }
@@ -145,7 +146,16 @@ addopts = [
  '--strict-markers',
]
env = ["D:NUMBA_DISABLE_JIT=1"]
markers = ["isolated: marks benchmarks as isolated"]
markers = [
  "isolated: marks benchmarks as isolated",
  "ffns: Fixed flavor configuration",
  "ffns_pol: Polarized fixed flavor configuration",
  "vfns: Variable flavor configuration",
  "lo: Leading order",
  "nlo: Next-to-leading order",
  "nnlo: Next-to-next-to-leading order",
  "sv: Scale variations",
]

[tool.pylint.master]
# extensions not to check
14 changes: 8 additions & 6 deletions src/ekomark/benchmark/runner.py
@@ -27,6 +27,7 @@ class Runner(BenchmarkRunner):
    rotate_to_evolution_basis = False
    sandbox = False
    plot_operator = False
    log_to_stdout = True

    def __init__(self):
        self.banana_cfg = banana_cfg.cfg
@@ -74,12 +75,13 @@ def run_me(self, theory, ocard, _pdf):
            DGLAP result
        """
        # activate logging
        logStdout = logging.StreamHandler(sys.stdout)
        logStdout.setLevel(logging.INFO)
        logStdout.setFormatter(logging.Formatter("%(message)s"))
        logging.getLogger("eko").handlers = []
        logging.getLogger("eko").addHandler(logStdout)
        logging.getLogger("eko").setLevel(logging.INFO)
        if self.log_to_stdout:
            logStdout = logging.StreamHandler(sys.stdout)
            logStdout.setLevel(logging.INFO)
            logStdout.setFormatter(logging.Formatter("%(message)s"))
            logging.getLogger("eko").handlers = []
            logging.getLogger("eko").addHandler(logStdout)
            logging.getLogger("eko").setLevel(logging.INFO)

        ops_id = f"o{ocard['hash'][:6]}_t{theory['hash'][:6]}"
        root = banana_cfg.cfg["paths"]["database"].parents[0]
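The new `log_to_stdout` switch above is what `BaseBenchmark.transformed_runner` drives via the `EKO_LOG_STDOUT` environment variable in the benchmark file; a caller can also set it directly on a runner instance. A minimal sketch, not part of this diff, assuming a runner class such as `VFNS` from benchmarks/lha_paper_bench.py:

from lha_paper_bench import VFNS  # hypothetical import path

runner = VFNS()
runner.log_to_stdout = False  # new class attribute (defaults to True): skip eko's stdout handler
runner.run_plain(1)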
