From 655f90baeba9934b116de8099cc3a452126b2ae7 Mon Sep 17 00:00:00 2001 From: mhostert Date: Thu, 1 Feb 2024 00:12:44 -0500 Subject: [PATCH] random number generator now seeded with np.random.default_rng(seed) --- setup.cfg | 1 + src/DarkNews/GenLauncher.py | 17 +++-- src/DarkNews/MC.py | 120 +++++++++++++++++++++++------------- src/DarkNews/__init__.py | 2 +- tests/test_predictions.py | 47 +++++++------- 5 files changed, 116 insertions(+), 71 deletions(-) diff --git a/setup.cfg b/setup.cfg index 9440635..a3c5791 100644 --- a/setup.cfg +++ b/setup.cfg @@ -21,6 +21,7 @@ classifiers = Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 License :: OSI Approved :: MIT License Operating System :: OS Independent Intended Audience :: Science/Research diff --git a/src/DarkNews/GenLauncher.py b/src/DarkNews/GenLauncher.py index d7010a6..5fa4bd8 100644 --- a/src/DarkNews/GenLauncher.py +++ b/src/DarkNews/GenLauncher.py @@ -295,6 +295,13 @@ def __init__(self, param_file=None, **kwargs): dn.MC.NEVAL = self.neval dn.MC.NINT = self.nint + # random number generator to be used by vegas + if self.seed is not None: + np.random.seed(self.seed) + self.rng = np.random.default_rng(self.seed).random + else: + self.rng = np.random.random # defaults to vegas' default + # get the initial projectiles self.projectiles = [getattr(lp, nu) for nu in self.nu_flavors] @@ -536,7 +543,12 @@ def _create_all_MC_cases(self, **kwargs): "helicity": helicity, } mc_case = dn.MC.MC_events( - self.experiment, bsm_model=self.bsm_model, enforce_prompt=self.enforce_prompt, sparse=self.sparse, **args + self.experiment, + bsm_model=self.bsm_model, + enforce_prompt=self.enforce_prompt, + sparse=self.sparse, + rng=self.rng, + **args, ) self.gen_cases.append(mc_case) @@ -605,9 +617,6 @@ def run(self, loglevel=None, verbose=None, logfile=None, overwrite_path=None): # Set theory params and run generation of events 
prettyprinter.info("Generating Events using the neutrino-nucleus upscattering engine") - # numpy set used by vegas - if self.seed: - np.random.seed(self.seed) self.df = self.gen_cases[0].get_MC_events() for mc in self.gen_cases[1:]: diff --git a/src/DarkNews/MC.py b/src/DarkNews/MC.py index 1ff761b..9ede2b7 100755 --- a/src/DarkNews/MC.py +++ b/src/DarkNews/MC.py @@ -3,8 +3,9 @@ import vegas as vg import logging -logger = logging.getLogger('logger.' + __name__) -prettyprinter = logging.getLogger('prettyprinter.' + __name__) + +logger = logging.getLogger("logger." + __name__) +prettyprinter = logging.getLogger("prettyprinter." + __name__) from collections import defaultdict from functools import partial @@ -37,16 +38,16 @@ class MC_events: decay_product: visible decay products in the detector helicity: helicity of the up-scattered neutrino enforce_prompt: If True, forces all decays to be prompt, so that pos_scatt == pos_decay - sparse: Specify the level of sparseness of the internal dataframe and output. Not supported for HEPevt. - Allowed values are 0--3, where: - `0`: keep all information; - `1`: keep neutrino energy, visible and unstable particle momenta, scattering and decay positions, and all weights; - `2`: keep neutrino energy, visible and unstable particle momenta, and all weights; + rng: Random number generator (default: None) and can be set as np.random.default_rng(seed) + sparse: Specify the level of sparseness of the internal dataframe and output. Not supported for HEPevt. + Allowed values are 0--3, where: + `0`: keep all information; + `1`: keep neutrino energy, visible and unstable particle momenta, scattering and decay positions, and all weights; + `2`: keep neutrino energy, visible and unstable particle momenta, and all weights; `3`: visible particle momenta and all weights. 
""" - def __init__(self, experiment, bsm_model, sparse=0, enforce_prompt=False, **kwargs): - + def __init__(self, experiment, bsm_model, enforce_prompt=False, rng=None, sparse=0, **kwargs): # default parameters scope = { "nu_projectile": pdg.numu, @@ -56,10 +57,11 @@ def __init__(self, experiment, bsm_model, sparse=0, enforce_prompt=False, **kwar "decay_product": ["e+e-"], "helicity": "conserving", } - + self.enforce_prompt = enforce_prompt self.sparse = sparse - + self.rng = rng + scope.update(kwargs) self.scope = scope self.experiment = experiment @@ -125,7 +127,10 @@ def __init__(self, experiment, bsm_model, sparse=0, enforce_prompt=False, **kwar elif self.decays_to_singlephoton: # scope for decay process self.decay_case = processes.FermionSinglePhotonDecay( - nu_parent=self.nu_upscattered, nu_daughter=self.nu_outgoing, h_parent=self.ups_case.h_upscattered, TheoryModel=bsm_model, + nu_parent=self.nu_upscattered, + nu_daughter=self.nu_outgoing, + h_parent=self.ups_case.h_upscattered, + TheoryModel=bsm_model, ) else: logger.error("Error! 
Could not determine what type of decay class to use.") @@ -143,8 +148,7 @@ def __init__(self, experiment, bsm_model, sparse=0, enforce_prompt=False, **kwar raise ValueError # process being considered - self.underl_process_name = \ - f"{self.nu_projectile.name} {self.ups_case.target.name} --> \ + self.underl_process_name = f"{self.nu_projectile.name} {self.ups_case.target.name} --> \ {self.nu_upscattered.name} {self.ups_case.target.name} --> \ {self.nu_outgoing.name} {DECAY_PRODUCTS} {self.ups_case.target.name}" @@ -171,7 +175,6 @@ def get_MC_events(self): self.EMAX = self.experiment.ERANGE[1] if self.decays_to_dilepton: - if self.decay_case.vector_on_shell and self.decay_case.scalar_off_shell: DIM = 3 logger.info(f"{self.nu_upscattered.name} decays via on-shell Z'.") @@ -197,8 +200,15 @@ def get_MC_events(self): # BATCH SAMPLE INTEGRAND OF INTEREST logger.debug(f"Running VEGAS for DIM={DIM}") batch_f = integrand_type(dim=DIM, Emin=self.EMIN, Emax=self.EMAX, MC_case=self) - integ = vg.Integrator(DIM * [[0.0, 1.0]]) # unit hypercube - result = run_vegas(batch_f, integ, NINT=NINT, NEVAL=NEVAL, NINT_warmup=NINT_warmup, NEVAL_warmup=NEVAL_warmup,) + integ = vg.Integrator(DIM * [[0.0, 1.0]], ran_array_generator=self.rng) + result = run_vegas( + batch_f, + integ, + NINT=NINT, + NEVAL=NEVAL, + NINT_warmup=NINT_warmup, + NEVAL_warmup=NEVAL_warmup, + ) logger.debug("Main VEGAS run completed.") logger.debug(f"Vegas results for the integrals: {result.summary()}") @@ -220,9 +230,9 @@ def get_MC_events(self): # SAVE ALL EVENTS AS A PANDAS DATAFRAME particles = list(four_momenta.keys()) - if self.sparse >= 2: # keep visible, and parent momenta -- Enu to be added later + if self.sparse >= 2: # keep visible, and parent momenta -- Enu to be added later particles = [x for x in particles if "target" not in x and "recoils" not in x and "daughter" not in x] - if self.sparse == 4: # keep only visible momenta + if self.sparse == 4: # keep only visible momenta particles = [x for x in 
particles if "w_decay" not in x] columns_index = pd.MultiIndex.from_product([particles, ["0", "1", "2", "3"]]) @@ -269,59 +279,76 @@ def get_MC_events(self): df_gen["w_event_rate"] = ( weights["diff_event_rate"] * const.attobarn_to_cm2 / decay_rates * target_multiplicity * exposure * batch_f.norm["diff_event_rate"] ) - + if self.sparse <= 1: # > target pdgid df_gen.insert( - loc=len(df_gen.columns), column="target_pdgid", value=self.ups_case.target.pdgid, + loc=len(df_gen.columns), + column="target_pdgid", + value=self.ups_case.target.pdgid, ) df_gen["target_pdgid"] = df_gen["target_pdgid"].astype("int") # > projectile pdgid df_gen.insert( - loc=len(df_gen.columns), column="projectile_pdgid", value=self.ups_case.nu_projectile.pdgid, + loc=len(df_gen.columns), + column="projectile_pdgid", + value=self.ups_case.nu_projectile.pdgid, ) df_gen["projectile_pdgid"] = df_gen["projectile_pdgid"].astype("int") if self.sparse < 4: # > flux averaged xsec weights (neglecting kinematics of decay) - df_gen["w_flux_avg_xsec"] = weights["diff_flux_avg_xsec"] * const.attobarn_to_cm2 * target_multiplicity * exposure * batch_f.norm["diff_flux_avg_xsec"] - + df_gen["w_flux_avg_xsec"] = ( + weights["diff_flux_avg_xsec"] * const.attobarn_to_cm2 * target_multiplicity * exposure * batch_f.norm["diff_flux_avg_xsec"] + ) - # Event-by-event descriptors + # Event-by-event descriptors if self.sparse == 0: # > target name df_gen.insert( - loc=len(df_gen.columns), column="target", value=np.full(tot_nevents, self.ups_case.target.name), + loc=len(df_gen.columns), + column="target", + value=np.full(tot_nevents, self.ups_case.target.name), ) df_gen["target"] = df_gen["target"].astype("string") - + # > scattering regime df_gen.insert( - loc=len(df_gen.columns), column="scattering_regime", value=np.full(tot_nevents, self.ups_case.scattering_regime), + loc=len(df_gen.columns), + column="scattering_regime", + value=np.full(tot_nevents, self.ups_case.scattering_regime), ) df_gen["scattering_regime"] = 
df_gen["scattering_regime"].astype("string") - + # > heliciy df_gen.insert( - loc=len(df_gen.columns), column="helicity", value=np.full(tot_nevents, self.helicity), + loc=len(df_gen.columns), + column="helicity", + value=np.full(tot_nevents, self.helicity), ) df_gen["helicity"] = df_gen["helicity"].astype("string") - + # > underlying process string df_gen.insert( - loc=len(df_gen.columns), column="underlying_process", value=np.full(tot_nevents, self.underl_process_name), + loc=len(df_gen.columns), + column="underlying_process", + value=np.full(tot_nevents, self.underl_process_name), ) df_gen["underlying_process"] = df_gen["underlying_process"].astype("string") # > Helicity of incoming neutrino if self.nu_projectile.pdgid < 0: df_gen.insert( - loc=len(df_gen.columns), column="h_projectile", value=np.full(tot_nevents, +1), + loc=len(df_gen.columns), + column="h_projectile", + value=np.full(tot_nevents, +1), ) elif self.nu_projectile.pdgid > 0: df_gen.insert( - loc=len(df_gen.columns), column="h_projectile", value=np.full(tot_nevents, -1), + loc=len(df_gen.columns), + column="h_projectile", + value=np.full(tot_nevents, -1), ) # > Helicity of outgoing HNL @@ -329,7 +356,7 @@ def get_MC_events(self): df_gen["h_parent", ""] = df_gen["h_projectile"] elif self.helicity == "flipping": df_gen["h_parent", ""] = -df_gen["h_projectile"] - + # ######################################################################### # Metadata @@ -340,12 +367,11 @@ def get_MC_events(self): df_gen.attrs["model"] = self.bsm_model # > saving the lifetime of the parent (upscattered) HNL - df_gen.attrs[f"{self.nu_upscattered.name}_ctau0"] = const.get_decay_rate_in_cm((weights['diff_decay_rate_0'] * batch_f.norm['diff_decay_rate_0']).sum()) - + df_gen.attrs[f"{self.nu_upscattered.name}_ctau0"] = const.get_decay_rate_in_cm((weights["diff_decay_rate_0"] * batch_f.norm["diff_decay_rate_0"]).sum()) # ######################################################################### # PROPAGATE PARENT PARTICLE - if 
self.sparse <=2: +        if self.sparse <= 2: self.experiment.set_geometry() self.experiment.place_scatters(df_gen) @@ -355,7 +381,10 @@ def get_MC_events(self): # decay only the first mother (typically the HNL produced) logger.info(f"Parent {self.ups_case.nu_upscattered.name} proper decay length: {df_gen.attrs[f'{self.nu_upscattered.name}_ctau0']:.3E} cm.\n") geom.place_decay( - df_gen, "P_decay_N_parent", l_decay_proper_cm=df_gen.attrs[f"{self.nu_upscattered.name}_ctau0"], label="pos_decay", + df_gen, + "P_decay_N_parent", + l_decay_proper_cm=df_gen.attrs[f"{self.nu_upscattered.name}_ctau0"], + label="pos_decay", ) # print final result @@ -370,10 +399,14 @@ def get_merged_MC_output(df1, df2): take two pandas dataframes with events and combine them. Resetting index to go from (0,n_1+n_2) where n_i is the number of events in dfi """ - if df1.attrs['model'] != df2.attrs['model']: - logger.warning("Beware! Merging generation cases with different df.attrs['models']! Merging events, but discarting the 'attrs' of the second (latest) dataframe.") - if df1.attrs['experiment'] != df2.attrs['experiment']: - logger.warning("Beware! Merging generation cases with different df.attrs['experiment']! Merging events, but discarting the 'attrs' of the second (latest) dataframe.") + if df1.attrs["model"] != df2.attrs["model"]: + logger.warning( + "Beware! Merging generation cases with different df.attrs['models']! Merging events, but discarding the 'attrs' of the second (latest) dataframe." + ) + if df1.attrs["experiment"] != df2.attrs["experiment"]: + logger.warning( + "Beware! Merging generation cases with different df.attrs['experiment']! Merging events, but discarding the 'attrs' of the second (latest) dataframe." 
+ ) df = pd.concat([df1, df2], axis=0).reset_index(drop=True) @@ -415,7 +448,6 @@ def get_samples(integ, batch_integrand, return_jac=False): weights = defaultdict(partial(np.ndarray, 0)) for x, y, wgt in integ.random_batch(yield_y=True, fcn=batch_integrand): - # compute integrand on samples including jacobian factors if integ.uses_jac: jac = integ.map.jac1d(y) diff --git a/src/DarkNews/__init__.py b/src/DarkNews/__init__.py index 03d31bb..feb0586 100755 --- a/src/DarkNews/__init__.py +++ b/src/DarkNews/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.4.0" +__version__ = "0.4.1" import sys diff --git a/tests/test_predictions.py b/tests/test_predictions.py index 6dffb3a..7ef61a3 100644 --- a/tests/test_predictions.py +++ b/tests/test_predictions.py @@ -12,77 +12,80 @@ def close_enough(x, y, tol=1e-3): @pytest.mark.skipif("Linux" in platform.system(), reason="Linux appears to misbehave with seeded random numbers on GitHub actions") -def test_MB_rates_of_BPs(SM_gen, gen_simplest_benchmarks, gen_other_final_states, gen_most_generic_model, gen_dirt_cases): - ####### - expect = 0.038363091541911774 +def test_MB_SM(SM_gen): + expect = 0.03666960244288936 assert close_enough(SM_gen.w_event_rate.sum(), expect), "seeded SM generation has changed!" - ####### + +def test_MB_benchmarks(gen_simplest_benchmarks): df_light, df_heavy, df_TMM = gen_simplest_benchmarks # check seeded generation - expect = 13180.463971429017 + expect = 13607.917925196376 assert close_enough(df_light.w_event_rate.sum(), expect), "seeded light dark photon has changed!" # check seeded generation - expect = 5.516596808396571 + expect = 5.490449133362185 assert close_enough(df_heavy.w_event_rate.sum(), expect), "seeded heavy dark photon has changed!" # check seeded generation - expect = 52620.678472114305 + expect = 52246.996516303276 assert close_enough(df_TMM.w_event_rate.sum(), expect), "seeded TMM has changed!" 
- ####### + +def test_MB_other_final_states(gen_other_final_states): df_light, df_heavy, df_TMM_mumu, df_TMM_photon = gen_other_final_states # check seeded generation - expect = 202.14258392685633 + expect = 203.35903851291818 assert close_enough(df_light.w_event_rate.sum(), expect), "seeded light dark photon to muons has changed!" # check seeded generation - expect = 2.3258916609020366 + expect = 2.326728225953212 assert close_enough(df_heavy.w_event_rate.sum(), expect), "seeded heavy dark photon to muons has changed!" # check seeded generation - expect = 3458.8436785760227 - assert close_enough(df_TMM_mumu.w_event_rate.sum(), expect), "seeded light dark photon to muons has changed!" + expect = 3425.7711399143527 + assert close_enough(df_TMM_mumu.w_event_rate.sum(), expect), "seeded TMM to muons has changed!" assert "P_decay_ell_plus" in df_TMM_mumu.columns assert df_TMM_mumu["P_decay_ell_plus", "0"].min() > dn.const.m_mu, "Mu+ energy smaller than its mass? Not generating for muons?" assert df_TMM_mumu["P_decay_ell_minus", "0"].min() > dn.const.m_mu, "Mu- energy smaller than its mass? Not generating for muons?" # check seeded generation - expect = 3411.9684811051043 + expect = 3450.618873090897 assert close_enough(df_TMM_photon.w_event_rate.sum(), expect), "seeded heavy dark photon to muons has changed!" assert "P_decay_photon" in df_TMM_photon.columns - ####### + +def test_MB_generic_model(gen_most_generic_model): df_light, df_heavy, df_photon = gen_most_generic_model # check seeded generation - expect = 23222307.376793236 + expect = 23279598.68188055 assert close_enough(df_light.w_event_rate.sum(), expect), "seeded light dark photon to muons has changed!" # check seeded generation - expect = 178964.05762603838 + expect = 179795.4844049769 assert close_enough(df_heavy.w_event_rate.sum(), expect), "seeded heavy most-generic model has changed!" 
# check seeded generation - expect = 179580.3899866729 + expect = 179488.15926133815 assert close_enough(df_photon.w_event_rate.sum(), expect), "seeded heavy most-generic model has changed!" - ####### + +def test_MB_dirt(gen_dirt_cases): df_1, df_2, df_3, df_4 = gen_dirt_cases # check seeded generation - expect = 77791.02852083213 + expect = 82089.91508629762 assert close_enough(df_1.w_event_rate.sum(), expect), "seeded sbnd dirt generation has changed!" # check seeded generation - expect = 32.76684839561704 + expect = 31.809474146527283 assert close_enough(df_2.w_event_rate.sum(), expect), "seeded microboone dirt generation has changed!" # check seeded generation - expect = 267.9440511293012 + expect = 274.96427582452174 assert close_enough(df_3.w_event_rate.sum(), expect), "seeded icarus dirt generation has changed!" # check seeded generation - expect = 404.9325841822879 + expect = 402.99035914458227 assert close_enough(df_4.w_event_rate.sum(), expect), "seeded miniboone dirt generation has changed!"