Merge branch 'gwastro:master' into multiband_transform
WuShichao authored Aug 7, 2023 · 2 parents 37029ca + 32e0b90 · commit 6721aff
Showing 7 changed files with 56 additions and 41 deletions.
70 changes: 32 additions & 38 deletions bin/live/pycbc_live_combine_single_fits
@@ -12,12 +12,15 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.

"""Combine PyCBC Live single-detector trigger fitting parameters from several
different files."""

import h5py, numpy as np, argparse
import logging
import pycbc

parser = argparse.ArgumentParser(usage="",
description="Combine fitting parameters from several different files")

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--verbose", action="store_true",
help="Print extra debugging information", default=False)
parser.add_argument("--trfits-files", nargs="+", required=True,
@@ -30,7 +33,7 @@ parser.add_argument("--output", required=True,
parser.add_argument("--ifos", required=True, nargs="+",
help="list of ifos fo collect info for")

args=parser.parse_args()
args = parser.parse_args()

pycbc.init_logging(args.verbose)

@@ -42,8 +45,8 @@ if args.conservative_percentile < 50 or \
"otherwise it is either not a percentile, or not "
"conservative.")

counts_all = {ifo:[] for ifo in args.ifos}
alphas_all = {ifo:[] for ifo in args.ifos}
counts_all = {ifo: [] for ifo in args.ifos}
alphas_all = {ifo: [] for ifo in args.ifos}
analysis_dates = []

with h5py.File(args.trfits_files[0], 'r') as fit_f0:
@@ -58,7 +61,7 @@ with h5py.File(args.trfits_files[0], 'r') as fit_f0:
fit_thresh = fit_f0.attrs['fit_threshold']
fit_func = fit_f0.attrs['fit_function']

live_times = {ifo : [] for ifo in args.ifos}
live_times = {ifo: [] for ifo in args.ifos}

trigger_file_starts = []
trigger_file_ends = []
@@ -67,7 +70,6 @@ n_files = len(args.trfits_files)
logging.info("Checking through %d files", n_files)

for f in args.trfits_files:

fits_f = h5py.File(f, 'r')
# Check that the file uses the same setup as file 0, to make sure
# all coefficients are comparable
@@ -84,10 +86,9 @@ for f in args.trfits_files:
for ifo in args.ifos:
if ifo not in fits_f:
continue
else:
trig_times = fits_f[ifo]['triggers']['end_time'][:]
gps_last = max(gps_last, trig_times.max())
gps_first = min(gps_first, trig_times.min())
trig_times = fits_f[ifo]['triggers']['end_time'][:]
gps_last = max(gps_last, trig_times.max())
gps_first = min(gps_first, trig_times.min())
trigger_file_starts.append(gps_first)
trigger_file_ends.append(gps_last)

@@ -101,7 +102,7 @@ for f in args.trfits_files:
counts_all[ifo].append(fits_f[ifo + '/counts'][:])
alphas_all[ifo].append(fits_f[ifo + '/fit_coeff'][:])
if any(np.isnan(fits_f[ifo + '/fit_coeff'][:])):
logging.info("nan in " + f + ", " + ifo)
logging.info("nan in %s, %s", f, ifo)
logging.info(fits_f[ifo + '/fit_coeff'][:])
fits_f.close()

@@ -118,10 +119,10 @@ ad = trigger_file_ends[ad_order] - start_time_n
counts_bin = {ifo: [c for c in zip(*counts_all[ifo])] for ifo in args.ifos}
alphas_bin = {ifo: [a for a in zip(*alphas_all[ifo])] for ifo in args.ifos}

alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
counts_out = {ifo : np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos}
cons_alphas_out = {ifo : np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
cons_counts_out = {ifo : np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos}
alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
counts_out = {ifo: np.inf * np.ones(len(counts_bin[ifo])) for ifo in args.ifos}
cons_alphas_out = {ifo: np.zeros(len(alphas_bin[ifo])) for ifo in args.ifos}
cons_counts_out = {ifo: np.inf * np.ones(len(alphas_bin[ifo])) for ifo in args.ifos}

logging.info("Writing results")
fout = h5py.File(args.output, 'w')
@@ -132,23 +133,14 @@ fout['bins_edges'] = list(bl) + [bu[-1]]
fout['fits_dates'] = ad + start_time_n

for ifo in args.ifos:
fout.create_group(ifo)
fout[ifo].attrs['live_time'] = sum(live_times[ifo])

save_allmeanalpha = {}
for ifo in args.ifos:
fout_ifo = fout[ifo]
logging.info(ifo)
fout_ifo = fout.create_group(ifo)
l_times = np.array(live_times[ifo])
count_all = np.sum(counts_bin[ifo], axis=0) / l_times
invalphan = np.array(counts_bin[ifo]) / np.array(alphas_bin[ifo])
invalphan_all = np.mean(invalphan, axis=0)
alpha_all = np.mean(counts_bin[ifo], axis=0) / invalphan_all
meant = l_times.mean()
fout_ifo.attrs['live_time'] = l_times.sum()

fout_ifo[f'separate_fits/live_times'] = l_times[ad_order]
fout_ifo[f'separate_fits/start_time'] = trigger_file_starts[ad_order]
fout_ifo[f'separate_fits/end_time'] = trigger_file_ends[ad_order]
fout_ifo['separate_fits/live_times'] = l_times[ad_order]
fout_ifo['separate_fits/start_time'] = trigger_file_starts[ad_order]
fout_ifo['separate_fits/end_time'] = trigger_file_ends[ad_order]

for counter, a_c_u_l in enumerate(zip(alphas_bin[ifo],
counts_bin[ifo], bu, bl)):
@@ -161,9 +153,11 @@ for ifo in args.ifos:
cons_alpha = np.percentile(a, 100 - args.conservative_percentile)
cons_alphas_out[ifo][counter] = cons_alpha
alphas_out[ifo][counter] = mean_alpha
cons_count = np.percentile(c, args.conservative_percentile)
cons_counts_out[ifo][counter] = cons_count * len(c)
counts_out[ifo][counter] = c.sum()
# To get the count values, we need to convert to rates and back again
r = c / l_times[ad_order]
cons_rate = np.percentile(r, args.conservative_percentile)
cons_counts_out[ifo][counter] = cons_rate * l_times[ad_order].sum()
counts_out[ifo][counter] = np.mean(r) * l_times[ad_order].sum()

fout_ifo[f'separate_fits/bin_{counter:d}/fit_coeff'] = a
fout_ifo[f'separate_fits/bin_{counter:d}/counts'] = c
@@ -179,15 +173,15 @@ for ifo in args.ifos:
# Take some averages for plotting and summary values
overall_invalphan = counts_out[ifo] / alphas_out[ifo]
overall_meanalpha = counts_out[ifo].mean() / overall_invalphan.mean()
sum_counts_out = counts_out[ifo].sum() / sum(live_times[ifo])
save_allmeanalpha[ifo] = overall_meanalpha

# For the fixed version, we just set this to 1
fout_ifo['fixed/counts'] = [1 for c in counts_out[ifo]]
fout_ifo['fixed/fit_coeff'] = [0 for a in alphas_out[ifo]]
fout_ifo['fixed/counts'] = [1] * len(counts_out[ifo])
fout_ifo['fixed/fit_coeff'] = [0] * len(alphas_out[ifo])

# Add some useful info to the output file
fout_ifo.attrs['mean_alpha'] = save_allmeanalpha[ifo]
fout_ifo.attrs['mean_alpha'] = overall_meanalpha
fout_ifo.attrs['total_counts'] = counts_out[ifo].sum()

fout.close()

logging.info('Done')
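The substantive change in this file is how per-file trigger counts are combined. Because the input fit files can cover very different live times, a percentile over raw counts is biased; the new code converts counts to rates, takes the mean and the conservative percentile over rates, and scales back up by the total live time. A minimal sketch of that logic, with illustrative names that are not part of the PyCBC API:

```python
import numpy as np

def combine_counts(counts, live_times, conservative_percentile=95):
    """Sketch of the rate-based combination: per-file counts are
    converted to rates, combined, then rescaled by the total live
    time. `counts` and `live_times` are same-length sequences, one
    entry per input fit file."""
    counts = np.asarray(counts, dtype=float)
    live_times = np.asarray(live_times, dtype=float)
    rates = counts / live_times
    total_time = live_times.sum()
    mean_count = rates.mean() * total_time   # counts_out analogue
    cons_rate = np.percentile(rates, conservative_percentile)
    cons_count = cons_rate * total_time      # cons_counts_out analogue
    return mean_count, cons_count

# Three files with different live times but the same underlying rate:
print(combine_counts([10, 20, 40], [100, 200, 400]))  # (70.0, 70.0)
```

With equal underlying rates the mean and conservative estimates agree, as in the example; files with short live times no longer skew the combined count.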
2 changes: 1 addition & 1 deletion bin/live/pycbc_live_plot_combined_single_fits
@@ -189,7 +189,7 @@ for ifo in ifos:
count_labels = [l.get_label() for l in count_lines]
ax_count.legend(count_lines, count_labels, loc='lower center',
ncol=5, bbox_to_anchor=(0.5, 1.01))
ax_count.set_ylabel('Counts per live time')
ax_count.set_ylabel('Rate of triggers above fit threshold [s$^{-1}$]')

for ax in [ax_count, ax_alpha]:
ax.set_xticks(xtix)
3 changes: 2 additions & 1 deletion bin/workflows/pycbc_make_inference_inj_workflow
@@ -37,6 +37,7 @@ from pycbc.workflow.jobsetup import (PycbcCreateInjectionsExecutable,
PycbcInferenceExecutable)
from pycbc.workflow import inference_followups as inffu
from pycbc.workflow import plotting
from pycbc.workflow import versioning
from pycbc.inject import InjectionSet
from pycbc.io import FieldArray

@@ -356,7 +357,7 @@ if do_pp_test:
workflow += pp_workflow

# Create versioning information
wf.make_versioning_page(
versioning.make_versioning_page(
workflow,
workflow.cp,
rdir['workflow/version'],
3 changes: 2 additions & 1 deletion bin/workflows/pycbc_make_inference_plots_workflow
@@ -38,6 +38,7 @@ from pycbc.workflow import configuration
from pycbc.workflow import core
from pycbc.workflow import datafind
from pycbc.workflow import plotting
from pycbc.workflow import versioning
from pycbc import __version__
import pycbc.workflow.inference_followups as inffu

@@ -248,7 +249,7 @@ for num_event, event in enumerate(events):
title=label, collapse=True)

# Create versioning information
wf.make_versioning_page(
versioning.make_versioning_page(
workflow,
container.cp,
rdir['workflow/version'],
3 changes: 3 additions & 0 deletions pycbc/pnutils.py
@@ -524,6 +524,9 @@ def _get_imr_duration(m1, m2, s1z, s2z, f_low, approximant="SEOBNRv4"):
# NB for no clear reason this function has f_low as first argument
time_length = lalsimulation.SimIMRSEOBNRv4ROMTimeOfFrequency(
f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z)
elif approximant == 'SEOBNRv5_ROM':
time_length = lalsimulation.SimIMRSEOBNRv5ROMTimeOfFrequency(
f_low, m1 * lal.MSUN_SI, m2 * lal.MSUN_SI, s1z, s2z)
elif approximant == 'SPAtmplt' or approximant == 'TaylorF2':
chi = lalsimulation.SimInspiralTaylorF2ReducedSpinComputeChi(
m1, m2, s1z, s2z
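The new SEOBNRv5_ROM branch mirrors the SEOBNRv4 one using the v5 ROM's time-of-frequency routine. A hedged usage sketch via the public wrapper (assumes a lalsuite build that ships the SEOBNRv5 ROM and its data files; parameter values are illustrative):

```python
from pycbc import pnutils

# Estimated waveform duration in seconds from a 20 Hz start for a
# 30+30 solar-mass non-spinning binary; with this change SEOBNRv5_ROM
# dispatches to lalsimulation.SimIMRSEOBNRv5ROMTimeOfFrequency.
dur = pnutils.get_imr_duration(30., 30., 0., 0., 20.,
                               approximant="SEOBNRv5_ROM")
print("estimated duration: %.2f s" % float(dur))
```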
6 changes: 6 additions & 0 deletions pycbc/waveform/waveform.py
@@ -1030,6 +1030,11 @@ def seobnrv4_length_in_time(**kwds):
"""
return get_imr_length("SEOBNRv4", **kwds)

def seobnrv5_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv5_ROM waveform duration.
"""
return get_imr_length("SEOBNRv5_ROM", **kwds)

def imrphenomd_length_in_time(**kwds):
"""Stub for holding the calculation of IMRPhenomD waveform duration.
"""
@@ -1091,6 +1096,7 @@ def get_hm_length_in_time(lor_approx, maxm_default, **kwargs):
_filter_time_lengths["SEOBNRv4HM_ROM"] = seobnrv4hm_length_in_time
_filter_time_lengths["SEOBNRv4"] = seobnrv4_length_in_time
_filter_time_lengths["SEOBNRv4P"] = seobnrv4_length_in_time
_filter_time_lengths["SEOBNRv5_ROM"] = seobnrv5_length_in_time
_filter_time_lengths["IMRPhenomC"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2"] = imrphenomd_length_in_time
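Registering the stub in `_filter_time_lengths` is what makes the generic duration query work for the new approximant. A short sketch, under the same lalsuite assumption as above:

```python
from pycbc.waveform import get_waveform_filter_length_in_time

# Should now resolve through the SEOBNRv5_ROM entry added above.
t = get_waveform_filter_length_in_time(approximant="SEOBNRv5_ROM",
                                       mass1=30., mass2=30.,
                                       spin1z=0., spin2z=0.,
                                       f_lower=20.)
```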
10 changes: 10 additions & 0 deletions pycbc/workflow/core.py
@@ -61,6 +61,7 @@ def make_analysis_dir(path):

class Executable(pegasus_workflow.Executable):
# These are the file retention levels
DO_NOT_KEEP = 0
INTERMEDIATE_PRODUCT = 1
ALL_TRIGGERS = 2
MERGED_TRIGGERS = 3
@@ -147,6 +148,15 @@ def __init__(self, cp, name, ifos=None, out_dir=None, tags=None,
self.update_output_directory(out_dir=out_dir)

# Determine the level at which output files should be kept
if cp.has_option_tags('pegasus_profile-%s' % name,
'pycbc|retention_level', tags):
# Get the retention_level from the config file
# This method allows us to use the retention levels
# defined above
cfg_ret_level = cp.get_opt_tags('pegasus_profile-%s' % name,
'pycbc|retention_level', tags)
self.current_retention_level = getattr(self, cfg_ret_level)

self.update_current_retention_level(self.current_retention_level)

# Should I reuse this executable?
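The new block lets a workflow configuration override an executable's file-retention level; the option value must name one of the class-level constants, which `getattr` then resolves. A sketch of the mechanism (the `[pegasus_profile-inspiral]` section is an illustrative instance of the `pegasus_profile-<name>` pattern the code reads):

```python
# Config excerpt the new code would act on (executable name illustrative):
#
#   [pegasus_profile-inspiral]
#   pycbc|retention_level = MERGED_TRIGGERS
#
# The string from the config maps onto the class constants via getattr:
class Executable:
    DO_NOT_KEEP = 0
    INTERMEDIATE_PRODUCT = 1
    ALL_TRIGGERS = 2
    MERGED_TRIGGERS = 3

exe = Executable()
cfg_ret_level = "MERGED_TRIGGERS"  # as returned by cp.get_opt_tags(...)
exe.current_retention_level = getattr(exe, cfg_ret_level)
assert exe.current_retention_level == Executable.MERGED_TRIGGERS
# A misspelled level name raises AttributeError rather than silently passing.
```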
