pseudo-signal calculation for analytic covariances #166

Merged (8 commits, Dec 20, 2023)
29 changes: 22 additions & 7 deletions project/ana_cov_comp/README_perlmutter.md
@@ -3,23 +3,38 @@
```bash
alias shortjob="sbatch paramfiles/1perlmutternode.slurm $1" # short QOS
shortjob "srun --ntasks 1 --cpus-per-task 128 --cpu-bind=cores python -u \
python python/get_1pt_ewin_alms.py paramfiles/cov_dr6_v4_20231128.dict"
python/get_1pt_ewin_alms.py paramfiles/cov_dr6_v4_20231128.dict"
shortjob "srun --ntasks 1 --cpus-per-task 128 --cpu-bind=cores python -u \
python python/get_2pt_ewin_alms.py paramfiles/cov_dr6_v4_20231128.dict"
python/get_2pt_ewin_alms.py paramfiles/cov_dr6_v4_20231128.dict"
shortjob "srun --ntasks 1 --cpus-per-task 128 --cpu-bind=cores python -u \
python/get_4pt_coupling_matrices_recipe.py paramfiles/cov_dr6_v4_20231128.dict"
```

## Couplings:
2pt couplings:
```bash
alias shortjob="sbatch --time 2:00:00 paramfiles/1perlmutternode.slurm $1" # short QOS
alias shortjob="sbatch --time 4:00:00 paramfiles/1perlmutternode.slurm $1" # short QOS
shortjob "srun --ntasks 1 --cpus-per-task 128 --cpu-bind=cores python -u \
python python/get_2pt_coupling_matrices.py paramfiles/cov_dr6_v4_20231128.dict"
python/get_2pt_coupling_matrices.py paramfiles/cov_dr6_v4_20231128.dict"
```

For production on all 4pt,
```bash
# For production on all 4pt
alias shortjob="sbatch paramfiles/1perlmutternode.slurm $1" # short QOS
alias shortjob="sbatch --time 2:00:00 paramfiles/1perlmutternode.slurm $1" # short QOS
for i in {0..57}; do \
shortjob "srun --ntasks 1 --cpus-per-task 128 --cpu-bind=cores python -u \
python/get_4pt_coupling_result.py paramfiles/cov_dr6_v4_20231128.dict $((50*i)) $((50*i+50))"
done
```
```

## Pseudosignal Matrix

First, generate the bestfit foregrounds as usual,
```bash
python python/get_best_fit_mflike.py paramfiles/cov_dr6_v4_20231128.dict
```

Then generate the pseudosignal matrix,
```bash
python python/get_pseudosignal.py paramfiles/cov_dr6_v4_20231128.dict
```
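For orientation, here is a minimal sketch (not part of the pipeline) of loading and indexing the arrays that `get_pseudosignal.py` writes; the file name and the `(narrays, 3, narrays, 3, lmax + 1)` layout follow that script, while the relative path and indices are illustrative.
```python
import numpy as np

# load one of the outputs written by python/get_pseudosignal.py (path is illustrative)
pseudo = np.load("best_fits/pseudo_tfed_beamed_signal_model.npy")

# axes: (array1, pol1, array2, pol2, ell), pols ordered T, E, B
a1, a2 = 0, 0                      # first array crossed with itself
T = 0                              # index of T in 'TEB'
pseudo_tt = pseudo[a1, T, a2, T]   # pseudo-C_ell^TT for that array pair
print(pseudo.shape, pseudo_tt.shape)
```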
27 changes: 17 additions & 10 deletions project/ana_cov_comp/paramfiles/cov_dr6_v4_20231128.dict
@@ -13,15 +13,15 @@ multistep_path = data_dir
mcms_dir = data_dir + 'mcms'

binned_mcm = False
lmax = 8500
lmax = 10800
use_toeplitz_mcm = False
binning_file = data_dir + "binning/BIN_ACTPOL_50_4_SC_large_bin_at_low_ell"
niter = 0
type = 'Dl'

# alms
alms_dir = data_dir + 'alms'
apply_kspace_filter = False
apply_kspace_filter = True
k_filter_dr6 = {"type": "binary_cross", "vk_mask": [-90, 90], "hk_mask": [-50, 50], "weighted": False}
deconvolve_pixwin = True
pixwin_dr6 = {"pix": 'CAR', "order": 0}
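With `apply_kspace_filter` switched on, the analytic transfer function is built from the `k_filter_dr6` masks above. A minimal sketch of that step, mirroring the calls made in `python/get_pseudosignal.py` further down (the survey pair is illustrative):
```python
import numpy as np
from pspy import so_map_preprocessing

lmax = 10800
ells = np.arange(lmax + 1)
k_filter_dr6 = {"type": "binary_cross", "vk_mask": [-90, 90], "hk_mask": [-50, 50], "weighted": False}

# analytic transfer function for one survey, as in get_pseudosignal.py
tf_dr6 = so_map_preprocessing.analytical_tf_vkhk(k_filter_dr6["vk_mask"], k_filter_dr6["hk_mask"], ells)

# power-spectrum-level transfer function for a dr6 x dr6 cross
tf = np.sqrt(tf_dr6 * tf_dr6)
```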
@@ -169,12 +169,19 @@ freq_info_dr6_pa6_f090 = {"freq_tag": 90, "passband": passband_dir + "passband_d
freq_info_dr6_pa6_f150 = {"freq_tag": 150, "passband": passband_dir + "passband_dr6_pa6_f150.dat"}

# beams
beam_dr6_pa4_f150 = data_dir + 'beams/20230130_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt'
beam_dr6_pa4_f220 = data_dir + 'beams/20230130_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt'
beam_dr6_pa5_f090 = data_dir + 'beams/20230130_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt'
beam_dr6_pa5_f150 = data_dir + 'beams/20230130_beams/coadd_pa5_f150_night_beam_tform_jitter_cmb.txt'
beam_dr6_pa6_f090 = data_dir + 'beams/20230130_beams/coadd_pa6_f090_night_beam_tform_jitter_cmb.txt'
beam_dr6_pa6_f150 = data_dir + 'beams/20230130_beams/coadd_pa6_f150_night_beam_tform_jitter_cmb.txt'
beam_T_dr6_pa4_f150 = data_dir + 'beams/20230902_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt'
beam_T_dr6_pa4_f220 = data_dir + 'beams/20230902_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt'
beam_T_dr6_pa5_f090 = data_dir + 'beams/20230902_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt'
beam_T_dr6_pa5_f150 = data_dir + 'beams/20230902_beams/coadd_pa5_f150_night_beam_tform_jitter_cmb.txt'
beam_T_dr6_pa6_f090 = data_dir + 'beams/20230902_beams/coadd_pa6_f090_night_beam_tform_jitter_cmb.txt'
beam_T_dr6_pa6_f150 = data_dir + 'beams/20230902_beams/coadd_pa6_f150_night_beam_tform_jitter_cmb.txt'

beam_pol_dr6_pa4_f150 = data_dir + 'beams/20230902_beams/coadd_pa4_f150_night_beam_tform_jitter_cmb.txt'
beam_pol_dr6_pa4_f220 = data_dir + 'beams/20230902_beams/coadd_pa4_f220_night_beam_tform_jitter_cmb.txt'
beam_pol_dr6_pa5_f090 = data_dir + 'beams/20230902_beams/coadd_pa5_f090_night_beam_tform_jitter_cmb.txt'
beam_pol_dr6_pa5_f150 = data_dir + 'beams/20230902_beams/coadd_pa5_f150_night_beam_tform_jitter_cmb.txt'
beam_pol_dr6_pa6_f090 = data_dir + 'beams/20230902_beams/coadd_pa6_f090_night_beam_tform_jitter_cmb.txt'
beam_pol_dr6_pa6_f150 = data_dir + 'beams/20230902_beams/coadd_pa6_f150_night_beam_tform_jitter_cmb.txt'

leakage_file_dir = data_dir + 'beams/20230130_beams/'
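The beams are now split into per-array `beam_T_*` and `beam_pol_*` entries. A minimal sketch of how they are read and combined into a beam window, following the `misc.read_beams` usage in `python/get_pseudosignal.py` (the array name is illustrative):
```python
import numpy as np
from pspy import so_dict
from pspipe_utils import misc

d = so_dict.so_dict()
d.read_from_file("paramfiles/cov_dr6_v4_20231128.dict")

lmax = d["lmax"]
array = "dr6_pa5_f090"

# bl is keyed by 'T', 'E', 'B'
l, bl = misc.read_beams(d[f"beam_T_{array}"], d[f"beam_pol_{array}"])

# beams enter at the map level, so a TT spectrum picks up the product of two T beams
bl_tt = bl["T"][: lmax + 1] * bl["T"][: lmax + 1]
```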

@@ -190,8 +197,8 @@ best_fits_dir = data_dir + 'best_fits'

cosmo_params = {"cosmomc_theta":0.0104085, "logA": 3.044, "ombh2": 0.02237, "omch2": 0.1200, "ns": 0.9649, "Alens": 1.0, "tau": 0.0544}
fg_norm = {"nu_0": 150.0, "ell_0": 3000, "T_CMB": 2.725}
fg_components = {'tt': ['tSZ_and_CIB', 'cibp', 'kSZ', 'radio', 'dust'], 'te': ['radio', 'dust'], 'ee': ['radio', 'dust'], 'bb': ['radio', 'dust'], 'tb': ['radio', 'dust'], 'eb': []}
fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.7, "xi": 0.1, "T_d": 9.60, "a_gte": 0, "a_gtb": 0, "a_gee": 0, "a_gbb": 0, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0}
fg_components = {'tt': ['tSZ_and_CIB', 'cibp', 'kSZ', 'radio', 'dust'], 'te': ['radio', 'dust'], 'ee': ['radio', 'dust'], 'bb': ['radio', 'dust'], 'tb': ['radio', 'dust'], 'eb': []}
fg_params = {"a_tSZ": 3.30, "a_kSZ": 1.60, "a_p": 6.90, "beta_p": 2.08, "a_c": 4.90, "beta_c": 2.20, "a_s": 3.10, "a_gtt": 8.83, "xi": 0.1, "T_d": 9.60, "a_gte": 0.43, "a_gtb": 0.012, "a_gee": 0.165, "a_gbb": 0.116, "a_pste": 0, "a_pstb": 0, "a_psee": 0, "a_psbb": 0}

# sim
iStart = 0
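The updated `fg_components` and `fg_params` feed the foreground model through `best_fits.get_foreground_dict`, as in `python/get_best_fit_mflike.py` below; a minimal sketch (the single delta-function passband is schematic):
```python
import numpy as np
from pspy import so_dict
from pspipe_utils import best_fits

d = so_dict.so_dict()
d.read_from_file("paramfiles/cov_dr6_v4_20231128.dict")

l_th = np.arange(2, 10_000)  # foreground models only go up to ell = 9_999
passbands = {"dr6_pa5_f090": [np.array([90.0]), np.array([1.0])]}  # schematic [nu_ghz, weight]

fg_dict = best_fits.get_foreground_dict(l_th, passbands, d["fg_components"], d["fg_params"], d["fg_norm"])
fg_tt = fg_dict["tt", "all", "dr6_pa5_f090", "dr6_pa5_f090"]
```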
38 changes: 27 additions & 11 deletions project/ana_cov_comp/python/get_best_fit_mflike.py
@@ -11,14 +11,15 @@
import pylab as plt
from pspipe_utils import best_fits, log, pspipe_list
from pspy import pspy_utils, so_dict, so_spectra
from itertools import combinations_with_replacement

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
log = log.get_logger(**d)

# first let's get a list of all the frequencies we plan to study
surveys = d["surveys"]
lmax = d["lmax"]
lmax = d["lmax"] # models only go up to ell of 10000
type = d["type"]
spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]

@@ -31,7 +32,7 @@

cosmo_params = d["cosmo_params"]
log.info(f"Computing CMB spectra with cosmological parameters: {cosmo_params}")
l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 500)
l_th, ps_dict = pspy_utils.ps_from_params(cosmo_params, type, lmax + 1)

f_name = f"{bestfit_dir}/cmb.dat"
so_spectra.write_ps(f_name, l_th, ps_dict, type, spectra=spectra)
@@ -47,7 +48,20 @@
if do_bandpass_integration:
log.info("Doing bandpass integration")

narrays, sv_list, ar_list = pspipe_list.get_arrays_list(d)
# compatibility with data_analysis, should be industrialized #FIXME
def get_arrays_list(d):
surveys = d['surveys']
arrays = {sv: d[f'arrays_{sv}'] for sv in surveys}
sv_list, ar_list = [], []
for sv1 in surveys:
for ar1 in arrays[sv1]:
for chan1 in arrays[sv1][ar1]:
sv_list.append(sv1)
ar_list.append(f"{ar1}_{chan1}")
return len(sv_list), sv_list, ar_list

narrays, sv_list, ar_list = get_arrays_list(d)
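# illustration (hypothetical toy dict): with surveys = ['dr6'] and
# arrays_dr6 = {'pa5': ['f090', 'f150'], 'pa6': ['f090']}, get_arrays_list returns
# (3, ['dr6', 'dr6', 'dr6'], ['pa5_f090', 'pa5_f150', 'pa6_f090'])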

for sv, ar in zip(sv_list, ar_list):

freq_info = d[f"freq_info_{sv}_{ar}"]
@@ -59,6 +73,7 @@
passbands[f"{sv}_{ar}"] = [nu_ghz, pb]

log.info("Getting foregrounds contribution")
l_th = l_th[l_th < 10_000] # models only go up to 9_999
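# note: l_th and ps_dict start at ell = 2, so ps_dict[spec][:(9_999 + 1) - 2] below
# keeps ell = 2..9_999, the same range as the truncated l_th and fg_dict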
fg_dict = best_fits.get_foreground_dict(l_th, passbands, fg_components, fg_params, fg_norm)

for sv1, ar1 in zip(sv_list, ar_list):
@@ -70,18 +85,19 @@
fg[spec] = fg_dict[spec.lower(), "all", name1, name2]
so_spectra.write_ps(f"{bestfit_dir}/fg_{name1}x{name2}.dat", l_th, fg, type, spectra=spectra)

############################################################
### after this, we are done, the rest is legacy/plotting ###
############################################################

log.info("Writing best fit spectra")
spectra_list = pspipe_list.get_spec_name_list(d, char = "_")
spectra_list = ([f"{a}x{b}" for a,b in combinations_with_replacement(passbands.keys(), 2)])
best_fit_dict = {}
for ps_name in spectra_list:
best_fit_dict[ps_name] = {}
name1, name2 = ps_name.split("x")
for spec in spectra:
if spec.lower() in d["fg_components"].keys():
fg = fg_dict[spec.lower(), "all", name1, name2]
else:
fg = fg_dict[spec.lower()[::-1], "all", name1, name2]
best_fit_dict[ps_name][spec] = ps_dict[spec] + fg
fg = fg_dict[spec.lower(), "all", name1, name2]
best_fit_dict[ps_name][spec] = ps_dict[spec][:(9_999 + 1) - 2] + fg
so_spectra.write_ps(f"{bestfit_dir}/cmb_and_fg_{ps_name}.dat", l_th, best_fit_dict[ps_name], type, spectra=spectra)


@@ -113,7 +129,7 @@
for comp in fg_components[mode]:
ax.plot(l_th, fg_dict[mode, comp, name1, name2])
ax.plot(l_th, fg_dict[mode, "all", name1, name2], color="k")
ax.plot(l_th, ps_dict[mode.upper()], color="gray")
ax.plot(l_th, ps_dict[mode.upper()][:(9_999 + 1) - 2], color="gray")
ax.set_title(cross)
if mode == "tt":
ax.set_yscale("log")
@@ -128,7 +144,7 @@
for comp in fg_components[mode]:
ax.plot(l_th, fg_dict[mode, comp, name2, name1])
ax.plot(l_th, fg_dict[mode, "all", name2, name1])
ax.plot(l_th, ps_dict[mode.upper()], color="gray")
ax.plot(l_th, ps_dict[mode.upper()][:(9_999 + 1) - 2], color="gray")
ax.set_title(f"{name2}x{name1}")

if mode[0] == mode[1]:
173 changes: 173 additions & 0 deletions project/ana_cov_comp/python/get_pseudosignal.py
@@ -0,0 +1,173 @@
'''
This script saves the following files in the `best_fits` directory. Each is an array
indexed by (array, pol) x (array, pol) x ell, with pols ordered T, E, B:
1. `beamed_signal_model.npy`: beam-convolved CMB + foreground model spectra
2. `tfed_beamed_signal_model.npy`: the same, with the k-space transfer function applied
3. `pseudo_beamed_signal_model.npy`: mode-coupled (pseudo) version of 1
4. `pseudo_tfed_beamed_signal_model.npy`: mode-coupled (pseudo) version of 2
'''
import matplotlib

matplotlib.use('Agg')
import sys, os

import numpy as np
from pspy import pspy_utils, so_dict, so_spectra, so_map_preprocessing
from pspipe_utils import log, misc

d = so_dict.so_dict()
d.read_from_file(sys.argv[1])
log = log.get_logger(**d)

# first let's get a list of all the frequencies we plan to study
surveys = d['surveys']
lmax = d['lmax']
type = d['type']
spectra = ['TT', 'TE', 'TB', 'ET', 'BT', 'EE', 'EB', 'BE', 'BB']

bestfit_dir = d['best_fits_dir']
couplings_dir = d['couplings_dir']

cosmo_params = d['cosmo_params']

apply_kspace_filter = d["apply_kspace_filter"]

# compatibility with data_analysis, should be industrialized #FIXME
def get_arrays_list(d):
surveys = d['surveys']
arrays = {sv: d[f'arrays_{sv}'] for sv in surveys}
sv_list, ar_list = [], []
for sv1 in surveys:
for ar1 in arrays[sv1]:
for chan1 in arrays[sv1][ar1]:
sv_list.append(sv1)
ar_list.append(f'{ar1}_{chan1}')
return len(sv_list), sv_list, ar_list

narrays, sv_list, ar_list = get_arrays_list(d)

def cmb_matrix_from_file(f_name, _lmax, spectra, input_type='Dl'):
ps_mat = np.zeros((3, 3, _lmax+1))

l, ps_theory = so_spectra.read_ps(f_name, spectra=spectra)
assert l[0] == 2, 'the file is expected to start at l=2'
lmax = min(_lmax, int(max(l))) # make sure lmax doesn't exceed model lmax

for p1, pol1 in enumerate('TEB'):
for p2, pol2 in enumerate('TEB'):
if input_type == 'Dl':
ps_theory[pol1 + pol2] *= 2 * np.pi / (l * (l + 1))
ps_mat[p1, p2, 2:(lmax+1)] = ps_theory[pol1 + pol2][:(lmax+1) - 2]

ps_mat[..., lmax+1:] = ps_mat[..., lmax][..., None] # extend with last val

return ps_mat
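# note: the returned ps_mat has shape (3, 3, _lmax + 1) in T, E, B ordering, with
# entries above the model's own lmax held at the last modelled value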

def foreground_matrix_from_files(f_name_tmp, arrays_list, _lmax, spectra, input_type='Dl'):
narrays = len(arrays_list)
fg_mat = np.zeros((narrays, 3, narrays, 3, _lmax+1))

for a1, array1 in enumerate(arrays_list):
for a2, array2 in enumerate(arrays_list):
l, fg_theory = so_spectra.read_ps(f_name_tmp.format(array1, array2), spectra=spectra)
assert l[0] == 2, 'the file is expected to start at l=2'
lmax = min(_lmax, int(max(l))) # make sure lmax doesn't exceed model lmax

for p1, pol1 in enumerate('TEB'):
for p2, pol2 in enumerate('TEB'):
if input_type == 'Dl':
fg_theory[pol1 + pol2] *= 2 * np.pi / (l * (l + 1))
fg_mat[a1, p1, a2, p2, 2:(lmax+1)] = fg_theory[pol1 + pol2][:(lmax+1) - 2]

fg_mat[..., lmax+1:] = fg_mat[..., lmax][..., None] # extend with last val

return fg_mat

ells = np.arange(lmax+1)

# ps_mat and fg_mat start at ell = 0 and are C_ell, as is the convention
f_name_cmb = bestfit_dir + '/cmb.dat'
ps_mat = cmb_matrix_from_file(f_name_cmb, lmax, spectra)

f_name_fg = bestfit_dir + '/fg_{}x{}.dat'
array_list = [f'{sv}_{ar}' for sv, ar in zip(sv_list, ar_list)]
fg_mat = foreground_matrix_from_files(f_name_fg, array_list, lmax, spectra)

# apply beam and transfer function
beamed_signal_model = np.zeros_like(fg_mat)
tfed_beamed_signal_model = np.zeros_like(fg_mat)
for a1, array1 in enumerate(array_list):
for a2, array2 in enumerate(array_list):
l1, bl1 = misc.read_beams(d[f'beam_T_{array1}'], d[f'beam_pol_{array1}']) # diag TEB, l
l2, bl2 = misc.read_beams(d[f'beam_T_{array2}'], d[f'beam_pol_{array2}']) # diag TEB, l
assert np.all(l1[:(lmax+1)] == ells), 'ell of beam from array1 != ells'
assert np.all(l2[:(lmax+1)] == ells), 'ell of beam from array2 != ells'

for p1, pol1 in enumerate('TEB'):
for p2, pol2 in enumerate('TEB'):
key = f'{array1}_{pol1}', f'{array2}_{pol2}'
bl = bl1[pol1][:(lmax+1)] * bl2[pol2][:(lmax+1)] # beams defined at beam level

if apply_kspace_filter:
sv1, sv2 = sv_list[a1], sv_list[a2]

filter_dict1 = d[f"k_filter_{sv1}"]
assert filter_dict1['type'] == 'binary_cross', \
f'if {sv1=} kfilt, must be binary cross'
tf1 = so_map_preprocessing.analytical_tf_vkhk(filter_dict1['vk_mask'], filter_dict1['hk_mask'], ells)

filter_dict2 = d[f"k_filter_{sv2}"]
assert filter_dict2['type'] == 'binary_cross', \
f'if {sv2=} kfilt, must be binary cross'
tf2 = so_map_preprocessing.analytical_tf_vkhk(filter_dict2['vk_mask'], filter_dict2['hk_mask'], ells)

tf = np.sqrt(tf1 * tf2) # tf defined at ps level
else:
tf = 1

beamed_signal_model[a1, p1, a2, p2] = bl * (ps_mat[p1, p2] + fg_mat[a1, p1, a2, p2])
tfed_beamed_signal_model[a1, p1, a2, p2] = tf * beamed_signal_model[a1, p1, a2, p2]

np.save(f'{bestfit_dir}/beamed_signal_model.npy', beamed_signal_model)
np.save(f'{bestfit_dir}/tfed_beamed_signal_model.npy', tfed_beamed_signal_model)

# Next, we loop over the two signal models (beamed, and beamed + transfer function) and each
# array pair, applying the appropriate mode-coupling matrices (MCMs)
pseudo_beamed_signal_model = np.zeros_like(beamed_signal_model)
pseudo_tfed_beamed_signal_model = np.zeros_like(tfed_beamed_signal_model)
single_coupling_pols = {'TT': '00', 'TE': '02', 'ET': '02', 'TB': '02', 'BT': '02'}
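# TT couples through the spin-0 ('00') matrix and TE/ET/TB/BT through the spin-0x2 ('02') one;
# EE/BB and EB/BE mix through the spin-2 'pp'/'mm' matrices in the block products below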

for a1, array1 in enumerate(array_list):
for a2, array2 in enumerate(array_list):
arrs = f'w_{array1}xw_{array2}' if a1 <= a2 else f'w_{array2}xw_{array1}' # canonical

for inp, out in ((beamed_signal_model, pseudo_beamed_signal_model), (tfed_beamed_signal_model, pseudo_tfed_beamed_signal_model)):
# handle single coupling polarization combos
for P1P2 in single_coupling_pols:
spin = single_coupling_pols[P1P2]
mcm_file = f'{couplings_dir}/{arrs}_{spin}_coupling.npy'
M = np.load(mcm_file) * (2*ells + 1) # 2l + 1 across rows
pol1, pol2 = P1P2
p1, p2 = 'TEB'.index(pol1), 'TEB'.index(pol2)

out[a1, p1, a2, p2] = M @ inp[a1, p1, a2, p2]

# read 22 couplings
Mpp = np.load( f'{couplings_dir}/{arrs}_pp_coupling.npy') * (2*ells + 1) # 2l + 1 across rows
Mmm = np.load( f'{couplings_dir}/{arrs}_mm_coupling.npy') * (2*ells + 1) # 2l + 1 across rows

# handle EE BB
M = np.block([[Mpp, Mmm], [Mmm, Mpp]])
clee = inp[a1, 1, a2, 1]
clbb = inp[a1, 2, a2, 2]
pcl = M @ np.hstack([clee, clbb])
out[a1, 1, a2, 1] = pcl[:len(clee)]
out[a1, 2, a2, 2] = pcl[len(clee):]

# handle EB BE
M = np.block([[Mpp, -Mmm], [-Mmm, Mpp]])
cleb = inp[a1, 1, a2, 2]
clbe = inp[a1, 2, a2, 1]
pcl = M @ np.hstack([cleb, clbe])
out[a1, 1, a2, 2] = pcl[:len(cleb)]
out[a1, 2, a2, 1] = pcl[len(cleb):]


np.save(f'{bestfit_dir}/pseudo_beamed_signal_model.npy', pseudo_beamed_signal_model)
np.save(f'{bestfit_dir}/pseudo_tfed_beamed_signal_model.npy', pseudo_tfed_beamed_signal_model)
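For reference, the block products above implement the standard pseudo-C_ell coupling of the beamed signal model. Written out, and assuming the stored coupling matrices omit the (2l'+1) factor that the script multiplies in,

$$
\tilde C_\ell^{TT} = \sum_{\ell'} (2\ell'+1)\, M^{00}_{\ell\ell'}\, C_{\ell'}^{TT},
\qquad
\tilde C_\ell^{EE} = \sum_{\ell'} (2\ell'+1)\left[ M^{++}_{\ell\ell'} C_{\ell'}^{EE} + M^{--}_{\ell\ell'} C_{\ell'}^{BB} \right],
$$
$$
\tilde C_\ell^{EB} = \sum_{\ell'} (2\ell'+1)\left[ M^{++}_{\ell\ell'} C_{\ell'}^{EB} - M^{--}_{\ell\ell'} C_{\ell'}^{BE} \right],
$$

with TE and TB coupling through the spin-0x2 matrix as TT does through the spin-0 one, BB and BE obtained by swapping E and B, and any remaining normalization absorbed in the stored couplings.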