-
Notifications
You must be signed in to change notification settings - Fork 3
/
profiler_real.py
181 lines (140 loc) · 4.99 KB
/
profiler_real.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
__author__ = "Andrea Chiappo"
__email__ = "[email protected]"
#
# Template code to perform a Jeans analysis
# on real stellar kinematic data from a
# dwarf spheroidal satellite galaxy
#
# outputs:
# - npy file containing the sampled parameter coordinates
# - npy file containing the corresponding likelihood values
#
import numpy as np
from utils import load_data, envelope
###############################################################################
# select the candidate dwarf galaxy (Draco)
# import the corresponding stellar kinematic data
dwarf = 'dra'
# unpacked values — presumably: projected radii R, line-of-sight velocities v,
# velocity errors dv, distance D, half-light radius rh, tidal radius rt
# (TODO confirm against utils.load_data)
R,v,dv,D,rh,rt = load_data(dwarf)
###############################################################################
# select the model used in the Jeans equation
from profiles import build_profile, build_kernel
from dispersion import SphericalJeansDispersion
from likelihood import GaussianLikelihood
ker = 'iso'          # anisotropy kernel: 'iso' = isotropic (no extra kernel parameter)
theta = 0.5          # NOTE(review): passed to dwarf_props as 'theta'; meaning not shown here — verify
dm = build_profile('nfw')            # dark-matter density profile
st = build_profile('plummer',rh=rh)  # stellar profile, scaled by the half-light radius
kr = build_kernel(ker)
# rt is set to np.inf here, overriding the value returned by load_data
dwarf_props = {'D':D, 'theta':theta, 'rt':np.inf, 'with_errs':False}
Sigma = SphericalJeansDispersion(dm, st, kr, dwarf_props)
###############################################################################
# likelihood object instantiation
# data vector: radii, velocities, errors, and the mean velocity as the
# systemic-velocity estimate
LL = GaussianLikelihood([R, v, dv, v.mean()], Sigma)
# free the four dark-matter profile parameters (scale radius + three slopes)
LL.set_free('dm_r0')
LL.set_free('dm_a')
LL.set_free('dm_b')
LL.set_free('dm_c')
# enter allowed ranges on the parameter values
# for the exploration of the parameter space
# NOTE: 'dm_r0' (and 'ker_ra' below) ranges are in log10 — see lnlike,
# which maps them back with 10**val before evaluating LL
priors = {'J': (10, 30),
'dm_r0' : (-3, 2),
'dm_a' : (0, 3),
'dm_b' : (0.5, 6),
'dm_c' : (0, 1.5)
}
# non-isotropic kernels carry one extra free parameter whose prior depends
# on the kernel type ('om' = Osipkov-Merritt, presumably — TODO confirm)
if not ker == 'iso':
    ker_param = 'ker_'+kr.params[0]
    LL.set_free(ker_param)
    if ker == 'om':
        priors[ker_param] = (-3, 2)
    else:
        priors[ker_param] = (-9, 0.9)
# print on screen likelihood arguments
# NOTE: LLargs fixes the parameter ordering used by lnprior/lnlike below
LLargs = LL.free_pars.keys()
print('LLargs: ',LLargs)
###############################################################################
# definition of the three elements entering the emcee sampler:
# - log prior
# - log likelihood
# - log posterior
###############################################################################
from sys import float_info
def lnprior(theta):
    """Flat (box) log-prior.

    Returns 0.0 when every coordinate of `theta` lies strictly inside
    the range declared for its parameter in the module-level `priors`
    dict (paired by position with `LLargs`), and -inf otherwise.
    """
    inside = all(priors[name][0] < coord < priors[name][1]
                 for coord, name in zip(theta, LLargs))
    return 0.0 if inside else -np.inf
def lnlike(theta):
    """Log-likelihood of `theta` (sampler coordinates).

    Coordinates sampled in log10 space ('dm_r0', 'ker_ra') are mapped
    back to natural units with 10**val before evaluating the module-level
    likelihood object LL.  Evaluation failures and non-finite results are
    clamped to -float_info.max so the sampler treats pathological points
    as extremely unlikely instead of crashing.
    """
    # copy theta into a new list where log-sampled parameters
    # are converted to natural units
    new_theta = [10**val if par in ('dm_r0', 'ker_ra') else val
                 for val, par in zip(theta, LLargs)]
    try:
        ll = -LL(*new_theta)
    # fixed: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and made the sampler uninterruptible
    except Exception:
        ll = -float_info.max
    if not np.isfinite(ll):
        return -float_info.max
    return ll
def lnprob(theta):
    """Log-posterior for emcee: prior plus likelihood, -inf outside the box."""
    prior = lnprior(theta)
    if np.isfinite(prior):
        return prior + lnlike(theta)
    return -np.inf
###############################################################################
# loglikelihood parameter space sampling
###############################################################################
# definition of number of steps in the sampling chain
# the values reported were found to be a compromise
# between sampling time and thoroughness: fewer steps for larger samples
ndim = len(LLargs)
nwalkers = 100
# fixed: the original chained comparisons (e.g. `100<R.size<500`) excluded
# the boundary sizes 100, 500, 1000 and 2000, which all fell through to the
# final `else` and got only 500 steps instead of following the schedule
if R.size < 100:
    nsteps = 8000
elif R.size < 500:
    nsteps = 4000
elif R.size < 1000:
    nsteps = 2000
elif R.size < 2000:
    nsteps = 1000
else:
    nsteps = 500
# initial positions of random walkers as
# randomly sampled points from parameter ranges
# (each of the nwalkers walkers gets an independent uniform draw per dimension)
pos0 = np.empty([nwalkers,ndim])
for w in range(nwalkers):
    for p,par in enumerate(LLargs):
        pL,pR = priors[par]
        p0 = np.random.uniform(low=pL,high=pR)
        pos0[w,p] = p0
from emcee import EnsembleSampler
# NOTE(review): comment said "multi-threads" but threads=1 runs serially;
# also `threads` and `rstate0` were removed in emcee v3 (use `pool` and
# the sampler's random state instead) — confirm the pinned emcee version
sampler = EnsembleSampler(nwalkers, ndim, lnprob, threads=1)
# Run initial burn-in steps
pos, prob, state = sampler.run_mcmc(pos0, 200)
# Reset the chain to remove the burn-in samples
sampler.reset()
# Starting from the final position in the burn-in chain, sample for nsteps.
sampler.run_mcmc(pos, nsteps, rstate0=state)
###############################################################################
# statistical inference from sampling the loglikelihood parameter space
###############################################################################
# instruction to save the result of sampling:
# - the coordinates of the sampled points in the chain
# - the corresponding logprobability
# NOTE(review): np.save does not create directories — 'results/<ker>/<dwarf>/'
# must already exist or these calls raise FileNotFoundError
np.save('results/{0}/{1}/samples_{0}_{1}'.format(ker,dwarf), sampler.chain)
np.save('results/{0}/{1}/lnprobs_{0}_{1}'.format(ker,dwarf), sampler.lnprobability)
# instruction to flatten the chain to successively
# envelope the results along the desired direction
flatsamples = sampler.flatchain
flatlnprobs = sampler.flatlnprobability
# envelope lowermost -lnlikelihood values over sampled J range
# to obtain the profile of another variable, change 'param' argument
Penv, Senv, Lenv = envelope(flatsamples, flatlnprobs, param=2)
# print results of envelope on screen
# fixed: the original used Python 2 print statements here, a SyntaxError
# under Python 3 and inconsistent with the print() call used earlier
print('results')
for P, S, L in zip(Penv, Senv, Lenv):
    print(P, S, L)