Commit aab2a1c

Minor deployment fixes.
cmccully committed Feb 27, 2024
1 parent 7b6ab72 commit aab2a1c
Showing 9 changed files with 82 additions and 47,117 deletions.
1 change: 1 addition & 0 deletions banzai_floyds/dbs.py
@@ -44,6 +44,7 @@ def get_standard(ra, dec, runtime_context, offset_threshold=5):
 class FluxStandard(Base):
     __tablename__ = 'fluxstandards'
     id = Column(Integer, primary_key=True, autoincrement=True)
+    frameid = Column(Integer, unique=True)
     filename = Column(String(100), unique=True)
     filepath = Column(String(150))
     ra = Column(Float)
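For context, a hedged sketch of how the new frameid column could be queried; the engine URL and lookup value are illustrative assumptions, not code from this commit.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from banzai_floyds.dbs import FluxStandard

# Hypothetical connection; real deployments configure this via the runtime context.
engine = create_engine('sqlite:///banzai_floyds.db')
session = sessionmaker(bind=engine)()

# frameid is unique, so at most one record matches a given archive frame ID.
standard = session.query(FluxStandard).filter(FluxStandard.frameid == 12345).first()
if standard is not None:
    print(standard.filename, standard.filepath, standard.ra)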
6 changes: 5 additions & 1 deletion banzai_floyds/extract.py
@@ -182,7 +182,7 @@ def extract(binned_data):
     # Apparently if you integrate over a pixel, the integral and the average are the same,
     # so we can treat the pixel value as being the average at the center of the pixel to first order.
 
-    results = {'fluxraw': [], 'fluxrawerr': [], 'wavelength': [], 'binwidth': [], 'order': []}
+    results = {'fluxraw': [], 'fluxrawerr': [], 'wavelength': [], 'binwidth': [], 'order': [], 'background': []}
     for data_to_sum in binned_data.groups:
         wavelength_bin = data_to_sum['wavelength_bin'][0]
         wavelength_bin_width = data_to_sum['wavelength_bin_width'][0]
@@ -193,7 +193,11 @@ def extract(binned_data):
         flux *= data_to_sum['uncertainty'] ** -2
         flux = np.sum(flux)
         flux_normalization = np.sum(data_to_sum['weights']**2 * data_to_sum['uncertainty']**-2)
+        background = data_to_sum['background'] * data_to_sum['weights']
+        background *= data_to_sum['uncertainty'] ** -2
+        background = np.sum(background)
         results['fluxraw'].append(flux / flux_normalization)
+        results['background'].append(background / flux_normalization)
         uncertainty = np.sqrt(np.sum(data_to_sum['weights']) / flux_normalization)
         results['fluxrawerr'].append(uncertainty)
         results['wavelength'].append(wavelength_bin)
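To spell out the arithmetic the new lines mirror: the raw flux and the new background sum use the same inverse-variance weighting and the same normalization. A standalone numpy sketch with made-up per-bin arrays (all values here are illustrative):

import numpy as np

# Illustrative per-bin arrays: profile weights, pixel counts, uncertainties,
# and a background model evaluated at the same pixels.
weights = np.array([0.2, 0.6, 0.2])
counts = np.array([10.0, 30.0, 12.0])
uncertainty = np.array([3.0, 5.0, 3.0])
background = np.array([2.0, 2.5, 2.0])

ivar = uncertainty ** -2
normalization = np.sum(weights ** 2 * ivar)

fluxraw = np.sum(counts * weights * ivar) / normalization
background_flux = np.sum(background * weights * ivar) / normalization

# Same form as the uncertainty computed in extract().
fluxrawerr = np.sqrt(np.sum(weights) / normalization)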
3 changes: 1 addition & 2 deletions banzai_floyds/fringe.py
@@ -2,7 +2,6 @@
 from banzai.stages import Stage
 from banzai.utils import import_utils
 from banzai.utils.file_utils import make_calibration_filename_function
-from banzai_floyds.utils.order_utils import get_order_2d_region
 from datetime import datetime
 from scipy.interpolate import CloughTocher2DInterpolator
 from banzai_floyds.matched_filter import optimize_match_filter
@@ -69,7 +68,7 @@ def make_master_calibration_frame(self, images):
         high_sn = image.data / image.uncertainty > 10.0
         data_to_fit = np.logical_and(image.orders.data == 1, high_sn)
         fringe_spline = fit_smooth_fringe_spline(image.data, data_to_fit)
-
+        # TODO: Someone needs to check this transformation
         shifted_order = np.logical_and(image.orders.shifted(-fringe_offset).data == 1, high_sn)
         offset_coordinates = [x[shifted_order], y[shifted_order] + fringe_offset]
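As a rough illustration of the fit-then-evaluate pattern around the TODO, here is a minimal CloughTocher2DInterpolator sketch; fit_smooth_fringe_spline and the orders machinery are approximated with stand-ins, so treat this as an assumption-laden sketch rather than the pipeline's code:

import numpy as np
from scipy.interpolate import CloughTocher2DInterpolator

# Illustrative stand-ins: a small, nearly flat image and a mask of pixels to fit.
data = np.random.normal(1.0, 0.01, size=(50, 50))
to_fit = np.ones_like(data, dtype=bool)

y, x = np.mgrid[:data.shape[0], :data.shape[1]]
points = np.stack([x[to_fit], y[to_fit]], axis=-1)
fringe_spline = CloughTocher2DInterpolator(points, data[to_fit])

# Evaluate at coordinates shifted by a hypothetical fringe offset; points
# outside the fitted region come back as NaN.
fringe_offset = 2.0
shifted = fringe_spline(x[to_fit], y[to_fit] + fringe_offset)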
2 changes: 1 addition & 1 deletion banzai_floyds/tests/test_e2e.py
@@ -249,6 +249,6 @@ def process_science_frames(self):
 
     def test_if_science_frames_were_created(self):
         test_data = ascii.read(DATA_FILELIST)
-        for expected_file in expected_filenames(test_data):
+        for i, expected_file in enumerate(expected_filenames(test_data)):
             if 'e91.fits' in expected_file and not is_standard(test_data['object'][i]):
                 assert os.path.exists(expected_file)
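Why the enumerate matters: the is_standard check indexes test_data['object'] by row, so the loop index must stay aligned with each expected filename. A toy illustration with stand-in names:

# Hypothetical stand-ins for the test fixtures.
objects = ['SN2024abc', 'BD+28 4211']
filenames = ['frame1-e91.fits', 'frame2-e91.fits']

# enumerate keeps the filename index aligned with the object column,
# so filenames[i] and objects[i] describe the same frame.
for i, filename in enumerate(filenames):
    print(filename, objects[i])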
3 changes: 1 addition & 2 deletions banzai_floyds/utils/telluric_utils.py
@@ -1,5 +1,4 @@
 import numpy as np
-from banzai_floyds.matched_filter import optimize_match_filter
 import pkg_resources
 from astropy.io import ascii
 from banzai.logs import get_logger
@@ -15,7 +14,7 @@
 # Also see Matheson et al. 2000, AJ 120, 1499
 # I had to be pretty judicious on my choice of telluric regions so that there were anchor points for all the
 # polynomial fits.
-# In principle, we could also use telfit to fit the telluric absorption but that will be slower. See 
+# In principle, we could also use telfit to fit the telluric absorption but that will be slower. See
 # Gullikson et al. 2014, AJ 148, 53
 TELLURIC_REGIONS = [{'wavelength_min': 5000.0, 'wavelength_max': 5155.0, 'molecule': 'O2'},
                     {'wavelength_min': 5370.0, 'wavelength_max': 5545.0, 'molecule': 'O2'},
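For illustration, a sketch of how TELLURIC_REGIONS could be turned into a boolean wavelength mask; the helper function here is hypothetical, not part of this diff:

import numpy as np

from banzai_floyds.utils.telluric_utils import TELLURIC_REGIONS

def in_telluric_region(wavelengths):
    # True where a wavelength (same units as the table) falls in any telluric band.
    mask = np.zeros(np.shape(wavelengths), dtype=bool)
    for region in TELLURIC_REGIONS:
        mask |= np.logical_and(wavelengths >= region['wavelength_min'],
                               wavelengths <= region['wavelength_max'])
    return mask

print(in_telluric_region(np.linspace(5000.0, 6000.0, 11)))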
400 changes: 36 additions & 364 deletions characterization_testing/Extraction.ipynb

Large diffs are not rendered by default.

99 changes: 8 additions & 91 deletions characterization_testing/FringeFrameMaker.ipynb

Large diffs are not rendered by default.

46,683 changes: 27 additions & 46,656 deletions characterization_testing/WavelengthCalibration.ipynb

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions helm-chart/banzai-floyds/templates/_helpers.tpl
@@ -88,6 +88,8 @@ Ingester environment variables
   value: {{ .Values.ingester.apiRoot | quote }}
 - name: BUCKET
   value: {{ .Values.ingester.s3Bucket | quote }}
+- name: FILESTORE_TYPE
+  value: {{ .Values.ingester.filestoreType | quote }}
 {{- if .Values.ingester.noMetrics }}
 - name: OPENTSDB_PYTHON_METRICS_TEST_MODE
   value: "1"
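The new FILESTORE_TYPE entry is filled from .Values.ingester.filestoreType, so deployments must define that key in their values file. A minimal sketch of how a process might pick the variable up at runtime (the fallback value is an illustrative assumption, and this is not the ingester's actual code):

import os

# FILESTORE_TYPE is injected by the helm template from .Values.ingester.filestoreType.
# The 's3' fallback here is an assumption, not the chart's documented default.
filestore_type = os.environ.get('FILESTORE_TYPE', 's3')
print(f'filestore backend: {filestore_type}')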
