diff --git a/LICENSE.txt b/LICENSE.txt
index 5b9f57c3d9..e6a4e0df4a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2016 Cool Star Lab
+Copyright (c) 2016-2023 Cool Star Lab

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..61cc920515
--- /dev/null
+++ b/README.md
@@ -0,0 +1,259 @@
.. README for SPLAT homepage.

.. _`SpeX Prism Library`: http://www.browndwarfs.org/spexprism
.. _`SPL`: http://www.browndwarfs.org/spexprism
.. _`pip`: https://pip.pypa.io/en/stable/
.. _`docs`: https://splat.physics.ucsd.edu/splat/


SPLAT: The SpeX Prism Library Analysis Toolkit
===============================================

Access SPLAT's full documentation at `https://splat.physics.ucsd.edu/splat `_.

Preamble
--------

SPLAT is a Python-based spectral access and analysis package designed to interface
with the `SpeX Prism Library`_ (`SPL`_), an online repository of over
3,000 low-resolution, near-infrared spectra, primarily
of low-temperature stars and brown dwarfs.
It is built on common Python packages such as `astropy `_, `astroquery `_, `emcee `_, `matplotlib `_, `numpy `_, `pandas `_, `scipy `_, and others.

SPLAT tools allow you to:

* search the SpeX Prism Library for spectral data and source information;
* access and analyze publicly available spectra contained in it;
* analyze your own spectral data from SpeX and other instruments;
* perform basic spectral analyses such as type classification, gravity classification, index measurement, spectrophotometry, reddening, blended light analysis, and basic math operations;
* access atmosphere models and perform fits to spectral data;
* transform observables to physical parameters using evolutionary models;
* use published empirical trends between spectral type, absolute magnitudes, colors, luminosities, effective temperatures, and others;
* access online data repositories through wrappers to `astroquery `_;
* simulate very low mass star and brown dwarf populations by combining spatial, evolutionary, and observational properties; and
* plot, tabulate, and publish your results.

Note:
  Many features in SPLAT continue to be in development.
  Help us improve the code by reporting bugs (and solutions!) to our GitHub site,
  `https://github.com/aburgasser/splat `_.

Installation and Dependencies
-----------------------------

SPLAT should be cloned from the GitHub site `https://github.com/aburgasser/splat `_, which is updated on a regular basis.

Warning:
  At this time please do not install SPLAT using `pip`_, as that is an outdated version of SPLAT that is no longer supported.

Once you've downloaded the code and data, you will need to add the SPLAT top-level directory to the environment variables ``SPLAT_PATH`` and ``PYTHONPATH`` (and optionally to your system ``PATH``). More detailed instructions are on the installation page at `https://splat.physics.ucsd.edu/splat `_.
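If you prefer to set these for a single Python session rather than in your shell profile, something like the following can work (a minimal sketch: the path shown is a placeholder for wherever you cloned SPLAT, and the variables should be set before ``splat`` is first imported):

>>> import os, sys
>>> os.environ['SPLAT_PATH'] = '/path/to/splat'   # placeholder path to your SPLAT clone
>>> sys.path.append(os.environ['SPLAT_PATH'])     # session equivalent of adding it to PYTHONPATH
>>> import splat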
SPLAT has core dependencies on the following packages:

* `astropy `_
* `astroquery `_
* `bokeh `_ (for SPLAT web interface only)
* `corner `_ (for model fitting only)
* `emcee `_ (for model fitting only)
* `flask `_ (for SPLAT web interface only)
* `matplotlib `_
* `numpy `_
* `pandas `_
* `requests `_
* `scipy `_

Using SPLAT
-----------

.. _`Spectrum`: https://splat.physics.ucsd.edu/splat/splat.html?highlight=spectrum#the-splat-spectrum-object
.. _`getSpectrum()`: https://splat.physics.ucsd.edu/splat/api.html#splat.getSpectrum
.. _`fluxCalibrate()`: https://splat.physics.ucsd.edu/splat/api.html#splat.Spectrum.fluxCalibrate
.. _`plot()`: https://splat.physics.ucsd.edu/splat/api.html#splat.Spectrum.plot
.. _`plotSpectrum()`: https://splat.physics.ucsd.edu/splat/api.html#splat.plot.plotSpectrum
.. _`measureIndex()`: https://splat.physics.ucsd.edu/splat/api.html#splat.measureIndex
.. _`measureIndexSet()`: https://splat.physics.ucsd.edu/splat/api.html#splat.measureIndexSet
.. _`classifyGravity()`: https://splat.physics.ucsd.edu/splat/api.html#splat.classifyGravity
.. _`classifyByXXX`: https://splat.physics.ucsd.edu/splat/api.html#spectral-classification
.. _`compareSpectra()`: https://splat.physics.ucsd.edu/splat/api.html#splat.compareSpectra
.. _`modelFitMCMC()`: https://splat.physics.ucsd.edu/splat/api.html#splat.model.modelFitMCMC


SPLAT is organized into a series of modules based on core functionalities:

* `splat.core`: core functionalities, including index measurement, database access, and classification
* `splat.citations`: bibliographic/bibtex routines
* `splat.database`: access to the spectral and source databases, as well as online resources through astroquery
* `splat.empirical`: empirical conversion relations
* `splat.evolve`: access to evolutionary models
* `splat.model`: access to spectral models and model-fitting routines
* `splat.photometry`: spectrophotometry routines and filter access
* `splat.plot`: plotting and visualization routines
* `splat.simulate`: population simulation routines
* `splat.utilities`: additional routines for general analysis
* `splat.web`: SPLAT's web interface (in development)

SPLAT has been tested on both Python 2.7 and 3.0-3.7, and is best used in
`ipython` or `jupyter notebook`.
All of the necessary data is included in the GitHub package, so you don't need to be online to run most programs.

Reading in Spectra
~~~~~~~~~~~~~~~~~~

The best way to read in a spectrum is to use `getSpectrum()`_, which takes a number of search keywords and returns a list of `Spectrum`_ objects:

>>> import splat
>>> splist = splat.getSpectrum(shortname='0415-0935')
Retrieving 1 file

>>> splist = splat.getSpectrum(name='TWA30A')
Retrieving 3 files

>>> splist = splat.getSpectrum(opt_spt=['L2','L5'],jmag=[12,13])
Retrieving 5 files

In each case, ``splist`` is a list of `Spectrum`_ objects, each a container for a spectrum and its associated source properties. For example, selecting the first spectrum,

>>> splist[0]
SPEX-PRISM spectrum of 2MASSW J0036159+182110

For a given `Spectrum`_ object ``sp``, ``sp.wave`` gives the wavelengths of the spectrum, ``sp.flux`` the flux values, and ``sp.noise`` the
flux uncertainty. A summary of the `Spectrum`_ object can be accessed using ``sp.info()``.

>>> splist[0].info()
SPEX-PRISM spectrum of 2MASSW J0036159+182110
Airmass = nan
Source designation = J00361617+1821104
Median S/N = 274
SpeX Classification = L2.0
Spectrum key = 10249, Source key = 10068
If you use these data, please cite:
    Burgasser, A. J. et al. (2008, Astrophysical Journal, 681, 579-593)
    bibcode: 2008ApJ...681..579B
History:
    SPEX-PRISM spectrum successfully loaded
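You can also inspect the underlying data arrays directly through the attributes noted above (a brief sketch; the outputs are omitted here since they depend on the spectrum retrieved):

>>> sp = splist[0]
>>> sp.wave[:3]    # wavelength array
>>> sp.flux[:3]    # flux density array
>>> sp.noise[:3]   # flux uncertainty array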
You can also read in your own spectrum by passing a filename:

>>> sp = splat.Spectrum(filename='PATH_TO/myspectrum.fits')

Both FITS and ASCII (tab-delimited or csv) data formats are supported, but files
should ideally conform to the following data format standard:

* column 1: wavelength, assumed in microns
* column 2: flux in f_lambda units
* column 3: (optional) flux uncertainty in f_lambda units.

There are a few built-in readers for specific data formats.

To flux calibrate a spectrum, use the `Spectrum`_ object's built-in `fluxCalibrate()`_ method:

>>> sp = splat.getSpectrum(shortname='0415-0935')[0]
>>> sp.fluxCalibrate('2MASS J',14.0)

Visualizing Spectra
~~~~~~~~~~~~~~~~~~~

To display the spectrum, use the Spectrum object's `plot()`_ function:

>>> sp.plot()

or the ``splat.plot`` routine `plotSpectrum()`_:

>>> import splat.plot as splot
>>> splot.plotSpectrum(sp)

You can save this plot to file by adding a filename:

>>> splot.plotSpectrum(sp,file='spectrum.pdf')

You can also compare multiple spectra:

>>> sp1 = splat.getSpectrum(shortname='0415-0935')[0]
>>> sp2 = splat.getSpectrum(shortname='1217-0311')[0]
>>> splot.plotSpectrum(sp1,sp2,colors=['k','r'])

`plotSpectrum()`_ and related routines have many extras to label features, plot uncertainties,
indicate telluric absorption regions, make multi-panel and multi-page plots
of lists of spectra, plot batches of spectra, etc. Be sure to look through the `splat.plot`_
subpackage for more details.

Analysis functions
~~~~~~~~~~~~~~~~~~

SPLAT's primary purpose is to allow the analysis of ultracool dwarf spectra.

To measure spectral indices, use `measureIndex()`_ or `measureIndexSet()`_:

>>> sp = splat.getSpectrum(shortname='0415-0935')[0]
>>> value, error = splat.measureIndex(sp,[1.14,1.165],[1.21,1.235],method='integrate')
>>> indices = splat.measureIndexSet(sp,set='testi')

The last line returns a dictionary whose value, error pairs can be accessed by the name
of the index:

>>> print(indices['sH2O-J']) # returns value, error

You can also determine the gravity classification of a source following `Allers & Liu (2013) `_ using `classifyGravity()`_:

>>> sp = splat.getSpectrum(young=True, lucky=True)[0]
>>> print(splat.classifyGravity(sp)) # returned 'VL-G'

To classify a spectrum, use the various `classifyByXXX`_ methods:

>>> sp = splat.getSpectrum(shortname='0415-0935')[0]
>>> spt,unc = splat.classifyByIndex(sp,set='burgasser')
>>> spt,unc = splat.classifyByStandard(sp,spt=['T5','T9'])
>>> result = splat.classifyByTemplate(sp,spt=['T6','T9'],nbest=5)

The last line returns a dictionary containing the best 5 template matches.
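As a quick illustration of how these outputs can be combined (a minimal sketch reusing only the calls shown above; the formatting line is ours, not part of SPLAT):

>>> sp = splat.getSpectrum(shortname='0415-0935')[0]
>>> spt, unc = splat.classifyByStandard(sp, spt=['T5','T9'])
>>> print('Classified as {} +/- {} subtypes'.format(spt, unc))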
To compare a spectrum to another spectrum or a model, use `compareSpectra()`_:

>>> import splat.model as spmod
>>> mdl = spmod.loadModel(teff=720,logg=4.8,set='btsettl') # loads a BTSettl08 model
>>> sp = splat.getSpectrum(shortname='0415-0935')[0]
>>> chi,scale = splat.compareSpectra(sp,mdl)
>>> mdl.scale(scale)
>>> splat.plotSpectrum(sp,mdl,colors=['k','r'],legend=[sp.name,mdl.name])

You can shortcut the last three lines using the ``plot`` keyword:

>>> chi,scale = splat.compareSpectra(sp,mdl,plot=True)

There are also codes **still in development** to fit models directly to spectra: `modelFitGrid()`_, `modelFitMCMC()`_, and `modelFitEMCEE()`_:

>>> import splat.model as spmod
>>> sp = splat.getSpectrum(shortname='0415-0935')[0]
>>> sp.fluxCalibrate('2MASS J',14.49,absolute=True)
>>> nbest = 5
>>> result1 = splat.modelFitGrid(sp,set='btsettl')
>>> result2 = splat.modelFitMCMC(sp,set='btsettl',initial_guess=[800,5.0,0.],nsamples=300,step_sizes=[50.,0.5,0.])
>>> result3 = splat.modelFitEMCEE(sp,set='btsettl',initial_guess=[800,5.0,0.],nwalkers=12,nsamples=500)

The output of each of these fitting functions is a dictionary or a list of dictionaries containing the parameters of the best-fitting models; several diagnostic plots are also produced, depending on the routine. View the model fitting page for more details.

All of these routines have many options worth exploring, which are (increasingly) documented at `https://splat.physics.ucsd.edu/splat `_. If there are capabilities
you need, please suggest them to aburgasser@ucsd.edu, or note them in the "Issues" link on our `GitHub site `_.

Citing SPLAT and its data
-------------------------

If you use SPLAT tools for your research, please cite Burgasser et al. (2017, ASInC 14, 7) [`NASA ADS `_].

In addition, if you use data contained in SPLAT or the SpeX Prism Library, please be sure to cite the original spectral data source, which can be accessed from the Spectrum object:

>>> sp = splat.getSpectrum(lucky=True)[0]
>>> sp.citation().data_reference
'2016ApJ...817..112S'

>>> import splat.citations as spcite
>>> spcite.shortRef(sp.data_reference)
'Schneider, A. C. et al. (2016, Astrophysical Journal, 817, 112)'

Acknowledgements
----------------

SPLAT is a collaborative project of research students in the `UCSD Cool Star Lab `_, aimed at advancing research through the development of spectral analysis tools. Contributors to SPLAT have included Christian Aganze, Jessica Birky, Daniella Bardalez Gagliuffi, Adam Burgasser (PI), Caleb Choban, Andrew Davis, Ivanna Escala, Joshua Hazlett, Carolina Herrara Hernandez, Elizabeth Moreno Hilario, Aishwarya Iyer, Yuhui Jin, Mike Lopez, Dorsa Majidi, Diego Octavio Talavera Maya, Alex Mendez, Gretel Mercado, Niana Mohammed, Johnny Parra, Maitrayee Sahi, Adrian Suarez, Melisa Tallis, Tomoki Tamiya, Chris Theissen, and Russell van Linge.

This project has been supported by the National Aeronautics and Space Administration under Grant No. NNX15AI75G.
+ + diff --git a/setup.py b/setup.py index efcdfce044..b75e9aa14f 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ + #!/usr/bin/env python # Licensed under a 3-clause BSD style license - see LICENSE.rst diff --git a/splat/citations.py b/splat/citations.py index 6d5eae31f5..380208c1ee 100644 --- a/splat/citations.py +++ b/splat/citations.py @@ -72,7 +72,7 @@ def bibTexParser(bib_input,**kwargs): def veryShortRef(bib_dict,**kwargs): ''' :Purpose: - Takes a bibtex dictionary and returns a short (in-line) version of the citation + Takes a bibtex entry and returns a short (in-line) version of the citation :Required parameters: :param bib_tex: Dictionary output from bibTexParser, else a bibcode that is fed into bibTexParser @@ -85,14 +85,11 @@ def veryShortRef(bib_dict,**kwargs): ''' if type(bib_dict) is not dict: - if type(bib_dict) is numpy.str: - bib_dict = str(bib_dict) - if type(bib_dict) is str: - bib_dict = getBibTeX(bib_dict,**kwargs) - if isinstance(bib_dict,dict) == False: return '' - else: - if kwargs.get('verbose',False): print('Input to shortRef is neither a bibcode nor a bibTex dictionary') - return '' + if type(bib_dict) is str: bib_dict = getBibTeX(bib_dict,**kwargs) + if len(bib_dict) == 0: return '' + if type(bib_dict) is not dict: + if kwargs.get('verbose',False): print('Input to shortRef is neither a bibcode nor a bibTex dictionary') + return '' authors = bib_dict['author'].split(' and ') a = authors[0].replace('~',' ').split(' ') @@ -124,14 +121,12 @@ def shortRef(bib_dict,**kwargs): ''' if type(bib_dict) is not dict: - if type(bib_dict) is numpy.str: - bib_dict = str(bib_dict) - if type(bib_dict) is str: + if type(bib_dict) is str: bib_dict = getBibTeX(bib_dict,**kwargs) - if isinstance(bib_dict,dict) == False: return '' - else: - if kwargs.get('verbose',False): print('Input to shortRef is neither a bibcode nor a bibTex dictionary') - return '' + if len(bib_dict) == 0: return '' + if type(bib_dict) is not dict: + if kwargs.get('verbose',False): print('Input to shortRef is neither a bibcode nor a bibTex dictionary') + return '' authors = bib_dict['author'].split(' and ') if len(authors) == 1: @@ -175,14 +170,12 @@ def longRef(bib_dict,**kwargs): ''' if type(bib_dict) is not dict: - if type(bib_dict) is numpy.str: - bib_dict = str(bib_dict) if type(bib_dict) is str: bib_dict = getBibTeX(bib_dict,**kwargs) - if isinstance(bib_dict,dict) == False: return '' - else: - if kwargs.get('verbose',False): print('Input to longRef is neither a bibcode nor a bibTex dictionary') - return '' + if len(bib_dict) == 0: return '' + if type(bib_dict) is not dict: + if kwargs.get('verbose',False): print('Input to longRef is neither a bibcode nor a bibTex dictionary') + return '' authors = bib_dict['author'].split(' and ') if len(authors) == 1: @@ -226,14 +219,12 @@ def veryLongRef(bib_dict,**kwargs): ''' if type(bib_dict) is not dict: - if type(bib_dict) is numpy.str: - bib_dict = str(bib_dict) if type(bib_dict) is str: bib_dict = getBibTeX(bib_dict,**kwargs) - if isinstance(bib_dict,dict) == False: return '' - else: - if kwargs.get('verbose',False): print('Input to verylongRef is neither a bibcode nor a bibTex dictionary') - return '' + if len(bib_dict) == 0: return '' + if type(bib_dict) is not dict: + if kwargs.get('verbose',False): print('Input to verylongRef is neither a bibcode nor a bibTex dictionary') + return '' authors = bib_dict['author'].split(' and ') if len(authors) == 1: @@ -278,7 +269,7 @@ def citeURL(bib_dict,**kwargs): ''' if type(bib_dict) is not dict: - if type(bib_dict) 
is numpy.str: + if type(bib_dict) is str: bib_dict = str(bib_dict) if type(bib_dict) is str: # assume this is a bibcode @@ -337,63 +328,78 @@ def processBiblibrary(biblibrary,verbose=False): return output -def getBibTeX(bibcode,**kwargs): +def getBibTeX(bibcode, biblibrary=SPLAT_PATH+DB_FOLDER+BIBFILE, online=False, verbose=True): ''' Purpose - Takes a bibcode and returns a dictionary containing the bibtex information; looks either in internal SPLAT - or user-supplied bibfile, or seeks online. If nothing found, gives a soft warning and returns False + ------- + Takes a bibcode and returns a dictionary containing the bibtex information; + looks either in internal SPLAT or user-supplied bibfile, or seeks online. + If nothing found, gives a soft warning and returns False - :Note: - **Currently not functional** + Parameters + ---------- - :Required parameters: - :param bibcode: Bibcode string to look up (e.g., '2014ApJ...787..126L') + bibcode : str + Bibcode string to look up (e.g., '2014ApJ...787..126L') - :Optional parameters: - :param biblibrary: Filename for biblibrary to use in place of SPLAT internal one - :type string: optional, default = '' - :param online: If True, go directly online; if False, do not try to go online - :type logical: optional, default = null + biblibrary = SPLAT_PATH+DB_FOLDER+BIBFILE: str [optional] + File pointing to a bibtex library file; by default points to internal library - :Output: - - A dictionary containing the bibtex fields, or False if not found + online = False : bool [optional] + If True, go directly online; if False, do not try to go online + NOTE: CURRENLY SET TO NOT ONLINE DUE TO CHANGE IN ADS API + + verbose = True : bool [optional] + Set to True to provide feedback + + Outputs + ------- + + dictionary containing bibtex information, or blank dictionary if nothing found + + Example + ------- + + TBD + + Dependencies + ------------ + + None ''' # go online first if directed to do so - if kwargs.get('online',False) and checkOnline(): - bib_tex = getBibTeXOnline(bibcode) + # if online==True and checkOnline(): + # bib_tex = getBibTeXOnline(bibcode) # read locally first - else: - biblibrary = kwargs.get('biblibrary', SPLAT_PATH+DB_FOLDER+BIBFILE) +# else: # check the file - if not os.path.exists(os.path.normpath(biblibrary)): - if kwargs.get('verbose',True) == True: print('Could not find bibtex library {}'.format(biblibrary)) - biblibrary = SPLAT_PATH+DB_FOLDER+BIBFILE - - if not os.path.exists(os.path.normpath(biblibrary)): - raise NameError('Could not find SPLAT main bibtext library {}; something is wrong'.format(biblibrary)) - - - with open(os.path.normpath(biblibrary), 'r') as bib_file: - text = bib_file.read() - #print re.search('@[A-Z]+{' + bib_code, bib_file) - in_lib = re.search('@[a-z]+{' + bibcode, text) - if in_lib == None: - if kwargs.get('force',False): return False - if kwargs.get('verbose',False) == True: print('Bibcode {} not in bibtex library {}; checking online'.format(bibcode,biblibrary)) - bib_tex = getBibTeXOnline(bibcode) - else: - begin = text.find(re.search('@[a-z]+{' + bibcode, text).group(0)) - text = text[begin:] - end = text.find('\n@') - bib_tex = text[:end] - - if bib_tex == False: - return False - else: - return bibTexParser(bib_tex) + if not os.path.exists(os.path.normpath(biblibrary)): + if verbose == True: print('Could not find bibtex library {}'.format(biblibrary)) + biblibrary = SPLAT_PATH+DB_FOLDER+BIBFILE + if not os.path.exists(os.path.normpath(biblibrary)): + raise NameError('Could not find SPLAT main bibtext 
library {}; something is wrong'.format(biblibrary)) + +# open and read + bib_tex = {} + with open(os.path.normpath(biblibrary), 'r') as bib_file: + text = bib_file.read() + #print re.search('@[A-Z]+{' + bib_code, bib_file) + in_lib = re.search('@[a-z]+{' + bibcode, text) + if in_lib != None: +# if force==True: return bib_tex +# if kwargs.get('verbose',False) == True: print('Bibcode {} not in bibtex library {}; checking online'.format(bibcode,biblibrary)) +# bib_tex = getBibTeXOnline(bibcode) +# else: + begin = text.find(re.search('@[a-z]+{' + bibcode, text).group(0)) + text = text[begin:] + end = text.find('\n@') + bib_tex = text[:end] + + if len(bib_tex) == 0: return bib_tex + else: return bibTexParser(bib_tex) def getBibTeXOnline(bibcode,verbose=False): diff --git a/splat/core.py b/splat/core.py index 95261c9a9a..8ad872087e 100644 --- a/splat/core.py +++ b/splat/core.py @@ -242,33 +242,33 @@ class Spectrum(object): ''' def __init__(self, *args, **kwargs): -# some presets - sdb = False - self.ismodel = kwargs.get('ismodel',False) - self.istransmission = kwargs.get('istransmission',False) - self.wave_label = kwargs.get('wave_label',r'Wavelength') +# some presets - DO WE NEED ALL OF THESE? + sdb = pandas.DataFrame() +# self.ismodel = kwargs.get('ismodel',False) +# self.istransmission = kwargs.get('istransmission',False) self.wave_unit = kwargs.get('wave_unit',DEFAULT_WAVE_UNIT) - self.wave_unit_label = kwargs.get('wave_unit_label',self.wave_unit) + self.wave_label = kwargs.get('wave_label',r'Wavelength') +# self.wave_unit_label = kwargs.get('wave_unit_label',self.wave_unit) + self.flux_unit = kwargs.get('flux_unit',DEFAULT_FLUX_UNIT) self.flux_label = kwargs.get('flux_label',r'F$_{\lambda}$') if kwargs.get('surface',False) == True: self.flux_label = 'Surface {}'.format(self.flux_label) if kwargs.get('apparent',False) == True: self.flux_label = 'Apparent {}'.format(self.flux_label) if kwargs.get('absolute',False) == True: self.flux_label = 'Absolute {}'.format(self.flux_label) if kwargs.get('normalized',False) == True: self.flux_label = 'Normalized {}'.format(self.flux_label) - self.flux_unit = kwargs.get('flux_unit',DEFAULT_FLUX_UNIT) - if kwargs.get('dimensionless')==True: self.flux_unit = u.dimensionless_unscaled - self.flux_unit_label = kwargs.get('flux_unit_label',self.flux_unit) + if kwargs.get('dimensionless',False)==True: self.flux_unit = u.dimensionless_unscaled +# self.flux_unit_label = kwargs.get('flux_unit_label',self.flux_unit) # self.header = kwargs.get('header',fits.PrimaryHDU()) self.header = kwargs.get('header',{}) self.filename = kwargs.get('file','') self.filename = kwargs.get('filename',self.filename) self.name = kwargs.get('name','') - self.idkey = kwargs.get('idkey',False) -# instrument + self.data_key = kwargs.get('idkey',False) + self.data_key = kwargs.get('data_key',self.data_key) self.instrument = kwargs.get('instrument','') inst = checkInstrument(self.instrument) if inst != False: for k in list(INSTRUMENTS[inst].keys()): setattr(self,k,kwargs.get(k,INSTRUMENTS[inst][k])) - self.instrument_mode = kwargs.get('instrument_mode','') +# self.instrument_mode = kwargs.get('instrument_mode','') # self.runfast = kwargs.get('runfast',True) self.published = kwargs.get('published','N') self.bibcode = kwargs.get('bibcode','') @@ -277,117 +277,137 @@ def __init__(self, *args, **kwargs): self.flux = [] self.noise = [] self.variance = [] + verbose = kwargs.get('verbose',True) + +# return empty Spectrum object if something goes wrong + empty = kwargs.get('empty',False) + if 
empty==True: return(self) # process arguments # option 1: a filename is given - if len(args) > 0: - if isinstance(args[0],str): + if len(args) == 1: + if isinstance(args[0],str): self.filename = args[0] + empty=True -# option 2: a spectrum ID is given -# elif isinstance(args[0],int) or isinstance(args[0],float) or isinstance(args[0],numpy.int64) or isinstance(args[0],numpy.float64): +# option 2: a spectrum data key is given elif isNumber(args[0])==True: - self.idkey = int(args[0]) - try: - sdb = keySpectrum(self.idkey) - if isinstance(sdb,bool) == False: - self.filename = sdb['DATA_FILE'].iloc[0] - except: - print('Warning: problem reading in spectral database') + self.data_key = int(args[0]) +# try: + sdb = keySpectrum(self.data_key) + if len(sdb) > 0: self.filename = sdb['DATA_FILE'].iloc[0] + else: + if verbose==True: print('Could not find data key {} in SPLAT database'.format(args[0])) + empty=True +# except: +# print('Warning: problem reading in spectral database') # option 3: a dictionary is given - check for the appropriate keys elif isinstance(args[0],dict)==True: if 'wave' in list(args[0].keys()) and 'flux' in list(args[0].keys()): for k in list(args[0].keys()): setattr(self,k,args[0][k]) + else: + if verbose==True: print('Passed a dictionary that is missing wave and/or flux keys') + empty=True -# option 4: a pandas array is given - check for the appropriate columns +# option 4: a pandas dataframe is given elif isinstance(args[0],pandas.core.frame.DataFrame)==True: if 'wave' in list(args[0].columns) and 'flux' in list(args[0].columns): for k in list(args[0].columns): setattr(self,k,args[0][k]) - -# option 5: arrays are given - interpret as wave, flux, and optionally noise -# puts these into keyword arguments - if len(args) > 1: - if (isinstance(args[0],list) or isinstance(args[0],numpy.ndarray)) and (isinstance(args[1],list) or isinstance(args[1],numpy.ndarray)): - kwargs['wave'] = kwargs.get('wave',args[0]) - kwargs['flux'] = kwargs.get('flux',args[1]) - if len(args) > 2: - if isinstance(args[2],list) or isinstance(args[2],numpy.ndarray): - kwargs['noise'] = kwargs.get('noise',args[2]) - -# option 6: wave, flux and optionally noise included in keyword arguments - if len(kwargs.get('wave','')) > 0 and len(kwargs.get('flux','')) > 0: - self.wave = kwargs['wave'] - self.flux = kwargs['flux'] - if len(kwargs.get('noise','')) > 0: - self.noise = kwargs['noise'] + else: + if verbose==True: print('Passed a pandas array that is missing wave and/or flux columns') + empty=True + +# option 5: a list or numpy array or arrays is given: +# interpret as wave, flux, and optionally noise + elif isinstance(args[0],list)==True or isinstance(args[0],numpy.ndarray)==True: + if len(args[0]) >=2 and (isinstance(args[0][0],list)==True or isinstance(args[0][0],numpy.ndarray)==True): + self.wave = args[0][0] + self.flux = args[0][1] + if len(args[0]) >=3: self.noise = args[0][2] + else: + if verbose==True: print('Input array needs at least two arrays for wave and flux, but only contains {}'.format(len(args[0]))) + empty=True else: - self.noise = numpy.zeros(len(self.wave)) -# some extras - others = ['pixel','mask','flag','flags','model','background'] - for o in others: - if len(kwargs.get(o,'')) > 0: - setattr(self,o,kwargs[o]) + if verbose==True: print('Do not recognize input as filename, database key, list, dict, numpy array, or pandas table') + empty=True + +# option 6: multiple lists or numpy arrays are given +# interpret as wave, flux, and optionally noise + elif len(args) > 1: + if 
(isinstance(args[0],list) or isinstance(args[0],numpy.ndarray)) and \ + (isinstance(args[1],list) or isinstance(args[1],numpy.ndarray)): + self.wave = args[0] + self.flux = args[1] + else: + if verbose==True: print('Multiple inputs need to be lists or numpy arrays') + empty=True + + if len(args) > 2: + if isinstance(args[2],list) or isinstance(args[2],numpy.ndarray): + kwargs['noise'] = kwargs.get('noise',args[2]) -# read in file if there isn't anything in our wave or flux arrays - if (len(self.wave)==0 or len(self.flux)==0) and self.filename != '': +# option 7: wave, flux and optionally noise included in keyword arguments +# allow for this not being an option + else: + for k in ['wave','flux','noise']: + if len(kwargs.get(k,[])) > 0: + if isinstance(kwargs[k],list) or isinstance(kwargs[k],numpy.ndarray): + setattr(self,k,kwargs[k]) + else: + if verbose==True: print('{} keyword must be a list or numpy array; you passed {}'.format(k,type(kwargs[k]))) + empty=True + else: + if k in 'wave flux': +# if verbose==True: print('{} keyword not included or passed empty list'.format(k)) + empty=True + +# read in file if container is currently empty but there is a filename + if empty==True and self.filename != '': self.simplefilename = os.path.basename(self.filename) - self.name = kwargs.get('name',self.simplefilename) + if self.name=='': self.name=self.simplefilename # set up parameters mkwargs = {} - mkwargs['filename']=self.filename +# mkwargs['filename']=self.filename mkwargs['instrument']=self.instrument mkwargs['folder'] = kwargs.get('folder','./') mkwargs['wave_unit'] = self.wave_unit mkwargs['flux_unit'] = self.flux_unit - mkwargs['delimiter']=kwargs.get('delimiter',',') + mkwargs['delimiter']=kwargs.get('delimiter','') mkwargs['comment']=kwargs.get('comment','#') mkwargs['file_type']=kwargs.get('file_type','') mkwargs['verbose']=kwargs.get('verbose',False) # self.file = self.filename # is this in the SPLAT database? 
if so use the default folder -# NOTE: NEED TO MAKE THIS INSTRUMENT FLEXIBLE if self.filename in list(DB_SPECTRA['DATA_FILE']): mkwargs['folder'] = SPLAT_PATH+DATA_FOLDER - sdb = searchLibrary(**mkwargs) + sdb = searchLibrary(file=self.filename) # return prior spectrum - THIS IS NOT WORKING SO COMMENTED OUT # if self.filename in list(SPECTRA_READIN.keys()) and self.runfast == True: # self = SPECTRA_READIN[self.filename] # return -# try: - - # breakouts for specific instruments -# if (kwargs.get('APOGEE') == True or kwargs.get('apogee') == True or kwargs.get('instrument','SPEX-PRISM').upper() == 'APOGEE') and self.filename != '': -# rs = _readAPOGEE(self.filename,**kwargs) -# self.instrument = 'APOGEE' -# # for k in list(rs.keys()): setattr(self,k.lower(),rs[k]) -# self.history.append('Spectrum successfully loaded') -# # create a copy to store as the original -# self.original = copy.deepcopy(self) - -# elif (kwargs.get('BOSS',False) == True or kwargs.get('boss',False) == True or kwargs.get('eboss',False) == True or kwargs.get('EBOSS',False) == True or kwargs.get('instrument','SPEX-PRISM').upper() == 'BOSS' or kwargs.get('instrument','SPEX-PRISM').upper() == 'EBOSS') and self.filename != '': -# rs = _readBOSS(self.filename) -# # for k in list(rs.keys()): setattr(self,k.lower(),rs[k]) -# self.wave_unit = kwargs.get('wave_unit',u.Angstrom) -# self.flux_unit = kwargs.get('flux_unit',u.erg/(u.cm**2 * u.s * u.Angstrom)) -# self.history.append('Spectrum successfully loaded') -# # create a copy to store as the original -# self.original = copy.deepcopy(self) -# else: - # read in spectrum, being careful not to overwrite specifically assigned quantities rs = readSpectrum(self.filename,**mkwargs) - for k in list(rs.keys()): - if k not in list(kwargs.keys()): setattr(self,k.lower(),rs[k]) + if 'wave' in rs.keys(): + for k in list(rs.keys()): + if k not in list(kwargs.keys()): setattr(self,k.lower(),rs[k]) + empty=False + +# some extras + others = ['pixel','mask','flag','flags','model','background'] + for o in others: + if len(kwargs.get(o,'')) > 0: + setattr(self,o,kwargs[o]) + # None of this worked; create an empty Spectrum object (can be used for copying) - if len(self.wave)==0 or len(self.flux)==0: + if empty==True: print('Warning: Creating an empty Spectrum object') return @@ -395,75 +415,56 @@ def __init__(self, *args, **kwargs): # convert to numpy arrays if not isinstance(self.wave,numpy.ndarray): self.wave = numpy.array(self.wave) if not isinstance(self.flux,numpy.ndarray): self.flux = numpy.array(self.flux) + if len(self.noise)==0: self.noise = [numpy.nan]*len(self.wave) if not isinstance(self.noise,numpy.ndarray): self.noise = numpy.array(self.noise) # enforce positivity of noise array - self.noise = numpy.absolute(self.noise) + if kwargs.get('no_negative_noise',True)==True: self.noise = numpy.absolute(self.noise) # assure wave, flux, noise have units if not isUnit(self.wave): self.wave = numpy.array(self.wave)*self.wave_unit if not isUnit(self.flux): self.flux = numpy.array(self.flux)*self.flux_unit if not isUnit(self.noise): self.noise = numpy.array(self.noise)*self.flux_unit -# some conversions - self.flam = self.flux - try: self.nu = self.wave.to(u.Hz,equivalencies=u.spectral()) - except: pass - try: - self.fnu = self.flux.to(u.Jansky,equivalencies=u.spectral_density(self.wave)) - self.fnu_unit = u.Jansky - except: pass - try: self.noisenu = self.noise.to(u.Jansky,equivalencies=u.spectral_density(self.wave)) - except: pass - self.temperature = numpy.zeros(len(self.flux)) +# some conversions 
- move off to Spectrum methods + # self.flam = self.flux + # try: self.nu = self.wave.to(u.Hz,equivalencies=u.spectral()) + # except: pass + # try: + # self.fnu = self.flux.to(u.Jansky,equivalencies=u.spectral_density(self.wave)) + # self.fnu_unit = u.Jansky + # except: pass + # try: self.noisenu = self.noise.to(u.Jansky,equivalencies=u.spectral_density(self.wave)) + # except: pass + # self.temperature = numpy.zeros(len(self.flux)) # calculate variance & S/N # self.variance = numpy.array([n**2 for n in self.noise.value])*self.noise.unit*self.noise.unit self.variance = self.noise**2 # self.snr = self.computeSN() self.snr = numpy.nanmedian(self.flux/self.noise) -# estimate resolution - be default central lam/lam spacing/3 - i = int(0.5*len(self.wave)) - self.resolution = kwargs.get('resolution',self.wave.value[i]/numpy.absolute(self.wave.value[i+1]-self.wave.value[i])/2.) +# estimate resolution - by default central lam/lam spacing/3 + self.resolution = kwargs.get('resolution', + numpy.nanmedian(self.wave.value)/numpy.absolute(numpy.nanmedian(self.wave.value-numpy.roll(self.wave.value,3)))) # populate information on source and spectrum from database # COULD POSSIBLY MOVE THIS TO A SEPARATE FUNCTION # print(sdb) # sdb = searchLibrary(**mkwargs) - if isinstance(sdb,bool) == False : - if isinstance(sdb,pandas.core.frame.DataFrame) and len(sdb) != 0: - for k in list(sdb.columns): - setattr(self,k.lower(),str(sdb[k].iloc[0])) -# elif isinstance(sdb,dict) == True: -# for k in list(sdb.keys()): -# setattr(self,k.lower(),str(sdb[k][0])) - else: - try: - for k in list(sdb.keys()): - setattr(self,k.lower(),str(sdb[k][0])) - except: - pass - # set shortname if possible - # REMOVED - # try: self.shortname = designationToShortName(self.designation) - # except: pass -# set observation date if possible -# REMOVED - # try: - # self.date = str(self.observation_date) - # except: - # pass + if len(sdb) > 0: + for k in list(sdb.columns): setattr(self,k.lower(),str(sdb[k].iloc[0])) + # convert some data into numbers - kconv = ['ra','dec','julian_date','median_snr','resolution','airmass',\ - 'jmag','jmag_error','hmag','hmag_error','kmag','kmag_error','source_key'] - for k in kconv: - try: setattr(self,k,float(getattr(self,k))) - except: pass + # kconv = ['ra','dec','julian_date','median_snr','resolution','airmass',\ + # 'jmag','jmag_error','hmag','hmag_error','kmag','kmag_error','source_key'] + # for k in kconv: + # try: setattr(self,k,float(getattr(self,k))) + # except: pass # this is to make sure the database resolution is the default value # IS THIS NECESSARY? 
- try: - if kwargs.get('resolution',False) == False or kwargs.get('instrument',False) == False: - kwargs['resolution'] = self.resolution - except: pass + # try: + # if kwargs.get('resolution',False) == False or kwargs.get('instrument',False) == False: + # kwargs['resolution'] = self.resolution + # except: pass # instrument specific information # THIS HAS BEEN REMOVED @@ -579,8 +580,7 @@ def __init__(self, *args, **kwargs): self.original = copy.deepcopy(self) # add to previous read spectra - if self.filename != '' and self.ismodel == False: - SPECTRA_READIN[self.filename] = self + if self.filename != '': SPECTRA_READIN[self.filename] = self return @@ -961,19 +961,24 @@ def addNoise(self,snr=0.): def info(self): ''' - :Purpose: - Returns a summary of properties for the Spectrum object + Purpose + ------- - :Required Inputs: - None + Returns a summary of properties for the Spectrum object - :Optional Inputs: - None + Parameters + ---------- - :Output: - Text summary describing the Spectrum object + None + + Outputs + ------- + + Text summary describing the Spectrum object + + Example + ------- - :Example: >>> import splat >>> sp = splat.getSpectrum(lucky=True)[0] >>> sp.info() @@ -990,61 +995,63 @@ def info(self): bibcode: 2010ApJ...710.1142B History: SPEX_PRISM spectrum successfully loaded + + Dependencies + ------------ + None ''' - if self.ismodel == True: - f = '\n{} for instrument {} with the following parmeters:'.format(self.modelset,self.instrument) - for ms in SPECTRAL_MODEL_PARAMETERS_INORDER: - if hasattr(self,ms): f+='\n\t{} = {} {}'.format(ms,getattr(self,ms),SPECTRAL_MODEL_PARAMETERS[ms]['unit']) -# f+='\nSmoothed to slit width {} {}'.format(self.slit,SPECTRAL_MODEL_PARAMETERS['slit']['unit']) - f+='\n\nIf you use this model, please cite {}'.format(spbib.shortRef(SPECTRAL_MODELS[self.modelset]['bibcode'])) - f+='\nbibcode = {}\n'.format(SPECTRAL_MODELS[self.modelset]['bibcode']) - elif self.istransmission == True: - f = '\n{} spectrum'.format(self.name) -# f+='\nSmoothed to slit width {} {}'.format(self.slit,SPECTRAL_MODEL_PARAMETERS['slit']['unit']) - f+='\n\nIf you use these data, please cite {}'.format(spbib.shortRef(self.bibcode)) - f+='\nbibcode = {}\n'.format(self.bibcode) - else: - f = '\n' - if hasattr(self,'instrument'): f+='{} '.format(self.instrument) - if hasattr(self,'name'): f+='spectrum of {}'.format(self.name) - if hasattr(self,'observer') and hasattr(self,'date'): - if isinstance(self.observer,list): - for i in range(len(self.observer)): - f+='\nObserved by {} on {}'.format(self.observer[i],properDate(self.date[i],output='YYYY MMM DD')) - else: - f+='\nObserved by {} on {}'.format(self.observer,properDate(self.date,output='YYYY MMM DD')) - if hasattr(self,'airmass'): f+='\nAirmass = {:.2f}'.format(float(self.airmass)) - if hasattr(self,'designation'): f+='\nSource designation = {}'.format(self.designation) - if hasattr(self,'median_snr'): f+='\nMedian S/N = {:.0f}'.format(float(self.median_snr)) - if hasattr(self,'spex_type'): f+='\nSpeX Classification = {}'.format(self.spex_type) -# these lines are currently broken - # if hasattr(self,'lit_type'): - # if isinstance(self.lit_type,list): - # for i in range(len(self.lit_type)): - # f+='\nLiterature Classification = {} from {}'.format(self.lit_type[i],spbib.shortRef(self.lit_type_ref[i])) - # else: - # f+='\nLiterature Classification = {} from {}'.format(self.lit_type,spbib.shortRef(self.lit_type_ref)) - if hasattr(self,'source_key') and hasattr(self,'data_key'): - if isinstance(self.source_key,list): - for i in 
range(len(self.source_key)): - f+='\nSpectrum key = {}, Source key = {}'.format(int(self.data_key[i]),int(self.source_key[i])) - else: - f+='\nSpectrum key = {}, Source key = {}'.format(int(self.data_key),int(self.source_key)) - if self.published == 'Y': - f+='\n\nIf you use these data, please cite:' - if isinstance(self.data_reference,list): - for i in range(len(self.data_reference)): - f+='\n\t{}'.format(spbib.shortRef(self.data_reference[i])) - f+='\n\tbibcode: {}'.format(self.data_reference[i]) - else: - f+='\n\t{}'.format(spbib.shortRef(self.data_reference)) - f+='\n\tbibcode: {}'.format(self.data_reference) +# if self.ismodel == True: +# f = '\n{} for instrument {} with the following parmeters:'.format(self.modelset,self.instrument) +# for ms in SPECTRAL_MODEL_PARAMETERS_INORDER: +# if hasattr(self,ms): f+='\n\t{} = {} {}'.format(ms,getattr(self,ms),SPECTRAL_MODEL_PARAMETERS[ms]['unit']) +# # f+='\nSmoothed to slit width {} {}'.format(self.slit,SPECTRAL_MODEL_PARAMETERS['slit']['unit']) +# f+='\n\nIf you use this model, please cite {}'.format(spbib.shortRef(SPECTRAL_MODELS[self.modelset]['bibcode'])) +# f+='\nbibcode = {}\n'.format(SPECTRAL_MODELS[self.modelset]['bibcode']) +# elif self.istransmission == True: +# f = '\n{} spectrum'.format(self.name) +# # f+='\nSmoothed to slit width {} {}'.format(self.slit,SPECTRAL_MODEL_PARAMETERS['slit']['unit']) +# f+='\n\nIf you use these data, please cite {}'.format(spbib.shortRef(self.bibcode)) +# f+='\nbibcode = {}\n'.format(self.bibcode) +# else: + f = '\n' + if hasattr(self,'instrument'): f+='{} '.format(self.instrument) + if hasattr(self,'name'): f+='spectrum of {}'.format(self.name) + if hasattr(self,'observer') and hasattr(self,'date'): + if isinstance(self.observer,list): + for i in range(len(self.observer)): + f+='\nObserved by {} on {}'.format(self.observer[i],properDate(self.date[i],output='YYYY MMM DD')) else: - f+='\n\nUNPUBLISHED DATA' + f+='\nObserved by {} on {}'.format(self.observer,properDate(self.date,output='YYYY MMM DD')) + if hasattr(self,'airmass'): f+='\nAirmass = {:.2f}'.format(float(self.airmass)) + if hasattr(self,'designation'): f+='\nSource designation = {}'.format(self.designation) + if hasattr(self,'median_snr'): f+='\nMedian S/N = {:.0f}'.format(float(self.median_snr)) + if hasattr(self,'spex_type'): f+='\nSpeX Classification = {}'.format(self.spex_type) +# these lines are currently broken + # if hasattr(self,'lit_type'): + # if isinstance(self.lit_type,list): + # for i in range(len(self.lit_type)): + # f+='\nLiterature Classification = {} from {}'.format(self.lit_type[i],spbib.shortRef(self.lit_type_ref[i])) + # else: + # f+='\nLiterature Classification = {} from {}'.format(self.lit_type,spbib.shortRef(self.lit_type_ref)) + if hasattr(self,'source_key') and hasattr(self,'data_key'): + # if isinstance(self.source_key,list): + # for i in range(len(self.source_key)): + # f+='\nSpectrum key = {}, Source key = {}'.format(int(self.data_key[i]),int(self.source_key[i])) + # else: + f+='\nSpectrum key = {}, Source key = {}'.format(int(self.data_key),int(self.source_key)) + if self.published == 'Y': + f+='\n\nIf you use these data, please cite:' + # if isinstance(self.data_reference,list): + # for i in range(len(self.data_reference)): + # f+='\n\t{}'.format(spbib.shortRef(self.data_reference[i])) + # f+='\n\tbibcode: {}'.format(self.data_reference[i]) + # else: + f+='\n\t{}'.format(spbib.shortRef(self.data_reference)) + f+='\n\tbibcode: {}'.format(self.data_reference) + else: f+='\n\nUNPUBLISHED DATA' f+='\n\nHistory:' - 
for h in self.history: - f+='\n\t{}'.format(h) + for h in self.history: f+='\n\t{}'.format(h) print(f) return @@ -1132,7 +1139,7 @@ def export(self,filename='',clobber=True,csv=False,tab=True,delimiter='\t',save_ if isinstance(self.__getattribute__(k),str) == True or (isinstance(self.__getattribute__(k),float) == True and numpy.isnan(self.__getattribute__(k)) == False) or isinstance(self.__getattribute__(k),int) == True or isinstance(self.__getattribute__(k),bool) == True: hdu.header[k.upper()] = str(self.__getattribute__(k)) # print(hdu.header) - hdu.writeto(filename,clobber=clobber) + hdu.writeto(filename,overwrite=clobber) # except: # raise NameError('Problem saving spectrum object to file {}'.format(filename)) @@ -1452,12 +1459,16 @@ def toWavelengths(self,wave,force=True,verbose=False): flux_unit = self.flux.unit if len(self.wave) <= len(wave): f = interp1d(self.wave.value,self.flux.value,bounds_error=False,fill_value=0.) - n = interp1d(self.wave.value,self.noise.value,bounds_error=False,fill_value=0.) self.flux = f(wave.value)*flux_unit - self.noise = n(wave.value)*flux_unit + if numpy.isfinite(numpy.nanmin(self.noise))==True: + n = interp1d(self.wave.value,self.noise.value,bounds_error=False,fill_value=0.) + self.noise = n(wave.value)*flux_unit + else: self.noise = self.flux*numpy.nan else: self.flux = integralResample(self.wave.value,self.flux.value,wave.value)*flux_unit - self.noise = integralResample(self.wave.value,self.noise.value,wave.value)*flux_unit + if numpy.isfinite(numpy.nanmin(self.noise))==True: + self.noise = integralResample(self.wave.value,self.noise.value,wave.value)*flux_unit + else: self.noise = self.flux*numpy.nan self.wave = wave self.variance = self.noise**2 self.history.append('Resampled to new wavelength grid') @@ -1674,6 +1685,7 @@ def broaden(self,vbroad,kern=None,epsilon=0.6,method='rotation',verbose=False): kern = lsfRotation(vbroad.value,vsamp.value,epsilon=epsilon) report = 'Rotationally broadened spectrum by {}'.format(vbroad) +# NOTE: THIS IS CURRENTLY NOT FUNCTIONAL # gaussian ±10 sigma elif 'gauss' in method.lower(): n = numpy.ceil(20.*vbroad.value/vsamp.value) @@ -1699,8 +1711,9 @@ def broaden(self,vbroad,kern=None,epsilon=0.6,method='rotation',verbose=False): nwave = numpy.nanmin(self.wave.value)*(a**numpy.arange(len(self.wave))) nflux = self.flux.value*nwave ncflux = numpy.convolve(nflux, kern, 'same') - self.flux = ncflux/nwave*flux_unit + self.flux = (ncflux/nwave)*flux_unit self.history.append(report) + if verbose==True: print(report) return @@ -2931,11 +2944,13 @@ def sample(self,rng,method='median',verbose=ERROR_CHECKING): w = numpy.where(numpy.logical_and(self.wave.value >= rng[0],self.wave.value <= rng[1])) if len(w[0])>0: - if method in ['median','med']: val = numpy.nanmedian(self.flux.value[w]) - elif method in ['mean','average','ave']: val = numpy.nanmean(self.flux.value[w]) - elif method in ['max','maximum']: val = numpy.nanmax(self.flux.value[w]) - elif method in ['min','minimum']: val = numpy.nanmin(self.flux.value[w]) - elif method in ['std','stddev','stdev','rms']: val = numpy.nanstd(self.flux.value[w]) + if method.lower() in ['median','med']: val = numpy.nanmedian(self.flux.value[w]) + elif method.lower() in ['mean','average','ave']: val = numpy.nanmean(self.flux.value[w]) + elif method.lower() in ['max','maximum']: val = numpy.nanmax(self.flux.value[w]) + elif method.lower() in ['min','minimum']: val = numpy.nanmin(self.flux.value[w]) + elif method.lower() in ['std','stddev','stdev','rms']: val = numpy.nanstd(self.flux.value[w]) 
+ elif method.lower() in ['unc','uncertainty','noise','error']: val = numpy.nanmedian(self.noise.value[w]) + elif method.lower() in ['sn','snr','signal-to-noise','s/n']: val = numpy.nanmedian(self.flux.value[w]/self.noise.value[w]) else: raise ValueError('Did not recongize sampling method {}'.format(method)) return val else: @@ -4211,19 +4226,47 @@ def stitch(s1,s2,rng=[],verbose=False,scale=True,**kwargs): ##################################################### -def getSpectrum(getList=False, limit=0, *args, **kwargs): +def getSpectrum(output='spectra', limit=20, verbose=True, key_name='DATA_KEY',*args, **kwargs): ''' - :Purpose: + Purpose + ------- - Gets a spectrum from the SPLAT library using various selection criteria. Calls searchLibrary_ to select spectra; if any found it routines an array of Spectrum objects, otherwise an empty array. + Gets a spectrum from the SPLAT library using various selection criteria. + Calls searchLibrary_ to select spectra; if any found it routines an array of + Spectrum objects; otherwise, it returns an empty array. - .. _searchLibrary : api.html#splat.core.searchLibrary + Parameters + ---------- - :Output: + output = 'spectra' : str [optional] + Determines what is returned to user; options are: + * 'spectra': return array of Spectrum objects [default] + * 'files': return a list of filenames + * 'table': return pandas table of spectral information (output of `searchLibrary()`_) - An array of Spectrum objects that satisfy the search criteria + limit = 0 : int [optional] + Sets limit for the number of returns. Set to < 0 to return unlimited number - :Example: + key = 'DATA_KEY' : str [optional] + Keyword for the data key in the database file + + verbose = True : bool [optional] + Set to True to provide feedback + + **kwargs : additional keywords + Include additional keywords for `searchLibrary()`_ + + + Outputs + ------- + + result : array or pandas Dataframe + An array of Spectrum objects or filenames or pandas Dataframe for sources + that satisfy the search criteria + + + Example + ------- >>> import splat >>> sp = splat.getSpectrum(shortname='1507-1627')[0] Retrieving 1 file @@ -4231,53 +4274,78 @@ def getSpectrum(getList=False, limit=0, *args, **kwargs): Retrieving 120 files >>> sparr = splat.getSpectrum(spt='T5',young=True) No files match search criteria + + + Dependencies + ------------ + `searchLibrary()`_ + + .. 
_searchLibrary : api.html#splat.core.searchLibrary + ''' +# set default search parameters - NOT NECESSARY + # if kwargs.get('lucky',False) == True: kwargs['published'] = True + # if kwargs.get('output','')=='': kwargs['output'] = 'all' - if kwargs.get('lucky',False) == True: kwargs['published'] = True - result = [] - kwargs['output'] = 'all' +# search library search = searchLibrary(*args, **kwargs) - if len(search) > 0: - files = [] - if len(search) == 1: - files.append(search['DATA_FILE'].iloc[0]) - else: - for i,x in enumerate(search['DATA_FILE']): - files.append(search['DATA_FILE'].iloc[i]) + if len(search) == 0: + if verbose==True: print('\nNo files match search criteria\n\n') + return [] + # else: + # files = list(search['DATA_FILE']) + # if len(search) == 1: files.append(search['DATA_FILE'].iloc[0]) + # else: + # for i,x in enumerate(search['DATA_FILE']): + # files.append(search['DATA_FILE'].iloc[i]) +# return search table + if 'table' in output: return search # return just the filenames - if getList == True: - return files - - if len(files) == 1: - if kwargs.get('lucky',False) == True: - print('\nRetrieving 1 lucky file\n') - else: - print('\nRetrieving 1 file\n') - skwargs = search.iloc[0].to_dict() - result.append(Spectrum(files[0],**skwargs)) - else: -# if (kwargs.get('lucky',False) == True): + if 'file' in output: return list(search['DATA_FILE']) + +# return Spectrum objects +# limit number + if limit > 0 and limit < len(search): search = search.iloc[:limit] + result=[] +# string on feedback + if verbose==True: + txt = ' file' + if kwargs.get('lucky',False) == True: txt=' lucky'+txt + if len(search)>1: txt = txt+'s' + print('\nreturning {:.0f}{}\n'.format(len(search),txt)) + for i,k in enumerate(search[key_name]): + skwargs = search.iloc[i].to_dict() + result.append(Spectrum(k)) +# if len(files) == 1: +# if kwargs.get('lucky',False) == True: +# print('\nRetrieving 1 lucky file\n') +# else: +# print('\nRetrieving 1 file\n') +# skwargs = search.iloc[0].to_dict() +# result.append(Spectrum(files[0],**skwargs)) +# else: +# # if (kwargs.get('lucky',False) == True): # print('\nRetrieving 1 lucky file\n') # ind = numpy.random.choice(numpy.arange(len(files))) # print(x) # result.append(Spectrum(files[ind],header=search[ind])) # else: - if limit != 0 and limit < len(files): - files = files[:limit] - search = search.iloc[:limit] - print('\nRetrieving {} files\n'.format(len(files))) - for i,x in enumerate(files): - skwargs = search.iloc[i].to_dict() - result.append(Spectrum(x,header=dict(search.iloc[i]),**skwargs)) - - else: - if checkAccess() == False: - sys.stderr.write('\nNo published files match search criteria\n\n') - else: - sys.stderr.write('\nNo files match search criteria\n\n') + # if limit != 0 and limit < len(files): + # files = files[:limit] + # search = search.iloc[:limit] + # print('\nRetrieving {} files\n'.format(len(files))) + # for i,x in enumerate(files): + # skwargs = search.iloc[i].to_dict() + # result.append(Spectrum(x,header=dict(search.iloc[i]),**skwargs)) + + # else: + # if checkAccess() == False: + # sys.stderr.write('\nNo published files match search criteria\n\n') + # else: + # sys.stderr.write('\nNo files match search criteria\n\n') return result @@ -4319,6 +4387,11 @@ def getStandard(spt, **kwargs): kys = STDS_ESD_SPEX_KEYS subclass = 'esd' stdtype = 'extreme subdwarf' + elif kwargs.get('dsd',False) or 'd/sd' in sptstr: + stds = STDS_DSD_SPEX + kys = STDS_DSD_SPEX_KEYS + subclass = 'd/sd' + stdtype = 'mild subdwarf' elif kwargs.get('sd',False) or 'sd' in sptstr: 
stds = STDS_SD_SPEX kys = STDS_SD_SPEX_KEYS @@ -4390,8 +4463,21 @@ def initiateStandards(*args,**kwargs): initiateStandards(intg=True) initiateStandards(vlg=True) return + if kwargs.get('subdwarf',False): + swargs = copy.deepcopy(kwargs) + del swargs['subdwarf'] + initiateStandards(sd=True) + initiateStandards(dsd=True) + initiateStandards(esd=True) + return + if kwargs.get('young',False): + swargs = copy.deepcopy(kwargs) + del swargs['young'] + initiateStandards(intg=True) + initiateStandards(vlg=True) + return - elif kwargs.get('sd',False): + if kwargs.get('sd',False): stds = STDS_SD_SPEX kys = copy.deepcopy(STDS_SD_SPEX_KEYS) elif kwargs.get('dsd',False): @@ -4426,87 +4512,159 @@ def initiateStandards(*args,**kwargs): return - -def keySource(keys, **kwargs): - ''' - :Purpose: Takes a source key and returns a table with the source information - :param keys: source key or a list of source keys - :Example: - >>> import splat - >>> print spl.keySource(10001) - SOURCE_KEY NAME DESIGNATION ... NOTE SELECT - ---------- ------------------------ ----------------- ... ---- ------ - 10001 SDSS J000013.54+255418.6 J00001354+2554180 ... True - >>> print spl.keySource([10105, 10623]) - SOURCE_KEY NAME DESIGNATION ... NOTE SELECT - ---------- ---------------------- ----------------- ... ---- ------ - 10105 2MASSI J0103320+193536 J01033203+1935361 ... True - 10623 SDSS J09002368+2539343 J09002368+2539343 ... True - >>> print spl.keySource(1000001) - No sources found with source key 1000001 - False +def keySource(keys,verbose=True): ''' + Purpose + ------- -# vectorize - if isinstance(keys,list) == False: - keys = [keys] + Searches DB_SOURCE for source matching input source key(s) in column 'SOURCE_KEY' -# sdb = ascii.read(SPLAT_PATH+DB_FOLDER+SOURCES_DB, delimiter='\t',fill_values='-99.',format='tab') -# sdb = fetchDatabase(SPLAT_PATH+DB_FOLDER+SOURCES_DB) -# sdb = copy.deepcopy(DB_SOURCES) -# sdb['SELECT'] = [x in keys for x in sdb['SOURCE_KEY']] + Parameters + ---------- - sdb = DB_SOURCES[[x in keys for x in DB_SOURCES['SOURCE_KEY']]] -# if sum(sdb['SELECT']) == 0.: - if len(sdb) == 0.: - if kwargs.get('verbose',True) == True: print('No sources found with source key(s) = {}'.format(*keys)) - return False - else: -# db = sdb[:][numpy.where(sdb['SELECT']==1)] - return sdb + keys : int or list + integer or list of integers corresponding to source keys + verbose = True : boolean [optional] + set to True to have program return verbose output + + Outputs + ------- + + pandas DataFrame containing the rows in DB_SOURCE that match input keys, + or empty pandas DataFrame + + Example + ------- + + TBD + + Dependencies + ------------ + + `keySearch()`_ + + .. _`keySearch()` : api.html#splat.utilities.keySearch -def keySpectrum(keys, **kwargs): ''' - :Purpose: Takes a spectrum key and returns a table with the spectrum and source information - :param keys: spectrum key or a list of source keys - :Example: - >>> import splat - >>> print spl.keySpectrum(10001) - DATA_KEY SOURCE_KEY DATA_FILE ... COMPANION COMPANION_NAME NOTE_2 - -------- ---------- ---------------- ... --------- -------------- ------ - 10001 10443 10001_10443.fits ... - >>> print spl.keySpectrum([10123, 11298]) - DATA_KEY SOURCE_KEY DATA_FILE ... COMPANION COMPANION_NAME NOTE_2 - -------- ---------- ---------------- ... --------- -------------- ------ - 11298 10118 11298_10118.fits ... - 10123 10145 10123_10145.fits ... 
- >>> print spl.keySpectrum(1000001) - No spectra found with spectrum key 1000001 - False + return keySearch(keys,key_name='SOURCE_KEY',db=DB_SOURCE,verbose=verbose) + + +def keySpectrum(keys,verbose=True): ''' + Purpose + ------- -# vectorize - if isinstance(keys,list) == False: - keys = [keys] + Searches DB_SPECTRA for source matching input source key(s) in column 'DATA_KEY' -# sdb = copy.deepcopy(DB_SPECTRA) -# sdb['SELECT'] = [x in keys for x in sdb['DATA_KEY']] + Parameters + ---------- -# if sum(sdb['SELECT']) == 0.: -# if verbose: print('No spectra found with spectrum key {}'.format(keys[0])) -# return False -# else: -# s2db = copy.deepcopy(DB_SOURCES) -# db = join(sdb[:][numpy.where(sdb['SELECT']==1)],s2db,keys='SOURCE_KEY') -# return db - - sdb = DB_SPECTRA[[x in keys for x in DB_SPECTRA['DATA_KEY']]] - if len(sdb) == 0.: - if kwargs.get('verbose',True) == True: print('No sources found with spectrum key(s) = {}'.format(*keys)) - return False - else: - return sdb + keys : int or list + integer or list of integers corresponding to source keys + + verbose = True : boolean [optional] + set to True to have program return verbose output + + Outputs + ------- + + pandas DataFrame containing the rows in DB_SPECTRA that match input keys, + or empty pandas DataFrame + + Example + ------- + + TBD + + Dependencies + ------------ + + `keySearch()`_ + + .. _`keySearch()` : api.html#splat.utilities.keySearch + + ''' + return keySearch(keys,key_name='DATA_KEY',db=DB_SPECTRA,verbose=verbose) + +# def keySource(keys, verbose=True db=DB_SOURCES, **kwargs): +# ''' +# :Purpose: Takes a source key and returns a table with the source information +# :param keys: source key or a list of source keys +# :Example: +# >>> import splat +# >>> print spl.keySource(10001) +# SOURCE_KEY NAME DESIGNATION ... NOTE SELECT +# ---------- ------------------------ ----------------- ... ---- ------ +# 10001 SDSS J000013.54+255418.6 J00001354+2554180 ... True +# >>> print spl.keySource([10105, 10623]) +# SOURCE_KEY NAME DESIGNATION ... NOTE SELECT +# ---------- ---------------------- ----------------- ... ---- ------ +# 10105 2MASSI J0103320+193536 J01033203+1935361 ... True +# 10623 SDSS J09002368+2539343 J09002368+2539343 ... True +# >>> print spl.keySource(1000001) +# No sources found with source key 1000001 +# False +# ''' + +# # vectorize +# if isinstance(keys,list) == False: keys = [keys] + +# # sdb = ascii.read(SPLAT_PATH+DB_FOLDER+SOURCES_DB, delimiter='\t',fill_values='-99.',format='tab') +# # sdb = fetchDatabase(SPLAT_PATH+DB_FOLDER+SOURCES_DB) +# # sdb = copy.deepcopy(DB_SOURCES) +# # sdb['SELECT'] = [x in keys for x in sdb['SOURCE_KEY']] + +# sdb = db[[x in keys for x in db[key_name]]] +# if len(sdb) == 0.: +# if kwargs.get('verbose',True) == True: print('No sources found with source key(s) = {}'.format(*keys)) +# return False +# else: +# # db = sdb[:][numpy.where(sdb['SELECT']==1)] +# return sdb + + +# def keySpectrum(keys, **kwargs): +# ''' +# :Purpose: Takes a spectrum key and returns a table with the spectrum and source information +# :param keys: spectrum key or a list of source keys +# :Example: +# >>> import splat +# >>> print spl.keySpectrum(10001) +# DATA_KEY SOURCE_KEY DATA_FILE ... COMPANION COMPANION_NAME NOTE_2 +# -------- ---------- ---------------- ... --------- -------------- ------ +# 10001 10443 10001_10443.fits ... +# >>> print spl.keySpectrum([10123, 11298]) +# DATA_KEY SOURCE_KEY DATA_FILE ... COMPANION COMPANION_NAME NOTE_2 +# -------- ---------- ---------------- ... 
--------- -------------- ------ +# 11298 10118 11298_10118.fits ... +# 10123 10145 10123_10145.fits ... +# >>> print spl.keySpectrum(1000001) +# No spectra found with spectrum key 1000001 +# False +# ''' + +# # vectorize +# if isinstance(keys,list) == False: +# keys = [keys] + +# # sdb = copy.deepcopy(DB_SPECTRA) +# # sdb['SELECT'] = [x in keys for x in sdb['DATA_KEY']] + +# # if sum(sdb['SELECT']) == 0.: +# # if verbose: print('No spectra found with spectrum key {}'.format(keys[0])) +# # return False +# # else: +# # s2db = copy.deepcopy(DB_SOURCES) +# # db = join(sdb[:][numpy.where(sdb['SELECT']==1)],s2db,keys='SOURCE_KEY') +# # return db + +# sdb = DB_SPECTRA[[x in keys for x in DB_SPECTRA['DATA_KEY']]] +# if len(sdb) == 0.: +# if kwargs.get('verbose',True) == True: print('No sources found with spectrum key(s) = {}'.format(*keys)) +# return False +# else: +# return sdb def searchLibrary(radius=10., instrument='SPEX-PRISM', source_database=DB_SOURCES, spectra_database=DB_SPECTRA, *args, **kwargs): @@ -5118,7 +5276,10 @@ def searchLibrary(radius=10., instrument='SPEX-PRISM', source_database=DB_SOURCE -def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_WAVE_UNIT,flux_unit=DEFAULT_FLUX_UNIT,dimensionless=False,comment='#',delimiter=',',file_type='',crval1='CRVAL1',cdelt1='CDELT1',catchSN=True,noZeroNoise=True,verbose=False,**kwargs): +def readSpectrum(file,folder='',file_type='',wave_unit=DEFAULT_WAVE_UNIT, + flux_unit=DEFAULT_FLUX_UNIT,dimensionless=False,comment='#',delimiter='', + crval1='CRVAL1',cdelt1='CDELT1',catch_sn=True,remove_nans=True,no_zero_noise=True, + use_instrument_reader=True,instrument=DEFAULT_INSTRUMENT,instrument_param={},verbose=False): ''' Purpose ------- @@ -5130,7 +5291,7 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ file : string filename of data to be read in; if full path not provided, routine will search in local directory - or in folder indicated by folder keyword + or in folder indicated by folder keyword; can also pass a URL to a remote file folder = '' : string [optional] full path to folder containing file @@ -5145,10 +5306,6 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ * wavelog = wavelength solution is logarithmic (common for echelle data) * sdss = sets both waveheader and wavelog - instrument = DEFAULT_INSTRUMENT : string [optional] - instrument by which data was acquired, by default the DEFAULT-INSTRUMENT global parameter - checked against INSTRUMENTS dictionary - wave_unit = DEFAULT_WAVE_UNIT : astropy.unit [optional] units of wavelength axis, by default specified by the DEFAULT_WAVE_UNIT global parameter @@ -5157,25 +5314,40 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ note that you can specify a unitless number dimensionless = False : boolean [optional] - set to True to set the flux units to a dimensionless quantity (e.g. 
transmission, reflectance) + set to True to set the flux units to a dimensionless quantity (for transmission, reflectance) comment = '#' : string [optional] for ascii files, character that indicates the file line is a comment (to be ignored) - delimiter = ',' : string [optional] + delimiter = '' : string [optional] for ascii files, character that separates columns of values crval1,cdelt1 = 'CRVAL1','CDELT1' : string [optional] for fits files for which the wavelength solution is embedded in header, these are the keywords containing the zeroth wavelength and linear change coefficient - catchSN = True : boolean [optional] - set to True to check if uncertainty axis is actually signal-to-noise, by checking if median(flux/uncertainty) < 1 + catch_sn = True : boolean [optional] + set to True to check if uncertainty axis is actually signal-to-noise, + by checking if median(flux/uncertainty) < 1 [NOTE: NO LONGER IMPLEMENTED] - noZeroNoise = True : boolean [optional] + remove_nans = True : boolean [optional] + set to True to remove all wave/flux/noise values for which wave or flux are nan + + no_zero_noise = True : boolean [optional] set to True to set all elements of noise array that are zero to numpy.nan; this helps in later computations of S/N or fit statistics + use_instrument_reader = True : bool [optional] + set to True to use the default instrument read in the DEFAULT-INSTRUMENT global parameter + checked against INSTRUMENTS dictionary + + instrument = DEFAULT_INSTRUMENT : string [optional] + instrument by which data was acquired, by default the DEFAULT-INSTRUMENT global parameter + checked against INSTRUMENTS dictionary + + instrument_param = {} : dict [optional] + instrument-specific parameters to pass to instrument reader + verbose = False : boolean [optional] set to True to have program return verbose output @@ -5193,6 +5365,7 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ Example ------- + Case 1: An ascii file named 'spectrum.csv' with three comma-delimited columns of wavelength in Angstroms and flux and uncertainty in erg/s/cm2/Angstrom @@ -5273,6 +5446,7 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ Dependencies ------------ + `checkAccess()`_ `checkInstrument()`_ `checkOnline()`_ @@ -5303,8 +5477,8 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ ''' # check inputs and keyword parameters - for k in ['file','filename','data_file','datafile']: - if k in list(kwargs.keys()): file = kwargs[k] + # for k in ['file','filename','data_file','datafile']: + # if k in list(kwargs.keys()): file = kwargs[k] if file == '': raise NameError('\nNo filename passed to readSpectrum') if not(isUnit(wave_unit)): @@ -5315,149 +5489,97 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ flux_unit = DEFAULT_FLUX_UNIT if dimensionless==True: flux_unit = u.dimensionless_unscaled -# program parameters - online = False - dnldflag = False - readin = False - zipflag = '' - -# leaving this as a kwargs -# NEED TO GET RID OF ONLINE ASPECTS HERE - url = kwargs.get('url',SPLAT_URL+DATA_FOLDER) - -# filename -# kwargs['filename'] = file -# kwargs['model'] = False - - -# first pass: check if file is local -# NEED TO GET RID OF ONLINE ASPECTS HERE - ofile = copy.deepcopy(file) - if os.path.exists(os.path.normpath(file)) == False: file = folder+os.path.basename(file) - if os.path.exists(os.path.normpath(file)) == False: - if verbose==True: print('Cannot find {} locally or in folder 
{}, trying online\n\n'.format(ofile,folder))
-        online=True
-        file = copy.deepcopy(ofile)
-#        kwargs['filename'] = file
-
-# second pass: download file if necessary
-# NEED TO GET RID OF ONLINE ASPECTS HERE
-#    online = not local
-    if online == True and checkAccess() == False:
-        raise ValueError('\nCannot find file locally, and you do not have remote access'.format(file))
-    if online == True:
-        ofile = copy.deepcopy(file)
-        if checkOnline(url+file) == '': file = folder+os.path.basename(file)
-        if checkOnline(url+file) == '':
-            raise ValueError('\nCannot find file {} or {} on SPLAT website {}\n\n'.format(ofile,file,url))
-# read in online file
-#        file = kwargs['filename']
-
-# this section downloads file to local machine and then reads it in
-# this has caused some problems when the file is not properly downloaded
-        try:
-            if os.path.exists(os.path.normpath(os.path.basename(file))):
-                os.remove(os.path.normpath(os.path.basename(file)))
-            open(os.path.normpath(os.path.basename(file)), 'wb').write(requests.get(url+file).content)
-            dnldflag = True
-        except:
-            raise NameError('\nProblem reading in {} from SPLAT website {}'.format(file,url))
+
+# if a url, make sure it exists
+    if file[:4]=='http':
+        if requests.get(file).status_code!=requests.codes.ok:
+            raise ValueError('Cannot find remote file {}; check URL or your online status'.format(file))
+
+# if a local file, make sure it exists
+    else:
+        ofile = copy.deepcopy(file)
+        if os.path.exists(os.path.normpath(file)) == False: file = folder+os.path.basename(file)
+        if os.path.exists(os.path.normpath(file)) == False:
+            raise ValueError('Cannot find {} locally or in folder {}\n\n'.format(ofile,folder))

 # instrument specific read shortcut - not working?
+    readin = False
     inst = checkInstrument(instrument)
-    if inst != False:
+    if inst != False and use_instrument_reader==True:
         instrument = inst
         if INSTRUMENTS[instrument]['reader'] != '':
-#            output = locals()[INSTRUMENTS[instrument]['reader']](file,verbose=verbose,**kwargs)
-#            print(INSTRUMENTS[instrument]['reader'],type(INSTRUMENTS[instrument]['reader']))
-            output = INSTRUMENTS[instrument]['reader'](file,verbose=verbose,**kwargs)
+            output = INSTRUMENTS[instrument]['reader'](file,verbose=verbose,**instrument_param)
             readin = True
-    # if instrument.upper()=='APOGEE': output = _readAPOGEE(file,**kwargs)
-    # elif instrument.upper()=='BOSS': output = _readBOSS(file,**kwargs)
-    # elif instrument.upper()=='LDSS3': output = _readIRAF(file,**kwargs)
-    # elif instrument.upper()=='FIRE': output = _readFIRE(file,**kwargs)
-    # elif instrument.upper()=='MAGE': output = _readMAGE(file,**kwargs)
-    # elif instrument.upper()=='WFC3': output = _readWFC3(file,**kwargs)
-    # elif instrument.upper()=='KAST-RED' or instrument.upper()=='KAST-BLUE': output = _readKAST(file,**kwargs)
-
 # other reads
     if readin==False:

 # determine which type of file
         file_type = '{} {}'.format(file.split('.')[-1],file_type)

-# gzip compressed file - unzip and remove later
-        if 'gz' in file_type:
-            zipflag = 'gz'
-            file = file.replace('.gz','')
-            file_type = '{} {}'.format(file.split('.')[-1],file_type)
-            with open(os.path.normpath(file), 'wb') as f_out, gzip.open(os.path.normpath(file+'.gz'), 'rb') as f_in:
-                shutil.copyfileobj(f_in, f_out)
-
-# bz2 compressed file - unzip and remove later
-        if 'bz2' in file_type:
-            zipflag = 'bz2'
-            file = file.replace('.bz2','')
-            file_type = '{} {}'.format(file.split('.')[-1],file_type)
-            with open(os.path.normpath(file), 'wb') as f_out, bz2.open(os.path.normpath(file+'.bz2'), 'rb') as f_in:
-                shutil.copyfileobj(f_in, f_out)
+# zipped file - extract root
+        for k in ['gz','bz2','zip']:
+            if k in file_type:
+                file_type = '{} {}'.format((file.replace(k,'')).split('.')[-1],file_type)

-# fits file
+# fits - can be done with fits.open as local or online and w/ or w/o gzip/bzip2/pkzip
         if 'fit' in file_type:
             with fits.open(os.path.normpath(file),ignore_missing_end=True,ignore_missing_simple=True,do_not_scale_image_data=True) as hdu:
                 hdu.verify('silentfix+ignore')
-                if 'NAXIS3' in list(hdu[0].header.keys()): d = numpy.copy(hdu[0].data[0,:,:])
-                else: d = numpy.copy(hdu[0].data)
                 header = hdu[0].header
+                if 'NAXIS3' in list(header.keys()): d = numpy.copy(hdu[0].data[0,:,:])
+                else: d = numpy.copy(hdu[0].data)

-# ascii file
-        else:
-            if 'csv' in file_type: delimiter = ','
-            if 'tsv' in file_type or 'tab' in file_type or 'txt' in file_type: delimiter = '\t'
-            if 'pipe' in file_type: delimiter = '|'
-            if 'latex' in file_type: delimiter = '&'
+# some specific formatting cases
+            if 'sdss' in file_type and 'fit' in file_type: file_type='waveheader wavelog {}'.format(file_type)
+# wavelength is in header
+            if 'waveheader' in file_type and 'fit' in file_type and len(d[:,0])<3:
+                flux = d[0,:]
+                if crval1 in list(header.keys()) and cdelt1 in list(header.keys()):
+                    wave = numpy.polyval([float(header[cdelt1]),float(header[crval1])],numpy.arange(len(flux)))
+                else:
+                    raise ValueError('\nCannot find {} and {} keywords in header of fits file {}'.format(crval1,cdelt1,file))
+                if len(d[:,0]) > 1: noise = d[1,:]
+                else: noise = [numpy.nan]*len(wave)
+# wavelength is explicitly in data array
+            else:
+                wave = d[0,:]
+                flux = d[1,:]
+                if len(d[:,0]) > 2: noise = d[2,:]
+                else: noise = [numpy.nan]*len(wave)

-            try:
-                d = numpy.genfromtxt(os.path.normpath(file), comments=comment, delimiter=delimiter, unpack=False, \
-                    missing_values = ('NaN','nan'), filling_values = (numpy.nan)).transpose()
-            except:
-                raise ValueError('\nCould not read ascii data from file {}'.format(file))
-#            d = numpy.genfromtxt(os.path.normpath(file), comments=';', unpack=False, \
-#                missing_values = ('NaN','nan'), filling_values = (numpy.nan)).transpose()
+# ascii - can be done with pandas as local or online and w/ or w/o gzip/bzip2/pkzip
+        else:
+            if 'csv' in file_type and delimiter=='': delimiter = ','
+            elif ('tsv' in file_type or 'tab' in file_type or 'txt' in file_type) and delimiter=='': delimiter = '\t'
+            elif 'pipe' in file_type and delimiter=='': delimiter = '|'
+            elif 'latex' in file_type and delimiter=='': delimiter = '&'
+
+# initial read
+            dp = pandas.read_csv(file,delimiter=delimiter,comment=comment,header=0)
+# if numbers in first row, replace with header
+            if isNumber(dp.columns[0])==True:
+                cnames = ['wave','flux']
+                if len(dp.columns)>2: cnames.append('noise')
+                if len(dp.columns)>3:
+                    for i in range(len(dp.columns)-3): cnames.append('c{}'.format(i))
+                dp = pandas.read_csv(file,delimiter=delimiter,comment=comment,names=cnames)
+# assume order wave, flux, noise
+# for now ignoring any other columns
+            wave = numpy.array(dp[dp.columns[0]])
+            flux = numpy.array(dp[dp.columns[1]])
+            if len(dp.columns)>2: noise = numpy.array(dp[dp.columns[2]])
+            else: noise = [numpy.nan]*len(dp)
+# placeholder header
             header = fits.Header() # blank header
-
-# check alignment of data array
-#        print(file,file_type,numpy.shape(d),header)
-        if len(d[:,0]) > len(d[0,:]): d = d.transpose() # array is oriented wrong
-
-# SDSS format for wavelength scale - in header and log format
-# DOES THIS NEED TO BE MOVED INTO A SPECIFIC READER? 
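# Minimal usage sketch for the rewritten ascii branch of readSpectrum() above. The file
# name 'spectrum.csv' is hypothetical and should hold comma-separated wavelength, flux,
# and (optionally) noise columns; use_instrument_reader=False forces the generic reader
# rather than an instrument-specific one. Assumes readSpectrum is exposed at the
# top-level splat namespace.
import splat

out = splat.readSpectrum('spectrum.csv', delimiter=',', use_instrument_reader=False)
print(list(out.keys()))   # 'wave', 'flux', 'noise', 'header', 'wave_unit', 'flux_unit'
print(out['wave'][:5])    # wavelengths carry the default astropy wave unit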
- if 'sdss' in file_type: file_type='waveheader wavelog {}'.format(file_type) - if 'waveheader' in file_type or len(d[:,0])<2: - flux = d[0,:] - if crval1 in list(header.keys()) and cdelt1 in list(header.keys()): -# wave = numpy.linspace(float(header[crval1]),float(header[crval1])+len(flux)*float(header[cdelt1]),num=len(flux)) - wave = numpy.polyval([float(header[cdelt1]),float(header[crval1])],numpy.arange(len(flux))) - else: - raise ValueError('\nCannot find {} and {} keywords in header of fits file {}'.format(crval1,cdelt1,file)) - if len(d[:,0]) > 1: noise = d[1,:] - else: - noise = numpy.zeros(len(wave)) - noise[:] = numpy.nan -# wavelength is explicitly in data array - else: - wave = d[0,:] - flux = d[1,:] - if len(d[:,0]) > 2: noise = d[2,:] - else: - noise = numpy.zeros(len(wave)) - noise[:] = numpy.nan # wavelength scale is logarithmic if 'wavelog' in file_type: wave = 10.**wave - output = {'wave': wave, 'flux': flux, 'noise': noise, 'header': header, 'wave_unit': wave_unit, 'flux_unit': flux_unit} +# final output dictionary + output = {'wave': wave, + 'flux': flux, + 'noise': noise, + 'header': header, + 'wave_unit': wave_unit, + 'flux_unit': flux_unit} # make sure arrays are numpy arrays output['wave'] = numpy.array(output['wave']) @@ -5466,21 +5588,21 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ # make sure arrays have units if not isUnit(output['wave']): output['wave'] = output['wave']*wave_unit + output['wave'].to(wave_unit) if not isUnit(output['flux']): output['flux'] = output['flux']*flux_unit + output['flux'].to(flux_unit) if not isUnit(output['noise']): output['noise'] = output['noise']*flux_unit - -# fix nans in flux -# w = numpy.where(numpy.isnan(flux) == True) -# flux[w] = 0. + output['noise'].to(flux_unit) # remove all parts of spectrum that are nans - w = numpy.where(numpy.logical_and(numpy.isnan(output['wave']) == False,numpy.isnan(output['flux']) == False)) - output['wave'] = output['wave'][w] - output['flux'] = output['flux'][w] - output['noise'] = output['noise'][w] + if remove_nans==True: + w = numpy.where(numpy.logical_and(numpy.isnan(output['wave']) == False,numpy.isnan(output['flux']) == False)) + output['wave'] = output['wave'][w] + output['flux'] = output['flux'][w] + output['noise'] = output['noise'][w] # force places where noise is zero to be NaNs - if noZeroNoise==True: + if no_zero_noise==True: output['noise'][numpy.where(output['noise'] == 0.)] = numpy.nan @@ -5490,10 +5612,6 @@ def readSpectrum(file,folder='',instrument=DEFAULT_INSTRUMENT,wave_unit=DEFAULT_ # if inst != False: # for k in list(INSTRUMENTS[inst].keys()): output[k] = INSTRUMENTS[inst][k] -# file clean up - if zipflag != '': os.remove(os.path.normpath(file)) - if online==True and dnldflag == True: os.remove(os.path.normpath(os.path.basename(file))) - return output @@ -6086,7 +6204,7 @@ def classifyByIndex(sp,ref='burgasser',string_flag=True,round_flag=False,remeasu -def classifyByStandard(sp, std_class='dwarf',dof=-1, **kwargs): +def classifyByStandard(sp, std_class='dwarf',dof=-1, verbose=False,**kwargs): ''' :Purpose: Determine the spectral type and uncertainty for a @@ -6162,7 +6280,7 @@ def classifyByStandard(sp, std_class='dwarf',dof=-1, **kwargs): ('sdL0.0:', 1.8630159149200021) ''' - verbose = kwargs.get('verbose',False) +# verbose = kwargs.get('verbose',False) best_flag = kwargs.get('best',True) average_flag = kwargs.get('average',not best_flag) best_flag = not average_flag @@ -6192,7 +6310,7 @@ def classifyByStandard(sp, 
std_class='dwarf',dof=-1, **kwargs): return classifyByStandard(sp,**mkwargs) # assign subclasses - allowed_classes = ['dwarf','subdwarf','sd','esd','dsd','lowg','vlg','intg','all'] + allowed_classes = ['dwarf','sd','esd','dsd','vlg','intg','subdwarf','young','all'] for a in allowed_classes: if kwargs.get(a,False) == True: std_class = a std_class = std_class.lower() @@ -6206,25 +6324,25 @@ def classifyByStandard(sp, std_class='dwarf',dof=-1, **kwargs): subclass = '' stdtype = 'Dwarf' if verbose==True: print('Using dwarf standards') - elif std_class == 'sd' or std_class == 'subdwarf': + elif std_class == 'sd': initiateStandards(sd=True) stds = STDS_SD_SPEX subclass = 'sd' - stdtype = 'Subdwarf' + stdtype = 'subdwarf' if verbose==True: print('Using subdwarf standards') elif std_class == 'dsd': initiateStandards(dsd=True) stds = STDS_DSD_SPEX subclass = '' - stdtype = 'Mild subdwarf' + stdtype = 'mild subdwarf' if verbose == True: print('Using dwarf standards') elif std_class == 'esd': initiateStandards(esd=True) stds = STDS_ESD_SPEX subclass = 'esd' - stdtype = 'Extreme Subdwarf' + stdtype = 'extreme subdwarf' if verbose == True: print('Using extreme subdwarf standards') - elif std_class == 'vlg' or std_class == 'lowg': + elif std_class == 'vlg': initiateStandards(vlg=True) stds = STDS_VLG_SPEX subclass = '' @@ -6236,6 +6354,22 @@ def classifyByStandard(sp, std_class='dwarf',dof=-1, **kwargs): subclass = '' stdtype = 'Intermediate Gravity' if verbose == True: print('Using intermediate low gravity standards') + elif std_class == 'subdwarf': + initiateStandards(sd=True) + initiateStandards(dsd=True) + initiateStandards(esd=True) + stds = STDS_SD_SPEX.copy() + stds.update(STDS_DSD_SPEX) + stds.update(STDS_ESD_SPEX) + stdtype = 'subdwarf' + if verbose == True: print('Using d/sd, sd, and esd standards') + elif std_class == 'young': + initiateStandards(vlg=True) + initiateStandards(intg=True) + stds = STDS_VLG_SPEX.copy() + stds.update(STDS_INTG_SPEX) + stdtype = 'young' + if verbose == True: print('Using INTG and VLG standards') elif std_class == 'all': initiateStandards() initiateStandards(sd=True) @@ -6272,15 +6406,14 @@ def classifyByStandard(sp, std_class='dwarf',dof=-1, **kwargs): else: print('\nWarning: do not recognize method = {}'.format(kwargs['method'])) fit_ranges = [[0.7,2.45]] # by default, compare whole spectrum + fit_ranges = kwargs.get('fit_ranges',fit_ranges) fit_ranges = kwargs.get('fitrange',fit_ranges) fit_ranges = kwargs.get('fitrng',fit_ranges) fit_ranges = kwargs.get('comprange',fit_ranges) fit_ranges = kwargs.get('comprng',fit_ranges) if not isinstance(fit_ranges[0],list): fit_ranges = [fit_ranges] - - -# if verbose==True: print(fit_ranges) + if verbose==True: print('Fitting over range {}'.format(fit_ranges)) # compute fitting statistics stat = [] @@ -6291,8 +6424,7 @@ def classifyByStandard(sp, std_class='dwarf',dof=-1, **kwargs): chisq,scale = compareSpectra(sp,stds[t],fit_ranges=fit_ranges,statistic=statistic,novar2=True) stat.append(chisq) sspt.append(t) - if (verbose): - print('Type {}: statistic = {}, scale = {}'.format(t, chisq, scale)) + if verbose==True: print('Type {}: statistic = {}, scale = {}'.format(t, chisq, scale)) # list of sorted standard files and spectral types sorted_stdsptnum = [x for (y,x) in sorted(zip(stat,sspt))] @@ -6976,7 +7108,9 @@ def classifySB(sp,ref='burgasser2010',output='classification',spt='',indices=Non for k in list(indices_measured.keys()): if k not in list(indices.keys()): indices[k] = indices_measured[k] - if verbose==True: 
print('\t{} = {:.2f}+/-{:.2f}'.format(k,indices[k][0],indices[k][1])) + if verbose==True and isinstance(indices[k],tuple)==True: +# print(k,indices[k]) + print('\t{} = {:.2f}+/-{:.2f}'.format(k,indices[k][0],indices[k][1])) if index_data['spt']==True: if 'SPT' not in list(indices.keys()): indices['SPT'] = spt @@ -7581,7 +7715,7 @@ def measureIndex(sp,ranges,method='ratio',sample='integrate',nsamples=100,noiseF elif (sample == 'sum'): value[i] = numpy.nansum(yNum) elif (sample == 'median'): - value[i] = numpy.median(yNum) + value[i] = numpy.nanmedian(yNum) elif (sample == 'maximum'): value[i] = numpy.nanmax(yNum) elif (sample == 'minimum'): @@ -7612,7 +7746,7 @@ def measureIndex(sp,ranges,method='ratio',sample='integrate',nsamples=100,noiseF elif (sample == 'sum'): value_sim[i,j] = numpy.nansum(yVar) elif (sample == 'median'): - value_sim[i,j] = numpy.median(yVar) + value_sim[i,j] = numpy.nanmedian(yVar) elif (sample == 'maximum'): value_sim[i,j] = numpy.nanmax(yVar) elif (sample == 'minimum'): diff --git a/splat/initialize.py b/splat/initialize.py index 15927dd93f..29495dffd4 100644 --- a/splat/initialize.py +++ b/splat/initialize.py @@ -14,7 +14,7 @@ # program constants -VERSION = '2022.12.09' +VERSION = '2023.05.27' __version__ = VERSION SPLAT_URL = 'http://splat.physics.ucsd.edu/splat/' DOCUMENTATION_URL = 'http://pono.ucsd.edu/~adam/splat/' @@ -636,7 +636,9 @@ 'morley14': {'instruments': {}, 'name': 'Morley et al. (2014)', 'citation': 'Morley et al. (2014)', 'bibcode': '2014ApJ...787...78M', 'altname': ['morley2014'], 'default': {'teff': 300., 'logg': 5.0, 'z': 0., 'fsed': 'f5', 'cld': 'h50'}}, \ # 'nextgen99': {'instruments': {}, 'name': 'Phoenix NextGen', 'citation': 'Hauschildt et al. (1999)', 'bibcode': '1999ApJ...525..871H', 'altname': ['nextgen,hauschildt,hauschildt99,hauschildt1999'], 'default': {'teff': 2000., 'logg': 5.0, 'z': 0.0}}, \ 'saumon08': {'instruments': {}, 'name': 'Saumon & Marley 2008', 'citation': 'Saumon & Marley 2008', 'bibcode': '2008ApJ...689.1327S', 'altname': ['saumon','saumon2008'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0.}}, \ - 'sonora18': {'instruments': {}, 'name': 'Sonora 2018', 'citation': 'Marley et al. (2018)', 'bibcode': 'marley_mark_2018_1309035', 'altname': ['marley','marley18','marley2018','sonora','sonora2018'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0., 'cld': 'nc'}}, \ + 'sonora18': {'instruments': {}, 'name': 'Sonora 2018', 'citation': 'Marley et al. (2018)', 'bibcode': 'marley_mark_2018_1309035', 'altname': ['marley','marley18','marley2018','sonora2018'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0., 'cld': 'nc'}}, \ + 'sonora21': {'instruments': {}, 'name': 'Sonora-bobcat', 'citation': 'Marley et al. (2021)', 'bibcode': '2021ApJ...920...85M', 'altname': ['marley2021','sonora','sonora2021','bobcat','sonora-bobcat'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0., 'co': 1}}, \ + 'karalidi21': {'instruments': {}, 'name': 'Sonora-cholla', 'citation': 'Karalidi et al. (2021)', 'bibcode': '2021ApJ...923..269K', 'altname': ['karalidi2021','karalidi','sonora-cholla','cholla'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0., 'kzz': 4}}, \ 'gerasimov': {'instruments': {}, 'name': 'Gerasimov 2020', 'citation': 'Gerasimov et al. (2020)', 'bibcode': '2020RNAAS...4..214G', 'altname': ['phxlowz'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0.}}, \ 'lowz': {'instruments': {}, 'name': 'LowZ models', 'citation': 'Meisner et al. 
(2021)', 'bibcode': '2021ApJ...915..120M', 'altname': ['meisner2021','mei21'], 'default': {'teff': 1000., 'logg': 5.0, 'z': 0., 'kzz': '2.0', 'co': 0.85}}, \ # 'btcond': {'instruments': {}, 'name': 'BT Cond', 'citation': 'Allard et al. (2012)', 'bibcode': '2012RSPTA.370.2765A', 'altname': ['dusty-cond','bt-cond'], 'rawfolder': '/Volumes/splat/models/btcond/ORIGINAL/', 'default': {'teff': 1500., 'logg': 5.0, 'z': 0.0, 'enrich': 0.0}}, \ @@ -770,6 +772,18 @@ 'K/J': {'ranges': ([2.06,2.10]*u.micron,[1.25,1.29]*u.micron), 'method': 'ratio', 'sample': 'integrate'},\ 'H-dip': {'ranges': ([1.61,1.64]*u.micron,[1.56,1.59]*u.micron,[1.66,1.69]*u.micron), 'method': 'inverse_line', 'sample': 'integrate'},\ }}, + 'burgasser2023': {'altname': ['burgasser23','bur23'], 'bibcode': '', 'indices': {\ + 'CH4-J': {'ranges': ([1.315,1.335]*u.micron,[1.26,1.285]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'CH4-H': {'ranges': ([1.635,1.675]*u.micron,[1.56,1.60]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'CH4-K': {'ranges': ([2.215,2.255]*u.micron,[2.08,2.12]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'H2O-J': {'ranges': ([1.14,1.165]*u.micron,[1.26,1.285]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'H2O-H': {'ranges': ([1.48,1.52]*u.micron,[1.56,1.60]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'H2O-K': {'ranges': ([1.975,1.995]*u.micron,[2.08,2.10]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'Y/J': {'ranges': ([1.005,1.045]*u.micron,[1.25,1.29]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'K/J': {'ranges': ([2.06,2.10]*u.micron,[1.25,1.29]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'K/H': {'ranges': ([2.06,2.10]*u.micron,[1.56,1.60]*u.micron), 'method': 'ratio', 'sample': 'median'},\ + 'H-dip': {'ranges': ([1.61,1.64]*u.micron,[1.56,1.59]*u.micron,[1.66,1.69]*u.micron), 'method': 'inverse_line', 'sample': 'median'},\ + }}, 'bardalez2014': {'altname': ['bardalez','bardalez14','bar14'], 'bibcode': '2014ApJ...794..143B', 'indices': {\ 'H2O-J': {'ranges': ([1.14,1.165]*u.micron,[1.26,1.285]*u.micron), 'method': 'ratio', 'sample': 'integrate'},\ 'CH4-J': {'ranges': ([1.315,1.335]*u.micron,[1.26,1.285]*u.micron), 'method': 'ratio', 'sample': 'integrate'},\ @@ -868,41 +882,45 @@ # classification indices -# need to add allers INDEX_CLASSIFICATION_RELATIONS = { 'reid2001': {'altname': ['reid','reid01','rei01'], 'bibcode': '2001AJ....121.1710R', 'method': 'polynomial', 'sptoffset': 20., 'decimal': False, 'min_indices': 1, 'sets': ['reid2001'], 'indices': { \ 'H2O-A': {'fitunc': 1.18, 'range': [18,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-32.1, 23.4]}, \ - 'H2O-B': {'fitunc': 1.02, 'range': [18,28], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-24.9, 20.7]}} - }, + 'H2O-B': {'fitunc': 1.02, 'range': [18,28], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-24.9, 20.7]}, + }}, 'testi2001': {'altname': ['testi','testi01','tes01'], 'bibcode': '2001ApJ...552L.147T', 'method': 'polynomial', 'sptoffset': 10., 'decimal': True, 'min_indices': 2, 'sets': ['testi2001'], 'indices': { \ 'sHJ': {'fitunc': 0.5, 'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-1.87, 1.67]}, \ 'sKJ': {'fitunc': 0.5, 'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-1.20, 2.01]}, \ 'sH2O_J': {'fitunc': 0.5, 'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [1.54, 0.98]}, \ 'sH2O_H1': {'fitunc': 0.5, 'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [1.27, 0.76]}, \ 'sH2O_H2': {'fitunc': 0.5, 
'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [2.11, 0.29]}, \ - 'sH2O_K': {'fitunc': 0.5, 'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [2.36, 0.60]}} - }, + 'sH2O_K': {'fitunc': 0.5, 'range': [20,26], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [2.36, 0.60]}, + }}, + 'slesnick2004': {'altname': ['slesnick','slesnick04','sle04'], 'bibcode': '2004ApJ...610.1045S', 'method': 'polynomial', 'sptoffset': 10., 'decimal': False, 'min_indices': 1, 'sets': ['slesnick2004'], 'indices': { \ + 'H2O-1': {'fitunc': 1.2, 'range': [10,30], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-35.35,33.71]}, \ + 'H2O-2': {'fitunc': 0.53, 'range': [12,23], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-27.10,34.13]}, \ + 'FeH': {'fitunc': 0.66, 'range': [13,23], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-31.59,36.31]}, \ + }}, 'burgasser2007': {'altname': ['burgasser','burgasser07','bur07'], 'bibcode': '2007ApJ...659..655B', 'method': 'polynomial', 'sptoffset': 20., 'decimal': False, 'min_indices': 2, 'sets': ['burgasser2006'], 'indices': { \ 'H2O-J': {'fitunc': 0.8, 'range': [20,39], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [1.038e2, -2.156e2, 1.312e2, -3.919e1, 1.949e1]}, \ 'H2O-H': {'fitunc': 1.0, 'range': [20,39], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [9.087e-1, -3.221e1, 2.527e1, -1.978e1, 2.098e1]}, \ 'CH4-J': {'fitunc': 0.7, 'range': [30,39], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [1.491e2, -3.381e2, 2.424e2, -8.450e1, 2.708e1]}, \ 'CH4-H': {'fitunc': 0.3, 'range': [31,39], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [2.084e1, -5.068e1, 4.361e1, -2.291e1, 2.013e1]}, \ - 'CH4-K': {'fitunc': 1.1, 'range': [20,37], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-1.259e1, -4.734e0, 2.534e1, -2.246e1, 1.885e1]}} - }, + 'CH4-K': {'fitunc': 1.1, 'range': [20,37], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-1.259e1, -4.734e0, 2.534e1, -2.246e1, 1.885e1]}, \ + }}, 'geballe2002': {'altname': ['geballe','geballe02','geb02'], 'bibcode': '2002ApJ...564..466G', 'method': 'ranges', 'sptoffset': 20., 'decimal': False, 'min_indices': 2, 'sets': ['geballe2002','martin1999'], 'indices': { \ 'PC3': [[2.4,2.6,20.],[2.6,2.86,21.],[2.85,3.25,22.],[3.25,4.25,23.],[4.25,6,24.]],\ 'Color-d2': [[4.5,5.5,20.],[5.5,6.5,21.],[6.5,7.5,22.],[7.5,10.,23.],[10,17,24.],[17.,23.,25.],[23.,25.,26.]],\ 'H2O-1.2': [[1.5,1.7,30.],[1.7,1.9,31.],[1.9,2.15,32.],[2.15,2.5,33.],[2.5,3.0,34.],[3.0,4.5,35.],[4.5,6.5,36.],[6.5,10.,37.],[10.,15.,38.]],\ 'H2O-1.5': [[1.2,1.27,20.],[1.27,1.35,21.],[1.35,1.43,22.],[1.43,1.5,23.],[1.5,1.55,24.],[1.55,1.6,25.],[1.6,1.65,26.],[1.65,1.7,27.],[1.7,1.8,28.],[1.8,1.95,29.],[1.95,2.2,30.],[2.2,2.5,31.],[2.5,3.0,32.],[3.0,3.5,33.],[3.5,4.5,34.],[4.5,5.5,35.],[5.5,7.,36.],[7.,9.,37.],[9.,12.,38.]],\ 'CH4-1.6': [[1.02,1.07,30.],[1.07,1.15,31.],[1.15,1.3,32.],[1.3,1.5,33.],[1.5,1.8,34.],[1.8,2.5,35.],[2.5,4,36.],[4.,6.,37.],[6.,9.,38.]],\ - 'CH4-2.2': [[0.91,0.94,23.],[0.94,0.98,24.],[0.98,1.025,25.],[1.025,1.075,26.],[1.075,1.125,27.],[1.125,1.175,28.],[1.175,1.25,29.],[1.25,1.4,30.],[1.4,1.6,31.],[1.6,1.95,32.],[1.95,2.75,33.],[2.75,3.8,34.],[3.8,5.5,35.],[5.5,8.5,36.],[8.5,12.,37],[12.,18.,38.]]} - }, + 'CH4-2.2': [[0.91,0.94,23.],[0.94,0.98,24.],[0.98,1.025,25.],[1.025,1.075,26.],[1.075,1.125,27.],[1.125,1.175,28.],[1.175,1.25,29.],[1.25,1.4,30.],[1.4,1.6,31.],[1.6,1.95,32.],[1.95,2.75,33.],[2.75,3.8,34.],[3.8,5.5,35.],[5.5,8.5,36.],[8.5,12.,37],[12.,18.,38.]] + }}, 'allers2013': {'altname': ['allers','allers13','all13'], 'bibcode': 
'2013ApJ...657..511A', 'method': 'polynomial', 'sptoffset': 10., 'decimal': False, 'min_indices': 2, 'sets': ['allers2013','mclean2003','slesnick2004'], 'indices': { \ 'H2O': {'fitunc': 0.390, 'range': [15,25], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [24.0476, -104.424, 169.388,-83.5437]}, \ 'H2O-1': {'fitunc': 1.097, 'range': [14,25], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [28.5982, -80.7404, 39.3513, 12.1927]}, \ 'H2OD': {'fitunc': 0.757, 'range': [20,28], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [-97.230, 229.884, -202.245, 79.4477]}, \ - 'H2O-2': {'fitunc': 0.501, 'range': [14,22], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [37.5013, -97.8144, 55.4580, 10.8822]}},\ - }, + 'H2O-2': {'fitunc': 0.501, 'range': [14,22], 'spt': 0., 'sptunc': 99., 'mask': 1., 'coeff': [37.5013, -97.8144, 55.4580, 10.8822]},\ + }}, } # indices for spectral binary identification @@ -1237,19 +1255,19 @@ 'WISE_W3': {\ 'spt': [16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,35,36,37,38,39], \ 'values': [8.72,8.96,9.18,9.24,9.35,9.66,9.62,9.91,10.05,10.13,10.53,10.87,10.66,10.48,11.02,11.26,10.61,10.70,11.37,11.61,12.26,12.27,11.97],\ - 'rms': [0.34,0.33,0.47,0.42,0.32,0.31,0.37,0.37,0.44,0.51,0.87,1.08,0.36,0.75,0.5,0.76,0.40,0.5,0.5,0.37,0.76,0.64,0.5]}}, - }, + 'rms': [0.34,0.33,0.47,0.42,0.32,0.31,0.37,0.37,0.44,0.51,0.87,1.08,0.36,0.75,0.5,0.76,0.40,0.5,0.5,0.37,0.76,0.64,0.5]}, + }}, 'burgasser2007': {'altname': ['burgasser','burgasser07','bur07'], 'bibcode': '2007ApJ...659..655B', 'sptoffset': 20, 'method': 'polynomial', 'filters': { 'MKO_J': {'fitunc' : 0.30, 'range' : [20., 38.], 'coeff': [.000203252, -.0129143, .275734, -1.99967, 14.8948]}, 'MKO_H': {'fitunc' : 0.27, 'range' : [20., 38.], 'coeff' : [.000175368, -.0108205, .227363, -1.60036, 13.2372]}, - 'MKO_K': {'fitunc' : 0.26, 'range' : [20., 38.], 'coeff': [.0000001051, -.000006985, .0001807, -.002271, .01414, -.04024, .05129, .2322, 10.45]}}, - }, + 'MKO_K': {'fitunc' : 0.26, 'range' : [20., 38.], 'coeff': [.0000001051, -.000006985, .0001807, -.002271, .01414, -.04024, .05129, .2322, 10.45]}, + }}, 'cruz2003': {'altname': ['cruz','cruz03','cru03'], 'bibcode': '2003AJ....126.2421C', 'sptoffset': 10, 'method': 'polynomial', 'filters': { - '2MASS_J': {'fitunc' : 0.30, 'range' : [16.,28.], 'coeff': [-6.892e-4,3.453e-2,-6.193e-1,5.043,-4.410]}}, - }, + '2MASS_J': {'fitunc' : 0.30, 'range' : [16.,28.], 'coeff': [-6.892e-4,3.453e-2,-6.193e-1,5.043,-4.410]}, + }}, 'dahn2002': {'altname': ['dahn','dahn02','dah02'], 'bibcode': '2002AJ....124.1170D', 'sptoffset': 10, 'method': 'polynomial', 'filters': { - '2MASS_J': {'fitunc' : 0.25, 'range' : [17.,28.], 'coeff': [0.341,8.38]}}, - }, + '2MASS_J': {'fitunc' : 0.25, 'range' : [17.,28.], 'coeff': [0.341,8.38]}, + }}, 'dupuy2012': {'altname': ['dupuy','dupuy12','dup12'], 'bibcode': '2012ApJS..201...19D', 'sptoffset': 10, 'method': 'polynomial', 'filters': { 'MKO_Y': {'fitunc': 0.40, 'range' : [16., 39.], 'coeff': [-.00000252638, .000285027, -.0126151, .279438, -3.26895, 19.5444, -35.1560]}, 'MKO_J': {'fitunc' : 0.39, 'range' : [16., 39.], 'coeff' : [-.00000194920, .000227641, -.0103332, .232771, -2.74405, 16.3986, -28.3129]}, @@ -1266,63 +1284,63 @@ 'WISE_W1': {'fitunc': 0.39, 'range':[16., 39.], 'coeff': [1.58040e-5, -3.33944e-4, -4.38105e-3, 3.55395e-1, 7.14765]}, 'WISE_W2': {'fitunc': 0.35, 'range':[16., 39.], 'coeff': [1.78555e-5, -8.81973e-4, 1.14325e-2, 1.92354e-1, 7.46564]}, 'WISE_W3': {'fitunc': 0.43, 'range':[16., 39.], 'coeff': 
[2.37656e-5,-1.28563e-3,2.01740e-2,6.64242e-2,7.81181e0]}, - 'WISE_W4': {'fitunc': 0.76, 'range':[16., 39.], 'coeff': [-2.16042e-3,1.14630e-1,7.78974e0]}}, - }, + 'WISE_W4': {'fitunc': 0.76, 'range':[16., 39.], 'coeff': [-2.16042e-3,1.14630e-1,7.78974e0]}, + }}, 'dupuy2013': {'altname': ['dupuy13','dup13'], 'bibcode': '2013Sci...341.1492D', 'sptoffset': 0, 'method': 'interpolate', 'filters': { 'MKO_Y': {'spt': [38,38.5,39,40.], 'values': [17.4,18.81,19.26,20.24], 'rms': [0.25,0.51,0.88,0.17]}, 'MKO_J': {'spt': [38,38.5,39,39.5,40.], 'values': [16.43,17.87,18.39,17.68,20.09], 'rms': [0.46,0.44,0.95,0.37,0.25]}, 'MKO_H': {'spt': [38,38.5,39,39.5,40.], 'values': [16.82,18.2,18.77,18.08,20.6], 'rms': [0.43,0.45,1.08,0.39,0.25]}, 'MKO_K': {'spt': [38,38.5,39,40.], 'values': [16.93,18.27,18.89,20.7], 'rms': [0.8,0.4,0.57,0.18]}, 'IRAC_CH1': {'spt': [38,38.5,39,39.5,40.], 'values': [15.11,15.83,16.17,15.58,16.99], 'rms': [0.15,0.22,0.23,0.41,0.21]}, - 'IRAC_CH2': {'spt': [38,38.5,39,39.5,40.], 'values': [13.4,13.79,14.09,13.51,14.66], 'rms': [0.21,0.12,0.2,0.43,0.28]}}, - }, + 'IRAC_CH2': {'spt': [38,38.5,39,39.5,40.], 'values': [13.4,13.79,14.09,13.51,14.66], 'rms': [0.21,0.12,0.2,0.43,0.28]}, + }}, 'faherty2012': {'altname': ['faherty12','fah12'],'bibcode': '2012ApJ...752...56F', 'sptoffset': 10, 'method': 'polynomial', 'filters': { 'MKO_J': {'fitunc' : 0.30, 'range' : [20., 38.], 'coeff': [.000203252, -.0129143, .275734, -1.99967, 14.8948]}, 'MKO_H': {'fitunc' : 0.27, 'range' : [20., 38.], 'coeff' : [.000175368, -.0108205, .227363, -1.60036, 13.2372]}, - 'MKO_K': {'fitunc' : 0.28, 'range' : [20., 38.], 'coeff' : [.0000816516, -.00469032, .0940816, -.485519, 9.76100]}}, - }, + 'MKO_K': {'fitunc' : 0.28, 'range' : [20., 38.], 'coeff' : [.0000816516, -.00469032, .0940816, -.485519, 9.76100]}, + }}, 'faherty2016': {'altname': ['faherty','faherty2016','faherty-field','fah16'],'bibcode': '2016ApJS..225...10F', 'sptoffset': 10, 'method': 'polynomial', 'filters': { '2MASS_J': {'fitunc' : 0.402, 'range' : [16., 39.], 'coeff': [3.478e-5,-2.684e-3,7.771e-2,-1.058,7.157,-8.350]}, '2MASS_H': {'fitunc' : 0.389, 'range' : [16., 39.], 'coeff' : [2.841e-5,-2.217e-3,6.551e-2,-9.174e-1,6.406,-7.496]}, '2MASS_KS': {'fitunc' : 0.537, 'range' : [16., 39.], 'coeff' : [2.540e-5,-1.997e-3,5.978e-2,-8.481e-1,5.970,-6.704]}, 'WISE_W1': {'fitunc' : 0.365, 'range' : [16., 39.], 'coeff' : [8.337e-6,-6.897e-4,2.258e-2,-3.603e-1,2.991,-1.664e-1]}, 'WISE_W2': {'fitunc' : 0.398, 'range' : [16., 39.], 'coeff' : [8.190e-6,-6.938e-4,2.283e-2,-3.655e-1,3.032,-5.043e-1]}, - 'WISE_W3': {'fitunc' : 0.446, 'range' : [16., 39.], 'coeff' : [-1.024e-6,9.477e-5,-2.573e-3,1.520e-2,3.365e-1,6.462]}}, - }, + 'WISE_W3': {'fitunc' : 0.446, 'range' : [16., 39.], 'coeff' : [-1.024e-6,9.477e-5,-2.573e-3,1.520e-2,3.365e-1,6.462]}, + }}, 'faherty2016-young': {'altname': ['faherty-young','fah16y'],'bibcode': '2016ApJS..225...10F', 'sptoffset': 10, 'method': 'polynomial', 'filters': { '2MASS_J': {'fitunc' : 0.647, 'range' : [17., 27.], 'coeff': [4.032e-3,-1.416e-1,2.097,8.478e-1]}, '2MASS_H': {'fitunc' : 0.634, 'range' : [17., 27.], 'coeff' : [2.642e-3,-1.049e-1,1.753,1.207]}, '2MASS_KS': {'fitunc' : 0.640, 'range' : [17., 27.], 'coeff' : [-1.585e-2,7.338e-1,4.537]}, 'WISE_W1': {'fitunc' : 0.648, 'range' : [17., 27.], 'coeff' : [-1.397e-2,5.955e-1,5.247]}, 'WISE_W2': {'fitunc' : 0.694, 'range' : [17., 27.], 'coeff' : [-1.507e-2,5.944e-1,5.061]}, - 'WISE_W3': {'fitunc' : 0.717, 'range' : [17., 27.], 'coeff' : [-1.003e-4,-1.670e-3,2.023e-1,7.529]}}, - 
}, + 'WISE_W3': {'fitunc' : 0.717, 'range' : [17., 27.], 'coeff' : [-1.003e-4,-1.670e-3,2.023e-1,7.529]}, + }}, 'faherty2016-group': {'altname': ['faherty-group','fah16g'],'bibcode': '2016ApJS..225...10F', 'sptoffset': 10, 'method': 'polynomial', 'filters': { '2MASS_J': {'fitunc' : 0.660, 'range' : [17., 27.], 'coeff': [-3.825e-3,1.370e-1,-9.279e-1,10.141]}, '2MASS_H': {'fitunc' : 0.603, 'range' : [17., 27.], 'coeff' : [-3.909e-3,1.346e-1,-9.347e-1,9.728]}, '2MASS_KS': {'fitunc' : 0.556, 'range' : [17., 27.], 'coeff' : [-4.006e-3,1.378e-1,-1.031,9.916]}, 'WISE_W1': {'fitunc' : 0.551, 'range' : [17., 27.], 'coeff' : [-4.483e-3,1.505e-1,-1.208,10.403]}, 'WISE_W2': {'fitunc' : 0.616, 'range' : [17., 27.], 'coeff' : [-6.821e-3,2.322e-1,-2.133,13.322]}, - 'WISE_W3': {'fitunc' : 0.427, 'range' : [17., 27.], 'coeff' : [-5.684e-3,1.993e-1,-1.987,13.972]}}, - }, + 'WISE_W3': {'fitunc' : 0.427, 'range' : [17., 27.], 'coeff' : [-5.684e-3,1.993e-1,-1.987,13.972]}, + }}, 'filippazzo2015': {'altname': ['filippazzo','filippazzo15','fillippazzo','filipazo','filippazo','fil15'],'bibcode': '2015ApJ...810..158F', 'sptoffset': 10, 'method': 'polynomial', 'filters': { '2MASS_J': {'fitunc': 0.40, 'range': [16., 39.], 'coeff': [3.478e-5, -2.684e-3, 7.771e-2, -1.058, 7.157, -8.350]}, - 'WISE_W2': {'fitunc': 0.40, 'range': [16., 39.], 'coeff': [8.190e-6, -6.938e-4, 2.283e-2, -3.655e-1, 3.032, -5.043e-1]}}, - }, + 'WISE_W2': {'fitunc': 0.40, 'range': [16., 39.], 'coeff': [8.190e-6, -6.938e-4, 2.283e-2, -3.655e-1, 3.032, -5.043e-1]}, + }}, 'hawley2002': {'altname': ['hawley02','hawley','haw02'], 'bibcode': '2002AJ....123.3409H', 'sptoffset': 0, 'method': 'interpolate', 'filters': { - '2MASS_J': {'spt': numpy.arange(10.,36.1,1.), 'values': [6.45,6.72,6.98,7.24,8.34,9.44,10.18,10.92,11.14,11.43,11.72,12.,12.29,12.58,12.87,13.16,14.31,14.45,14.58,14.72,14.86,14.99,15.13,15.27,15.4,15.54,15.68], 'rms': numpy.zeros(27)+0.4}}, - }, + '2MASS_J': {'spt': numpy.arange(10.,36.1,1.), 'values': [6.45,6.72,6.98,7.24,8.34,9.44,10.18,10.92,11.14,11.43,11.72,12.,12.29,12.58,12.87,13.16,14.31,14.45,14.58,14.72,14.86,14.99,15.13,15.27,15.4,15.54,15.68], 'rms': numpy.zeros(27)+0.4}, + }}, 'gonzales2018': {'altname': ['gonzales','gonzales18','gonzalez2018','gonzalez18','gonzalez','gon18','subdwarf'],'bibcode': 'TBD', 'sptoffset': 10., 'method': 'polynomial', 'filters': { '2MASS_J': {'fitunc' : 0.26, 'range' : [17., 27.], 'coeff': [0.263,8.49]}, '2MASS_H': {'fitunc' : 0.24, 'range' : [17., 27.], 'coeff': [0.304,7.77]}, '2MASS_KS': {'fitunc' : 0.21, 'range' : [17., 27.], 'coeff': [0.344,7.29]}, 'WISE_W1': {'fitunc' : 0.27, 'range' : [17., 27.], 'coeff': [0.241,7.72]}, - 'WISE_W2': {'fitunc' : 0.24, 'range' : [17., 27.], 'coeff': [0.228,7.62]}}, - }, + 'WISE_W2': {'fitunc' : 0.24, 'range' : [17., 27.], 'coeff': [0.228,7.62]}, + }}, 'knapp2004': {'altname': ['knapp','knapp04','kna04'], 'bibcode': '2004AJ....127.3553K', 'sptoffset': 10, 'method': 'polynomial', 'filters': { 'MKO_J': {'fitunc' : 0.40, 'range' : [21., 39.], 'coeff' : [-7.923e-5,3.986e-3,-6.848e-2,4.500e-1,-6.278e-1,12.03]}, - 'MKO_K': {'fitunc': 0.30, 'range' : [21., 39.], 'coeff': [-7.351e-5,3.524e-3,-5.819e-2,3.876e-1,-6.485e-1,10.93]}}, - }, + 'MKO_K': {'fitunc': 0.30, 'range' : [21., 39.], 'coeff': [-7.351e-5,3.524e-3,-5.819e-2,3.876e-1,-6.485e-1,10.93]}, + }}, 'kiman2019': {'altname': ['kiman','kiman19','kim19'], 'bibcode': '2019AJ....157..231K', 'sptoffset': 0, 'method': 'interpolate', 'filters': { 'GAIA_G': {'spt': [10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,26], 
'values': [8.13,8.78,9.35,9.97,10.77,11.86,12.92,13.54,14.60,15.26,16.11,16.82,17.11,17.89,18.51,18.92], 'rms': [0.76,0.83,0.69,0.74,0.77,0.85,0.53,0.56,0.55,0.53,0.44,0.31,0.40,0.59,0.27,0.17]}, 'GAIA_Rp': {'spt': [10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,26], 'values': [7.20,7.76,8.26,8.81,9.55,10.54,11.50,12.07,13.02,13.62,14.45,15.14,15.42,16.21,16.80,16.98], 'rms': [0.74,0.82,0.67,0.72,0.75,0.81,0.50,0.52,0.51,0.50,0.42,0.31,0.39,0.58,0.25,0.20]}, @@ -1351,8 +1369,8 @@ '2MASS_H': {'fitunc' : 0.36, 'range' : [16., 28.], 'coeff': [0.340,7.555]}, '2MASS_KS': {'fitunc' : 0.33, 'range' : [16., 28.], 'coeff': [0.317,7.292]}, 'WISE_W1': {'fitunc' : 0.26, 'range' : [16., 28.], 'coeff': [0.243,7.698]}, - 'WISE_W2': {'fitunc' : 0.24, 'range' : [16., 28.], 'coeff': [0.236,7.466]}}, - }, + 'WISE_W2': {'fitunc' : 0.24, 'range' : [16., 28.], 'coeff': [0.236,7.466]}, + }}, 'liu2016-optical': {'altname': ['liu-optical','liu16-optical','liu16-field-optical','liu-field-optical'],'bibcode': '2016ApJ...833...96L', 'sptoffset': 10., 'method': 'polynomial', 'filters': { 'MKO_Y': {'fitunc' : 0.29, 'range' : [16., 28.], 'coeff': [0.404,8.677]}, 'MKO_J': {'fitunc' : 0.26, 'range' : [16., 28.], 'coeff': [0.364,8.131]}, @@ -1362,8 +1380,8 @@ '2MASS_H': {'fitunc' : 0.30, 'range' : [16., 28.], 'coeff': [0.332,7.723]}, '2MASS_KS': {'fitunc' : 0.29, 'range' : [16., 28.], 'coeff': [0.308,7.491]}, 'WISE_W1': {'fitunc' : 0.29, 'range' : [16., 28.], 'coeff': [0.255,7.610]}, - 'WISE_W2': {'fitunc' : 0.29, 'range' : [16., 28.], 'coeff': [0.231,7.559]}}, - }, + 'WISE_W2': {'fitunc' : 0.29, 'range' : [16., 28.], 'coeff': [0.231,7.559]}, + }}, 'liu2016-vlg': {'altname': ['liu-vlg','liu16-vlg'],'bibcode': '2016ApJ...833...96L', 'sptoffset': 10., 'method': 'polynomial', 'filters': { 'MKO_Y': {'fitunc' : 0.77, 'range' : [16., 27.], 'coeff': [0.799,3.689]}, 'MKO_J': {'fitunc' : 0.72, 'range' : [16., 27.], 'coeff': [0.731,3.475]}, @@ -1373,8 +1391,8 @@ '2MASS_H': {'fitunc' : 0.64, 'range' : [16., 27.], 'coeff': [0.632,3.734]}, '2MASS_KS': {'fitunc' : 0.55, 'range' : [16., 27.], 'coeff': [0.573,3.699]}, 'WISE_W1': {'fitunc' : 0.46, 'range' : [16., 27.], 'coeff': [0.457,4.392]}, - 'WISE_W2': {'fitunc' : 0.48, 'range' : [16., 27.], 'coeff': [0.451,4.039]}}, - }, + 'WISE_W2': {'fitunc' : 0.48, 'range' : [16., 27.], 'coeff': [0.451,4.039]}, + }}, 'liu2016-intg': {'altname': ['liu-intg','liu16-intg'],'bibcode': '2016ApJ...833...96L', 'sptoffset': 10., 'method': 'polynomial', 'filters': { 'MKO_Y': {'fitunc' : 0.36, 'range' : [20., 27.], 'coeff': [0.562,6.778]}, 'MKO_J': {'fitunc' : 0.50, 'range' : [20., 27.], 'coeff': [0.537,5.941]}, @@ -1384,13 +1402,13 @@ '2MASS_H': {'fitunc' : 0.47, 'range' : [20., 27.], 'coeff': [0.411,6.552]}, '2MASS_KS': {'fitunc' : 0.36, 'range' : [20., 27.], 'coeff': [0.356,6.573]}, 'WISE_W1': {'fitunc' : 0.13, 'range' : [20., 27.], 'coeff': [0.237,7.436]}, - 'WISE_W2': {'fitunc' : 0.18, 'range' : [20., 27.], 'coeff': [0.193,7.561]}}, - }, + 'WISE_W2': {'fitunc' : 0.18, 'range' : [20., 27.], 'coeff': [0.193,7.561]}, + }}, 'looper2008': {'altname': ['looper','looper08'],'bibcode': '2008ApJ...685.1183L', 'sptoffset': 20, 'method': 'polynomial', 'filters': { '2MASS_J': {'fitunc' : 0.29, 'range' : [20., 38.], 'coeff': [-5.462e-6,2.595e-4,-3.915e-3,1.663e-2,3.690e-2,1.255e-1,11.817]}, '2MASS_H': {'fitunc' : 0.29, 'range' : [20., 38.], 'coeff' : [-4.218e-6,1.987e-4,-2.970e-3,1.261e-2,3.032e-2,1.125e-1,11.010]}, - '2MASS_KS': {'fitunc' : 0.33, 'range' : [20., 38.], 'coeff' : 
[-4.104e-6,1.911e-4,-2.864e-3,1.299e-2,2.565e-2,7.369e-2,10.521]}}, - }, + '2MASS_KS': {'fitunc' : 0.33, 'range' : [20., 38.], 'coeff' : [-4.104e-6,1.911e-4,-2.864e-3,1.299e-2,2.565e-2,7.369e-2,10.521]}, + }}, 'pecaut2013': {'altname': ['mamajek','pecaut','pecaut13'], 'reference': 'Pecaut & Mamajek (2013)','bibcode':'2013ApJS..208....9P', 'url': 'http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt','sptoffset': 0, 'method': 'interpolate', 'filters': { 'BESSEL_V': {\ 'spt': [0.,0.5,1.,1.5,2.,2.5,3.,3.5,4.,4.5,5.,5.5,6.,6.5,7.,8.,9.,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20.,21.,22.,23.,24.,25.], \ @@ -1407,34 +1425,53 @@ '2MASS_KS': {\ 'spt': [0.,0.5,1.,1.5,2.,2.5,3.,3.5,4.,4.5,5.,5.5,6.,6.5,7.,8.,9.,10,10.5,11,11.5,12,12.5,13,13.5,14,14.5,15,15.5,16,16.5,17,17.5,18,18.5,19,19.5,20.,21.,22.,23.,24.,25.,26.,27.,28.,29.,30.,31.,32.,33.,34.,34.5,35.,35.5,36.,37.,37.5,38.,39.,39.5,40.5,41.,41.5,42.],\ 'values': [3.81, 3.82, 3.87, 3.9, 4.04, 4.07, 4.16, 4.21, 4.25, 4.26, 4.48, 4.57, 4.66, 4.78, 4.82, 4.98, 5.11, 5.22, 5.36, 5.67, 5.85, 6.06, 6.27, 6.54, 7.19, 7.55, 7.93, 8.36, 9.01, 9.32, 9.47, 9.76, 9.97, 10.11, 10.22, 10.3, 10.45, 10.55, 10.8, 10.9, 11.3, 11.4, 11.82, 12.27, 12.62, 12.7, 12.74, 12.9, 12.87, 13.19, 13.69, 14.51, 14.66, 14.9, 15.18, 15.54, 16.36, 16.85, 17.43, 18.48, 19.32, 21.5, 23, 23.5, 24],\ - 'rms': numpy.zeros(65)+0.05}}, - }, + 'rms': numpy.zeros(65)+0.05}, + }}, 'tinney2003': {'altname': ['tinney','tinney03'],'bibcode': '2003AJ....126..975T', 'sptoffset': 10, 'method': 'polynomial', 'filters': { 'COUSINS_I': {'fitunc' : 0.37, 'range' : [20., 37.5], 'coeff': [-2.49821e-6,1.04398e-3,-6.49719e-2,1.56038,-1.58296e1,7.22089e1]}, 'UKIDSS_Z': {'fitunc' : 0.29, 'range' : [20., 37.5], 'coeff': [-9.97226e-7,1.05950e-4,-4.57019e-3,1.02898e-1,-1.29357e0,8.96822e0,-3.08010e1,4.99447e1]}, 'UKIDSS_K': {'fitunc' : 0.40, 'range' : [20., 37.5], 'coeff': [1.14139e-5,-8.86885e-4,2.68071e-2,-3.89554e-1,2.95440e0,8.14626e-1]}, '2MASS_KS': {'fitunc' : 0.38, 'range' : [20., 37.5], 'coeff': [-1.25074e-5,1.63124e-3,-7.42418e-2,1.54509,-1.47407e1,6.27861e1]}, 'UKIRT_J': {'fitunc' : 0.30, 'range' : [20., 37.5], 'coeff': [-9.91110e-7,1.05811e-4,-4.58399e-3,1.03572e-1,-1.30526e0,9.06701e0,-3.13411e1,5.04642e1]}, - '2MASS_J': {'fitunc' : 0.36, 'range' : [20., 37.5], 'coeff': [-2.80824e-6,3.41146e-4,-1.73848e-2,4.82120e-1,-7.86911,7.57222e1,-3.98105e2,8.94012e2]}}, - }, + '2MASS_J': {'fitunc' : 0.36, 'range' : [20., 37.5], 'coeff': [-2.80824e-6,3.41146e-4,-1.73848e-2,4.82120e-1,-7.86911,7.57222e1,-3.98105e2,8.94012e2]}, + }}, 'tinney2014': {'altname': ['tinney14'],'bibcode': '2014ApJ...796...39T', 'sptoffset': 0, 'method': 'interpolate', 'filters': { 'MKO_J': {'spt': [36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,42], 'values': [15.22,15.49,16.39,16.66,17.9,18.35,19.08,20.32,22.39,22.18,25.76], 'rms': [0.31,0.37,0.72,0.36,0.46,0.9,0.97,1.25,1.,0.76,3.52]}, - 'WISE_W2': {'spt': [36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,42], 'values': [12.86,13.28,13.39,13.44,13.75,13.92,14.28,14.65,15.2,14.78,15.76], 'rms': [0.17,0.48,0.27,0.23,0.22,0.24,0.46,0.35,1.,0.77,2.15]}}, - }, + 'WISE_W2': {'spt': [36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,42], 'values': [12.86,13.28,13.39,13.44,13.75,13.92,14.28,14.65,15.2,14.78,15.76], 'rms': [0.17,0.48,0.27,0.23,0.22,0.24,0.46,0.35,1.,0.77,2.15]}, + }}, 'zhang2013': {'altname': ['zhang','zhang13','zha13','zhang_dwarf'],'bibcode': '2013MNRAS.434.2664Z', 'sptoffset': 10., 'method': 'polynomial', 'filters': { 'SDSS_R': 
{'fitunc' : 0.71, 'range' : [11., 28.], 'coeff': [-1.6751e-4,8.4503e-3,-1.5169e-1,1.1111,-1.8452,9.8326]}, 'SDSS_I': {'fitunc' : 0.70, 'range' : [11., 28.], 'coeff': [-1.0634e-4,5.3482e-3,-9.6139e-2,7.1114e-1,-1.0400,8.6772]}, 'SDSS_Z': {'fitunc' : 0.60, 'range' : [11., 28.], 'coeff': [-6.2600e-5,2.9652e-3,-4.9888e-2,3.3379e-1,-4.7257e-2,7.8303]}, '2MASS_J': {'fitunc' : 0.52, 'range' : [11., 29.], 'coeff': [-5.3057e-5,2.7283e-3,-4.9840e-2,3.6508e-1,-2.9785e-1,6.2826]}, '2MASS_H': {'fitunc' : 0.54, 'range' : [11., 29.], 'coeff': [-5.8608e-5,3.0343e-3,-5.5512e-2,4.0397e-1,-3.9566e-1,5.7798]}, - '2MASS_KS': {'fitunc' : 0.53, 'range' : [11., 29.], 'coeff': [-5.5258e-5,2.8626e-3,-5.2136e-2,3.7299e-1,-3.0303e-1,5.4490]}}, - }, - 'zhang2017': {'altname': ['zhang17','zha17','zhang_subdwarf'],'bibcode': '2017MNRAS.464.3040Z', 'sptoffset': 10., 'method': 'polynomial', 'filters': { + '2MASS_KS': {'fitunc' : 0.53, 'range' : [11., 29.], 'coeff': [-5.5258e-5,2.8626e-3,-5.2136e-2,3.7299e-1,-3.0303e-1,5.4490]}, + }}, + 'zhang2017': {'altname': ['zhang17','zha17'],'bibcode': '2017MNRAS.464.3040Z', 'sptoffset': 10., 'method': 'polynomial', 'filters': { 'MKO_J': {'fitunc' : 0.40, 'range' : [10., 27.], 'coeff': [8.53625e-4,-1.76459e-2,3.17384e-1,8.64788]}, 'MKO_H': {'fitunc' : 0.40, 'range' : [10., 27.], 'coeff': [2.90020e-4,-4.54248e-3,2.71013e-1,8.19731]}, '2MASS_J': {'fitunc' : 0.40, 'range' : [10., 27.], 'coeff': [8.48172e-4,-1.75984e-2,3.16187e-1,8.68342]}, - '2MASS_H': {'fitunc' : 0.41, 'range' : [10., 27.], 'coeff': [4.32261e-4,-7.53663e-3,2.81607e-1,8.18494]}}, - }, + '2MASS_H': {'fitunc' : 0.41, 'range' : [10., 27.], 'coeff': [4.32261e-4,-7.53663e-3,2.81607e-1,8.18494]}, + }}, + 'zhang2019lsd': {'altname': ['zhang19lsd','zha19lsd','zhang_lsubdwarf','zhang_lsd'],'bibcode': '2019MNRAS.486.1260Z', 'sptoffset': 20., 'method': 'polynomial', 'filters': { + 'MKO_Y': {'fitunc' : 0.1063, 'range' : [20., 27.], 'coeff': [0.23129,12.1660]}, + 'WISE_W1': {'fitunc' : 0.1735, 'range' : [20., 27.], 'coeff': [0.1787,10.3005]}, + 'WISE_W2': {'fitunc' : 0.1647, 'range' : [20., 27.], 'coeff': [0.1830,9.9392]}, + }}, + 'zhang2019tsd': {'altname': ['zhang19tsd','zha19tsd','zhang_tsubdwarf','zhang_tsd'],'bibcode': '2019MNRAS.486.1260Z', 'sptoffset': 30., 'method': 'polynomial', 'filters': { + 'MKO_Y': {'fitunc' : 0.6426, 'range' : [35.5, 38.], 'coeff': [0.8742,11.2935]}, + 'MKO_J': {'fitunc' : 0.8135, 'range' : [35.5, 39.], 'coeff': [1.2294,7.9159]}, + 'MKO_H': {'fitunc' : 0.7675, 'range' : [35.5, 39.], 'coeff': [1.0964,9.2711]}, + 'MKO_K': {'fitunc' : 0.7876, 'range' : [35.5, 38.], 'coeff': [1.2030,9.1042]}, + 'MKO_KS': {'fitunc' : 0.7865, 'range' : [35.5, 38.], 'coeff': [1.1996,8.9844]}, + 'WISE_W1': {'fitunc' : 0.4584, 'range' : [35.5, 39.], 'coeff': [0.7678,10.5810]}, + 'WISE_W2': {'fitunc' : 0.2887, 'range' : [35.5, 39.], 'coeff': [0.4232,10.2525]}, + }}, + 'zhang2019esd': {'altname': ['zhang19esd','zha19esd','zhang_esubdwarf','zhang_esd','zhang_usd'],'bibcode': '2019MNRAS.486.1260Z', 'sptoffset': 20., 'method': 'polynomial', 'filters': { + 'MKO_Y': {'fitunc' : 0.122, 'range' : [20., 23.], 'coeff': [0.2443,11.5748]}, + 'WISE_W1': {'fitunc' : 0.1084, 'range' : [20., 27.], 'coeff': [0.2610,10.0081]}, + 'WISE_W2': {'fitunc' : 0.1213, 'range' : [20., 27.], 'coeff': [0.2250,9.7670]}, + }}, } # Empirical relations - SpT to chi value (Halpha EW to LHa/Lbol) diff --git a/splat/plot.py b/splat/plot.py index 29cd3ae26c..5770293d49 100755 --- a/splat/plot.py +++ b/splat/plot.py @@ -456,7 +456,7 @@ def 
plotSpectrum(inp,xrng=[],yrng=[],xlabel='',ylabel='',xlog=False,ylog=False,g for k in ['multilayout','multiLayout','multi_layout']: layout=kwargs.get(k,layout) for k in ['file','filename']: output=kwargs.get(k,output) if not isinstance(output,str): output='' - filetype = '' + filetype = '.pdf' if output!='': filetype=output.split('.')[-1] if comparison != None and isinstance(comparison,splat.Spectrum) == False and isinstance(comparison,list) == False: @@ -803,12 +803,13 @@ def plotSpectrum(inp,xrng=[],yrng=[],xlabel='',ylabel='',xlog=False,ylog=False,g ax.axis(bound) # save to file or display +# ERROR HERE - CHECK WHAT FILES if multipage == False: if files[plts] != '' and (plts % nplot == 3 or plts == len(splist)-1): if kwargs.get('tight',True) == True: - plt.savefig(files[plts], format=filetype, bbox_inches='tight') + plt.savefig(files[plts], bbox_inches='tight') else: - plt.savefig(files[plts], format=filetype) + plt.savefig(files[plts]) if output == '' and not kwargs.get('web',False): plt.show() if (kwargs.get('interactive',False) != False): plt.ion() diff --git a/splat/utilities.py b/splat/utilities.py index 8d818bf6f1..16a8fc1dee 100644 --- a/splat/utilities.py +++ b/splat/utilities.py @@ -10,6 +10,7 @@ import base64 import copy import os +import pandas import re import requests import string @@ -2261,6 +2262,67 @@ def checkDBCoordinates(db,designation_keyword='DESIGNATION',ra_keyword='RA',dec_ if shortname_keyword not in list(db.keys()): db[shortname_keyword] = [designationToShortName(d) for d in db[designation_keyword]] return db + + +def keySearch(keys, key_name='KEY', db=pandas.DataFrame(), verbose=True): + ''' + Purpose + ------- + + General purpose function that searches a pandas database for values of keys that match in + key_name column. Use by `keySource()`_ and `keySpectrum()`_. 
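+
+    A minimal illustrative call on a toy DataFrame (the column name and key values
+    below are hypothetical):
+
+    >>> import pandas
+    >>> from splat.utilities import keySearch
+    >>> db = pandas.DataFrame({'SOURCE_KEY': [10001, 10002, 10003]})
+    >>> keySearch([10001, 10003], key_name='SOURCE_KEY', db=db)['SOURCE_KEY'].tolist()
+    [10001, 10003]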
+ + Parameters + ---------- + + keys : int or list + integer or list of integers corresponding to keys + + key_name = 'KEY' : string [optional] + name of column to search + + db = blank pandas DataFrame : pandas DataFrame [optional] + pandas data Frame to search; code checks that it contains key_name column + + verbose = True : boolean [optional] + set to True to have program return verbose output + + Outputs + ------- + + pandas DataFrame containing the rows that match input keys, or empty pandas DataFrame + + Example + ------- + + TBD + + Dependencies + ------------ + None + + ''' +# check db is a pandas DataFrame, or try to convert + if isinstance(db,pandas.DataFrame) == False: + try: dbconv = pandas.DataFrame(db) + except: pass + else: dbconv = copy.deepcopy(db) + if isinstance(dbconv,pandas.DataFrame) == False: + if verbose==True: print('Passed database is not a pandas dataframe') + return pandas.DataFrame() + +# check key is in data frame + if key_name not in list(dbconv.columns): + raise ValueError('Cannot find key column {} in dataframe'.format(key_name)) + +# vectorize and make integer + if isinstance(keys,list) == False: keys = [keys] + keys = [int(k) for k in keys] + +# search dataframe + sdb = dbconv[[x in keys for x in dbconv[key_name]]] + if len(sdb) == 0 and verbose==True: print('No sources found with key(s) = {}'.format(*keys)) + return sdb ##################################################### diff --git a/tutorials/basic_spectral_analysis.ipynb b/tutorials/basic_spectral_analysis.ipynb index 9a4aa653fe..52c2dc0b6d 100644 --- a/tutorials/basic_spectral_analysis.ipynb +++ b/tutorials/basic_spectral_analysis.ipynb @@ -10,7 +10,7 @@ "Adam Burgasser\n", "\n", "## Version date\n", - "18 January 2022\n", + "27 May 2022\n", "\n", "## Learning Goals\n", "* Read in a spectrum from the SPLAT database or externally (splat.searchLibrary, splat.getSpectrum)\n", @@ -37,7 +37,7 @@ "source": [ "# Starting off\n", "\n", - "Let's make sure the code is properly downloaded through the import statements; see http://pono.ucsd.edu/~adam/browndwarfs/splat/ for more detail on the proper installation procedures" + "Let's make sure the code is properly downloaded through the import statements; see http://splat.physics.ucsd.edu/splat/ for more detail on the proper installation procedures" ] }, { @@ -54,6 +54,7 @@ "\n", "# other useful imports\n", "import matplotlib.pyplot as plt\n", + "%matplotlib inline\n", "import numpy as np\n", "import pandas\n", "import astropy.units as u\n", @@ -67,7 +68,7 @@ "metadata": {}, "outputs": [], "source": [ - "# check what version you are using\n", + "# check what version you are using - should be later than 2023.05\n", "splat.VERSION" ] }, @@ -78,7 +79,7 @@ "outputs": [], "source": [ "# check that you have some spectra in the library\n", - "splat.DB_SOURCES" + "splat.DB_SPECTRA" ] }, { @@ -92,12 +93,13 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "# Reading in and visualizing spectra\n", - "\n", - "SPLAT contains a built-in library of published SpeX prism spectra of ultracool dwarfs. It is also possible to download additional spectral datasets and read in your own spectrum or a spectrum from an website. Once you've read a spectrum into a Spectrum object, you can use the built-in features to visualize the spectrum." + "# more information about the package\n", + "help(splat)" ] }, { @@ -106,7 +108,17 @@ "metadata": {}, "outputs": [], "source": [ - "splat.getSpectrum?" 
+ "# more information about the sub-packages\n", + "help(splat.core)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Reading in and visualizing spectra\n", + "\n", + "SPLAT contains a built-in library of published SpeX prism spectra of ultracool dwarfs. It is also possible to download additional spectral datasets and read in your own spectrum or a spectrum from an website. Once you've read a spectrum into a Spectrum object, you can use the built-in features to visualize the spectrum." ] }, { @@ -128,6 +140,7 @@ "outputs": [], "source": [ "# get some information about this spectrum using info()\n", + "sp = splat.getSpectrum(lucky=True)[0]\n", "sp.info()" ] }, @@ -149,7 +162,7 @@ "metadata": {}, "outputs": [], "source": [ - "# grab a very specific spectrum based on its source ID\n", + "# grab a very specific spectrum based on its data ID\n", "sp = splat.Spectrum(10001)\n", "sp.plot()" ] @@ -207,8 +220,8 @@ "outputs": [], "source": [ "# read in a spectrum from an online fits file\n", - "f = download_file('http://pono.ucsd.edu/~adam/data/spex_test/spex_prism_PSOJ0077921+578267_120924.fits',cache=\"update\")\n", - "sp = splat.Spectrum(file=f,file_type='fits',name='PSOJ0077921+578267')\n", + "url = 'http://splat.physics.ucsd.edu/splat/spexprism/spectra/spex-prism_2MASPJ0345432+254023_20030905_BUR06B.txt'\n", + "sp = splat.Spectrum(file=url,name='2MASS J0345+2540')\n", "sp.plot()" ] }, @@ -372,7 +385,7 @@ "outputs": [], "source": [ "# mask part of the spectrum\n", - "sp.maskFlux([1.8,2.0])\n", + "sp.maskWave([1.8,2.0])\n", "sp.plot()" ] }, @@ -435,9 +448,9 @@ "metadata": {}, "outputs": [], "source": [ - "# read in two M-type spectra, normalize them and add them together\n", + "# read in M and L dwarf spectra, normalize them and add them together\n", "sp1 = splat.getSpectrum(spt=['M5','M9'],lucky=True)[0]\n", - "sp2 = splat.getSpectrum(spt=['M5','M9'],lucky=True)[0]\n", + "sp2 = splat.getSpectrum(spt=['L0','L5'],lucky=True)[0]\n", "sp1.normalize()\n", "sp2.normalize()\n", "\n", @@ -455,15 +468,6 @@ "plt.ylabel('Normalized Flux Density')\n" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sp3.plot()" - ] - }, { "cell_type": "code", "execution_count": null, @@ -480,19 +484,20 @@ "sp3 = sp1-sp2\n", "\n", "# plot the individual spectra and their difference in two panels\n", + "# note the use of the .value suffix\n", "plt.subplot(211)\n", - "plt.plot(sp1.wave,sp1.flux,'b-')\n", - "plt.plot(sp2.wave,sp2.flux,'g-')\n", + "plt.plot(sp1.wave.value,sp1.flux.value,'b-')\n", + "plt.plot(sp2.wave.value,sp2.flux.value,'g-')\n", "#plt.ylim([0,1.2])\n", "plt.xlim([0.8,2.4])\n", "plt.ylabel('Normalized Flux Density')\n", "plt.legend([sp1.name,sp2.name])\n", "\n", "plt.subplot(212)\n", - "plt.plot(sp3.wave,sp3.flux,'k-')\n", + "plt.plot(sp3.wave.value,sp3.flux.value,'k-')\n", "plt.legend(['Difference'])\n", "plt.plot([0.8,2.4],[0,0],'k--')\n", - "plt.fill_between(sp3.wave,sp3.noise,-1.*sp3.noise,color='k',alpha=0.3)\n", + "plt.fill_between(sp3.wave.value,sp3.noise.value,-1.*sp3.noise.value,color='k',alpha=0.3)\n", "#plt.ylim([-0.5,0.5])\n", "plt.xlim([0.8,2.4])\n", "plt.xlabel('Wavelength (micron)')\n", @@ -514,25 +519,24 @@ "\n", "# fit to a line using np.polyfit\n", "fit = np.polyfit(sp.wave.value,sp.flux.value,1)\n", - "print(fit)\n", "\n", "# generate a spectrum that is this linear function\n", - "sp_continuum = splat.Spectrum(wave=sp.wave,flux=np.polyval(fit,sp.wave.value)*sp.flux.unit)\n", + "sp_continuum = 
@@ -514,25 +519,24 @@
     "\n",
     "# fit to a line using np.polyfit\n",
     "fit = np.polyfit(sp.wave.value,sp.flux.value,1)\n",
-    "print(fit)\n",
     "\n",
     "# generate a spectrum that is this linear function\n",
-    "sp_continuum = splat.Spectrum(wave=sp.wave,flux=np.polyval(fit,sp.wave.value)*sp.flux.unit)\n",
+    "sp_continuum = splat.Spectrum(wave=sp.wave,flux=np.polyval(fit,sp.wave.value))\n",
     "\n",
     "# divide out this continuum\n",
     "sp_normalized = sp/sp_continuum\n",
     "\n",
     "# plot the results\n",
     "plt.subplot(211)\n",
-    "plt.plot(sp.wave,sp.flux,'k-')\n",
-    "plt.plot(sp_continuum.wave,sp_continuum.flux,'g-')\n",
+    "plt.plot(sp.wave.value,sp.flux.value,'k-')\n",
+    "plt.plot(sp_continuum.wave.value,sp_continuum.flux.value,'g-')\n",
     "plt.ylim([0,np.nanquantile(sp.flux.value,0.98)*1.5])\n",
     "plt.xlim(fit_range)\n",
     "plt.ylabel('Normalized Flux Density')\n",
     "plt.legend([sp.name,'Continuum'])\n",
     "\n",
     "plt.subplot(212)\n",
-    "plt.plot(sp_normalized.wave,sp_normalized.flux,'k-')\n",
+    "plt.plot(sp_normalized.wave.value,sp_normalized.flux.value,'k-')\n",
     "plt.legend(['Continuum-Corrected'])\n",
     "plt.plot(fit_range,[1,1],'k--')\n",
     "plt.ylim([0.5,1.5])\n",
@@ -655,7 +659,7 @@
    "outputs": [],
    "source": [
     "# learn about the options for this routine\n",
-    "splat.classifyByStandard()?"
+    "splat.classifyByStandard?"
    ]
   },
   {
@@ -708,7 +712,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# there are other standard sets we can read in\n",
+    "# there are other standard sets we can read in, including subdwarfs and young sources\n",
     "splat.initializeStandards(sd=True)\n",
     "splat.STDS_SD_SPEX"
    ]
   },
@@ -719,22 +723,38 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# try classifying a subdwarf with these\n",
+    "# now try classifying a subdwarf with these\n",
+    "# note that we'll need to initialize subdwarf standards\n",
     "#sp = splat.getSpectrum(spt='M7',subdwarf=True,lucky=True)[0]\n",
     "#splat.classifyByStandard(sp,method='kirkpatrick',plot=True)\n",
-    "sp = splat.getSpectrum(spt='sdM8',subdwarf=True,lucky=True)[0]\n",
-    "splat.classifyByStandard(sp,sd=True,plot=True)\n"
+    "sp = splat.getSpectrum(spt='sdM6',subdwarf=True,lucky=True)[0]\n",
+    "splat.classifyByStandard(sp,subdwarf=True,plot=True)\n"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Classify by indices\n",
+    "## Measuring and classifying by indices\n",
     "\n",
     "You can also use spectral indices to classify spectra; these indices sample specific features, such as molecular absorption bands"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# do a basic index measurement\n",
+    "# read in a random T5\n",
+    "sp = splat.getSpectrum(spt='T5',lucky=True)[0]\n",
+    "\n",
+    "# measure the ratio of two regions - first range is numerator second range is denominator\n",
+    "ind = splat.measureIndex(sp,[[1.1,1.2],[1.22,1.32]],method='ratio',sample='integrate')\n",
+    "print(ind)"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
    "metadata": {},
@@ -795,6 +815,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# classify by Kirkpatrick method\n",
     "splat.classifyByStandard(sp,method='kirkpatrick',plot=True)"
    ]
   },
@@ -804,6 +825,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "# is it young?\n",
     "splat.classifyGravity(sp,verbose=True)"
    ]
   },
@@ -813,106 +835,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "splat.classifyByStandard(sp,lowg=True,plot=True)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Index measurement\n",
-    "\n",
-    "SPLAT has built-in functions to do index measurement, including literature-defined index sets and empirical relations to turn these into classifications"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# do a basic index measurement\n",
-    "# read in a random T5\n",
-    "sp = splat.getSpectrum(spt='T5',lucky=True)[0]\n",
-    "\n",
-    "# measure the ratio of two regions - first range is numerator second range is denominator\n",
-    "ind = splat.measureIndex(sp,[[1.1,1.2],[1.22,1.32]],method='ratio',sample='integrate')\n",
-    "print(ind)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# you can visualize the placement of these indices by setting plot=True\n",
-    "# NOTE: THIS IS CURRENTLY THROWING ERRORS SO DON'T RUN!\n",
-    "\n",
-    "#ind = splat.measureIndex(sp,[[1.1,1.2],[1.22,1.32]],method='ratio',sample='integrate',plot=True)\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# measure an index set that is pre-defined in the literature\n",
-    "# this returns a dictionary of values\n",
-    "splat.measureIndexSet(sp,ref='burgasser',verbose=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# there is a handy information function to find out what index sets are currently included\n",
-    "spem.info_indices()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# you can use these indices to classify an object\n",
-    "splat.classifyByIndex(sp,ref='burgasser',verbose=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# indices are also used for gravity classification of young sources\n",
-    "sp = splat.getSpectrum(spt='L2',young=True,lucky=True)[0]\n",
-    "splat.classifyGravity(sp,verbose=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# you can compare to alternate standards as well\n",
-    "# this command compares to a suite of subdwarf standards\n",
-    "splat.classifyByStandard(sp2,plot=True,sd=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# this command compares to a suite of low gravity standards\n",
-    "splat.classifyByStandard(sp2,plot=True,vlg=True)"
+    "# classify with low gravity standards\n",
+    "splat.initializeStandards(young=True)\n",
+    "splat.classifyByStandard(sp,young=True,plot=True)"
    ]
   },
   {
@@ -931,7 +856,7 @@
    "outputs": [],
    "source": [
     "# first read in the spectrum of 2MASS J0518-2828 by seaching on the shortname 'J0518-2828'\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -941,7 +866,7 @@
    "outputs": [],
    "source": [
     "# measure the spectral indices from burgasser et al.\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -951,7 +876,7 @@
    "outputs": [],
    "source": [
     "# determine the spectral type using the kirkpatrick method\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -961,7 +886,7 @@
    "outputs": [],
    "source": [
     "# read in spectral templates for the primary and secondary types\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -976,7 +901,7 @@
     "print(mag_L5,mag_T5)\n",
     "\n",
     "# now use the magnitudes to scale the template spectra\n",
-    "[enter here]"
+    "# [enter here]"
    ]
   },
   {
@@ -986,7 +911,7 @@
    "outputs": [],
    "source": [
     "# add the template spectra together to make a binary template\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -996,7 +921,7 @@
    "outputs": [],
    "source": [
     "# now compare the binary template and J0518-2828 spectrum using compareSpectra, and plot the result\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -1008,7 +933,7 @@
     "# BONUS: do the above steps a few times until you get a \"best\" fit, and plot the \n",
     "# appropriately scaled primary, secondary, binary templates and J0518-2828, and\n",
     "# and the difference between J0518-2828 and the binary template to compare them\n",
-    "[enter code here]"
+    "# [enter code here]"
    ]
   },
   {
@@ -1163,7 +1088,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -1177,7 +1102,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.7"
+   "version": "3.9.7"
   }
  },
 "nbformat": 4,
diff --git a/tutorials/spectral_database_query.ipynb b/tutorials/spectral_database_query.ipynb
index e175d05eef..79b0078f2e 100644
--- a/tutorials/spectral_database_query.ipynb
+++ b/tutorials/spectral_database_query.ipynb
@@ -10,7 +10,7 @@
     "Adam Burgasser\n",
     "\n",
     "## Version date\n",
-    "22 July 2021\n",
+    "27 May 2023\n",
     "\n",
     "## Learning Goals\n",
     "* Explore some of the data spreadsheet manipulation tools built into SPLAT (``splat.database.prepDB``)\n",
@@ -43,7 +43,8 @@
     "import copy\n",
     "import numpy as np\n",
     "import pandas\n",
-    "import matplotlib.pyplot as plt"
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline"
    ]
   },
   {
@@ -52,7 +53,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# make sure this is at least 2021.07.22\n",
+    "# make sure this is at least 2023.05\n",
     "splat.VERSION"
    ]
   },
@@ -465,18 +466,11 @@
     "fig.tight_layout()\n",
     " "
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
 "metadata": {
  "kernelspec": {
-  "display_name": "Python 3",
+  "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
@@ -490,7 +484,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.7.7"
+  "version": "3.9.7"
  }
 },
 "nbformat": 4,
diff --git a/tutorials/spectral_datasets.ipynb b/tutorials/spectral_datasets.ipynb
index 365d2b5a18..8cff4b3416 100644
--- a/tutorials/spectral_datasets.ipynb
+++ b/tutorials/spectral_datasets.ipynb
@@ -10,7 +10,7 @@
     "Adam Burgasser\n",
     "\n",
     "## Version date\n",
-    "14 July 2021\n",
+    "27 May 2023\n",
     "\n",
     "## Learning Goals\n",
     "* Select sets of spectra from the SPLAT library (splat.searchLibrary)\n",
@@ -41,7 +41,8 @@
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "import pandas\n",
-    "import astropy.units as u"
+    "import astropy.units as u\n",
+    "%matplotlib inline\n"
    ]
   },
   {
@@ -384,6 +385,7 @@
    "source": [
     "# here's an example of comparing all of our sources to one particular comparison source, the sdL0.0 standard\n",
     "# The subdwarf standards are contained in the splat.STDS_SD_SPEX variable\n",
+    "splat.initializeStandards(all=True)\n",
     "comptype = 'sdL0.0'\n",
     "spcomp = splat.STDS_SD_SPEX[comptype]\n",
     "spcomp.normalize([0.9,1.4])\n",
@@ -395,7 +397,7 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -409,7 +411,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-   "version": "3.7.7"
+   "version": "3.9.7"
  }
 },
 "nbformat": 4,