# -*- coding: utf-8 -*-
'''
This module provides access to the HITRAN data.
Data are downloaded and cached locally.
The module serves as a simple database-manager front end.
The API is intended to be RESTful: the local API interacts
with the remote data server by sending RESTful queries
(API->remote) and receiving data, preferably in text format
(remote->API).
Objects are represented by plain structures/dicts, since these
are available in almost any programming language.
The API tries to retain a functional style.
'''
import sys
import json
import os, os.path
import re
from os import listdir
import numpy as np
from numpy import zeros,array,setdiff1d,ndarray,arange
from numpy import place,where,real,polyval
from numpy import complex128,int64,float64,float32
from numpy import sqrt,abs,exp,pi,log,sin,cos,tan
from numpy import convolve
from numpy import flipud
from numpy.fft import fft,fftshift
from numpy import linspace,floor
from numpy import any,minimum,maximum
from numpy import sort as npsort
from bisect import bisect
from warnings import warn,simplefilter
import pydoc
# Enable warning repetitions
simplefilter('always', UserWarning)
# Python 3 compatibility
try:
import urllib.request as urllib2
except ImportError:
import urllib2
if 'io' in sys.modules: # define open using Linux-style line endings
import io
def open_(*args,**argv):
argv.update(dict(newline='\n'))
return io.open(*args,**argv)
else:
open_ = open
HAPI_VERSION = '1.1.1.0'; __version__ = HAPI_VERSION
HAPI_HISTORY = [
'FIXED GRID BUG (ver. 1.1.0.1)',
'FIXED OUTPUT FORMAT FOR CROSS-SECTIONS (ver. 1.1.0.1)',
'ADDED CPF BY SCHREIER (JQSRT_112_2011) (ver. 1.1.0.2)',
'OPTIMIZED EXPRESSION EVALUATIONS FOR SELECT (ver. 1.1.0.3)',
'ADDED SUPPORT FOR MIXTURES (ver. 1.1.0.4)',
'ADDED SUPPORT FOR USER-DEFINED ENV DEPENDENCES (ver. 1.1.0.5)',
'ADDED PROFILE SELECTION (ALPHA) (ver. 1.1.0.6)',
'ADDED METADATA FOR HTP, FIXED NORMALIZATION IN CONVOLVESPECTRUMSAME (ver. 1.1.0.7)',
'FIXED A "LONELY HEADER" BUG IN CACHE2STORAGE (ver. 1.1.0.7.1)',
'ADDED SUPPORT FOR PHOSGENE AND CYANOGEN (ver. 1.1.0.7.2)',
'OPTIMIZED STORAGE2CACHE (by Nils-Holger Loeber) (ver. 1.1.0.7.3)',
'ADDED SKIPABLE PARAMETERS IN HEADERS (ver. 1.1.0.7.4)',
'ADDED SUPPORT FOR FORTRAN D-NOTATION (ver. 1.1.0.7.5)',
'ADDED SUPPORT FOR WEIRD-FORMATTED INTENSITY VALUES E.G. "2.700-164" (ver. 1.1.0.7.6)',
'ADDED TIPS-2017 (ver. 1.1.0.8)',
'ADDED SUPPORT FOR CUSTOM EXTENSIONS OF THE DATA FILES (ver. 1.1.0.8.1)',
'FIXED LINK TO (2,0) ISOTOPOLOGUE IN TIPS-2017 (ver. 1.1.0.8.2)',
'ADDED SAVEHEADER FUNCTION (ver. 1.1.0.8.3)',
'ADDED METADATA FOR SF6 (ver. 1.1.0.8.4)',
'ADDED D2O ISOTOPOLOGUE OF WATER TO DESCRIPTION (ver. 1.1.0.8.5)',
'FIXED LINE ENDINGS IN STORAGE2CACHE AND QUERYHITRAN (ver. 1.1.0.8.6)',
'ADDED SUPPORT FOR NON-INTEGER LOCAL ISO IDS (ver. 1.1.0.8.7)',
'FIXED PARAMETER NAME CASE BUG (by Robert J. Hargreaves) (ver. 1.1.0.8.8)',
'CAST LOCAL_ISO_ID=0 TO 10 FOR CARBON DIOXIDE (ver. 1.1.0.8.9)',
'USING NUMPY.ARRAYS FOR NUMERIC COLUMNS OF LOCAL_TABLE_CACHE (ver. 1.1.0.9.0)',
'ADDED DESCRIPTIONS FOR BROADENING BY H2O (ver. 1.1.0.9.1)',
'ADDED PROXY SUPPORT IN FETCH AND FETCH_BY_IDS (ver. 1.1.0.9.2)',
'ADDED LIMIT FOR NUMBER OF LINES DURING TABLE READ (ver. 1.1.0.9.3)',
'FIXED ABSOLUTE PATH BUG IN TABLE NAMES (ver. 1.1.0.9.4)',
'CORRECTED ABUNDANCE OF THE HD ISOTOPOLOGUE (ver. 1.1.0.9.5)',
'ADDED UNIFIED INTERFACES FOR ABSCOEF AND XSC CALCULATIONS (ver. 1.1.0.9.6)',
'ADDED PARLISTS FOR LINE MIXING (VOIGT AND SDVOIGT) (ver. 1.1.0.9.7)',
'ADDED SUPPORT FOR ROSENKRANZ LM PARAMETERS TO PCQSDHC AND LORENTZ (ver. 1.1.1.0)',
]
# version header
print('HAPI version: %s' % HAPI_VERSION)
print('To get the most up-to-date version please check http://hitran.org/hapi')
print('ATTENTION: Python versions of partition sums from TIPS-2017 are now available in HAPI code')
#print('ATTENTION: Python versions of partition sums from TIPS-2017 are available at http://hitran.org/suppl/TIPS/')
#print(' To use them in HAPI ver. 1.1.0.7, use partitionFunction parameter of the absorptionCoefficient_ routine.')
print('')
print(' It is free to use HAPI. If you use HAPI in your research or software development,')
print(' please cite it using the following reference:')
print(' R.V. Kochanov, I.E. Gordon, L.S. Rothman, P. Wcislo, C. Hill, J.S. Wilzewski,')
print(' HITRAN Application Programming Interface (HAPI): A comprehensive approach')
print(' to working with spectroscopic data, J. Quant. Spectrosc. Radiat. Transfer 177, 15-30 (2016)')
print(' DOI: 10.1016/j.jqsrt.2016.03.005')
# define precision
__ComplexType__ = complex128
__IntegerType__ = int64
__FloatType__ = float64
# define zero
cZero = __FloatType__(0.)
# physical constants
cBolts = 1.380648813E-16 # erg/K, CGS
cc = 2.99792458e10 # cm/s, CGS
hh = 6.626196e-27 # erg*s, CGS
# computational constants
cSqrtLn2divSqrtPi = 0.469718639319144059835
cLn2 = 0.6931471805599
cSqrtLn2 = 0.8325546111577
cSqrt2Ln2 = 1.1774100225
# initialize global variables
VARIABLES = {}
VARIABLES['DEBUG'] = False
if VARIABLES['DEBUG']: warn('DEBUG is set to True!')
GLOBAL_DEBUG = False
if GLOBAL_DEBUG: warn('GLOBAL_DEBUG is set to True!')
LOCAL_HOST = 'http://localhost'
# DEBUG switch
if GLOBAL_DEBUG:
GLOBAL_HOST = LOCAL_HOST+':8000' # localhost
else:
GLOBAL_HOST = 'http://hitran.org'
VARIABLES['PROXY'] = {}
# EXAMPLE OF PROXY:
# VARIABLES['PROXY'] = {'http': '127.0.0.1:80'}
# make it changeable
VARIABLES['GLOBAL_HOST'] = GLOBAL_HOST
# display the fetch URL (debug)
VARIABLES['DISPLAY_FETCH_URL'] = False
# In this "robust" version of arange the grid doesn't suffer
# from the shift of the nodes due to error accumulation.
# This effect is pronounced only if the step is sufficiently small.
def arange_(lower,upper,step):
npnt = floor((upper-lower)/step)+1
upper_new = lower + step*(npnt-1)
if abs((upper-upper_new)-step) < 1e-10:
upper_new += step
npnt += 1
return linspace(lower,upper_new,int(npnt))
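# EXAMPLE (illustration only): for lower=0, upper=10, step=0.01 the call
#   wn_grid = arange_(0.,10.,0.01)
# returns 1001 evenly spaced nodes including both endpoints (0 and 10),
# whereas a naive arange can lose the last node to round-off error.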
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# LOCAL DATABASE MANAGEMENT SYSTEM
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# DATABASE BACKEND: simple text files, parsed into Python lists
# Use a directory as a database. Each table is stored in a
# separate text file. Parameters in text are position-fixed.
BACKEND_DATABASE_NAME_DEFAULT = '.'
VARIABLES['BACKEND_DATABASE_NAME'] = BACKEND_DATABASE_NAME_DEFAULT
# For this node, the local DB is schema-dependent!
LOCAL_TABLE_CACHE = {
'sampletab' : { # table
'header' : { # header
'order' : ('column1','column2','column3'),
'format' : {
'column1' : '%10d',
'column2' : '%20f',
'column3' : '%30s'
},
'default' : {
'column1' : 0,
'column2' : 0.0,
'column3' : ''
},
'number_of_rows' : 3,
'size_in_bytes' : None,
'table_name' : 'sampletab',
'table_type' : 'strict'
}, # /header
'data' : {
'column1' : [1,2,3],
'column2' : [10.5,11.5,12.5],
'column3' : ['one','two','three']
}, # /data
} # /table
} # hash-map of tables
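# EXAMPLE (refers to the sample cache layout above): individual values are
# addressed as LOCAL_TABLE_CACHE[table]['data'][column][row], e.g.
#   LOCAL_TABLE_CACHE['sampletab']['data']['column2'][1]          # -> 11.5
#   LOCAL_TABLE_CACHE['sampletab']['header']['number_of_rows']    # -> 3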
# FORMAT CONVERSION LAYER
# converts between TRANSPORT_FORMAT and OBJECT_FORMAT
HITRAN_FORMAT_160 = {
'M' : {'pos' : 1, 'len' : 2, 'format' : '%2d' },
'I' : {'pos' : 3, 'len' : 1, 'format' : '%1d' },
'nu' : {'pos' : 4, 'len' : 12, 'format' : '%12f'},
'S' : {'pos' : 16, 'len' : 10, 'format' : '%10f'},
'R' : {'pos' : 26, 'len' : 0, 'format' : '%0f' },
'A' : {'pos' : 26, 'len' : 10, 'format' : '%10f'},
'gamma_air' : {'pos' : 36, 'len' : 5, 'format' : '%5f' },
'gamma_self' : {'pos' : 41, 'len' : 5, 'format' : '%5f' },
'E_' : {'pos' : 46, 'len' : 10, 'format' : '%10f'},
'n_air' : {'pos' : 56, 'len' : 4, 'format' : '%4f' },
'delta_air' : {'pos' : 60, 'len' : 8, 'format' : '%8f' },
'V' : {'pos' : 68, 'len' : 15, 'format' : '%15s'},
'V_' : {'pos' : 83, 'len' : 15, 'format' : '%15s'},
'Q' : {'pos' : 98, 'len' : 15, 'format' : '%15s'},
'Q_' : {'pos' : 113, 'len' : 15, 'format' : '%15s'},
'Ierr' : {'pos' : 128, 'len' : 6, 'format' : '%6s' },
'Iref' : {'pos' : 134, 'len' : 12, 'format' : '%12s'},
'flag' : {'pos' : 146, 'len' : 1, 'format' : '%1s' },
'g' : {'pos' : 147, 'len' : 7, 'format' : '%7f' },
'g_' : {'pos' : 154, 'len' : 7, 'format' : '%7f' }
}
# This should be generated from the server's response
HITRAN_DEFAULT_HEADER = {
"table_type": "column-fixed",
"size_in_bytes": -1,
"table_name": "###",
"number_of_rows": -1,
"order": [
"molec_id",
"local_iso_id",
"nu",
"sw",
"a",
"gamma_air",
"gamma_self",
"elower",
"n_air",
"delta_air",
"global_upper_quanta",
"global_lower_quanta",
"local_upper_quanta",
"local_lower_quanta",
"ierr",
"iref",
"line_mixing_flag",
"gp",
"gpp"
],
"format": {
"a": "%10.3E",
"gamma_air": "%5.4f",
"gp": "%7.1f",
"local_iso_id": "%1d",
"molec_id": "%2d",
"sw": "%10.3E",
"local_lower_quanta": "%15s",
"local_upper_quanta": "%15s",
"gpp": "%7.1f",
"elower": "%10.4f",
"n_air": "%4.2f",
"delta_air": "%8.6f",
"global_upper_quanta": "%15s",
"iref": "%12s",
"line_mixing_flag": "%1s",
"ierr": "%6s",
"nu": "%12.6f",
"gamma_self": "%5.3f",
"global_lower_quanta": "%15s"
},
"default": {
"a": 0.0,
"gamma_air": 0.0,
"gp": "FFF",
"local_iso_id": 0,
"molec_id": 0,
"sw": 0.0,
"local_lower_quanta": "000",
"local_upper_quanta": "000",
"gpp": "FFF",
"elower": 0.0,
"n_air": 0.0,
"delta_air": 0.0,
"global_upper_quanta": "000",
"iref": "EEE",
"line_mixing_flag": "EEE",
"ierr": "EEE",
"nu": 0.0,
"gamma_self": 0.0,
"global_lower_quanta": "000"
},
"description": {
"a": "Einstein A-coefficient in s-1",
"gamma_air": "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"gp": "Upper state degeneracy",
"local_iso_id": "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"molec_id": "The HITRAN integer ID for this molecule in all its isotopologue forms",
"sw": "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"local_lower_quanta": "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"local_upper_quanta": "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"gpp": "Lower state degeneracy",
"elower": "Lower-state energy",
"n_air": "Temperature exponent for the air-broadened HWHM",
"delta_air": "Pressure shift induced by air, referred to p=1 atm",
"global_upper_quanta": "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"iref": "Ordered list of reference identifiers for transition parameters",
"line_mixing_flag": "A flag indicating the presence of additional data and code relating to line-mixing",
"ierr": "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"nu": "Transition wavenumber",
"gamma_self": "Self-broadened HWHM at 1 atm pressure and 296 K",
"global_lower_quanta": "Electronic and vibrational quantum numbers and labels for the lower state of a transition"
},
"position": {
"molec_id": 0,
"local_iso_id": 2,
"nu": 3,
"sw": 15,
"a": 25,
"gamma_air": 35,
"gamma_self": 40,
"elower": 45,
"n_air": 55,
"delta_air": 59,
"global_upper_quanta": 67,
"global_lower_quanta": 82,
"local_upper_quanta": 97,
"local_lower_quanta": 112,
"ierr": 127,
"iref": 133,
"line_mixing_flag": 145,
"gp": 146,
"gpp": 153,
},
'cast': {
"molec_id": "uint8",
"local_iso_id": "uint8",
"nu": "float32",
"sw": "float62",
"a": "float62",
"gamma_air": "float16",
"gamma_self": "float16",
"elower": "float32",
"n_air": "float16",
"delta_air": "float16",
"global_upper_quanta": "str",
"global_lower_quanta": "str",
"local_upper_quanta": "str",
"local_upper_quanta": "str",
"ierr": "str",
"iref": "str",
"line_mixing_flag": "str",
"gp": "int16",
"gpp": "int16",
}
}
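# EXAMPLE (reading of the header above, not an API call): 'order' lists the
# columns of a 160-character .par record, 'position' gives the zero-based
# starting column of each field, and the field width is implied by 'format'.
# E.g. 'nu' starts at column 3 and is 12 characters wide ("%12.6f"),
# so it can be cut from a record string as line[3:15].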
PARAMETER_META_ = \
{
"global_iso_id" : {
"id" : 1,
"name" : "global_iso_id",
"name_html" : "Global isotopologue ID",
"table_name" : "",
"description" : "Unique integer ID of a particular isotopologue: every global isotopologue ID is unique to a particular species, even between different molecules. The number itself is, however arbitrary.",
"description_html" : "Unique integer ID of a particular isotopologue: every global isotopologue ID is unique to a particular species, even between different molecules. The number itself is, however arbitrary.",
"default_fmt" : "%5d",
"default_units" : "",
"data_type" : "int",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"molec_id" : {
"id" : 2,
"name" : "molec_id",
"name_html" : "Molecule ID",
"table_name" : "",
"description" : "The HITRAN integer ID for this molecule in all its isotopologue forms",
"description_html" : "The HITRAN integer ID for this molecule in all its isotopologue forms",
"default_fmt" : "%2d",
"default_units" : None,
"data_type" : "int",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"local_iso_id" : {
"id" : 3,
"name" : "local_iso_id",
"name_html" : "Isotopologue ID",
"table_name" : "",
"description" : "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"description_html" : "Integer ID of a particular Isotopologue, unique only to a given molecule, in order or abundance (1 = most abundant)",
"default_fmt" : "%1d",
"default_units" : "",
"data_type" : "int",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"nu" : {
"id" : 4,
"name" : "nu",
"name_html" : "ν",
"table_name" : "prm_nu",
"description" : "Transition wavenumber",
"description_html" : "Transition wavenumber",
"default_fmt" : "%12.6f",
"default_units" : "cm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"sw" : {
"id" : 5,
"name" : "sw",
"name_html" : "S",
"table_name" : "prm_sw",
"description" : "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"description_html" : "Line intensity, multiplied by isotopologue abundance, at T = 296 K",
"default_fmt" : "%10.3e",
"default_units" : "cm-1/(molec.cm-2)",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"a" : {
"id" : 6,
"name" : "a",
"name_html" : "A",
"table_name" : "prm_a",
"description" : "Einstein A-coefficient in s-1",
"description_html" : "Einstein A-coefficient",
"default_fmt" : "%10.3e",
"default_units" : "s-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"gamma_air" : {
"id" : 7,
"name" : "gamma_air",
"name_html" : "γair",
"table_name" : "prm_gamma_air",
"description" : "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"description_html" : "Air-broadened Lorentzian half-width at half-maximum at p = 1 atm and T = 296 K",
"default_fmt" : "%6.4f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"gamma_self" : {
"id" : 8,
"name" : "gamma_self",
"name_html" : "γself",
"table_name" : "prm_gamma_self",
"description" : "Self-broadened HWHM at 1 atm pressure and 296 K",
"description_html" : "Self-broadened HWHM at 1 atm pressure and 296 K",
"default_fmt" : "%5.3f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"n_air" : {
"id" : 9,
"name" : "n_air",
"name_html" : "nair",
"table_name" : "prm_n_air",
"description" : "Temperature exponent for the air-broadened HWHM",
"description_html" : "Temperature exponent for the air-broadened HWHM",
"default_fmt" : "%7.4f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"delta_air" : {
"id" : 10,
"name" : "delta_air",
"name_html" : "δair",
"table_name" : "prm_delta_air",
"description" : "Pressure shift induced by air, referred to p=1 atm",
"description_html" : "Pressure shift induced by air, referred to p=1 atm",
"default_fmt" : "%9.6f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"elower" : {
"id" : 11,
"name" : "elower",
"name_html" : "E\"",
"table_name" : "",
"description" : "Lower-state energy",
"description_html" : "Lower-state energy",
"default_fmt" : "%10.4f",
"default_units" : "cm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"gp" : {
"id" : 12,
"name" : "gp",
"name_html" : "g\'",
"table_name" : "",
"description" : "Upper state degeneracy",
"description_html" : "Upper state degeneracy",
"default_fmt" : "%5d",
"default_units" : "",
"data_type" : "int",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"gpp" : {
"id" : 13,
"name" : "gpp",
"name_html" : "g\"",
"table_name" : "",
"description" : "Lower state degeneracy",
"description_html" : "Lower state degeneracy",
"default_fmt" : "%5d",
"default_units" : "",
"data_type" : "int",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"global_upper_quanta" : {
"id" : 14,
"name" : "global_upper_quanta",
"name_html" : "Global upper quanta",
"table_name" : "",
"description" : "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"description_html" : "Electronic and vibrational quantum numbers and labels for the upper state of a transition",
"default_fmt" : "%15s",
"default_units" : None,
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"global_lower_quanta" : {
"id" : 15,
"name" : "global_lower_quanta",
"name_html" : "Global lower quanta",
"table_name" : "",
"description" : "Electronic and vibrational quantum numbers and labels for the lower state of a transition",
"description_html" : "Electronic and vibrational quantum numbers and labels for the lower state of a transition",
"default_fmt" : "%15s",
"default_units" : None,
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"local_upper_quanta" : {
"id" : 16,
"name" : "local_upper_quanta",
"name_html" : "Local upper quanta",
"table_name" : "",
"description" : "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"description_html" : "Rotational, hyperfine and other quantum numbers and labels for the upper state of a transition",
"default_fmt" : "%15s",
"default_units" : None,
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"local_lower_quanta" : {
"id" : 17,
"name" : "local_lower_quanta",
"name_html" : "Local lower quanta",
"table_name" : "",
"description" : "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"description_html" : "Rotational, hyperfine and other quantum numbers and labels for the lower state of a transition",
"default_fmt" : "%15s",
"default_units" : None,
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"line_mixing_flag" : {
"id" : 18,
"name" : "line_mixing_flag",
"name_html" : "Line mixing flag",
"table_name" : "",
"description" : "A flag indicating the presence of additional data and code relating to line-mixing",
"description_html" : "A flag indicating the presence of additional data and code relating to line-mixing",
"default_fmt" : "%1s",
"default_units" : "",
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"ierr" : {
"id" : 19,
"name" : "ierr",
"name_html" : "Error indices",
"table_name" : "",
"description" : "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"description_html" : "Ordered list of indices corresponding to uncertainty estimates of transition parameters",
"default_fmt" : "%s",
"default_units" : "",
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"iref" : {
"id" : 20,
"name" : "iref",
"name_html" : "References",
"table_name" : "",
"description" : "Ordered list of reference identifiers for transition parameters",
"description_html" : "Ordered list of reference identifiers for transition parameters",
"default_fmt" : "%s",
"default_units" : None,
"data_type" : "str",
"selectable" : 0,
"has_reference" : 0,
"has_error" : 0
},
"deltap_air" : {
"id" : 21,
"name" : "deltap_air",
"name_html" : "δ\'air",
"table_name" : "prm_deltap_air",
"description" : "Linear temperature dependence coefficient for air-induced pressure shift",
"description_html" : "Linear temperature dependence coefficient for air-induced pressure shift",
"default_fmt" : "%10.3e",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"n_self" : {
"id" : 22,
"name" : "n_self",
"name_html" : "nself",
"table_name" : "prm_n_self",
"description" : "Temperature exponent for the self-broadened HWHM",
"description_html" : "Temperature exponent for the self-broadened HWHM",
"default_fmt" : "%7.4f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"delta_self" : {
"id" : 23,
"name" : "delta_self",
"name_html" : "δself",
"table_name" : "prm_delta_self",
"description" : "Self-induced pressure shift, referred to p=1 atm",
"description_html" : "Self-induced pressure shift, referred to p=1 atm",
"default_fmt" : "%9.6f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"deltap_self" : {
"id" : 24,
"name" : "deltap_self",
"name_html" : "δ\'self",
"table_name" : "prm_deltap_self",
"description" : "Linear temperature dependence coefficient for self-induced pressure shift",
"description_html" : "Linear temperature dependence coefficient for self-induced pressure shift",
"default_fmt" : "%10.3e",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"SD_air" : {
"id" : 28,
"name" : "SD_air",
"name_html" : "SDair",
"table_name" : "prm_sd_air",
"description" : "Speed-dependence parameter, air-broadened lines",
"description_html" : "Speed-dependence parameter, air-broadened lines",
"default_fmt" : "%9.6f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"SD_self" : {
"id" : 29,
"name" : "SD_self",
"name_html" : "SDself",
"table_name" : "prm_sd_self",
"description" : "Speed-dependence parameter, self-broadened lines",
"description_html" : "Speed-dependence parameter, self-broadened lines",
"default_fmt" : "%9.6f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"beta_g_air" : {
"id" : 30,
"name" : "beta_g_air",
"name_html" : "βG, air",
"table_name" : "prm_beta_g_air",
"description" : "Dicke narrowing parameter for the air broadened Galatry line profile",
"description_html" : "Dicke narrowing parameter for the air broadened Galatry line profile",
"default_fmt" : "%9.6f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"y_self" : {
"id" : 31,
"name" : "y_self",
"name_html" : "Yself",
"table_name" : "prm_y_self",
"description" : "First-order (Rosenkranz) line coupling coefficient; self-broadened environment",
"description_html" : "First-order (Rosenkranz) line coupling coefficient; self-broadened environment",
"default_fmt" : "%10.3e",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"y_air" : {
"id" : 32,
"name" : "y_air",
"name_html" : "Yair",
"table_name" : "prm_y_air",
"description" : "First-order (Rosenkranz) line coupling coefficient; air-broadened environment",
"description_html" : "First-order (Rosenkranz) line coupling coefficient; air-broadened environment",
"default_fmt" : "%10.3e",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"statep" : {
"id" : 33,
"name" : "statep",
"name_html" : "qns\'",
"table_name" : "",
"description" : "Upper state quantum numbers",
"description_html" : "Upper state quantum numbers",
"default_fmt" : "%256s",
"default_units" : "",
"data_type" : "str",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"statepp" : {
"id" : 34,
"name" : "statepp",
"name_html" : "qns\"",
"table_name" : "",
"description" : "Lower state quantum numbers",
"description_html" : "Lower state quantum numbers",
"default_fmt" : "%256s",
"default_units" : "",
"data_type" : "str",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"beta_g_self" : {
"id" : 35,
"name" : "beta_g_self",
"name_html" : "βG, self",
"table_name" : "prm_beta_g_self",
"description" : "Dicke narrowing parameter for the self-broadened Galatry line profile",
"description_html" : "Dicke narrowing parameter for the self-broadened Galatry line profile",
"default_fmt" : "%9.6f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"trans_id" : {
"id" : 36,
"name" : "trans_id",
"name_html" : "Transition ID",
"table_name" : "",
"description" : "Unique integer ID of a particular transition entry in the database. (The same physical transition may have different IDs if its parameters have been revised or updated).",
"description_html" : "Unique integer ID of a particular transition entry in the database. (The same physical transition may have different IDs if its parameters have been revised or updated).",
"default_fmt" : "%12d",
"default_units" : "",
"data_type" : "int",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"par_line" : {
"id" : 37,
"name" : "par_line",
"name_html" : ".par line",
"table_name" : "",
"description" : "Native 160-character formatted HITRAN line",
"description_html" : "Native 160-character formatted HITRAN line",
"default_fmt" : "%160s",
"default_units" : "",
"data_type" : "str",
"selectable" : 1,
"has_reference" : 0,
"has_error" : 0
},
"gamma_H2" : {
"id" : 38,
"name" : "gamma_H2",
"name_html" : "γH2 ",
"table_name" : "prm_gamma_H2",
"description" : "Lorentzian lineshape HWHM due to pressure broadening by H2 at 1 atm pressure",
"description_html" : "Lorentzian lineshape HWHM due to pressure broadening by H2 at 1 atm pressure",
"default_fmt" : "%6.4f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"n_H2" : {
"id" : 39,
"name" : "n_H2",
"name_html" : "nH2",
"table_name" : "prm_n_H2",
"description" : "Temperature exponent for the H2-broadened HWHM",
"description_html" : "Temperature exponent for the H2-broadened HWHM",
"default_fmt" : "%7.4f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"delta_H2" : {
"id" : 40,
"name" : "delta_H2",
"name_html" : "δH2",
"table_name" : "prm_delta_H2",
"description" : "Pressure shift induced by H2, referred to p=1 atm",
"description_html" : "Pressure shift induced by H2, referred to p=1 atm",
"default_fmt" : "%9.6f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"deltap_H2" : {
"id" : 41,
"name" : "deltap_H2",
"name_html" : "δ\'H2",
"table_name" : "prm_deltap_H2",
"description" : "Linear temperature dependence coefficient for H2-induced pressure shift",
"description_html" : "Linear temperature dependence coefficient for H2-induced pressure shift",
"default_fmt" : "%10.3e",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"gamma_He": {
"id" : 42,
"name" : "gamma_He",
"name_html" : "γHe ",
"table_name" : "prm_gamma_He",
"description" : "Lorentzian lineshape HWHM due to pressure broadening by He at 1 atm pressure",
"description_html" : "Lorentzian lineshape HWHM due to pressure broadening by He at 1 atm pressure",
"default_fmt" : "%6.4f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"n_He" : {
"id" : 43,
"name" : "n_He",
"name_html" : "nHe",
"table_name" : "prm_n_He",
"description" : "Temperature exponent for the He-broadened HWHM",
"description_html" : "Temperature exponent for the He-broadened HWHM",
"default_fmt" : "%7.4f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"delta_He" : {
"id" : 44,
"name" : "delta_He",
"name_html" : "δHe",
"table_name" : "prm_delta_He",
"description" : "Pressure shift induced by He, referred to p=1 atm",
"description_html" : "Pressure shift induced by He, referred to p=1 atm",
"default_fmt" : "%9.6f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"gamma_CO2" : {
"id" : 45,
"name" : "gamma_CO2",
"name_html" : "γCO2 ",
"table_name" : "prm_gamma_CO2",
"description" : "Lorentzian lineshape HWHM due to pressure broadening by CO2 at 1 atm pressure",
"description_html" : "Lorentzian lineshape HWHM due to pressure broadening by CO2 at 1 atm pressure",
"default_fmt" : "%6.4f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"n_CO2" : {
"id" : 46,
"name" : "n_CO2",
"name_html" : "nCO2",
"table_name" : "prm_n_CO2",
"description" : "Temperature exponent for the CO2-broadened HWHM",
"description_html" : "Temperature exponent for the CO2-broadened HWHM",
"default_fmt" : "%7.4f",
"default_units" : "",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"delta_CO2" : {
"id" : 47,
"name" : "delta_CO2",
"name_html" : "δCO2",
"table_name" : "prm_delta_CO2",
"description" : "Pressure shift induced by CO2, referred to p=1 atm",
"description_html" : "Pressure shift induced by CO2, referred to p=1 atm",
"default_fmt" : "%9.6f",
"default_units" : "cm-1.atm-1",
"data_type" : "float",
"selectable" : 1,
"has_reference" : 1,
"has_error" : 1
},
"gamma_HT_0_self_50" : {
"default_fmt" : "%6.4f",
},
"n_HT_self_50" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_self_50" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_self_50" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_self_50" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_self_50" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_self_150" : {
"default_fmt" : "%6.4f",
},
"n_HT_self_150" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_self_150" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_self_150" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_self_150" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_self_150" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_self_296" : {
"default_fmt" : "%6.4f",
},
"n_HT_self_296" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_self_296" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_self_296" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_self_296" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_self_296" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_self_700" : {
"default_fmt" : "%6.4f",
},
"n_HT_self_700" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_self_700" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_self_700" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_self_700" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_self_700" : {
"default_fmt" : "%9.6f",
},
"nu_HT_self" : {
"default_fmt" : "%6.4f",
},
"kappa_HT_self" : {
"default_fmt" : "%9.6f",
},
"eta_HT_self" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_air_50" : {
"default_fmt" : "%6.4f",
},
"n_HT_air_50" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_air_50" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_air_50" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_air_50" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_air_50" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_air_150" : {
"default_fmt" : "%6.4f",
},
"n_HT_air_150" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_air_150" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_air_150" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_air_150" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_air_150" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_air_296" : {
"default_fmt" : "%6.4f",
},
"n_HT_air_296" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_air_296" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_air_296" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_air_296" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_air_296" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_0_air_700" : {
"default_fmt" : "%6.4f",
},
"n_HT_air_700" : {
"default_fmt" : "%9.6f",
},
"gamma_HT_2_air_700" : {
"default_fmt" : "%6.4f",
},
"delta_HT_0_air_700" : {
"default_fmt" : "%9.6f",
},
"deltap_HT_air_700" : {
"default_fmt" : "%9.6f",
},
"delta_HT_2_air_700" : {
"default_fmt" : "%9.6f",
},
"nu_HT_air" : {
"default_fmt" : "%6.4f",
},
"kappa_HT_air" : {
"default_fmt" : "%9.6f",
},
"eta_HT_air" : {
"default_fmt" : "%9.6f",
},
"gamma_H2O" : {
"default_fmt" : "%6.4f",
},
"n_H2O" : {
"default_fmt" : "%9.6f",
},
"Y_SDV_air_296" : {
"default_fmt" : "%10.3e",
},
"Y_SDV_self_296" : {
"default_fmt" : "%10.3e",
},
"Y_HT_air_296" : {
"default_fmt" : "%10.3e",
},
"Y_HT_self_296" : {
"default_fmt" : "%10.3e",
},
}
# lower the case of all parameter names (fix for case-sensitive databases)
PARAMETER_META = {}
for param in PARAMETER_META_:
PARAMETER_META[param.lower()] = PARAMETER_META_[param]
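# EXAMPLE (illustration only): parameter metadata is looked up by the
# lower-cased parameter name, e.g.
#   PARAMETER_META['gamma_air']['default_fmt']   # -> '%6.4f'
#   PARAMETER_META['sd_air']['description']      # -> 'Speed-dependence parameter, air-broadened lines'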
def getFullTableAndHeaderName(TableName,ext=None):
#print('TableName=',TableName)
if ext is None: ext = 'data'
flag_abspath = False # check if the supplied table name already contains absolute path
if os.path.isabs(TableName): flag_abspath = True
fullpath_data = TableName + '.' + ext
if not flag_abspath: fullpath_data = os.path.join(VARIABLES['BACKEND_DATABASE_NAME'],fullpath_data)
if not os.path.isfile(fullpath_data):
fullpath_data = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.par'
if not os.path.isfile(fullpath_data) and TableName!='sampletab':
raise Exception('Lonely header \"%s\"' % fullpath_data)
fullpath_header = TableName + '.header'
if not flag_abspath: fullpath_header = os.path.join(VARIABLES['BACKEND_DATABASE_NAME'],fullpath_header)
return fullpath_data,fullpath_header
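# EXAMPLE (sketch, assuming the default backend folder '.'): on a POSIX system
#   getFullTableAndHeaderName('H2O')
# returns ('./H2O.data','./H2O.header'); if './H2O.data' does not exist,
# './H2O.par' is used as the data file instead.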
def getParameterFormat(ParameterName,TableName):
return LOCAL_TABLE_CACHE[TableName]['header']['format']
def getTableHeader(TableName):
return LOCAL_TABLE_CACHE[TableName]['header']
def getRowObject(RowID,TableName):
# return RowObject from TableObject in CACHE
RowObject = []
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_value = LOCAL_TABLE_CACHE[TableName]['data'][par_name][RowID]
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
RowObject.append((par_name,par_value,par_format))
return RowObject
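# EXAMPLE (based on the 'sampletab' table defined above): a RowObject is a
# list of (name,value,format) tuples in header order, e.g.
#   getRowObject(0,'sampletab')
#   # -> [('column1',1,'%10d'),('column2',10.5,'%20f'),('column3','one','%30s')]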
# INCREASE ROW COUNT
def addRowObject(RowObject,TableName):
#print 'addRowObject: '
#print 'RowObject: '+str(RowObject)
#print 'TableName:'+TableName
for par_name,par_value,par_format in RowObject:
#print 'par_name,par_value,par_format: '+str((par_name,par_value,par_format))
#print '>>> '+ str(LOCAL_TABLE_CACHE[TableName]['data'][par_name])
#LOCAL_TABLE_CACHE[TableName]['data'][par_name] += [par_value]
LOCAL_TABLE_CACHE[TableName]['data'][par_name].append(par_value)
def setRowObject(RowID,RowObject,TableName):
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
if RowID >= 0 and RowID < number_of_rows:
for par_name,par_value,par_format in RowObject:
LOCAL_TABLE_CACHE[TableName]['data'][par_name][RowID] = par_value
else:
# !!! XXX ATTENTION: THIS IS A TEMPORARY INSERTION XXX !!!
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] += 1
addRowObject(RowObject,TableName)
def getDefaultRowObject(TableName):
# get a default RowObject from a table
RowObject = []
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_value = LOCAL_TABLE_CACHE[TableName]['header']['default'][par_name]
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
RowObject.append((par_name,par_value,par_format))
return RowObject
def subsetOfRowObject(ParameterNames,RowObject):
    # return a subset of RowObject containing only the parameters listed in ParameterNames
#RowObjectNew = []
#for par_name,par_value,par_format in RowObject:
# if par_name in ParameterNames:
# RowObjectNew.append((par_name,par_value,par_format))
#return RowObjectNew
dct = {}
for par_name,par_value,par_format in RowObject:
dct[par_name] = (par_name,par_value,par_format)
RowObjectNew = []
for par_name in ParameterNames:
RowObjectNew.append(dct[par_name])
return RowObjectNew
#FORMAT_PYTHON_REGEX = r'^\%([0-9]*)\.?([0-9]*)([dfs])$'
FORMAT_PYTHON_REGEX = r'^\%(\d*)(\.(\d*))?([edfsEDFS])$'
# Fortran string formatting
# based on a pythonic format string
def formatString(par_format,par_value,lang='FORTRAN'):
# Fortran format rules:
# %M.NP
# M - total field length (optional)
# (minus sign included in M)
    # . - decimal separator (optional)
# N - number of digits after . (optional)
# P - [dfs] int/float/string
    # PYTHON RULE: if N is absent, the default value is 6
regex = FORMAT_PYTHON_REGEX
(lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
result = par_format % par_value
if ty.lower() in set(['f','e']):
lng = int(lng) if lng else 0
lngpnt = int(lngpnt) if lngpnt else 0
result = par_format % par_value
res = result.strip()
if lng==lngpnt+1:
if res[0:1]=='0':
result = '%%%ds' % lng % res[1:]
if par_value<0:
if res[1:2]=='0':
result = '%%%ds' % lng % (res[0:1]+res[2:])
return result
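# EXAMPLE (illustration of the Fortran-style leading-zero stripping above):
#   formatString('%4.3f',0.123)     # -> '.123'    (leading zero dropped to fit the field)
#   formatString('%5.4f',-0.123)    # -> '-.1230'  (zero after the sign dropped)
#   formatString('%10.3E',2.5e-19)  # -> ' 2.500E-19' (no stripping needed)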
def putRowObjectToString(RowObject):
# serialize RowObject to string
# TODO: support different languages (C,Fortran)
output_string = ''
for par_name,par_value,par_format in RowObject:
# Python formatting
#output_string += par_format % par_value
# Fortran formatting
#print 'par_name,par_value,par_format: '+str((par_name,par_value,par_format))
output_string += formatString(par_format,par_value)
return output_string
# Parameter nicknames are hard-coded.
PARAMETER_NICKNAMES = {
"a": "A",
"gamma_air": "gair",
"gp": "g",
"local_iso_id": "I",
"molec_id": "M",
"sw": "S",
"local_lower_quanta": "Q_",
"local_upper_quanta": "Q",
"gpp": "g_",
"elower": "E_",
"n_air": "nair",
"delta_air": "dair",
"global_upper_quanta": "V",
"iref": "Iref",
"line_mixing_flag": "f",
"ierr": "ierr",
"nu": "nu",
"gamma_self": "gsel",
"global_lower_quanta": "V_"
}
def putTableHeaderToString(TableName):
output_string = ''
regex = FORMAT_PYTHON_REGEX
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
(lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
fmt = '%%%ss' % lng
try:
par_name_short = PARAMETER_NICKNAMES[par_name]
except:
par_name_short = par_name
#output_string += fmt % par_name
output_string += (fmt % par_name_short)[:int(lng)]
return output_string
def getRowObjectFromString(input_string,TableName):
# restore RowObject from string, get formats and names in TableName
#print 'getRowObjectFromString:'
pos = 0
RowObject = []
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
regex = '^\%([0-9]+)\.?[0-9]*([dfs])$' #
regex = FORMAT_PYTHON_REGEX
(lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
lng = int(lng)
par_value = input_string[pos:(pos+lng)]
if ty=='d': # integer value
par_value = int(par_value)
elif ty.lower() in set(['e','f']): # float value
par_value = float(par_value)
elif ty=='s': # string value
pass # don't strip string value
else:
print('err1')
raise Exception('Format \"%s\" is unknown' % par_format)
RowObject.append((par_name,par_value,par_format))
pos += lng
# Do the same but now for extra (comma-separated) parameters
if 'extra' in set(LOCAL_TABLE_CACHE[TableName]['header']):
csv_chunks = input_string.split(LOCAL_TABLE_CACHE[TableName]['header'].\
get('extra_separator',','))
        # Disregard the first "column-fixed" container if it is present:
if LOCAL_TABLE_CACHE[TableName]['header'].get('order',[]):
pos = 1
else:
pos = 0
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['extra']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['extra_format'][par_name]
regex = '^\%([0-9]+)\.?[0-9]*([dfs])$' #
regex = FORMAT_PYTHON_REGEX
(lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
lng = int(lng)
par_value = csv_chunks[pos]
if ty=='d': # integer value
try:
par_value = int(par_value)
except:
par_value = 0
elif ty.lower() in set(['e','f']): # float value
try:
par_value = float(par_value)
except:
par_value = 0.0
elif ty=='s': # string value
pass # don't strip string value
else:
print('err')
raise Exception('Format \"%s\" is unknown' % par_format)
RowObject.append((par_name,par_value,par_format))
pos += 1
return RowObject
# Conversion between OBJECT_FORMAT and STORAGE_FORMAT
# This will substitute putTableToStorage and getTableFromStorage
def cache2storage(TableName):
try:
os.mkdir(VARIABLES['BACKEND_DATABASE_NAME'])
except:
pass
#fullpath_data,fullpath_header = getFullTableAndHeaderName(TableName) # "lonely header" bug
fullpath_data = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.data' # bugfix
fullpath_header = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.header' # bugfix
OutfileData = open(fullpath_data,'w')
OutfileHeader = open(fullpath_header,'w')
# write table data
line_count = 1
line_number = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
for RowID in range(0,line_number):
line_count += 1
RowObject = getRowObject(RowID,TableName)
raw_string = putRowObjectToString(RowObject)
OutfileData.write(raw_string+'\n')
# write table header
TableHeader = getTableHeader(TableName)
OutfileHeader.write(json.dumps(TableHeader,indent=2))
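# EXAMPLE (sketch): after cache2storage('sampletab') with the default backend
# folder, the table is stored as two files: './sampletab.data' (one fixed-width
# text line per row) and './sampletab.header' (the header serialized as JSON).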
def storage2cache(TableName,cast=True,ext=None,nlines=None,pos=None):
""" edited by NHL
TableName: name of the HAPI table to read in
ext: file extension
    nlines: number of lines in the block; if None, read all lines at once
pos: file position to seek
"""
#print 'storage2cache:'
#print('TableName',TableName)
if nlines is not None:
print('WARNING: storage2cache is reading the block of maximum %d lines'%nlines)
fullpath_data,fullpath_header = getFullTableAndHeaderName(TableName,ext)
if TableName in LOCAL_TABLE_CACHE and \
'filehandler' in LOCAL_TABLE_CACHE[TableName] and \
LOCAL_TABLE_CACHE[TableName]['filehandler'] is not None:
InfileData = LOCAL_TABLE_CACHE[TableName]['filehandler']
else:
InfileData = open_(fullpath_data,'r')
InfileHeader = open(fullpath_header,'r')
#try:
header_text = InfileHeader.read()
try:
Header = json.loads(header_text)
except:
print('HEADER:')
print(header_text)
raise Exception('Invalid header')
#print 'Header:'+str(Header)
LOCAL_TABLE_CACHE[TableName] = {}
LOCAL_TABLE_CACHE[TableName]['header'] = Header
LOCAL_TABLE_CACHE[TableName]['data'] = {}
LOCAL_TABLE_CACHE[TableName]['filehandler'] = InfileData
# Check if Header['order'] and Header['extra'] contain
# parameters with same names, raise exception if true.
#intersct = set(Header['order']).intersection(set(Header.get('extra',[])))
intersct = set(Header.get('order',[])).intersection(set(Header.get('extra',[])))
if intersct:
raise Exception('Parameters with the same names: {}'.format(intersct))
# initialize empty data to avoid problems
glob_order = []; glob_format = {}; glob_default = {}
if "order" in LOCAL_TABLE_CACHE[TableName]['header'].keys():
glob_order += LOCAL_TABLE_CACHE[TableName]['header']['order']
glob_format.update(LOCAL_TABLE_CACHE[TableName]['header']['format'])
glob_default.update(LOCAL_TABLE_CACHE[TableName]['header']['default'])
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
if "extra" in LOCAL_TABLE_CACHE[TableName]['header'].keys():
glob_order += LOCAL_TABLE_CACHE[TableName]['header']['extra']
glob_format.update(LOCAL_TABLE_CACHE[TableName]['header']['extra_format'])
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['extra']:
glob_default[par_name] = PARAMETER_META[par_name]['default_fmt']
LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
header = LOCAL_TABLE_CACHE[TableName]['header']
if 'extra' in header and header['extra']:
line_count = 0
flag_EOF = False
#line_number = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
#for line in InfileData:
while True:
#print '%d line from %d' % (line_count,line_number)
#print 'line: '+line #
if nlines is not None and line_count>=nlines: break
line = InfileData.readline()
if line=='': # end of file is represented by an empty string
flag_EOF = True
break
try:
RowObject = getRowObjectFromString(line,TableName)
line_count += 1
except:
continue
#print 'RowObject: '+str(RowObject)
addRowObject(RowObject,TableName)
#except:
# raise Exception('TABLE FETCHING ERROR')
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = line_count
else:
quantities = header['order']
formats = [header['format'][qnt] for qnt in quantities]
types = {'d':int, 'f':float, 'E':float, 's':str}
converters = []
end = 0
for qnt, fmt in zip(quantities, formats):
# pre-defined positions are needed to skip the existing parameters in headers (new feature)
if 'position' in header:
start = header['position'][qnt]
else:
start = end
dtype = types[fmt[-1]]
aux = fmt[fmt.index('%')+1:-1]
if '.' in aux:
aux = aux[:aux.index('.')]
size = int(aux)
end = start + size
def cfunc(line, dtype=dtype, start=start, end=end, qnt=qnt):
# return dtype(line[start:end]) # this will fail on the float number with D exponent (Fortran notation)
if dtype==float:
try:
return dtype(line[start:end])
except ValueError: # possible D exponent instead of E
try:
return dtype(line[start:end].replace('D','E'))
except ValueError: # this is a special case and it should not be in the main version tree!
# Dealing with the weird and unparsable intensity format such as "2.700-164, i.e with no E or D characters.
res = re.search('(\d\.\d\d\d)\-(\d\d\d)',line[start:end])
if res:
return dtype(res.group(1)+'E-'+res.group(2))
else:
raise Exception('PARSE ERROR: unknown format of the par value (%s)'%line[start:end])
elif dtype==int and qnt=='local_iso_id':
if line[start:end]=='0': return 10
try:
return dtype(line[start:end])
except ValueError:
# convert letters to numbers: A->11, B->12, etc... ; .par file must be in ASCII or Unicode.
return 11+ord(line[start:end])-ord('A')
else:
return dtype(line[start:end])
#cfunc.__doc__ = 'converter {} {}'.format(qnt, fmt) # doesn't work in earlier versions of Python
converters.append(cfunc)
#start = end
#data_matrix = [[cvt(line) for cvt in converters] for line in InfileData]
flag_EOF = False
line_count = 0
data_matrix = []
while True:
if nlines is not None and line_count>=nlines: break
line = InfileData.readline()
if line=='': # end of file is represented by an empty string
flag_EOF = True
break
data_matrix.append([cvt(line) for cvt in converters])
line_count += 1
data_columns = zip(*data_matrix)
for qnt, col in zip(quantities, data_columns):
#LOCAL_TABLE_CACHE[TableName]['data'][qnt].extend(col) # old code
if type(col[0]) in {int,float}:
LOCAL_TABLE_CACHE[TableName]['data'][qnt] = np.array(col) # new code
else:
LOCAL_TABLE_CACHE[TableName]['data'][qnt].extend(col) # old code
#LOCAL_TABLE_CACHE[TableName]['data'][qnt] = list(col)
#LOCAL_TABLE_CACHE[TableName]['data'][qnt] = col
header['number_of_rows'] = line_count = (
len(LOCAL_TABLE_CACHE[TableName]['data'][quantities[0]]))
# Delete all character-separated values, treat them as column-fixed.
try:
del LOCAL_TABLE_CACHE[TableName]['header']['extra']
del LOCAL_TABLE_CACHE[TableName]['header']['extra_format']
del LOCAL_TABLE_CACHE[TableName]['header']['extra_separator']
except:
pass
# Update header.order/format with header.extra/format if exist.
LOCAL_TABLE_CACHE[TableName]['header']['order'] = glob_order
LOCAL_TABLE_CACHE[TableName]['header']['format'] = glob_format
LOCAL_TABLE_CACHE[TableName]['header']['default'] = glob_default
if flag_EOF:
InfileData.close()
LOCAL_TABLE_CACHE[TableName]['filehandler'] = None
InfileHeader.close()
print(' Lines parsed: %d' % line_count)
return flag_EOF
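# EXAMPLE (sketch, assuming a table named 'H2O' exists in the backend folder):
# read the table block-wise, 10000 lines at a time, until EOF is reached;
# each call replaces the previous block in LOCAL_TABLE_CACHE['H2O']['data'].
#   eof = False
#   while not eof:
#       eof = storage2cache('H2O',nlines=10000)
#       # ... process the current block of LOCAL_TABLE_CACHE['H2O'] here ...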
## old version based on regular expressions
#def storage2cache(TableName):
# fullpath_data,fullpath_header = getFullTableAndHeaderName(TableName)
# InfileData = open(fullpath_data,'r')
# InfileHeader = open(fullpath_header,'r')
# #try:
# header_text = InfileHeader.read()
# try:
# Header = json.loads(header_text)
# except:
# print('HEADER:')
# print(header_text)
# raise Exception('Invalid header')
# LOCAL_TABLE_CACHE[TableName] = {}
# LOCAL_TABLE_CACHE[TableName]['header'] = Header
# LOCAL_TABLE_CACHE[TableName]['data'] = {}
# # Check if Header['order'] and Header['extra'] contain
# # parameters with same names, raise exception if true.
# intersct = set(Header.get('order',[])).intersection(set(Header.get('extra',[])))
# if intersct:
# raise Exception('Parameters with the same names: {}'.format(intersct))
# # initialize empty data to avoid problems
# glob_order = []; glob_format = {}; glob_default = {}
# if "order" in LOCAL_TABLE_CACHE[TableName]['header'].keys():
# glob_order += LOCAL_TABLE_CACHE[TableName]['header']['order']
# glob_format.update(LOCAL_TABLE_CACHE[TableName]['header']['format'])
# glob_default.update(LOCAL_TABLE_CACHE[TableName]['header']['default'])
# for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
# LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
# if "extra" in LOCAL_TABLE_CACHE[TableName]['header'].keys():
# glob_order += LOCAL_TABLE_CACHE[TableName]['header']['extra']
# glob_format.update(LOCAL_TABLE_CACHE[TableName]['header']['extra_format'])
# for par_name in LOCAL_TABLE_CACHE[TableName]['header']['extra']:
# glob_default[par_name] = PARAMETER_META[par_name]['default_fmt']
# LOCAL_TABLE_CACHE[TableName]['data'][par_name] = []
# line_count = 0
# for line in InfileData:
# try:
# RowObject = getRowObjectFromString(line,TableName)
# line_count += 1
# except:
# continue
# addRowObject(RowObject,TableName)
# LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = line_count
# # Delete all character-separated values, treat them as column-fixed.
# try:
# del LOCAL_TABLE_CACHE[TableName]['header']['extra']
# del LOCAL_TABLE_CACHE[TableName]['header']['extra_format']
# del LOCAL_TABLE_CACHE[TableName]['header']['extra_separator']
# except:
# pass
# # Update header.order/format with header.extra/format if exist.
# LOCAL_TABLE_CACHE[TableName]['header']['order'] = glob_order
# LOCAL_TABLE_CACHE[TableName]['header']['format'] = glob_format
# LOCAL_TABLE_CACHE[TableName]['header']['default'] = glob_default
# InfileData.close()
# InfileHeader.close()
# print(' Lines parsed: %d' % line_count)
# pass
# / FORMAT CONVERSION LAYER
def getTableNamesFromStorage(StorageName):
file_names = listdir(StorageName)
table_names = []
for file_name in file_names:
matchObject = re.search('(.+)\.header$',file_name)
if matchObject:
table_names.append(matchObject.group(1))
return table_names
# FIX POSSIBLE BUG: SIMILAR NAMES OF .PAR AND .DATA FILES
# BUG FIXED BY INTRODUCING A PRIORITY:
# *.data files have more priority than *.par files
# See getFullTableAndHeaderName function for explanation
def scanForNewParfiles(StorageName):
file_names = listdir(StorageName)
headers = {} # without extensions!
parfiles_without_header = []
for file_name in file_names:
# create dictionary of unique headers
try:
fname,fext = re.search('(.+)\.(\w+)',file_name).groups()
except:
continue
if fext == 'header': headers[fname] = True
for file_name in file_names:
# check if extension is 'par' and the header is absent
try:
fname,fext = re.search('(.+)\.(\w+)',file_name).groups()
except:
continue
if fext == 'par' and fname not in headers:
parfiles_without_header.append(fname)
return parfiles_without_header
def createHeader(TableName):
fname = TableName+'.header'
    fullpath_header = VARIABLES['BACKEND_DATABASE_NAME']+'/'+fname
    if os.path.isfile(fullpath_header):
        raise Exception('File \"%s\" already exists!' % fname)
    fp = open(fullpath_header,'w')
    fp.write(json.dumps(HITRAN_DEFAULT_HEADER,indent=2))
    fp.close()
def loadCache():
print('Using '+VARIABLES['BACKEND_DATABASE_NAME']+'\n')
    LOCAL_TABLE_CACHE.clear() # reset the module-level cache in place
table_names = getTableNamesFromStorage(VARIABLES['BACKEND_DATABASE_NAME'])
parfiles_without_header = scanForNewParfiles(VARIABLES['BACKEND_DATABASE_NAME'])
# create headers for new parfiles
for tab_name in parfiles_without_header:
# get name without 'par' extension
createHeader(tab_name)
table_names.append(tab_name)
for TableName in table_names:
print(TableName)
storage2cache(TableName)
def saveCache():
try:
# delete query buffer
del LOCAL_TABLE_CACHE[QUERY_BUFFER]
except:
pass
for TableName in LOCAL_TABLE_CACHE:
print(TableName)
cache2storage(TableName)
# DB backend level, start transaction
def databaseBegin(db=None):
if db:
VARIABLES['BACKEND_DATABASE_NAME'] = db
else:
VARIABLES['BACKEND_DATABASE_NAME'] = BACKEND_DATABASE_NAME_DEFAULT
if not os.path.exists(VARIABLES['BACKEND_DATABASE_NAME']):
os.mkdir(VARIABLES['BACKEND_DATABASE_NAME'])
loadCache()
# DB backend level, end transaction
def databaseCommit():
saveCache()
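# EXAMPLE (typical session sketch):
#   databaseBegin('data')   # use (and create, if needed) './data' as the local DB folder
#   # ... fetch or edit tables; everything lives in LOCAL_TABLE_CACHE ...
#   databaseCommit()        # write all cached tables back to disk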
# ----------------------------------------------------
# ----------------------------------------------------
# CONDITIONS
# ----------------------------------------------------
# ----------------------------------------------------
# ----------------------------------------------------
# hierarchic query.condition language:
# Conditions: CONS = ('and', ('=','p1','p2'), ('<','p1',13))
# String literals are distinguished from variable names
# by using the operation ('STRING','some_string')
# ----------------------------------------------------
# necessary conditions for hitranonline:
SAMPLE_CONDITIONS = ('AND',('SET','internal_iso_id',[1,2,3,4,5,6]),('>=','nu',0),('<=','nu',100))
# sample hitranonline protocol
# http://hitran.cloudapp.net/lbl/5?output_format_id=1&iso_ids_list=5&numin=0&numax=100&access=api&key=e20e4bd3-e12c-4931-99e0-4c06e88536bd
CONDITION_OPERATIONS = set(['AND','OR','NOT','RANGE','IN','<','>','<=','>=','==','!=','LIKE','STR','+','-','*','/','MATCH','SEARCH','FINDALL'])
# Operations used in Condition verification
# Basic scheme: operationXXX(args),
# where args - list/array of arguments (>=1)
def operationAND(args):
    # any number of arguments
for arg in args:
if not arg:
return False
return True
def operationOR(args):
# any number of arguments
for arg in args:
if arg:
return True
return False
def operationNOT(arg):
# one argument
return not arg
def operationRANGE(x,x_min,x_max):
return x_min <= x <= x_max
def operationSUBSET(arg1,arg2):
    # True if arg1 is an element of arg2 (membership test)
# arg1 is an element
# arg2 is a set
return arg1 in arg2
def operationLESS(args):
# any number of args
for i in range(1,len(args)):
if args[i-1] >= args[i]:
return False
return True
def operationMORE(args):
# any number of args
for i in range(1,len(args)):
if args[i-1] <= args[i]:
return False
return True
def operationLESSOREQUAL(args):
# any number of args
for i in range(1,len(args)):
if args[i-1] > args[i]:
return False
return True
def operationMOREOREQUAL(args):
# any number of args
for i in range(1,len(args)):
if args[i-1] < args[i]:
return False
return True
def operationEQUAL(args):
# any number of args
for i in range(1,len(args)):
if args[i] != args[i-1]:
return False
return True
def operationNOTEQUAL(arg1,arg2):
return arg1 != arg2
def operationSUM(args):
    # any number of arguments
    if type(args[0]) in set([int,float]):
        result = 0
    elif isinstance(args[0],str):
        result = ''
result = ''
else:
raise Exception('SUM error: unknown arg type')
for arg in args:
result += arg
return result
def operationDIFF(arg1,arg2):
return arg1-arg2
def operationMUL(args):
    # any number of arguments
if type(args[0]) in set([int,float]):
result = 1
else:
raise Exception('MUL error: unknown arg type')
for arg in args:
result *= arg
return result
def operationDIV(arg1,arg2):
return arg1/arg2
def operationSTR(arg):
# transform arg to str
if type(arg)!=str:
raise Exception('Type mismatch: STR')
return arg
def operationSET(arg):
# transform arg to list
if type(arg) not in set([list,tuple,set]):
raise Exception('Type mismatch: SET')
return list(arg)
def operationMATCH(arg1,arg2):
# Match regex (arg1) and string (arg2)
#return bool(re.match(arg1,arg2)) # works wrong
return bool(re.search(arg1,arg2))
def operationSEARCH(arg1,arg2):
# Search regex (arg1) in string (arg2)
# Output list of entries
group = re.search(arg1,arg2).groups()
result = []
for item in group:
result.append(('STR',item))
return result
def operationFINDALL(arg1,arg2):
# Search all groups of a regex
# Output a list of groups of entries
# XXX: If a group has more than 1 entry,
# there could be potential problems
list_of_groups = re.findall(arg1,arg2)
result = []
for item in list_of_groups:
result.append(('STR',item))
return result
def operationLIST(args):
# args is a list: do nothing (almost)
return list(args)
# /operations
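# EXAMPLE (direct calls to the operations defined above, illustration only):
#   operationRANGE(5,1,10)              # -> True  (1 <= 5 <= 10)
#   operationAND([True,1,'non-empty'])  # -> True
#   operationSUM([1,2,3])               # -> 6
#   operationMATCH('H2O$','1H2O')       # -> True  (regex search, see operationMATCH)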
# GROUPING ----------------------------------------------
GROUP_INDEX = {}
# GROUP_INDEX has the following structure:
# GROUP_INDEX[KEY] = VALUE
# KEY = table line values
# VALUE = {'FUNCTIONS':DICT,'FLAG':LOGICAL,'ROWID':INTEGER}
# FUNCTIONS = {'FUNC_NAME':DICT}
# FUNC_NAME = {'FLAG':LOGICAL,'NAME':STRING}
# name and default value
GROUP_FUNCTION_NAMES = { 'COUNT' : 0,
'SUM' : 0,
'MUL' : 1,
'AVG' : 0,
'MIN' : +1e100,
'MAX' : -1e100,
'SSQ' : 0,
}
def clearGroupIndex():
#GROUP_INDEX = {}
for key in GROUP_INDEX.keys():
del GROUP_INDEX[key]
def getValueFromGroupIndex(GroupIndexKey,FunctionName):
    # If the function entry is missing for this group key, initialize it and return its current value
if FunctionName not in GROUP_FUNCTION_NAMES:
raise Exception('No such function \"%s\"' % FunctionName)
    # In case NewRowObjectDefault is requested
if not GroupIndexKey:
return GROUP_FUNCTION_NAMES[FunctionName]
if FunctionName not in GROUP_INDEX[GroupIndexKey]['FUNCTIONS']:
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName] = {}
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = True
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE'] = \
GROUP_FUNCTION_NAMES[FunctionName]
return GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE']
def setValueToGroupIndex(GroupIndexKey,FunctionName,Value):
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['VALUE'] = Value
GROUP_DESC = {}
def initializeGroup(GroupIndexKey):
if GroupIndexKey not in GROUP_INDEX:
print('GROUP_DESC[COUNT]='+str(GROUP_DESC.get('COUNT')))  # use .get: GROUP_DESC may not contain 'COUNT'
GROUP_INDEX[GroupIndexKey] = {}
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'] = {}
GROUP_INDEX[GroupIndexKey]['ROWID'] = len(GROUP_INDEX) - 1
for FunctionName in GROUP_FUNCTION_NAMES:
# initialize function flags (UpdateFlag)
if FunctionName in GROUP_INDEX[GroupIndexKey]['FUNCTIONS']:
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = True
print('initializeGroup: GROUP_INDEX='+str(GROUP_INDEX))
def groupCOUNT(GroupIndexKey):
FunctionName = 'COUNT'
Value = getValueFromGroupIndex(GroupIndexKey,FunctionName)
if GroupIndexKey:
if GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG']:
GROUP_INDEX[GroupIndexKey]['FUNCTIONS'][FunctionName]['FLAG'] = False
Value = Value + 1
setValueToGroupIndex(GroupIndexKey,FunctionName,Value)
return Value
def groupSUM():
pass
def groupMUL():
pass
def groupAVG():
pass
def groupMIN():
pass
def groupMAX():
pass
def groupSSQ():
pass
OPERATORS = {\
# List
'LIST' : lambda args : operationLIST(args),
# And
'&' : lambda args : operationAND(args),
'&&' : lambda args : operationAND(args),
'AND' : lambda args : operationAND(args),
# Or
'|' : lambda args : operationOR(args),
'||' : lambda args : operationOR(args),
'OR' : lambda args : operationOR(args),
# Not
'!' : lambda args : operationNOT(args[0]),
'NOT' : lambda args : operationNOT(args[0]),
# Between
'RANGE' : lambda args : operationRANGE(args[0],args[1],args[2]),
'BETWEEN' : lambda args : operationRANGE(args[0],args[1],args[2]),
# Subset
'IN' : lambda args : operationSUBSET(args[0],args[1]),
'SUBSET': lambda args : operationSUBSET(args[0],args[1]),
# Less
'<' : lambda args : operationLESS(args),
'LESS' : lambda args : operationLESS(args),
'LT' : lambda args : operationLESS(args),
# More
'>' : lambda args : operationMORE(args),
'MORE' : lambda args : operationMORE(args),
'MT' : lambda args : operationMORE(args),
# Less or equal
'<=' : lambda args : operationLESSOREQUAL(args),
'LESSOREQUAL' : lambda args : operationLESSOREQUAL(args),
'LTE' : lambda args : operationLESSOREQUAL(args),
# More or equal
'>=' : lambda args : operationMOREOREQUAL(args),
'MOREOREQUAL' : lambda args : operationMOREOREQUAL(args),
'MTE' : lambda args : operationMOREOREQUAL(args),
# Equal
'=' : lambda args : operationEQUAL(args),
'==' : lambda args : operationEQUAL(args),
'EQ' : lambda args : operationEQUAL(args),
'EQUAL' : lambda args : operationEQUAL(args),
'EQUALS' : lambda args : operationEQUAL(args),
# Not equal
'!=' : lambda args : operationNOTEQUAL(args[0],args[1]),
'<>' : lambda args : operationNOTEQUAL(args[0],args[1]),
'~=' : lambda args : operationNOTEQUAL(args[0],args[1]),
'NE' : lambda args : operationNOTEQUAL(args[0],args[1]),
'NOTEQUAL' : lambda args : operationNOTEQUAL(args[0],args[1]),
# Plus
'+' : lambda args : operationSUM(args),
'SUM' : lambda args : operationSUM(args),
# Minus
'-' : lambda args : operationDIFF(args[0],args[1]),
'DIFF' : lambda args : operationDIFF(args[0],args[1]),
# Mul
'*' : lambda args : operationMUL(args),
'MUL' : lambda args : operationMUL(args),
# Div
'/' : lambda args : operationDIV(args[0],args[1]),
'DIV' : lambda args : operationDIV(args[0],args[1]),
# Regexp match
'MATCH' : lambda args : operationMATCH(args[0],args[1]),
'LIKE' : lambda args : operationMATCH(args[0],args[1]),
# Regexp search
'SEARCH' : lambda args : operationSEARCH(args[0],args[1]),
# Regexp findall
'FINDALL' : lambda args : operationFINDALL(args[0],args[1]),
# Group count
'COUNT' : lambda args : groupCOUNT(args[0]),
}
# new evaluateExpression function,
# accounting for groups
# (an alternative explicit-dispatch variant is kept below, commented out, for reference)
"""
def evaluateExpression(root,VarDictionary,GroupIndexKey=None):
# input = local tree root
# XXX: this could be very slow due to passing
# every time VarDictionary as a parameter
# Two special cases: 1) root=varname
# 2) root=list/tuple
# These cases must be processed in a separate way
if type(root) in set([list,tuple]):
# root is not a leaf
head = root[0].upper()
# string constants are treated specially
if head in set(['STR','STRING']): # one arg
return operationSTR(root[1])
elif head in set(['SET']):
return operationSET(root[1])
tail = root[1:]
args = []
# evaluate arguments recursively
for element in tail: # resolve tree by recursion
args.append(evaluateExpression(element,VarDictionary,GroupIndexKey))
# call functions with evaluated arguments
if head in set(['LIST']): # list arg
return operationLIST(args)
elif head in set(['&','&&','AND']): # many args
return operationAND(args)
elif head in set(['|','||','OR']): # many args
return operationOR(args)
elif head in set(['!','NOT']): # one args
return operationNOT(args[0])
elif head in set(['RANGE','BETWEEN']): # three args
return operationRANGE(args[0],args[1],args[2])
elif head in set(['IN','SUBSET']): # two args
return operationSUBSET(args[0],args[1])
elif head in set(['<','LESS','LT']): # many args
return operationLESS(args)
elif head in set(['>','MORE','MT']): # many args
return operationMORE(args)
elif head in set(['<=','LESSOREQUAL','LTE']): # many args
return operationLESSOREQUAL(args)
elif head in set(['>=','MOREOREQUAL','MTE']): # many args
return operationMOREOREQUAL(args)
elif head in set(['=','==','EQ','EQUAL','EQUALS']): # many args
return operationEQUAL(args)
elif head in set(['!=','<>','~=','NE','NOTEQUAL']): # two args
return operationNOTEQUAL(args[0],args[1])
elif head in set(['+','SUM']): # many args
return operationSUM(args)
elif head in set(['-','DIFF']): # two args
return operationDIFF(args[0],args[1])
elif head in set(['*','MUL']): # many args
return operationMUL(args)
elif head in set(['/','DIV']): # two args
return operationDIV(args[0],args[1])
elif head in set(['MATCH','LIKE']): # two args
return operationMATCH(args[0],args[1])
elif head in set(['SEARCH']): # two args
return operationSEARCH(args[0],args[1])
elif head in set(['FINDALL']): # two args
return operationFINDALL(args[0],args[1])
# --- GROUPING OPERATIONS ---
elif head in set(['COUNT']):
return groupCOUNT(GroupIndexKey)
else:
raise Exception('Unknown operator: %s' % root[0])
elif type(root)==str:
# root is a par_name
return VarDictionary[root]
else:
# root is a non-string constant
return root
"""
def evaluateExpression(root,VarDictionary,GroupIndexKey=None):
# input = local tree root
# XXX: this could be very slow due to passing
# every time VarDictionary as a parameter
# Two special cases: 1) root=varname
# 2) root=list/tuple
# These cases must be processed in a separate way
if type(root) in set([list,tuple]):
# root is not a leaf
head = root[0].upper()
# string constants are treated specially
if head in set(['STR','STRING']): # one arg
return operationSTR(root[1])
elif head in set(['SET']):
return operationSET(root[1])
tail = root[1:]
args = []
# evaluate arguments recursively
for element in tail: # resolve tree by recursion
args.append(evaluateExpression(element,VarDictionary,GroupIndexKey))
# call functions with evaluated arguments
try:
return OPERATORS[head](args)
except KeyError:
raise Exception('Unknown operator: %s' % head)
elif type(root)==str:
# root is a par_name
return VarDictionary[root]
else:
# root is a non-string constant
return root
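# Illustrative sketch (not part of the original API): evaluating a condition
# written in the prefix notation handled above. The expression mirrors the
# example from the select() docstring, i.e. (p1>=1 and p1*p2<20).
def _evaluateExpression_example():
    Conditions = ('AND',('>=','p1',1),('<',('*','p1','p2'),20))
    VarDictionary = {'p1':2,'p2':5}
    return evaluateExpression(Conditions,VarDictionary)   # -> True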
def getVarDictionary(RowObject):
# get VarDict from RowObject
# VarDict: par_name => par_value
VarDictionary = {}
for par_name,par_value,par_format in RowObject:
VarDictionary[par_name] = par_value
return VarDictionary
def checkRowObject(RowObject,Conditions,VarDictionary):
#VarDictionary = getVarDictionary(RowObject)
if Conditions:
Flag = evaluateExpression(Conditions,VarDictionary)
else:
Flag=True
return Flag
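# Illustrative sketch (not part of the original API): a RowObject is a list of
# (par_name,par_value,par_format) tuples; getVarDictionary() flattens it into
# the par_name => par_value mapping used by evaluateExpression/checkRowObject.
def _rowobject_example():
    RowObject = [('p1',2,'%10d'),('p2',5.0,'%25.15E')]
    VarDictionary = getVarDictionary(RowObject)   # {'p1':2,'p2':5.0}
    Conditions = ('>=','p1',1)
    return checkRowObject(RowObject,Conditions,VarDictionary)   # -> True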
# ----------------------------------------------------
# /CONDITIONS
# ----------------------------------------------------
# ----------------------------------------------------
# PARAMETER NAMES (including creation of new ones)
# ----------------------------------------------------
# Bind an expression to a new parameter
# in a form: ('BIND','new_par',('some_exp',...))
def operationBIND(parname,Expression,VarDictionary):
pass
# This section is for more detailed processing of parlists.
# Table creation must include not only subsets of
# existing parameters, but also new parameters
# derived from functions written in a special prefix language.
# For this reason subsetOfRowObject(..) must be substituted
# by newRowObject(ParameterNames,RowObject).
# For parsing use the function evaluateExpression.
# Get names from the expression.
# Must merge this one with evaluateExpression.
# This is a VERY LIMITED version of what will be available
# once the language parser is implemented.
# For more ideas and info see LANGUAGE_REFERENCE.
# (An illustrative usage sketch is given after newRowObject below.)
# A more advanced version of the expression evaluator:
def evaluateExpressionPAR(ParameterNames,VarDictionary=None):
# RETURN: 1) Upper-level Expression names
# 2) Upper-level Expression values
# Is it reasonable to pass a Context to every parse function?
# For now the function does the following:
# 1) iterates through all UPPER-LEVEL list elements
# 2) if element is a par name: return par name
# if element is a BIND expression: return the bind name
# (see operationBIND)
# 3) if element is an anonymous expression: return #N(=1,2,3...)
# N.B. Binds can be only on the 0-th level of Expression
pass
def getContextFormat(RowObject):
# Get context format from the whole RowObject
ContextFormat = {}
for par_name,par_value,par_format in RowObject:
ContextFormat[par_name] = par_format
return ContextFormat
def getDefaultFormat(Type):
if Type is int:
return '%10d'
elif Type is float:
return '%25.15E'
elif Type is str:
return '%20s'
elif Type is bool:
return '%2d'
else:
raise Exception('Unknown type')
def getDefaultValue(Type):
if Type is int:
return 0
elif Type is float:
return 0.0
elif Type is str:
return ''
elif Type is bool:
return False
else:
raise Exception('Unknown type')
# VarDictionary = Context (this name is more suitable)
# GroupIndexKey is a key to special structure/dictionary GROUP_INDEX.
# GROUP_INDEX contains information needed to calculate streamed group functions
# such as COUNT, AVG, MIN, MAX etc...
def newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat,GroupIndexKey=None):
# Return a subset of RowObject according to ParameterNames.
# ParameterNames may include either par names
# or expressions containing par name literals.
# ContextFormat contains the format for each par name.
anoncount = 0
RowObjectNew = []
for expr in ParameterNames:
if type(expr) in set([list,tuple]): # bind
head = expr[0]
if head in set(['let','bind','LET','BIND']):
par_name = expr[1]
par_expr = expr[2]
else:
par_name = "#%d" % anoncount
anoncount += 1
par_expr = expr
par_value = evaluateExpression(par_expr,VarDictionary,GroupIndexKey)
try:
par_format = expr[3]
except:
par_format = getDefaultFormat(type(par_value))
else: # parname
par_name = expr
par_value = VarDictionary[par_name]
par_format = ContextFormat[par_name]
RowObjectNew.append((par_name,par_value,par_format))
return RowObjectNew
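# Illustrative sketch (not part of the original API): ParameterNames accepted
# by newRowObject() may mix plain parameter names, ('let',...) bindings and
# anonymous expressions; anonymous expressions get auto-generated "#N" names.
def _newRowObject_example():
    RowObject = [('p1',2,'%10d'),('p2',5,'%10d')]
    VarDictionary = getVarDictionary(RowObject)
    ContextFormat = getContextFormat(RowObject)
    ParameterNames = ('p1',('let','p_sum',('+','p1','p2')),('*','p1','p2'))
    return newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat)
    # -> [('p1',2,'%10d'),('p_sum',7,'%10d'),('#0',10,'%10d')]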
# ----------------------------------------------------
# /PARAMETER NAMES
# ----------------------------------------------------
# ----------------------------------------------------
# OPERATIONS ON TABLES
# ----------------------------------------------------
QUERY_BUFFER = '__BUFFER__'
def getTableList():
return LOCAL_TABLE_CACHE.keys()
def describeTable(TableName):
"""
INPUT PARAMETERS:
TableName: name of the table to describe
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Print information about table, including
parameter names, formats and wavenumber range.
---
EXAMPLE OF USAGE:
describeTable('sampletab')
---
"""
print('-----------------------------------------')
print(TableName+' summary:')
try:
print('-----------------------------------------')
print('Comment: \n'+LOCAL_TABLE_CACHE[TableName]['header']['comment'])
except:
pass
print('Number of rows: '+str(LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']))
print('Table type: '+str(LOCAL_TABLE_CACHE[TableName]['header']['table_type']))
print('-----------------------------------------')
print(' PAR_NAME PAR_FORMAT')
print('')
for par_name in LOCAL_TABLE_CACHE[TableName]['header']['order']:
par_format = LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]
print('%20s %20s' % (par_name,par_format))
print('-----------------------------------------')
# Write a table to File or STDOUT
def outputTable(TableName,Conditions=None,File=None,Header=True):
# Display or record table with condition checking
if File:
Header = False
OutputFile = open(File,'w')
if Header:
headstr = putTableHeaderToString(TableName)
if File:
OutputFile.write(headstr)
else:
print(headstr)
for RowID in range(0,LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']):
RowObject = getRowObject(RowID,TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
if not checkRowObject(RowObject,Conditions,VarDictionary):
continue
raw_string = putRowObjectToString(RowObject)
if File:
OutputFile.write(raw_string+'\n')
else:
print(raw_string)
# Create a table in a "prototype-based" way
def createTable(TableName,RowObjectDefault):
# create a Table based on a RowObjectDefault
LOCAL_TABLE_CACHE[TableName] = {}
header_order = []
header_format = {}
header_default = {}
data = {}
for par_name,par_value,par_format in RowObjectDefault:
header_order.append(par_name)
header_format[par_name] = par_format
header_default[par_name] = par_value
data[par_name] = []
#header_order = tuple(header_order) # XXX ?
LOCAL_TABLE_CACHE[TableName]['header']={}
LOCAL_TABLE_CACHE[TableName]['header']['order'] = header_order
LOCAL_TABLE_CACHE[TableName]['header']['format'] = header_format
LOCAL_TABLE_CACHE[TableName]['header']['default'] = header_default
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = 0
LOCAL_TABLE_CACHE[TableName]['header']['size_in_bytes'] = 0
LOCAL_TABLE_CACHE[TableName]['header']['table_name'] = TableName
LOCAL_TABLE_CACHE[TableName]['header']['table_type'] = 'column-fixed'
LOCAL_TABLE_CACHE[TableName]['data'] = data
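# Illustrative sketch (not part of the original API): creating an empty table
# from a prototype row; the table and parameter names here are hypothetical.
# createTable('sampletab_new',[('p1',0,'%10d'),('p2',0.0,'%25.15E')])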
# simple "drop table" capability
def dropTable(TableName):
"""
INPUT PARAMETERS:
TableName: name of the table to delete
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Deletes a table from local database.
---
EXAMPLE OF USAGE:
dropTable('some_dummy_table')
---
"""
# delete Table from both Cache and Storage
try:
#LOCAL_TABLE_CACHE[TableName] = {}
del LOCAL_TABLE_CACHE[TableName]
except:
pass
# delete from storage
pass # TODO
# Returns a column corresponding to parameter name
def getColumn(TableName,ParameterName):
"""
INPUT PARAMETERS:
TableName: source table name (required)
ParameterName: name of column to get (required)
OUTPUT PARAMETERS:
ColumnData: list of values from specified column
---
DESCRIPTION:
Returns a column with a name ParameterName from
table TableName. Column is returned as a list of values.
---
EXAMPLE OF USAGE:
p1 = getColumn('sampletab','p1')
---
"""
return LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]
# Returns a list of columns corresponding to parameter names
def getColumns(TableName,ParameterNames):
"""
INPUT PARAMETERS:
TableName: source table name (required)
ParameterNames: list of column names to get (required)
OUTPUT PARAMETERS:
ListColumnData: list of lists of values from the specified columns
---
DESCRIPTION:
Returns columns with names from ParameterNames from
table TableName. Columns are returned as a list of lists.
---
EXAMPLE OF USAGE:
p1,p2,p3 = getColumns('sampletab',('p1','p2','p3'))
---
"""
Columns = []
for par_name in ParameterNames:
Columns.append(LOCAL_TABLE_CACHE[TableName]['data'][par_name])
return Columns
def addColumn(TableName,ParameterName,Before=None,Expression=None,Type=None,Default=None,Format=None):
if ParameterName in LOCAL_TABLE_CACHE[TableName]['header']['format']:
raise Exception('Column \"%s\" already exists' % ParameterName)
if not Type: Type = float
if not Default: Default = getDefaultValue(Type)
if not Format: Format = getDefaultFormat(Type)
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# Mess with data
if not Expression:
LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]=[Default for i in range(0,number_of_rows)]
else:
data = []
for RowID in range(0,number_of_rows):
RowObject = getRowObject(RowID,TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
par_value = evaluateExpression(Expression,VarDictionary)
data.append(par_value)
LOCAL_TABLE_CACHE[TableName]['data'][ParameterName] = data
# Mess with header
header_order = LOCAL_TABLE_CACHE[TableName]['header']['order']
if not Before:
header_order.append(ParameterName)
else:
#i = 0
#for par_name in header_order:
# if par_name == Before: break
# i += 1
i = header_order.index(Before)
header_order = header_order[:i] + [ParameterName,] + header_order[i:]
LOCAL_TABLE_CACHE[TableName]['header']['order'] = header_order
LOCAL_TABLE_CACHE[TableName]['header']['format'][ParameterName] = Format
LOCAL_TABLE_CACHE[TableName]['header']['default'][ParameterName] = Default
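# Illustrative sketch (not part of the original API): adding a column derived
# from existing ones via the same prefix-expression language; 'sampletab',
# 'p1' and 'p2' are hypothetical names taken from the docstring examples above.
# addColumn('sampletab','p_sum',Expression=('+','p1','p2'),Type=float)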
def deleteColumn(TableName,ParameterName):
if ParameterName not in LOCAL_TABLE_CACHE[TableName]['header']['format']:
raise Exception('No such column \"%s\"' % ParameterName)
# Mess with header
i = LOCAL_TABLE_CACHE[TableName]['header']['order'].index(ParameterName)
del LOCAL_TABLE_CACHE[TableName]['header']['order'][i]
del LOCAL_TABLE_CACHE[TableName]['header']['format'][ParameterName]
del LOCAL_TABLE_CACHE[TableName]['header']['default'][ParameterName]
if not LOCAL_TABLE_CACHE[TableName]['header']['order']:
LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows'] = 0
# Mess with data
del LOCAL_TABLE_CACHE[TableName]['data'][ParameterName]
def deleteColumns(TableName,ParameterNames):
if type(ParameterNames) not in set([list,tuple,set]):
ParameterNames = [ParameterNames]
for ParameterName in ParameterNames:
deleteColumn(TableName,ParameterName)
def renameColumn(TableName,OldParameterName,NewParameterName):
pass
def insertRow():
pass
def deleteRows(TableName,ParameterNames,Conditions):
pass
# select from table to another table
def selectInto(DestinationTableName,TableName,ParameterNames,Conditions):
# TableName must refer to an existing table in cache!!
# Conditions = Restrictables in specific format
# Sample conditions: cond = {'par1':{'range',[b_lo,b_hi]},'par2':b}
# return structure similar to TableObject and put it to QUERY_BUFFER
# if ParameterNames is '*' then all parameters are used
#table_columns = LOCAL_TABLE_CACHE[TableName]['data'].keys()
#table_length = len(TableObject['header']['number_of_rows'])
#if ParameterNames=='*':
# ParameterNames = table_columns
# check if Conditions contain elements which are not in the TableObject
#condition_variables = getConditionVariables(Conditions)
#strange_pars = set(condition_variables)-set(table_variables)
#if strange_pars:
# raise Exception('The following parameters are not in the table \"%s\"' % (TableName,list(strange_pars)))
# do full scan each time
if DestinationTableName == TableName:
raise Exception('Selecting into source table is forbidden')
table_length = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
row_count = 0
for RowID in range(0,table_length):
RowObject = getRowObject(RowID,TableName)
VarDictionary = getVarDictionary(RowObject)
VarDictionary['LineNumber'] = RowID
ContextFormat = getContextFormat(RowObject)
RowObjectNew = newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat)
if checkRowObject(RowObject,Conditions,VarDictionary):
addRowObject(RowObjectNew,DestinationTableName)
row_count += 1
LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] += row_count
def length(TableName):
tab_len = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
#print(str(tab_len)+' rows in '+TableName)
return tab_len
# Select parameters from a table with certain conditions.
# Parameters can be the names or expressions.
# Conditions contain a list of expressions in a special language.
# Set Output to False to suppress output
# Set File=FileName to redirect output to a file.
def select(TableName,DestinationTableName=QUERY_BUFFER,ParameterNames=None,Conditions=None,Output=True,File=None):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions (optional)
Conditions: list of logical expressions (optional)
Output: enable (True) or suppress (False) text output (optional)
File: name of file to redirect the output to (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Select or filter the data in some table
either to standard output or to file (if specified)
---
EXAMPLE OF USAGE:
select('sampletab',DestinationTableName='outtab',ParameterNames=(p1,p2),
Conditions=(('and',('>=','p1',1),('<',('*','p1','p2'),20))))
Conditions means (p1>=1 and p1*p2<20)
---
"""
# TODO: Variables defined in ParameterNames ('LET') MUST BE VISIBLE IN Conditions !!
# check if table exists
if TableName not in LOCAL_TABLE_CACHE.keys():
raise Exception('%s: no such table. Check tableList() for more info.' % TableName)
if not ParameterNames: ParameterNames=LOCAL_TABLE_CACHE[TableName]['header']['order']
LOCAL_TABLE_CACHE[DestinationTableName] = {} # clear QUERY_BUFFER for the new result
RowObjectDefault = getDefaultRowObject(TableName)
VarDictionary = getVarDictionary(RowObjectDefault)
ContextFormat = getContextFormat(RowObjectDefault)
RowObjectDefaultNew = newRowObject(ParameterNames,RowObjectDefault,VarDictionary,ContextFormat)
dropTable(DestinationTableName) # redundant
createTable(DestinationTableName,RowObjectDefaultNew)
selectInto(DestinationTableName,TableName,ParameterNames,Conditions)
if DestinationTableName!=QUERY_BUFFER:
if File: outputTable(DestinationTableName,File=File)
elif Output:
outputTable(DestinationTableName,File=File)
# SORTING ===========================================================
def arrangeTable(TableName,DestinationTableName=None,RowIDList=None):
#print 'AT/'
#print 'AT: RowIDList = '+str(RowIDList)
# make a subset of table rows according to RowIDList
if not DestinationTableName:
DestinationTableName = TableName
if DestinationTableName != TableName:
dropTable(DestinationTableName)
LOCAL_TABLE_CACHE[DestinationTableName]['header']=LOCAL_TABLE_CACHE[TableName]['header']
LOCAL_TABLE_CACHE[DestinationTableName]['data']={}
LOCAL_TABLE_CACHE[DestinationTableName]['header']['number_of_rows'] = len(RowIDList)
#print 'AT: RowIDList = '+str(RowIDList)
for par_name in LOCAL_TABLE_CACHE[DestinationTableName]['header']['order']:
par_data = LOCAL_TABLE_CACHE[TableName]['data'][par_name]
LOCAL_TABLE_CACHE[DestinationTableName]['data'][par_name] = [par_data[i] for i in RowIDList]
def compareLESS(RowObject1,RowObject2,ParameterNames):
#print 'CL/'
# arg1 and arg2 are RowObjects
# Compare them according to ParameterNames
# Simple validity check:
#if len(arg1) != len(arg2):
# raise Exception('Arguments have different lengths')
#RowObject1Subset = subsetOfRowObject(ParameterNames,RowObject1)
#RowObject2Subset = subsetOfRowObject(ParameterNames,RowObject2)
#return RowObject1Subset < RowObject2Subset
row1 = []
row2 = []
#n = len(RowObject1)
#for i in range(0,n):
# par_name1 = RowObject1[i][0]
# if par_name1 in ParameterNames:
# par_value1 = RowObject1[i][1]
# par_value2 = RowObject2[i][1]
# row1 += [par_value1]
# row2 += [par_value2]
VarDictionary1 = getVarDictionary(RowObject1)
VarDictionary2 = getVarDictionary(RowObject2)
for par_name in ParameterNames:
par_value1 = VarDictionary1[par_name]
par_value2 = VarDictionary2[par_name]
row1 += [par_value1]
row2 += [par_value2]
Flag = row1 < row2
return Flag
def quickSort(index,TableName,ParameterNames,Accending=True):
# ParameterNames: names of parameters which are
# taking part in the sorting
if index == []:
return []
else:
PivotID = index[0]
Pivot = getRowObject(PivotID,TableName)
lesser_index = []
greater_index = [];
for RowID in index[1:]:
RowObject = getRowObject(RowID,TableName)
if compareLESS(RowObject,Pivot,ParameterNames):
lesser_index += [RowID]
else:
greater_index += [RowID]
lesser = quickSort(lesser_index,TableName,ParameterNames,Accending)
greater = quickSort(greater_index,TableName,ParameterNames,Accending)
if Accending:
return lesser + [PivotID] + greater
else:
return greater + [PivotID] + lesser
# Sorting must work well on the table itself!
def sort(TableName,DestinationTableName=None,ParameterNames=None,Accending=True,Output=False,File=None):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions to sort by (optional)
Accending: sort in ascending (True) or descending (False) order (optional)
Output: enable (True) or suppress (False) text output (optional)
File: name of file to redirect the output to (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Sort a table by a list of its parameters or expressions.
The sorted table is saved in DestinationTableName (if specified).
---
EXAMPLE OF USAGE:
sort('sampletab',ParameterNames=(p1,('+',p1,p2)))
---
"""
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
index = list(range(0,number_of_rows)) # use a list so quickSort's slicing and empty-check work in Python 3
if not DestinationTableName:
DestinationTableName = TableName
# if names are not provided use all parameters in sorting
if not ParameterNames:
ParameterNames = LOCAL_TABLE_CACHE[TableName]['header']['order']
elif type(ParameterNames) not in set([list,tuple]):
ParameterNames = [ParameterNames] # fix of stupid bug where ('p1',) != ('p1')
index_sorted = quickSort(index,TableName,ParameterNames,Accending)
arrangeTable(TableName,DestinationTableName,index_sorted)
if Output:
outputTable(DestinationTableName,File=File)
# /SORTING ==========================================================
# GROUPING ==========================================================
# GROUP_INDEX global auxiliary structure is a Dictionary,
# which has the following properties:
# 1) Each key is a composite variable:
# [array of values of ParameterNames variable
# STREAM_UPDATE_FLAG]
# 2) Each value is an index in LOCAL_TABLE_CACHE[TableName]['data'][...],
# corresponding to this key
# STREAM_UPDATE_FLAG = TRUE if value in GROUP_INDEX needs updating
# = FALSE otherwise
# If no grouping variables are specified (GroupParameterNames==None)
# then the following key is used: "__GLOBAL__"
def group(TableName,DestinationTableName=QUERY_BUFFER,ParameterNames=None,GroupParameterNames=None,File=None,Output=True):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
DestinationTableName: name of resulting table (optional)
ParameterNames: list of parameters or expressions to take (optional)
GroupParameterNames: list of parameters or expressions to group by (optional)
File: name of file to redirect the output to (optional)
Output: enable (True) or suppress (False) text output (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Group the table rows by GroupParameterNames and evaluate group
(aggregate) functions, such as COUNT, given in ParameterNames for each group.
---
EXAMPLE OF USAGE:
group('sampletab',ParameterNames=('p1',('sum','p2')),GroupParameterNames=('p1'))
... groups the rows by p1 and, for each group, calculates the sum of the p2 values.
---
"""
# Implements such functions as:
# count,sum,avg,min,max,ssq etc...
# 1) ParameterNames can contain group functions
# 2) GroupParameterNames can't contain group functions
# 3) If ParameterNames contains parameters defined by LET directive,
# it IS visible in the sub-context of GroupParameterNames
# 4) Parameters defined in GroupParameterNames are NOT visible in ParameterNames
# 5) ParameterNames variable represents the structure of the resulting table/collection
# 6) GroupParameterNames can contain either par_names or expressions with par_names
# Clear old GROUP_INDEX value
clearGroupIndex()
# Consistency check
if TableName == DestinationTableName:
raise Exception('TableName and DestinationTableName must be different')
#if not ParameterNames: ParameterNames=LOCAL_TABLE_CACHE[TableName]['header']['order']
# Prepare the new DestinationTable
RowObjectDefault = getDefaultRowObject(TableName)
VarDictionary = getVarDictionary(RowObjectDefault)
ContextFormat = getContextFormat(RowObjectDefault)
RowObjectDefaultNew = newRowObject(ParameterNames,RowObjectDefault,VarDictionary,ContextFormat)
dropTable(DestinationTableName) # redundant
createTable(DestinationTableName,RowObjectDefaultNew)
# Loop through rows of source Table
# On each iteration group functions update GROUP_INDEX (see description above)
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# STAGE 1: CREATE GROUPS
print('LOOP:')
for RowID in range(0,number_of_rows):
print('--------------------------------')
print('RowID='+str(RowID))
RowObject = getRowObject(RowID,TableName) # RowObject from source table
VarDictionary = getVarDictionary(RowObject)
print('VarDictionary='+str(VarDictionary))
# This is a trick which prevents the evaluateExpression function
# from treating the first element as an operation
GroupParameterNames_ = ['LIST'] + list(GroupParameterNames)
GroupIndexKey = evaluateExpression(GroupParameterNames_,VarDictionary)
# List is an unhashable type in Python!
GroupIndexKey = tuple(GroupIndexKey)
initializeGroup(GroupIndexKey)
print('GROUP_INDEX='+str(GROUP_INDEX))
ContextFormat = getContextFormat(RowObject)
RowObjectNew = newRowObject(ParameterNames,RowObject,VarDictionary,ContextFormat,GroupIndexKey)
RowIDGroup = GROUP_INDEX[GroupIndexKey]['ROWID']
setRowObject(RowIDGroup,RowObjectNew,DestinationTableName)
# Output result if required
if Output and DestinationTableName==QUERY_BUFFER:
outputTable(DestinationTableName,File=File)
# /GROUPING =========================================================
# EXTRACTING ========================================================
REGEX_INTEGER = '[+-]?\d+'
REGEX_STRING = '[^\s]+'
REGEX_FLOAT_F = '[+-]?\d*\.?\d+'
REGEX_FLOAT_E = '[+-]?\d*\.?\d+[eEfF]?[+-]?\d+'
REGEX_INTEGER_FIXCOL = lambda n: '\d{%d}' % n
REGEX_STRING_FIXCOL = lambda n: '[^\s]{%d}' % n
REGEX_FLOAT_F_FIXCOL = lambda n: '[\+\-\.\d]{%d}' % n
REGEX_FLOAT_E_FIXCOL = lambda n: '[\+\-\.\deEfF]{%d}' % n
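# Illustrative sketch (not part of the original API): the fixed-column helpers
# simply expand to regexes of a given width, e.g.
#   REGEX_INTEGER_FIXCOL(3) -> '\d{3}'
#   REGEX_STRING_FIXCOL(5)  -> '[^\s]{5}'
# extractColumns() below joins such pieces with '\s*' to parse a packed column.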
# Extract sub-columns from string column
def extractColumns(TableName,SourceParameterName,ParameterFormats,ParameterNames=None,FixCol=False):
"""
INPUT PARAMETERS:
TableName: name of source table (required)
SourceParameterName: name of source column to process (required)
ParameterFormats: C-style formats of the unpacked parameters (required)
ParameterNames: list of resulting parameter names (optional)
FixCol: column-fixed (True) format of source column (optional)
OUTPUT PARAMETERS:
none
---
DESCRIPTION:
Note that this function does some extra work in
interpreting string parameters, which is normally supposed
to be done by the user.
---
EXAMPLE OF USAGE:
extractColumns('sampletab',SourceParameterName='p5',
ParameterFormats=('%d','%d','%d'),
ParameterNames=('p5_1','p5_2','p5_3'))
This example extracts three integer parameters from
a source column 'p5' and puts results in ('p5_1','p5_2','p5_3').
---
"""
# ParameterNames = just the names, without expressions
# ParameterFormats contains Python formats for parameter extraction
# Example: ParameterNames=('v1','v2','v3')
# ParameterFormats=('%1s','%1s','%1s')
# By default the format of parameters is column-fixed
if type(LOCAL_TABLE_CACHE[TableName]['header']['default'][SourceParameterName]) not in set([str,unicode]):
raise Exception('Source parameter must be a string')
i=-1
# bug when (a,) != (a)
if ParameterNames and type(ParameterNames) not in set([list,tuple]):
ParameterNames = [ParameterNames]
if ParameterFormats and type(ParameterFormats) not in set([list,tuple]):
ParameterFormats = [ParameterFormats]
# if ParameterNames is empty, fill it with #0, #1, #2, ...
if not ParameterNames:
ParameterNames = []
# using the naming convention #i, i=0,1,2,3...
for par_format in ParameterFormats:
while True:
i+=1
par_name = '#%d' % i
fmt = LOCAL_TABLE_CACHE[TableName]['header']['format'].get(par_name,None)
if not fmt: break
ParameterNames.append(par_name)
# check if ParameterNames are valid
Intersection = set(ParameterNames).intersection(LOCAL_TABLE_CACHE[TableName]['header']['order'])
if Intersection:
raise Exception('Parameters %s already exist' % str(list(Intersection)))
# loop over ParameterNames to prepare LOCAL_TABLE_CACHE
i=0
for par_name in ParameterNames:
par_format = ParameterFormats[i]
LOCAL_TABLE_CACHE[TableName]['header']['format'][par_name]=par_format
LOCAL_TABLE_CACHE[TableName]['data'][par_name]=[]
i+=1
# append new parameters in order list
LOCAL_TABLE_CACHE[TableName]['header']['order'] += ParameterNames
# cope with default values
i=0
format_regex = []
format_types = []
for par_format in ParameterFormats:
par_name = ParameterNames[i]
regex = FORMAT_PYTHON_REGEX
(lng,trail,lngpnt,ty) = re.search(regex,par_format).groups()
ty = ty.lower()
if ty == 'd':
par_type = int
if FixCol:
format_regex_part = REGEX_INTEGER_FIXCOL(lng)
else:
format_regex_part = REGEX_INTEGER
elif ty == 's':
par_type = str
if FixCol:
format_regex_part = REGEX_STRING_FIXCOL(lng)
else:
format_regex_part = REGEX_STRING
elif ty == 'f':
par_type = float
if FixCol:
format_regex_part = REGEX_FLOAT_F_FIXCOL(lng)
else:
format_regex_part = REGEX_FLOAT_F
elif ty == 'e':
par_type = float
if FixCol:
format_regex_part = REGEX_FLOAT_E_FIXCOL(lng)
else:
format_regex_part = REGEX_FLOAT_E
else:
raise Exception('Unknown data type')
format_regex.append('('+format_regex_part+')')
format_types.append(par_type)
def_val = getDefaultValue(par_type)
LOCAL_TABLE_CACHE[TableName]['header']['default'][par_name]=def_val
i+=1
format_regex = '\s*'.join(format_regex)
# loop through values of SourceParameter
for SourceParameterString in LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName]:
try:
ExtractedValues = list(re.search(format_regex,SourceParameterString).groups())
except:
raise Exception('Error with line \"%s\"' % SourceParameterString)
i=0
# loop through all parameters which are supposed to be extracted
for par_name in ParameterNames:
par_value = format_types[i](ExtractedValues[i])
LOCAL_TABLE_CACHE[TableName]['data'][par_name].append(par_value)
i+=1
# explicitly check that the numbers of rows are equal
number_of_rows = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
number_of_rows2 = len(LOCAL_TABLE_CACHE[TableName]['data'][SourceParameterName])
number_of_rows3 = len(LOCAL_TABLE_CACHE[TableName]['data'][ParameterNames[0]])
if not (number_of_rows == number_of_rows2 == number_of_rows3):
raise Exception('Error while extracting parameters: check your regexp')
# Split string columns into sub-columns with given names
def splitColumn(TableName,SourceParameterName,ParameterNames,Splitter):
pass
# /EXTRACTING =======================================================
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# /LOCAL DATABASE MANAGEMENT SYSTEM
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# GLOBAL API FUNCTIONS
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def mergeParlist(*arg):
# Merge parlists and remove duplicates.
# Argument contains a list of lists/tuples.
container = []
for a in arg:
container += list(a)
result = []
index = set()
for par_name in container:
if par_name not in index:
index.add(par_name)
result.append(par_name)
return result
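# Illustrative sketch (not part of the original API): mergeParlist keeps the
# first occurrence of each name and preserves order, e.g.
#   mergeParlist(['nu','sw'],['sw','gamma_air']) -> ['nu','sw','gamma_air']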
# Define parameter groups to simplify the usage of fetch_
PARLIST_DOTPAR = ['par_line',]
PARLIST_ID = ['trans_id',]
PARLIST_STANDARD = ['molec_id','local_iso_id','nu','sw','a','elower','gamma_air',
'delta_air','gamma_self','n_air','n_self','gp','gpp']
PARLIST_LABELS = ['statep','statepp']
#PARLIST_LINEMIXING = ['y_air','y_self']
PARLIST_VOIGT_AIR = ['gamma_air','delta_air','deltap_air','n_air']
PARLIST_VOIGT_SELF = ['gamma_self','delta_self','deltap_self','n_self']
PARLIST_VOIGT_H2 = ['gamma_H2','delta_H2','deltap_H2','n_H2']
PARLIST_VOIGT_CO2 = ['gamma_CO2','delta_CO2','n_CO2']
PARLIST_VOIGT_HE = ['gamma_He','delta_He','n_He']
PARLIST_VOIGT_H2O = ['gamma_H2O','n_H2O']
PARLIST_VOIGT_LINEMIXING = ['y_air','y_self']
PARLIST_VOIGT_ALL = mergeParlist(PARLIST_VOIGT_AIR,PARLIST_VOIGT_SELF,
PARLIST_VOIGT_H2,PARLIST_VOIGT_CO2,
PARLIST_VOIGT_HE,PARLIST_VOIGT_H2O,
PARLIST_VOIGT_LINEMIXING)
PARLIST_SDVOIGT_AIR = ['gamma_air','delta_air','deltap_air','n_air','SD_air']
PARLIST_SDVOIGT_SELF = ['gamma_self','delta_self','deltap_self','n_self','SD_self']
PARLIST_SDVOIGT_H2 = []
PARLIST_SDVOIGT_CO2 = []
PARLIST_SDVOIGT_HE = []
PARLIST_SDVOIGT_LINEMIXING = ['Y_SDV_air_296','Y_SDV_self_296']
PARLIST_SDVOIGT_ALL = mergeParlist(PARLIST_SDVOIGT_AIR,PARLIST_SDVOIGT_SELF,
PARLIST_SDVOIGT_H2,PARLIST_SDVOIGT_CO2,
PARLIST_SDVOIGT_HE,PARLIST_SDVOIGT_LINEMIXING)
PARLIST_GALATRY_AIR = ['gamma_air','delta_air','deltap_air','n_air','beta_g_air']
PARLIST_GALATRY_SELF = ['gamma_self','delta_self','deltap_self','n_self','beta_g_self']
PARLIST_GALATRY_H2 = []
PARLIST_GALATRY_CO2 = []
PARLIST_GALATRY_HE = []
PARLIST_GALATRY_ALL = mergeParlist(PARLIST_GALATRY_AIR,PARLIST_GALATRY_SELF,
PARLIST_GALATRY_H2,PARLIST_GALATRY_CO2,
PARLIST_GALATRY_HE)
PARLIST_HT_SELF = ['gamma_HT_0_self_50','n_HT_self_50','gamma_HT_2_self_50',
'delta_HT_0_self_50','deltap_HT_self_50','delta_HT_2_self_50',
'gamma_HT_0_self_150','n_HT_self_150','gamma_HT_2_self_150',
'delta_HT_0_self_150','deltap_HT_self_150','delta_HT_2_self_150',
'gamma_HT_0_self_296','n_HT_self_296','gamma_HT_2_self_296',
'delta_HT_0_self_296','deltap_HT_self_296','delta_HT_2_self_296',
'gamma_HT_0_self_700','n_HT_self_700','gamma_HT_2_self_700',
'delta_HT_0_self_700','deltap_HT_self_700','delta_HT_2_self_700',
'nu_HT_self','kappa_HT_self','eta_HT_self','Y_HT_self_296']
#PARLIST_HT_AIR = ['gamma_HT_0_air_50','n_HT_air_50','gamma_HT_2_air_50',
# 'delta_HT_0_air_50','deltap_HT_air_50','delta_HT_2_air_50',
# 'gamma_HT_0_air_150','n_HT_air_150','gamma_HT_2_air_150',
# 'delta_HT_0_air_150','deltap_HT_air_150','delta_HT_2_air_150',
# 'gamma_HT_0_air_296','n_HT_air_296','gamma_HT_2_air_296',
# 'delta_HT_0_air_296','deltap_HT_air_296','delta_HT_2_air_296',
# 'gamma_HT_0_air_700','n_HT_air_700','gamma_HT_2_air_700',
# 'delta_HT_0_air_700','deltap_HT_air_700','delta_HT_2_air_700',
# 'nu_HT_air','kappa_HT_air','eta_HT_air']
PARLIST_HT_AIR = ['gamma_HT_0_air_296','n_HT_air_296','gamma_HT_2_air_296',
'delta_HT_0_air_296','deltap_HT_air_296','delta_HT_2_air_296',
'nu_HT_air','kappa_HT_air','eta_HT_air','Y_HT_air_296']
PARLIST_HT_ALL = mergeParlist(PARLIST_HT_SELF,PARLIST_HT_AIR)
PARLIST_ALL = mergeParlist(PARLIST_ID,PARLIST_DOTPAR,PARLIST_STANDARD,
PARLIST_LABELS,PARLIST_VOIGT_ALL,
PARLIST_SDVOIGT_ALL,PARLIST_GALATRY_ALL,
PARLIST_HT_ALL)
PARAMETER_GROUPS = {
'par_line' : PARLIST_DOTPAR,
'160-char' : PARLIST_DOTPAR,
'.par' : PARLIST_DOTPAR,
'id' : PARLIST_ID,
'standard' : PARLIST_STANDARD,
'labels' : PARLIST_LABELS,
#'linemixing' : PARLIST_LINEMIXING,
'voigt_air' : PARLIST_VOIGT_AIR,
'voigt_self' : PARLIST_VOIGT_SELF,
'voigt_h2' : PARLIST_VOIGT_H2,
'voigt_co2' : PARLIST_VOIGT_CO2,
'voigt_he' : PARLIST_VOIGT_HE,
'voigt_h2o' : PARLIST_VOIGT_H2O,
'voigt_linemixing': PARLIST_VOIGT_LINEMIXING,
'voigt' : PARLIST_VOIGT_ALL,
'sdvoigt_air' : PARLIST_SDVOIGT_AIR,
'sdvoigt_self' : PARLIST_SDVOIGT_SELF,
'sdvoigt_h2' : PARLIST_SDVOIGT_H2,
'sdvoigt_co2' : PARLIST_SDVOIGT_CO2,
'sdvoigt_he' : PARLIST_SDVOIGT_HE,
'sdvoigt_linemixing': PARLIST_SDVOIGT_LINEMIXING,
'sdvoigt' : PARLIST_SDVOIGT_ALL,
'galatry_air' : PARLIST_GALATRY_AIR,
'galatry_self' : PARLIST_GALATRY_SELF,
'galatry_h2' : PARLIST_GALATRY_H2,
'galatry_co2' : PARLIST_GALATRY_CO2,
'galatry_he' : PARLIST_GALATRY_HE,
'galatry' : PARLIST_GALATRY_ALL,
'ht' : PARLIST_HT_ALL,
'all' : PARLIST_ALL
}
def prepareParlist(pargroups=[],params=[],dotpar=True):
# Apply defaults
parlist_default = []
if dotpar:
parlist_default += ['par_line']
#parlist_default += PARAMETER_GROUPS['id']
# Make a dictionary of "assumed" parameters.
ASSUMED_PARAMS = {}
if 'par_line' in set(parlist_default):
ASSUMED_PARAMS = HITRAN_DEFAULT_HEADER['format']
parlist = parlist_default
# Iterate over parameter groups.
for pargroup in pargroups:
pargroup = pargroup.lower()
parlist += PARAMETER_GROUPS[pargroup]
# Iterate over single parameters.
for param in params:
#param = param.lower()
parlist.append(param)
# Clean up parameter list.
parlist = mergeParlist(parlist)
result = []
for param in parlist:
if param not in ASSUMED_PARAMS:
result.append(param)
return result
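# Illustrative sketch (not part of the original API): a typical call for a
# Voigt/air-broadening line list. With dotpar=True the 160-character record is
# requested as 'par_line', and any requested parameter that is already part of
# HITRAN_DEFAULT_HEADER['format'] is dropped from the extra-parameter list.
# prepareParlist(pargroups=['voigt_air'],params=['deltap_air'])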
def prepareHeader(parlist):
HEADER = {'table_name':'','number_of_rows':-1,'format':{},
'default':{},'table_type':'column-fixed',
'size_in_bytes':-1,'order':[],'description':{}}
# Add column-fixed 160-character part, if specified in parlist.
if 'par_line' in set(parlist):
HEADER['order'] = HITRAN_DEFAULT_HEADER['order']
HEADER['format'] = HITRAN_DEFAULT_HEADER['format']
HEADER['default'] = HITRAN_DEFAULT_HEADER['default']
HEADER['description'] = HITRAN_DEFAULT_HEADER['description']
HEADER['position'] = HITRAN_DEFAULT_HEADER['position']
# Insert all other parameters in the "extra" section of the header.
plist = [v for v in parlist if v!='par_line']
HEADER['extra'] = []
HEADER['extra_format'] = {}
HEADER['extra_separator'] = ','
for param in plist:
param = param.lower()
HEADER['extra'].append(param)
HEADER['extra_format'][param] = PARAMETER_META[param]['default_fmt']
return HEADER
def queryHITRAN(TableName,iso_id_list,numin,numax,pargroups=[],params=[],dotpar=True,head=False):
ParameterList = prepareParlist(pargroups=pargroups,params=params,dotpar=dotpar)
TableHeader = prepareHeader(ParameterList)
TableHeader['table_name'] = TableName
DataFileName = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.data'
HeaderFileName = VARIABLES['BACKEND_DATABASE_NAME'] + '/' + TableName + '.header'
# create URL
iso_id_list_str = [str(iso_id) for iso_id in iso_id_list]
iso_id_list_str = ','.join(iso_id_list_str)
print('\nData is fetched from %s\n'%VARIABLES['GLOBAL_HOST'])
if pargroups or params: # custom par search
url = VARIABLES['GLOBAL_HOST'] + '/lbl/api?' + \
'iso_ids_list=' + iso_id_list_str + '&' + \
'numin=' + str(numin) + '&' + \
'numax=' + str(numax) + '&' + \
'head=' + str(head) + '&' + \
'fixwidth=0&sep=[comma]&' +\
'request_params=' + ','.join(ParameterList)
else: # old-fashioned .par search
url = VARIABLES['GLOBAL_HOST'] + '/lbl/api?' + \
'iso_ids_list=' + iso_id_list_str + '&' + \
'numin=' + str(numin) + '&' + \
'numax=' + str(numax)
#raise Exception(url)
# Download data by chunks.
if VARIABLES['DISPLAY_FETCH_URL']: print(url+'\n')
try:
# Proxy handling # https://stackoverflow.com/questions/1450132/proxy-with-urllib2
if VARIABLES['PROXY']:
print('Using proxy '+str(VARIABLES['PROXY']))
proxy = urllib2.ProxyHandler(VARIABLES['PROXY'])
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
req = urllib2.urlopen(url)
except urllib2.HTTPError:
raise Exception('Failed to retrieve data for given parameters.')
except urllib2.URLError:
raise Exception('Cannot connect to %s. Try again or edit GLOBAL_HOST variable.' % VARIABLES['GLOBAL_HOST'])
CHUNK = 64 * 1024
print('BEGIN DOWNLOAD: '+TableName)
with open_(DataFileName,'w') as fp:
while True:
chunk = req.read(CHUNK)
if not chunk: break
fp.write(chunk.decode('utf-8'))
print(' %d bytes written to %s' % (len(chunk),DataFileName))
with open(HeaderFileName,'w') as fp:
fp.write(json.dumps(TableHeader,indent=2))
print('Header written to %s' % HeaderFileName)
print('END DOWNLOAD')
# Set comment
# Get this table to LOCAL_TABLE_CACHE
storage2cache(TableName)
print('PROCESSED')
def saveHeader(TableName):
ParameterList = prepareParlist(dotpar=True)
TableHeader = prepareHeader(ParameterList)
with open(TableName+'.header','w') as fp:
fp.write(json.dumps(TableHeader,indent=2))
# ---------- DATABASE FRONTEND END -------------
# simple implementation of getting a line list from a remote server
def getLinelist(local_name,query,api_key):
return makeQuery(local_name)
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# / GLOBAL API FUNCTIONS
# -------------------------------------------------------------------
# -------------------------------------------------------------------
# ---------------- FILTER ---------------------------------------------
def filter(TableName,Conditions):
select(TableName=TableName,Conditions=Conditions,Output=False)
# ---------------------- ISO.PY ---------------------------------------
ISO_ID_INDEX = {
'M':0,
'I':1,
'iso_name':2,
'abundance':3,
'mass':4,
'mol_name':5
}
# id M I iso_name abundance mass mol_name
ISO_ID = {
1 : [ 1, 1, 'H2(16O)', 0.997317, 18.010565, 'H2O' ],
2 : [ 1, 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O' ],
3 : [ 1, 3, 'H2(17O)', 0.000372, 19.01478, 'H2O' ],
4 : [ 1, 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O' ],
5 : [ 1, 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O' ],
6 : [ 1, 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O' ],
129 : [ 1, 7, 'D2(16O)', 0.000000024197, 20.022915, 'H2O' ],
7 : [ 2, 1, '(12C)(16O)2', 0.984204, 43.98983, 'CO2' ],
8 : [ 2, 2, '(13C)(16O)2', 0.011057, 44.993185, 'CO2' ],
9 : [ 2, 3, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2' ],
10 : [ 2, 4, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2' ],
11 : [ 2, 5, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2' ],
12 : [ 2, 6, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2' ],
13 : [ 2, 7, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2' ],
14 : [ 2, 8, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2' ],
121 : [ 2, 9, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2' ],
15 : [ 2, 10, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2' ], # 0->11
120 : [ 2, 11, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2' ], # 'A'->11
122 : [ 2, 12, '(13C)(17O)2', 0.0000000015375, 47.001618, 'CO2' ], # 'B'->12
16 : [ 3, 1, '(16O)3', 0.992901, 47.984745, 'O3' ],
17 : [ 3, 2, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3' ],
18 : [ 3, 3, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3' ],
19 : [ 3, 4, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3' ],
20 : [ 3, 5, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3' ],
21 : [ 4, 1, '(14N)2(16O)', 0.990333, 44.001062, 'N2O' ],
22 : [ 4, 2, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O' ],
23 : [ 4, 3, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O' ],
24 : [ 4, 4, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O' ],
25 : [ 4, 5, '(14N)2(17O)', 0.000369, 45.005278, 'N2O' ],
26 : [ 5, 1, '(12C)(16O)', 0.98654, 27.994915, 'CO' ],
27 : [ 5, 2, '(13C)(16O)', 0.01108, 28.99827, 'CO' ],
28 : [ 5, 3, '(12C)(18O)', 0.0019782, 29.999161, 'CO' ],
29 : [ 5, 4, '(12C)(17O)', 0.000368, 28.99913, 'CO' ],
30 : [ 5, 5, '(13C)(18O)', 0.00002222, 31.002516, 'CO' ],
31 : [ 5, 6, '(13C)(17O)', 0.00000413, 30.002485, 'CO' ],
32 : [ 6, 1, '(12C)H4', 0.98827, 16.0313, 'CH4' ],
33 : [ 6, 2, '(13C)H4', 0.0111, 17.034655, 'CH4' ],
34 : [ 6, 3, '(12C)H3D', 0.00061575, 17.037475, 'CH4' ],
35 : [ 6, 4, '(13C)H3D', 0.0000049203, 18.04083, 'CH4' ],
36 : [ 7, 1, '(16O)2', 0.995262, 31.98983, 'O2' ],
37 : [ 7, 2, '(16O)(18O)', 0.00399141, 33.994076, 'O2' ],
38 : [ 7, 3, '(16O)(17O)', 0.000742, 32.994045, 'O2' ],
39 : [ 8, 1, '(14N)(16O)', 0.993974, 29.997989, 'NO' ],
40 : [ 8, 2, '(15N)(16O)', 0.0036543, 30.995023, 'NO' ],
41 : [ 8, 3, '(14N)(18O)', 0.00199312, 32.002234, 'NO' ],
42 : [ 9, 1, '(32S)(16O)2', 0.94568, 63.961901, 'SO2' ],
43 : [ 9, 2, '(34S)(16O)2', 0.04195, 65.957695, 'SO2' ],
44 : [ 10, 1, '(14N)(16O)2', 0.991616, 45.992904, 'NO2' ],
45 : [ 11, 1, '(14N)H3', 0.9958715, 17.026549, 'NH3' ],
46 : [ 11, 2, '(15N)H3', 0.0036613, 18.023583, 'NH3' ],
47 : [ 12, 1, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3' ],
117 : [ 12, 2, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3' ],
48 : [ 13, 1, '(16O)H', 0.997473, 17.00274, 'OH' ],
49 : [ 13, 2, '(18O)H', 0.00200014, 19.006986, 'OH' ],
50 : [ 13, 3, '(16O)D', 0.00015537, 18.008915, 'OH' ],
51 : [ 14, 1, 'H(19F)', 0.99984425, 20.006229, 'HF' ],
110 : [ 14, 2, 'D(19F)', 0.000115, 21.0125049978, 'HF' ],
52 : [ 15, 1, 'H(35Cl)', 0.757587, 35.976678, 'HCl' ],
53 : [ 15, 2, 'H(37Cl)', 0.242257, 37.973729, 'HCl' ],
107 : [ 15, 3, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl' ],
108 : [ 15, 4, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl' ],
54 : [ 16, 1, 'H(79Br)', 0.50678, 79.92616, 'HBr' ],
55 : [ 16, 2, 'H(81Br)', 0.49306, 81.924115, 'HBr' ],
111 : [ 16, 3, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr' ],
112 : [ 16, 4, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr' ],
56 : [ 17, 1, 'H(127I)', 0.99984425, 127.912297, 'HI' ],
113 : [ 17, 2, 'D(127I)', 0.000115, 128.918574778, 'HI' ],
57 : [ 18, 1, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO' ],
58 : [ 18, 2, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO' ],
59 : [ 19, 1, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS' ],
60 : [ 19, 2, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS' ],
61 : [ 19, 3, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS' ],
62 : [ 19, 4, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS' ],
63 : [ 19, 5, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS' ],
64 : [ 20, 1, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO' ],
65 : [ 20, 2, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO' ],
66 : [ 20, 3, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO' ],
67 : [ 21, 1, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl' ],
68 : [ 21, 2, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl' ],
69 : [ 22, 1, '(14N)2', 0.9926874, 28.006147, 'N2' ],
118 : [ 22, 2, '(14N)(15N)', 0.0072535, 29.997989, 'N2' ],
70 : [ 23, 1, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN' ],
71 : [ 23, 2, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN' ],
72 : [ 23, 3, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN' ],
73 : [ 24, 1, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl' ],
74 : [ 24, 2, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl' ],
75 : [ 25, 1, 'H2(16O)2', 0.994952, 34.00548, 'H2O2' ],
76 : [ 26, 1, '(12C)2H2', 0.9776, 26.01565, 'C2H2' ],
77 : [ 26, 2, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2' ],
105 : [ 26, 3, '(12C)2HD', 0.00030455, 27.021825, 'C2H2' ],
78 : [ 27, 1, '(12C)2H6', 0.97699, 30.04695, 'C2H6' ],
106 : [ 27, 2, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6' ],
79 : [ 28, 1, '(31P)H3', 0.99953283, 33.997238, 'PH3' ],
80 : [ 29, 1, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2' ],
119 : [ 29, 2, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2' ],
126 : [ 30, 1, '(32S)(19F)6', 0.950180, 145.962492, 'SF6' ],
81 : [ 31, 1, 'H2(32S)', 0.94988, 33.987721, 'H2S' ],
82 : [ 31, 2, 'H2(34S)', 0.04214, 35.983515, 'H2S' ],
83 : [ 31, 3, 'H2(33S)', 0.007498, 34.987105, 'H2S' ],
84 : [ 32, 1, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH' ],
85 : [ 33, 1, 'H(16O)2', 0.995107, 32.997655, 'HO2' ],
86 : [ 34, 1, '(16O)', 0.997628, 15.994915, 'O' ],
87 : [ 36, 1, '(14N)(16O)+', 0.993974, 29.997989, 'NOp' ],
88 : [ 37, 1, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr' ],
89 : [ 37, 2, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr' ],
90 : [ 38, 1, '(12C)2H4', 0.9773, 28.0313, 'C2H4' ],
91 : [ 38, 2, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4' ],
92 : [ 39, 1, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH' ],
93 : [ 40, 1, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br' ],
94 : [ 40, 2, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br' ],
95 : [ 41, 1, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN' ],
96 : [ 42, 1, '(12C)(19F)4', 0.9893, 87.993616, 'CF4' ],
116 : [ 43, 1, '(12C)4H2', 0.955998, 50.01565, 'C4H2' ],
109 : [ 44, 1, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N' ],
103 : [ 45, 1, 'H2', 0.999688, 2.01565, 'H2' ],
115 : [ 45, 2, 'HD', 0.000311432, 3.021825, 'H2' ],
97 : [ 46, 1, '(12C)(32S)', 0.939624, 43.971036, 'CS' ],
98 : [ 46, 2, '(12C)(34S)', 0.0416817, 45.966787, 'CS' ],
99 : [ 46, 3, '(13C)(32S)', 0.0105565, 44.974368, 'CS' ],
100 : [ 46, 4, '(12C)(33S)', 0.00741668, 44.970399, 'CS' ],
114 : [ 47, 1, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3' ],
123 : [ 48, 1, '(12C)2(14N)2', 0.970752433, 52.006148, 'C2N2' ],
124 : [ 49, 1, '(12C)(16O)(35Cl)2', 0.566391761, 97.9326199796, 'COCl2' ],
125 : [ 49, 2, '(12C)(16O)(35Cl)(37Cl)', 0.362235278, 99.9296698896, 'COCl2' ],
# 101 : [ 1001, 1, 'H', None, None, 'H' ],
# 102 : [ 1002, 1, 'He', None, None, 'He' ],
# 104 : [ 1018, 1, 'Ar', None, None, 'Ar' ],
}
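# Illustrative sketch (not part of the original API): reading an ISO_ID record
# through ISO_ID_INDEX, e.g. for the main water isotopologue (global ID 1).
def _iso_id_lookup_example(iso_id=1):
    return {field: ISO_ID[iso_id][pos] for field,pos in ISO_ID_INDEX.items()}
    # -> {'M':1,'I':1,'iso_name':'H2(16O)','abundance':0.997317,
    #     'mass':18.010565,'mol_name':'H2O'}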
ISO_INDEX = {
'id':0,
'iso_name':1,
'abundance':2,
'mass':3,
'mol_name':4
}
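# Illustrative sketch (not part of the original API): ISO is keyed by the
# (molecule number M, local isotopologue number I) pair, e.g.
#   ISO[(1,1)][ISO_INDEX['iso_name']] -> 'H2(16O)'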
# M I id iso_name abundance mass mol_name
ISO = {
( 1, 1 ): [ 1, 'H2(16O)', 0.997317, 18.010565, 'H2O' ],
( 1, 2 ): [ 2, 'H2(18O)', 0.00199983, 20.014811, 'H2O' ],
( 1, 3 ): [ 3, 'H2(17O)', 0.000372, 19.01478, 'H2O' ],
( 1, 4 ): [ 4, 'HD(16O)', 0.00031069, 19.01674, 'H2O' ],
( 1, 5 ): [ 5, 'HD(18O)', 0.000000623, 21.020985, 'H2O' ],
( 1, 6 ): [ 6, 'HD(17O)', 0.000000116, 20.020956, 'H2O' ],
( 1, 7 ): [ 129, 'D2(16O)', 0.000000024197, 20.022915, 'H2O' ],
( 2, 1 ): [ 7, '(12C)(16O)2', 0.984204, 43.98983, 'CO2' ],
( 2, 2 ): [ 8, '(13C)(16O)2', 0.011057, 44.993185, 'CO2' ],
( 2, 3 ): [ 9, '(16O)(12C)(18O)', 0.0039471, 45.994076, 'CO2' ],
( 2, 4 ): [ 10, '(16O)(12C)(17O)', 0.000734, 44.994045, 'CO2' ],
( 2, 5 ): [ 11, '(16O)(13C)(18O)', 0.00004434, 46.997431, 'CO2' ],
( 2, 6 ): [ 12, '(16O)(13C)(17O)', 0.00000825, 45.9974, 'CO2' ],
( 2, 7 ): [ 13, '(12C)(18O)2', 0.0000039573, 47.998322, 'CO2' ],
( 2, 8 ): [ 14, '(17O)(12C)(18O)', 0.00000147, 46.998291, 'CO2' ],
( 2, 9 ): [ 121, '(12C)(17O)2', 0.0000001368, 45.998262, 'CO2' ],
( 2, 10 ): [ 15, '(13C)(18O)2', 0.000000044967, 49.001675, 'CO2' ], # 0->10
( 2, 11 ): [ 120, '(18O)(13C)(17O)', 0.00000001654, 48.00165, 'CO2' ], # 'A'->11
( 2, 12 ): [ 122, '(13C)(17O)2', 0.0000000015375, 47.001618, 'CO2' ], # 'B'->12
( 3, 1 ): [ 16, '(16O)3', 0.992901, 47.984745, 'O3' ],
( 3, 2 ): [ 17, '(16O)(16O)(18O)', 0.00398194, 49.988991, 'O3' ],
( 3, 3 ): [ 18, '(16O)(18O)(16O)', 0.00199097, 49.988991, 'O3' ],
( 3, 4 ): [ 19, '(16O)(16O)(17O)', 0.00074, 48.98896, 'O3' ],
( 3, 5 ): [ 20, '(16O)(17O)(16O)', 0.00037, 48.98896, 'O3' ],
( 4, 1 ): [ 21, '(14N)2(16O)', 0.990333, 44.001062, 'N2O' ],
( 4, 2 ): [ 22, '(14N)(15N)(16O)', 0.0036409, 44.998096, 'N2O' ],
( 4, 3 ): [ 23, '(15N)(14N)(16O)', 0.0036409, 44.998096, 'N2O' ],
( 4, 4 ): [ 24, '(14N)2(18O)', 0.00198582, 46.005308, 'N2O' ],
( 4, 5 ): [ 25, '(14N)2(17O)', 0.000369, 45.005278, 'N2O' ],
( 5, 1 ): [ 26, '(12C)(16O)', 0.98654, 27.994915, 'CO' ],
( 5, 2 ): [ 27, '(13C)(16O)', 0.01108, 28.99827, 'CO' ],
( 5, 3 ): [ 28, '(12C)(18O)', 0.0019782, 29.999161, 'CO' ],
( 5, 4 ): [ 29, '(12C)(17O)', 0.000368, 28.99913, 'CO' ],
( 5, 5 ): [ 30, '(13C)(18O)', 0.00002222, 31.002516, 'CO' ],
( 5, 6 ): [ 31, '(13C)(17O)', 0.00000413, 30.002485, 'CO' ],
( 6, 1 ): [ 32, '(12C)H4', 0.98827, 16.0313, 'CH4' ],
( 6, 2 ): [ 33, '(13C)H4', 0.0111, 17.034655, 'CH4' ],
( 6, 3 ): [ 34, '(12C)H3D', 0.00061575, 17.037475, 'CH4' ],
( 6, 4 ): [ 35, '(13C)H3D', 0.0000049203, 18.04083, 'CH4' ],
( 7, 1 ): [ 36, '(16O)2', 0.995262, 31.98983, 'O2' ],
( 7, 2 ): [ 37, '(16O)(18O)', 0.00399141, 33.994076, 'O2' ],
( 7, 3 ): [ 38, '(16O)(17O)', 0.000742, 32.994045, 'O2' ],
( 8, 1 ): [ 39, '(14N)(16O)', 0.993974, 29.997989, 'NO' ],
( 8, 2 ): [ 40, '(15N)(16O)', 0.0036543, 30.995023, 'NO' ],
( 8, 3 ): [ 41, '(14N)(18O)', 0.00199312, 32.002234, 'NO' ],
( 9, 1 ): [ 42, '(32S)(16O)2', 0.94568, 63.961901, 'SO2' ],
( 9, 2 ): [ 43, '(34S)(16O)2', 0.04195, 65.957695, 'SO2' ],
( 10, 1 ): [ 44, '(14N)(16O)2', 0.991616, 45.992904, 'NO2' ],
( 11, 1 ): [ 45, '(14N)H3', 0.9958715, 17.026549, 'NH3' ],
( 11, 2 ): [ 46, '(15N)H3', 0.0036613, 18.023583, 'NH3' ],
( 12, 1 ): [ 47, 'H(14N)(16O)3', 0.98911, 62.995644, 'HNO3' ],
( 12, 2 ): [ 117, 'H(15N)(16O)3', 0.003636, 63.99268, 'HNO3' ],
( 13, 1 ): [ 48, '(16O)H', 0.997473, 17.00274, 'OH' ],
( 13, 2 ): [ 49, '(18O)H', 0.00200014, 19.006986, 'OH' ],
( 13, 3 ): [ 50, '(16O)D', 0.00015537, 18.008915, 'OH' ],
( 14, 1 ): [ 51, 'H(19F)', 0.99984425, 20.006229, 'HF' ],
( 14, 2 ): [ 110, 'D(19F)', 0.000115, 21.0125049978, 'HF' ],
( 15, 1 ): [ 52, 'H(35Cl)', 0.757587, 35.976678, 'HCl' ],
( 15, 2 ): [ 53, 'H(37Cl)', 0.242257, 37.973729, 'HCl' ],
( 15, 3 ): [ 107, 'D(35Cl)', 0.000118005, 36.9829544578, 'HCl' ],
( 15, 4 ): [ 108, 'D(37Cl)', 0.000037735, 38.9800043678, 'HCl' ],
( 16, 1 ): [ 54, 'H(79Br)', 0.50678, 79.92616, 'HBr' ],
( 16, 2 ): [ 55, 'H(81Br)', 0.49306, 81.924115, 'HBr' ],
( 16, 3 ): [ 111, 'D(79Br)', 0.0000582935, 80.9324388778, 'HBr' ],
( 16, 4 ): [ 112, 'D(81Br)', 0.0000567065, 82.9303923778, 'HBr' ],
( 17, 1 ): [ 56, 'H(127I)', 0.99984425, 127.912297, 'HI' ],
( 17, 2 ): [ 113, 'D(127I)', 0.000115, 128.918574778, 'HI' ],
( 18, 1 ): [ 57, '(35Cl)(16O)', 0.75591, 50.963768, 'ClO' ],
( 18, 2 ): [ 58, '(37Cl)(16O)', 0.24172, 52.960819, 'ClO' ],
( 19, 1 ): [ 59, '(16O)(12C)(32S)', 0.93739, 59.966986, 'OCS' ],
( 19, 2 ): [ 60, '(16O)(12C)(34S)', 0.04158, 61.96278, 'OCS' ],
( 19, 3 ): [ 61, '(16O)(13C)(32S)', 0.01053, 60.970341, 'OCS' ],
( 19, 4 ): [ 62, '(16O)(12C)(33S)', 0.01053, 60.966371, 'OCS' ],
( 19, 5 ): [ 63, '(18O)(12C)(32S)', 0.00188, 61.971231, 'OCS' ],
( 20, 1 ): [ 64, 'H2(12C)(16O)', 0.98624, 30.010565, 'H2CO' ],
( 20, 2 ): [ 65, 'H2(13C)(16O)', 0.01108, 31.01392, 'H2CO' ],
( 20, 3 ): [ 66, 'H2(12C)(18O)', 0.0019776, 32.014811, 'H2CO' ],
( 21, 1 ): [ 67, 'H(16O)(35Cl)', 0.75579, 51.971593, 'HOCl' ],
( 21, 2 ): [ 68, 'H(16O)(37Cl)', 0.24168, 53.968644, 'HOCl' ],
( 22, 1 ): [ 69, '(14N)2', 0.9926874, 28.006147, 'N2' ],
( 22, 2 ): [ 118, '(14N)(15N)', 0.0072535, 29.997989, 'N2' ],
( 23, 1 ): [ 70, 'H(12C)(14N)', 0.98511, 27.010899, 'HCN' ],
( 23, 2 ): [ 71, 'H(13C)(14N)', 0.01107, 28.014254, 'HCN' ],
( 23, 3 ): [ 72, 'H(12C)(15N)', 0.0036217, 28.007933, 'HCN' ],
( 24, 1 ): [ 73, '(12C)H3(35Cl)', 0.74894, 49.992328, 'CH3Cl' ],
( 24, 2 ): [ 74, '(12C)H3(37Cl)', 0.23949, 51.989379, 'CH3Cl' ],
( 25, 1 ): [ 75, 'H2(16O)2', 0.994952, 34.00548, 'H2O2' ],
( 26, 1 ): [ 76, '(12C)2H2', 0.9776, 26.01565, 'C2H2' ],
( 26, 2 ): [ 77, '(12C)(13C)H2', 0.02197, 27.019005, 'C2H2' ],
( 26, 3 ): [ 105, '(12C)2HD', 0.00030455, 27.021825, 'C2H2' ],
( 27, 1 ): [ 78, '(12C)2H6', 0.97699, 30.04695, 'C2H6' ],
( 27, 2 ): [ 106, '(12C)H3(13C)H3', 0.021952611, 31.050305, 'C2H6' ],
( 28, 1 ): [ 79, '(31P)H3', 0.99953283, 33.997238, 'PH3' ],
( 29, 1 ): [ 80, '(12C)(16O)(19F)2', 0.98654, 65.991722, 'COF2' ],
( 29, 2 ): [ 119, '(13C)(16O)(19F)2', 0.0110834, 66.995083, 'COF2' ],
( 30, 1 ): [ 126, '(32S)(19F)6', 0.950180, 145.962492, 'SF6' ],
( 31, 1 ): [ 81, 'H2(32S)', 0.94988, 33.987721, 'H2S' ],
( 31, 2 ): [ 82, 'H2(34S)', 0.04214, 35.983515, 'H2S' ],
( 31, 3 ): [ 83, 'H2(33S)', 0.007498, 34.987105, 'H2S' ],
( 32, 1 ): [ 84, 'H(12C)(16O)(16O)H', 0.983898, 46.00548, 'HCOOH' ],
( 33, 1 ): [ 85, 'H(16O)2', 0.995107, 32.997655, 'HO2' ],
( 34, 1 ): [ 86, '(16O)', 0.997628, 15.994915, 'O' ],
( 36, 1 ): [ 87, '(14N)(16O)+', 0.993974, 29.997989, 'NOp' ],
( 37, 1 ): [ 88, 'H(16O)(79Br)', 0.5056, 95.921076, 'HOBr' ],
( 37, 2 ): [ 89, 'H(16O)(81Br)', 0.4919, 97.919027, 'HOBr' ],
( 38, 1 ): [ 90, '(12C)2H4', 0.9773, 28.0313, 'C2H4' ],
( 38, 2 ): [ 91, '(12C)H2(13C)H2', 0.02196, 29.034655, 'C2H4' ],
( 39, 1 ): [ 92, '(12C)H3(16O)H', 0.98593, 32.026215, 'CH3OH' ],
( 40, 1 ): [ 93, '(12C)H3(79Br)', 0.5013, 93.941811, 'CH3Br' ],
( 40, 2 ): [ 94, '(12C)H3(81Br)', 0.48766, 95.939764, 'CH3Br' ],
( 41, 1 ): [ 95, '(12C)H3(12C)(14N)', 0.97482, 41.026549, 'CH3CN' ],
( 42, 1 ): [ 96, '(12C)(19F)4', 0.9893, 87.993616, 'CF4' ],
( 43, 1 ): [ 116, '(12C)4H2', 0.955998, 50.01565, 'C4H2' ],
( 44, 1 ): [ 109, 'H(12C)3(14N)', 0.9646069, 51.01089903687, 'HC3N' ],
( 45, 1 ): [ 103, 'H2', 0.999688, 2.01565, 'H2' ],
( 45, 2 ): [ 115, 'HD', 0.000311432, 3.021825, 'H2' ],
( 46, 1 ): [ 97, '(12C)(32S)', 0.939624, 43.971036, 'CS' ],
( 46, 2 ): [ 98, '(12C)(34S)', 0.0416817, 45.966787, 'CS' ],
( 46, 3 ): [ 99, '(13C)(32S)', 0.0105565, 44.974368, 'CS' ],
( 46, 4 ): [ 100, '(12C)(33S)', 0.00741668, 44.970399, 'CS' ],
( 47, 1 ): [ 114, '(32S)(16O)3', 0.9423964, 79.95682, 'SO3' ],
( 48, 1 ): [ 123, '(12C)2(14N)2', 0.970752433, 52.006148, 'C2N2' ],
( 49, 1 ): [ 124, '(12C)(16O)(35Cl)2', 0.566391761, 97.9326199796, 'COCl2' ],
( 49, 2 ): [ 125, '(12C)(16O)(35Cl)(37Cl)', 0.362235278, 99.9296698896, 'COCl2' ],
#( 1001, 1 ): [ 101, 'H', None, None, 'H' ],
#( 1002, 1 ): [ 102, 'He', None, None, 'He' ],
#( 1018, 1 ): [ 104, 'Ar', None, None, 'Ar' ],
}
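# Usage sketch (illustrative comment, not part of the original HAPI code): each ISO record
# is a list whose fields are addressed via ISO_INDEX, as done in print_iso() below.
# For the main isotopologue of CO (molecule 5, local isotopologue 1; see the table above):
#     ISO[(5, 1)][ISO_INDEX['id']]         # -> 26        (global HITRAN id)
#     ISO[(5, 1)][ISO_INDEX['abundance']]  # -> 0.98654   (natural abundance)
#     ISO[(5, 1)][ISO_INDEX['mass']]       # -> 27.994915 (mass in atomic mass units)
#     ISO[(5, 1)][ISO_INDEX['mol_name']]   # -> 'CO'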
def print_iso():
    print('The dictionary "ISO" contains information on isotopologues in HITRAN\n')
    print('   M   I     id                  iso_name   abundance       mass       mol_name')
    for i in ISO:
        ab = ISO[i][ISO_INDEX['abundance']]
        ma = ISO[i][ISO_INDEX['mass']]
        ab = ab if ab else -1
        ma = ma if ma else -1
        print('%4i %4i : %5i %25s %10f %10f %15s' % (i[0], i[1], ISO[i][ISO_INDEX['id']], ISO[i][ISO_INDEX['iso_name']], ab, ma, ISO[i][ISO_INDEX['mol_name']]))
def print_iso_id():
    print('The dictionary "ISO_ID" contains information on "global" IDs of isotopologues in HITRAN\n')
    print('   id    M    I                  iso_name       abundance       mass        mol_name')
    for i in ISO_ID:
        ab = ISO_ID[i][ISO_ID_INDEX['abundance']]
        ma = ISO_ID[i][ISO_ID_INDEX['mass']]
        ab = ab if ab else -1
        ma = ma if ma else -1
        print('%5i : %4i %4i %25s %15.10f %10f %15s' % (i, ISO_ID[i][ISO_ID_INDEX['M']], ISO_ID[i][ISO_ID_INDEX['I']], ISO_ID[i][ISO_ID_INDEX['iso_name']], ab, ma, ISO_ID[i][ISO_ID_INDEX['mol_name']]))
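# Usage sketch (illustrative comment; assumes ISO_ID mirrors ISO but is keyed by the global
# isotopologue id, with fields addressed via ISO_ID_INDEX as in print_iso_id() above), e.g.
# for global id 26, which the ISO table above assigns to the main CO isotopologue:
#     ISO_ID[26][ISO_ID_INDEX['M']]          # molecule number (5 for CO)
#     ISO_ID[26][ISO_ID_INDEX['abundance']]  # natural abundance
#     ISO_ID[26][ISO_ID_INDEX['mol_name']]   # molecule name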
profiles = 'profiles'
def print_profiles():
    print('Profiles available:')
    print('  HT        : PROFILE_HT')
    print('  SDRautian : PROFILE_SDRAUTIAN')
    print('  Rautian   : PROFILE_RAUTIAN')
    print('  SDVoigt   : PROFILE_SDVOIGT')
    print('  Voigt     : PROFILE_VOIGT')
    print('  Lorentz   : PROFILE_LORENTZ')
    print('  Doppler   : PROFILE_DOPPLER')
slit_functions = 'slit_functions'
def print_slit_functions():
    print('  RECTANGULAR        : SLIT_RECTANGULAR')
    print('  TRIANGULAR         : SLIT_TRIANGULAR')
    print('  GAUSSIAN           : SLIT_GAUSSIAN')
    print('  DIFFRACTION        : SLIT_DIFFRACTION')
    print('  MICHELSON          : SLIT_MICHELSON')
    print('  DISPERSION/LORENTZ : SLIT_DISPERSION')
tutorial='tutorial'
units='units'
index='index'
data='data'
spectra='spectra'
plotting='plotting'
python='python'
python_tutorial_text = \
"""
THIS TUTORIAL IS TAKEN FROM http://www.stavros.io/tutorials/python/
AUTHOR: Stavros Korokithakis
----- LEARN PYTHON IN 10 MINUTES -----
PRELIMINARY STUFF
So, you want to learn the Python programming language but can't find a concise
and yet full-featured tutorial. This tutorial will attempt to teach you Python in 10 minutes.
It's probably not so much a tutorial as it is a cross between a tutorial and a cheatsheet,
so it will just show you some basic concepts to start you off. Obviously, if you want to
really learn a language you need to program in it for a while. I will assume that you are
already familiar with programming and will, therefore, skip most of the non-language-specific stuff.
The important keywords will be highlighted so you can easily spot them. Also, pay attention because,
due to the terseness of this tutorial, some things will be introduced directly in code and only
briefly commented on.
PROPERTIES
Python is strongly typed (i.e. types are enforced), dynamically, implicitly typed (i.e. you don't
have to declare variables), case sensitive (i.e. var and VAR are two different variables) and
object-oriented (i.e. everything is an object).
GETTING HELP
Help in Python is always available right in the interpreter. If you want to know how an object works,
all you have to do is call help(