diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 54d60194d..000000000
--- a/.flake8
+++ /dev/null
@@ -1,4 +0,0 @@
-[flake8]
-max-line-length = 85
-ignore = E203, E241, E701, W503
-exclude = flycheck*
\ No newline at end of file
diff --git a/.github/workflows/build_docs.yaml b/.github/workflows/build_docs.yaml
new file mode 100644
index 000000000..4e3d756a2
--- /dev/null
+++ b/.github/workflows/build_docs.yaml
@@ -0,0 +1,31 @@
+name: Build and deploy documentation
+on:
+  push:
+    branches:
+      - docs
+jobs:
+  build-docs:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest]
+        python-version: ['3.10']
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y pandoc
+          python -m pip install -e .[development]
+      - name: Build docs
+        run: cd doc && make html
+      - name: Deploy to GitHub Pages
+        uses: peaceiris/actions-gh-pages@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          publish_dir: ./doc/build/html
diff --git a/.github/workflows/docs_check.yaml b/.github/workflows/docs_check.yaml
new file mode 100644
index 000000000..9e150908a
--- /dev/null
+++ b/.github/workflows/docs_check.yaml
@@ -0,0 +1,32 @@
+name: Check for Sphinx Warnings
+
+on:
+  pull_request:
+    paths:
+      - "doc/**"
+      - "**/*.rst"
+      - ".github/workflows/docs_check.yaml"
+      - "setup.py"
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Check out the repository
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.x'
+
+      - name: Install dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y pandoc
+          python -m pip install -e .[development]
+
+      - name: Check for Sphinx warnings
+        run: |
+          sphinx-build -M html ./doc/source ./doc/_build --fail-on-warning
diff --git a/.github/workflows/format_check.yml b/.github/workflows/format_check.yml
new file mode 100644
index 000000000..294f1e458
--- /dev/null
+++ b/.github/workflows/format_check.yml
@@ -0,0 +1,11 @@
+name: Ruff format
+on: [push, pull_request]
+jobs:
+  ruff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: chartboost/ruff-action@v1
+        with:
+          args: 'format --check'
+          version: 0.7.0
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
new file mode 100644
index 000000000..2546c0edb
--- /dev/null
+++ b/.github/workflows/lint.yml
@@ -0,0 +1,10 @@
+name: Ruff check
+on: [push, pull_request]
+jobs:
+  ruff:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: chartboost/ruff-action@v1
+        with:
+          version: 0.7.0
diff --git a/.github/workflows/spell_check.yml b/.github/workflows/spell_check.yml
new file mode 100644
index 000000000..c894573a1
--- /dev/null
+++ b/.github/workflows/spell_check.yml
@@ -0,0 +1,15 @@
+name: Spell Check
+
+on: [push, pull_request]
+
+jobs:
+  spell-check:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+
+      - name: Run codespell
+        uses: codespell-project/actions-codespell@v2
+
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 25bd3f43a..fbff69080 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -10,7 +10,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, windows-latest, macos-latest]
-        python-version: ['3.8', '3.9', '3.10']
+        python-version: ['3.9', '3.10', '3.11', '3.12']
     steps:
     - uses: actions/checkout@v2
     - name: Set up Python ${{ matrix.python-version }}
diff --git a/.gitignore b/.gitignore
index 3e390d12f..400bdc0de 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,8 @@ __pycache__
.cov2emacs.log
flycheck*.py
/notes/
+/.ropeproject/
+
+/doc_src/
+/doc/build/
+/doc/source/api_reference/_autosummary
diff --git a/.pylintrc b/.pylintrc
deleted file mode 100644
index 26b7a9985..000000000
--- a/.pylintrc
+++ /dev/null
@@ -1,572 +0,0 @@
-[MAIN]
-
-# Python code to execute, usually for sys.path manipulation such as
-# pygtk.require().
-init-hook='import sys; sys.path.append("."); sys.path.append("../"); sys.path.append("../../")'
-
-# Files or directories to be skipped. They should be base names, not
-# paths.
-ignore=flycheck_*
-
-# Add files or directories matching the regex patterns to the ignore-list. The
-# regex matches against paths and can be in Posix or Windows format.
-ignore-paths=
-
-# Files or directories matching the regex patterns are skipped. The regex
-# matches against base names, not paths.
-ignore-patterns=^\.#
-
-# Pickle collected data for later comparisons.
-persistent=yes
-
-# List of plugins (as comma separated values of python modules names) to load,
-# usually to register additional checkers.
-load-plugins=
-    pylint.extensions.check_elif,
-    pylint.extensions.bad_builtin,
-    pylint.extensions.for_any_all,
-    pylint.extensions.set_membership,
-    pylint.extensions.code_style,
-    pylint.extensions.overlapping_exceptions,
-    pylint.extensions.typing,
-    pylint.extensions.redefined_variable_type,
-    pylint.extensions.comparison_placement,
-    pylint.extensions.docparams
-
-# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
-# number of processors available to use.
-jobs=0
-
-# When enabled, pylint would attempt to guess common misconfiguration and emit
-# user-friendly hints instead of false-positive error messages.
-suggestion-mode=yes
-
-# Allow loading of arbitrary C extensions. Extensions are imported into the
-# active Python interpreter and may run arbitrary code.
-unsafe-load-any-extension=no
-
-# A comma-separated list of package or module names from where C extensions may
-# be loaded. Extensions are loading into the active Python interpreter and may
-# run arbitrary code
-extension-pkg-allow-list=
-
-# Minimum supported python version
-py-version = 3.7.2
-
-# Control the amount of potential inferred values when inferring a single
-# object. This can help the performance when dealing with large functions or
-# complex, nested conditions.
-limit-inference-results=100
-
-# Specify a score threshold under which the program will exit with error.
-fail-under=10.0
-
-# Return non-zero exit code if any of these messages/categories are detected,
-# even if score is above --fail-under value. Syntax same as enable. Messages
-# specified are enabled, while categories only check already-enabled messages.
-fail-on=
-
-
-[MESSAGES CONTROL]
-
-# Only show warnings with the listed confidence levels. Leave empty to show
-# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
-# confidence=
-
-# Enable the message, report, category or checker with the given id(s). You can
-# either give multiple identifier separated by comma (,) or put this option
-# multiple time (only on the command line, not in the configuration file where
-# it should appear only once). See also the "--disable" option for examples.
-enable=
-    use-symbolic-message-instead,
-    useless-suppression,
-
-# Disable the message, report, category or checker with the given id(s). You
-# can either give multiple identifiers separated by comma (,) or put this
-# option multiple times (only on the command line, not in the configuration
-# file where it should appear only once).You can also use "--disable=all" to
-# disable everything first and then re-enable specific checks. For example, if
-# you want to run only the similarities checker, you can use "--disable=all
-# --enable=similarities". If you want to run only the classes checker, but have
-# no Warning level messages displayed, use"--disable=all --enable=classes
-# --disable=W"
-
-disable=
-    attribute-defined-outside-init,
-    invalid-name,
-    missing-param-doc,
-    missing-type-doc,
-    protected-access,
-    too-few-public-methods,
-    # handled by black
-    format,
-    # We anticipate #3512 where it will become optional
-    fixme,
-
-
-[REPORTS]
-
-# Set the output format. Available formats are text, parseable, colorized, msvs
-# (visual studio) and html. You can also give a reporter class, eg
-# mypackage.mymodule.MyReporterClass.
-output-format=text
-
-# Tells whether to display a full report or only the messages
-reports=no
-
-# Python expression which should return a note less than 10 (10 is the highest
-# note). You have access to the variables 'fatal', 'error', 'warning', 'refactor', 'convention'
-# and 'info', which contain the number of messages in each category, as
-# well as 'statement', which is the total number of statements analyzed. This
-# score is used by the global evaluation report (RP0004).
-evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
-
-# Template used to display messages. This is a python new-style format string
-# used to format the message information. See doc for all details
-#msg-template=
-
-# Activate the evaluation score.
-score=yes
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-# The type of string formatting that logging methods do. `old` means using %
-# formatting, `new` is for `{}` formatting.
-logging-format-style=old
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO,todo,debug
-
-# Regular expression of note tags to take in consideration.
-#notes-rgx=
-
-
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=8
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=yes
-
-# Signatures are removed from the similarity computation
-ignore-signatures=yes
-
-
-[VARIABLES]
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_$|dummy
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid defining new builtins when possible.
-additional-builtins=
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,_cb
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of names allowed to shadow builtins
-allowed-redefined-builtins=
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore.
-ignored-argument-names=_.*
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
-
-
-[FORMAT]
-
-# Maximum number of characters on a single line.
-max-line-length=85
-
-# Regexp for a line that is allowed to be longer than the limit.
-ignore-long-lines=^\s*(# )?<?https?://\S+>?$
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Maximum number of lines in a module
-max-module-lines=2000
-
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
-# tab).
-indent-string=' '
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
-expected-line-ending-format=
-
-
-[BASIC]
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,Run,_
-
-# Good variable names regexes, separated by a comma. If names match any regex,
-# they will always be accepted
-good-names-rgxs=
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,bar,baz,toto,tutu,tata
-
-# Bad variable names regexes, separated by a comma. If names match any regex,
-# they will always be refused
-bad-names-rgxs=
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Include a hint for the correct naming format with invalid-name
-include-naming-hint=no
-
-# Naming style matching correct function names.
-function-naming-style=snake_case
-
-# Regular expression matching correct function names
-function-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming style matching correct variable names.
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names
-variable-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming style matching correct constant names.
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names
-const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
-
-# Naming style matching correct attribute names.
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names
-attr-rgx=[a-z_][a-z0-9_]{2,}$
-
-# Naming style matching correct argument names.
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names
-argument-rgx=[a-z_][a-z0-9_]{2,30}$
-
-# Naming style matching correct class attribute names.
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names
-class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
-
-# Naming style matching correct class constant names.
-class-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct class constant names. Overrides class-
-# const-naming-style.
-#class-const-rgx=
-
-# Naming style matching correct inline iteration names.
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names
-inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
-
-# Naming style matching correct class names.
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names
-class-rgx=[A-Z_][a-zA-Z0-9]+$
-
-
-# Naming style matching correct module names.
-module-naming-style=snake_case
-
-# Regular expression matching correct module names
-module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-
-
-# Naming style matching correct method names.
-method-naming-style=snake_case
-
-# Regular expression matching correct method names
-method-rgx=[a-z_][a-z0-9_]{2,}$
-
-# Regular expression matching correct type variable names
-#typevar-rgx=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring. Use ^(?!__init__$)_ to also check __init__.
-no-docstring-rgx=__.*__
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# List of decorators that define properties, such as abc.abstractproperty.
-property-classes=abc.abstractproperty
-
-#
-# Docstring parameter documentation:
-# https://pylint.pycqa.org/en/1.7/technical_reference/extensions.html
-#
-
-accept-no-raise-doc = no
-accept-no-param-doc = no
-accept-no-return-doc = no
-accept-no-yields-doc = no
-
-
-[TYPECHECK]
-
-# Regex pattern to define which classes are considered mixins if ignore-mixin-
-# members is set to 'yes'
-mixin-class-rgx=.*MixIn
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis). It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=SQLObject, optparse.Values, thread._local, _thread._local
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=REQUEST,acl_users,aq_parent,argparse.Namespace
-
-# List of decorators that create context managers from functions, such as
-# contextlib.contextmanager.
-contextmanager-decorators=contextlib.contextmanager
-
-# Tells whether to warn about missing members when the owner of the attribute
-# is inferred to be None.
-ignore-none=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-[SPELLING]
-
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
-spelling-dict=
-
-# List of comma separated words that should not be checked.
-spelling-ignore-words=
-
-# List of comma separated words that should be considered directives if they
-# appear and the beginning of a comment and should not be checked.
-spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:,pragma:,# noinspection
-
-# A path to a file that contains private dictionary; one word per line.
-spelling-private-dict-file=.pyenchant_pylint_custom_dict.txt
-
-# Tells whether to store unknown words to indicated private dictionary in
-# --spelling-private-dict-file option instead of raising a message.
-spelling-store-unknown-words=no
-
-# Limits count of emitted suggestions for spelling mistakes.
-max-spelling-suggestions=2
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=10
-
-# Maximum number of locals for function / method body
-max-locals=50
-
-# Maximum number of return / yield for function / method body
-max-returns=11
-
-# Maximum number of branch for function / method body
-max-branches=50
-
-# Maximum number of statements in function / method body
-max-statements=150
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# List of qualified class names to ignore when counting class parents (see R0901).
-ignored-parents=
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=25
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=25
-
-# Maximum number of boolean expressions in an if statement (see R0916).
-max-bool-expr=5
-
-# List of regular expressions of class ancestor names to
-# ignore when counting public methods (see R0903).
-exclude-too-few-public-methods=
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp,__post_init__
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,_fields,_replace,_source,_make
-
-# Warn about protected attribute access inside special methods
-check-protected-access-in-special-methods=no
-
-[IMPORTS]
-
-# List of modules that can be imported at any level, not just the top level
-# one.
-allow-any-import-level=
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,TERMIOS,Bastion,rexec
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=enchant
-
-# Couples of modules and preferred modules, separated by a comma.
-preferred-modules=
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=builtins.Exception
-
-
-[TYPING]
-
-# Set to ``no`` if the app / library does **NOT** need to support runtime
-# introspection of type annotations. If you use type annotations
-# **exclusively** for type checking of an application, you're probably fine.
-# For libraries, evaluate if some users what to access the type hints at
-# runtime first, e.g., through ``typing.get_type_hints``. Applies to Python
-# versions 3.7 - 3.9
-runtime-typing = no
-
-
-[DEPRECATED_BUILTINS]
-
-# List of builtins function names that should not be used, separated by a comma
-bad-functions=map,input
-
-
-[REFACTORING]
-
-# Maximum number of nested blocks for function / method body
-max-nested-blocks=8
-
-# Complete name of functions that never returns. When checking for
-# inconsistent-return-statements if a never returning function is called then
-# it will be considered as an explicit return statement and no message will be
-# printed.
-never-returning-functions=sys.exit,argparse.parse_error
-
-
-[STRING]
-
-# This flag controls whether inconsistent-quotes generates a warning when the
-# character used as a quote delimiter is used inconsistently within a module.
-check-quote-consistency=no
-
-# This flag controls whether the implicit-str-concat should generate a warning
-# on implicit string concatenation in sequences defined over several lines.
-check-str-concat-over-line-jumps=no
-
-
-[CODE_STYLE]
-
-# Max line length for which to sill emit suggestions. Used to prevent optional
-# suggestions which would get split by a code formatter (e.g., black). Will
-# default to the setting for ``max-line-length``.
-#max-line-length-suggestions=
diff --git a/README.md b/README.md
index ed8e6e680..1f2d06ee3 100644
--- a/README.md
+++ b/README.md
@@ -12,8 +12,11 @@
Probabilistic Estimation of Losses, Injuries, and Community resilience Under Natural hazard events
+[![Latest Release](https://img.shields.io/github/v/release/NHERI-SimCenter/pelicun?color=blue&label=Latest%20Release)](https://github.com/NHERI-SimCenter/pelicun/releases/latest)
![Tests](https://github.com/NHERI-SimCenter/pelicun/actions/workflows/tests.yml/badge.svg)
[![codecov](https://codecov.io/github/NHERI-SimCenter/pelicun/branch/master/graph/badge.svg?token=W79M5FGOCG)](https://codecov.io/github/NHERI-SimCenter/pelicun/tree/master)
+[![Ruff](https://img.shields.io/badge/ruff-linted-blue)](https://img.shields.io/badge/ruff-linted-blue)
+[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue)](https://raw.githubusercontent.com/NHERI-SimCenter/pelicun/master/LICENSE)
## What is it?
@@ -93,11 +96,13 @@ flake8 pelicun
# Linting with pylint:
pylint pelicun
+# Type checking with mypy:
+mypy pelicun --no-namespace-packages
+
# Running the tests:
python -m pytest pelicun/tests --cov=pelicun --cov-report html
# Open `htmlcov/index.html`in a browser to see coverage results.
-
```
Feel free to [open an issue](https://github.com/NHERI-SimCenter/pelicun/issues/new/choose) if you encounter problems setting up the provided development environment.
@@ -119,9 +124,9 @@ Feel free to [open an issue](https://github.com/NHERI-SimCenter/pelicun/issues/n
- **Location-specific damage processes**: This new feature is useful when you want damage to a component type to induce damage in another component type at the same location only. For example, damaged water pipes on a specific story can trigger damage in floor covering only on that specific story. Location-matching is performed automatically without you having to define component pairs for every location using the following syntax: `'1_CMP.A-LOC', {'DS1': 'CMP.B_DS1'}` , where DS1 of `CMP.A` at each location triggers DS1 of `CMP.B` at the same location.
- - **New `custom_model_dir` argument for `DL_calculation`**: This argument allows users to prepare custom damage and loss model files in a folder and pass the path to that folder to an auto-population script through `DL_calculation`. Within the auto-population script, they can reference only the name of the files in that folder. This provides portability for simulations that use custom models and auto population, such as some of the advanced regional simualtions in [SimCenter's R2D Tool](https://simcenter.designsafe-ci.org/research-tools/r2dtool/).
+ - **New `custom_model_dir` argument for `DL_calculation`**: This argument allows users to prepare custom damage and loss model files in a folder and pass the path to that folder to an auto-population script through `DL_calculation`. Within the auto-population script, they can reference only the name of the files in that folder. This provides portability for simulations that use custom models and auto population, such as some of the advanced regional simulations in [SimCenter's R2D Tool](https://simcenter.designsafe-ci.org/research-tools/r2dtool/).
- - **Extend Hazus EQ auto population sripts to include water networks**: Automatically recognize water network assets and map them to archetypes from the Hazus Earthquake technical manual.
+ - **Extend Hazus EQ auto population scripts to include water networks**: Automatically recognize water network assets and map them to archetypes from the Hazus Earthquake technical manual.
- **Introduce `convert_units` function**: Provide streamlined unit conversion using the pre-defined library of units in Pelicun. Allows you to convert a variable from one unit to another using a single line of simple code, such as
`converted_height = pelicun.base.convert_units(raw_height, unit='m', to_unit='ft')`
@@ -135,7 +140,7 @@ Feel free to [open an issue](https://github.com/NHERI-SimCenter/pelicun/issues/n
- **Automatic code formatting**: Further improve consistency in coding style by using [black](https://black.readthedocs.io/en/stable/) to review and format the code when needed.
- - **Remove `bldg` from variable and class names**: Following the changes mentioned earlier, we dropped `bldg` from lables where the functionality is no longer limited to buildings.
+ - **Remove `bldg` from variable and class names**: Following the changes mentioned earlier, we dropped `bldg` from labels where the functionality is no longer limited to buildings.
- **Introduce `calibrated` attribute for demand model**: This new attribute will allow users to check if a model has already been calibrated to the provided empirical data.
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 000000000..d0c3cbf10
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 000000000..dc1312ab0
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/doc/source/_extensions/latest_citation.py b/doc/source/_extensions/latest_citation.py
new file mode 100644
index 000000000..f2112bb8e
--- /dev/null
+++ b/doc/source/_extensions/latest_citation.py
@@ -0,0 +1,57 @@
+# noqa: INP001, CPY001, D100
+import requests
+from docutils import nodes
+from docutils.parsers.rst import Directive
+
+
+class LatestCitationDirective(Directive):  # noqa: D101
+    def run(self):  # noqa: ANN201, D102
+        citation_text, bibtex_text = self.get_latest_zenodo_citation()
+
+        # Create nodes for the standard citation and BibTeX
+        citation_node = nodes.paragraph(text=citation_text)
+        bibtex_node = nodes.literal_block(text=bibtex_text, language='bibtex')
+
+        return [citation_node, bibtex_node]
+
+    def get_latest_zenodo_citation(self):  # noqa: PLR6301, ANN201, D102
+        url = 'https://zenodo.org/api/records/?q=conceptdoi:10.5281/zenodo.2558557&sort=mostrecent'
+        try:
+            response = requests.get(url)  # noqa: S113
+        except requests.exceptions.ConnectionError:
+            return '(No Connection)', ''
+        data = response.json()
+        latest_record = data['hits']['hits'][0]
+        authors = [
+            author['name'] for author in latest_record['metadata']['creators']
+        ]
+        # Join the author names with commas, using ', and ' before the last one.
+        combine_chars = [', '] * (len(authors) - 2) + [', and ']
+        author_str = authors[0]
+        for author, combine_char in zip(authors[1:], combine_chars):
+            author_str += combine_char + author
+        title = latest_record['metadata']['title'].split(': ')[0]
+        version = latest_record['metadata']['version']
+        doi = latest_record['metadata']['doi']
+        year = latest_record['metadata']['publication_date'][:4]
+        month = latest_record['metadata']['publication_date'][5:7]  # noqa: F841
+        publisher = 'Zenodo'
+
+        # Standard citation
+        citation_text = f'{author_str} ({year}) {title}. DOI:{doi}'
+
+        # BibTeX citation
+        bibtex_text = f"""@software{{{author_str.replace(" ", "_").replace(",", "").replace("_and_", "_").lower()}_{year}_{doi.split('.')[-1]},
+  author = {{{" and ".join(authors)}}},
+  title = {{{title}}},
+  year = {year},
+  publisher = {{{publisher}}},
+  version = {{{version}}},
+  doi = {{{doi}}},
+}}"""
+
+        return citation_text, bibtex_text
+
+
+def setup(app):  # noqa: ANN201, D103, ANN001
+    app.add_directive('latest-citation', LatestCitationDirective)
diff --git a/doc/source/_static/css/custom.css b/doc/source/_static/css/custom.css
new file mode 100644
index 000000000..17f191d76
--- /dev/null
+++ b/doc/source/_static/css/custom.css
@@ -0,0 +1,33 @@
+.wy-nav-content {
+    max-width: none;
+}
+
+
+.math {
+    text-align: left;
+}
+
+.eqno {
+    float: right;
+}
+
+
+/* div.wy-side-scroll {
+       background: #cb463f;
+   } */
+
+div.wy-menu.wy-menu-vertical > .caption {
+    color: #cb463f;
+}
+
+/* LIGHT BLUE background:#0099ff */
+/* BLUE: background:#0B619C */
+/* ADAM RED: background:#cb463f; */
+
+span.caption.caption-text {
+    color: #000000;
+}
+
+td {
+    white-space: normal !important;
+}
diff --git a/doc/source/_static/front_page/api-svgrepo-com.svg b/doc/source/_static/front_page/api-svgrepo-com.svg
new file mode 100644
index 000000000..4323ed226
--- /dev/null
+++ b/doc/source/_static/front_page/api-svgrepo-com.svg
@@ -0,0 +1,150 @@
+
+
+
+
diff --git a/doc/source/_static/front_page/book-svgrepo-com.svg b/doc/source/_static/front_page/book-svgrepo-com.svg
new file mode 100644
index 000000000..7d709ce92
--- /dev/null
+++ b/doc/source/_static/front_page/book-svgrepo-com.svg
@@ -0,0 +1,45 @@
+
+
+
+
diff --git a/doc/source/_static/front_page/programmer-svgrepo-com.svg b/doc/source/_static/front_page/programmer-svgrepo-com.svg
new file mode 100644
index 000000000..92540aed4
--- /dev/null
+++ b/doc/source/_static/front_page/programmer-svgrepo-com.svg
@@ -0,0 +1,85 @@
+
+
+
+
diff --git a/doc/source/_static/front_page/right-arrow-svgrepo-com.svg b/doc/source/_static/front_page/right-arrow-svgrepo-com.svg
new file mode 100644
index 000000000..a0e8e1150
--- /dev/null
+++ b/doc/source/_static/front_page/right-arrow-svgrepo-com.svg
@@ -0,0 +1,41 @@
+
+
+
+
diff --git a/doc/source/_static/pelicun-Logo-grey.png b/doc/source/_static/pelicun-Logo-grey.png
new file mode 100644
index 000000000..eda5c6845
Binary files /dev/null and b/doc/source/_static/pelicun-Logo-grey.png differ
diff --git a/doc/source/_static/pelicun-Logo-white.png b/doc/source/_static/pelicun-Logo-white.png
new file mode 100644
index 000000000..a799a6557
Binary files /dev/null and b/doc/source/_static/pelicun-Logo-white.png differ
diff --git a/doc/source/_static/pelicun-Logo.png b/doc/source/_static/pelicun-Logo.png
new file mode 100644
index 000000000..991123331
Binary files /dev/null and b/doc/source/_static/pelicun-Logo.png differ
diff --git a/doc/source/_templates/custom-class-template.rst b/doc/source/_templates/custom-class-template.rst
new file mode 100644
index 000000000..b29757c52
--- /dev/null
+++ b/doc/source/_templates/custom-class-template.rst
@@ -0,0 +1,32 @@
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+   :members:
+   :show-inheritance:
+   :inherited-members:
+
+   {% block methods %}
+   .. automethod:: __init__
+
+   {% if methods %}
+   .. rubric:: {{ _('Methods') }}
+
+   .. autosummary::
+   {% for item in methods %}
+      ~{{ name }}.{{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block attributes %}
+   {% if attributes %}
+   .. rubric:: {{ _('Attributes') }}
+
+   .. autosummary::
+   {% for item in attributes %}
+      ~{{ name }}.{{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
diff --git a/doc/source/_templates/custom-module-template.rst b/doc/source/_templates/custom-module-template.rst
new file mode 100644
index 000000000..dc5355649
--- /dev/null
+++ b/doc/source/_templates/custom-module-template.rst
@@ -0,0 +1,63 @@
+{{ fullname | escape | underline}}
+
+.. automodule:: {{ fullname }}
+   :members:
+
+   {% block attributes %}
+   {% if attributes %}
+   .. rubric:: {{ _('Module Attributes') }}
+
+   .. autosummary::
+   {% for item in attributes %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block functions %}
+   {% if functions %}
+   .. rubric:: {{ _('Functions') }}
+
+   .. autosummary::
+   {% for item in functions %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block classes %}
+   {% if classes %}
+   .. rubric:: {{ _('Classes') }}
+
+   .. autosummary::
+      :template: custom-class-template.rst
+   {% for item in classes %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+   {% block exceptions %}
+   {% if exceptions %}
+   .. rubric:: {{ _('Exceptions') }}
+
+   .. autosummary::
+   {% for item in exceptions %}
+      {{ item }}
+   {%- endfor %}
+   {% endif %}
+   {% endblock %}
+
+{% block modules %}
+{% if modules %}
+.. rubric:: Modules
+
+.. autosummary::
+   :toctree:
+   :template: custom-module-template.rst
+   :recursive:
+{% for item in modules %}
+   {{ item }}
+{%- endfor %}
+{% endif %}
+{% endblock %}
diff --git a/doc/source/about/LICENSE b/doc/source/about/LICENSE
new file mode 100644
index 000000000..0c0b603fa
--- /dev/null
+++ b/doc/source/about/LICENSE
@@ -0,0 +1,32 @@
+This source code is licensed under a BSD 3-Clause License.
+
+Copyright (c) 2018 Leland Stanford Junior University
+Copyright (c) 2018 The Regents of the University of California
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/doc/source/about/acknowledgments.rst b/doc/source/about/acknowledgments.rst
new file mode 100644
index 000000000..20cf944c8
--- /dev/null
+++ b/doc/source/about/acknowledgments.rst
@@ -0,0 +1,39 @@
+.. _acknowledgments:
+
+***************
+Acknowledgments
+***************
+
+---------------------------
+National Science Foundation
+---------------------------
+
+This material is based upon work supported by the National Science Foundation under Grant No. 1612843. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.
+
+------------
+Contributors
+------------
+
+The developers are grateful to the researchers and experts listed below for their insights and suggestions that contributed to the development of pelicun.
+
+**Jack W. Baker** | Stanford University
+
+**Tracy Becker** | University of California Berkeley
+
+**Gregory G. Deierlein** | Stanford University
+
+**Anne Kiremidjian** | Stanford University
+
+**Pouria Kourehpaz** | University of British Columbia
+
+**Carlos Molina Hutt** | University of British Columbia
+
+**Vesna Terzic** | California State University Long Beach
+
+**Paola Vargas** | University of Michigan
+
+**David P. Welch** | Stanford University
+
+**Major Zeng** | Stanford University
+
+**Joanna J. Zou** | Stanford University
diff --git a/doc/source/about/cite.rst b/doc/source/about/cite.rst
new file mode 100644
index 000000000..9301d3a4c
--- /dev/null
+++ b/doc/source/about/cite.rst
@@ -0,0 +1,36 @@
+.. _cite:
+
+Citing pelicun
+--------------
+
+When referencing pelicun in your research or publications, please use the following citation.
+Proper citation is crucial for acknowledging the efforts of the development team and ensuring the reproducibility of your work.
+
+.. card:: Latest pelicun citation
+   :link: https://zenodo.org/doi/10.5281/zenodo.2558557
+
+   .. latest-citation::
+
+
+Logo
+----
+
+.. image:: ../_static/pelicun-Logo-white.png
+   :alt: NHERI-SimCenter pelicun logo
+   :align: center
+
+The pelicun logo is a trademark of NHERI-SimCenter and is protected under applicable trademark laws.
+You are permitted to use the pelicun logo under the following conditions:
+
+1. **Non-Commercial Use**: The logo may be used for non-commercial purposes, including academic publications, presentations, and educational materials, provided that such use is directly related to the pelicun software.
+
+2. **Integrity of the Logo**: The logo must not be altered, modified, or distorted in any way.
+   This includes changes to the logo's proportions, colors, and text, except for resizing that maintains the original aspect ratio.
+
+3. **Attribution**: Any use of the logo must be accompanied by an attribution to "NHERI-SimCenter" as the owner of the pelicun logo.
+
+4. **Prohibited Uses**: The logo must not be used in any manner that suggests endorsement or sponsorship by NHERI-SimCenter of any third-party products, services, or organizations, unless explicit permission has been granted.
+
+5. **Legal Compliance**: The use of the pelicun logo must comply with all applicable laws and regulations, including those related to trademark and intellectual property rights.
+
+For any uses not covered by the above terms, or to seek permission for commercial use, please contact NHERI-SimCenter directly.
diff --git a/doc/source/about/license.rst b/doc/source/about/license.rst
new file mode 100644
index 000000000..9be2f6a7b
--- /dev/null
+++ b/doc/source/about/license.rst
@@ -0,0 +1,9 @@
+.. _license:
+
+Copyright and license
+---------------------
+
+Pelicun is copyright "Leland Stanford Junior University and The Regents of the University of California," and is licensed under the following BSD license:
+
+.. literalinclude:: LICENSE
+   :language: none
diff --git a/doc/source/api_reference/index.rst b/doc/source/api_reference/index.rst
new file mode 100644
index 000000000..0d1953c1d
--- /dev/null
+++ b/doc/source/api_reference/index.rst
@@ -0,0 +1,16 @@
+..
+   Courtesy of James Leedham
+   https://stackoverflow.com/questions/2701998/automatically-document-all-modules-recursively-with-sphinx-autodoc
+
+.. _api_reference:
+
+=============
+API Reference
+=============
+
+.. autosummary::
+   :toctree: _autosummary
+   :template: custom-module-template.rst
+   :recursive:
+
+   pelicun
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 000000000..af63b6108
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,106 @@
+# noqa: INP001, CPY001
+"""Pelicun Sphinx configuration."""
+
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+from datetime import datetime
+from pathlib import Path
+
+sys.path.insert(0, str(Path('./_extensions').resolve()))
+
+# -- Project information -----------------------------------------------------
+project = 'pelicun'
+copyright = (  # noqa: A001
+    f'{datetime.now().year}, Leland Stanford Junior '  # noqa: DTZ005
+    f'University and The Regents of the University of California'
+)
+author = 'Adam Zsarnóczay'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'numpydoc',
+    'sphinx_design',
+    'nbsphinx',
+    'sphinxcontrib.bibtex',
+    'sphinx.ext.autodoc',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.viewcode',
+    'sphinx.ext.githubpages',
+    'sphinx.ext.autosummary',
+    'sphinx.ext.doctest',
+    # our own extension to get the latest citation from zenodo.
+    'latest_citation',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+html_css_files = ['css/custom.css']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', '**/tests/*']
+
+# Extension configuration
+
+autosummary_generate = True # Turn on sphinx.ext.autosummary
+
+intersphinx_mapping = {
+    'python': ('https://docs.python.org/3/', None),
+    'numpy': ('https://numpy.org/doc/stable/', None),
+    'scipy': ('https://docs.scipy.org/doc/scipy/', None),
+}
+
+numpydoc_show_class_members = False # TODO(JVM): remove and extend docstrings
+
+nbsphinx_custom_formats = {
+    '.pct.py': ['jupytext.reads', {'fmt': 'py:percent'}],
+}
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+html_logo = '_static/pelicun-Logo-grey.png'
+html_theme_options = {
+    'analytics_id': 'UA-158130480-7',
+    'logo_only': True,
+    'collapse_navigation': False,
+    'prev_next_buttons_location': None,
+    'navigation_depth': 2,
+    'style_nav_header_background': '#F2F2F2',
+}
+html_show_sphinx = False # Removes "Built with Sphinx using a theme [...]"
+html_show_sourcelink = (
+    False  # Remove 'view source code' from top of page (for html, not python)
+)
+numfig = True
+bibtex_bibfiles = ['references.bib']
+bibtex_style = 'plain'
diff --git a/doc/source/developer_guide/code_quality.rst b/doc/source/developer_guide/code_quality.rst
new file mode 100644
index 000000000..1c02addbf
--- /dev/null
+++ b/doc/source/developer_guide/code_quality.rst
@@ -0,0 +1,241 @@
+.. _code_quality:
+
+=================
+ Coding practice
+=================
+
+Code quality assurance
+======================
+
+We use `Ruff <https://docs.astral.sh/ruff/>`_, `mypy <https://mypy.readthedocs.io/>`_, and `Codespell <https://github.com/codespell-project/codespell>`_ to maintain a high level of quality of the pelicun code.
+Our objective is to always follow the latest mature coding practice recommendations emerging from the Python community, which eases adaptation to changes in pelicun's dependencies or even Python itself.
+
+We use the `numpy docstring style <https://numpydoc.readthedocs.io/en/latest/format.html>`_, and include comments in the code explaining the ideas behind various operations.
+We are making an effort to use unambiguous variable and class/method/function names.
+Especially for newer code, we are mindful of the complexity of methods/functions and break them down when they grow too large, extracting coherent groups of lines into appropriately named hidden methods/functions.
+
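+For reference, a minimal sketch of a numpy-style docstring on a hypothetical function:
+
+.. code:: python
+
+   import numpy as np
+
+   def scale_demands(demands: np.ndarray, factor: float) -> np.ndarray:
+       """
+       Scale a demand sample by a constant factor.
+
+       Parameters
+       ----------
+       demands : np.ndarray
+           Demand sample values.
+       factor : float
+           Scaling factor applied to each value.
+
+       Returns
+       -------
+       np.ndarray
+           The scaled demand sample.
+       """
+       return demands * factor
+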
+All code checking tools should be available when :ref:`installing pelicun under a developer setup <development_environment>`.
+The most straightforward way to run these tools is via the command line.
+All of the following commands are assumed to be executed from the package root directory (the one containing ``pyproject.toml``).
+
+Linting and formatting with Ruff
+--------------------------------
+
+.. code:: bash
+
+   ruff check   # Lint all files in the current directory.
+   ruff format  # Format all files in the current directory.
+
+Ruff can automatically fix certain warnings when it is safe to do so. See also `Fixes <https://docs.astral.sh/ruff/linter/#fixes>`_.
+
+.. code:: bash
+
+   ruff check --fix
+
+Warnings can also be automatically suppressed by adding ``# noqa`` directives. See the `error suppression documentation <https://docs.astral.sh/ruff/linter/#error-suppression>`_.
+
+.. code:: bash
+
+   ruff check --add-noqa
+
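+For instance, the following (hypothetical) lines use the real ``F401`` rule code:
+
+.. code:: python
+
+   # Example: silence the "imported but unused" warning on a re-exported name.
+   import numpy as np  # noqa: F401
+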
+Editor integration
+..................
+
+Like most code checkers, Ruff can be integrated with several editors to enable on-the-fly checks and auto-formatting.
+Under `Editor Integration <https://docs.astral.sh/ruff/editors/>`_, their documentation describes the steps to enable this for VS Code, Neovim, Vim, Helix, Kate, Sublime Text, PyCharm, Emacs, TextMate, and Zed.
+
+Type checking with mypy
+-----------------------
+
+Pelicun code is type hinted.
+We use ``mypy`` for type checking.
+Use the following command to type-check the code:
+
+.. code:: bash
+
+   mypy pelicun --no-namespace-packages
+
+Type checking warnings can be silenced by adding ``# type: ignore`` comments at the lines that trigger them.
+Please avoid silencing warnings in newly added code.
+
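+For instance, the following deliberately mistyped assignment would be flagged by mypy without the comment (``assignment`` is the relevant mypy error code):
+
+.. code:: python
+
+   x: int = 'not an int'  # type: ignore[assignment]
+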
+Spell checking with Codespell
+-----------------------------
+
+Codespell is a Python package used to check for common spelling mistakes in text files, particularly source code. It is available on PyPI, is configured via ``pyproject.toml``, and is executed as follows:
+
+.. code:: bash
+
+   codespell .
+
+False positives can be placed in a dedicated file (we currently call it ``ignore_words.txt``) to be ignored.
+Please avoid using variable names that trigger codespell.
+This is easy when variable names are long and explicit.
+E.g., ``response_spectrum_target`` instead of ``resptr``.
+
+Unit tests
+==========
+
+We use `pytest <https://docs.pytest.org/>`_ to write unit tests for pelicun.
+The tests can be executed with the following command:
+
+.. code:: bash
+
+   python -m pytest pelicun/tests --cov=pelicun --cov-report html
+
+When the test runs finish, visit ``htmlcov/index.html`` for a comprehensive view of code coverage.
+
+The tests can be debugged like any other Python code, by inserting ``breakpoint()`` at the line of interest and running the test command above.
+When the breakpoint is reached you will gain access to ``pdb``, the Python debugger.
+
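+A minimal sketch of this workflow, using a hypothetical test of the ``convert_units`` function shown in the README:
+
+.. code:: python
+
+   import pytest
+   from pelicun.base import convert_units
+
+   def test_convert_units() -> None:
+       height_ft = convert_units(1.00, unit='m', to_unit='ft')
+       breakpoint()  # execution pauses here; inspect `height_ft` with pdb commands
+       assert height_ft == pytest.approx(3.2808, abs=1e-3)
+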
+Please extend the test suite whenever you introduce new pelicun code, and update it if needed when making changes to the existing code.
+Avoid opening pull requests with changes that reduce code coverage by not writing tests for your code.
+
+Documentation
+=============
+
+We use `sphinx <https://www.sphinx-doc.org/>`_ with the `Read the Docs theme <https://sphinx-rtd-theme.readthedocs.io/>`_ to generate our documentation pages.
+
+We use the following extensions:
+
+- `nbsphinx <https://nbsphinx.readthedocs.io/>`_ to integrate jupyter notebooks into the documentation, particularly for the pelicun examples.
+  In the source code they are stored as python files with special syntax defining individual cells, and we use `jupytext <https://jupytext.readthedocs.io/>`_ to automatically turn them into notebooks when the documentation is compiled (see ``nbsphinx_custom_formats`` in ``conf.py`` and the sketch after this list).
+
+- `Sphinx design <https://sphinx-design.readthedocs.io/>`_ for cards and drop-downs.
+
+- `sphinx.ext.mathjax <https://www.sphinx-doc.org/en/master/usage/extensions/math.html>`_ for math support.
+
+- `sphinx.ext.doctest <https://www.sphinx-doc.org/en/master/usage/extensions/doctest.html>`_ to actively test examples included in docstrings.
+
+- `numpydoc <https://numpydoc.readthedocs.io/>`_ and `autodoc <https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html>`_ to generate the API documentation from the docstrings in the source code.
+
+- `sphinx.ext.autosummary <https://www.sphinx-doc.org/en/master/usage/extensions/autosummary.html>`_ for the API docs.
+
+- `sphinx.ext.viewcode <https://www.sphinx-doc.org/en/master/usage/extensions/viewcode.html>`_ to add links that point to the source code in the API docs.
+
+- `sphinx.ext.intersphinx <https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html>`_ to link to other projects' documentation.
+
+- `sphinx.ext.githubpages <https://www.sphinx-doc.org/en/master/usage/extensions/githubpages.html>`_ for publishing in GitHub pages.
+
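+A minimal sketch of the ``py:percent`` cell syntax that jupytext converts into notebook cells (hypothetical file, e.g. ``example.pct.py``):
+
+.. code:: python
+
+   # %% [markdown]
+   # # A title cell, rendered as markdown
+
+   # %%
+   # A code cell; jupytext turns it into an executable notebook cell.
+   total_repair_cost = 1000.0 * 1.15
+   print(total_repair_cost)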
+
+Building the documentation
+--------------------------
+
+To build the documentation, navigate to ``doc`` and run the following command:
+
+.. tab-set::
+
+   .. tab-item:: Linux & Mac
+
+      .. code:: bash
+
+         make html
+
+   .. tab-item:: Windows
+
+      .. code:: bash
+
+         .\make.bat html
+
+To see more options:
+
+.. tab-set::
+
+   .. tab-item:: Linux & Mac
+
+      .. code:: bash
+
+         make
+
+   .. tab-item:: Windows
+
+      .. code:: bash
+
+         .\make.bat
+
+
+Extending the documentation
+---------------------------
+
+Extending the documentation can be done in several ways:
+
+- By adding content to ``.rst`` files or adding more such files.
+  See the structure of the ``doc/source`` directory and look at the ``index.rst`` files to gain familiarity with the structure of the documentation.
+  When a page is added it will need to be included in a ``toctree`` directive in order for it to be registered and have a way of being accessed.
+- By adding or modifying example notebooks under ``doc/examples/notebooks``.
+  When a new notebook is added, it needs to be included in ``doc/examples/index.rst``.
+  Please review that index file and how other notebooks are listed to become familiar with our approach.
+
+After making a change you can simply rebuild the documentation with the command above.
+Once the documentation pages are built, please verify no Sphinx warnings are reported.
+A warning count is shown after "build succeeded", close to the last output line:
+
+.. code-block:: none
+   :emphasize-lines: 3
+
+   [...]
+   dumping object inventory... done
+   build succeeded, 1 warning.
+
+   The HTML pages are in build/html.
+
+If there are warnings, please address them before contributing your changes.
diff --git a/doc/source/developer_guide/development_environment.rst b/doc/source/developer_guide/development_environment.rst
new file mode 100644
index 000000000..04752fbff
--- /dev/null
+++ b/doc/source/developer_guide/development_environment.rst
@@ -0,0 +1,15 @@
+.. _development_environment:
+
+Setting up a development environment
+------------------------------------
+
+.. tip::
+
+   We recommend creating a dedicated `virtual environment <https://docs.python.org/3/library/venv.html>`_ for your pelicun development environment.
+   See also `conda <https://docs.conda.io/en/latest/>`_ and `mamba <https://mamba.readthedocs.io/>`_, two widely used programs featuring environment management.
+
+Install pelicun in editable mode with the following command issued from the package's root directory::
+
+   python -m pip install -e .[development]
+
+This will install pelicun in editable mode as well as all dependencies needed for development.
diff --git a/doc/source/developer_guide/getting_started.rst b/doc/source/developer_guide/getting_started.rst
new file mode 100644
index 000000000..20e38a087
--- /dev/null
+++ b/doc/source/developer_guide/getting_started.rst
@@ -0,0 +1,224 @@
+Welcome to the pelicun developer guide.
+The following pages contain information on setting up a development environment, our code quality requirements, our code of conduct, and the submission process.
+Thank you for your interest in extending pelicun.
+We are looking forward to your contributions!
+
+===============
+Getting started
+===============
+
+Code of conduct
+===============
+
+Our pledge
+----------
+
+We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.
+
+Our standards
+-------------
+
+Examples of behavior that contributes to a positive environment for our community include:
+
+- Demonstrating empathy and kindness toward other people.
+- Being respectful of differing opinions, viewpoints, and experiences.
+- Giving and gracefully accepting constructive feedback.
+- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience.
+- Focusing on what is best not just for us as individuals, but for the overall community.
+
+Examples of unacceptable behavior include:
+
+- The use of sexualized language or imagery, and sexual attention or advances of any kind.
+- Trolling, insulting or derogatory comments, and personal or political attacks.
+- Public or private harassment.
+- Publishing others’ private information, such as a physical or email address, without their explicit permission.
+- Other conduct which could reasonably be considered inappropriate in a professional setting.
+
+Enforcement responsibilities
+----------------------------
+
+Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.
+
+Scope
+-----
+
+This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.
+
+Enforcement
+-----------
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leader responsible for enforcement at ``adamzs@stanford.edu``.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the reporter of any incident.
+
+Enforcement guidelines
+----------------------
+
+Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:
+
+1. Correction
+~~~~~~~~~~~~~
+
+**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate.
+A public apology may be requested.
+
+2. Warning
+~~~~~~~~~~
+
+**Community Impact**: A violation through a single incident or series of actions.
+
+**Consequence**: A warning with consequences for continued behavior.
+No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time.
+This includes avoiding interactions in community spaces as well as external channels like social media.
+Violating these terms may lead to a temporary or permanent ban.
+
+3. Temporary ban
+~~~~~~~~~~~~~~~~
+
+**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time.
+No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+4. Permanent Ban
+~~~~~~~~~~~~~~~~
+
+**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the community.
+
+Attribution
+-----------
+
+This Code of Conduct is adapted from the `Contributor Covenant `__, version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by `Mozilla’s code of conduct enforcement ladder `__.
+
+For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq.
+Translations are available at https://www.contributor-covenant.org/translations.
+
+.. _contributing:
+
+How to contribute
+=================
+
+Prerequisites
+-------------
+
+Contributing to pelicun requires being familiar with the following:
+
+.. dropdown:: Python Programming
+
+    Familiarity with object-oriented programming in Python, the PDB debugger, and with using Numpy and Pandas to handle arrays and DataFrames.
+
+ The following resources may be helpful:
+
+ - `python.org tutorial `_
+ - `numpy beginner's guide `_
+ - `numpy user guide `_
+ - `pandas beginner's guide `_
+ - `pandas user guide `_
+
+.. dropdown:: Virtual Environments
+
+ Managing a development environment, installing and removing packages.
+
+ The following resources may be helpful:
+
+ - `Python: Virtual Environments and Packages `_
+
+ - `Conda: Managing environments `_
+ - `Micromamba User Guide `_
+
+.. dropdown:: reStructured Text Markup
+
+ Being able to extend the documentation by reviewing the existing files and following the same pattern, without introducing compilation warnings.
+
+ The following resources may be helpful:
+
+ - `reStructuredText documentation `_
+ - `Sphinx User Guide: Using Sphinx `_
+
+.. dropdown:: Git for Version Control
+
+    Knowing how to clone a repository, create and check out branches, review commit logs, commit well-documented changes, or stash them for later use.
+
+    The following resources may be useful:
+
+    - `git reference manual `_
+
+.. dropdown:: Command Line
+
+ Being able to set up and call command-line programs beyond Git, including the linting and formatting tools we use.
+
+ The following resources may be useful:
+
+ .. tab-set::
+
+ .. tab-item:: Linux & Mac
+
+ `Bash Reference Manual `_
+
+ .. tab-item:: Windows
+
+ `PowerShell Documentation `_
+
+.. dropdown:: Pattern-matching
+
+ Ultimately, we learn by example.
+ The files already present in the pelicun source code offer an existing template that can help you understand what any potential additions should look like.
+    Actively exploring the existing files, tinkering with them, and breaking things is a great way to gain a deeper understanding of the package.
+
+Contributing workflow
+---------------------
+
+The development of pelicun is done via Pull Requests (PR) on GitHub.
+Contributors need to carry out the following steps to submit a successful PR:
+
+- `Create a GitHub account `_, if you don't already have one.
+- `Fork the primary pelicun repository `_.
+- On the fork, create a feature branch with an appropriate starting point.
+- Make and commit changes to the branch.
+- Push to your remote repository.
+- Open a well-documented PR on the GitHub website.
+
+If you are working on multiple features, please use multiple dedicated feature branches with the same starting point instead of lumping them into a single branch.
+This approach substantially simplifies the review process, and changes on multiple fronts can be easily merged after being reviewed.
+On each feature branch, please commit changes often and include meaningful commit titles and messages.
+
+.. tip::
+
+ Consider taking advantage of advanced Git clients, which enable selective, partial staging of hunks, helping organize commits.
+
+ .. dropdown:: Potential options
+
+ - `Emacs Magit `_, for Emacs users. Tried and true, used by our development team.
+ - `Sublime Merge `_, also used by our development team.
+ - `GitHub Desktop `_, convenient and user-friendly.
+
+Code review process
+-------------------
+
+After you submit your PR, we are going to promptly review your commits, offer feedback and request changes.
+All contributed code needs to be comprehensive.
+That is, inclusion of new objects, methods, or functions should be accompanied by unit tests having reasonable coverage, and extension of the documentation pages as appropriate.
+We will direct you to extend your changes to cover those areas if you haven't done so.
+After the review process, the PR will either be merged to the main repository or rejected with sufficient justification.
+
+We will accept any contribution that we believe ultimately improves pelicun, no matter how big or small.
+You are welcome to open a PR even for a single typo.
+
+Identifying contribution opportunities
+--------------------------------------
+
+The `Issues `_ page on GitHub documents areas needing improvement.
+If you are interested in becoming a contributor but don't have a specific change in mind, feel free to work on addressing any of the issues listed.
+If you would like to offer a contribution that extends the fundamental framework, please begin by `initiating a discussion `_ before you work on changes, to avoid unnecessary effort.
diff --git a/doc/source/developer_guide/internals.rst b/doc/source/developer_guide/internals.rst
new file mode 100644
index 000000000..d1c9cb048
--- /dev/null
+++ b/doc/source/developer_guide/internals.rst
@@ -0,0 +1,49 @@
+.. _internals:
+
+Package architecture
+--------------------
+
+Overview of files
+.................
+
++-------------------+---------------------------------------+
+|Path |Description |
++===================+=======================================+
+|``pelicun/`` |Main package source code. |
++-------------------+---------------------------------------+
+|``doc/`` |Documentation source code. |
++-------------------+---------------------------------------+
+|``.github/`` |GitHub-related workflow files. |
++-------------------+---------------------------------------+
+|``pyproject.toml`` |Main configuration file. |
++-------------------+---------------------------------------+
+|``setup.py`` |Package setup file. |
++-------------------+---------------------------------------+
+|``MANIFEST.in`` |Defines files to include when building |
+| |the package. |
++-------------------+---------------------------------------+
+
+.. note::
+
+ We are currently in the process of migrating most configuration files to ``pyproject.toml``.
+
+We use `setuptools `_ to build the package, using ``setup.py`` for configuration.
+In ``setup.py`` we define an entry point called ``pelicun``, directing to the ``main`` method of ``DL_calculation.py``, used to run pelicun from the command line.
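+
+As an illustration, such a console entry point is typically declared along the following lines; this is a sketch following setuptools conventions, and the exact code in ``setup.py`` may differ::
+
+    from setuptools import setup
+
+    setup(
+        # ... name, version, and other package metadata ...
+        entry_points={
+            'console_scripts': [
+                'pelicun = pelicun.tools.DL_calculation:main',
+            ],
+        },
+    )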
+
+The python source code and unit tests are located under ``pelicun/``.
+``assessment.py`` is the primary file defining assessment classes and methods.
+Modules under ``model/`` contain various models used by ``Assessment`` objects.
+Such models handle the representation of asset inventories, as well as demand, damage, and loss samples.
+``base.py`` defines commonly used objects.
+``file_io.py`` defines methods related to reading and writing to files.
+``uq.py`` defines classes and methods used for uncertainty quantification, including random variable objects and registries, and parameter recovery methods used to fit distributions to raw data samples.
+``warnings.py`` defines custom errors and warnings used in pelicun.
+``tools/DL_calculation.py`` enables the invocation of analyses using the command line.
+``settings/`` contains several ``JSON`` files used to define default units, configuration options and input validation.
+``resources/`` contains default damage and loss model parameters.
+
+.. tip::
+
+ For a detailed overview of these files, please see the `API documentation <../api_reference/index.rst>`_ or directly review the source code.
+
+ A direct way to become familiar with an area of the source code you are interested in working with is to debug an applicable example or test and follow through the calculation steps involved, taking notes in the process.
diff --git a/doc/source/examples/index.rst b/doc/source/examples/index.rst
new file mode 100644
index 000000000..062bcb169
--- /dev/null
+++ b/doc/source/examples/index.rst
@@ -0,0 +1,72 @@
+:notoc:
+
+.. _examples:
+
+********
+Examples
+********
+
+Pelicun examples are listed in the following index.
+
+.. attention::
+
+ These documentation pages are brand new and in active development.
+ Increasing the set of examples is a very high priority.
+ Please come back soon!
+
+Complete list
+-------------
+
++-----------+---------------------------------------------------------+
+|Example |Description |
++===========+=========================================================+
+|`E1`_ |This example demonstrates the seismic performance |
+| |assessment of a steel moment frame structure using the |
+| |FEMA P-58 methodology. |
++-----------+---------------------------------------------------------+
+
+Grouped by feature
+------------------
+
+The following sections group the examples above based on the specific features they illustrate, helping you pick the ones most relevant to what you are looking for.
+
+.. dropdown:: Asset type
+
+ +------------------------------------------------------+---------------------------------------------------------+
+ |Feature |Examples |
+ +======================================================+=========================================================+
+ |Building |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+
+.. dropdown:: Demand Simulation
+
+ +------------------------------------------------------+---------------------------------------------------------+
+ |Feature |Examples |
+ +======================================================+=========================================================+
+ |:ref:`Model calibration ` |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+ |:ref:`RID|PID inference ` |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+ |:ref:`Sample expansion ` |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+
+
+.. dropdown:: Loss estimation
+
+ +------------------------------------------------------+---------------------------------------------------------+
+ |Feature |Examples |
+ +======================================================+=========================================================+
+ |:ref:`Loss maps ` |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+ |:ref:`Active decision variables ` |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+ |:ref:`Loss aggregation ` |`E1`_ |
+ +------------------------------------------------------+---------------------------------------------------------+
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ notebooks/example_1.pct.py
+
+.. _E1: notebooks/example_1.pct.py
diff --git a/doc/source/examples/notebooks/example_1.pct.py b/doc/source/examples/notebooks/example_1.pct.py
new file mode 100644
index 000000000..ee229a6f7
--- /dev/null
+++ b/doc/source/examples/notebooks/example_1.pct.py
@@ -0,0 +1,1637 @@
+# %% [markdown]
+# # Example 1
+
+# %% [markdown]
+"""
+## Introduction
+
+This example focuses on the seismic performance assessment of a steel
+moment frame structure using the FEMA P-58 method. We look at demand,
+damage, and loss modeling in detail and highlight the inputs required
+by Pelicun, some of the settings researchers might want to
+experiment with, and the outputs provided by such a high-resolution
+calculation.
+
+This example is based on an example notebook for an earlier version of
+pelicun, hosted on
+[DesignSafe](https://www.designsafe-ci.org/data/browser/public/designsafe.storage.published/PRJ-3411v5?version=5).
+
+"""
+
+# %%
+# Imports
+import pprint
+import sys
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import plotly.express as px
+from plotly import graph_objects as go
+from plotly.subplots import make_subplots
+
+from pelicun.assessment import Assessment, DLCalculationAssessment
+from pelicun.base import convert_to_MultiIndex
+
+idx = pd.IndexSlice
+pd.options.display.max_rows = 30
+
+# %% [markdown]
+"""
+## Initialize Assessment
+
+When creating a Pelicun Assessment, you can provide a number of
+settings to control the analysis. The following options are
+currently available:
+
+- **Verbose** If True, provides more detailed messages about the
+ calculations. Default: False.
+
+- **Seed** Providing a seed makes probabilistic calculations
+ reproducible. Default: No seed.
+
+- **PrintLog** If True, prints the messages on the screen as well as
+ in the log file. Default: False.
+
+- **LogFile** Allows printing the log to a specific file under a path
+ provided here as a string. By default, the log is printed to the
+ pelicun_log.txt file.
+
+- **LogShowMS** If True, Pelicun provides more detailed time
+ information by showing times up to microsecond
+ precision. Default: False, meaning times are provided with
+ second precision.
+
+- **SamplingMethod** Three methods are available: {'MonteCarlo',
+ 'LHS', 'LHS_midpoint'}; Default: LHS_midpoint
+ * 'MonteCarlo' stands for conventional random sampling;
+ * 'LHS' is Latin HyperCube Sampling with random sample location
+ within each chosen bin of the hypercube;
+ * 'LHS_midpoint' is like LHS, but the samples are assigned to
+ the midpoints of the hypercube bins.
+
+- **DemandOffset** Expects a dictionary with
+ {demand_type:offset_value} key-value pairs. demand_type could be
+ 'PFA' or 'PIH' for example. The offset values are applied to the
+ location values when Performance Group locations are parsed to
+ demands that control the damage or losses. Default: {'PFA':-1,
+ 'PFV':-1}, meaning floor accelerations and velocities are pulled
+ from the bottom slab associated with the given floor. For
+ example, floor 2 would get accelerations from location 1, which
+ is the first slab above ground.
+
+- **NonDirectionalMultipliers** Expects a dictionary with
+ {demand_type:scale_factor} key-value pairs. demand_type could be
+ 'PFA' or 'PIH' for example; use 'ALL' to define a scale factor
+ for all demands at once. The scale factor considers that for
+ components with non-directional behavior the maximum of demands
+ is typically larger than the ones available in two orthogonal
+ directions. Default: {'ALL': 1.2}, based on FEMA P-58.
+
+- **RepairCostAndTimeCorrelation** Specifies the correlation
+ coefficient between the repair cost and repair time of
+ individual component blocks. Default: 0.0, meaning uncorrelated
+ behavior. Use 1.0 to get perfect correlation or anything between
+  0-1 to get partial correlation. Values in the -1 to 0 range are
+ also valid to consider negative correlation between cost and
+ time.
+
+- **EconomiesOfScale** Controls how the damages are aggregated when
+ the economies of scale are calculated. Expects the following
+ dictionary: {'AcrossFloors': bool, 'AcrossDamageStates': bool}
+ where bool is either True or False. Default: {'AcrossFloors':
+ True, 'AcrossDamageStates': False}
+
+ * 'AcrossFloors' if True, aggregates damages across floors to get
+ the quantity of damage. If False, it uses damaged quantities and
+ evaluates economies of scale independently for each floor.
+
+ * 'AcrossDamageStates' if True, aggregates damages across damage
+ states to get the quantity of damage. If False, it uses damaged
+ quantities and evaluates economies of scale independently for each
+ damage state.
+
+We use the default values for this analysis, only providing a seed to
+make the results repeatable and asking for the log to be printed to
+show outputs within this Jupyter notebook.
+"""
+
+# %%
+# initialize a pelicun Assessment
+assessment = Assessment({'PrintLog': True, 'Seed': 415})
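+
+# For illustration, a fuller configuration using the options described
+# above could look like this (placeholder values, not used below):
+# assessment = Assessment({
+#     'PrintLog': True,
+#     'Seed': 415,
+#     'SamplingMethod': 'LHS',
+#     'NonDirectionalMultipliers': {'ALL': 1.2},
+#     'RepairCostAndTimeCorrelation': 0.7,
+# })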
+
+# %% [markdown]
+"""
+## Demands
+
+### Load demand distribution data
+
+Demand distribution data was extracted from the FEMA P-58 background
+documentation referenced in the Introduction. The nonlinear analysis
+results from Figures 1-14 – 1-21 provide the 10th percentile,
+median, and 90th percentile of EDPs in two directions on each floor
+at each intensity level. We fit a lognormal distributions to those
+data and collected the parameters of those distribution in the
+demand_data.csv file.
+
+Note that these results do not match the (non-directional) EDP
+parameters in Table 1-35 – 1-42 in the report, so those must
+have been processed in another way. The corresponding methodology is
+not provided in the report; we are not using the results from those
+tables in this example.
+"""
+
+# %%
+raw_demands = pd.read_csv('example_1/demand_data.csv', index_col=0)
+raw_demands
+
+# %% [markdown]
+"""
+**Pelicun uses SimCenter's naming convention for demands:**
+
+- The first number represents the event_ID. This can be used to
+ differentiate between multiple stripes of an analysis, or multiple
+ consecutive events in a main-shock - aftershock sequence, for
+ example. Currently, Pelicun does not use the first number
+ internally, but we plan to utilize it in the future.
+
+- The type of the demand identifies the EDP or IM. The following
+ options are available:
+ * 'Story Drift Ratio' : 'PID',
+ * 'Peak Interstory Drift Ratio': 'PID',
+ * 'Roof Drift Ratio' : 'PRD',
+ * 'Peak Roof Drift Ratio' : 'PRD',
+ * 'Damageable Wall Drift' : 'DWD',
+ * 'Racking Drift Ratio' : 'RDR',
+ * 'Peak Floor Acceleration' : 'PFA',
+ * 'Peak Floor Velocity' : 'PFV',
+ * 'Peak Gust Wind Speed' : 'PWS',
+ * 'Peak Inundation Height' : 'PIH',
+ * 'Peak Ground Acceleration' : 'PGA',
+ * 'Peak Ground Velocity' : 'PGV',
+ * 'Spectral Acceleration' : 'SA',
+ * 'Spectral Velocity' : 'SV',
+ * 'Spectral Displacement' : 'SD',
+ * 'Peak Spectral Acceleration' : 'SA',
+ * 'Peak Spectral Velocity' : 'SV',
+ * 'Peak Spectral Displacement' : 'SD',
+ * 'Permanent Ground Deformation' : 'PGD',
+ * 'Mega Drift Ratio' : 'PMD',
+ * 'Residual Drift Ratio' : 'RID',
+ * 'Residual Interstory Drift Ratio': 'RID'
+
+- The third part is an integer that defines the location where the
+ demand was recorded. In buildings, locations are typically floors,
+ but in other assets, locations could reference any other part of the
+ structure. Other pelicun examples show how location can also
+ identify individual buildings in a regional analysis.
+
+- The last part is an integer that defines the direction of the
+ demand. Typically 1 stands for horizontal X and 2 for horizontal Y,
+ but any other numbering convention can be used. Direction does not
+ have to be used strictly to identify directions. It can be
+ considered a generic second-level location identifier that
+ differentiates demands and Performance Groups within a location.
+
+For example, the label '1-PFA-3-1' refers to the peak floor
+acceleration recorded at the third floor in direction 1 during event 1.
+The location and direction numbers need to be in line with the
+component definitions presented later.
+
+**MultiIndex and SimpleIndex in Pelicun**:
+
+Pelicun uses hierarchical indexing for rows and columns to organize
+data efficiently internally. It provides methods to convert simple
+indexes to hierarchical ones (so-called MultiIndex in Python's pandas
+package). These methods require that simple indexes follow some basic
+formatting conventions:
+
+- information at different levels is separated by a dash character: '-'
+
+- no dash character is used in the labels themselves
+
+- spaces are allowed, but are not preserved
+
+The index of the DataFrame above shows what the simple index labels
+look like, and the DataFrame below shows how they are converted to a
+hierarchical MultiIndex.
+"""
+
+# %%
+# convert index to MultiIndex to make it easier to slice the data
+raw_demands = convert_to_MultiIndex(raw_demands, axis=0)
+raw_demands.index.names = ['stripe', 'type', 'loc', 'dir']
+raw_demands.tail(30)
+
+# %% [markdown]
+"""
+### Prepare demand input for pelicun
+
+Pelicun offers several options to obtain a desired demand sample:
+1. provide the sample directly;
+2. provide a distribution (i.e., marginals, and optional correlation
+ matrix) and sample it;
+3. provide a small set of demand values, fit a distribution, and
+ sample that distribution to a large enough sample for performance
+ assessment.
+
+In this example, we are going to use the demand information from the
+FEMA P-58 background documentation to provide a marginal of each
+demand type and sample it (i.e., option 2 from the list). Then, we
+will extract the sample from pelicun, extend it with additional demand
+types, and load it back into Pelicun (i.e., option 1 from the list).
+
+**Scenarios**
+
+Currently, Pelicun performs calculations for one scenario at a
+time. Hence, we need to select the stripe we wish to investigate from
+the eight available stripes that were used in the multi-stripe
+analysis.
+
+**Units**
+
+Pelicun allows users to choose from various units for all inputs,
+including demands. Internally, Pelicun uses Standard International
+units, but we support typical units used in the United States as
+well. If a unit you would like to use is not supported, you will see
+an error message - let us know and we will be happy to extend the
+list of supported units.
+"""
+
+# %%
+# we'll use stripe 3 for this example
+stripe = '3'
+stripe_demands = raw_demands.loc[stripe, :]
+
+# units - - - - - - - - - - - - - - - - - - - - - - - -
+stripe_demands.insert(0, 'Units', '')
+
+# PFA is in "g" in this example, while PID is "rad"
+stripe_demands.loc['PFA', 'Units'] = 'g'
+stripe_demands.loc['PID', 'Units'] = 'rad'
+
+# distribution family - - - - - - - - - - - - - - - - -
+stripe_demands.insert(1, 'Family', '')
+
+# we assume lognormal distribution for all demand marginals
+stripe_demands['Family'] = 'lognormal'
+
+# distribution parameters - - - - - - - - - - - - - - -
+# pelicun uses generic parameter names to handle various distributions within the same data structure
+# we need to rename the parameter columns as follows:
+# median  -> Theta_0
+# log_std -> Theta_1
+stripe_demands = stripe_demands.rename(
+ columns={'median': 'Theta_0', 'log_std': 'Theta_1'}
+)
+
+stripe_demands
+
+# %% [markdown]
+# Let's plot the demand data to perform a sanity check before the
+# analysis
+
+# %%
+fig = make_subplots(
+ rows=1,
+ cols=2,
+ subplot_titles=(
+ 'Peak Interstory Drift ratio ',
+ 'Peak Floor Acceleration ',
+ ),
+ shared_yaxes=True,
+ horizontal_spacing=0.05,
+ vertical_spacing=0.05,
+)
+
+for demand_i, demand_type in enumerate(['PID', 'PFA']):
+ if demand_type == 'PID':
+ offset = -0.5
+ else:
+ offset = 0.0
+
+ for d_i, (dir_, d_color) in enumerate(zip([1, 2], ['blue', 'red'])):
+ result_name = f'{demand_type} dir {dir_}'
+
+ params = stripe_demands.loc[
+ idx[demand_type, :, str(dir_)], ['Theta_0', 'Theta_1']
+ ]
+ params.index = params.index.get_level_values(1).astype(float)
+
+ # plot +- 2 log std
+ for mul, m_dash in zip([1, 2], ['dash', 'dot']):
+ if mul == 1:
+ continue
+
+ for sign in [-1, 1]:
+ fig.add_trace(
+ go.Scatter(
+ x=np.exp(
+ np.log(params['Theta_0'].values)
+ + params['Theta_1'].to_numpy() * sign * mul
+ ),
+ y=params.index + offset,
+ hovertext=result_name + ' median +/- 2logstd',
+ name=result_name + ' median +/- 2logstd',
+ mode='lines+markers',
+ line={'color': d_color, 'dash': m_dash, 'width': 0.5},
+ marker={'size': 4 / mul},
+ showlegend=False,
+ ),
+ row=1,
+ col=demand_i + 1,
+ )
+
+ # plot the medians
+ fig.add_trace(
+ go.Scatter(
+ x=params['Theta_0'].values,
+ y=params.index + offset,
+ hovertext=result_name + ' median',
+ name=result_name + ' median',
+ mode='lines+markers',
+ line={'color': d_color, 'width': 1.0},
+ marker={'size': 8},
+ showlegend=False,
+ ),
+ row=1,
+ col=demand_i + 1,
+ )
+
+ if d_i == 0:
+ shared_ax_props = {
+ 'showgrid': True,
+ 'linecolor': 'black',
+ 'gridwidth': 0.05,
+ 'gridcolor': 'rgb(192,192,192)',
+ }
+
+ if demand_type == 'PID':
+ fig.update_xaxes(
+ title_text='drift ratio',
+ range=[0, 0.05],
+ row=1,
+ col=demand_i + 1,
+ **shared_ax_props,
+ )
+
+ elif demand_type == 'PFA':
+ fig.update_xaxes(
+ title_text='acceleration [g]',
+ range=[0, 1.0],
+ row=1,
+ col=demand_i + 1,
+ **shared_ax_props,
+ )
+
+ if demand_i == 0:
+ fig.update_yaxes(
+ title_text='story',
+ range=[0, 4],
+ row=1,
+ col=demand_i + 1,
+ **shared_ax_props,
+ )
+ else:
+ fig.update_yaxes(
+ range=[0, 4], row=1, col=demand_i + 1, **shared_ax_props
+ )
+
+fig.update_layout(
+ title=f'intensity level {stripe} ~ 475 yr return period',
+ height=500,
+ width=900,
+ plot_bgcolor='white',
+)
+
+fig.show()
+
+# %% [markdown]
+"""
+### Sample the demand distribution
+
+The scripts below load the demand marginal information to Pelicun and
+ask it to generate a sample with the provided number of
+realizations. We do not have correlation information from the
+background documentation, but it is generally better (i.e.,
+conservative from a damage, loss, and risk point of view) to assume
+perfect correlation in such cases than to assume independence. Hence,
+we prepare a correlation matrix that represents perfect correlation
+and feed it to Pelicun with the marginal parameters.
+
+After generating the sample, we extract it and print the first few
+realizations below.
+"""
+
+# %%
+# prepare a correlation matrix that represents perfect correlation
+ndims = stripe_demands.shape[0]
+demand_types = stripe_demands.index
+
+perfect_corr = pd.DataFrame(
+ np.ones((ndims, ndims)), columns=demand_types, index=demand_types
+)
+
+# load the demand model
+assessment.demand.load_model(
+ {'marginals': stripe_demands, 'correlation': perfect_corr}
+)
+
+# %%
+# choose a sample size for the analysis
+sample_size = 10000
+
+# generate demand sample
+assessment.demand.generate_sample({'SampleSize': sample_size})
+
+# extract the generated sample
+
+# Note that calling the save_sample() method is better than directly
+# pulling the sample attribute from the demand object because the
+# save_sample method converts demand units back to the ones you
+# specified when loading in the demands.
+
+demand_sample = assessment.demand.save_sample()
+
+demand_sample.head()
+
+# %% [markdown]
+r"""
+### Extend the sample
+
+The damage and loss models we use later in this example need residual
+drift and spectral acceleration [Sa(T=1.13s)] information for each
+realization. The residual drifts are used to consider irreparable
+damage to the building; the spectral accelerations are used to
+evaluate the likelihood of collapse.
+
+**Residual drifts**
+
+Residual drifts could come from nonlinear analysis, but they are often
+not available or not robust enough. Pelicun provides a convenience
+method to convert PID to RID and we use that function in this
+example. Currently, the method implements the procedure recommended in
+FEMA P-58, but it is designed to support multiple approaches for
+inferring RID from available demand information.
+
+The FEMA P-58 RID calculation is based on the yield drift ratio. There
+are conflicting data in FEMA P-58 on the yield drift ratio that should
+be applied for this building:
+
+* According to Vol 2 4.7.3, $\Delta_y = 0.0035$ , but this value leads
+ to excessive irreparable drift likelihood that does not match the
+ results in the background documentation.
+
+* According to Vol 1 Table C-2, $\Delta_y = 0.0075$ , which leads to
+ results that are more in line with those in the background
+ documentation.
+
+We use the second option below. Note that we get a different set of
+residual drift estimates for every floor of the building.
+
+**Spectral acceleration**
+
+The Sa(T) can be easily added as a new column to the demand
+sample. Note that Sa values are identical across all realizations
+because we are running the analysis for one stripe that has a
+particular Sa(T) assigned to it. We assign the Sa values to direction
+1 and we will make sure to have the collapse fragility defined as a
+directional component (see Damage/Fragility data) to avoid scaling
+these spectral accelerations with the nondirectional scale factor.
+
+The list below provides Sa values for each stripe from the analysis -
+the values are from the background documentation referenced in the
+Introduction.
+"""
+
+# %%
+# get residual drift estimates
+delta_y = 0.0075
+PID = demand_sample['PID']
+
+RID = assessment.demand.estimate_RID(PID, {'yield_drift': delta_y})
+
+# and join them with the demand_sample
+demand_sample_ext = pd.concat([demand_sample, RID], axis=1)
+
+# add spectral acceleration
+Sa_vals = [0.158, 0.387, 0.615, 0.843, 1.071, 1.299, 1.528, 1.756]
+demand_sample_ext['SA_1.13', 0, 1] = Sa_vals[int(stripe) - 1]
+
+demand_sample_ext.describe().T
+
+# %% [markdown]
+"""
+The plot below illustrates that the relationship between a PID and RID
+variable is not multivariate lognormal. This underlines the importance
+of generating the sample for such additional demand types
+realization-by-realization rather than adding a marginal RID to the
+initial set and asking Pelicun to sample RIDs from a multivariate
+lognormal distribution.
+
+You can use the plot below to display the joint distribution of any
+two demand variables.
+"""
+
+# %%
+# plot two demands from the sample
+
+demands = ['PID-1-1', 'RID-1-1']
+
+fig = go.Figure()
+
+# add the sample points for the two selected demands; the column keys
+# follow the (type, loc, dir) convention used above
+point_data = demand_sample_ext.loc[:, [tuple(d.split('-')) for d in demands]]
+fig.add_trace(
+    go.Scatter(
+        x=point_data.iloc[:, 0],
+        y=point_data.iloc[:, 1],
+        mode='markers',
+        marker={'size': 3, 'opacity': 0.1},
+        showlegend=False,
+    )
+)
+
+# inputs for the command-line oriented (DLCalculationAssessment)
+# workflow; these are not used by the plot in this cell
+demand_file = 'response.csv'
+output_path = 'doc/source/examples/notebooks/example_1/output'
+coupled_edp = True
+realizations = '100'
+auto_script_path = 'PelicunDefault/Hazus_Earthquake_IM.py'
+detailed_results = False
+output_format = None
+custom_model_dir = None
+color_warnings = False
+
+shared_ax_props = {
+ 'showgrid': True,
+ 'linecolor': 'black',
+ 'gridwidth': 0.05,
+ 'gridcolor': 'rgb(192,192,192)',
+ 'type': 'log',
+}
+
+if 'PFA' in demands[0]:
+ fig.update_xaxes(
+ title_text=f'acceleration [g] {demands[0]}',
+ range=np.log10([0.001, 1.5]),
+ **shared_ax_props,
+ )
+
+else:
+ fig.update_xaxes(
+ title_text=f'drift ratio {demands[0]}',
+ range=np.log10([0.001, 0.1]),
+ **shared_ax_props,
+ )
+
+if 'PFA' in demands[1]:
+ fig.update_yaxes(
+ title_text=f'{demands[1]} acceleration [g]',
+ range=np.log10([0.0001, 1.5]),
+ **shared_ax_props,
+ )
+
+else:
+ fig.update_yaxes(
+ title_text=f'{demands[1]} drift ratio',
+ range=np.log10([0.0001, 0.1]),
+ **shared_ax_props,
+ )
+
+
+fig.update_layout(title='demand sample', height=600, width=650, plot_bgcolor='white')
+
+fig.show()
+
+# %% [markdown]
+"""
+### Load Demand Samples
+
+The script below adds unit information to the sample data and loads it
+to Pelicun.
+
+Note that you could skip the first part of this demand calculation and
+prepare a demand sample entirely by yourself. That allows you to
+consider any kind of distributions and any kind of correlation
+structure between the demands. As long as you have the final list of
+realizations formatted according to the conventions explained above,
+you should be able to load it directly to Pelicun.
+"""
+
+# %%
+# add units to the data
+demand_sample_ext.T.insert(0, 'Units', '')
+
+# PFA and SA are in "g" in this example, while PID and RID are "rad"
+demand_sample_ext.loc['Units', ['PFA', 'SA_1.13']] = 'g'
+demand_sample_ext.loc['Units', ['PID', 'RID']] = 'rad'
+
+display(demand_sample_ext)
+
+assessment.demand.load_sample(demand_sample_ext)
+
+# %% [markdown]
+# This concludes the Demand section. The demand sample is ready; we
+# can move on to the damage calculation.
+
+# %% [markdown]
+"""
+## Damage
+
+Damage simulation requires an asset model, fragility data, and a
+damage process that describes dependencies between damages in the
+system. We will look at each of these in detail below.
+
+### Define asset model
+
+The asset model assigns components to the building and defines where
+they are and how much of each component is at each location.
+
+The asset model can consider uncertainties in the types of components
+assigned and in their quantities. This example does not introduce
+those uncertainties for the sake of brevity, but they are discussed in
+other examples. For this example, the component types and their
+quantities are identical in all realizations.
+
+Given this deterministic approach, we can take advantage of a
+convenience method in Pelicun for defining the asset model. We can
+prepare a table (see the printed data below) where each row identifies
+a component and assigns some quantity of it to a set of locations and
+directions. Such a table can be prepared in Excel or in a text editor
+and saved in a CSV file - like we did in this example, see
+CMP_marginals.csv - or it could be prepared as part of this
+script. Storing these models in a CSV file facilitates sharing the
+basic inputs of an analysis with other researchers.
+
+The tabular information is organized as follows:
+
+* Each row in the table can assign component quantities (Theta_0) to
+ one or more Performance Groups (PG). A PG is a group of components
+ at a given floor (location) and direction that is affected by the
+ same demand (EDP or IM) values.
+
+* The quantity defined under Theta_0 is assigned to each location and
+ direction listed. For example, the first row in the table below
+ assigns 2.0 of B.10.41.001a to the third and fourth floors in
+ directions 1 and 2. That is, it creates 4 Performance Groups, each
+ with 2 of these components in it.
+
+* Zero ("0") is reserved for "Not Applicable" use cases in the
+ location and direction column. As a location, it represents
+ components with a general effect that cannot be linked to a
+ particular floor (e.g., collapse). In directions, it is used to
+ identify non-directional components.
+
+* The index in this example refers to the component ID in FEMA P58,
+ but it can be any arbitrary string that has a corresponding entry in
+ the applied fragility database (see the Fragility data section below
+ for more information).
+
+* Blocks are the number of independent units within a Performance
+ Group. By default (i.e., when the provided value is missing or NaN),
+ each PG is assumed to have one block which means that all of the
+ components assigned to it will have the same behavior. FEMA P-58
+ identifies unit sizes for its components. We used these sizes to
+ determine the number of independent blocks for each PG. See, for
+ example, B.20.22.031 that has a 30 ft2 unit size in FEMA P-58. We
+ used a large number of blocks to capture that each of those curtain
+ wall elements can get damaged independently of the others.
+
+* Component quantities (Theta_0) can be provided in any units
+ compatible with the component type. (e.g., ft2, inch2, m2 are all
+ valid)
+
+* The last three components use custom fragilities that are not part
+ of the component database in FEMA P-58. We use these to consider
+ irreparable damage and collapse probability. We will define the
+ corresponding fragility and consequence functions in later sections
+ of this example.
+
+* The Comment column is not used by Pelicun; any text is acceptable
+  there.
+
+"""
+# %%
+# load the component configuration
+cmp_marginals = pd.read_csv('example_1/CMP_marginals.csv', index_col=0)
+
+display(cmp_marginals.head(15))
+print('...')
+cmp_marginals.tail(10)
+
+# %%
+# to make the convenience keywords work in the model, we need to
+# specify the number of stories
+assessment.stories = 4
+
+# now load the model into Pelicun
+assessment.asset.load_cmp_model({'marginals': cmp_marginals})
+
+# %% [markdown]
+"""
+Note that we could have assigned uncertain component quantities by
+adding a "Family" and "Theta_1", "Theta_2" columns to describe their
+distribution. Additional "TruncateLower" and "TruncateUpper" columns
+allow for bounded component quantity distributions that is especially
+useful when the distribution family is supported below zero values.
+
+Our input in this example describes a deterministic configuration
+resulting in the fairly simple table shown below.
+"""
+
+# %%
+# let's take a look at the generated marginal parameters
+assessment.asset.cmp_marginal_params.loc['B.10.41.002a', :]
+
+# %% [markdown]
+"""
+### Sample asset distribution
+
+In this example, the quantities are identical for every
+realization. We still need to generate a component quantity sample
+because the calculations in Pelicun expect an array of component
+quantity realizations. The sample size for the component quantities is
+automatically inferred from the demand sample. If such a sample is not
+available, you need to provide a sample size as the first argument of
+the generate_cmp_sample method.
+
+The table below shows the statistics for each Performance Group's
+quantities. Notice the zero standard deviation and that the minimum
+and maximum values are identical - this confirms that the quantities
+are deterministic.
+
+We could edit this sample and load the edited version back to Pelicun
+like we did for the Demands earlier.
+"""
+
+# %%
+# Generate the component quantity sample
+assessment.asset.generate_cmp_sample()
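+# if a demand sample were not available, the sample size would have to
+# be passed explicitly, e.g., generate_cmp_sample(10000)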
+
+# get the component quantity sample - again, use the save function to
+# convert units
+cmp_sample = assessment.asset.save_cmp_sample()
+
+cmp_sample.describe()
+
+# %% [markdown]
+"""
+### Define component fragilities
+
+Pelicun comes with fragility data, including the FEMA P-58 component
+fragility functions. We will start with taking a look at those data
+first.
+
+Pelicun uses the following terminology for fragility data:
+
+- Each Component has a number of pre-defined Limit States (LS) that
+ are triggered when a controlling Demand exceeds the Capacity of the
+ component.
+
+- The type of controlling Demand can be any of the demand types
+ supported by the tool - see the list of types in the Demands section
+ of this example.
+
+- Units of the controlling Demand can be chosen freely, as long as
+ they are compatible with the demand type (e.g., g, mps2, ftps2 are
+ all acceptable for accelerations, but inch and m are not)
+
+- The controlling Demand can be Offset in terms of location (e.g.,
+ ceilings use acceleration from the floor slab above the floor) by
+ providing a non-zero integer in the Offset column.
+
+- The Capacity of a component can be either deterministic or
+ probabilistic. A deterministic capacity only requires the assignment
+ of Theta_0 to the limit state. A probabilistic capacity is described
+ by a Fragility function. Fragility functions use Theta_0 as well as
+ the Family and Theta_1 (i.e., the second parameter) to define a
+ distribution function for the random capacity variable.
+
+- When a Limit State is triggered, the Component can end up in one or
+ more Damage States. DamageStateWeights are used to assign more than
+  one mutually exclusive Damage State to a Limit State. Using more
+  than one Damage State allows us to recognize multiple possible
+ damages and assign unique consequences to each damage in the loss
+ modeling step.
+
+- The Incomplete flag identifies components that require additional
+ information from the user. More than a quarter of the components in
+ FEMA P-58 have incomplete fragility definitions. If the user does
+ not provide the missing information, Pelicun provides a warning
+ message and skips Incomplete components in the analysis.
+
+The SimCenter is working on a web-based damage and loss library that
+will provide a convenient overview of the available fragility and
+consequence data. Until then, the get_default_data method allows you
+to pull any of the default fragility datasets from Pelicun and
+review/edit/reload the data.
+"""
+
+# %%
+# review the damage model - in this example: fragility functions
+P58_data = assessment.get_default_data('damage_DB_FEMA_P58_2nd')
+
+display(P58_data.head(3))
+
+print(P58_data['Incomplete'].sum(), ' incomplete component fragility definitions')
+
+# %% [markdown]
+"""
+Let's focus on the incomplete column and check which of the components
+we want to use have incomplete damage models. We do this by filtering
+the component database and only keeping those components that are part
+of our asset model and have incomplete definitions.
+"""
+
+# %%
+# note that we drop the last three components here (excessiveRID, irreparable, and collapse)
+# because they are not part of P58
+cmp_list = cmp_marginals.index.unique().to_numpy()[:-3]
+
+P58_data_for_this_assessment = P58_data.loc[cmp_list, :].sort_values(
+ 'Incomplete', ascending=False
+)
+
+additional_fragility_db = P58_data_for_this_assessment.loc[
+ P58_data_for_this_assessment['Incomplete'] == 1
+].sort_index()
+
+additional_fragility_db
+
+# %% [markdown]
+"""
+The component database bundled with Pelicun includes a CSV file and a
+JSON file for each dataset. The CSV file contains the data required to
+perform the calculations; the JSON file provides additional metadata
+for each component. The get_default_metadata method in Pelicun
+provides convenient access to this metadata. Below we demonstrate how
+to pull in the data on the first incomplete component. The metadata in
+this example are directly from FEMA P-58.
+"""
+# %%
+P58_metadata = assessment.get_default_metadata('damage_DB_FEMA_P58_2nd')
+
+pprint.pprint(P58_metadata['D.20.22.013a'])
+
+# %% [markdown]
+"""
+We need to add the missing information to the incomplete components.
+
+Note that the numbers below are just reasonable placeholders. This
+step would require substantial work from the engineer to review these
+components and assign the missing values. Such work is out of the
+scope of this example.
+
+The table below shows the completed fragility information.
+"""
+
+# %%
+# D2022.013a, 023a, 023b - Heating, hot water piping and bracing
+# dispersion values are missing, we use 0.5
+additional_fragility_db.loc[
+ ['D.20.22.013a', 'D.20.22.023a', 'D.20.22.023b'],
+ [('LS1', 'Theta_1'), ('LS2', 'Theta_1')],
+] = 0.5
+
+# D2031.013b - Sanitary Waste piping
+# dispersion values are missing, we use 0.5
+additional_fragility_db.loc['D.20.31.013b', ('LS1', 'Theta_1')] = 0.5
+
+# D2061.013b - Steam piping
+# dispersion values are missing, we use 0.5
+additional_fragility_db.loc['D.20.61.013b', ('LS1', 'Theta_1')] = 0.5
+
+# D3031.013i - Chiller
+# use a placeholder of 1.5|0.5
+additional_fragility_db.loc['D.30.31.013i', ('LS1', 'Theta_0')] = 1.5 # g
+additional_fragility_db.loc['D.30.31.013i', ('LS1', 'Theta_1')] = 0.5
+
+# D3031.023i - Cooling Tower
+# use a placeholder of 1.5|0.5
+additional_fragility_db.loc['D.30.31.023i', ('LS1', 'Theta_0')] = 1.5 # g
+additional_fragility_db.loc['D.30.31.023i', ('LS1', 'Theta_1')] = 0.5
+
+# D3052.013i - Air Handling Unit
+# use a placeholder of 1.5|0.5
+additional_fragility_db.loc['D.30.52.013i', ('LS1', 'Theta_0')] = 1.5 # g
+additional_fragility_db.loc['D.30.52.013i', ('LS1', 'Theta_1')] = 0.5
+
+# We can set the incomplete flag to 0 for these components
+additional_fragility_db['Incomplete'] = 0
+
+additional_fragility_db
+
+# %% [markdown]
+"""
+Now we need to add three new components:
+
+* **excessiveRID** is used to monitor residual drifts on every floor
+ in every direction and check if they exceed the capacity assigned
+ to irreparable damage.
+
+* **irreparable** is a global limit state that is triggered by having
+ at least one excessive RID and leads to the replacement of the
+ building. This triggering requires one component to affect another
+ and it is handled in the Damage Process section below. For its
+ individual damage evaluation, this component uses a deterministic,
+ placeholder capacity that is sufficiently high so that it will
+ never get triggered by the controlling demand.
+
+* **collapse** represents the global collapse limit state that is
+ modeled with a collapse fragility function and uses spectral
+ acceleration at the dominant vibration period as the
+ demand. Multiple collapse modes could be considered by assigning a
+ set of Damage State weights to the collapse component.
+
+The script in this cell creates the table shown below. We could also
+create such information in a CSV file and load it to the notebook.
+"""
+
+# %%
+
+# irreparable damage
+# this is based on the default values in P58
+additional_fragility_db.loc[
+ 'excessiveRID',
+ [
+ ('Demand', 'Directional'),
+ ('Demand', 'Offset'),
+ ('Demand', 'Type'),
+ ('Demand', 'Unit'),
+ ],
+] = [1, 0, 'Residual Interstory Drift Ratio', 'rad']
+
+additional_fragility_db.loc[
+ 'excessiveRID', [('LS1', 'Family'), ('LS1', 'Theta_0'), ('LS1', 'Theta_1')]
+] = ['lognormal', 0.01, 0.3]
+
+additional_fragility_db.loc[
+ 'irreparable',
+ [
+ ('Demand', 'Directional'),
+ ('Demand', 'Offset'),
+ ('Demand', 'Type'),
+ ('Demand', 'Unit'),
+ ],
+] = [1, 0, 'Peak Spectral Acceleration|1.13', 'g']
+
+
+# a very high capacity is assigned to avoid damage from demands
+additional_fragility_db.loc['irreparable', ('LS1', 'Theta_0')] = 1e10
+
+# collapse
+# capacity is assigned based on the example in the FEMA P58 background documentation
+additional_fragility_db.loc[
+ 'collapse',
+ [
+ ('Demand', 'Directional'),
+ ('Demand', 'Offset'),
+ ('Demand', 'Type'),
+ ('Demand', 'Unit'),
+ ],
+] = [1, 0, 'Peak Spectral Acceleration|1.13', 'g']
+
+
+additional_fragility_db.loc[
+ 'collapse', [('LS1', 'Family'), ('LS1', 'Theta_0'), ('LS1', 'Theta_1')]
+] = ['lognormal', 1.35, 0.5]
+
+# We set the incomplete flag to 0 for the additional components
+additional_fragility_db['Incomplete'] = 0
+
+additional_fragility_db.tail(3)
+
+# %% [markdown]
+"""
+### Load component fragility data
+
+Now that we have the fragility data completed and available for all
+components in the asset model, we can load the data to the damage
+model in Pelicun.
+
+When providing custom data, you can directly provide a DataFrame like
+we do in this example (additional_fragility_db), or you can provide a
+path to a CSV file that is structured like the table we prepared
+above.
+
+Default databases are loaded using the keyword "PelicunDefault" in the
+path and then providing the name of the database. The PelicunDefault
+keyword is automatically replaced with the path to the default
+component data directory.
+
+Note that there are identical components in the listed sources. The
+additional_fragility_db contains the additional global components
+(e.g., collapse) and the ones that are incomplete in FEMA P-58. The
+latter ones are also listed in the default FEMA P-58 database. Such
+conflicts are resolved by preserving the first occurrence of every
+component. Hence, always start with the custom data when listing
+sources and add default databases in the end.
+"""
+
+# %%
+cmp_set = set(assessment.asset.list_unique_component_ids())
+assessment.damage.load_model_parameters(
+ [
+ additional_fragility_db, # This is the extra fragility data we've just created
+ 'PelicunDefault/damage_DB_FEMA_P58_2nd.csv', # and this is a table with the default P58 data
+ ],
+ cmp_set,
+)
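+
+# Note: when the same component ID appears in multiple sources, the
+# first occurrence is preserved - hence the custom data is listed first.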
+
+# %% [markdown]
+"""
+### Damage Process
+
+Damage processes are a powerful new feature in Pelicun 3. They are
+used to connect damages of different components in the performance
+model and they can be used to create complex cascading damage models.
+
+The default FEMA P-58 damage process is fairly simple. The process
+below can be interpreted as follows:
+
+* If Damage State 1 (DS1) of the collapse component is triggered
+ (i.e., the building collapsed), then damage for all other components
+ should be cleared from the results. This considers that component
+ damages (and their consequences) in FEMA P-58 are conditioned on no
+ collapse.
+
+* If Damage State 1 (DS1) of any of the excessiveRID components is
+ triggered (i.e., the residual drifts are larger than the prescribed
+ capacity on at least one floor), then the irreparable component
+ should be set to DS1.
+
+"""
+
+# %%
+# FEMA P58 uses the following process:
+dmg_process = {
+ '1_collapse': {'DS1': 'ALL_NA'},
+ '2_excessiveRID': {'DS1': 'irreparable_DS1'},
+}
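+
+# The leading numbers in the keys set the order in which the rules are
+# applied. 'ALL_NA' clears the damage of every other component in the
+# collapsed realizations, and 'irreparable_DS1' forces the irreparable
+# component into DS1 wherever excessiveRID reaches DS1.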
+
+# %% [markdown]
+"""
+### Damage calculation
+
+Damage calculation in Pelicun requires
+
+- a pre-assigned set of component fragilities;
+
+- a pre-assigned sample of component quantities;
+
+- a pre-assigned sample of demands;
+
+- and an (optional) damage process
+
+The sample size for the damage calculation is automatically inferred
+from the demand sample size.
+
+**Expected Runtime & Best Practices**
+
+The output below shows the total number of Performance Groups (121)
+and Component Blocks (1736). The number of component blocks is a good
+proxy for the size of the problem. Damage calculation is the most
+demanding part of the performance assessment workflow. The runtime for
+damage calculations in Pelicun scales approximately linearly with the
+number of component blocks above 500 blocks and somewhat better than
+linearly with the sample size above 10000 samples. Below a 10000
+sample size and 500 blocks, overhead takes up a substantial part of
+the calculation time, which is on the order of a few seconds. Below a
+1000 sample size and 100 blocks, these variables have little effect on
+the runtime.
+
+Pelicun can handle fairly large problems, but it is best to make sure
+both the intermediate data and the results fit in the RAM of the
+system. Internal calculations are automatically disaggregated into
+1000-block batches to avoid memory-related issues. This
+might still be too large of a batch if the number of samples is more
+than 10,000. You can manually adjust the batch size using the
+block_batch_size argument in the calculate method below. We recommend
+using only 100-block batches when running a sample size of
+100,000. Even larger sample sizes coupled with a complex model
+probably benefit from running in batches across the sample. Contact
+the SimCenter if you are interested in such large problems; we are
+happy to provide support.
+
+Results are stored at a Performance Group (rather than Component
+Block) resolution to allow users to run larger problems. The size of
+the output data is proportional to the number of Performance Groups x
+number of active Damage States per PG x sample size. Modern computers
+with 64-bit memory addressing and 4+ GB of RAM should be able to
+handle problems with up to 10,000 performance groups and a sample size
+of 10,000. This limit should be sufficient for even the most complex
+and high-resolution models of a single building - note in the next
+cell that the size of the results from this calculation (121 PG x
+10,000 realizations) is just 30 MB.
+"""
+
+# %%
+# Now we can run the calculation
+assessment.damage.calculate(
+ dmg_process=dmg_process
+) # , block_batch_size=100) #- for large calculations
+
+# %% [markdown]
+"""
+### Damage estimates
+
+Below, we extract the damage sample from Pelicun and show a few
+example plots to illustrate the rich information this data provides
+about the damage in the building.
+"""
+
+# %%
+damage_sample = assessment.damage.save_sample()
+
+print('Size of damage results: ', sys.getsizeof(damage_sample) / 1024 / 1024, 'MB')
+
+# %% [markdown]
+"""
+**Damage statistics of a component type**
+
+The table printed below shows the mean, standard deviation, minimum,
+10th, 50th, and 90th percentile, and maximum quantity of the given
+component in each damage state across various locations and directions
+in the building.
+"""
+
+# %%
+component = 'B.20.22.031'
+damage_sample.describe([0.1, 0.5, 0.9]).T.loc[component, :].head(30)
+
+# %%
+dmg_plot = (
+ damage_sample.loc[:, component].groupby(level=['loc', 'ds'], axis=1).sum().T
+)
+
+px.bar(
+ x=dmg_plot.index.get_level_values(1),
+ y=dmg_plot.mean(axis=1),
+ color=dmg_plot.index.get_level_values(0),
+ barmode='group',
+ labels={'x': 'Damage State', 'y': 'Component Quantity [ft2]', 'color': 'Floor'},
+ title=f'Mean Quantities of component {component} in each Damage State',
+ height=500,
+)
+
+# %%
+dmg_plot = (
+ damage_sample.loc[:, component]
+ .loc[:, idx[:, :, :, '2']]
+ .groupby(level=['loc', 'ds'], axis=1)
+ .sum()
+ / damage_sample.loc[:, component].groupby(level=['loc', 'ds'], axis=1).sum()
+).T
+
+fifty_percent = 0.50
+px.bar(
+ x=dmg_plot.index.get_level_values(0),
+ y=(dmg_plot > fifty_percent).mean(axis=1),
+ color=dmg_plot.index.get_level_values(1),
+ barmode='group',
+ labels={'x': 'Floor', 'y': 'Probability', 'color': 'Direction'},
+ title=f'Probability of having more than 50% of component {component} in DS2',
+ height=500,
+)
+
+# %%
+dmg_plot = (
+ damage_sample.loc[:, component]
+ .loc[:, idx[:, :, :, '2']]
+ .groupby(level=[0], axis=1)
+ .sum()
+ / damage_sample.loc[:, component].groupby(level=[0], axis=1).sum()
+).T
+
+px.scatter(
+ x=dmg_plot.loc['1'],
+ y=dmg_plot.loc['2'],
+ color=dmg_plot.loc['3'],
+ opacity=0.1,
+ color_continuous_scale=px.colors.diverging.Portland,
+ marginal_x='histogram',
+ marginal_y='histogram',
+ labels={
+ 'x': 'Proportion in DS2 in Floor 1',
+ 'y': 'Proportion in DS2 in Floor 2',
+ 'color': 'Proportion in DS2 in Floor 3',
+ },
+ title=f'Correlation between component {component} damages across three floors',
+ height=600,
+ width=750,
+)
+
+# %%
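+# The damage sample columns follow a cmp-loc-dir-uid-ds hierarchy.
+# Damage State '0' means no damage, so the probability of collapse is
+# one minus the mean of the corresponding DS0 column.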
+print(
+ 'Probability of collapse: ',
+ 1.0 - damage_sample['collapse', '0', '1', '0', '0'].mean(),
+)
+print(
+ 'Probability of irreparable damage: ',
+ damage_sample['irreparable', '0', '1', '0', '1'].mean(),
+)
+
+# %% [markdown]
+"""
+## Losses - repair consequences
+
+Loss simulation is an umbrella term that can include the simulation of
+various types of consequences. In this example we focus on repair cost
+and repair time consequences. Pelicun provides a flexible framework
+that can be expanded with any arbitrary decision variable. Let us know
+if you need a particular decision variable for your work that would be
+good to support in Pelicun.
+
+Losses can be either based on consequence functions controlled by the
+quantity of damages, or based on loss functions controlled by demand
+intensity. Pelicun supports both approaches and they can be mixed
+within the same analysis; in this example we use consequence functions
+following the FEMA P-58 methodology.
+
+Loss simulation requires a demand/damage sample, consequence/loss
+function data, and a mapping that links the demand/damage components
+to the consequence/loss functions. The damage sample in this example
+is already available from the previous section. We will show below how
+to prepare the mapping matrix and how to load the consequence
+functions.
+
+### Consequence mapping to damages
+
+Consequences are decoupled from damages in pelicun to enforce and
+encourage a modular approach to performance assessment.
+
+The map that we prepare below describes which type of damage leads to
+which type of consequence. With FEMA P-58 this is quite
+straightforward because the IDs of the fragility and consequence data
+are identical - note that we could link different ones if needed.
+Also, several fragilities in FEMA P-58 share identical consequences,
+and the approach in Pelicun will allow us to remove such redundancy
+in future datasets. We plan to introduce a database that is a more
+concise and streamlined version of the one provided in FEMA P-58 and
+encourage researchers to extend it by providing data for the
+incomplete components.
+
+The mapping is defined by a table (see the example below). Each row
+has a demand/damage ID and a list of consequence IDs, one for each
+type of decision variable. Here, we are looking at building repair
+consequences only; hence, there is only one column with consequence
+IDs. The IDs of FEMA P-58 consequence functions are identical to the
+names of the components they are assigned to. In earlier versions of
+Pelicun, damage sample IDs in the index of the table had to be
+preceded by 'DMG', while demand sample IDs would be preceded by
+'DEM'; the 'DMG-' prefix is no longer required.
+
+Notice that besides the typical FEMA P-58 IDs, the table also includes
+'collapse' and 'irreparable' to capture the consequences of those
+events. Both irreparable damage and collapse lead to the replacement
+of the building. Consequently, we can use the same consequence model
+(called 'replacement') for both types of damage. We will define the
+replacement consequence in the next section.
+"""
+
+# %%
+# let us prepare the map based on the component list
+
+# the index of the map lists the IDs whose damage drives the consequences
+# (the 'DMG-' prefix required by earlier versions is no longer needed)
+drivers = cmp_marginals.index.unique().to_list()
+
+# drop 'excessiveRID' from the list - it has no repair consequences of
+# its own - while keeping 'collapse' and 'irreparable'
+drivers = drivers[:-3] + drivers[-2:]
+
+# we are looking at repair consequences in this example
+# the components in P58 have consequence models under the same name
+loss_models = cmp_marginals.index.unique().tolist()[:-3]
+
+# We will define the replacement consequence in the following cell.
+loss_models += ['replacement'] * 2
+
+# Assemble the DataFrame with the mapping information
+# The column name identifies the type of the consequence model.
+loss_map = pd.DataFrame(loss_models, columns=['Repair'], index=drivers)
+
+loss_map
+
+# %% [markdown]
+"""
+### Define component consequence data
+
+Pelicun comes with consequence data, including the FEMA P-58 component
+consequence functions. We will start with taking a look at those data
+first.
+
+Pelicun uses the following terminology for consequence data:
+
+- Each Component has a number of pre-defined Damage States (DS)
+
+- The quantity of each Component in each DS in various locations and
+  directions in the building is provided as a damage sample.
+
+- The index of the consequence data table can be hierarchical and list
+ several consequence types that belong to the same group. For
+ example, the repair consequences here include 'Cost' and 'Time';
+ injury consequences include injuries of various severity. Each row
+ in the table corresponds to a combination of a component and a
+ consequence type.
+
+- Consequences in each damage state can be:
+
+  * Deterministic: use only the 'Theta_0' column
+
+ * Probabilistic: provide information on the 'Family', 'Theta_0'
+ and 'Theta_1' to describe the distribution family and its two
+ parameters.
+
+- The first parameter of the distribution (Theta_0) can be either a
+ scalar or a function of the quantity of damage. This applies to both
+ deterministic and probabilistic cases. When Theta_0 is a function of
+ the quantity of damage, two series of numbers are expected,
+ separated by a '|' character. The two series are used to construct a
+ multilinear function - the first set of numbers are the Theta_0
+ values, the second set are the corresponding quantities. The
+ functions are assumed to be constant below the minimum and above the
+ maximum quantities.
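+
+  For example, a hypothetical entry of '300,250,200|1,10,50' in the
+  Theta_0 column would describe a unit consequence that decreases
+  from 300 to 200 as the damaged quantity grows from 1 to 50 units,
+  with linear interpolation in between.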
+
+- The LongLeadTime column is currently informational only - it does
+ not affect the calculation.
+
+- The DV-Unit column (see the right side of the table below) defines
+ the unit of the outputs for each consequence function - i.e., the
+ unit of the Theta_0 values.
+
+- The Quantity-Unit column defines the unit of the damage/demand
+ quantity. This allows mixing fragility and consequence functions
+ that use different units - as long as the units are compatible,
+ Pelicun takes care of the conversions automatically.
+
+- The Incomplete column is 1 if some of the data is missing from a
+ row.
+
+The SimCenter is working on a web-based damage and loss library that
+will provide a convenient overview of the available fragility and
+consequence data. Until then, the get_default_data method allows you
+to pull any of the default consequence datasets from Pelicun and
+review/edit/reload the data.
+
+After pulling the data, we first need to check whether the repair
+consequence functions for the components in this building are
+complete in FEMA P-58. 27 components in FEMA P-58 only have damage
+models and no repair consequence models at all; all of the other
+models are complete. As the message below confirms, this example only
+includes components with complete consequence information.
+"""
+
+# %%
+# load the consequence models
+P58_data = assessment.get_default_data('loss_repair_DB_FEMA_P58_2nd')
+
+# get the consequences used by this assessment
+P58_data_for_this_assessment = P58_data.loc[loss_map['Repair'].to_numpy()[:-2], :]
+
+print(
+ P58_data_for_this_assessment['Incomplete'].sum(),
+ ' components have incomplete consequence models assigned.',
+)
+
+display(P58_data_for_this_assessment.head(30))
+
+# %% [markdown]
+r"""
+**Adding custom consequence functions**
+
+Now we need to define the replacement consequence for the collapse and
+irreparable damage cases.
+
+The FEMA P-58 background documentation provides a replacement cost of
+\$21.6 million and a replacement time of 400 days. The second edition
+of FEMA P-58 introduced worker-days as the unit of replacement time;
+hence, we need to express the replacement time in worker-days. We
+show two options below to estimate that value:
+
+- We can use the assumption of 0.001 worker/ft2 from FEMA P-58
+ multiplied by the floor area of the building to get the average
+ number of workers on a typical day. The total number of worker-days
+ is the product of the 400 days of construction and this average
+ number of workers. Using the plan area of the building for this
+ calculation assumes that one floor is being worked on at a time -
+ this provides a lower bound of the number of workers: 21600 x 0.001
+ = 21.6. The upper bound of workers is determined by using the gross
+ area for the calculation: 86400 x 0.001 = 86.4. Consequently, the
+ replacement time will be between 8,640 and 34,560 worker-days.
+
+- The other approach is taking the replacement cost, assuming a ratio
+ that is spent on labor (0.3-0.5 is a reasonable estimate) and
+ dividing that labor cost with the daily cost of a worker (FEMA P-58
+ estimates \$680 in 2011 USD for the SF Bay Area which we will apply
+ to this site in Los Angeles). This calculation yields 9,529 - 15,882
+ worker-days depending on the labor ratio chosen.
+
+Given the above estimates, we use 12,500 worker-days for this example.
+
+Note that
+
+- we conveniently use the same consequence for the collapse and
+  irreparable damages;
+
+- this approach could also capture uncertainty in the replacement
+  cost/time. We keep these values deterministic here for the sake
+  of simplicity.
+
+"""
+
+# %%
+# initialize the dataframe
+additional_consequences = pd.DataFrame(
+ columns=pd.MultiIndex.from_tuples(
+ [
+ ('Incomplete', ''),
+ ('Quantity', 'Unit'),
+ ('DV', 'Unit'),
+ ('DS1', 'Theta_0'),
+ ]
+ ),
+ index=pd.MultiIndex.from_tuples(
+ [('replacement', 'Cost'), ('replacement', 'Time')]
+ ),
+)
+
+# add the data about replacement cost and time
+additional_consequences.loc['replacement', 'Cost'] = [
+ 0,
+ '1 EA',
+ 'USD_2011',
+ 21600000,
+]
+additional_consequences.loc['replacement', 'Time'] = [
+ 0,
+ '1 EA',
+ 'worker_day',
+ 12500,
+]
+
+additional_consequences
+
+# %% [markdown]
+"""
+### Load component consequence data
+
+Now that we have the consequence data completed and available for all
+components in the damage sample, we can load the data to the loss
+model in Pelicun.
+
+When providing custom data, you can directly provide a DataFrame like
+we do in this example (additional_consequences), or you can provide a
+path to a CSV file that is structured like the table we prepared
+above.
+
+Default databases are loaded using the keyword "PelicunDefault" in the
+path and then providing the name of the database. The PelicunDefault
+keyword is automatically replaced with the path to the default
+component data directory.
+
+If identical component IDs appear in multiple listed sources, Pelicun
+always preserves the first occurrence. Hence, always list the custom
+data first and add the default databases at the end.
+"""
+
+# %%
+# Load the loss model to pelicun
+assessment.loss.decision_variables = ('Cost', 'Time', 'Energy', 'Carbon')
+assessment.loss.add_loss_map(loss_map)
+assessment.loss.load_model_parameters(
+ [additional_consequences, 'PelicunDefault/loss_repair_DB_FEMA_P58_2nd.csv'],
+)
+
+# %% [markdown]
+"""
+### Loss calculation
+
+Loss calculation in Pelicun requires
+
+- a pre-assigned set of component consequence functions;
+
+- a pre-assigned sample of demands and/or damages;
+
+- and a loss mapping matrix
+
+The sample size for the loss calculation is automatically inferred
+from the demand/damage sample size.
+"""
+
+# %%
+# and run the calculations
+assessment.loss.calculate()
+
+# %% [markdown]
+"""
+### Loss estimates
+
+**Repair cost of individual components and groups of components**
+
+Below, we extract the loss sample from Pelicun and show a few example
+plots to illustrate the rich information this data provides about the
+repair consequences in the building.
+"""
+
+# %%
+loss_sample = assessment.loss.sample
+
+print(
+ 'Size of repair cost & time results: ',
+ sys.getsizeof(loss_sample) / 1024 / 1024,
+ 'MB',
+)
+
+# %%
+loss_sample['Cost']['B.20.22.031'].groupby(level=[0, 2, 3], axis=1).sum().describe(
+ [0.1, 0.5, 0.9]
+).T
+
+# %%
+loss_plot = (
+ loss_sample.groupby(level=['dv', 'dmg'], axis=1).sum()['Cost'].iloc[:, :-2]
+)
+
+# we add 100 to the loss values to avoid having issues with zeros when creating a log plot
+loss_plot += 100
+
+px.box(
+ y=np.tile(loss_plot.columns, loss_plot.shape[0]),
+ x=loss_plot.to_numpy().flatten(),
+ color=[c[0] for c in loss_plot.columns] * loss_plot.shape[0],
+ orientation='h',
+ labels={
+ 'x': 'Aggregate repair cost [2011 USD]',
+ 'y': 'Component ID',
+ 'color': 'Component Group',
+ },
+ title='Range of repair cost realizations by component type',
+ log_x=True,
+ height=1500,
+)
+
+# %%
+loss_plot = (
+    loss_sample['Cost']
+    .groupby('loc', axis=1)
+    .sum()
+    .describe([0.1, 0.5, 0.9])
+    .iloc[:, 1:]  # skip location 0, where replacement consequences are assigned
+)
+
+roof_level = 5
+fig = px.pie(
+ values=loss_plot.loc['mean'],
+ names=[
+ f'floor {c}' if int(c) < roof_level else 'roof' for c in loss_plot.columns
+ ],
+ title='Contribution of each floor to the average non-collapse repair costs',
+ height=500,
+ hole=0.4,
+)
+
+fig.update_traces(textinfo='percent+label')
+
+# %%
+# aggregate by the driver of the consequences (the 'dmg' column level)
+loss_plot = loss_sample['Cost'].groupby(level=[1], axis=1).sum()
+
+# the last two columns are 'collapse' and 'irreparable'; everything else
+# adds up to the repairable damage scenario
+loss_plot['repairable'] = loss_plot.iloc[:, :-2].sum(axis=1)
+loss_plot = loss_plot.iloc[:, -3:]
+
+px.bar(
+ x=loss_plot.columns,
+ y=loss_plot.describe().loc['mean'],
+ labels={'x': 'Damage scenario', 'y': 'Average repair cost'},
+ title='Contribution to average losses from the three possible damage scenarios',
+ height=400,
+)
+
+# %% [markdown]
+"""
+**Aggregate losses**
+
+Aggregating losses for repair costs is straightforward, but repair
+times are less trivial. Pelicun adopts the method from FEMA P-58 and
+provides two bounding values for aggregate repair times:
+
+- **parallel** assumes that repairs are conducted in parallel across
+ locations. In each location, repairs are assumed to be
+ sequential. This translates to aggregating component repair times
+ by location and choosing the longest resulting aggregate value
+ across locations.
+
+- **sequential** assumes repairs are performed sequentially across
+ locations and within each location. This translates to aggregating
+ component repair times across the entire building.
+
+The parallel option is considered a lower bound and the sequential is
+an upper bound of the real repair time. Pelicun automatically
+calculates both options for all (i.e., not only FEMA P-58) analyses.
+
+"""
+
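+# %%
+# A minimal sketch of how the two bounds relate to the component-level
+# sample (illustration only; the built-in aggregate_losses method used
+# below also handles unit conversions and the replacement cases):
+time_by_loc = loss_sample['Time'].groupby(level='loc', axis=1).sum()
+repair_time_sequential = time_by_loc.sum(axis=1)  # all locations, one after another
+repair_time_parallel = time_by_loc.max(axis=1)  # the longest location governs
+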
+# %%
+agg_df = assessment.loss.aggregate_losses()
+
+agg_df.describe([0.1, 0.5, 0.9])
+
+# %%
+# keep only the repairable cases, i.e., realizations with a repair cost
+# below the replacement cost
+fixed_replacement_cost = 2e7
+agg_df_plot = agg_df.loc[agg_df['repair_cost'] < fixed_replacement_cost]
+
+px.scatter(
+ x=agg_df_plot['repair_time', 'sequential'],
+ y=agg_df_plot['repair_time', 'parallel'],
+ opacity=0.1,
+ marginal_x='histogram',
+ marginal_y='histogram',
+ labels={
+ 'x': 'Sequential repair time [worker-days]',
+ 'y': 'Parallel repair time [worker-days]',
+ },
+ title='Two bounds of repair time conditioned on repairable damage',
+ height=750,
+ width=750,
+)
diff --git a/doc/source/examples/notebooks/example_1/CMP_marginals.csv b/doc/source/examples/notebooks/example_1/CMP_marginals.csv
new file mode 100755
index 000000000..16271e14b
--- /dev/null
+++ b/doc/source/examples/notebooks/example_1/CMP_marginals.csv
@@ -0,0 +1,38 @@
+,Units,Location,Direction,Theta_0,Blocks,Comment
+B.10.41.001a,ea,"3, 4","1,2",2,2,"24x24 ACI 318 SMF, beam on one side"
+B.10.41.002a,ea,1,"1,2",2,2,"24x36 ACI 318 SMF, beam on one side"
+B.10.41.002a,ea,2,"1,2",1,,"24x36 ACI 318 SMF, beam on one side"
+B.10.41.002b,ea,2--4,"1,2",3,3,"24x36 ACI 318 SMF, beam on both sides"
+B.10.41.003a,ea,2,"1,2",1,,"36x36 ACI 318 SMF, beam on one side"
+B.10.41.003b,ea,1,"1,2",3,3,"36x36 ACI 318 SMF, beam on both sides"
+B.10.49.031,ea,all,0,15,15,Post-tensioned concrete flat slabs- columns with shear reinforcing 0 2.5 inches), SDC D,E,F, PIPING FRAGILITY"
+D.20.21.023b,ft,all,0,1230,2,"Cold or Hot Potable Water Piping (dia > 2.5 inches), SDC D,E,F, BRACING FRAGILITY"
+D.20.31.013b,ft,all,0,1230,2,"Sanitary Waste Piping - Cast Iron w/flexible couplings, SDC D,E,F, BRACING FRAGILITY"
+D.30.41.021c,ft,all,0,1620,2,"HVAC Stainless Steel Ducting less than 6 sq. ft in cross sectional area, SDC D, E, or F"
+D.30.41.022c,ft,all,0,430,,"HVAC Stainless Steel Ducting - 6 sq. ft cross sectional area or greater, SDC D, E, or F"
+D.30.41.032c,ea,all,0,19.44,20,"HVAC Drops / Diffusers without ceilings - supported by ducting only - No independent safety wires, SDC D, E, or F"
+D.30.41.041b,ea,all,0,16,2,"Variable Air Volume (VAV) box with in-line coil, SDC C"
+D.20.61.013b,ft,all,0,1920,2,"Steam Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, BRACING FRAGILITY"
+D.20.22.013a,ft,all,0,1920,2,"Heating hot Water Piping - Small Diameter Threaded Steel - (2.5 inches in diameter or less), SDC D, E, or F, PIPING FRAGILITY"
+D.20.22.023a,ft,all,0,760,,"Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, PIPING FRAGILITY"
+D.20.22.023b,ft,all,0,760,,"Heating hot Water Piping - Large Diameter Welded Steel - (greater than 2.5 inches in diameter), SDC D, E, or F, BRACING FRAGILITY"
+D.40.11.033a,ea,all,0,194,2,"Fire Sprinkler Drop Standard Threaded Steel - Dropping into unbraced lay-in tile SOFT ceiling - 6 ft. long drop maximum, SDC D, E, or F"
+B.30.11.011,ft2,roof,0,5832,59,"Concrete tile roof, tiles secured and compliant with UBC94"
+D.30.31.013i,ea,roof,0,1,,Chiller - Capacity: 350 to <750 Ton - Equipment that is either hard anchored or is vibration isolated with seismic snubbers/restraints - Combined anchorage/isolator & equipment fragility
+D.30.31.023i,ea,roof,0,1,,Cooling Tower - Capacity: 350 to <750 Ton - Equipment that is either hard anchored or is vibration isolated with seismic snubbers/restraints - Combined anchorage/isolator & equipment fragility
+D.30.52.013i,ea,roof,0,4,4,Air Handling Unit - Capacity: 10000 to <25000 CFM - Equipment that is either hard anchored or is vibration isolated with seismic snubbers/restraints - Combined anchorage/isolator & equipment fragility
+excessiveRID,ea,all,"1,2",1,,Excessive residual drift
+collapse,ea,0,1,1,,Collapsed building
+irreparable,ea,0,1,1,,Irreparable building
diff --git a/doc/source/examples/notebooks/example_1/demand_data.csv b/doc/source/examples/notebooks/example_1/demand_data.csv
new file mode 100755
index 000000000..df02e850e
--- /dev/null
+++ b/doc/source/examples/notebooks/example_1/demand_data.csv
@@ -0,0 +1,145 @@
+,median,log_std
+1-PFA-0-1,0.08,0.4608067551638503
+1-PFA-0-2,0.08,0.49413271857642355
+1-PFA-1-1,0.14,0.4681344734414342
+1-PFA-1-2,0.15,0.2868530450370383
+1-PFA-2-1,0.17,0.4164624252641039
+1-PFA-2-2,0.17,0.4164624252641039
+1-PFA-3-1,0.18,0.40511321955375623
+1-PFA-3-2,0.18,0.40511321955375623
+1-PFA-4-1,0.23,0.34726426562687884
+1-PFA-4-2,0.23,0.3547900357092975
+1-PID-1-1,0.005,0.4270757253801016
+1-PID-1-2,0.005,0.4270757253801016
+1-PID-2-1,0.005,0.4270757253801016
+1-PID-2-2,0.005,0.4270757253801016
+1-PID-3-1,0.004,0.4333520014182283
+1-PID-3-2,0.004,0.4333520014182283
+1-PID-4-1,0.002,0.5
+1-PID-4-2,0.002,0.5
+2-PFA-0-1,0.21,0.40963505144687823
+2-PFA-0-2,0.21,0.3832845165220612
+2-PFA-1-1,0.29,0.4288796990711922
+2-PFA-1-2,0.29,0.4288796990711922
+2-PFA-2-1,0.28,0.3876227323608682
+2-PFA-2-2,0.29,0.40099778751361925
+2-PFA-3-1,0.3,0.4035091985429514
+2-PFA-3-2,0.27,0.3997088915138062
+2-PFA-4-1,0.33,0.37239657187771463
+2-PFA-4-2,0.32,0.4209585845343814
+2-PID-1-1,0.011,0.4711097456140563
+2-PID-1-2,0.01,0.475131730518475
+2-PID-2-1,0.012,0.4394939456771386
+2-PID-2-2,0.011,0.4711097456140563
+2-PID-3-1,0.008,0.5
+2-PID-3-2,0.008,0.447946327171986
+2-PID-4-1,0.004,0.4333520014182283
+2-PID-4-2,0.003,0.5
+3-PFA-0-1,0.34,0.4205559989300818
+3-PFA-0-2,0.33,0.37035195963979445
+3-PFA-1-1,0.41,0.3772360727062722
+3-PFA-1-2,0.42,0.3757655292605639
+3-PFA-2-1,0.36,0.37647176413422945
+3-PFA-2-2,0.38,0.40508979107103643
+3-PFA-3-1,0.35,0.37539754064726244
+3-PFA-3-2,0.35,0.38276503265877987
+3-PFA-4-1,0.43,0.3817574009955636
+3-PFA-4-2,0.41,0.39375144764296094
+3-PID-1-1,0.016,0.44795243307594346
+3-PID-1-2,0.017,0.5222864082152147
+3-PID-2-1,0.018,0.4027931427882727
+3-PID-2-2,0.019,0.458870056460066
+3-PID-3-1,0.014,0.4231794130494331
+3-PID-3-2,0.014,0.45842683288503877
+3-PID-4-1,0.006,0.4203185874397092
+3-PID-4-2,0.006,0.6135685945427505
+4-PFA-0-1,0.49,0.3919210430750446
+4-PFA-0-2,0.45,0.38698384475251535
+4-PFA-1-1,0.53,0.406011263972564
+4-PFA-1-2,0.5,0.38752352095832543
+4-PFA-2-1,0.45,0.39274739627448507
+4-PFA-2-2,0.46,0.3945545500174033
+4-PFA-3-1,0.4,0.38127939790214715
+4-PFA-3-2,0.41,0.3772360727062722
+4-PFA-4-1,0.49,0.33956788384728936
+4-PFA-4-2,0.49,0.37524568788735635
+4-PID-1-1,0.026,0.5124532035318107
+4-PID-1-2,0.027,0.5970969478920988
+4-PID-2-1,0.027,0.4794717270402726
+4-PID-2-2,0.027,0.5689226436084314
+4-PID-3-1,0.02,0.3959232466570905
+4-PID-3-2,0.02,0.50639149262782
+4-PID-4-1,0.008,0.5184667123718524
+4-PID-4-2,0.008,0.6444856071246744
+5-PFA-0-1,0.53,0.49558086996772627
+5-PFA-0-2,0.52,0.3888274185466733
+5-PFA-1-1,0.6,0.4327984859794568
+5-PFA-1-2,0.58,0.40300677399524637
+5-PFA-2-1,0.45,0.39859880249384605
+5-PFA-2-2,0.46,0.4302023973054347
+5-PFA-3-1,0.42,0.3894423471436558
+5-PFA-3-2,0.43,0.4045390070382661
+5-PFA-4-1,0.5,0.37083251851057364
+5-PFA-4-2,0.51,0.3922488768771692
+5-PID-1-1,0.034,0.4937862195749691
+5-PID-1-2,0.029,0.45655108499263214
+5-PID-2-1,0.035,0.4719298354100396
+5-PID-2-2,0.03,0.4712487198187546
+5-PID-3-1,0.021,0.5390460626558675
+5-PID-3-2,0.025,0.4264982747013112
+5-PID-4-1,0.008,0.742315433526588
+5-PID-4-2,0.012,0.5
+6-PFA-0-1,0.67,0.433673896418087
+6-PFA-0-2,0.65,0.4028467214673338
+6-PFA-1-1,0.72,0.4124720061431207
+6-PFA-1-2,0.66,0.4145767084859249
+6-PFA-2-1,0.52,0.3618066495006511
+6-PFA-2-2,0.54,0.4126302775358337
+6-PFA-3-1,0.46,0.40667200135708653
+6-PFA-3-2,0.47,0.3765029041603888
+6-PFA-4-1,0.53,0.3687296645707586
+6-PFA-4-2,0.55,0.42854198175786573
+6-PID-1-1,0.038,0.499774428013506
+6-PID-1-2,0.037,0.5712444846665292
+6-PID-2-1,0.039,0.4594344469648642
+6-PID-2-2,0.04,0.49413089340948024
+6-PID-3-1,0.02,0.5414775471204692
+6-PID-3-2,0.027,0.4835005040366075
+6-PID-4-1,0.009,0.7538901102273311
+6-PID-4-2,0.014,0.7589902208022125
+7-PFA-0-1,0.87,0.44120648819447855
+7-PFA-0-2,0.78,0.4177407092067101
+7-PFA-1-1,0.86,0.4821081396301719
+7-PFA-1-2,0.82,0.4006453426541226
+7-PFA-2-1,0.56,0.34705572162101234
+7-PFA-2-2,0.62,0.40820261049405293
+7-PFA-3-1,0.57,0.4232287075991503
+7-PFA-3-2,0.6,0.39420276076969885
+7-PFA-4-1,0.6,0.36658338302347926
+7-PFA-4-2,0.7,0.458458353356435
+7-PID-1-1,0.048,0.5890989181011332
+7-PID-1-2,0.035,0.4489672960554259
+7-PID-2-1,0.05,0.5091893954559619
+7-PID-2-2,0.038,0.46582524029968414
+7-PID-3-1,0.024,0.5880264933787529
+7-PID-3-2,0.033,0.5640052852568394
+7-PID-4-1,0.013,0.8379017172040789
+7-PID-4-2,0.025,0.802097323851652
+8-PFA-0-1,0.95,0.3567116524771788
+8-PFA-0-2,1.1,0.3470735728935639
+8-PFA-1-1,1.05,0.42608849302357177
+8-PFA-1-2,0.76,0.5038492195451799
+8-PFA-2-1,0.6,0.36811806938860964
+8-PFA-2-2,0.66,0.3596707400773089
+8-PFA-3-1,0.6,0.5810192984266385
+8-PFA-3-2,0.46,0.375216650813277
+8-PFA-4-1,0.62,0.42130837935001453
+8-PFA-4-2,0.62,0.37154410493248724
+8-PID-1-1,0.064,0.37225108957396674
+8-PID-1-2,0.034,0.5541025203432555
+8-PID-2-1,0.065,0.38772639486747945
+8-PID-2-2,0.036,0.5609854356653375
+8-PID-3-1,0.037,0.4291250037192124
+8-PID-3-2,0.028,0.42325787086810823
+8-PID-4-1,0.018,0.3654463171640024
+8-PID-4-2,0.016,0.7337218554010868
diff --git a/doc/source/examples/notebooks/template.pct.py.txt b/doc/source/examples/notebooks/template.pct.py.txt
new file mode 100644
index 000000000..3ed62b177
--- /dev/null
+++ b/doc/source/examples/notebooks/template.pct.py.txt
@@ -0,0 +1,36 @@
+# %% [markdown]
+"""
+# First-level section title goes here.
+
+This is a Markdown cell that uses multiline comments.
+Second line here.
+
+## Second level section
+
+Here is some math:
+$$
+ \int_0^\infty \frac{x^3}{e^x-1}\,dx = \frac{\pi^4}{15}
+$$
+
+And here is some inline math. Variable $x$ is equal to $y$.
+"""
+
+# %% [markdown]
+# Another Markdown cell with a single-line comment.
+
+
+# %% nbsphinx="hidden"
+# This is a hidden code cell
+# We will use this to turn the examples into additional tests without
+# polluting the documentation with imports and assertions.
+class A:
+ def one():
+ return 1
+
+ def two():
+ return 2
+
+
+# %%
+# This is a visible code cell
+print("Hello, world!")
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 000000000..3092bb16b
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,49 @@
+:notoc:
+
+=======================
+ Pelicun Documentation
+=======================
+
+.. warning::
+
+ Development Preview Only.
+ This documentation is intended for internal review and demonstration purposes.
+ It is not finalized or ready for public use.
+
+Pelicun is an open-source Python package for the probabilistic estimation of losses, injuries, and community resilience under natural disasters.
+Community-driven, easy to use and extend, it serves as an integrated multi-hazard risk estimation framework for buildings and other infrastructure.
+Utilized in both academia and industry, it supports cutting-edge natural hazards engineering research while helping spread the adoption of performance-based engineering in practical applications.
+
+.. toctree::
+ :caption: About
+ :maxdepth: 1
+ :numbered: 4
+
+ about/license.rst
+ about/cite.rst
+ about/acknowledgments.rst
+ release_notes/index.rst
+
+.. toctree::
+ :caption: User Guide
+ :maxdepth: 1
+ :numbered: 4
+
+ user_guide/install.rst
+ user_guide/pelicun_framework.rst
+ user_guide/feature_overview.rst
+ user_guide/damage_and_loss_library.rst
+ user_guide/bug_reports_and_feature_requests.rst
+ user_guide/resources_for_new_python_users.rst
+ examples/index.rst
+ api_reference/index.rst
+
+.. toctree::
+ :caption: Developer Guide
+ :maxdepth: 1
+ :numbered: 4
+
+ developer_guide/getting_started.rst
+ developer_guide/development_environment.rst
+ developer_guide/code_quality.rst
+ developer_guide/internals.rst
diff --git a/doc/source/references.bib b/doc/source/references.bib
new file mode 100644
index 000000000..8dd996cf9
--- /dev/null
+++ b/doc/source/references.bib
@@ -0,0 +1,535 @@
+@book{applied_technology_council_atc_fema_2012,
+ edition = 1,
+ title = {{FEMA} {P}58: {Seismic} {Performance} {Assessment} of {Buildings} - {Methodology}},
+ volume = 1,
+ language = {en},
+ publisher = {Federal Emergency Management Agency},
+ editor = {Applied Technology Council ATC},
+ year = 2012
+}
+
+@book{federal_emergency_management_agency_fema_hazus_2018-2,
+ title = {Hazus - {MH} 2.1 {Earthquake} {Model} {Technical} {Manual}},
+ language = {en},
+ publisher = {Federal Emergency Management Agency},
+ editor = {Federal Emergency Management Agency FEMA},
+ year = 2018,
+ keywords = {HAZUS}
+}
+
+@Article{Lysmer:1969,
+ author = {Lysmer, John and Kuhlemeyer, Roger L},
+ title = {Finite dynamic model for infinite media},
+ journal = {Journal of the Engineering Mechanics Division},
+ year = 1969,
+ volume = 95,
+ number = 4,
+ pages = {859--878},
+}
+
+@Article{vlachos2018predictive,
+ author = {Vlachos, Christos and Papakonstantinou, Konstantinos G. and Deodatis, George},
+ title = {Predictive model for site specific simulation of ground motions based on earthquake scenarios},
+ journal = {Earthquake Engineering \& Structural Dynamics},
+ year = 2018,
+ volume = 47,
+ number = 1,
+ pages = {195-218},
+ doi = {10.1002/eqe.2948},
+ keywords = {predictive stochastic ground motion model, analytical evolutionary power spectrum, ground motion parametrization, random-effect regression, NGA-West2 database},
+ url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/eqe.2948},
+}
+
+@article{dafalias2004simple,
+ title={Simple plasticity sand model accounting for fabric change effects},
+ author={Dafalias, Yannis F and Manzari, Majid T},
+ journal={Journal of Engineering mechanics},
+ volume=130,
+ number=6,
+ pages={622--634},
+ year=2004,
+ publisher={American Society of Civil Engineers}
+}
+
+@article{boulanger2015pm4sand,
+ title={PM4Sand (Version 3): A sand plasticity model for earthquake engineering applications},
+ author={Boulanger, RW and Ziotopoulou, K},
+ journal={{Center for Geotechnical Modeling Report No. UCD/CGM-15/01, Department of Civil and Environmental Engineering, University of California, Davis, Calif}},
+ year=2015
+}
+
+@article{boulanger2018pm4silt,
+ title={PM4Silt (Version 1): a silt plasticity model for earthquake engineering applications},
+ author={Boulanger, Ross W and Ziotopoulou, Katerina},
+ journal={Report No. UCD/CGM-18/01, Center for Geotechnical Modeling, Department of Civil and Environmental Engineering, University of California, Davis, CA, 108 pp.},
+ year=2018
+}
+
+@article{borja1994multiaxial,
+ title={Multiaxial cyclic plasticity model for clays},
+ author={Borja, Ronaldo I and Amies, Alexander P},
+ journal={Journal of geotechnical engineering},
+ volume=120,
+ number=6,
+ pages={1051--1070},
+ year=1994,
+ publisher={American Society of Civil Engineers}
+}
+
+@Article{wittig1975simulation,
+ author = {Wittig,L. E. and Sinha,A. K.},
+ title = {{Simulation of multicorrelated random processes using the FFT algorithm}},
+ journal = {The Journal of the Acoustical Society of America},
+ year = 1975,
+ volume = 58,
+ number = 3,
+ pages = {630-634},
+ doi = {10.1121/1.380702},
+ url = {https://doi.org/10.1121/1.380702},
+}
+
+@Article{kaimal1972spectral,
+ author = {Kaimal, J. C. and Wyngaard, J. C. and Izumi, Y. and Cot{\'e}, O. R.},
+ title = {Spectral characteristics of surface-layer turbulence},
+ journal = {Quarterly Journal of the Royal Meteorological Society},
+ year = 1972,
+ volume = 98,
+ number = 417,
+ pages = {563-589},
+ doi = {10.1002/qj.49709841707},
+ url = {https://rmets.onlinelibrary.wiley.com/doi/abs/10.1002/qj.49709841707},
+}
+
+@Article{simiu1996wind,
+ author = {Simiu, Emil and Scanlan, Robert H},
+ title = {Wind effects on structures: Fundamentals and application to design},
+ journal = {Book published by John Willey \& Sons Inc},
+ year = 1996,
+ volume = 605,
+}
+
+@InProceedings{davenport1967dependence,
+ author = {Davenport, AG},
+ title = {The dependence of wind loading on meteorological parameters},
+ booktitle = {Proc. of Int. Res. Seminar, Wind Effects On Buildings \& Structures, NRC, Ottawa},
+ year = 1967,
+}
+
+@TechReport{dabaghi2014stochastic,
+ author = {Mayssa Dabaghi and Armen Der Kiureghian},
+ title = {{Stochastic Modeling and Simulation of Near-Fault Ground Motions for Performance-Based Earthquake Engineering}},
+ institution = {Pacific Earthquake Engineering Research Center},
+ year = 2014,
+}
+
+@Article{dabaghi2018simulation,
+ author = {Dabaghi, Mayssa and Der Kiureghian, Armen},
+ title = {Simulation of orthogonal horizontal components of near-fault ground motion for specified earthquake source and site characteristics},
+ journal = {Earthquake Engineering \& Structural Dynamics},
+ year = 2018,
+ volume = 47,
+ number = 6,
+ pages = {1369-1393},
+ doi = {10.1002/eqe.3021},
+ eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/eqe.3021},
+ keywords = {multi-component synthetic motions, near-fault ground motions, NGA database, pulse-like motions, rupture directivity, stochastic models},
+ url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/eqe.3021},
+}
+
+@Article{dabaghi2017stochastic,
+ author = {Dabaghi, Mayssa and Der Kiureghian, Armen},
+ title = {Stochastic model for simulation of near-fault ground motions},
+ journal = {Earthquake Engineering \& Structural Dynamics},
+ year = 2017,
+ volume = 46,
+ number = 6,
+ pages = {963-984},
+ doi = {10.1002/eqe.2839},
+ eprint = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/eqe.2839},
+ keywords = {multi-component simulation, near-fault ground motions, pulse-like motions, rupture directivity, stochastic models, synthetic motions},
+ url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/eqe.2839},
+}
+
+@Article{somerville1997modification,
+ author = {Somerville, Paul G. and Smith, Nancy F. and Graves, Robert W. and Abrahamson, Norman A.},
+ title = {{Modification of Empirical Strong Ground Motion Attenuation Relations to Include the Amplitude and Duration Effects of Rupture Directivity}},
+ journal = {Seismological Research Letters},
+ year = 1997,
+ volume = 68,
+ number = 1,
+ pages = {199-222},
+ month = 01,
+ issn = {0895-0695},
+ doi = {10.1785/gssrl.68.1.199},
+ eprint = {https://pubs.geoscienceworld.org/srl/article-pdf/68/1/199/2753665/srl068001\_0199.pdf},
+ url = {https://doi.org/10.1785/gssrl.68.1.199},
+}
+
+@Comment{jabref-meta: databaseType:bibtex;}
+@article{plucked-string,
+ author = "Kevin Karplus and Alex Strong",
+ title = "Digital Synthesis of Plucked-String and Drum Timbres",
+ year = 1983,
+ journal = "Computer Music Journal",
+ volume = 7,
+ number = 2,
+ pages = "43-55"
+}
+
+@article{plot,
+  author = "James A. Moorer",
+  title = "Signal Processing Aspects of Computer Music--A Survey",
+  year = 1977,
+  journal = "Computer Music Journal",
+  volume = 1,
+  number = 1,
+  pages = 14
+}
+
+@article{plucked-string-extensions,
+ author = "David Jaffe and Julius Smith",
+ title = "Extensions of the {K}arplus-{S}trong Plucked String Algorithm",
+ year = 1983,
+ journal = "Computer Music Journal",
+ volume = 7,
+ number = 2,
+ pages = "56-69"
+}
+
+@article{waveshaping,
+  author = "Curtis Roads",
+  title = "A Tutorial on Non-Linear Distortion or Waveshaping Synthesis",
+  year = 1979,
+  journal = "Computer Music Journal",
+  volume = 3,
+  number = 2,
+  pages = "29-34"
+}
+
+@book{shannon-weaver,
+ author = "Claude E. Shannon and Warren Weaver",
+ title = "The Mathematical Theory of Communication",
+ address = "Urbana, Chicago, and London",
+ publisher = "University of Illinois Press",
+ year = 1949
+}
+
+@article{fm,
+  author = "John M. Chowning",
+  title = "The Synthesis of Complex Audio Spectra by Means of Frequency Modulation",
+  year = 1973,
+  journal = "Journal of the Audio Engineering Society",
+  volume = 21,
+  number = 7,
+  pages = "526-534"
+}
+
+@incollection{cmusic,
+  author = "F. Richard Moore",
+  title = "The {CMUSIC} Sound Synthesis Program",
+  booktitle = "{CARL} Startup Kit",
+  year = 1985,
+  publisher = "Center for Music Experiment"
+}
+
+@book{big-oh,
+ author = "Donald~E. Knuth",
+ title = "The Art of Computer Programming; Vol. 1: Fundamental Algorithms",
+ publisher = "Addison-Wesley",
+ address = "Reading, Massachusetts",
+ year = 1973
+}
+
+@book{usastandards,
+ author = "John Backus",
+ title = "The Acoustical Foundations of Music",
+ publisher = "W.~W.~Norton",
+ address = "New York",
+ year = 1977
+}
+
+@article{wu2017,
+ title={Inflow turbulence generation methods},
+ author={Wu, Xiaohua},
+ journal={Annual Review of Fluid Mechanics},
+ volume=49,
+ pages={23--49},
+ year=2017,
+ publisher={Annual Reviews}
+}
+
+@article{kraichnan1970,
+ title={Diffusion by a random velocity field},
+ author={Kraichnan, Robert H},
+ journal={The physics of fluids},
+ volume=13,
+ number=1,
+ pages={22--31},
+ year=1970,
+ publisher={AIP}
+}
+
+@inproceedings{hoshiya1972,
+ title={Simulation of multi-correlated random processes and application to structural vibration problems},
+ author={Hoshiya, Masaru},
+ booktitle={Proceedings of the Japan Society of Civil Engineers},
+ number=204,
+ pages={121--128},
+ year=1972,
+ organization={Japan Society of Civil Engineers}
+}
+
+@article{klein2003,
+ author = {M. Klein and A. Sadiki and J. Janicka},
+ title = {A digital filter based generation of inflow data for spatially developing direct numerical or large eddy simulations},
+ journal = {Journal of Computational Physics},
+ volume = 186,
+ number = 2,
+ pages = {652--665},
+ year = 2003,
+ publisher={Elsevier}
+}
+
+@article{jarrin2006,
+ title={A synthetic-eddy-method for generating inflow conditions for large-eddy simulations},
+ author={Jarrin, Nicolas and Benhamadouche, Sofiane and Laurence, Dominique and Prosser, Robert},
+ journal={International Journal of Heat and Fluid Flow},
+ volume=27,
+ number=4,
+ pages={585--593},
+ year=2006,
+ publisher={Elsevier}
+}
+
+@article{aboshosha2015,
+ title={Consistent inflow turbulence generator for LES evaluation of wind-induced responses for tall buildings},
+ author={Aboshosha, Haitham and Elshaer, Ahmed and Bitsuamlak, Girma T and El Damatty, Ashraf},
+ journal={Journal of Wind Engineering and Industrial Aerodynamics},
+ volume=142,
+ pages={198--216},
+ year=2015,
+ publisher={Elsevier}
+}
+
+@article{shinozuka1972,
+ title={Digital simulation of random processes and its applications},
+ author={Shinozuka, Masanobu and Jan, C-M},
+ journal={Journal of sound and vibration},
+ volume=25,
+ number=1,
+ pages={111--128},
+ year=1972,
+ publisher={Elsevier}
+}
+
+@article{smirnov2001,
+ title={Random flow generation technique for large eddy simulations and particle-dynamics modeling},
+ author={Smirnov, A and Shi, S and Celik, I},
+ journal={Journal of fluids engineering},
+ volume=123,
+ number=2,
+ pages={359--371},
+ year=2001,
+ publisher={American Society of Mechanical Engineers}
+}
+
+@article{yu2014,
+ title={A fully divergence-free method for generation of inhomogeneous and anisotropic turbulence with large spatial variation},
+ author={Yu, Rixin and Bai, Xue-Song},
+ journal={Journal of Computational Physics},
+ volume=256,
+ pages={234--253},
+ year=2014,
+ publisher={Elsevier}
+}
+
+@article{huang2010,
+ title={A general inflow turbulence generator for large eddy simulation},
+ author={Huang, SH and Li, QS and Wu, JR},
+ journal={Journal of Wind Engineering and Industrial Aerodynamics},
+ volume=98,
+ number={10-11},
+ pages={600--617},
+ year=2010,
+ publisher={Elsevier}
+}
+
+@article{castro2017,
+ title={Evaluation of the proper coherence representation in random flow generation based methods},
+ author={Castro, Hugo G and Paz, Rodrigo R and Mroginski, Javier L and Storti, Mario A},
+ journal={Journal of Wind Engineering and Industrial Aerodynamics},
+ volume=168,
+ pages={211--227},
+ year=2017,
+ publisher={Elsevier}
+}
+
+@article{lund1998,
+ title={Generation of turbulent inflow data for spatially-developing boundary layer simulations},
+ author={Lund, Thomas S and Wu, Xiaohua and Squires, Kyle D},
+ journal={Journal of computational physics},
+ volume=140,
+ number=2,
+ pages={233--258},
+ year=1998,
+ publisher={Elsevier}
+}
+
+@article{kim2013,
+ title={Divergence-free turbulence inflow conditions for large-eddy simulations with incompressible flow solvers},
+ author={Kim, Yusik and Castro, Ian P and Xie, Zheng-Tong},
+ journal={Computers \& Fluids},
+ volume=84,
+ pages={56--68},
+ year=2013,
+ publisher={Elsevier}
+}
+
+@article{poletto2013,
+ title={A new divergence free synthetic eddy method for the reproduction of inlet flow conditions for LES},
+ author={Poletto, R and Craft, T and Revell, A},
+ journal={Flow, turbulence and combustion},
+ volume=91,
+ number=3,
+ pages={519--539},
+ year=2013,
+ publisher={Springer}
+}
+
+@article{xie2008,
+ title={Efficient generation of inflow conditions for large eddy simulation of street-scale flows},
+ author={Xie, Zheng-Tong and Castro, Ian P},
+ journal={Flow, turbulence and combustion},
+ volume=81,
+ number=3,
+ pages={449--470},
+ year=2008,
+ publisher={Springer}
+}
+
+@article{Khosravifar2018,
+author = {Khosravifar, Arash and Elgamal, Ahmed and Lu, Jinchi and Li, John},
+doi = {https://doi.org/10.1016/j.soildyn.2018.04.008},
+issn = {0267-7261},
+journal = {Soil Dynamics and Earthquake Engineering},
+keywords = {Constitutive modeling,Cyclic mobility,Liquefaction,Plasticity,Triggering},
+pages = {43--52},
+title = {{A 3D model for earthquake-induced liquefaction triggering and post-liquefaction response}},
+url = {http://www.sciencedirect.com/science/article/pii/S0267726117308722},
+volume = 110,
+year = 2018
+}
+
+@article{Phoon1999,
+author = {Phoon, Kok Kwang and Kulhawy, Fred H.},
+doi = {10.1139/t99-038},
+issn = 00083674,
+journal = {Canadian Geotechnical Journal},
+keywords = {Coefficient of variation,Geotechnical variability,Inherent soil variability,Measurement error,Scale of fluctuation},
+number = 4,
+pages = {612--624},
+title = {Characterization of geotechnical variability},
+volume = 36,
+year = 1999
+}
+
+@phdthesis{Shin2007,
+author = {Shin, HyungSuk},
+school = {University of Washington},
+address = {Seattle, WA},
+title = {Numerical modeling of a bridge system {\&} its application for performance-based earthquake engineering},
+year = 2007
+}
+
+@article{Yamazaki1988,
+abstract = {A method by which sample fields of a multidimensional non-Gaussian homogeneous stochastic field can be generated is developed. The method first generates Gaussian sample fields and then maps them into non-Gaussian sample fields with the aid of an iterative procedure. Numerical examples indicate that the procedure is very efficient and generated sample fields satisfy the target spectral density and probability distribution function accurately. The proposed method has a wide range of applicability to engineering problems involving stochastic fields where the Gaussian assumption is not appropriate. {\textcopyright} ASCE.},
+author = {Yamazaki, Fumio and Shinozuka, Masanobu},
+doi = {10.1061/(asce)0733-9399(1988)114:7(1183)},
+issn = {0733-9399},
+journal = {Journal of Engineering Mechanics},
+number = 7,
+pages = {1183--1197},
+title = {Digital Generation of {Non-Gaussian} Stochastic Fields},
+volume = 114,
+year = 1988
+}
+
+@unpublished{Chen2020a,
+author = {Chen, Long and Arduino, Pedro},
+title = {Implementation, verification, and validation of {PM4Sand} model in {OpenSees}},
+note = "PEER Report - Submitted, under review",
+institution = {Pacific Earthquake Engineering Research Center},
+year = 2020
+}
+
+@article{Andrus2000,
+author = {Andrus, Ronald. D. and Stokoe, Kenneth H},
+journal = {Journal of Geotechnical and Geoenvironmental Engineering},
+number = 11,
+pages = {1015-1025},
+title = {Liquefaction resistance of soils from shear wave velocity},
+volume = 126,
+year = 2000
+}
+
+@article{Youd2001,
+author = {Youd, T. L. and Idriss, I. M.},
+doi = {10.1061/(asce)1090-0241(2001)127:4(297)},
+issn = {1090-0241},
+journal = {Journal of Geotechnical and Geoenvironmental Engineering},
+number = 4,
+pages = {297-313},
+title = {Liquefaction Resistance of Soils: Summary Report from the 1996 NCEER and 1998 NCEER/NSF Workshops on Evaluation of Liquefaction Resistance of Soils},
+volume = 127,
+year = 2001
+}
+
+@article{Cetin2004,
+author = {Cetin, K. Onder and Tokimatsu, Kohji and Harder, Leslie F. and Moss, Robert E. S. and Kayen, Robert E. and {Der Kiureghian}, Armen and Seed, Raymond B.},
+doi = {10.1061/(asce)1090-0241(2004)130:12(1314)},
+isbn = {1090-0241},
+issn = {1090-0241},
+journal = {Journal of Geotechnical and Geoenvironmental Engineering},
+number = 12,
+pages = {1314-1340},
+pmid = 22936425,
+title = {Standard penetration test-based probabilistic and deterministic assessment of seismic soil liquefaction potential},
+volume = 130,
+year = 2004
+}
+
+@book{Idriss2008,
+series = {MNO-12},
+publisher = {Earthquake Engineering Research Institute},
+isbn = 9781932884364,
+year = 2008,
+title = {Soil liquefaction during earthquakes},
+language = {eng},
+address = {Oakland, Calif.},
+author = {Idriss, I. M. and Boulanger, R. W.},
+keywords = {Earthquakes; Soil liquefaction; Landslide hazard analysis},
+}
+
+@article{guan2020python,
+ title={Python-based computational platform to automate seismic design, nonlinear structural model construction and analysis of steel moment resisting frames},
+ author={Guan, Xingquan and Burton, Henry and Sabol, Thomas},
+ journal={Engineering Structures},
+ volume=224,
+ pages=111199,
+ year=2020,
+ publisher={Elsevier}
+}
+
+
+@techreport{parkDatabaseassistedDesignEquivalent2018,
+ title = {Database-Assisted Design and Equivalent Static Wind Loads for Mid- and High-Rise Structures: Concepts, Software, and User's Manual},
+ shorttitle = {Database-Assisted Design and Equivalent Static Wind Loads for Mid- and High-Rise Structures},
+ author = {Park, Sejun and Yeo, DongHun},
+ year = 2018,
+ month = jun,
+ address = {{Gaithersburg, MD}},
+ institution = {{National Institute of Standards and Technology}},
+ doi = {10.6028/NIST.TN.2000},
+ language = {en},
+ number = {NIST TN 2000}
+}
diff --git a/doc/source/release_notes/index.rst b/doc/source/release_notes/index.rst
new file mode 100644
index 000000000..9367dbcd1
--- /dev/null
+++ b/doc/source/release_notes/index.rst
@@ -0,0 +1,39 @@
+.. _release_notes:
+
+*************
+Release Notes
+*************
+
+The following sections document the notable changes of each release.
+The sequence of all changes is available in the `commit logs <https://github.com/NHERI-SimCenter/pelicun/commits/>`_.
+
+Version 3.0
+-----------
+
+.. toctree::
+ :maxdepth: 2
+
+ unreleased
+ v3.3.0
+ v3.2.0
+ v3.1.0
+ v3.0.0
+
+Version 2.0
+-----------
+
+.. toctree::
+ :maxdepth: 2
+
+ v2.6.0
+ v2.5.0
+ v2.1.1
+ v2.0.0
+
+Version 1.0
+-----------
+
+.. toctree::
+ :maxdepth: 2
+
+ v1.1.0
diff --git a/doc/source/release_notes/unreleased.rst b/doc/source/release_notes/unreleased.rst
new file mode 100644
index 000000000..0c0e11ced
--- /dev/null
+++ b/doc/source/release_notes/unreleased.rst
@@ -0,0 +1,67 @@
+.. _changes_unreleased:
+
+==========
+Unreleased
+==========
+
+Added
+-----
+
+**Documentation pages**: Documentation for pelicun 3 is back online. The documentation includes guides for users and developers as well as an auto-generated API reference. A lineup of examples is planned to be part of the documentation, highlighting specific features, including the new ones listed in this section.
+
+**Consequence scaling**: This feature can be used to apply scaling factors to consequence and loss functions for specific decision variables, component types, locations and directions. This can make it easier to examine several different consequence scaling schemes without the need to repeat all calculations or write extensive custom code.
+
+**Loss functions**: Loss functions are used to estimate losses directly from the demands. The damage and loss models were substantially restructured to facilitate the use of loss functions.
+
+**Loss combinations**: Loss combinations allow for the combination of two types of losses using a multi-dimensional lookup table. For example, independently calculated losses from wind and flood can be combined to produce a single loss estimate considering both demands.
+
+**Utility demand**: Utility demands are compound demands calculated using a mathematical expression involving other demands. Practical examples include applying a mathematical expression to a demand before using it to estimate damage, or combining multiple demands with a multivariate expression to generate a combined demand. Such utility demands can be used to implement multidimensional fragility models that rely on a single, one-dimensional distribution defined through a combination of multiple input variables.
+
+**Normal distribution with standard deviation**: Added two new variants of "normal" in ``uq.py``: ``normal_COV`` and ``normal_STD``. Since the variance of the default normal random variables is currently defined via the coefficient of variation, the new ``normal_STD`` is required to define a normal random variable with zero mean. ``normal_COV`` is treated the same way as the default ``normal``.
+
+**Weibull random variable**: Added a Weibull random variable class in ``uq.py``.
+
+**New ``DL_calculation.py`` input file options**: We expanded configuration options in the ``DL_calculation.py`` input file specification. Specifically, we added ``CustomDLDataFolder`` for specifying additional user-defined components.
+
+**Warnings in red**: Added support for colored outputs. In execution environments that support colored outputs, warnings are now shown in red.
+
+Code base related additions, which do not directly implement new features but nonetheless enhance robustness, include the following:
+
+- pelicun-specific warnings with the option to disable them
+- a JSON schema for the input file used to configure simulations through ``DL_calculation.py``
+- addition of type hints in the entire code base
+- addition of slots in all classes, preventing on-the-fly definition of new attributes which is prone to bugs
+
+Changed
+-------
+
+- Updated random variable class names in ``uq.py``.
+- Extensive code refactoring for improved organization and to support the new features. We made a good-faith effort to maintain backwards compatibility, and issue helpful warnings to assist migration to the new syntax.
+- Moved most of the code in ``DL_calculation.py`` to ``assessment.py`` and created an assessment class.
+- Migrated to Ruff for linting and code formatting. Began using mypy for type checking and codespell for spell checking.
+
+Deprecated
+----------
+
+- The ``.bldg_repair`` attribute was renamed to ``.loss``.
+- ``.repair`` had also been used in the past; please use ``.loss`` instead.
+- In the damage and loss model library, ``fragility_DB`` was renamed to ``damage_DB`` and ``bldg_repair_DB`` was renamed to ``loss_repair_DB``.
+- ``load_damage_model`` was renamed to ``load_model_parameters`` and the syntax has changed. Please see the applicable warning message when using ``load_damage_model`` for the updated syntax.
+- ``{damage model}.sample`` was deprecated in favor of ``{damage model}.ds_model.sample``.
+- The ``DMG-`` flag in the loss_map index is no longer required.
+- ``BldgRepair`` column is deprecated in favor of ``Repair``.
+- ``load_model`` -> ``load_model_parameters``
+- ``{loss model}.save_sample`` -> ``{loss model}.ds_model.save_sample``. The same applies to ``load_sample``.
+
+Removed
+-------
+
+- No features were removed in this version.
+- We suspended the use of flake8 and pylint after adopting the use of ruff.
+
+Fixed
+-----
+
+- Fixed a bug affecting the random variable classes, where the anchor random variable was not being correctly set.
+- Enforced a value of 1.0 for non-directional multipliers for HAZUS analyses.
+- Fixed bug in demand cloning: Previously demand unit data were being left unmodified during demand cloning operations, leading to missing values.
+- Reviewed and improved docstrings in the entire code base.
diff --git a/doc/source/release_notes/v1.1.0.rst b/doc/source/release_notes/v1.1.0.rst
new file mode 100644
index 000000000..8f172b267
--- /dev/null
+++ b/doc/source/release_notes/v1.1.0.rst
@@ -0,0 +1,8 @@
+.. _changes_v1_1_0:
+
+================================
+Version 1.1.0 (February 6, 2019)
+================================
+
+- converted to a common JSON format for FEMA P58 and HAZUS Damage and Loss data
+- added component-assembly-based (HAZUS-style) loss assessment methodology for earthquake
diff --git a/doc/source/release_notes/v2.0.0.rst b/doc/source/release_notes/v2.0.0.rst
new file mode 100644
index 000000000..33b42d021
--- /dev/null
+++ b/doc/source/release_notes/v2.0.0.rst
@@ -0,0 +1,48 @@
+.. _changes_v2_0_0:
+
+================================
+Version 2.0.0 (October 15, 2019)
+================================
+
+- Migrated to the latest version of Python, numpy, scipy, and pandas.
+ See setup.py for required minimum versions of those tools.
+
+- Python 2.x is no longer supported.
+
+- Improved DL input structure to
+
+ - make it easier to define complex performance models,
+
+ - make input files easier to read,
+
+ - support custom, non-PACT units for component quantities,
+
+ - and support different component quantities on every floor.
+
+- Updated FEMA P58 DL data to use ea for equipment instead of units such as KV, CF, AP, TN.
+
+- Added FEMA P58 2nd edition DL data.
+
+- Support for EDP inputs in standard csv format.
+
+- Added a function that produces SimCenter DM and DV json output files.
+
+- Added a differential evolution algorithm to the EDP fitting function to do a better job at finding the global optimum.
+
+- Enhanced DL_calculation.py to handle multi-stripe analysis (significant contributions by Joanna Zou):
+
+ - Recognize stripe_ID and occurrence rate in BIM/EVENT file.
+
+ - Fit a collapse fragility function to empirical collapse probabilities.
+
+ - Perform loss assessment for each stripe independently and produce corresponding outputs.
+
+================================
+Version 1.2.0 (October 15, 2019)
+================================
+
+- Added support for HAZUS hurricane wind damage and loss assessment.
+- Added HAZUS hurricane DL data for wooden houses.
+- Moved DL resources inside the pelicun folder so that they come with pelicun when it is pip installed.
+- Added various options for EDP fitting and collapse probability estimation.
+- Improved the way warning messages are printed to make them more useful.
diff --git a/doc/source/release_notes/v2.1.1.rst b/doc/source/release_notes/v2.1.1.rst
new file mode 100644
index 000000000..df556d8a2
--- /dev/null
+++ b/doc/source/release_notes/v2.1.1.rst
@@ -0,0 +1,14 @@
+.. _changes_v2_1_1:
+
+=============================
+Version 2.1.1 (June 30, 2020)
+=============================
+
+- Aggregate DL data from JSON files to HDF5 files.
+ This greatly reduces the number of files and makes it easier to share databases.
+- Significant performance improvements in EDP fitting, damage and loss calculations, and output file saving.
+- Add log file to pelicun that records every important calculation detail and warnings.
+- Add 8 new EDP types: RID, PMD, SA, SV, SD, PGD, DWD, RDR.
+- Drop support for Python 2.x and add support for Python 3.8.
+- Extend auto-population logic with solutions for HAZUS EQ assessments.
+- Several bug fixes and minor improvements to support user needs.
diff --git a/doc/source/release_notes/v2.5.0.rst b/doc/source/release_notes/v2.5.0.rst
new file mode 100644
index 000000000..97ef96f1b
--- /dev/null
+++ b/doc/source/release_notes/v2.5.0.rst
@@ -0,0 +1,18 @@
+.. _changes_v2_5_0:
+
+=================================
+Version 2.5.0 (December 31, 2020)
+=================================
+
+- Extend the uq module to support:
+
+  - More efficient sampling, especially when most of the random variables in the model are either independent or perfectly correlated.
+  - More accurate and more efficient fitting of multivariate probability distributions to raw EDP data.
+  - Arbitrary marginals (beyond the basic Normal and Lognormal) for joint distributions.
+  - Latin Hypercube Sampling
+
+- Introduce external auto-population scripts and provide an example for hurricane assessments.
+- Add a script to help users convert HDF files to CSV (HDF_to_CSV.py under tools)
+- Use unique and standardized attribute names in the input files
+- Migrate to the latest version of Python, numpy, scipy, and pandas (see setup.py for required minimum versions of those tools).
+- Bug fixes and minor improvements to support user needs:
+
+  - Add 1.2 scale factor for EDPs controlling non-directional Fragility Groups.
+  - Remove dependency on scipy's truncnorm function to avoid long computation times due to a bug in recent scipy versions.
diff --git a/doc/source/release_notes/v2.6.0.rst b/doc/source/release_notes/v2.6.0.rst
new file mode 100644
index 000000000..434c26f1c
--- /dev/null
+++ b/doc/source/release_notes/v2.6.0.rst
@@ -0,0 +1,14 @@
+.. _changes_v2_6_0:
+
+==============================
+Version 2.6.0 (August 6, 2021)
+==============================
+
+- Support EDPs with more than 3 characters and/or a variable in their name.
+ For example, ``SA_1.0`` or ``SA_T1``.
+
+- Support fitting normal distribution to raw EDP data (lognormal was already available)
+
+- Extract key settings to base.py to make them more accessible for users.
+
+- Minor bug fixes mostly related to hurricane storm surge assessment
diff --git a/doc/source/release_notes/v3.0.0.rst b/doc/source/release_notes/v3.0.0.rst
new file mode 100644
index 000000000..0608853a9
--- /dev/null
+++ b/doc/source/release_notes/v3.0.0.rst
@@ -0,0 +1,52 @@
+.. _changes_v3_0_0:
+
+==================================
+Version 3.0.0 (December 31, 2021)
+==================================
+
+- The architecture was redesigned to better support interactive calculation and provide a low-level integration across all supported methods.
+ This is the first release with the new architecture.
+ Frequent updates are planned to provide additional examples, tests, and bugfixes in the next few months.
+
+- New assessment module introduced to replace control module:
+
+ - Provides a high-level access to models and their methods.
+
+ - Integrates all types of assessments into a uniform approach.
+
+ - Most of the methods from the earlier control module were moved to the model module.
+
+- Decoupled demand, damage, and loss calculations:
+
+ - Fragility functions and consequence functions are stored in separate files.
+ Added new methods to the db module to prepare the corresponding data files and re-generated such data for FEMA P58 and Hazus earthquake assessments.
+ Hazus hurricane data will be added in a future release.
+
+ - Decoupling removed a large amount of redundant data from supporting databases and made the use of HDF and json files for such data unnecessary.
+ All data are stored in easy-to-read csv files.
+
+ - Assessment workflows can include all three steps (i.e., demand, damage, and loss) or only one or two steps.
+ For example, damage estimates from one analysis can drive loss calculations in another one.
+
+- Integrated damage and loss calculation across all methods and components:
+
+ - This includes phenomena such as collapse, including various collapse modes, and irreparable damage.
+
+ - Cascading damages and other interdependencies between various components can be introduced using a damage process file.
+
+ - Losses can be driven by damages or demands.
+ The former supports the conventional damage->consequence function approach, while the latter supports the use of vulnerability functions.
+ These can be combined within the same analysis, if needed.
+
+ - The same loss component can be driven by multiple types of damages.
+ For example, replacement can be triggered by either collapse or irreparable damage.
+
+- Introduced Options in the configuration file and in the base module:
+
+  - These options handle settings that concern pelicun behavior, general preferences that might affect multiple assessment models, and settings that users would not want to change frequently.
+
+ - Default settings are provided in a default_config.json file. These can be overridden by providing any of the prescribed keys with a user-defined value assigned to them in the configuration file for an analysis.
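+
+As a minimal sketch of overriding defaults in an interactive calculation (the specific keys shown are illustrative and must correspond to entries in default_config.json):
+
+.. code:: python
+
+   from pelicun.assessment import Assessment
+
+   # any key provided here overrides the corresponding default setting
+   assessment = Assessment(config_options={'Verbose': True, 'Seed': 42})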
diff --git a/doc/source/release_notes/v3.1.0.rst b/doc/source/release_notes/v3.1.0.rst
new file mode 100644
index 000000000..48a80a6e1
--- /dev/null
+++ b/doc/source/release_notes/v3.1.0.rst
@@ -0,0 +1,26 @@
+.. _changes_v3_1_0:
+
+==================================
+Version 3.1.0 (September 30, 2022)
+==================================
+
+- Calculation settings are now assessment-specific. This allows you to use more than one assessment in an interactive calculation, with each having its own set of options, including log files.
+
+- The uq module was decoupled from the others to enable standalone uq calculations that work without having an active assessment.
+
+- A completely redesigned DL_calculation.py script provides decoupled demand, damage, and loss assessment, and offers more flexibility when setting up each of those steps when pelicun is used with a configuration file in a larger workflow.
+
+- Two new examples that use the DL_calculation.py script and a json configuration file were added to the example folder.
+
+- A new example that demonstrates a detailed interactive calculation in a Jupyter notebook was added to the following DesignSafe project: https://www.designsafe-ci.org/data/browser/public/designsafe.storage.published/PRJ-3411v5.
+ This project will be extended with additional examples in the future.
+
+- Unit conversion factors were moved to an external file (settings/default_units) to make it easier to add new units to the list. This also allows redefining the internal units through a complete replacement of the factors. The internal units continue to follow the SI system.
+
+- Substantial improvements in coding style using flake8 and pylint to monitor and help enforce PEP8.
+
+- Several performance improvements made calculations more efficient, especially for large problems, such as regional assessments or tall buildings investigated using the FEMA P-58 methodology.
+
+- Several bugfixes and a large number of minor changes that make the engine more robust and easier to use.
+
+- Update recommended Python version to 3.10 and other dependencies to more recent versions.
diff --git a/doc/source/release_notes/v3.2.0.rst b/doc/source/release_notes/v3.2.0.rst
new file mode 100644
index 000000000..02a01a7f0
--- /dev/null
+++ b/doc/source/release_notes/v3.2.0.rst
@@ -0,0 +1,90 @@
+.. _changes_v3_2_0:
+
+=================================
+Version 3.2.0 (February 27, 2024)
+=================================
+
+.. _changes_v3_2_0.new:
+
+New features
+------------
+
+- New multilinear CDF Random Variable allows using the multilinear approximation of any CDF in the tool.
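+
+  As a minimal sketch, such a variable can be defined directly through the ``uq`` module; the class name follows recent versions of pelicun, but the exact layout of ``theta`` is an assumption, so verify it against the ``uq`` module before use:
+
+  .. code:: python
+
+     import numpy as np
+     from pelicun import uq
+
+     # CDF described by (value, CDF) pairs, interpolated linearly in between
+     theta = np.array([[0.0, 0.0], [1.0, 0.4], [2.0, 0.8], [4.0, 1.0]])
+     rv = uq.MultilinearCDFRandomVariable('demand_X', theta=theta)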
+
+- Capacity adjustment allows adjusting (scaling or shifting) default capacities (i.e., fragility curves) with factors specific to each Performance Group.
+
+- Support for multiple definitions of the same component at the same location-direction.
+ This feature facilitates adding components with different block sizes to the same floor or defining multiple tenants on the same floor, each with their own set of components.
+
+- Support for cloning demands, that is, taking a provided demand dataset, creating a copy and considering it as another demand.
+ For example, you can provide results of seismic response in the X direction and automatically prepare a copy of them to represent results in the Y direction.
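+
+  As a hedged sketch, the cloning is configured with a mapping from each provided demand to the list of demands it should produce; the ``type-location-direction`` naming follows pelicun's demand convention, while the exact configuration key is an assumption:
+
+  .. code:: python
+
+     # X-direction accelerations and drifts reused for the Y direction
+     demand_cloning = {
+         'PFA-1-1': ['PFA-1-1', 'PFA-1-2'],
+         'PID-1-1': ['PID-1-1', 'PID-1-2'],
+     }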
+
+- Models for estimating Environmental Impact (i.e., embodied carbon and energy) of earthquake damage as per FEMA P-58 are included in the DL Model Library and available in this release.
+
+- "ListAllDamageStates" option allows you to print a comprehensive list of all possible damage states for all components in the columns of the DMG output file.
+ This can make parsing the output easier but increases file size.
+ By default, this option is turned off and only damage states that affect at least one block are printed.
+
+- Damage and Loss Model Library
+
+  - A collection of parameters and metadata for damage and loss models for performance-based engineering.
+    The library is available and updated regularly in the DB_DamageAndLoss GitHub Repository.
+  - Each release of Pelicun, including this one, bundles the version of the library that is current at the time of release.
+
+- DL_calculation tool
+
+  - Support for combining built-in and user-defined databases for damage and loss models.
+
+ - Results are now also provided in standard SimCenter JSON format besides the existing CSV tables.
+ You can specify the preferred format in the configuration file under Output/Format.
+ The default file format is still CSV.
+
+  - Support for running calculations for only a subset of the available consequence types.
+
+
+.. _changes_v3_2_0.breaking:
+
+Backwards incompatible changes
+------------------------------
+
+- Unit information is included in every output file.
+ If you parse Pelicun outputs and did not anticipate a Unit entry, your parser might need an update.
+
+- Decision variable types in the repair consequence outputs are named using CamelCase rather than all capitals to be consistent with other parts of the codebase.
+ For example, we use "Cost" instead of "COST".
+ This might affect post-processing scripts.
+
+- For clarity, "ea" units were replaced with "unitless" where appropriate.
+  Interstory drift ratio demand types are one example.
+  This change should make no practical difference in the calculations.
+
+- Weighted component block assignment is no longer supported.
+  We recommend using the more versatile multiple component definitions (see the new feature above) to achieve the same effect.
+
+- Damage functions (i.e., assign quantity of damage as a function of demand) are no longer supported.
+ We recommend using the new multilinear CDF feature to develop theoretically equivalent but more efficient models.
+
+.. _changes_v3_2_0.changes:
+
+Other changes
+-------------
+
+- Added a comprehensive suite of more than 140 unit tests that cover more than 93% of the codebase.
+ Tests are automatically executed after every commit using GitHub Actions and coverage is monitored through ``Codecov.io``.
+ Badges at the top of the Readme show the status of tests and coverage.
+ We hope this continuous integration facilitates editing and extending the existing codebase for interested members of the community.
+
+- Completed a review of the entire codebase using ``flake8`` and ``pylint`` to ensure PEP8 compliance.
+ The corresponding changes yielded code that is easier to read and use.
+ See guidance in ``Readme`` on linting and how to ensure newly added code is compliant.
+
+- Several error and warning messages were added to provide more meaningful information in the log file when something goes wrong in a simulation.
+
+- Update dependencies to more recent versions.
+
+.. _changes_v3_2_0.remarks:
+
+Remarks
+-------
+
+The online documentation is significantly out of date. While we are working on an update, we recommend using the documentation of the `DL panel in SimCenter's PBE Tool `_ as a resource.
diff --git a/doc/source/release_notes/v3.3.0.rst b/doc/source/release_notes/v3.3.0.rst
new file mode 100644
index 000000000..c471c3bf6
--- /dev/null
+++ b/doc/source/release_notes/v3.3.0.rst
@@ -0,0 +1,93 @@
+.. _changes_v3_3_0:
+
+==============================
+Version 3.3.0 (March 29, 2024)
+==============================
+
+New features
+------------
+
+.. _changes_v3_3_0.new.loc_dmg_prc:
+
+Location-specific damage processes
+..................................
+
+This new feature is useful when you want damage to a component type to induce damage in another component type at the same location only.
+For example, damaged water pipes on a specific story can trigger damage in floor covering only on that specific story.
+Location-matching is performed automatically, without you having to define component pairs for every location, using the following syntax: ``'1_CMP.A-LOC', {'DS1': 'CMP.B_DS1'}``, where ``DS1`` of ``CMP.A`` at each location triggers ``DS1`` of ``CMP.B`` at the same location.
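+
+A minimal sketch of passing such a process to a damage calculation follows; the method and argument names reflect recent versions of pelicun and should be treated as assumptions:
+
+.. code:: python
+
+   # DS1 of CMP.A at each location triggers DS1 of CMP.B at the same location
+   damage_process = {'1_CMP.A-LOC': {'DS1': 'CMP.B_DS1'}}
+   assessment.damage.calculate(dmg_process=damage_process)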
+
+.. _changes_v3_3_0.new.custom_model_dir:
+
+New ``custom_model_dir`` argument for ``DL_calculation.py``
+...........................................................
+
+This argument allows users to prepare custom damage and loss model files in a folder and pass the path to that folder to an auto-population script through ``DL_calculation.py``.
+Within the auto-population script, they can reference only the name of the files in that folder.
+This provides portability for simulations that use custom models and auto population, such as some of the advanced regional simulations in `SimCenter's R2D Tool `_.
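+
+A hedged command-line sketch follows; only ``--custom_model_dir`` is confirmed by this release, the other flag names are assumptions, so check ``DL_calculation.py --help`` for the authoritative list:
+
+.. code::
+
+   python DL_calculation.py --filenameDL config.json \
+       --auto_script my_auto_pop.py \
+       --custom_model_dir ./my_models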
+
+.. _changes_v3_3_0.new.hazus_eq_auto_pop:
+
+Extend Hazus EQ auto population scripts to include water networks
+.................................................................
+
+Automatically recognize water network assets and map them to archetypes from the Hazus Earthquake technical manual.
+
+.. _changes_v3_3_0.new.convert_units:
+
+Introduce ``convert_units`` function
+....................................
+
+Provide streamlined unit conversion using the pre-defined library of units in Pelicun.
+Allows you to convert a variable from one unit to another using a single line of simple code, such as:
+
+.. code::
+
+ converted_height = pelicun.base.convert_units(raw_height, unit='m', to_unit='ft')
+
+While not as powerful as some of the Python packages dedicated to unit conversion (e.g., `Pint `_), we believe the convenience this function provides for commonly used units justifies its use in several cases.
+
+.. _changes_v3_3_0.breaking:
+
+Backwards incompatible changes
+------------------------------
+
+.. _changes_v3_3_0.breaking.bldg:
+
+Remove ``bldg`` from repair consequence output filenames
+........................................................
+
+The increasing scope of Pelicun now covers simulations for transportation and water networks.
+Hence, labeling repair consequence outputs as if they were limited to buildings no longer seems appropriate.
+The bldg label was dropped from the following files: ``DV_bldg_repair_sample``, ``DV_bldg_repair_stats``, ``DV_bldg_repair_grp``, ``DV_bldg_repair_grp_stats``, ``DV_bldg_repair_agg``, ``DV_bldg_repair_agg_stats``.
+
+.. _changes_v3_3_0.changes:
+
+Other changes
+-------------
+
+- We split ``model.py`` into subcomponents.
+ The ``model.py`` file was too large and its contents were easy to refactor into separate modules.
+  Each model type now has its own Python file, and they are stored under the model folder.
+
+- We split the ``RandomVariable`` class into specific classes.
+ It seems more straightforward to grow the list of supported random variables by having a specific class for each kind of RV.
+  We split the existing large RandomVariable class in ``uq.py``, leveraging inheritance to minimize redundant code.
+
+- Automatic code formatting: Further improve consistency in coding style by using black to review and format the code when needed.
+
+- Removed ``bldg`` from variable and class names: Following the changes mentioned earlier, we dropped bldg from labels where the functionality is no longer limited to buildings.
+
+- Introduced ``calibrated`` attribute for demand model: This new attribute will allow users to check if a model has already been calibrated to the provided empirical data.
+
+- The version ceiling for pandas was raised to support versions 2.0 and above, up to (but not including) 3.0.
+
+Soon-to-be removed features
+---------------------------
+
+.. _changes_v3_3_0.deprecated.bldg:
+
+Remove ``Bldg`` from repair settings label in DL configuration file
+...................................................................
+
+Following the changes above, we dropped ``Bldg`` from ``BldgRepair`` when defining settings for repair consequence simulation in a configuration file.
+The previous version (i.e., ``BldgRepair``) will keep working until the next major release, but we encourage everyone to adopt the new approach and simply use the ``Repair`` keyword there.
diff --git a/doc/source/user_guide/bug_reports_and_feature_requests.rst b/doc/source/user_guide/bug_reports_and_feature_requests.rst
new file mode 100644
index 000000000..ebed4a896
--- /dev/null
+++ b/doc/source/user_guide/bug_reports_and_feature_requests.rst
@@ -0,0 +1,34 @@
+.. _bug_reports_and_feature_requests:
+
+Bug reports and feature requests
+--------------------------------
+
+In the case of unexpected behavior, such as getting an error while providing seemingly valid inputs or getting results that appear to be incorrect, please let us know by opening an issue on GitHub.
+We would appreciate it if you could simplify the inputs as much as possible while maintaining the unexpected behavior, which will accelerate our investigative efforts and help us respond sooner.
+In the issue, please include all necessary files as well as the scripts and commands used to demonstrate the bug and explain what the expected behavior would be instead.
+
+We will review your bug report upon receiving it.
+If the expected behavior only requires a modification of your inputs, we will let you know.
+Otherwise, if there is indeed a bug, we will respond with an estimated timeline for a fix and begin a discussion on that same issue page about the necessary steps to resolve it.
+Any further developer communication applicable to the issue will be documented on that page.
+When the fix is implemented, we will close the issue and notify you of the version where the fix was applied.
+
+.. button-link:: https://github.com/NHERI-SimCenter/pelicun/issues/new
+ :color: primary
+ :shadow:
+
+ Submit a bug report
+
+We accept feature requests.
+If there is a feature pelicun lacks that you would like us to implement, you can request it on the pelicun discussion page.
+Please begin the title of your message with "Feature Request:".
+Describe the requested feature as best you can in your message, and point to any relevant technical documents, other software, or any other piece of information that would help us implement the feature.
+We will respond with any questions we have and offer a timeline for implementing the feature or justify our decision to defer implementation.
+If we decide to implement the feature, we will begin by opening an issue page on GitHub, where developer communication will take place, and link it to the discussion page.
+When the feature is implemented, we will close the issue and notify you of the version introducing it.
+
+.. button-link:: https://github.com/orgs/NHERI-SimCenter/discussions/new?category=pelicun
+ :color: primary
+ :shadow:
+
+ Submit a feature request
diff --git a/doc/source/user_guide/damage_and_loss_library.rst b/doc/source/user_guide/damage_and_loss_library.rst
new file mode 100644
index 000000000..2f82d8db4
--- /dev/null
+++ b/doc/source/user_guide/damage_and_loss_library.rst
@@ -0,0 +1,25 @@
+.. _damage_and_loss_library:
+
+Damage and loss library
+-----------------------
+
+.. admonition:: Coming soon.
+
+ This section is under construction.
+
+ We need to finalize the following:
+
+ - The terms we use.
+ - Instructions on how to access the files via ``PelicunDefault`` (pending transition to using the submodule's files).
+ - Agree on whether to include details on the contents in this documentation or in the DLML repo itself.
+
+The NHERI-SimCenter is maintaining a comprehensive Damage and Loss Model Library (DLML) in the form of a GitHub repository.
+The DLML consists of published and commonly used fragility curves, as well as loss and consequence functions.
+More details can be found in the repository.
+All contents of the DLML are included in pelicun.
+
+.. button-link:: https://github.com/NHERI-SimCenter/DamageAndLossModelLibrary
+ :color: primary
+ :shadow:
+
+ Visit the NHERI-SimCenter damage and loss library
diff --git a/doc/source/user_guide/feature_overview.rst b/doc/source/user_guide/feature_overview.rst
new file mode 100644
index 000000000..1768cb34d
--- /dev/null
+++ b/doc/source/user_guide/feature_overview.rst
@@ -0,0 +1,161 @@
+.. _feature_overview:
+
+Overview of pelicun features
+----------------------------
+
+.. admonition:: Coming soon.
+
+ This section is under construction.
+
+.. _fo_saving:
+
+Saving/loading samples
+......................
+
+All demand, asset, damage, and loss samples can be either computed from other inputs or directly loaded from previously computed and saved samples.
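+
+A minimal sketch of this round trip using the demand model (the same ``save_sample``/``load_sample`` pattern applies to the other models; the file names here are illustrative):
+
+.. code:: python
+
+   from pelicun.assessment import Assessment
+
+   assessment = Assessment()
+   # load previously computed demand realizations from a CSV file ...
+   assessment.demand.load_sample('demand_sample.csv')
+   # ... or save the current sample for use in a later session
+   assessment.demand.save_sample('demand_sample_backup.csv')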
+
+.. _fo_logging:
+
+Logging support
+...............
+
+Pelicun produces detailed log files that can be used to document the execution of an assessment as well as information on the host machine and the execution environment.
+These logs can be useful for debugging purposes.
+Pelicun emits detailed warnings whenever appropriate, notifying the user of potentially problematic or inconsistent inputs, evaluation settings, or deprecated syntax.
+
+.. _fo_uq:
+
+Uncertainty quantification
+..........................
+
+Damage and loss estimation is inherently uncertain and treated as a stochastic problem.
+Uncertainty quantification lies at the core of all computations in pelicun.
+Pelicun supports a variety of common parametric univariate random variable distributions.
+With the help of random variable registries, it also supports multivariate distributions joined via a Gaussian copula.
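+
+A minimal sketch of a standalone calculation with two lognormal variables joined via a Gaussian copula (class and method names follow recent versions of the ``uq`` module; treat the details as assumptions):
+
+.. code:: python
+
+   import numpy as np
+   from pelicun import uq
+
+   rv_reg = uq.RandomVariableRegistry(np.random.default_rng(42))
+   # theta holds the median and the log-standard deviation
+   rv_reg.add_RV(uq.LogNormalRandomVariable('PID-1-1', theta=np.array([0.02, 0.40])))
+   rv_reg.add_RV(uq.LogNormalRandomVariable('PFA-1-1', theta=np.array([0.45, 0.35])))
+
+   # correlation matrix of the underlying Gaussian copula
+   rho = np.array([[1.0, 0.6], [0.6, 1.0]])
+   rv_reg.add_RV_set(
+       uq.RandomVariableSet(
+           'EDP_set', [rv_reg.RV['PID-1-1'], rv_reg.RV['PFA-1-1']], rho
+       )
+   )
+   rv_reg.generate_sample(1000, 'LHS')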
+
+.. _fo_assessment_types:
+
+Assessment types
+................
+
+Pelicun supports scenario-based assessments, that is, losses conditioned on a specific value of an Intensity Measure (IM).
+
+..
+ TODO: add links pointing to a glossary/definition index of terms.
+
+.. note::
+
+ Support for time-based assessments is currently in progress.
+
+Demand simulation
+.................
+
+.. _fo_calibration:
+
+Model calibration
+^^^^^^^^^^^^^^^^^
+
+.. _fo_sampling:
+
+Sampling methods
+^^^^^^^^^^^^^^^^
+
+.. _fo_pidrid:
+
+RID|PID inference
+^^^^^^^^^^^^^^^^^
+
+.. _fo_sample_expansion:
+
+Sample expansion
+^^^^^^^^^^^^^^^^
+
+.. _fo_demand_cloning:
+
+Demand cloning
+^^^^^^^^^^^^^^
+
+Damage estimation
+.................
+
+.. _fo_damage_process:
+
+Damage processes
+^^^^^^^^^^^^^^^^
+
+Loss estimation
+.................
+
+.. _fo_loss_maps:
+
+Loss maps
+^^^^^^^^^
+
+.. _fo_active_dvs:
+
+Active decision variables
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. _fo_consequence_scaling:
+
+Consequence scaling
+^^^^^^^^^^^^^^^^^^^
+
+.. _fo_loss_aggregation:
+
+Loss aggregation
+^^^^^^^^^^^^^^^^
+
+..
+   TODO: Also talk about replacement thresholds here.
+
+.. _fo_cli:
+
+Command-line support
+....................
+
+Pelicun can be run from the command line.
+Installing the package enables the ``pelicun`` entry point, which points to ``tools/DL_calculation.py``.
+``DL_calculation.py`` is a script that conducts a performance evaluation using command-line inputs.
+Some of those inputs are paths to required input files, including a JSON file that provides most evaluation options.
+
+..
+ TODO: point to an example, and index the example in the by-feature grouping.
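+
+A hedged invocation sketch follows; the flag names are assumptions based on common usage, so run ``pelicun --help`` for the authoritative list:
+
+.. code::
+
+   pelicun --filenameDL config.json --demandFile demands.csv --dirnameOutput ./results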
+
+.. _fo_autopop:
+
+Input file auto-population
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It is possible for the JSON input file to be auto-populated (extended to include more entries) using either default or user-defined auto-population scripts.
+
+..
+ TODO: Why is this useful? Why would a user want to do this?
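+
+Auto-population is particularly useful in regional simulations, where writing a complete configuration for every asset by hand would be impractical. As a hypothetical sketch, a minimal input that describes only general asset information may be extended with a full damage and loss (DL) section (all keys shown are illustrative):
+
+.. code:: python
+
+   # before auto-population: only general asset information is provided
+   minimal_config = {
+       'GeneralInformation': {'NumberOfStories': 4, 'OccupancyClass': 'RES1'},
+   }
+   # after auto-population: a ruleset has filled in the DL settings
+   extended_config = {
+       'GeneralInformation': {'NumberOfStories': 4, 'OccupancyClass': 'RES1'},
+       'DL': {'Asset': '...', 'Damage': '...', 'Losses': '...'},
+   }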
+
+Standalone tools
+................
+
+.. _fo_convert_units:
+
+Unit conversion
+^^^^^^^^^^^^^^^
+
+.. _fo_fit:
+
+Fit distribution to sample or percentiles
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. _fo_rvs:
+
+Random variable classes
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Feature overview and examples
+.............................
+
+A series of examples, organized by feature, demonstrates the capabilities supported by pelicun.
+
+.. button-link:: ../examples/index.html
+ :color: primary
+ :shadow:
+
+ Visit the examples
+
diff --git a/doc/source/user_guide/figures/MainWorkflowComps.png b/doc/source/user_guide/figures/MainWorkflowComps.png
new file mode 100644
index 000000000..df32ef989
Binary files /dev/null and b/doc/source/user_guide/figures/MainWorkflowComps.png differ
diff --git a/doc/source/user_guide/figures/ModelTypes.png b/doc/source/user_guide/figures/ModelTypes.png
new file mode 100644
index 000000000..b80b21bde
Binary files /dev/null and b/doc/source/user_guide/figures/ModelTypes.png differ
diff --git a/doc/source/user_guide/figures/PerfAssWorkflows.png b/doc/source/user_guide/figures/PerfAssWorkflows.png
new file mode 100644
index 000000000..3818d5852
Binary files /dev/null and b/doc/source/user_guide/figures/PerfAssWorkflows.png differ
diff --git a/doc/source/user_guide/install.rst b/doc/source/user_guide/install.rst
new file mode 100644
index 000000000..7474a9d04
--- /dev/null
+++ b/doc/source/user_guide/install.rst
@@ -0,0 +1,39 @@
+.. _user_install:
+
+Welcome to the pelicun user guide.
+Below, you will find instructions on installing pelicun and information about the supported features, the basic concepts behind them, the terminology used, the expected inputs, and where to get help.
+Join our growing community of users and developers dedicated to advancing risk estimation practices and sharing insights.
+
+
+Getting started
+---------------
+
+`Pelicun `_ is available on the Python Package Index (PyPI) and should work out-of-the-box on all major platforms.
+
+.. tip::
+
+ We recommend installing the package under a `virtual environment `_ to avoid dependency conflicts with other packages.
+ See also `conda `_ and `mamba `_, two widely used programs featuring environment management.
+
+Install command::
+
+ python -m pip install pelicun
+
+Staying up to date
+..................
+
+When a new version is released, you can use ``pip`` to upgrade::
+
+ python -m pip install --upgrade pelicun
+
+
diff --git a/doc/source/user_guide/pelicun_framework.rst b/doc/source/user_guide/pelicun_framework.rst
new file mode 100644
index 000000000..f539e0c93
--- /dev/null
+++ b/doc/source/user_guide/pelicun_framework.rst
@@ -0,0 +1,169 @@
+.. _pelicun_framework:
+
+=====================
+The Pelicun Framework
+=====================
+
+Abbreviations
+-------------
+
+:BIM: Building Information Model
+
+:DL: Damage and Loss
+
+:EDP: Engineering Demand Parameter
+
+:EVT: Hazard Event (of earthquake/tsunami/storm surge hazard)
+
+:GM: Ground Motion (of earthquake hazard)
+
+:IM: Intensity Measure (of hazard event)
+
+:SAM: Structural Analysis Model (i.e. finite element model)
+
+:SIM: Simulation
+
+:UQ: Uncertainty Quantification
+
+:RV: Random Variables
+
+:QoI: Quantities of Interest
+
+:DS: Damage State
+
+:DV: Decision Variable
+
+:LS: Limit State
+
+..
+ TODO(JVM): Go over the glossary and remove unused terms.
+
+..
+ TODO(JVM): Ensure acronyms are spelled out on the first instance.
+
+Introduction to Pelicun
+-----------------------
+
+Pelicun is an open-source Python package released under a **3-Clause BSD** license (see :ref:`license`).
+It can be used to conduct natural hazard risk analyses.
+That is, to quantify damage and losses from a natural hazard scenario.
+Applications can range from a simple and straightforward use of a vulnerability function to model the performance of an entire asset, to detailed high-resolution evaluations involving its individual components.
+Spatial scales can span from a single asset to portfolio-level evaluations involving thousands of assets.
+
+Pelicun implements state-of-the-art approaches to natural hazard risk estimation and, as such, is rooted in probabilistic methods.
+Common steps of an assessment using Pelicun include the following:
+
+* **Describe the joint distribution of demands or asset response.**
+  The response of a structure or other type of asset to a natural hazard event is typically described by so-called engineering demand parameters (EDPs).
+ Pelicun provides various options to characterize the distribution of EDPs.
+ It can calibrate a multivariate distribution that describes the joint distribution of EDPs if raw EDP data is available.
+ Users can control the type of each marginal distribution, apply truncation limits to the marginal distributions, and censor part of the data to consider detection limits in their analysis.
+ Alternatively, Pelicun can use empirical EDP data directly, without resampling from a fitted distribution.
+
+* **Define a performance model.**
+ The fragility and consequence functions from the first two editions of FEMA P-58 and the HAZUS earthquake and hurricane wind and storm surge models for buildings are provided with Pelicun.
+ This facilitates the creation of performance models without having to collect and provide component descriptions and corresponding fragility and consequence functions.
+ An auto-population interface encourages researchers to develop and share rulesets that automate the performance-model definition based on the available building information.
+ Example scripts for such auto-population are also provided with the tool.
+
+* **Simulate asset damage.**
+ Given the EDP samples, and the performance model, Pelicun efficiently simulates the damages in each component of the asset and identifies the proportion of realizations that resulted in collapse.
+
+* **Estimate the consequences of damage.**
+ Using information about collapse and component damages, the following consequences can be estimated with Pelicun: repair cost and time, unsafe placarding (red tag), injuries of various severity and fatalities.
+
+Overview
+--------
+
+The conceptual design of the Pelicun framework is modeled after the FEMA P-58 methodology, which is generalized to provide a flexible system that can accommodate a large variety of damage and loss assessment methods. In the following discussion, we first describe the types of performance assessment workflows this framework aims to support; then, we explain the four interdependent models that comprise the framework.
+
+Loss assessment in its most basic form requires the characterization of the seismic hazard as input and aims to provide an estimate of the consequences, or losses, as output. Using the terminology of FEMA P-58, the severity of the seismic hazard is quantified with the help of intensity measures (IMs). These are characteristic attributes of the ground motions, such as spectral acceleration, peak ground acceleration, or peak ground velocity. Consequences are measured by decision variables (DVs). The most popular DVs are repair cost, repair time, and the number of injuries and fatalities. :numref:`figPerfAssWorkflows` shows three different paths, or performance assessment workflows, from IM to DV:
+
+I. The most efficient approach takes a single, direct step using vulnerability functions. Such vulnerability functions can be calibrated for broad classes of buildings (e.g. single-family wooden houses) using ground motion intensity maps and insurance claims data from past earthquakes. While this approach allows for rapid calculations, it does not provide information about structural response or damage.
+
+II. The second approach introduces damage measures (DMs), which classify damages into damage states (DSs). Each damage state represents a set of potential damages to a structure, or structural component, that require similar kinds and amounts of repair effort. Given a database of IMs and corresponding DMs after an earthquake, fragility functions can be calibrated to describe the relationship between them. The more data is available, the more specialized (i.e., specific to particular types of buildings) fragility functions can be developed. The second step in this path uses consequence functions to describe losses as a function of damages. Consequence functions that focus on repairs can be calibrated using cost and time information from standard construction practice—a major advantage over path I considering the scarcity of post-earthquake repair data.
+
+III. The third path introduces one more intermediate step: response estimation. This path envisions that the response of structures can be estimated, such as with a sophisticated finite element model, or measured with a structural health monitoring system. Given such data, damages can be described as a function of deformation, relative displacement, or acceleration of the structure or its parts. These response variables, or so-called engineering demand parameters (EDPs), are used to define the EDP-to-DM relationships, or fragility functions. Laboratory tests and post-earthquake observations suggest that EDPs are a good proxy for the damages of many types of structural components and even entire buildings.
+
+.. _figPerfAssWorkflows:
+
+.. figure:: figures/PerfAssWorkflows.png
+ :align: center
+ :figclass: align-center
+
+ Common workflows for structural performance assessment.
+
+The functions introduced above are typically idealized relationships that provide a probabilistic description of a scalar output (e.g., repair cost as a random variable) as a function of a scalar input. The cumulative distribution function and the survival function of Normal and Lognormal distributions are commonly used in fragility and vulnerability functions. Consequence functions are often constant or linear functions of the quantity of damaged components. Response estimation is the only notable exception to this type of approximation, because it is regularly performed using complex nonlinear models of structural behavior and detailed time histories of seismic excitation.
+
+Uncertainty quantification is an important part of loss assessment. The uncertainty in decision variables is almost always characterized using forward propagation techniques, Monte Carlo simulation being the most widely used among them. The distribution of random decision variables rarely belongs to a standard family; hence, a large number of samples are needed to describe details of these distributions besides central tendencies. The simulations that generate such a large number of samples at a regional scale can demand substantial computational resources. Since the idealized functions in paths I and II can be evaluated with minimal computational effort, these are applicable to large-scale studies. In path III, however, the computational effort needed for complex response simulation is often several orders of magnitude higher than that for other steps. The current state-of-the-art approach to response estimation mitigates the computational burden by simulating at most a few dozen EDP samples and re-sampling them either by fitting a probability distribution or by bootstrapping. This re-sampling technique generates a sufficiently large number of samples for the second part of path III. Although response history analyses are out of the scope of the Pelicun framework, it is designed to accommodate more efficient, approximate methods, such as capacity spectra and surrogate models. Surrogate models of structural response (e.g., [11]) promise to promptly estimate numerical response simulation results with high accuracy.
+
+Currently, the scope of the framework is limited to the simulation of direct losses and the calculations are performed independently for every building. Despite the independent calculations, the Pelicun framework can produce regional loss estimates that preserve the spatial patterns that are characteristic of the hazard and the built environment. Those patterns stem from (i) the spatial correlation in ground motion intensities; (ii) the spatial clusters of buildings that are similar from a structural or architectural point of view; (iii) the layout of lifeline networks that connect buildings and heavily influence the indirect consequences of the disaster; and (iv) the spatial correlations in socioeconomic characteristics of the region. The first two effects can be considered by careful preparation of inputs, while the other two are important only after the direct losses have been estimated. Handling buildings independently enables embarrassingly parallel job configurations on High Performance Computing (HPC) clusters. Such jobs scale very well and require minimal additional work to set up and run on a supercomputer.
+
+Performance Assessment Workflow
+-------------------------------
+
+:numref:`figMainWorkflowComps` introduces the main parts and the generic workflow of the Pelicun framework and shows how its implementation connects to other modules in the SimCenter Application Framework. Each of the four highlighted models and their logical relationship are described in more detail in :numref:`figModelTypes`.
+
+.. _figMainWorkflowComps:
+
+.. figure:: figures/MainWorkflowComps.png
+ :align: center
+ :figclass: align-center
+
+ The main components and the workflow of the Pelicun framework.
+
+.. _figModelTypes:
+
+.. figure:: figures/ModelTypes.png
+ :align: center
+ :figclass: align-center
+
+ The four types of models and their logical relationships in the Pelicun framework.
+
+The calculation starts with two files: the Asset Information Model (AIM) and the EVENT file. Currently, both files are expected to follow a standard JSON file format defined by the SimCenter. Supporting other file formats and data structures only requires a custom parser method. The open-source implementation of the framework can be extended with such a method, and the rest of the calculation requires no further adjustment. AIM is a generalized version of the widely used Building Information Model (BIM) idea and it holds structural, architectural, and performance-related information about an asset. The word asset is used to emphasize that the scope of Pelicun is not limited to building structures. The EVENT file describes the characteristic seismic events. It typically holds information about the frequency and intensity of the event, such as its occurrence rate or return period, and corresponding ground motion acceleration time histories or a collection of intensity measures.
+
+Two threads run in parallel and lead to the simulation of damage and losses: (a) response estimation, creating the response model, and simulation of EDPs; and (b) assembling the performance, damage, and loss models. In thread (a), the AIM and EVENT files are used to estimate the response of the asset to the seismic event and characterize it using EDPs. Peak interstory drift (PID), residual interstory drift (RID), and peak floor acceleration (PFA) are typically used as EDPs for building structures. Response simulation is out of the scope of Pelicun; it is either performed by the response estimation module in the Application Framework (Fig. 1) or it can be performed by any other application if Pelicun is used outside of the scope of SimCenter. The Pelicun framework can take advantage of response estimation methods that use idealized models for the seismic demand and the structural capacity, such as the capacity curve-based method in HAZUS or the regression-based closed-form approximation in the second edition of FEMA P-58 vol. 5 [12]. If the performance assessment follows path I or II from :numref:`figPerfAssWorkflows`, the estimated response is not needed, and the relevant IM values are used as EDPs.
+
+Response Model
+--------------
+
+The response model is based on the samples in the raw EDP file and provides a probabilistic description of the structural response. The samples can include an arbitrary number of EDP types (:math:`EDP_t` in Fig. 4) that describe the structural response at pre-defined locations and directions (:math:`EDP_{t,l,d}`). In buildings, locations typically correspond to floors or stories, and two directions are assigned to the primary and secondary horizontal axes. However, one might use more than two directions to collect several responses at each floor of an irregular building, and locations can refer to other parts of structures, such as the piers of a bridge or segments of a pipeline.
+
+EDPs can be resampled either after fitting a probability distribution function to the raw data or by bootstrapping the raw EDPs. Besides the widely used multivariate lognormal distribution, its truncated version is also available. This allows considering, for example, that PID values above a pre-defined truncation limit are not reliable. Another option, using the raw EDPs as-is, is useful in regional simulations to preserve the order of samples and maintain the spatial dependencies introduced in random characteristics of the building inventory or the seismic hazard.
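+
+A minimal sketch of such a calibration in an interactive calculation (the configuration keys follow published pelicun examples and should be verified against your version):
+
+.. code:: python
+
+   # fit a lognormal model to all raw EDPs, truncating PIDs above 6% drift,
+   # then resample a larger set of realizations
+   assessment.demand.calibrate_model({
+       'ALL': {'DistributionFamily': 'lognormal'},
+       'PID': {'DistributionFamily': 'lognormal', 'TruncateUpper': '0.06'},
+   })
+   assessment.demand.generate_sample({'SampleSize': 10000})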
+
+Performance Model
+-----------------
+
+Thread (b) in Fig. 3 starts with parsing the AIM file and constructing a performance model. If the definition in the file is incomplete, the auto-populate method tries to fill the gaps using information about normative component quantities and pre-defined rulesets. Rulesets can link structural information, such as the year of construction, to performance model details, such as the type of structural details and corresponding components.
+
+The performance model in Pelicun is based on that of the FEMA P-58 method. It disaggregates the asset into a hierarchical description of its structural and non-structural components and contents (Fig. 4):
+
+- Fragility Groups (FGs) are at the highest level of this hierarchy. Each FG is a collection of components whose fragility is controlled by a specific type of EDP and whose damage leads to similar consequences.
+
+- Each FG can be broken down into Performance Groups (PGs). A PG collects components whose damage is controlled by the same EDP. Not only the type of the EDP, but also its location and direction have to be identical.
+
+- In the third layer, PGs are broken down into the smallest units: Component Groups (CGs). A CG collects components that experience the same damage (i.e., there is perfect correlation between their random Damage States). Each CG has a Component Quantity assigned to it that defines the amount of components in that group. Both international standard and imperial units are supported as long as they are consistent with the type of component (e.g., m2, ft2, and in2 are all acceptable for the area of suspended ceilings, but ft is not.) Quantities can be random variables with either Normal or Lognormal distribution.
+
+In performance models built according to the FEMA P-58 method, buildings typically have FGs sensitive to either PID or PFA. Within each FG, components are grouped into PGs by stories and the drift-sensitive ones are also grouped by direction. The damage of acceleration-sensitive components is based on the maximum of PFAs in the two horizontal directions. The Applied Technology Council (ATC) provides a recommendation for the correlation between component damages within a PG. If the damages are correlated, all components in a PG are collected in a single CG. Otherwise, the performance model can identify an arbitrary number of CGs and their damages are evaluated independently.
+
+The Pelicun framework handles the probabilistic sampling for the entire performance model with a single high-dimensional random variable. This allows for custom dependencies in the model at any level of the hierarchy. For example, one can assign a 0.8 correlation coefficient between the fragility of all components in an FG that are on the same floor, but in different directions and hence, in different PGs. In another example, one can assign a 0.7 correlation coefficient between component quantities in the same direction along all or a subset of floors. These correlations can capture more realistic exposure and damage and consider the influence of extreme cases. Such cases are overlooked when independent variables are used because deviations from the mean cancel each other out.
+
+This performance model in Fig. 4 can also be applied to a more holistic description of buildings. For example, to describe earthquake damage to buildings following HAZUS, three FGs can handle structural, acceleration-sensitive non-structural, and drift-sensitive non-structural components. Each FG has a single PG because HAZUS uses building-level EDPs; only one location and direction is used in this case. Since components describe the damage to the entire building, using one CG per PG with “1 ea” as the assigned, deterministic component quantity is appropriate.
+
+The performance model in Pelicun can help bridge the gap between the holistic and atomic approaches to performance assessment by using components at an intermediate resolution, such as story-based descriptions. These models are promising because they require less detailed inputs than FEMA P-58, but they can provide more information than the building-level approaches in HAZUS.
+
+Damage Model
+------------
+
+Each Fragility Group in the performance model shall have a corresponding fragility model in the Damage & Loss Database. In the fragility model, Damage State Groups (DSGs) collect Damage States (DSs) that are triggered by similar magnitudes of the controlling EDP. In Pelicun, Lognormal damage state exceedance curves are converted into random EDP limits that trigger DSGs. When multiple DSGs are used, assuming perfect correlation between their EDP limits reproduces the conventional model that uses exceedance curves. The approach used in this framework, however, allows researchers to experiment with partially correlated or independent EDP limits. Experimental results suggest that these might be more realistic representations of component fragility. A DSG often has only a single DS. When multiple DSs are present, they can be triggered either simultaneously or they can be mutually exclusive following the corresponding definitions in FEMA P-58.
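+
+For illustration, the probability that a DSG with a lognormal exceedance curve is triggered at a given EDP level can be computed as follows (a generic sketch, not pelicun's internal implementation):
+
+.. code:: python
+
+   import numpy as np
+   from scipy.stats import norm
+
+   def p_dsg_triggered(edp: float, theta: float, beta: float) -> float:
+       """Lognormal exceedance probability: median theta, log-std beta."""
+       return norm.cdf(np.log(edp / theta) / beta)
+
+   p_dsg_triggered(0.025, theta=0.02, beta=0.4)  # approx. 0.71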
+
+Loss Model
+----------
+
+Each Damage State has a corresponding set of consequence descriptions in the Damage & Loss Database. These are used to define a consequence model that identifies a set of decision variables (DVs) and corresponding consequence functions that link the amount of damaged components to the value of the DV. The constant and quantity-dependent stepwise consequence functions from FEMA P-58 are available in Pelicun.
+
+Collapses and their consequences are also handled by the damage and the loss models. The collapse model describes collapse events using the concept of collapse modes introduced in FEMA P-58. Collapse is either triggered by EDP values exceeding a collapse limit or it can be randomly triggered based on a collapse probability prescribed in the AIM file. The latter approach allows for external collapse fragility models. Each collapse mode has a corresponding collapse consequence model that describes the corresponding injuries and losses.
+
+Similar to the performance model, the randomness in damage and losses is handled with a few high-dimensional random variables. This allows researchers to experiment with various correlation structures between damages of components and the consequences of those damages. Among the consequences, the repair costs and times and the number of injuries of various severities are also linked, allowing one to consider, for example, that repairs that cost more than expected will also take longer to finish.
+
+Once the damage and loss models are assembled, the previously sampled EDPs are used to evaluate the Damage Measures (Fig. 3). These DMs identify the Damage State of each Component Group in the structure. This information is used by the loss simulation to generate the Decision Variables. The final step of the calculation in Pelicun is to aggregate results into a Damage and Loss (DL) file that provides a concise overview of the damage and losses. All intermediate data generated during the calculation (i.e., EDPs, DMs, DVs) are also saved in CSV files.
diff --git a/doc/source/user_guide/resources_for_new_python_users.rst b/doc/source/user_guide/resources_for_new_python_users.rst
new file mode 100644
index 000000000..0e96a1ec2
--- /dev/null
+++ b/doc/source/user_guide/resources_for_new_python_users.rst
@@ -0,0 +1,19 @@
+.. _new_python_users:
+
+Resources for new Python users
+------------------------------
+
+The NHERI-SimCenter has hosted several programming bootcamps, whose reading material and recordings are available in the `NHERI-SimCenter Knowledge Hub `_.
+For new Python users, we recommend the following resources:
+
+.. grid:: 1 2 2 2
+ :gutter: 4
+ :padding: 2 2 0 0
+ :class-container: sd-text-center
+
+ .. grid-item-card:: 2023 Programming Bootcamp > Python Quickstart Tutorial
+ :class-card: intro-card
+ :shadow: md
+ :link: https://nheri-simcenter.github.io/SimCenterBootcamp2023/source/lecture_videos_part1.html#chapter-6-modules-and-subprocess
+
+    Training material on Python basics, data types, loops, conditions, file IO, plotting, object-oriented programming, classes, inheritance, and modules. Presented by Peter Mackenzie-Helnwein.
diff --git a/ignore_words.txt b/ignore_words.txt
new file mode 100644
index 000000000..d2d0a1f68
--- /dev/null
+++ b/ignore_words.txt
@@ -0,0 +1,2 @@
+smoot
+ACI
diff --git a/pelicun/__init__.py b/pelicun/__init__.py
index 5497718a5..3b8d6118f 100644
--- a/pelicun/__init__.py
+++ b/pelicun/__init__.py
@@ -1,52 +1,50 @@
-"""
--*- coding: utf-8 -*-
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
-Copyright (c) 2018 Leland Stanford Junior University
-Copyright (c) 2018 The Regents of the University of California
+# This file is part of pelicun.
-This file is part of pelicun.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
-1. Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
-2. Redistributions in binary form must reproduce the above copyright notice,
-this list of conditions and the following disclaimer in the documentation
-and/or other materials provided with the distribution.
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
-3. Neither the name of the copyright holder nor the names of its contributors
-may be used to endorse or promote products derived from this software without
-specific prior written permission.
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
-You should have received a copy of the BSD 3-Clause License along with
-pelicun. If not, see .
+# Contributors:
+# Adam Zsarnóczay
-Contributors:
-Adam Zsarnóczay
-"""
+"""Pelicun library."""
-name = "pelicun"
+name = 'pelicun'
-__version__ = '3.3.2'
+__version__ = '3.3.3'
__copyright__ = (
- "Copyright (c) 2018 Leland Stanford "
- "Junior University and The Regents "
- "of the University of California"
+ 'Copyright (c) 2018 Leland Stanford '
+ 'Junior University and The Regents '
+ 'of the University of California'
)
-__license__ = "BSD 3-Clause License"
+__license__ = 'BSD 3-Clause License'
diff --git a/pelicun/assessment.py b/pelicun/assessment.py
index 2165ced17..8525c2f1d 100644
--- a/pelicun/assessment.py
+++ b/pelicun/assessment.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,131 +37,152 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This module has classes and methods that control the performance assessment.
-.. rubric:: Contents
+"""Classes and methods that control the performance assessment."""
-.. autosummary::
+from __future__ import annotations
- Assessment
+import json
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
-"""
+import numpy as np
+import pandas as pd
-import json
-from . import base
-from . import file_io
-from . import model
-from .__init__ import __version__ as pelicun_version
+from pelicun import base, file_io, model, uq
+from pelicun.__init__ import __version__ as pelicun_version # type: ignore
+from pelicun.base import EDP_to_demand_type, get
+
+if TYPE_CHECKING:
+ from pelicun.base import Logger
+
+default_dbs = {
+ 'fragility': {
+ 'FEMA P-58': 'damage_DB_FEMA_P58_2nd.csv',
+ 'Hazus Earthquake - Buildings': 'damage_DB_Hazus_EQ_bldg.csv',
+ 'Hazus Earthquake - Stories': 'damage_DB_Hazus_EQ_story.csv',
+ 'Hazus Earthquake - Transportation': 'damage_DB_Hazus_EQ_trnsp.csv',
+ 'Hazus Earthquake - Water': 'damage_DB_Hazus_EQ_water.csv',
+ 'Hazus Hurricane': 'damage_DB_SimCenter_Hazus_HU_bldg.csv',
+ },
+ 'repair': {
+ 'FEMA P-58': 'loss_repair_DB_FEMA_P58_2nd.csv',
+ 'Hazus Earthquake - Buildings': 'loss_repair_DB_Hazus_EQ_bldg.csv',
+ 'Hazus Earthquake - Stories': 'loss_repair_DB_Hazus_EQ_story.csv',
+ 'Hazus Earthquake - Transportation': 'loss_repair_DB_Hazus_EQ_trnsp.csv',
+ 'Hazus Hurricane': 'loss_repair_DB_SimCenter_Hazus_HU_bldg.csv',
+ },
+}
+default_damage_processes = {
+ 'FEMA P-58': {
+ '1_excessive.coll.DEM': {'DS1': 'collapse_DS1'},
+ '2_collapse': {'DS1': 'ALL_NA'},
+ '3_excessiveRID': {'DS1': 'irreparable_DS1'},
+ },
+ # TODO(AZ): expand with ground failure logic
+ 'Hazus Earthquake': {
+ '1_STR': {'DS5': 'collapse_DS1'},
+ '2_LF': {'DS5': 'collapse_DS1'},
+ '3_excessive.coll.DEM': {'DS1': 'collapse_DS1'},
+ '4_collapse': {'DS1': 'ALL_NA'},
+ '5_excessiveRID': {'DS1': 'irreparable_DS1'},
+ },
+ 'Hazus Hurricane': {},
+}
-class Assessment:
+
+class AssessmentBase:
"""
+ Base class for Assessment objects.
+
Assessment objects manage the models, data, and calculations in pelicun.
- Parameters
- ----------
- demand: DemandModel
- ...
- asset: AssetModel
- ...
- damage: DamageModel
- ...
- repair: RepairModel
- ...
- stories: int
- Number of stories.
- options: Options
- Options object.
"""
- def __init__(self, config_options=None):
+ __slots__: list[str] = [
+ 'asset',
+ 'damage',
+ 'demand',
+ 'log',
+ 'loss',
+ 'options',
+ 'stories',
+ 'unit_conversion_factors',
+ ]
+
+ def __init__(self, config_options: dict[str, Any] | None = None) -> None:
"""
- Initializes an Assessment object.
+ Initialize an Assessment object.
Parameters
----------
- config_options (Optional[dict]):
+ config_options:
User-specified configuration dictionary.
- """
-
- self.stories = None
+ """
+ self.stories: int | None = None
self.options = base.Options(config_options, self)
+ self.unit_conversion_factors: dict = base.parse_units(
+ self.options.units_file
+ )
- self.unit_conversion_factors = base.parse_units(self.options.units_file)
-
- self.log = self.options.log
-
+ self.log: Logger = self.options.log
self.log.msg(
f'pelicun {pelicun_version} | \n',
prepend_timestamp=False,
prepend_blank_space=False,
)
-
self.log.print_system_info()
-
self.log.div()
self.log.msg('Assessment Started')
- @property
- def demand(self):
- """
- Return a DemandModel object that manages the demand information.
-
- """
- # pylint: disable = access-member-before-definition
-
- if hasattr(self, '_demand'):
- return self._demand
-
- self._demand = model.DemandModel(self)
- return self.demand
+ self.demand: model.DemandModel = model.DemandModel(self)
+ self.asset: model.AssetModel = model.AssetModel(self)
+ self.damage: model.DamageModel = model.DamageModel(self)
+ self.loss: model.LossModel = model.LossModel(self)
@property
- def asset(self):
+ def bldg_repair(self) -> model.LossModel:
"""
- Return an AssetModel object that manages the asset information.
+        Exists for backwards compatibility.
- """
- # pylint: disable = access-member-before-definition
+ Returns
+ -------
+ model.LossModel
+ The loss model.
- if hasattr(self, '_asset'):
- return self._asset
+ """
+ self.log.warning(
+ '`.bldg_repair` is deprecated and will be dropped in '
+ 'future versions of pelicun. '
+ 'Please use `.loss` instead.'
+ )
- self._asset = model.AssetModel(self)
- return self.asset
+ return self.loss
@property
- def damage(self):
- """
- Return an DamageModel object that manages the damage information.
-
+ def repair(self) -> model.LossModel:
"""
- # pylint: disable = access-member-before-definition
+        Exists for backwards compatibility.
- if hasattr(self, '_damage'):
- return self._damage
-
- self._damage = model.DamageModel(self)
- return self.damage
+ Returns
+ -------
+ RepairModel_DS
+ The damage state-driven component loss model.
- @property
- def repair(self):
"""
- Return a RepairModel object that manages the repair information.
+ self.log.warning(
+ '`.repair` is deprecated and will be dropped in '
+ 'future versions of pelicun. '
+ 'Please use `.loss` instead.'
+ )
+ return self.loss
+ def get_default_data(self, data_name: str) -> pd.DataFrame:
"""
- # pylint: disable = access-member-before-definition
-
- if hasattr(self, '_repair'):
- return self._repair
+ Load a default data file.
- self._repair = model.RepairModel(self)
- return self.repair
-
- def get_default_data(self, data_name):
- """
Loads a default data file by name and returns it. This method
is specifically designed to access predefined CSV files from a
structured directory path related to the SimCenter fragility
@@ -170,7 +190,7 @@ def get_default_data(self, data_name):
Parameters
----------
- data_name : str
+ data_name: str
The name of the CSV file to be loaded, without the '.csv'
extension. This name is used to construct the full path to
the file.
@@ -180,14 +200,34 @@ def get_default_data(self, data_name):
pd.DataFrame
The DataFrame containing the data loaded from the
specified CSV file.
+
"""
+ # Backwards compatibility.
+ if 'fragility_DB' in data_name:
+ data_name = data_name.replace('fragility_DB', 'damage_DB')
+ self.log.warning(
+ '`fragility_DB` is deprecated and will be dropped in '
+ 'future versions of pelicun. '
+ 'Please use `damage_DB` instead.'
+ )
+ if 'bldg_repair_DB' in data_name:
+ data_name = data_name.replace('bldg_repair_DB', 'loss_repair_DB')
+ self.log.warning(
+ '`bldg_repair_DB` is deprecated and will be dropped in '
+ 'future versions of pelicun. '
+ 'Please use `loss_repair_DB` instead.'
+ )
+
data_path = f'{base.pelicun_path}/resources/SimCenterDBDL/{data_name}.csv'
- return file_io.load_data(
+ data = file_io.load_data(
data_path, None, orientation=1, reindex=False, log=self.log
)
- def get_default_metadata(self, data_name):
+ assert isinstance(data, pd.DataFrame)
+ return data
+
+ def get_default_metadata(self, data_name: str) -> dict:
"""
Load a default metadata file and pass it to the user.
@@ -202,18 +242,26 @@ def get_default_metadata(self, data_name):
Default metadata
"""
-
+ # Backwards compatibility.
+ if 'fragility_DB' in data_name:
+ data_name = data_name.replace('fragility_DB', 'damage_DB')
+ self.log.warning(
+ '`fragility_DB` is deprecated and will be dropped in '
+ 'future versions of pelicun. Please use `damage_DB` instead.'
+ )
data_path = f'{base.pelicun_path}/resources/SimCenterDBDL/{data_name}.json'
- with open(data_path, 'r', encoding='utf-8') as f:
+ with Path(data_path).open(encoding='utf-8') as f:
data = json.load(f)
- return data
+ return data # noqa: RET504
- def calc_unit_scale_factor(self, unit):
+ def calc_unit_scale_factor(self, unit: str) -> float:
"""
+ Determine unit scale factor.
+
Determines the scale factor from input unit to the
- corresponding base unit
+ corresponding base unit.
Parameters
----------
@@ -231,14 +279,14 @@ def calc_unit_scale_factor(self, unit):
------
KeyError
When an invalid unit is specified
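+
+ Examples
+ --------
+ Illustrative only; the numeric result depends on the active units
+ file (a base unit system in meters and an existing `assessment`
+ instance are assumed):
+
+ >>> assessment.calc_unit_scale_factor('100 inch')  # doctest: +SKIP
+ 2.54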
- """
+ """
unit_lst = unit.strip().split(' ')
# check if there is a quantity specified; if yes, parse it
if len(unit_lst) > 1:
- unit_count, unit_name = unit_lst
- unit_count = float(unit_count)
+ unit_count_str, unit_name = unit_lst
+ unit_count = float(unit_count_str)
else:
unit_count = 1
@@ -248,14 +296,15 @@ def calc_unit_scale_factor(self, unit):
scale_factor = unit_count * self.unit_conversion_factors[unit_name]
except KeyError as exc:
- raise KeyError(
- f"Specified unit not recognized: {unit_count} {unit_name}"
- ) from exc
+ msg = f'Specified unit not recognized: {unit_count} {unit_name}'
+ raise KeyError(msg) from exc
return scale_factor
- def scale_factor(self, unit):
+ def scale_factor(self, unit: str | None) -> float:
"""
+ Get scale factor of given unit.
+
Returns the scale factor of a given unit. If the unit is
unknown it raises an error. If the unit is None it returns
1.00.
@@ -276,14 +325,1627 @@ def scale_factor(self, unit):
If the unit is unknown.
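+
+ Examples
+ --------
+ Illustrative only (an existing `assessment` instance is assumed);
+ `None` always maps to 1.0:
+
+ >>> assessment.scale_factor(None)  # doctest: +SKIP
+ 1.0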
"""
-
if unit is not None:
if unit in self.unit_conversion_factors:
scale_factor = self.unit_conversion_factors[unit]
else:
- raise ValueError(f"Unknown unit: {unit}")
+ msg = f'Unknown unit: {unit}'
+ raise ValueError(msg)
else:
scale_factor = 1.0
return scale_factor
+
+
+class Assessment(AssessmentBase):
+ """
+ Assessment class.
+
+ Has methods implementing a Scenario-Based assessment.
+
+ """
+
+ __slots__: list[str] = []
+
+ def calculate_damage(
+ self,
+ num_stories: int,
+ demand_config: dict,
+ demand_data_source: str | dict,
+ cmp_data_source: str | dict[str, pd.DataFrame],
+ damage_data_paths: list[str | pd.DataFrame],
+ dmg_process: dict | None = None,
+ scaling_specification: dict | None = None,
+ residual_drift_configuration: dict | None = None,
+ collapse_fragility_configuration: dict | None = None,
+ block_batch_size: int = 1000,
+ ) -> None:
+ """
+ Calculate damage.
+
+ Parameters
+ ----------
+ num_stories: int
+ Number of stories of the asset. Applicable to buildings.
+ demand_config: dict
+ A dictionary containing configuration options for the
+ sample generation. Key options include:
+ * 'SampleSize': The number of samples to generate.
+ * 'PreserveRawOrder': Boolean indicating whether to
+ preserve the order of the raw data. Defaults to False.
+ * 'DemandCloning': Specifies if and how demand cloning
+ should be applied. Can be a boolean or a detailed
+ configuration.
+ demand_data_source: string or dict
+ If string, the demand_data_source is a file prefix
+ (`<prefix>` in the following description) that identifies
+ the following files: `<prefix>_marginals.csv`,
+ `<prefix>_empirical.csv`, `<prefix>_correlation.csv`. If dict,
+ the demand data source is a dictionary with the following
+ optional keys: 'marginals', 'empirical', and
+ 'correlation'. The value under each key shall be a
+ DataFrame.
+ cmp_data_source: str or dict
+ The source from where to load the component model data. If
+ it's a string, it should be the prefix for three files:
+ one for marginal distributions (`<prefix>_marginals.csv`),
+ one for empirical data (`<prefix>_empirical.csv`), and one
+ for correlation data (`<prefix>_correlation.csv`). If it's
+ a dictionary, it should have keys 'marginals',
+ 'empirical', and 'correlation', with each key associated
+ with a DataFrame containing the corresponding data.
+ damage_data_paths: list of (string | DataFrame)
+ List of paths to data or files with damage model
+ information. Default XY datasets can be accessed as
+ PelicunDefault/XY. Order matters. Parameters defined in
+ prior elements in the list take precedence over the same
+ parameters in subsequent data paths. That is, place the
+ default datasets last.
+ dmg_process: dict, optional
+ Allows simulating damage processes, where damage to some
+ component can alter the damage state of other components.
+ scaling_specification: dict, optional
+ A dictionary defining adjustments to component capacity medians.
+ Example: {'CMP-1-1': '*1.2', 'CMP-1-2': '/1.4'}
+ The keys are individual components that should be present
+ in the `capacity_sample`. The values should be strings
+ containing an operation followed by the value formatted as
+ a float. The operation can be '+' for addition, '-' for
+ subtraction, '*' for multiplication, and '/' for division.
+ residual_drift_configuration: dict
+ Dictionary containing the following keys-values:
+ - params: dict
+ A dictionary containing parameters required for the
+ estimation method, such as 'yield_drift', which is the
+ drift at which yielding is expected to occur.
+ - method: str, optional
+ The method used to estimate the RID values. Currently,
+ only 'FEMA P58' is implemented. Defaults to 'FEMA P58'.
+ collapse_fragility_configuration: dict
+ Dictionary containing the following keys-values:
+ - label: str
+ Label to use to extend the MultiIndex of the demand
+ sample.
+ - value: float
+ Value to add to the rows of the additional column.
+ - unit: str
+ Unit that corresponds to the additional column.
+ - location: str, optional
+ Optional location, defaults to `0`.
+ - direction: str, optional
+ Optional direction, defaults to `1`.
+ block_batch_size: int
+ Maximum number of components in each batch.
+
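+ Examples
+ --------
+ A minimal sketch; the demand and component file prefixes are
+ hypothetical, and the default damage dataset name is taken from
+ the FEMA P-58 data referenced elsewhere in this module:
+
+ >>> asmt = Assessment()
+ >>> asmt.calculate_damage(
+ ...     num_stories=3,
+ ...     demand_config={'SampleSize': 1000},
+ ...     demand_data_source='demands/sample',
+ ...     cmp_data_source='model/CMP',
+ ...     damage_data_paths=[
+ ...         'PelicunDefault/damage_DB_FEMA_P58_2nd.csv',
+ ...     ],
+ ... )  # doctest: +SKIP
+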
+ """
+ # TODO(JVM): when we build the API docs, ensure the above is
+ # properly rendered.
+
+ self.demand.load_model(demand_data_source)
+ self.demand.generate_sample(demand_config)
+
+ if residual_drift_configuration:
+ self.demand.estimate_RID_and_adjust_sample(
+ residual_drift_configuration['parameters'],
+ residual_drift_configuration['method'],
+ )
+
+ if collapse_fragility_configuration:
+ self.demand.expand_sample(
+ collapse_fragility_configuration['label'],
+ collapse_fragility_configuration['value'],
+ collapse_fragility_configuration['unit'],
+ )
+
+ self.stories = num_stories
+ self.asset.load_cmp_model(cmp_data_source)
+ self.asset.generate_cmp_sample()
+
+ self.damage.load_model_parameters(
+ damage_data_paths, set(self.asset.list_unique_component_ids())
+ )
+ self.damage.calculate(dmg_process, block_batch_size, scaling_specification)
+
+ def calculate_loss(
+ self,
+ decision_variables: tuple[str, ...],
+ loss_model_data_paths: list[str | pd.DataFrame],
+ loss_map_path: str | pd.DataFrame | None = None,
+ loss_map_policy: str | None = None,
+ ) -> None:
+ """
+ Calculate loss.
+
+ Parameters
+ ----------
+ decision_variables: tuple
+ Defines the decision variables to be included in the loss
+ calculations. Any subset of the supported decision variables
+ can be used; loss parameters are only required for the ones
+ included.
+ loss_model_data_paths: list of (string | DataFrame)
+ List of paths to data or files with loss model
+ information. Default XY datasets can be accessed as
+ PelicunDefault/XY. Order matters. Parameters defined in
+ prior elements in the list take precedence over the same
+ parameters in subsequent data paths. That is, place the
+ default datasets last.
+ loss_map_path: str or pd.DataFrame or None
+ Path to a csv file or DataFrame object that maps
+ component IDs to their loss parameter definitions.
+ loss_map_policy: str or None
+ If None, does not modify the loss map.
+ If set to `fill`, each component ID that is present in
+ the asset model but not in the loss map is mapped to
+ itself, but `excessiveRID` is excluded.
+ If set to `fill_all`, each component ID that is present in
+ the asset model but not in the loss map is mapped to
+ itself without exceptions.
+
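+ Examples
+ --------
+ A minimal sketch; the loss dataset name is an assumption and may
+ differ from the files actually shipped with pelicun:
+
+ >>> asmt.calculate_loss(
+ ...     decision_variables=('Cost', 'Time'),
+ ...     loss_model_data_paths=[
+ ...         'PelicunDefault/loss_repair_DB_FEMA_P58_2nd.csv',
+ ...     ],
+ ...     loss_map_policy='fill',
+ ... )  # doctest: +SKIP
+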
+ """
+ self.loss.decision_variables = decision_variables
+ self.loss.add_loss_map(loss_map_path, loss_map_policy)
+ self.loss.load_model_parameters(loss_model_data_paths)
+ self.loss.calculate()
+
+ def aggregate_loss(
+ self,
+ replacement_configuration: (
+ tuple[uq.RandomVariableRegistry, dict[str, float]] | None
+ ) = None,
+ loss_combination: dict | None = None,
+ ) -> tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Aggregate losses.
+
+ Parameters
+ ----------
+ replacement_configuration: Tuple, optional
+ Tuple containing a RandomVariableRegistry and a
+ dictionary. The RandomVariableRegistry is defining
+ building replacement consequence RVs for the active
+ decision variables. The dictionary defines exceedance
+ thresholds. If the aggregated value for a decision
+ variable (conditioned on no replacement) exceeds the
+ threshold, then replacement is triggered. This can happen
+ for multiple decision variables at the same
+ realization. The consequence keyword `replacement` is
+ reserved to represent exclusive triggering of the
+ replacement consequences, and other consequences are
+ ignored for those realizations where replacement is
+ triggered. When assigned to None, then `replacement` is
+ still treated as an exclusive consequence (other
+ consequences are set to zero when replacement is nonzero)
+ but it is not being additionally triggered by the
+ exceedance of any thresholds. The aggregated loss sample
+ contains an additional column with information on whether
+ replacement was already present or triggered by a
+ threshold exceedance for each realization.
+ loss_combination: dict, optional
+ Dictionary defining how losses for specific components
+ should be aggregated for a given decision variable. It has
+ the following structure: {`dv`: {(`c1`, `c2`): `arr`,
+ ...}, ...}, where `dv` is some decision variable, (`c1`,
+ `c2`) is a tuple defining a component pair, `arr` is a NxN
+ numpy array defining a combination table, and `...` means
+ that more key-value pairs with the same schema can exist
+ in the dictionaries. The loss sample is expected to
+ contain columns that include both `c1` and `c2` listed as
+ the component. The combination is applied to all pairs of
+ columns where the components are `c1` and `c2`, and all of
+ the rest of the multiindex levels match (`loc`, `dir`,
+ `uid`). This means, for example, that when combining wind
+ and flood losses, the asset model should contain both a
+ wind and a flood component defined at the same
+ location-direction. `arr` can also be an M-dimensional
+ numpy array where each dimension has length N (NxNx...xN).
+ This structure allows for the loss combination of M
+ components. In this case the (`c1`, `c2`) tuple should
+ contain M elements instead of two.
+
+ Notes
+ -----
+ Regardless of the value of the arguments, this method does not
+ alter the state of the loss model, i.e., it does not modify
+ the values of the `.sample` attributes.
+
+ Returns
+ -------
+ tuple
+ Dataframe with the aggregated loss of each realization,
+ and another boolean dataframe with information on which DV
+ thresholds were exceeded in each realization, triggering
+ replacement. If no thresholds are specified it only
+ contains False values.
+
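+ Examples
+ --------
+ Sketch of a two-component loss combination input; the component
+ IDs and the 2x2 combination array are made up:
+
+ >>> import numpy as np
+ >>> arr = np.array([[0.0, 1.0], [1.0, 1.5]])
+ >>> agg, exceedance = asmt.aggregate_loss(
+ ...     loss_combination={'Cost': {('WIND.1', 'FLOOD.1'): arr}},
+ ... )  # doctest: +SKIP
+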
+ """
+ output = self.loss.aggregate_losses(
+ replacement_configuration, loss_combination, future=True
+ )
+ assert isinstance(output, tuple)
+ return output
+
+
+class DLCalculationAssessment(AssessmentBase):
+ """Base class for the assessment objects used in `DL_calculation.py`."""
+
+ __slots__: list[str] = []
+
+ def calculate_demand( # noqa: C901
+ self,
+ demand_path: Path,
+ collapse_limits: dict[str, float] | None,
+ length_unit: str | None,
+ demand_calibration: dict | None,
+ sample_size: int,
+ demand_cloning: dict | None,
+ residual_drift_inference: dict | None,
+ *,
+ coupled_demands: bool,
+ ) -> None:
+ """
+ Calculate demands.
+
+ Parameters
+ ----------
+ demand_path: Path
+ Path to the demand data file.
+ collapse_limits: dict[str, float] or None
+ Optional dictionary with demand types and their respective
+ collapse limits.
+ length_unit: str or None
+ Unit of length to be used to add units to the demand data
+ if needed.
+ demand_calibration: dict or None
+ Calibration data for the demand model.
+ sample_size: int
+ Number of realizations.
+ coupled_demands: bool
+ Whether to preserve the raw order of the demands.
+ demand_cloning: dict or None
+ Demand cloning configuration.
+ residual_drift_inference: dict or None
+ Information for residual drift inference.
+
+ Raises
+ ------
+ ValueError
+ When an unknown residual drift method is specified.
+
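+ Examples
+ --------
+ A minimal sketch with a hypothetical input file and a made-up
+ collapse limit:
+
+ >>> asmt.calculate_demand(
+ ...     demand_path=Path('response.csv'),
+ ...     collapse_limits={'PID': 0.06},
+ ...     length_unit='in',
+ ...     demand_calibration=None,
+ ...     sample_size=500,
+ ...     demand_cloning=None,
+ ...     residual_drift_inference=None,
+ ...     coupled_demands=False,
+ ... )  # doctest: +SKIP
+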
+ """
+ idx = pd.IndexSlice
+ raw_demands = pd.read_csv(demand_path, index_col=0)
+
+ # remove excessive demands that are considered collapses, if needed
+ if collapse_limits:
+ raw_demands_m = base.convert_to_MultiIndex(raw_demands, axis=1)
+ assert isinstance(raw_demands_m, pd.DataFrame)
+ raw_demands = raw_demands_m
+
+ if 'Units' in raw_demands.index:
+ raw_units = raw_demands.loc['Units', :]
+ raw_demands = raw_demands.drop('Units', axis=0).astype(float)
+
+ else:
+ raw_units = None
+
+ dem_to_drop = np.full(raw_demands.shape[0], fill_value=False)
+
+ for dem_type, limit in collapse_limits.items():
+ assert isinstance(dem_type, str)
+ assert isinstance(limit, (str, float))
+ nlevels_with_event_id = 4
+ if raw_demands.columns.nlevels == nlevels_with_event_id:
+ dem_to_drop += raw_demands.loc[
+ :, # type: ignore
+ idx[:, dem_type, :, :],
+ ].max(axis=1) > float(limit)
+
+ else:
+ dem_to_drop += raw_demands.loc[
+ :, # type: ignore
+ idx[dem_type, :, :],
+ ].max(axis=1) > float(limit)
+
+ raw_demands = raw_demands.loc[~dem_to_drop, :]
+
+ if isinstance(raw_units, pd.Series):
+ raw_demands = pd.concat(
+ [raw_demands, raw_units.to_frame().T], axis=0
+ )
+
+ self.log.msg(
+ f'{np.sum(dem_to_drop)} realizations removed from the demand '
+ f'input because they exceed the collapse limit. The remaining '
+ f'sample size: {raw_demands.shape[0]}'
+ )
+
+ # add units to the demand data if needed
+ if 'Units' not in raw_demands.index:
+ if length_unit is None:
+ msg = 'A length unit is required to infer demand units.'
+ raise ValueError(msg)
+ demands = _add_units(raw_demands, length_unit)
+
+ else:
+ demands = raw_demands
+
+ # load the available demand sample
+ self.demand.load_sample(demands)
+
+ # get the calibration information
+ if demand_calibration:
+ # then use it to calibrate the demand model
+ self.demand.calibrate_model(demand_calibration)
+
+ else:
+ # if no calibration is requested,
+ # set all demands to use empirical distribution
+ self.demand.calibrate_model({'ALL': {'DistributionFamily': 'empirical'}})
+
+ # and generate a new demand sample
+ self.demand.generate_sample(
+ {
+ 'SampleSize': sample_size,
+ 'PreserveRawOrder': coupled_demands,
+ 'DemandCloning': demand_cloning,
+ }
+ )
+
+ # get the generated demand sample
+ demand_sample_tuple = self.demand.save_sample(save_units=True)
+ assert demand_sample_tuple is not None
+ demand_sample, demand_units = demand_sample_tuple
+ assert isinstance(demand_sample, pd.DataFrame)
+ assert isinstance(demand_units, pd.Series)
+
+ demand_sample = pd.concat([demand_sample, demand_units.to_frame().T])
+
+ # get residual drift estimates, if needed
+ if residual_drift_inference:
+ # `method` is guaranteed to exist because it is confirmed when
+ # parsing the configuration file.
+ rid_inference_method = residual_drift_inference.pop('method')
+
+ if rid_inference_method == 'FEMA P-58':
+ rid_list: list[pd.DataFrame] = []
+ pid = demand_sample['PID'].copy()
+ pid = pid.drop('Units')
+ pid = pid.astype(float)
+
+ for direction, delta_yield in residual_drift_inference.items():
+ pids = pid.loc[:, idx[:, direction]] # type: ignore
+ assert isinstance(pids, pd.DataFrame)
+ rid = self.demand.estimate_RID(
+ pids,
+ {'yield_drift': float(delta_yield)},
+ )
+
+ rid_list.append(rid)
+
+ rid = pd.concat(rid_list, axis=1)
+ rid_units = pd.Series(
+ ['unitless'] * rid.shape[1],
+ index=rid.columns,
+ name='Units',
+ )
+ rid_sample = pd.concat([rid, rid_units.to_frame().T])
+ demand_sample = pd.concat([demand_sample, rid_sample], axis=1)
+
+ else:
+ msg = (
+ f'Unknown residual drift inference method: '
+ f'`{rid_inference_method}`.'
+ )
+ raise ValueError(msg)
+
+ # add a constant one demand
+ demand_sample['ONE', '0', '1'] = np.ones(demand_sample.shape[0])
+ demand_sample.loc['Units', ('ONE', '0', '1')] = 'unitless'
+
+ self.demand.load_sample(base.convert_to_SimpleIndex(demand_sample, axis=1))
+
+ def calculate_asset(
+ self,
+ num_stories: int,
+ component_assignment_file: str | None,
+ collapse_fragility_demand_type: str | None,
+ component_sample_file: str | None,
+ *,
+ add_irreparable_damage_columns: bool,
+ ) -> None:
+ """
+ Generate the asset model sample.
+
+ Parameters
+ ----------
+ num_stories: int
+ Number of stories.
+ component_assignment_file: str or None
+ Path to a component assignment file.
+ collapse_fragility_demand_type: str or None
+ Optional demand type for the collapse fragility.
+ add_irreparable_damage_columns: bool
+ Whether to add columns for irreparable damage.
+ component_sample_file: str or None
+ Optional path to an existing component sample file.
+
+ Raises
+ ------
+ ValueError
+ With invalid combinations of arguments.
+
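+ Examples
+ --------
+ A minimal sketch; the component assignment file name is
+ hypothetical:
+
+ >>> asmt.calculate_asset(
+ ...     num_stories=3,
+ ...     component_assignment_file='CMP_marginals.csv',
+ ...     collapse_fragility_demand_type=None,
+ ...     component_sample_file=None,
+ ...     add_irreparable_damage_columns=True,
+ ... )  # doctest: +SKIP
+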
+ """
+ # retrieve the demand sample
+ demand_sample = self.demand.save_sample()
+ assert isinstance(demand_sample, pd.DataFrame)
+
+ # set the number of stories
+ if num_stories:
+ self.stories = num_stories
+
+ # We either accept a `component_assignment_file` or a
+ # `component_sample_file`, not both.
+ if (
+ component_assignment_file is not None
+ and component_sample_file is not None
+ ):
+ msg = (
+ 'Both `component_assignment_file` and '
+ '`component_sample_file` are provided. '
+ 'Please provide only one.'
+ )
+ raise ValueError(msg)
+
+ # load a component model and generate a sample
+ if component_assignment_file is not None:
+ cmp_marginals = pd.read_csv(
+ component_assignment_file,
+ index_col=0,
+ encoding_errors='replace',
+ )
+
+ dem_types = demand_sample.columns.unique(level=0)
+
+ # add component(s) to support collapse calculation
+ if collapse_fragility_demand_type is not None:
+ if not collapse_fragility_demand_type.startswith('SA'):
+ # we need story-specific collapse assessment
+ # (otherwise we have a global demand and evaluate
+ # collapse directly, so this code should be skipped)
+
+ if collapse_fragility_demand_type in dem_types:
+ # excessive coll_DEM is added on every floor
+ # to detect excessive collapse demands
+ cmp_marginals.loc['excessive.coll.DEM', 'Units'] = 'ea'
+
+ locs = demand_sample[
+ collapse_fragility_demand_type # type: ignore
+ ].columns.unique(level=0)
+ cmp_marginals.loc['excessive.coll.DEM', 'Location'] = (
+ ','.join(locs)
+ )
+
+ dirs = demand_sample[
+ collapse_fragility_demand_type # type: ignore
+ ].columns.unique(level=1)
+ cmp_marginals.loc['excessive.coll.DEM', 'Direction'] = (
+ ','.join(dirs)
+ )
+
+ cmp_marginals.loc['excessive.coll.DEM', 'Theta_0'] = 1.0
+
+ else:
+ self.log.msg(
+ f'WARNING: No {collapse_fragility_demand_type} '
+ f'among available demands. Collapse cannot '
+ f'be evaluated.'
+ )
+
+ # always add a component to support basic collapse calculation
+ cmp_marginals.loc['collapse', 'Units'] = 'ea'
+ cmp_marginals.loc['collapse', 'Location'] = 0
+ cmp_marginals.loc['collapse', 'Direction'] = 1
+ cmp_marginals.loc['collapse', 'Theta_0'] = 1.0
+
+ # add components to support irreparable damage calculation
+ if add_irreparable_damage_columns:
+ if 'RID' in dem_types:
+ # excessive RID is added on every floor to detect large RIDs
+ cmp_marginals.loc['excessiveRID', 'Units'] = 'ea'
+
+ locs = demand_sample['RID'].columns.unique(level=0)
+ cmp_marginals.loc['excessiveRID', 'Location'] = ','.join(locs)
+
+ dirs = demand_sample['RID'].columns.unique(level=1)
+ cmp_marginals.loc['excessiveRID', 'Direction'] = ','.join(dirs)
+
+ cmp_marginals.loc['excessiveRID', 'Theta_0'] = 1.0
+
+ # irreparable is a global component to recognize if any of the
+ # excessive RIDs were triggered
+ cmp_marginals.loc['irreparable', 'Units'] = 'ea'
+ cmp_marginals.loc['irreparable', 'Location'] = 0
+ cmp_marginals.loc['irreparable', 'Direction'] = 1
+ cmp_marginals.loc['irreparable', 'Theta_0'] = 1.0
+
+ else:
+ self.log.msg(
+ 'WARNING: No residual interstory drift ratio among '
+ 'available demands. Irreparable damage cannot be '
+ 'evaluated.'
+ )
+
+ # load component model
+ self.asset.load_cmp_model({'marginals': cmp_marginals})
+
+ # generate component quantity sample
+ self.asset.generate_cmp_sample()
+
+ # if requested, load the quantity sample from a file
+ if component_sample_file is not None:
+ self.asset.load_cmp_sample(component_sample_file)
+
+ def calculate_damage( # noqa: C901
+ self,
+ length_unit: str | None,
+ component_database: str,
+ component_database_path: str | None = None,
+ collapse_fragility: dict | None = None,
+ irreparable_damage: dict | None = None,
+ damage_process_approach: str | None = None,
+ damage_process_file_path: str | None = None,
+ custom_model_dir: str | None = None,
+ scaling_specification: dict | None = None,
+ *,
+ is_for_water_network_assessment: bool = False,
+ ) -> None:
+ """
+ Calculate damage.
+
+ Parameters
+ ----------
+ length_unit: str or None
+ Unit of length to be used to add units to the demand data
+ if needed.
+ component_database: str
+ Name of the component database.
+ component_database_path: str or None
+ Optional path to a component database file.
+ collapse_fragility: dict or None
+ Collapse fragility information.
+ irreparable_damage: dict or None
+ Information for irreparable damage.
+ damage_process_approach: str or None
+ Approach for the damage process.
+ damage_process_file_path: str or None
+ Optional path to a damage process file.
+ custom_model_dir: str or None
+ Optional directory for custom models.
+ scaling_specification: dict, optional
+ A dictionary defining adjustments to component capacity medians.
+ Example: {'CMP-1-1': '*1.2', 'CMP-1-2': '/1.4'}
+ The keys are individual components that should be present
+ in the `capacity_sample`. The values should be strings
+ containing an operation followed by the value formatted as
+ a float. The operation can be '+' for addition, '-' for
+ subtraction, '*' for multiplication, and '/' for division.
+ is_for_water_network_assessment: bool
+ Whether the assessment is for a water network.
+
+ Raises
+ ------
+ ValueError
+ With invalid combinations of arguments.
+
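+ Examples
+ --------
+ Sketch of the expected dictionary shapes; all numeric values are
+ made up, and the dictionary keys follow the ones read in the
+ body of this method:
+
+ >>> asmt.calculate_damage(
+ ...     length_unit='in',
+ ...     component_database='FEMA P-58',
+ ...     collapse_fragility={
+ ...         'DemandType': 'SA_1.13',
+ ...         'CapacityDistribution': 'lognormal',
+ ...         'CapacityMedian': 1.5,
+ ...         'Theta_1': 0.5,
+ ...     },
+ ...     irreparable_damage={
+ ...         'DriftCapacityMedian': 0.01,
+ ...         'DriftCapacityLogStd': 0.3,
+ ...     },
+ ...     damage_process_approach='FEMA P-58',
+ ... )  # doctest: +SKIP
+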
+ """
+ # load the fragility information
+ if component_database in default_dbs['fragility']:
+ component_db = [
+ 'PelicunDefault/' + default_dbs['fragility'][component_database],
+ ]
+ else:
+ component_db = []
+
+ if component_database_path is not None:
+ if custom_model_dir is None:
+ msg = (
+ '`custom_model_dir` needs to be specified '
+ 'when `component_database_path` is not None.'
+ )
+ raise ValueError(msg)
+
+ if 'CustomDLDataFolder' in component_database_path:
+ component_database_path = component_database_path.replace(
+ 'CustomDLDataFolder', custom_model_dir
+ )
+
+ component_db += [component_database_path]
+
+ component_db = component_db[::-1]
+
+ # prepare additional fragility data
+
+ # get the database header from the default P58 db
+ p58_data = self.get_default_data('damage_DB_FEMA_P58_2nd')
+
+ adf = pd.DataFrame(columns=p58_data.columns)
+
+ if collapse_fragility:
+ assert self.asset.cmp_marginal_params is not None
+
+ if (
+ 'excessive.coll.DEM'
+ in self.asset.cmp_marginal_params.index.get_level_values('cmp')
+ ):
+ # if there is story-specific evaluation
+ coll_cmp_name = 'excessive.coll.DEM'
+ else:
+ # otherwise, for global collapse evaluation
+ coll_cmp_name = 'collapse'
+
+ adf.loc[coll_cmp_name, ('Demand', 'Directional')] = 1
+ adf.loc[coll_cmp_name, ('Demand', 'Offset')] = 0
+
+ coll_dem = collapse_fragility['DemandType']
+
+ if '_' in coll_dem:
+ coll_dem, coll_dem_spec = coll_dem.split('_')
+ else:
+ coll_dem_spec = None
+
+ coll_dem_name = None
+ for demand_name, demand_short in EDP_to_demand_type.items():
+ if demand_short == coll_dem:
+ coll_dem_name = demand_name
+ break
+
+ if coll_dem_name is None:
+ msg = (
+ 'A valid demand type acronym was not provided in '
+ 'the configuration file. Please ensure the '
+ "'DemandType' field in the collapse fragility "
+ 'section contains one of the recognized acronyms '
+ "(e.g., 'SA', 'PFA', 'PGA'). Refer to the "
+ "configuration file's 'collapse_fragility' "
+ 'section.'
+ )
+ raise ValueError(msg)
+
+ if coll_dem_spec is None:
+ adf.loc[coll_cmp_name, ('Demand', 'Type')] = coll_dem_name
+
+ else:
+ adf.loc[coll_cmp_name, ('Demand', 'Type')] = (
+ f'{coll_dem_name}|{coll_dem_spec}'
+ )
+
+ if length_unit is None:
+ msg = 'A length unit is required.'
+ raise ValueError(msg)
+ coll_dem_unit = _add_units(
+ pd.DataFrame(
+ columns=[
+ f'{coll_dem}-1-1',
+ ]
+ ),
+ length_unit,
+ ).iloc[0, 0]
+
+ adf.loc[coll_cmp_name, ('Demand', 'Unit')] = coll_dem_unit
+ adf.loc[coll_cmp_name, ('LS1', 'Family')] = collapse_fragility[
+ 'CapacityDistribution'
+ ]
+ adf.loc[coll_cmp_name, ('LS1', 'Theta_0')] = collapse_fragility[
+ 'CapacityMedian'
+ ]
+ adf.loc[coll_cmp_name, ('LS1', 'Theta_1')] = collapse_fragility[
+ 'Theta_1'
+ ]
+ adf.loc[coll_cmp_name, 'Incomplete'] = 0
+
+ if coll_cmp_name != 'collapse':
+ # for story-specific evaluation, we need to add a placeholder
+ # fragility that will never trigger, but helps us aggregate
+ # results in the end
+ adf.loc['collapse', ('Demand', 'Directional')] = 1
+ adf.loc['collapse', ('Demand', 'Offset')] = 0
+ adf.loc['collapse', ('Demand', 'Type')] = 'One'
+ adf.loc['collapse', ('Demand', 'Unit')] = 'unitless'
+ adf.loc['collapse', ('LS1', 'Theta_0')] = 1e10
+ adf.loc['collapse', 'Incomplete'] = 0
+
+ elif not is_for_water_network_assessment:
+ # add a placeholder collapse fragility that will never trigger
+ # collapse, but allow damage processes to work with collapse
+
+ adf.loc['collapse', ('Demand', 'Directional')] = 1
+ adf.loc['collapse', ('Demand', 'Offset')] = 0
+ adf.loc['collapse', ('Demand', 'Type')] = 'One'
+ adf.loc['collapse', ('Demand', 'Unit')] = 'unitless'
+ adf.loc['collapse', ('LS1', 'Theta_0')] = 1e10
+ adf.loc['collapse', 'Incomplete'] = 0
+
+ if irreparable_damage:
+ # add excessive RID fragility according to settings provided in the
+ # input file
+ adf.loc['excessiveRID', ('Demand', 'Directional')] = 1
+ adf.loc['excessiveRID', ('Demand', 'Offset')] = 0
+ adf.loc['excessiveRID', ('Demand', 'Type')] = (
+ 'Residual Interstory Drift Ratio'
+ )
+
+ adf.loc['excessiveRID', ('Demand', 'Unit')] = 'unitless'
+ adf.loc['excessiveRID', ('LS1', 'Theta_0')] = irreparable_damage[
+ 'DriftCapacityMedian'
+ ]
+ adf.loc['excessiveRID', ('LS1', 'Family')] = 'lognormal'
+ adf.loc['excessiveRID', ('LS1', 'Theta_1')] = irreparable_damage[
+ 'DriftCapacityLogStd'
+ ]
+
+ adf.loc['excessiveRID', 'Incomplete'] = 0
+
+ # add a placeholder irreparable fragility that will never trigger
+ # damage, but allow damage processes to aggregate excessiveRID here
+ adf.loc['irreparable', ('Demand', 'Directional')] = 1
+ adf.loc['irreparable', ('Demand', 'Offset')] = 0
+ adf.loc['irreparable', ('Demand', 'Type')] = 'One'
+ adf.loc['irreparable', ('Demand', 'Unit')] = 'unitless'
+ adf.loc['irreparable', ('LS1', 'Theta_0')] = 1e10
+ adf.loc['irreparable', 'Incomplete'] = 0
+
+ # TODO(AZ): we can improve this by creating a water
+ # network-specific assessment class
+ if is_for_water_network_assessment:
+ # add a placeholder aggregate fragility that will never trigger
+ # damage, but allow damage processes to aggregate the
+ # various pipeline damages
+ adf.loc['aggregate', ('Demand', 'Directional')] = 1
+ adf.loc['aggregate', ('Demand', 'Offset')] = 0
+ adf.loc['aggregate', ('Demand', 'Type')] = 'Peak Ground Velocity'
+ adf.loc['aggregate', ('Demand', 'Unit')] = 'mps'
+ adf.loc['aggregate', ('LS1', 'Theta_0')] = 1e10
+ adf.loc['aggregate', ('LS2', 'Theta_0')] = 1e10
+ adf.loc['aggregate', 'Incomplete'] = 0
+
+ self.damage.load_model_parameters(
+ [*component_db, adf],
+ set(self.asset.list_unique_component_ids()),
+ )
+
+ # load the damage process if needed
+ dmg_process = None
+ if damage_process_approach is not None: # noqa: PLR1702
+ if damage_process_approach in default_damage_processes:
+ dmg_process = default_damage_processes[damage_process_approach]
+
+ # For Hazus Earthquake, we need to specify the component ids
+ if damage_process_approach == 'Hazus Earthquake':
+ cmp_sample = self.asset.save_cmp_sample()
+ assert isinstance(cmp_sample, pd.DataFrame)
+
+ cmp_list = cmp_sample.columns.unique(level=0)
+
+ cmp_map = {'STR': '', 'LF': '', 'NSA': ''}
+
+ for cmp in cmp_list:
+ for cmp_type in cmp_map:
+ if cmp_type + '.' in cmp:
+ cmp_map[cmp_type] = cmp
+
+ new_dmg_process = dmg_process.copy()
+ for source_cmp, action in dmg_process.items():
+ # first, look at the source component id
+ new_source = None
+ for cmp_type, cmp_id in cmp_map.items():
+ if (cmp_type in source_cmp) and (cmp_id != ''): # noqa: PLC1901
+ new_source = source_cmp.replace(cmp_type, cmp_id)
+ break
+
+ if new_source is not None:
+ new_dmg_process[new_source] = action
+ del new_dmg_process[source_cmp]
+ else:
+ new_source = source_cmp
+
+ # then, look at the target component ids
+ for ds_i, target_vals in action.items():
+ if isinstance(target_vals, str):
+ for cmp_type, cmp_id in cmp_map.items():
+ if (cmp_type in target_vals) and (cmp_id != ''): # noqa: PLC1901
+ target_vals = target_vals.replace( # noqa: PLW2901
+ cmp_type, cmp_id
+ )
+
+ new_target_vals = target_vals
+
+ else:
+ # we assume that target_vals is a list of str
+ new_target_vals = []
+
+ for target_val in target_vals:
+ for cmp_type, cmp_id in cmp_map.items():
+ if (cmp_type in target_val) and (
+ cmp_id != '' # noqa: PLC1901
+ ):
+ target_val = target_val.replace( # noqa: PLW2901
+ cmp_type, cmp_id
+ )
+
+ new_target_vals.append(target_val)
+
+ new_dmg_process[new_source][ds_i] = new_target_vals
+
+ dmg_process = new_dmg_process
+
+ # Remove components not present in the asset model
+ # from the source components of the damage process.
+ asset_components = set(self.asset.list_unique_component_ids())
+ filtered_dmg_process = {}
+ for key in dmg_process:
+ component = key.split('_')[1]
+ if component in asset_components:
+ filtered_dmg_process[key] = dmg_process[key]
+ dmg_process = filtered_dmg_process
+
+ elif damage_process_approach == 'User Defined':
+ if damage_process_file_path is None:
+ msg = (
+ 'When `damage_process_approach` is set to '
+ '`User Defined`, a `damage_process_file_path` '
+ 'needs to be provided.'
+ )
+ raise ValueError(msg)
+
+ # load the damage process from a file
+ with Path(damage_process_file_path).open(encoding='utf-8') as f:
+ dmg_process = json.load(f)
+
+ elif damage_process_approach == 'None':
+ # no damage process applied for the calculation
+ dmg_process = None
+
+ else:
+ self.log.msg(
+ f'Prescribed Damage Process not recognized: '
+ f'`{damage_process_approach}`.'
+ )
+
+ # calculate damages
+ self.damage.calculate(
+ dmg_process=dmg_process,
+ scaling_specification=scaling_specification,
+ )
+
+ def calculate_loss(
+ self,
+ loss_map_approach: str,
+ occupancy_type: str,
+ consequence_database: str,
+ consequence_database_path: str | None = None,
+ custom_model_dir: str | None = None,
+ damage_process_approach: str = 'User Defined',
+ replacement_cost_parameters: dict[str, float | str] | None = None,
+ replacement_time_parameters: dict[str, float | str] | None = None,
+ replacement_carbon_parameters: dict[str, float | str] | None = None,
+ replacement_energy_parameters: dict[str, float | str] | None = None,
+ loss_map_path: str | None = None,
+ decision_variables: tuple[str, ...] | None = None,
+ ) -> tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Calculate losses.
+
+ Parameters
+ ----------
+ loss_map_approach: str
+ Approach for the loss map generation. Can be either
+ `User Defined` or `Automatic`.
+ occupancy_type: str
+ Occupancy type.
+ consequence_database: str
+ Name of the consequence database.
+ consequence_database_path: str or None
+ Optional path to a consequence database file.
+ custom_model_dir: str or None
+ Optional directory for custom models.
+ damage_process_approach: str
+ Damage process approach. Defaults to `User Defined`.
+ replacement_cost_parameters: dict or None
+ Parameters for replacement cost.
+ replacement_time_parameters: dict or None
+ Parameters for replacement time.
+ replacement_carbon_parameters: dict or None
+ Parameters for replacement carbon.
+ replacement_energy_parameters: dict or None
+ Parameters for replacement energy.
+ loss_map_path: str or None
+ Optional path to a loss map file.
+ decision_variables: tuple[str] or None
+ Optional decision variables for the assessment.
+
+ Returns
+ -------
+ tuple
+ Dataframe with the aggregated loss of each realization,
+ and another boolean dataframe with information on which DV
+ thresholds were exceeded in each realization, triggering
+ replacement. If no thresholds are specified it only
+ contains False values.
+
+ Raises
+ ------
+ ValueError
+ When an invalid loss map approach is specified.
+
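+ Examples
+ --------
+ A minimal sketch; the occupancy type and the replacement cost
+ parameters are made up:
+
+ >>> df_agg, exceedance = asmt.calculate_loss(
+ ...     loss_map_approach='Automatic',
+ ...     occupancy_type='COM1',
+ ...     consequence_database='Hazus Earthquake',
+ ...     replacement_cost_parameters={
+ ...         'Unit': 'loss_ratio',
+ ...         'Median': 1.0,
+ ...         'Distribution': 'lognormal',
+ ...         'Theta_1': 0.4,
+ ...     },
+ ... )  # doctest: +SKIP
+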
+ """
+ conseq_df, consequence_db = self.load_consequence_info(
+ consequence_database,
+ consequence_database_path,
+ custom_model_dir,
+ )
+
+ # remove duplicate rows from conseq_df, keeping the first
+ # occurrence of each index entry
+ conseq_df = conseq_df[~conseq_df.index.duplicated(keep='first')]
+
+ # add the replacement consequence to the data
+ adf = pd.DataFrame(
+ columns=conseq_df.columns,
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('replacement', 'Cost'),
+ ('replacement', 'Time'),
+ ('replacement', 'Carbon'),
+ ('replacement', 'Energy'),
+ ]
+ ),
+ )
+
+ _loss__add_replacement_cost(
+ adf,
+ damage_process_approach,
+ unit=get(replacement_cost_parameters, 'Unit'),
+ median=get(replacement_cost_parameters, 'Median'),
+ distribution=get(replacement_cost_parameters, 'Distribution'),
+ theta_1=get(replacement_cost_parameters, 'Theta_1'),
+ )
+
+ _loss__add_replacement_time(
+ adf,
+ damage_process_approach,
+ conseq_df,
+ occupancy_type=occupancy_type,
+ unit=get(replacement_time_parameters, 'Unit'),
+ median=get(replacement_time_parameters, 'Median'),
+ distribution=get(replacement_time_parameters, 'Distribution'),
+ theta_1=get(replacement_time_parameters, 'Theta_1'),
+ )
+
+ _loss__add_replacement_carbon(
+ adf,
+ damage_process_approach,
+ unit=get(replacement_carbon_parameters, 'Unit'),
+ median=get(replacement_carbon_parameters, 'Median'),
+ distribution=get(replacement_carbon_parameters, 'Distribution'),
+ theta_1=get(replacement_carbon_parameters, 'Theta_1'),
+ )
+
+ _loss__add_replacement_energy(
+ adf,
+ damage_process_approach,
+ unit=get(replacement_energy_parameters, 'Unit'),
+ median=get(replacement_energy_parameters, 'Median'),
+ distribution=get(replacement_energy_parameters, 'Distribution'),
+ theta_1=get(replacement_energy_parameters, 'Theta_1'),
+ )
+
+ # prepare the loss map
+ loss_map = None
+ if loss_map_approach == 'Automatic':
+ # get the damage sample
+ loss_map = _loss__map_auto(
+ self, conseq_df, damage_process_approach, occupancy_type
+ )
+
+ elif loss_map_approach == 'User Defined':
+ assert custom_model_dir is not None
+ loss_map = _loss__map_user(custom_model_dir, loss_map_path)
+
+ else:
+ msg = f'Invalid MapApproach value: `{loss_map_approach}`.'
+ raise ValueError(msg)
+
+ # prepare additional loss map entries, if needed
+ if 'DMG-collapse' not in loss_map.index:
+ loss_map.loc['collapse', 'Repair'] = 'replacement'
+ loss_map.loc['irreparable', 'Repair'] = 'replacement'
+
+ if decision_variables:
+ self.loss.decision_variables = decision_variables
+
+ self.loss.add_loss_map(loss_map, loss_map_policy=None)
+ self.loss.load_model_parameters([*consequence_db, adf])
+
+ self.loss.calculate()
+
+ df_agg, exceedance_bool_df = self.loss.aggregate_losses(future=True)
+ assert isinstance(df_agg, pd.DataFrame)
+ assert isinstance(exceedance_bool_df, pd.DataFrame)
+ return df_agg, exceedance_bool_df
+
+ def load_consequence_info(
+ self,
+ consequence_database: str,
+ consequence_database_path: str | None = None,
+ custom_model_dir: str | None = None,
+ ) -> tuple[pd.DataFrame, list[str]]:
+ """
+ Load consequence information for the assessment.
+
+ Parameters
+ ----------
+ consequence_database: str
+ Name of the consequence database.
+ consequence_database_path: str or None
+ Optional path to a consequence database file.
+ custom_model_dir: str or None
+ Optional directory for custom models.
+
+ Returns
+ -------
+ tuple[pd.DataFrame, list[str]]
+ A tuple containing:
+ - A DataFrame with the consequence data.
+ - A list of paths to the consequence databases used.
+
+ Raises
+ ------
+ ValueError
+ With invalid combinations of arguments.
+
+ """
+ if consequence_database in default_dbs['repair']:
+ consequence_db = [
+ 'PelicunDefault/' + default_dbs['repair'][consequence_database],
+ ]
+
+ conseq_df = self.get_default_data(
+ default_dbs['repair'][consequence_database][:-4]
+ )
+ else:
+ consequence_db = []
+
+ conseq_df = pd.DataFrame()
+
+ if consequence_database_path is not None:
+ if custom_model_dir is None:
+ msg = (
+ 'When `consequence_database_path` is specified, '
+ '`custom_model_dir` needs to be specified as well.'
+ )
+ raise ValueError(msg)
+
+ if 'CustomDLDataFolder' in consequence_database_path:
+ consequence_database_path = consequence_database_path.replace(
+ 'CustomDLDataFolder', custom_model_dir
+ )
+
+ consequence_db += [consequence_database_path]
+
+ extra_conseq_df = file_io.load_data(
+ consequence_database_path,
+ unit_conversion_factors=None,
+ orientation=1,
+ reindex=False,
+ )
+ assert isinstance(extra_conseq_df, pd.DataFrame)
+
+ if isinstance(conseq_df, pd.DataFrame):
+ conseq_df = pd.concat([conseq_df, extra_conseq_df])
+ else:
+ conseq_df = extra_conseq_df
+
+ consequence_db = consequence_db[::-1]
+
+ return conseq_df, consequence_db
+
+
+def _add_units(raw_demands: pd.DataFrame, length_unit: str) -> pd.DataFrame:
+ """
+ Add units to demand columns in a DataFrame.
+
+ Parameters
+ ----------
+ raw_demands: pd.DataFrame
+ The raw demand data to which units will be added.
+ length_unit: str
+ The unit of length to be used (e.g., 'in' for inches).
+
+ Returns
+ -------
+ pd.DataFrame
+ The DataFrame with units added to the appropriate demand columns.
+
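+ Examples
+ --------
+ Illustrative; with a peak floor acceleration column and inches as
+ the length unit, the inserted `Units` row reads `inchps2`:
+
+ >>> df = pd.DataFrame([[0.5]], columns=['PFA-1-1'])
+ >>> _add_units(df, 'in').loc['Units', 'PFA-1-1']  # doctest: +SKIP
+ 'inchps2'
+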
+ """
+ demands = raw_demands.T
+
+ demands.insert(0, 'Units', np.nan)
+
+ if length_unit == 'in':
+ length_unit = 'inch'
+
+ demands = pd.DataFrame(
+ base.convert_to_MultiIndex(demands, axis=0).sort_index(axis=0).T
+ )
+
+ nlevels_with_event_id = 4
+ dem_level = 1 if demands.columns.nlevels == nlevels_with_event_id else 0
+
+ # drop demands with no EDP type identified
+ demands = demands.drop(
+ demands.columns[demands.columns.get_level_values(dem_level) == ''],
+ axis=1,
+ )
+
+ # assign units
+ demand_cols = demands.columns.get_level_values(dem_level).to_list()
+
+ # remove additional info from demand names
+ demand_cols = [d.split('_')[0] for d in demand_cols]
+
+ # acceleration
+ acc_edps = ['PFA', 'PGA', 'SA']
+ edp_mask = np.isin(demand_cols, acc_edps)
+
+ if np.any(edp_mask):
+ demands.iloc[0, edp_mask] = length_unit + 'ps2' # type: ignore
+
+ # speed
+ speed_edps = ['PFV', 'PWS', 'PGV', 'SV']
+ edp_mask = np.isin(demand_cols, speed_edps)
+
+ if np.any(edp_mask):
+ demands.iloc[0, edp_mask] = length_unit + 'ps' # type: ignore
+
+ # displacement
+ disp_edps = ['PFD', 'PIH', 'SD', 'PGD']
+ edp_mask = np.isin(demand_cols, disp_edps)
+
+ if np.any(edp_mask):
+ demands.iloc[0, edp_mask] = length_unit # type: ignore
+
+ # drift ratio
+ rot_edps = ['PID', 'PRD', 'DWD', 'RDR', 'PMD', 'RID']
+ edp_mask = np.isin(demand_cols, rot_edps)
+
+ if np.any(edp_mask):
+ demands.iloc[0, edp_mask] = 'unitless' # type: ignore
+
+ # convert back to simple header and return the DF
+ return base.convert_to_SimpleIndex(demands, axis=1)
+
+
+def _loss__add_replacement_energy(
+ adf: pd.DataFrame,
+ dl_method: str,
+ unit: str | None = None,
+ median: float | None = None,
+ distribution: str | None = None,
+ theta_1: float | None = None,
+) -> None:
+ """
+ Add replacement energy information.
+
+ Parameters
+ ----------
+ adf : pandas.DataFrame
+ Dataframe containing loss information.
+ dl_method : str
+ Supported methods are 'FEMA P-58'.
+ unit : str, optional
+ Unit for the energy value (e.g., 'MJ'). Defaults to None.
+ median : float, optional
+ Median replacement energy. If provided, it defines the base
+ replacement energy value. Defaults to None.
+ distribution : str, optional
+ Distribution family to model uncertainty around the median
+ energy (e.g., 'lognormal'). Required if `median` is
+ provided. Defaults to None.
+ theta_1 : float, optional
+ Distribution parameter (e.g., standard deviation). Required if
+ `distribution` is provided. Defaults to None.
+
+ Notes
+ -----
+ If `median` is not provided, a default value is assigned based on
+ the `DL_method`. For 'FEMA P-58', the default replacement energy
+ value is 0 MJ. For other methods, this consequence is removed
+ from the dataframe entirely.
+ """
+ ren = ('replacement', 'Energy')
+ if median is not None:
+ # TODO(JVM): in this case we need unit (add config parser check)
+
+ adf.loc[ren, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[ren, ('DV', 'Unit')] = unit
+ adf.loc[ren, ('DS1', 'Theta_0')] = median
+
+ if distribution is not None:
+ # TODO(JVM): in this case we need theta_1 (add config parser check)
+
+ adf.loc[ren, ('DS1', 'Family')] = distribution
+ adf.loc[ren, ('DS1', 'Theta_1')] = theta_1
+ elif dl_method == 'FEMA P-58':
+ adf.loc[ren, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[ren, ('DV', 'Unit')] = 'MJ'
+ adf.loc[ren, ('DS1', 'Theta_0')] = 0
+
+ else:
+ # for everything else, remove this consequence
+ adf.drop(ren, inplace=True)
+
+
+def _loss__add_replacement_carbon(
+ adf: pd.DataFrame,
+ damage_process_approach: str,
+ unit: str | None = None,
+ median: float | None = None,
+ distribution: str | None = None,
+ theta_1: float | None = None,
+) -> None:
+ """
+ Add replacement carbon emission information.
+
+ Parameters
+ ----------
+ adf : pandas.DataFrame
+ Dataframe containing loss information.
+ damage_process_approach : str
+ Supported approaches include 'FEMA P-58'.
+ unit : str, optional
+ Unit for the carbon emission value (e.g., 'kg'). Defaults to
+ None.
+ median : float, optional
+ Median replacement carbon emissions. If provided, it defines
+ the base replacement carbon value. Defaults to None.
+ distribution : str, optional
+ Distribution family to model uncertainty around the median
+ carbon emissions (e.g., 'lognormal'). Required if `median` is
+ provided. Defaults to None.
+ theta_1 : float, optional
+ Distribution parameter (e.g., standard deviation). Required if
+ `distribution` is provided. Defaults to None.
+
+ Notes
+ -----
+ If `median` is not provided, a default value is assigned based on
+ the `damage_process_approach`. For 'FEMA P-58', the default
+ replacement carbon emissions value is 0 kg. For other approaches,
+ this consequence is removed from the dataframe entirely.
+ """
+ rcarb = ('replacement', 'Carbon')
+ if median is not None:
+ # TODO(JVM): in this case we need unit (add config parser check)
+
+ adf.loc[rcarb, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rcarb, ('DV', 'Unit')] = unit
+ adf.loc[rcarb, ('DS1', 'Theta_0')] = median
+
+ if distribution is not None:
+ # TODO(JVM): in this case we need theta_1 (add config parser check)
+
+ adf.loc[rcarb, ('DS1', 'Family')] = distribution
+ adf.loc[rcarb, ('DS1', 'Theta_1')] = theta_1
+ elif damage_process_approach == 'FEMA P-58':
+ adf.loc[rcarb, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rcarb, ('DV', 'Unit')] = 'kg'
+ adf.loc[rcarb, ('DS1', 'Theta_0')] = 0
+
+ else:
+ # for everything else, remove this consequence
+ adf.drop(rcarb, inplace=True)
+
+
+def _loss__add_replacement_time(
+ adf: pd.DataFrame,
+ damage_process_approach: str,
+ conseq_df: pd.DataFrame,
+ occupancy_type: str | None = None,
+ unit: str | None = None,
+ median: float | None = None,
+ distribution: str | None = None,
+ theta_1: float | None = None,
+) -> None:
+ """
+ Add replacement time information.
+
+ Parameters
+ ----------
+ adf : pandas.DataFrame
+ Dataframe containing loss information.
+ damage_process_approach : str
+ Supported approaches are 'FEMA P-58', 'Hazus Earthquake -
+ Buildings'.
+ conseq_df : pandas.DataFrame
+ Dataframe containing consequence data for different damage
+ states.
+ occupancy_type : str, optional
+ Type of occupancy, used to look up replacement time in the
+ consequence dataframe for Hazus Earthquake approach. Defaults
+ to None.
+ unit : str, optional
+ Unit for the replacement time (e.g., 'day', 'worker_day').
+ Defaults to None.
+ median : float, optional
+ Median replacement time or loss ratio. If provided, it defines
+ the base replacement time. Defaults to None.
+ distribution : str, optional
+ Distribution family to model uncertainty around the median
+ time (e.g., 'lognormal'). Required if `median` is
+ provided. Defaults to None.
+ theta_1 : float, optional
+ Distribution parameter (e.g., standard deviation). Required if
+ `distribution` is provided. Defaults to None.
+
+ Notes
+ -----
+ If `median` is not provided, a default value is assigned based on
+ the `damage_process_approach`. For 'FEMA P-58', the default
+ replacement time is 0 worker_days. For 'Hazus Earthquake -
+ Buildings', the replacement time is fetched from `conseq_df` for
+ the provided `occupancy_type` and corresponds to the total loss
+ (damage state 5, DS5). In other cases, a placeholder value of 1 is
+ used.
+
+ """
+ rt = ('replacement', 'Time')
+ if median is not None:
+ # TODO(JVM): in this case we need unit (add config parser check)
+
+ adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rt, ('DV', 'Unit')] = unit
+ adf.loc[rt, ('DS1', 'Theta_0')] = median
+
+ if distribution is not None:
+ # TODO(JVM): in this case we need theta_1 (add config parser check)
+
+ adf.loc[rt, ('DS1', 'Family')] = distribution
+ adf.loc[rt, ('DS1', 'Theta_1')] = theta_1
+ elif damage_process_approach == 'FEMA P-58':
+ adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rt, ('DV', 'Unit')] = 'worker_day'
+ adf.loc[rt, ('DS1', 'Theta_0')] = 0
+
+ # for Hazus EQ, use the replacement time from the consequence data
+ elif damage_process_approach == 'Hazus Earthquake - Buildings':
+ adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rt, ('DV', 'Unit')] = 'day'
+
+ # load the replacement time that corresponds to total loss
+ adf.loc[rt, ('DS1', 'Theta_0')] = conseq_df.loc[
+ (f'STR.{occupancy_type}', 'Time'), ('DS5', 'Theta_0')
+ ]
+
+ # otherwise, use 1 (and expect to have it defined by the user)
+ else:
+ adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rt, ('DV', 'Unit')] = 'loss_ratio'
+ adf.loc[rt, ('DS1', 'Theta_0')] = 1
+
+
+def _loss__add_replacement_cost(
+ adf: pd.DataFrame,
+ dl_method: str,
+ unit: str | None = None,
+ median: float | None = None,
+ distribution: str | None = None,
+ theta_1: float | None = None,
+) -> None:
+ """
+ Add replacement cost information.
+
+ Parameters
+ ----------
+ adf : pandas.DataFrame
+ Dataframe containing loss information.
+ dl_method : str
+ Supported methods are 'FEMA P-58', 'Hazus Earthquake', and
+ 'Hazus Hurricane'.
+ unit : str, optional
+ Unit for the replacement cost (e.g., 'USD_2011',
+ 'loss_ratio'). Defaults to None.
+ median : float, optional
+ Median replacement cost or loss ratio. If provided, it defines
+ the base replacement cost. Defaults to None.
+ distribution : str, optional
+ Distribution family to model uncertainty around the median
+ cost (e.g., 'lognormal'). Required if `median` is
+ provided. Defaults to None.
+ theta_1 : float, optional
+ Distribution parameter (e.g., standard deviation). Required if
+ `distribution` is provided. Defaults to None.
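+
+ Examples
+ --------
+ Sketch with made-up values; `adf` is expected to carry the
+ ('replacement', 'Cost') index entry:
+
+ >>> adf = pd.DataFrame(
+ ...     index=pd.MultiIndex.from_tuples([('replacement', 'Cost')])
+ ... )
+ >>> _loss__add_replacement_cost(
+ ...     adf,
+ ...     'FEMA P-58',
+ ...     unit='USD_2011',
+ ...     median=2.0e6,
+ ...     distribution='lognormal',
+ ...     theta_1=0.35,
+ ... )  # doctest: +SKIP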
+ """
+ rc = ('replacement', 'Cost')
+ if median is not None:
+ # TODO(JVM): in this case we need unit (add config parser check)
+
+ adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rc, ('DV', 'Unit')] = unit
+ adf.loc[rc, ('DS1', 'Theta_0')] = median
+
+ if distribution is not None:
+ # TODO(JVM): in this case we need theta_1 (add config parser check)
+
+ adf.loc[rc, ('DS1', 'Family')] = distribution
+ adf.loc[rc, ('DS1', 'Theta_1')] = theta_1
+
+ elif dl_method == 'FEMA P-58':
+ adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rc, ('DV', 'Unit')] = 'USD_2011'
+ adf.loc[rc, ('DS1', 'Theta_0')] = 0
+
+ # for Hazus EQ and HU, use 1.0 as a loss_ratio
+ elif dl_method in {'Hazus Earthquake', 'Hazus Hurricane'}:
+ adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio'
+
+ # store the replacement cost that corresponds to total loss
+ adf.loc[rc, ('DS1', 'Theta_0')] = 1.00
+
+ # otherwise, use 1 (and expect to have it defined by the user)
+ else:
+ adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
+ adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio'
+ adf.loc[rc, ('DS1', 'Theta_0')] = 1
+
+
+def _loss__map_user(
+ custom_model_dir: str, loss_map_path: str | None = None
+) -> pd.DataFrame:
+ """
+ Load a user-defined loss map from a specified path.
+
+ Parameters
+ ----------
+ custom_model_dir : str
+ Directory containing custom models.
+ loss_map_path : str, optional
+ Path to the loss map file. The path can include a placeholder
+ 'CustomDLDataFolder' that will be replaced by
+ `custom_model_dir`. If not provided, raises a ValueError.
+
+ Returns
+ -------
+ pandas.DataFrame
+ DataFrame containing the loss map information.
+
+ Raises
+ ------
+ ValueError
+ If `loss_map_path` is not provided.
+
+ """
+ if loss_map_path is not None:
+ loss_map_path = loss_map_path.replace('CustomDLDataFolder', custom_model_dir)
+
+ else:
+ msg = 'Missing loss map path.'
+ raise ValueError(msg)
+
+ return pd.read_csv(loss_map_path, index_col=0)
+
+
+def _loss__map_auto(
+ assessment: DLCalculationAssessment,
+ conseq_df: pd.DataFrame,
+ dl_method: str,
+ occupancy_type: str | None = None,
+) -> pd.DataFrame:
+ """
+ Automatically generate a loss map.
+
+ Automatically generate a loss map based on the damage sample and
+ the consequence database.
+
+ Parameters
+ ----------
+ assessment : DLCalculationAssessment
+ The assessment object containing the damage model and sample.
+ conseq_df : pandas.DataFrame
+ DataFrame containing consequence data for different damage
+ states.
+ dl_method : str
+ Damage loss method, which defines how the loss map is
+ generated. Supported methods are 'FEMA P-58', 'Hazus
+ Earthquake', 'Hazus Hurricane', and 'Hazus Earthquake
+ Transportation'.
+ occupancy_type : str, optional
+ Occupancy type, used to map damage components to the correct
+ loss models in Hazus Earthquake methods. Defaults to None.
+
+ Returns
+ -------
+ pandas.DataFrame
+ DataFrame containing the automatically generated loss map,
+ where the index corresponds to the damage components and the
+ values indicate the associated loss models.
+
+ Notes
+ -----
+ - For 'FEMA P-58' and 'Hazus Hurricane', the method assumes that
+ fragility and consequence data have matching component IDs.
+ - For 'Hazus Earthquake' and 'Hazus Earthquake Transportation',
+ the method assumes that consequence archetypes are only
+ differentiated by occupancy type.
+
+ """
+ # get the damage sample
+ dmg_sample = assessment.damage.save_sample()
+ assert isinstance(dmg_sample, pd.DataFrame)
+
+ # create a mapping for all components that are also in
+ # the prescribed consequence database
+ dmg_cmps = dmg_sample.columns.unique(level='cmp')
+ loss_cmps = conseq_df.index.unique(level=0)
+
+ drivers = []
+ loss_models = []
+
+ if dl_method in {'FEMA P-58', 'Hazus Hurricane'}:
+ # with these methods, we assume fragility and consequence data
+ # have the same IDs
+
+ for dmg_cmp in dmg_cmps:
+ if dmg_cmp == 'collapse':
+ continue
+
+ if dmg_cmp in loss_cmps:
+ drivers.append(dmg_cmp)
+ loss_models.append(dmg_cmp)
+
+ elif dl_method in {
+ 'Hazus Earthquake',
+ 'Hazus Earthquake Transportation',
+ }:
+ # with Hazus Earthquake we assume that consequence
+ # archetypes are only differentiated by occupancy type
+ for dmg_cmp in dmg_cmps:
+ if dmg_cmp == 'collapse':
+ continue
+
+ cmp_class = dmg_cmp.split('.')[0]
+ if occupancy_type is not None:
+ loss_cmp = f'{cmp_class}.{occupancy_type}'
+ else:
+ loss_cmp = cmp_class
+
+ if loss_cmp in loss_cmps:
+ drivers.append(dmg_cmp)
+ loss_models.append(loss_cmp)
+
+ return pd.DataFrame(loss_models, columns=['Repair'], index=drivers)
+
+
+class TimeBasedAssessment:
+ """Time-based assessment."""
diff --git a/pelicun/auto.py b/pelicun/auto.py
index 4fe4622f6..1610a85e6 100644
--- a/pelicun/auto.py
+++ b/pelicun/auto.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 Leland Stanford Junior University
# Copyright (c) 2023 The Regents of the University of California
@@ -37,48 +36,51 @@
# Contributors:
# Adam Zsarnóczay
-"""
-This module has classes and methods that auto-populate DL models.
-.. rubric:: Contents
+"""Classes and methods that auto-populate DL models."""
-.. autosummary::
+from __future__ import annotations
- auto_populate
-
-"""
-
-import sys
import importlib
+import sys
from pathlib import Path
+from typing import TYPE_CHECKING
from pelicun import base
+if TYPE_CHECKING:
+ import pandas as pd
+
def auto_populate(
- config, auto_script_path, **kwargs # pylint: disable=unused-argument
-):
+ config: dict,
+ auto_script_path: Path,
+ **kwargs, # noqa: ANN003
+) -> tuple[dict, pd.DataFrame]:
"""
- Automatically populates the Damage and Loss (DL) configuration for
- a Pelicun calculation using predefined rules.
+ Auto populate the DL configuration with predefined rules.
- This function modifies the provided configuration dictionary based
- on an external Python script that defines auto-population
- rules. It supports using built-in scripts or custom scripts
- specified by the user.
+ Automatically populates the Damage and Loss (DL) configuration for
+ a Pelicun calculation using predefined rules. This function
+ modifies the provided configuration dictionary based on an
+ external Python script that defines auto-population rules. It
+ supports using built-in scripts or custom scripts specified by the
+ user.
Parameters
----------
- config : dict
+ config: dict
A configuration dictionary with a 'GeneralInformation' key
that holds another dictionary with attributes of the asset of
interest. This dictionary is modified in-place with
auto-populated values.
- auto_script_path : str
+ auto_script_path: str
The path pointing to a Python script with the auto-population
rules. Built-in scripts can be referenced using the
'PelicunDefault/XY' format where 'XY' is the name of the
script.
+ kwargs
+ Keyword arguments.
Returns
-------
@@ -94,33 +96,37 @@ def auto_populate(
ValueError
If the configuration dictionary does not contain necessary
asset information under 'GeneralInformation'.
- """
+ """
# try to get the AIM attributes
- AIM = config.get('GeneralInformation', None)
- if AIM is None:
- raise ValueError(
- "No Asset Information provided for the auto-population routine."
- )
+ aim = config.get('GeneralInformation')
+ if aim is None:
+ msg = 'No Asset Information provided for the auto-population routine.'
+ raise ValueError(msg)
# replace default keyword with actual path in auto_script location
- if 'PelicunDefault/' in auto_script_path:
- auto_script_path = auto_script_path.replace(
- 'PelicunDefault/', f'{base.pelicun_path}/resources/auto/'
- )
+ path_parts = Path(auto_script_path).resolve().parts
+ new_parts: list[str] = [
+ (Path(base.pelicun_path) / 'resources/auto').resolve().absolute().as_posix()
+ if part == 'PelicunDefault'
+ else part
+ for part in path_parts
+ ]
+ if 'PelicunDefault' in path_parts:
+ auto_script_path = Path(*new_parts)
# load the auto population module
- ASP = Path(auto_script_path).resolve()
- sys.path.insert(0, str(ASP.parent) + '/')
- auto_script = importlib.__import__(ASP.name[:-3], globals(), locals(), [], 0)
+ asp = Path(auto_script_path).resolve()
+ sys.path.insert(0, str(asp.parent) + '/')
+ auto_script = importlib.__import__(asp.name[:-3], globals(), locals(), [], 0)
auto_populate_ext = auto_script.auto_populate
# generate the DL input data
- AIM_ap, DL_ap, CMP = auto_populate_ext(AIM=config)
+ aim_ap, dl_ap, comp = auto_populate_ext(aim=config)
# assemble the extended config
- config['GeneralInformation'].update(AIM_ap)
- config.update({'DL': DL_ap})
+ config['GeneralInformation'].update(aim_ap)
+ config.update({'DL': dl_ap})
# return the extended config data and the component quantities
- return config, CMP
+ return config, comp
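+
+# Editor's sketch (illustrative; the rule-script path is hypothetical).
+# A custom script must itself define an `auto_populate(aim)` function;
+# built-in rules are addressed as 'PelicunDefault/<script name>':
+#
+# >>> config = {'GeneralInformation': {'NumberOfStories': 3}}
+# >>> config, cmp = auto_populate(config, Path('my_rules.py'))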
diff --git a/pelicun/base.py b/pelicun/base.py
index fbcc44821..d26341fb3 100644
--- a/pelicun/base.py
+++ b/pelicun/base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,62 +37,54 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This module defines constants, basic classes and methods for pelicun.
-.. rubric:: Contents
-
-.. autosummary::
-
- load_default_options
- update_vals
- merge_default_config
- convert_to_SimpleIndex
- convert_to_MultiIndex
- show_matrix
- describe
- str2bool
- float_or_None
- int_or_None
- process_loc
- dedupe_index
- dict_raise_on_duplicates
- parse_units
- convert_units
-
- Options
- Logger
-
-"""
+"""Constants, basic classes, and methods for pelicun."""
from __future__ import annotations
-import os
-import sys
-from datetime import datetime
+
+import argparse
import json
+import pprint
+import sys
+import traceback
import warnings
+from datetime import datetime, timezone
from pathlib import Path
-import argparse
-import pprint
+from typing import TYPE_CHECKING, Any, ClassVar, Optional, TypeVar, overload
+
+import colorama
import numpy as np
import pandas as pd
+from colorama import Fore, Style
+from scipy.interpolate import interp1d # type: ignore
+from pelicun.pelicun_warnings import PelicunWarning
+if TYPE_CHECKING:
+ from collections.abc import Callable
+ from types import TracebackType
+
+ from pelicun.assessment import AssessmentBase
+
+
+colorama.init()
# set printing options
pp = pprint.PrettyPrinter(indent=2, width=80 - 24)
pd.options.display.max_rows = 20
-pd.options.display.max_columns = None
+pd.options.display.max_columns = None # type: ignore
pd.options.display.expand_frame_repr = True
pd.options.display.width = 300
idx = pd.IndexSlice
+T = TypeVar('T')
+
+
class Options:
"""
- Options objects store analysis options and the logging
- configuration.
+ Analysis options and logging configuration.
Attributes
----------
@@ -109,9 +100,10 @@ class Options:
value some quantity of a given unit needs to be multiplied to
be expressed in the base units). Value specified in the user
configuration dictionary. Pelicun comes with a set of default
- units which are always loaded (see settings/default_units.json
- in the pelicun source code). Units specified in the units_file
- overwrite the default units.
+ units which are always loaded (see
+ `settings/default_units.json` in the pelicun source
+ code). Units specified in the units_file overwrite the default
+ units.
demand_offset: dict
Demand offsets are used in the process of mapping a component
location to its associated EDP. This allows components that
@@ -157,27 +149,50 @@ class Options:
"""
- def __init__(self, user_config_options, assessment=None):
+ __slots__ = [
+ '_asmnt',
+ '_rng',
+ '_seed',
+ 'defaults',
+ 'demand_offset',
+        'eco_scale',
+        'error_setup',
+        'list_all_ds',
+        'log',
+ 'nondir_multi_dict',
+ 'rho_cost_time',
+ 'sampling_method',
+ 'units_file',
+ ]
+
+ def __init__(
+ self,
+ user_config_options: dict[str, Any] | None,
+ assessment: AssessmentBase | None = None,
+ ) -> None:
"""
- Initializes an Options object.
+ Initialize an Options object.
Parameters
----------
user_config_options: dict, Optional
User-specified configuration dictionary. Any provided
user_config_options override the defaults.
- assessment: Assessment, Optional
+ assessment: AssessmentBase, Optional
Assessment object that will be using this Options
object. If it is not intended to use this Options object
for an Assessment (e.g. defining an Options object for UQ
use), this value should be None.
- """
+ """
self._asmnt = assessment
- self.defaults = None
- self.sampling_method = None
- self.list_all_ds = None
+ self.defaults: dict[str, Any] | None = None
+ self.sampling_method: str | None = None
+ self.list_all_ds: bool | None = None
merged_config_options = merge_default_config(user_config_options)
@@ -192,321 +207,194 @@ def __init__(self, user_config_options, assessment=None):
self.rho_cost_time = merged_config_options['RepairCostAndTimeCorrelation']
self.eco_scale = merged_config_options['EconomiesOfScale']
+ self.error_setup = merged_config_options['ErrorSetup']
+
# instantiate a Logger object with the finalized configuration
self.log = Logger(
- merged_config_options['Verbose'],
- merged_config_options['ShowWarnings'],
- merged_config_options['LogShowMS'],
merged_config_options['LogFile'],
- merged_config_options['PrintLog'],
- )
-
- def nondir_multi(self, EDP_type):
- """
- Returns the multiplicative factor used in nondirectional
- component demand generation. Read the description of the
- nondir_multi_dict attribute of the Options class.
-
- Parameters
- ----------
- EDP_type: str
- EDP type (e.g. "PFA", "PFV", ..., "ALL")
-
- Returns
- -------
- float
- Nondirectional component multiplicative factor.
-
- Raises
- ------
- ValueError
- If the specified EDP type is not present in the
- dictionary. If this is the case, a value for that type
- needs to be specified in the user's configuration
- dictionary, under ['Options']['NonDirectionalMultipliers']
- = {"edp_type": value, ...}
- """
-
- if EDP_type in self.nondir_multi_dict:
- return self.nondir_multi_dict[EDP_type]
-
- if 'ALL' in self.nondir_multi_dict:
- return self.nondir_multi_dict['ALL']
-
- raise ValueError(
- f"Peak orthogonal EDP multiplier for non-directional demand "
- f"calculation of {EDP_type} not specified.\n"
- f"Please add {EDP_type} in the configuration dictionary "
- f"under ['Options']['NonDirectionalMultipliers']"
- " = {{'edp_type': value, ...}}"
+ verbose=merged_config_options['Verbose'],
+ log_show_ms=merged_config_options['LogShowMS'],
+ print_log=merged_config_options['PrintLog'],
)
@property
- def seed(self):
+ def seed(self) -> float | None:
"""
- Seed property
+ Seed property.
Returns
-------
float
Seed value
+
"""
return self._seed
@seed.setter
- def seed(self, value):
- """
- seed property setter
- """
+ def seed(self, value: float) -> None:
+ """Seed property setter."""
self._seed = value
- self._rng = np.random.default_rng(self._seed)
+ self._rng = np.random.default_rng(self._seed) # type: ignore
@property
- def rng(self):
+ def rng(self) -> np.random.Generator:
"""
- rng property
+ rng property.
Returns
-------
Generator
Random generator
- """
- return self._rng
- @property
- def units_file(self):
- """
- units file property
-
- Returns
- -------
- str
- Units file
"""
- return self._units_file
+ return self._rng
- @units_file.setter
- def units_file(self, value):
- """
- units file property setter
- """
- self._units_file = value
+# Define a module-level LoggerRegistry
+class LoggerRegistry:
+ """Registry to manage all logger instances."""
+
+ _loggers: ClassVar[list[Logger]] = []
+
+ # The @classmethod decorator allows this method to be called on
+ # the class itself, rather than on instances. It interacts with
+ # class-level data (like _loggers), enabling a single registry for
+ # all Logger instances without needing an object of LoggerRegistry
+ # itself.
+ @classmethod
+ def register(cls, logger: Logger) -> None:
+ """Register a logger instance."""
+ cls._loggers.append(logger)
+
+ @classmethod
+ def log_exception(
+ cls,
+ exc_type: type[BaseException],
+ exc_value: BaseException,
+ exc_traceback: TracebackType | None,
+ ) -> None:
+ """Log exceptions to all registered loggers."""
+ message = (
+ f"Unhandled exception occurred:"
+ f"\n"
+ f"{''.join(traceback.format_exception(exc_type, exc_value, exc_traceback))}"
+ )
+ for logger in cls._loggers:
+ logger.msg(message)
-class Logger:
- """
- Logger objects are used to generate log files documenting
- execution events and related messages.
+ # Also call the default excepthook to print the exception to
+ # the console as is done by default.
+ sys.__excepthook__(exc_type, exc_value, exc_traceback)
- Attributes
- ----------
- verbose: bool
- If True, the pelicun echoes more information throughout the
- assessment. This can be useful for debugging purposes. The
- value is specified in the user's configuration dictionary,
- otherwise left as provided in the default configuration file
- (see settings/default_config.json in the pelicun source code).
- show_warnings: bool
- If True, future, deprecation, and performance warnings from python
- packages such as numpy and pandas are printed to the log file
- (and also to the standard output). Otherwise, they are
- suppressed. This setting does not affect warnings defined within
- pelicun that are specific to the damage and loss calculation.
- log_show_ms: bool
- If True, the timestamps in the log file are in microsecond
- precision. The value is specified in the user's configuration
- dictionary, otherwise left as provided in the default
- configuration file (see settings/default_config.json in the
- pelicun source code).
- log_file: str, optional
- If a value is provided, the log is written to that file. The
- value is specified in the user's configuration dictionary,
- otherwise left as provided in the default configuration file
- (see settings/default_config.json in the pelicun source code).
- print_log: bool
- If True, the log is also printed to standard output. The
- value is specified in the user's configuration dictionary,
- otherwise left as provided in the default configuration file
- (see settings/default_config.json in the pelicun source code).
- """
+# Update sys.excepthook to log exceptions in all loggers
+# https://docs.python.org/3/library/sys.html#sys.excepthook
+sys.excepthook = LoggerRegistry.log_exception
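+
+# Editor's note (illustrative): with the hook installed, every Logger
+# created from here on also receives unhandled exceptions, e.g.:
+#
+# >>> log = Logger(None, verbose=True, log_show_ms=False, print_log=True)
+# >>> raise RuntimeError('boom')  # the traceback is echoed via log.msg()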
- # TODO: finalize docstring
- def __init__(self, verbose, show_warnings, log_show_ms, log_file, print_log):
+class Logger:
+ """Generate log files documenting execution events."""
+
+ __slots__ = [
+ 'emitted',
+ 'log_div',
+ 'log_file',
+ 'log_show_ms',
+ 'log_time_format',
+ 'print_log',
+ 'spaces',
+ 'verbose',
+ 'warning_file',
+ 'warning_stack',
+ ]
+
+ def __init__(
+ self,
+ log_file: str | None,
+ *,
+ verbose: bool,
+ log_show_ms: bool,
+ print_log: bool,
+ ) -> None:
"""
- Initializes a Logger object.
+ Initialize a Logger object.
Parameters
----------
- see attributes of the Logger class.
+ verbose: bool
+ If True, the pelicun echoes more information throughout the
+ assessment. This can be useful for debugging purposes. The
+ value is specified in the user's configuration dictionary,
+ otherwise left as provided in the default configuration file
+ (see settings/default_config.json in the pelicun source code).
+ log_show_ms: bool
+ If True, the timestamps in the log file are in microsecond
+ precision. The value is specified in the user's configuration
+ dictionary, otherwise left as provided in the default
+ configuration file (see settings/default_config.json in the
+ pelicun source code).
+ log_file: str, optional
+ If a value is provided, the log is written to that file. The
+ value is specified in the user's configuration dictionary,
+ otherwise left as provided in the default configuration file
+ (see settings/default_config.json in the pelicun source code).
+ print_log: bool
+ If True, the log is also printed to standard output. The
+ value is specified in the user's configuration dictionary,
+ otherwise left as provided in the default configuration file
+ (see settings/default_config.json in the pelicun source code).
"""
self.verbose = verbose
- self.show_warnings = show_warnings
- self.log_show_ms = log_show_ms
- self.log_file = log_file
- self.print_log = print_log
- self.reset_log_strings()
-
- @property
- def verbose(self):
- """
- verbose property
-
- Returns
- -------
- bool
- Verbose property value
- """
- return self._verbose
-
- @verbose.setter
- def verbose(self, value):
- """
- verbose property setter
- """
- self._verbose = bool(value)
-
- @property
- def show_warnings(self):
- """
- show_warnings property
-
- Returns
- -------
- bool
- show_warnings value
- """
- return self._show_warnings
-
- @show_warnings.setter
- def show_warnings(self, value):
- """
- show_warnings property setter
- """
- self._show_warnings = bool(value)
- # control warnings according to the desired setting
- control_warnings(show=self._show_warnings)
-
- @property
- def log_show_ms(self):
- """
- log_show_ms property
-
- Returns
- bool
- log_show_ms value
- """
- return self._log_show_ms
-
- @log_show_ms.setter
- def log_show_ms(self, value):
- """
- log_show_ms property setter
- """
- self._log_show_ms = bool(value)
-
- self.reset_log_strings()
-
- @property
- def log_pref(self):
- """
- log_pref property
-
- Returns
- -------
- str
- log_pref value
- """
- return self._log_pref
-
- @property
- def log_div(self):
- """
- log_div property
-
- Returns
- -------
- str
- log_div value
- """
- return self._log_div
-
- @property
- def log_time_format(self):
- """
- log_time_format property
- """
- return self._log_time_format
-
- @property
- def log_file(self):
- """
- log_file property
- """
- return self._log_file
-
- @log_file.setter
- def log_file(self, value):
- """
- log_file property setter
- """
-
- if value is None:
- self._log_file = None
+ self.log_show_ms = bool(log_show_ms)
+ if log_file is None:
+ self.log_file = None
+ self.warning_file = None
else:
- try:
- filepath = Path(value).resolve()
-
- self._log_file = str(filepath)
-
- with open(filepath, 'w', encoding='utf-8') as f:
- f.write('')
-
- except BaseException as err:
- print(
- f"WARNING: The filepath provided for the log file does "
- f"not point to a valid location: {value}. \nPelicun "
- f"cannot print the log to a file.\n"
- f"The error was: '{err}'"
- )
- raise
-
- @property
- def print_log(self):
- """
- print_log property
- """
- return self._print_log
-
- @print_log.setter
- def print_log(self, value):
- """
- print_log property setter
- """
- self._print_log = str2bool(value)
+ path = Path(log_file)
+ self.log_file = str(path.resolve())
+ name, extension = split_file_name(self.log_file)
+ self.warning_file = (
+ path.parent / (name + '_warnings' + extension)
+ ).resolve()
+ with Path(self.log_file).open('w', encoding='utf-8') as f:
+ f.write('')
+ with Path(self.warning_file).open('w', encoding='utf-8') as f:
+ f.write('')
+
+ self.print_log = str2bool(print_log)
+ self.warning_stack: list[str] = []
+ self.emitted: set[str] = set()
+ self.reset_log_strings()
+ control_warnings()
- def reset_log_strings(self):
- """
- Populates the string-related attributes of the logger
- """
+ # Register the logger to the LoggerRegistry in order to
+ # capture raised exceptions.
+ LoggerRegistry.register(self)
- if self._log_show_ms:
- self._log_time_format = '%H:%M:%S:%f'
+ def reset_log_strings(self) -> None:
+ """Populate the string-related attributes of the logger."""
+ if self.log_show_ms:
+ self.log_time_format = '%H:%M:%S:%f'
# the length of the time string in the log file
- self._log_pref = ' ' * 16
+ self.spaces = ' ' * 16
# to have a total length of 80 with the time added
- self._log_div = '-' * (80 - 17)
+ self.log_div = '-' * (80 - 17)
else:
- self._log_time_format = '%H:%M:%S'
- self._log_pref = ' ' * 9
- self._log_div = '-' * (80 - 10)
-
- def msg(self, msg='', prepend_timestamp=True, prepend_blank_space=True):
+ self.log_time_format = '%H:%M:%S'
+ self.spaces = ' ' * 9
+ self.log_div = '-' * (80 - 10)
+
+ def msg(
+ self,
+ msg: str = '',
+ *,
+ prepend_timestamp: bool = True,
+ prepend_blank_space: bool = True,
+ ) -> None:
"""
- Writes a message in the log file with the current time as prefix
+ Write a message in the log file with the current time as prefix.
The time is in ISO-8601 format, e.g. 2018-06-16T20:24:04Z
@@ -520,51 +408,91 @@ def msg(self, msg='', prepend_timestamp=True, prepend_blank_space=True):
Controls whether blank space is placed before the message.
"""
-
- # pylint: disable = consider-using-f-string
msg_lines = msg.split('\n')
for msg_i, msg_line in enumerate(msg_lines):
if prepend_timestamp and (msg_i == 0):
- formatted_msg = '{} {}'.format(
- datetime.now().strftime(self.log_time_format), msg_line
+ formatted_msg = (
+ f'{datetime.now().strftime(self.log_time_format)} {msg_line}' # noqa: DTZ005
)
- elif prepend_timestamp:
- formatted_msg = self.log_pref + msg_line
- elif prepend_blank_space:
- formatted_msg = self.log_pref + msg_line
+ elif prepend_timestamp or prepend_blank_space:
+ formatted_msg = self.spaces + msg_line
else:
formatted_msg = msg_line
if self.print_log:
- print(formatted_msg)
+ print(formatted_msg) # noqa: T201
if self.log_file is not None:
- with open(self.log_file, 'a', encoding='utf-8') as f:
+ with Path(self.log_file).open('a', encoding='utf-8') as f:
f.write('\n' + formatted_msg)
- def div(self, prepend_timestamp=False):
- """
- Adds a divider line in the log file
+ def add_warning(self, msg: str) -> None:
"""
+ Add a warning to the warning stack.
- if prepend_timestamp:
- msg = self.log_div
- else:
- msg = '-' * 80
- self.msg(msg, prepend_timestamp=prepend_timestamp)
+ Notes
+ -----
+ Warnings are only emitted when `emit_warnings` is called.
+
+ Parameters
+ ----------
+ msg: str
+ The warning message.
- def print_system_info(self):
"""
- Writes system information in the log.
+ msg_lines = msg.split('\n')
+ formatted_msg = '\n'
+ for msg_line in msg_lines:
+ formatted_msg += (
+ self.spaces + Fore.RED + msg_line + Style.RESET_ALL + '\n'
+ )
+ if formatted_msg not in self.warning_stack:
+ self.warning_stack.append(formatted_msg)
+
+ def emit_warnings(self) -> None:
+ """Issues all warnings and clears the warning stack."""
+ for message in self.warning_stack:
+ if message not in self.emitted:
+ warnings.warn(message, PelicunWarning, stacklevel=3)
+ if self.warning_file is not None:
+ with Path(self.warning_file).open('a', encoding='utf-8') as f:
+ f.write(
+ message.replace(Fore.RED, '')
+ .replace(Style.RESET_ALL, '')
+ .replace(self.spaces, '')
+ )
+
+ self.emitted = self.emitted.union(set(self.warning_stack))
+ self.warning_stack = []
+
+ def warning(self, msg: str) -> None:
+ """
+        Add and emit a warning immediately.
+
+ Parameters
+ ----------
+ msg: str
+ Warning message
+
"""
+ self.add_warning(msg)
+ self.emit_warnings()
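+
+    # Editor's note (usage sketch): warnings can be batched, de-duplicated,
+    # and emitted once:
+    #
+    # >>> log.add_warning('Demand sample is empty.')
+    # >>> log.add_warning('Demand sample is empty.')  # stored only once
+    # >>> log.emit_warnings()  # a single PelicunWarning is issued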
+
+ def div(self, *, prepend_timestamp: bool = False) -> None:
+ """Add a divider line in the log file."""
+ msg = self.log_div if prepend_timestamp else '-' * 80
+ self.msg(msg, prepend_timestamp=prepend_timestamp)
+ def print_system_info(self) -> None:
+ """Write system information in the log."""
self.msg(
'System Information:', prepend_timestamp=False, prepend_blank_space=False
)
+ start = datetime.now().strftime('%Y-%m-%dT%H:%M:%S') # noqa: DTZ005
self.msg(
- f'local time zone: {datetime.utcnow().astimezone().tzinfo}\n'
- f'start time: {datetime.now().strftime("%Y-%m-%dT%H:%M:%S")}\n'
+ f'local time zone: {datetime.now(timezone.utc).astimezone().tzinfo}\n'
+ f'start time: {start}\n'
f'python: {sys.version}\n'
f'numpy: {np.__version__}\n'
f'pandas: {pd.__version__}\n',
@@ -573,54 +501,95 @@ def print_system_info(self):
# get the absolute path of the pelicun directory
-pelicun_path = Path(os.path.dirname(os.path.abspath(__file__)))
+pelicun_path = Path(__file__).resolve().parent
-def control_warnings(show):
+def split_file_name(file_path: str) -> tuple[str, str]:
"""
- Convenience function to turn warnings on/off
+ Separate a file name from the extension.
+
+    Separates a file name from the extension, accounting for the case
+ where the file name itself contains periods.
Parameters
----------
- show: bool
- If True, warnings are set to the default level. If False,
- warnings are ignored.
+ file_path: str
+ Original file path.
+
+ Returns
+ -------
+ tuple
+ name: str
+ Name of the file.
+ extension: str
+ File extension.
"""
- if show:
- action = 'default'
- else:
- action = 'ignore'
+ path = Path(file_path)
+ name = path.stem
+ extension = path.suffix
+ return name, extension
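+
+# Editor's sketch: interior periods stay in the name:
+#
+# >>> split_file_name('my.log.file.txt')
+# ('my.log.file', '.txt')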
+
+
+def control_warnings() -> None:
+ """
+    Suppress specific warnings.
+
+    See also: `pelicun/pytest.ini`. Devs: make sure to update that
+    file when addressing & eliminating warnings.
+
+ """
if not sys.warnoptions:
- warnings.filterwarnings(category=FutureWarning, action=action)
+ # Here we specify *specific* warnings to ignore.
+ # 'message' -- a regex that the warning message must match
- warnings.filterwarnings(category=DeprecationWarning, action=action)
+ # Note: we ignore known warnings emitted from our dependencies
+ # and plan to address them soon.
- warnings.filterwarnings(category=pd.errors.PerformanceWarning, action=action)
+ warnings.filterwarnings(
+ action='ignore', message='.*Use to_numeric without passing `errors`.*'
+ )
+ warnings.filterwarnings(
+ action='ignore', message=".*errors='ignore' is deprecated.*"
+ )
+ warnings.filterwarnings(
+ action='ignore',
+ message='.*The previous implementation of stack is deprecated.*',
+ )
+ warnings.filterwarnings(
+ action='ignore',
+ message='.*Setting an item of incompatible dtype is deprecated.*',
+ )
+ warnings.filterwarnings(
+ action='ignore',
+ message='.*DataFrame.groupby with axis=1 is deprecated.*',
+ )
-def load_default_options():
+def load_default_options() -> dict:
"""
- Load the default_config.json file to set options to default values
+ Load the default_config.json file to set options to default values.
Returns
-------
dict
Default options
- """
- with open(
- pelicun_path / "settings/default_config.json", 'r', encoding='utf-8'
+ """
+ with Path(pelicun_path / 'settings/default_config.json').open(
+ encoding='utf-8'
) as f:
default_config = json.load(f)
- default_options = default_config['Options']
- return default_options
+ return default_config['Options']
-def update_vals(update, primary, update_path, primary_path):
+def update_vals(
+ update_value: dict, primary: dict, update_path: str, primary_path: str
+) -> None:
"""
+ Transfer values between nested dictionaries.
+
Updates the values of the `update` nested dictionary with
those provided in the `primary` nested dictionary. If a key
already exists in update, and does not map to another
@@ -628,7 +597,7 @@ def update_vals(update, primary, update_path, primary_path):
Parameters
----------
- update: dict
+ update_value: dict
Dictionary -which can contain nested dictionaries- to be
updated based on the values of `primary`. New keys existing
in `primary` are added to `update`. Values of which keys
@@ -649,62 +618,56 @@ def update_vals(update, primary, update_path, primary_path):
If primary[key] is dict but update[key] is not.
ValueError
If update[key] is dict but primary[key] is not.
- """
-
- # pylint: disable=else-if-used
- # (`consider using elif`)
+ """
# we go over the keys of `primary`
- for key in primary:
+ for key in primary: # noqa: PLC0206
# if `primary[key]` is a dictionary:
if isinstance(primary[key], dict):
# if the same `key` does not exist in update,
# we associate it with an empty dictionary.
- if key not in update:
- update[key] = {}
+ if key not in update_value:
+ update_value[key] = {}
# if it exists already, it should map to
# a dictionary.
- elif not isinstance(update[key], dict):
- raise ValueError(
+ elif not isinstance(update_value[key], dict):
+ msg = (
f'{update_path}["{key}"] '
'should map to a dictionary. '
'The specified value is '
- f'{update_path}["{key}"] = {update[key]}, but '
+ f'{update_path}["{key}"] = {update_value[key]}, but '
f'the default value is '
f'{primary_path}["{key}"] = {primary[key]}. '
f'Please revise {update_path}["{key}"].'
)
- # With both being dictionaries, we recurse.
+ raise ValueError(msg)
+ # With both being dictionaries, we use recursion.
update_vals(
- update[key],
+ update_value[key],
primary[key],
f'{update_path}["{key}"]',
f'{primary_path}["{key}"]',
)
# if `primary[key]` is NOT a dictionary:
- else:
- # if `key` does not exist in `update`, we add it, with
- # its corresponding value.
- if key not in update:
- update[key] = primary[key]
- else:
- # key exists in update and should be left alone,
- # but we must check that it's not a dict here:
- if isinstance(update[key], dict):
- raise ValueError(
- f'{update_path}["{key}"] '
- 'should not map to a dictionary. '
- f'The specified value is '
- f'{update_path}["{key}"] = {update[key]}, but '
- f'the default value is '
- f'{primary_path}["{key}"] = {primary[key]}. '
- f'Please revise {update_path}["{key}"].'
- )
- # pylint: enable=else-if-used
-
-
-def merge_default_config(user_config):
+ elif key not in update_value:
+ update_value[key] = primary[key]
+ elif isinstance(update_value[key], dict):
+ msg = (
+ f'{update_path}["{key}"] '
+ 'should not map to a dictionary. '
+ f'The specified value is '
+ f'{update_path}["{key}"] = {update_value[key]}, but '
+ f'the default value is '
+ f'{primary_path}["{key}"] = {primary[key]}. '
+ f'Please revise {update_path}["{key}"].'
+ )
+ raise ValueError(msg)
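+
+# Editor's sketch of the merge behavior (made-up keys): user-specified
+# values are preserved and missing entries are filled from the defaults:
+#
+# >>> user = {'Verbose': True}
+# >>> update_vals(user, {'Verbose': False, 'PrintLog': True}, 'user', 'default')
+# >>> user == {'Verbose': True, 'PrintLog': True}
+# True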
+
+
+def merge_default_config(user_config: dict | None) -> dict:
"""
+ Merge default config with user's options.
+
Merge the user-specified config with the configuration defined in
the default_config.json file. If the user-specified config does
not include some option available in the default options, then the
@@ -719,8 +682,8 @@ def merge_default_config(user_config):
-------
dict
Merged configuration dictionary
- """
+ """
config = user_config # start from the user's config
default_config = load_default_options()
@@ -735,9 +698,28 @@ def merge_default_config(user_config):
return config
-def convert_to_SimpleIndex(data, axis=0, inplace=False):
+# https://stackoverflow.com/questions/52445559/
+# how-can-i-type-hint-a-function-where-the-
+# return-type-depends-on-the-input-type-o
+
+
+@overload
+def convert_to_SimpleIndex(
+ data: pd.DataFrame, axis: int = 0, *, inplace: bool = False
+) -> pd.DataFrame: ...
+
+
+@overload
+def convert_to_SimpleIndex(
+ data: pd.Series, axis: int = 0, *, inplace: bool = False
+) -> pd.Series: ...
+
+
+def convert_to_SimpleIndex( # noqa: N802
+ data: pd.DataFrame | pd.Series, axis: int = 0, *, inplace: bool = False
+) -> pd.DataFrame | pd.Series:
"""
- Converts the index of a DataFrame to a simple, one-level index
+ Convert the index of a DataFrame to a simple, one-level index.
The target index uses standard SimCenter convention to identify
different levels: a dash character ('-') is used to separate each
@@ -763,49 +745,61 @@ def convert_to_SimpleIndex(data, axis=0, inplace=False):
------
ValueError
When an invalid axis parameter is specified
- """
+ """
if axis in {0, 1}:
- if inplace:
- data_mod = data
- else:
- data_mod = data.copy()
+ data_mod = data if inplace else data.copy()
if axis == 0:
# only perform this if there are multiple levels
if data.index.nlevels > 1:
simple_name = '-'.join(
- [n if n is not None else "" for n in data.index.names]
+ [n if n is not None else '' for n in data.index.names]
)
simple_index = [
- '-'.join([str(id_i) for id_i in id]) for id in data.index
+ '-'.join([str(id_i) for id_i in idx]) for idx in data.index
]
- data_mod.index = simple_index
+ data_mod.index = pd.Index(simple_index, name=simple_name)
data_mod.index.name = simple_name
elif axis == 1:
# only perform this if there are multiple levels
if data.columns.nlevels > 1:
simple_name = '-'.join(
- [n if n is not None else "" for n in data.columns.names]
+ [n if n is not None else '' for n in data.columns.names]
)
simple_index = [
- '-'.join([str(id_i) for id_i in id]) for id in data.columns
+ '-'.join([str(id_i) for id_i in idx]) for idx in data.columns
]
- data_mod.columns = simple_index
+ data_mod.columns = pd.Index(simple_index, name=simple_name)
data_mod.columns.name = simple_name
else:
- raise ValueError(f"Invalid axis parameter: {axis}")
+ msg = f'Invalid axis parameter: {axis}'
+ raise ValueError(msg)
return data_mod
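+
+# Editor's sketch (hypothetical labels): multi-level labels are joined
+# with dashes:
+#
+# >>> df = pd.DataFrame([0.1], index=pd.MultiIndex.from_tuples([('PFA', '1')]))
+# >>> convert_to_SimpleIndex(df).index.tolist()
+# ['PFA-1']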
-def convert_to_MultiIndex(data, axis=0, inplace=False):
+@overload
+def convert_to_MultiIndex(
+ data: pd.DataFrame, axis: int = 0, *, inplace: bool = False
+) -> pd.DataFrame: ...
+
+
+@overload
+def convert_to_MultiIndex(
+ data: pd.Series, axis: int = 0, *, inplace: bool = False
+) -> pd.Series: ...
+
+
+def convert_to_MultiIndex( # noqa: N802
+ data: pd.DataFrame | pd.Series, axis: int = 0, *, inplace: bool = False
+) -> pd.DataFrame | pd.Series:
"""
- Converts the index of a DataFrame to a MultiIndex
+ Convert the index of a DataFrame to a MultiIndex.
We assume that the index uses standard SimCenter convention to
identify different levels: a dash character ('-') is expected to
@@ -831,8 +825,8 @@ def convert_to_MultiIndex(data, axis=0, inplace=False):
------
ValueError
If an invalid axis is specified.
- """
+ """
# check if the requested axis is already a MultiIndex
if ((axis == 0) and (isinstance(data.index, pd.MultiIndex))) or (
(axis == 1) and (isinstance(data.columns, pd.MultiIndex))
@@ -847,41 +841,38 @@ def convert_to_MultiIndex(data, axis=0, inplace=False):
index_labels = [str(label).split('-') for label in data.columns]
else:
- raise ValueError(f"Invalid axis parameter: {axis}")
+ msg = f'Invalid axis parameter: {axis}'
+ raise ValueError(msg)
max_lbl_len = np.max([len(labels) for labels in index_labels])
for l_i, labels in enumerate(index_labels):
if len(labels) != max_lbl_len:
- labels += [
- '',
- ] * (max_lbl_len - len(labels))
+ labels += [''] * (max_lbl_len - len(labels)) # noqa: PLW2901
index_labels[l_i] = labels
- index_labels = np.array(index_labels)
+ index_labels_np = np.array(index_labels)
- if index_labels.shape[1] > 1:
- if inplace:
- data_mod = data
- else:
- data_mod = data.copy()
+ if index_labels_np.shape[1] > 1:
+ data_mod = data if inplace else data.copy()
if axis == 0:
- data_mod.index = pd.MultiIndex.from_arrays(index_labels.T)
+ data_mod.index = pd.MultiIndex.from_arrays(index_labels_np.T)
else:
- data_mod.columns = pd.MultiIndex.from_arrays(index_labels.T)
+ data_mod.columns = pd.MultiIndex.from_arrays(index_labels_np.T)
return data_mod
return data
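+
+# Editor's sketch: the inverse operation splits dashed labels into levels:
+#
+# >>> convert_to_MultiIndex(pd.DataFrame([0.1], index=['PFA-1'])).index.tolist()
+# [('PFA', '1')]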
-def convert_dtypes(dataframe):
+def convert_dtypes(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
- Convert columns to a numeric datatype whenever possible. The
- function replaces None with NA otherwise columns containing None
- would continue to have the `object` type
+ Convert columns to a numeric datatype whenever possible.
+
+ The function replaces None with NA otherwise columns containing
+ None would continue to have the `object` type.
Parameters
----------
@@ -894,7 +885,11 @@ def convert_dtypes(dataframe):
The modified DataFrame.
"""
- dataframe.fillna(value=np.nan, inplace=True)
+ with (
+ pd.option_context('future.no_silent_downcasting', True), # noqa: FBT003
+ pd.option_context('mode.copy_on_write', True), # noqa: FBT003
+ ):
+ dataframe = dataframe.fillna(value=np.nan).infer_objects()
# note: `axis=0` applies the function to the columns
# note: ignoring errors is a bad idea and should never be done. In
# this case, however, that's not what we do, despite the name of
@@ -903,28 +898,115 @@ def convert_dtypes(dataframe):
# `errors='ignore'` does.
# See:
# https://pandas.pydata.org/docs/reference/api/pandas.to_numeric.html
- return dataframe.apply(lambda x: pd.to_numeric(x, errors='ignore'), axis=0)
+ return dataframe.apply(
+ lambda x: pd.to_numeric(x, errors='ignore'), # type:ignore
+ axis=0,
+ )
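+
+# Editor's sketch: numeric-looking object columns become numeric, while
+# genuinely non-numeric columns keep the object dtype:
+#
+# >>> out = convert_dtypes(pd.DataFrame({'a': ['1', '2'], 'b': ['x', None]}))
+# >>> str(out.dtypes['a'])
+# 'int64'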
-def show_matrix(data, use_describe=False):
+def show_matrix(
+ data: np.ndarray | pd.DataFrame, *, use_describe: bool = False
+) -> None:
"""
Print a matrix in a nice way using a DataFrame.
+
Parameters
----------
- data : array-like
- The matrix data to display. Can be any array-like structure that pandas can convert to a DataFrame.
- use_describe : bool, default: False
- If True, provides a descriptive statistical summary of the matrix including specified percentiles.
+ data: array-like
+ The matrix data to display. Can be any array-like structure
+ that pandas can convert to a DataFrame.
+ use_describe: bool, default: False
+ If True, provides a descriptive statistical summary of the
+ matrix including specified percentiles.
If False, simply prints the matrix as is.
+
"""
if use_describe:
- pp.pprint(pd.DataFrame(data).describe(percentiles=[0.01, 0.1, 0.5, 0.9, 0.99]))
+ pp.pprint(
+ pd.DataFrame(data).describe(percentiles=[0.01, 0.1, 0.5, 0.9, 0.99])
+ )
else:
pp.pprint(pd.DataFrame(data))
-def _warning(message, category, filename, lineno, file=None, line=None):
+def multiply_factor_multiple_levels(
+ df: pd.DataFrame,
+ conditions: dict,
+ factor: float,
+ axis: int = 0,
+ *,
+ raise_missing: bool = True,
+) -> None:
"""
+    Multiply selected rows by a factor, in place.
+
+    Multiplies selected rows of a DataFrame that is indexed with a
+    hierarchical index (pd.MultiIndex) by a given factor. The change
+    is done in place.
+
+ Parameters
+ ----------
+ df: pd.DataFrame
+ The DataFrame to be modified.
+ conditions: dict
+ A dictionary mapping level names with a single value. Only the
+ rows where the index levels have the provided values will be
+ affected. The dictionary can be empty, in which case all rows
+ will be affected, or contain only some levels and values, in
+ which case only the matching rows will be affected.
+ factor: float
+ Scaling factor to use.
+ axis: int
+ With 0 the condition is checked against the DataFrame's index,
+ otherwise with 1 it is checked against the DataFrame's
+ columns.
+ raise_missing: bool
+        Raise an error if no rows match the given conditions.
+
+ Raises
+ ------
+ ValueError
+        If the provided `axis` value is neither 0 nor 1.
+ ValueError
+ If there are no rows matching the conditions and raise_missing
+ is True.
+
+ """
+ if axis == 0:
+ idx_to_use = df.index
+ elif axis == 1:
+ idx_to_use = df.columns
+ else:
+ msg = f'Invalid axis: `{axis}`'
+ raise ValueError(msg)
+
+ mask = pd.Series(data=True, index=idx_to_use)
+
+ # Apply each condition to update the mask
+ for level, value in conditions.items():
+ mask &= idx_to_use.get_level_values(level) == value
+
+ if np.all(mask == False) and raise_missing: # noqa: E712
+ msg = f'No rows found matching the conditions: `{conditions}`'
+ raise ValueError(msg)
+
+ if axis == 0:
+ df.iloc[mask.to_numpy()] *= factor
+ else:
+ df.iloc[:, mask.to_numpy()] *= factor
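+
+# Editor's sketch (made-up demand table): only rows whose 'type' level
+# matches the condition are scaled:
+#
+# >>> mi = pd.MultiIndex.from_tuples(
+# ...     [('PFA', '1'), ('PID', '1')], names=['type', 'loc'])
+# >>> df = pd.DataFrame({'value': [1.0, 1.0]}, index=mi)
+# >>> multiply_factor_multiple_levels(df, {'type': 'PFA'}, 2.0)
+# >>> df['value'].tolist()
+# [2.0, 1.0]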
+
+
+def _warning(
+ message: str,
+ category: type[Warning],
+ filename: str,
+ lineno: int,
+ file: Any = None, # noqa: ARG001, ANN401
+ line: Any = None, # noqa: ARG001, ANN401
+) -> None:
+ """
+ Display warnings in a custom format.
+
Custom warning function to format and print warnings more
attractively. This function modifies how warning messages are
displayed, emphasizing the file path and line number from where
@@ -932,49 +1014,61 @@ def _warning(message, category, filename, lineno, file=None, line=None):
Parameters
----------
- message : str
+ message: str
The warning message to be displayed.
- category : Warning
+ category: Warning
The category of the warning (unused, but required for
compatibility with standard warning signature).
- filename : str
+ filename: str
The path of the file from which the warning is issued. The
function simplifies the path for display.
- lineno : int
+ lineno: int
The line number in the file at which the warning is issued.
- file : file-like object, optional
+ file: file-like object, optional
The target file object to write the warning to (unused, but
required for compatibility with standard warning signature).
- line : str, optional
+ line: str, optional
Line of code causing the warning (unused, but required for
compatibility with standard warning signature).
+
"""
# pylint:disable = unused-argument
- if '\\' in filename:
- file_path = filename.split('\\')
- elif '/' in filename:
- file_path = filename.split('/')
- else:
- file_path = None
+ if category != PelicunWarning:
+ if '\\' in filename:
+ file_path = filename.split('\\')
+ elif '/' in filename:
+ file_path = filename.split('/')
+ else:
+ file_path = None
- if file_path is not None:
- python_file = '/'.join(file_path[-3:])
+ python_file = '/'.join(file_path[-3:]) if file_path is not None else filename
+ print(f'WARNING in {python_file} at line {lineno}\n{message}\n') # noqa: T201
else:
- python_file = filename
-
- print(f'WARNING in {python_file} at line {lineno}\n{message}\n')
+ print(message) # noqa: T201
-warnings.showwarning = _warning
+warnings.showwarning = _warning # type: ignore
def describe(
- df, percentiles=(0.001, 0.023, 0.10, 0.159, 0.5, 0.841, 0.90, 0.977, 0.999)
-):
+ data: pd.DataFrame | pd.Series | np.ndarray,
+ percentiles: tuple[float, ...] = (
+ 0.001,
+ 0.023,
+ 0.10,
+ 0.159,
+ 0.5,
+ 0.841,
+ 0.90,
+ 0.977,
+ 0.999,
+ ),
+) -> pd.DataFrame:
"""
+ Extend descriptive statistics.
+
Provides extended descriptive statistics for given data, including
percentiles and log standard deviation for applicable columns.
-
This function accepts both pandas Series and DataFrame objects
directly, or any array-like structure which can be converted to
them. It calculates common descriptive statistics and optionally
@@ -983,10 +1077,10 @@ def describe(
Parameters
----------
- df : pd.Series, pd.DataFrame, or array-like
+ data: pd.Series, pd.DataFrame, or array-like
The data to describe. If array-like, it is converted to a
DataFrame or Series before analysis.
- percentiles : tuple of float, optional
+ percentiles: tuple of float, optional
Specific percentiles to include in the output. Default
includes an extensive range tailored to provide a detailed
summary.
@@ -996,37 +1090,37 @@ def describe(
pd.DataFrame
A DataFrame containing the descriptive statistics of the input
data, transposed so that each descriptive statistic is a row.
+
"""
- if not isinstance(df, (pd.Series, pd.DataFrame)):
- vals = df
- cols = np.arange(vals.shape[1]) if vals.ndim > 1 else 0
+ if isinstance(data, np.ndarray):
+ vals = data
if vals.ndim == 1:
- df = pd.Series(vals, name=cols)
+ data = pd.Series(vals, name=0)
else:
- df = pd.DataFrame(vals, columns=cols)
+ cols = np.arange(vals.shape[1])
+ data = pd.DataFrame(vals, columns=cols)
- # cast Series into a DataFrame
- if isinstance(df, pd.Series):
- df = pd.DataFrame(df)
+ # convert Series to a DataFrame
+ if isinstance(data, pd.Series):
+ data = pd.DataFrame(data)
- desc = df.describe(percentiles).T
+ desc = pd.DataFrame(data.describe(list(percentiles)).T)
# add log standard deviation to the stats
- desc.insert(3, "log_std", np.nan)
+ desc.insert(3, 'log_std', np.nan)
desc = desc.T
for col in desc.columns:
- if np.min(df[col]) > 0.0:
- desc.loc['log_std', col] = np.std(np.log(df[col]), ddof=1)
+ if np.min(data[col]) > 0.0:
+ desc.loc['log_std', col] = np.std(np.log(data[col]), ddof=1)
return desc
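+
+# Editor's sketch: strictly positive data also receive a 'log_std' row:
+#
+# >>> out = describe(np.array([1.0, 2.0, 4.0]))
+# >>> 'log_std' in out.index
+# True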
-def str2bool(v):
+def str2bool(v: str | bool) -> bool: # noqa: FBT001
"""
- Converts a string representation of truth to boolean True or
- False.
+ Convert a string representation of truth to boolean True or False.
This function is designed to convert string inputs that represent
boolean values into actual Python boolean types. It handles
@@ -1035,7 +1129,7 @@ def str2bool(v):
Parameters
----------
- v : str or bool
+ v: str or bool
The value to convert into a boolean. This can be a boolean
itself (in which case it is simply returned) or a string that
is expected to represent a boolean value.
@@ -1051,8 +1145,9 @@ def str2bool(v):
If `v` is a string that does not correspond to a boolean
value, an error is raised indicating that a boolean value was
expected.
+
"""
- # courtesy of Maxim @ stackoverflow
+    # courtesy of Maxim @ Stack Overflow
if isinstance(v, bool):
return v
@@ -1060,13 +1155,13 @@ def str2bool(v):
return True
if v.lower() in {'no', 'false', 'False', 'f', 'n', '0'}:
return False
- raise argparse.ArgumentTypeError('Boolean value expected.')
+ msg = 'Boolean value expected.'
+ raise argparse.ArgumentTypeError(msg)
-def float_or_None(string):
+def float_or_None(string: str) -> float | None: # noqa: N802
"""
- This is a convenience function for converting strings to float or
- None
+ Convert strings to float or None.
Parameters
----------
@@ -1078,18 +1173,17 @@ def float_or_None(string):
float or None
A float, if the given string can be converted to a
float. Otherwise, it returns None
+
"""
try:
- res = float(string)
- return res
+ return float(string)
except ValueError:
return None
-def int_or_None(string):
+def int_or_None(string: str) -> int | None: # noqa: N802
"""
- This is a convenience function for converting strings to int or
- None
+ Convert strings to int or None.
Parameters
----------
@@ -1101,68 +1195,82 @@ def int_or_None(string):
int or None
An int, if the given string can be converted to an
int. Otherwise, it returns None
+
"""
try:
- res = int(string)
- return res
+ return int(string)
except ValueError:
return None
-def process_loc(string, stories):
+def check_if_str_is_na(string: Any) -> bool: # noqa: ANN401
"""
- Parses the 'location' parameter from input to determine the
- specific locations to be processed. This function interprets
- various string formats to output a list of integers representing
- locations.
+ Check if the provided string can be interpreted as N/A.
Parameters
----------
- string : str
- A string that describes the location or range of locations of
- the asset. It can be a single number, a range (e.g., '3-7'),
- 'all', 'top', 'roof', or 'last'.
- stories : int
- The total number of locations in the asset, used to interpret
- relative terms like 'top' or 'roof', or to generate a range
- for 'all'.
+ string: object
+ The string to evaluate
Returns
-------
- list of int or None
- A list of integers representing each floor specified by the
- string. Returns None if the string does not conform to
- expected formats.
+ bool
+        The evaluation result: True if the string is considered N/A.
+
+    """
+ na_vals = {
+ '',
+ 'N/A',
+ '-1.#QNAN',
+ 'null',
+ 'None',
+        '<NA>',
+ 'nan',
+ '-NaN',
+ '1.#IND',
+ 'NaN',
+ '#NA',
+ '1.#QNAN',
+ 'NULL',
+ '-nan',
+ '#N/A',
+ '#N/A N/A',
+ 'n/a',
+ '-1.#IND',
+ 'NA',
+ }
+ # obtained from Pandas' internal STR_NA_VALUES variable.
+
+ return isinstance(string, str) and string in na_vals
+
+
+def with_parsed_str_na_values(df: pd.DataFrame) -> pd.DataFrame:
+ """
+ Identify string values interpretable as N/A.
- Raises
- ------
- ValueError
- Raises an exception if the string contains a range that is not
- interpretable (e.g., non-integer values or logical
- inconsistencies in the range).
+ Given a dataframe, this function identifies values that have
+ string type and can be interpreted as N/A, and replaces them with
+ actual NA's.
+
+ Parameters
+ ----------
+ df: pd.DataFrame
+ Dataframe to process
+
+ Returns
+ -------
+ pd.DataFrame
+ The dataframe with proper N/A values.
"""
- try:
- res = int(string)
- return [
- res,
- ]
- except ValueError as exc:
- if "-" in string:
- s_low, s_high = string.split('-')
- s_low = process_loc(s_low, stories)
- s_high = process_loc(s_high, stories)
- return list(range(s_low[0], s_high[0] + 1))
- if string == "all":
- return list(range(1, stories + 1))
- if string in {"top", "roof", "last"}:
- return [
- stories,
- ]
- raise ValueError(f'Invalid string: {string}') from exc
-
-
-def dedupe_index(dataframe, dtype=str):
+ # Replace string NA values with actual NaNs
+ return df.apply(
+ lambda col: col.map(lambda x: np.nan if check_if_str_is_na(x) else x)
+ )
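+
+# Editor's sketch (made-up table): recognized N/A strings become NaN:
+#
+# >>> df = pd.DataFrame({'a': ['N/A', '1.0', 'n/a']})
+# >>> int(with_parsed_str_na_values(df)['a'].isna().sum())
+# 2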
+
+
+def dedupe_index(dataframe: pd.DataFrame, dtype: type = str) -> pd.DataFrame:
"""
+ Add a `uid` level to the index.
+
Modifies the index of a DataFrame to ensure all index elements are
unique by adding an extra level. Assumes that the DataFrame's
original index is a MultiIndex with specified names. A unique
@@ -1172,23 +1280,24 @@ def dedupe_index(dataframe, dtype=str):
Parameters
----------
- dataframe : pd.DataFrame
+ dataframe: pd.DataFrame
The DataFrame whose index is to be modified. It must have a
MultiIndex.
- dtype : type, optional
+ dtype: type, optional
The data type for the new index level 'uid'. Defaults to str.
- Notes
- -----
- This function changes the DataFrame in place, hence it does not
- return the DataFrame but modifies the original one provided.
+ Returns
+ -------
+ dataframe: pd.DataFrame
+        The original dataframe with an additional `uid` level in the
+        index.
"""
inames = dataframe.index.names
- dataframe.reset_index(inplace=True)
+ dataframe = dataframe.reset_index()
dataframe['uid'] = (dataframe.groupby([*inames]).cumcount()).astype(dtype)
- dataframe.set_index([*inames] + ['uid'], inplace=True)
- dataframe.sort_index(inplace=True)
+ dataframe = dataframe.set_index([*inames, 'uid'])
+ return dataframe.sort_index()
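+
+# Editor's sketch (hypothetical index): duplicate entries receive
+# distinct `uid` values:
+#
+# >>> mi = pd.MultiIndex.from_tuples(
+# ...     [('A', '1'), ('A', '1')], names=['cmp', 'loc'])
+# >>> dedupe_index(pd.DataFrame({'v': [1, 2]}, index=mi)).index.tolist()
+# [('A', '1', '0'), ('A', '1', '1')]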
# Input specs
@@ -1231,18 +1340,19 @@ def dedupe_index(dataframe, dtype=str):
}
-def dict_raise_on_duplicates(ordered_pairs):
+def dict_raise_on_duplicates(ordered_pairs: list[tuple]) -> dict:
"""
+ Construct a dictionary from a list of key-value pairs.
+
Constructs a dictionary from a list of key-value pairs, raising an
exception if duplicate keys are found.
-
This function ensures that no two pairs have the same key. It is
particularly useful when parsing JSON-like data where unique keys
are expected but not enforced by standard parsing methods.
Parameters
----------
- ordered_pairs : list of tuples
+ ordered_pairs: list of tuples
A list of tuples, each containing a key and a value. Keys are
expected to be unique across the list.
@@ -1269,17 +1379,20 @@ def dict_raise_on_duplicates(ordered_pairs):
-----
This implementation is useful for contexts in which data integrity
is crucial and key uniqueness must be ensured.
- """
+ """
d = {}
for k, v in ordered_pairs:
if k in d:
- raise ValueError(f"duplicate key: {k}")
+ msg = f'duplicate key: {k}'
+ raise ValueError(msg)
d[k] = v
return d
-def parse_units(custom_file=None, preserve_categories=False):
+def parse_units( # noqa: C901
+ custom_file: str | None = None, *, preserve_categories: bool = False
+) -> dict:
"""
Parse the unit conversion factor JSON file and return a dictionary.
@@ -1288,6 +1401,12 @@ def parse_units(custom_file=None, preserve_categories=False):
custom_file: str, optional
If a custom file is provided, only the units specified in the
custom file are used.
+ preserve_categories: bool, optional
+ If True, maintains the original data types of category
+ values from the JSON file. If False, converts all values
+ to floats and flattens the dictionary structure, ensuring
+ that each unit name is globally unique across categories.
+
Returns
-------
@@ -1299,20 +1418,12 @@ def parse_units(custom_file=None, preserve_categories=False):
`preserve_categories` is False, the dictionary is flattened
to have globally unique unit names.
- Raises
- ------
- KeyError
- If a key is defined twice.
- ValueError
- If a unit conversion factor is not a float.
- FileNotFoundError
- If a file does not exist.
- Exception
- If a file does not have the JSON format.
"""
- def get_contents(file_path, preserve_categories=False):
+ def get_contents(file_path: Path, *, preserve_categories: bool = False) -> dict: # noqa: C901
"""
+ Map unit names to conversion factors.
+
Parses a unit conversion factors JSON file and returns a
dictionary mapping unit names to conversion factors.
@@ -1325,10 +1436,10 @@ def get_contents(file_path, preserve_categories=False):
Parameters
----------
- file_path : str
+ file_path: str
The file path to a JSON file containing unit conversion
factors. If not provided, a default file is used.
- preserve_categories : bool, optional
+ preserve_categories: bool, optional
If True, maintains the original data types of category
values from the JSON file. If False, converts all values
to floats and flattens the dictionary structure, ensuring
@@ -1347,10 +1458,9 @@ def get_contents(file_path, preserve_categories=False):
FileNotFoundError
If the specified file does not exist.
ValueError
- If a unit name is duplicated, a conversion factor is not a
- float, or other JSON structure issues are present.
- json.decoder.JSONDecodeError
- If the file is not a valid JSON file.
+ If a unit name is duplicated or other JSON structure issues are present.
+ TypeError
+ If a conversion factor is not a float.
TypeError
If any value that needs to be converted to float cannot be
converted.
@@ -1362,30 +1472,35 @@ def get_contents(file_path, preserve_categories=False):
>>> parse_units('custom_units.json', preserve_categories=True)
{ 'Length': {'m': 1.0, 'cm': 0.01, 'mm': 0.001} }
+
"""
try:
- with open(file_path, 'r', encoding='utf-8') as f:
+ with Path(file_path).open(encoding='utf-8') as f:
dictionary = json.load(f, object_pairs_hook=dict_raise_on_duplicates)
except FileNotFoundError as exc:
- raise FileNotFoundError(f'{file_path} was not found.') from exc
+ msg = f'{file_path} was not found.'
+ raise FileNotFoundError(msg) from exc
except json.decoder.JSONDecodeError as exc:
- raise ValueError(f'{file_path} is not a valid JSON file.') from exc
+ msg = f'{file_path} is not a valid JSON file.'
+ raise ValueError(msg) from exc
for category_dict in list(dictionary.values()):
# ensure all first-level keys point to a dictionary
if not isinstance(category_dict, dict):
- raise ValueError(
+ msg = (
f'{file_path} contains first-level keys '
- 'that don\'t point to a dictionary'
+ "that don't point to a dictionary"
)
+ raise TypeError(msg)
# convert values to float
- for key, val in category_dict.items():
- try:
+ try:
+ for key, val in category_dict.items():
category_dict[key] = float(val)
- except (ValueError, TypeError) as exc:
- raise type(exc)(
- f'Unit {key} has a value of {val} '
- 'which cannot be interpreted as a float'
- ) from exc
+ except (ValueError, TypeError) as exc:
+ msg = (
+ f'Unit {key} has a value of {val} '
+ 'which cannot be interpreted as a float'
+ )
+ raise type(exc)(msg) from exc
if preserve_categories:
return dictionary
@@ -1394,27 +1509,31 @@ def get_contents(file_path, preserve_categories=False):
for category in dictionary:
for unit_name, factor in dictionary[category].items():
if unit_name in flattened:
- raise ValueError(f'{unit_name} defined twice in {file_path}.')
+ msg = f'{unit_name} defined twice in {file_path}.'
+ raise ValueError(msg)
flattened[unit_name] = factor
return flattened
if custom_file:
- return get_contents(custom_file, preserve_categories)
+ return get_contents(
+ Path(custom_file), preserve_categories=preserve_categories
+ )
return get_contents(
- pelicun_path / "settings/default_units.json", preserve_categories
+ pelicun_path / 'settings/default_units.json',
+ preserve_categories=preserve_categories,
)
-def convert_units(
+def convert_units( # noqa: C901
values: float | list[float] | np.ndarray,
unit: str,
to_unit: str,
category: str | None = None,
) -> float | list[float] | np.ndarray:
"""
- Converts numeric values between different units.
+ Convert numeric values between different units.
Supports conversion within a specified category of units and
automatically infers the category if not explicitly provided. It
@@ -1422,13 +1541,13 @@ def convert_units(
Parameters
----------
- values (float | list[float] | np.ndarray):
+ values: (float | list[float] | np.ndarray)
The numeric value(s) to convert.
- unit (str):
+ unit: (str)
The current unit of the values.
- to_unit (str):
+ to_unit: (str)
The target unit to convert the values into.
- category (Optional[str]):
+ category: (Optional[str])
The category of the units (e.g., 'length', 'pressure'). If not
provided, the category will be inferred based on the provided
units.
@@ -1449,13 +1568,13 @@ def convert_units(
and `to_unit` are not in the same category.
"""
-
if isinstance(values, (float, list)):
vals = np.atleast_1d(values)
elif isinstance(values, np.ndarray):
vals = values
else:
- raise TypeError('Invalid input type for `values`')
+ msg = 'Invalid input type for `values`'
+ raise TypeError(msg)
# load default units
all_units = parse_units(preserve_categories=True)
@@ -1463,11 +1582,13 @@ def convert_units(
# if a category is given use it, otherwise try to determine it
if category:
if category not in all_units:
- raise ValueError(f'Unknown category: `{category}`')
+ msg = f'Unknown category: `{category}`'
+ raise ValueError(msg)
units = all_units[category]
for unt in unit, to_unit:
if unt not in units:
- raise ValueError(f'Unknown unit: `{unt}`')
+ msg = f'Unknown unit: `{unt}`'
+ raise ValueError(msg)
else:
unit_category: str | None = None
for key in all_units:
@@ -1476,18 +1597,20 @@ def convert_units(
unit_category = key
break
if not unit_category:
- raise ValueError(f'Unknown unit `{unit}`')
+ msg = f'Unknown unit `{unit}`'
+ raise ValueError(msg)
units = all_units[unit_category]
if to_unit not in units:
- raise ValueError(
+ msg = (
f'`{unit}` is a `{unit_category}` unit, but `{to_unit}` '
f'is not specified in that category.'
)
+ raise ValueError(msg)
# convert units
from_factor = units[unit]
to_factor = units[to_unit]
- new_values = vals * from_factor / to_factor
+ new_values = vals * float(from_factor) / float(to_factor)
# return the results in the same type as that of the provided
# values
@@ -1496,3 +1619,274 @@ def convert_units(
if isinstance(values, list):
return new_values.tolist()
return new_values
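+
+# Editor's sketch, assuming 'm' and 'cm' are defined in
+# `settings/default_units.json` (as in the docstring example above):
+#
+# >>> float(convert_units(1.0, 'm', 'cm'))
+# 100.0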
+
+
+def stringterpolation(
+ arguments: str,
+) -> Callable[[np.ndarray], np.ndarray]:
+ """
+ Linear interpolation from strings.
+
+ Turns a string of specially formatted arguments into a multilinear
+ interpolating function.
+
+ Parameters
+ ----------
+ arguments: str
+ String of arguments containing Y values and X values,
+ separated by a pipe symbol (`|`). Individual values are
+ separated by commas (`,`). Example:
+ arguments = 'y1,y2,y3|x1,x2,x3'
+
+ Returns
+ -------
+ Callable
+ A callable interpolating function
+
+ """
+ split = arguments.split('|')
+ x_vals = split[1].split(',')
+ y_vals = split[0].split(',')
+ x = np.array(x_vals, dtype=float)
+ y = np.array(y_vals, dtype=float)
+
+ return interp1d(x=x, y=y, kind='linear')
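+
+# Editor's sketch of the 'y1,y2,...|x1,x2,...' format:
+#
+# >>> f = stringterpolation('0,1,0|0,0.5,1')
+# >>> float(f(0.25))
+# 0.5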
+
+
+def invert_mapping(original_dict: dict) -> dict:
+ """
+    Invert a dictionary mapping from key to list of values.
+
+ Parameters
+ ----------
+ original_dict: dict
+ Dictionary with values that are lists of hashable items.
+
+ Returns
+ -------
+ dict
+ New dictionary where each item in the original value lists
+ becomes a key and the original key becomes the corresponding
+ value.
+
+ Raises
+ ------
+ ValueError
+ If any value in the original dictionary's value lists appears
+ more than once.
+
+ """
+ inverted_dict = {}
+ for key, value_list in original_dict.items():
+ for value in value_list:
+ if value in inverted_dict:
+ msg = 'Cannot invert mapping with duplicate values.'
+ raise ValueError(msg)
+ inverted_dict[value] = key
+ return inverted_dict
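+
+# Editor's sketch:
+#
+# >>> invert_mapping({'EDP': ['PFA', 'PID']})
+# {'PFA': 'EDP', 'PID': 'EDP'}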
+
+
+def get(
+ d: dict | None,
+ path: str,
+ default: Any | None = None, # noqa: ANN401
+) -> Any: # noqa: ANN401
+ """
+ Path-like dictionary value retrieval.
+
+ Retrieves a value from a nested dictionary using a path with '/'
+ as the separator.
+
+ Parameters
+ ----------
+ d: dict
+ The dictionary to search.
+ path: str
+ The path to the desired value, with keys separated by '/'.
+ default: Any, optional
+ The value to return if the path is not found. Defaults to
+ None.
+
+ Returns
+ -------
+ Any
+ The value found at the specified path, or the default value if
+ the path is not found.
+
+ Examples
+ --------
+ >>> config = {
+ ... "DL": {
+ ... "Outputs": {
+ ... "Format": {
+ ... "JSON": "desired_value"
+ ... }
+ ... }
+ ... }
+ ... }
+ >>> get(config, '/DL/Outputs/Format/JSON', default='default_value')
+ 'desired_value'
+ >>> get(config, '/DL/Outputs/Format/XML', default='default_value')
+ 'default_value'
+
+ """
+ if d is None:
+ return default
+ keys = path.strip('/').split('/')
+ current_dict = d
+ try:
+ for key in keys:
+ current_dict = current_dict[key]
+ return current_dict # noqa: TRY300
+ except (KeyError, TypeError):
+ return default
+
+
+def update(
+ d: dict[str, Any],
+ path: str,
+ value: Any, # noqa: ANN401
+ *,
+ only_if_empty_or_none: bool = False,
+) -> None:
+ """
+ Set a value in a nested dictionary using a path with '/' as the separator.
+
+ Parameters
+ ----------
+ d: dict
+ The dictionary to update.
+ path: str
+ The path to the desired value, with keys separated by '/'.
+ value: Any
+ The value to set at the specified path.
+ only_if_empty_or_none: bool, optional
+ If True, only update the value if it is None or an empty
+ dictionary. Defaults to False.
+
+ Examples
+ --------
+ >>> d = {}
+ >>> update(d, 'x/y/z', 1)
+ >>> d
+ {'x': {'y': {'z': 1}}}
+
+    >>> update(d, 'x/y/z', 2, only_if_empty_or_none=True)
+    >>> d  # value remains 1 since it is not empty or None
+    {'x': {'y': {'z': 1}}}
+
+    >>> update(d, 'x/y/z', 2)
+    >>> d  # value is updated to 2
+    {'x': {'y': {'z': 2}}}
+
+ """
+ keys = path.strip('/').split('/')
+ current_dict = d
+ for key in keys[:-1]:
+ if key not in current_dict or not isinstance(current_dict[key], dict):
+ current_dict[key] = {}
+ current_dict = current_dict[key]
+ if only_if_empty_or_none:
+ if is_unspecified(current_dict, keys[-1]):
+ current_dict[keys[-1]] = value
+ else:
+ current_dict[keys[-1]] = value
+
+
+def is_unspecified(d: dict[str, Any], path: str) -> bool:
+ """
+    Check whether a value is unspecified.
+
+ Checks if a value in a nested dictionary is either non-existent,
+ None, NaN, or an empty dictionary or list.
+
+ Parameters
+ ----------
+ d: dict
+ The dictionary to search.
+ path: str
+ The path to the desired value, with keys separated by '/'.
+
+ Returns
+ -------
+ bool
+        True if the value is non-existent, None, NaN, or an empty
+        dictionary or list. False otherwise.
+
+ Examples
+ --------
+ >>> config = {
+ ... "DL": {
+ ... "Outputs": {
+ ... "Format": {
+ ... "JSON": "desired_value",
+ ... "EmptyDict": {}
+ ... }
+ ... }
+ ... }
+ ... }
+ >>> is_unspecified(config, '/DL/Outputs/Format/JSON')
+ False
+ >>> is_unspecified(config, '/DL/Outputs/Format/XML')
+ True
+ >>> is_unspecified(config, '/DL/Outputs/Format/EmptyDict')
+ True
+
+ """
+    value = get(d, path, default=None)
+    if value is None:
+        return True
+    # Handle containers before calling `pd.isna`, which is
+    # element-wise on sequences and therefore ambiguous in a boolean
+    # context.
+    if isinstance(value, (dict, list)):
+        return len(value) == 0
+    return bool(pd.isna(value))
+
+
+def is_specified(d: dict[str, Any], path: str) -> bool:
+ """
+ Opposite of `is_unspecified()`.
+
+ Parameters
+ ----------
+ d: dict
+ The dictionary to search.
+ path: str
+ The path to the desired value, with keys separated by '/'.
+
+ Returns
+ -------
+ bool
+ True if the value is specified, False otherwise.
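+
+    Examples
+    --------
+    >>> is_specified({'a': 1}, 'a')
+    True
+    >>> is_specified({'a': 1}, 'b')
+    False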
+
+ """
+ return not is_unspecified(d, path)
+
+
+def ensure_value(value: T | None) -> T:
+ """
+ Ensure a variable is not None.
+
+ This function checks that the provided variable is not None. It is
+ used to assist with type hinting by avoiding repetitive `assert
+ value is not None` statements throughout the code.
+
+ Parameters
+ ----------
+ value : Optional[T]
+ The variable to check, which can be of any type or None.
+
+ Returns
+ -------
+ T
+ The same variable, guaranteed to be non-None.
+
+ Raises
+ ------
+ TypeError
+ If the provided variable is None.
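+
+    Examples
+    --------
+    >>> ensure_value(42)
+    42
+    >>> ensure_value(None)
+    Traceback (most recent call last):
+        ...
+    TypeError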
+
+ """
+ if value is None:
+ raise TypeError
+ return value
diff --git a/pelicun/file_io.py b/pelicun/file_io.py
index 3278d3995..67012a321 100644
--- a/pelicun/file_io.py
+++ b/pelicun/file_io.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -40,25 +39,16 @@
# Kuanshi Zhong
# John Vouvakis Manousakis
-"""
-This module has classes and methods that handle file input and output.
+"""Classes and methods that handle file input and output."""
-.. rubric:: Contents
-
-.. autosummary::
-
- get_required_resources
- save_to_csv
- load_data
- load_from_file
-
-"""
+from __future__ import annotations
from pathlib import Path
+
import numpy as np
import pandas as pd
-from pelicun import base
+from pelicun import base
convert_dv_name = {
'DV_rec_cost': 'Reconstruction Cost',
@@ -91,53 +81,45 @@
}
-def save_to_csv(
- data,
- filepath,
- units=None,
- unit_conversion_factors=None,
- orientation=0,
- use_simpleindex=True,
- log=None,
-):
+def save_to_csv( # noqa: C901
+ data: pd.DataFrame | None,
+ filepath: Path | None,
+ units: pd.Series | None = None,
+ unit_conversion_factors: dict | None = None,
+ orientation: int = 0,
+ *,
+ use_simpleindex: bool = True,
+ log: base.Logger | None = None,
+) -> pd.DataFrame | None:
"""
- Saves data to a CSV file following the standard SimCenter schema.
+ Save data to a CSV file following the standard SimCenter schema.
The produced CSV files have a single header line and an index
column. The second line may start with 'Units' in the index or the
first column may be 'Units' to provide the units for the data in
the file.
- The following data types in pelicun can be saved with this
- function:
-
- Demand Data: Each column in a table corresponds to a demand type;
- each row corresponds to a simulation/sample. The header identifies
- each demand type. The user guide section of the documentation
- provides more information about the header format. Target need to
- be specified in the second row of the DataFrame.
-
Parameters
----------
- data : DataFrame
+ data: DataFrame
The data to save.
- filepath : str
+ filepath: Path
The location of the destination file. If None, the data is not
saved, but returned in the end.
- units : Series, optional
+ units: Series, optional
Provides a Series with variables and corresponding units.
- unit_conversion_factors : dict, optional
+ unit_conversion_factors: dict, optional
Dictionary containing key-value pairs of unit names and their
corresponding factors. Conversion factors are defined as the
number of times a base unit fits in the alternative unit.
- orientation : int, {0, 1}, default 0
+ orientation: int, {0, 1}, default 0
If 0, variables are organized along columns; otherwise, they
are along the rows. This is important when converting values
to follow the prescribed units.
- use_simpleindex : bool, default True
+ use_simpleindex: bool, default True
If True, MultiIndex columns and indexes are converted to
SimpleIndex before saving.
- log : Logger, optional
+ log: Logger, optional
Logger object to be used. If no object is specified, no
logging is performed.
@@ -156,113 +138,119 @@ def save_to_csv(
If `filepath` is None, returns the DataFrame with potential
unit conversions and reformatting applied. Otherwise, returns
None after saving the data to a CSV file.
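+
+    Examples
+    --------
+    A minimal sketch; the demand label and the `unit_a` conversion
+    factor are made up for illustration:
+
+    >>> df = pd.DataFrame({'PFA-1-1': [2.0]})
+    >>> units = pd.Series({'PFA-1-1': 'unit_a'}, name='Units')
+    >>> out = save_to_csv(
+    ...     df, None, units=units, unit_conversion_factors={'unit_a': 4.0}
+    ... )
+    >>> float(out.loc[0, 'PFA-1-1'])
+    0.5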
- """
+ """
if filepath is None:
if log:
log.msg('Preparing data ...', prepend_timestamp=False)
elif log:
- log.msg(f'Saving data to {filepath}...', prepend_timestamp=False)
+ log.msg(f'Saving data to `{filepath!s}`...', prepend_timestamp=False)
- if data is not None:
- # make sure we do not modify the original data
- data = data.copy()
+ if data is None:
+ if log:
+ log.warning('Data was empty, no file saved.')
+ return None
- # convert units and add unit information, if needed
- if units is not None:
- if unit_conversion_factors is None:
- raise ValueError(
- 'When units is not None, '
- 'unit_conversion_factors must be provided'
- )
+ assert isinstance(data, pd.DataFrame)
- if log:
- log.msg('Converting units...', prepend_timestamp=False)
+ # make sure we do not modify the original data
+ data = data.copy()
- # if the orientation is 1, we might not need to scale all columns
- if orientation == 1:
- cols_to_scale = [dt in [float, int] for dt in data.dtypes]
- cols_to_scale = data.columns[cols_to_scale]
+ # convert units and add unit information, if needed
+ if units is not None:
+ if unit_conversion_factors is None:
+ msg = (
+ 'When `units` is not None, '
+ '`unit_conversion_factors` must be provided.'
+ )
+ raise ValueError(msg)
- labels_to_keep = []
+ if log:
+ log.msg('Converting units...', prepend_timestamp=False)
- for unit_name in units.unique():
- labels = units.loc[units == unit_name].index.values
+ # if the orientation is 1, we might not need to scale all columns
+ if orientation == 1:
+ cols_to_scale_bool = [dt in {float, int} for dt in data.dtypes]
+ cols_to_scale = data.columns[cols_to_scale_bool]
- unit_factor = 1.0 / unit_conversion_factors[unit_name]
+ labels_to_keep = []
- active_labels = []
+ for unit_name in units.unique():
+ labels = units.loc[units == unit_name].index.to_numpy()
- if orientation == 0:
- for label in labels:
- if label in data.columns:
- active_labels.append(label)
+ unit_factor = 1.0 / unit_conversion_factors[unit_name]
- if len(active_labels) > 0:
- data.loc[:, active_labels] *= unit_factor
+ active_labels = []
- else: # elif orientation == 1:
- for label in labels:
- if label in data.index:
- active_labels.append(label)
+ if orientation == 0:
+ for label in labels:
+ if label in data.columns:
+ active_labels.append(label) # noqa: PERF401
- if len(active_labels) > 0:
- data.loc[active_labels, cols_to_scale] *= unit_factor
+ if len(active_labels) > 0:
+ data.loc[:, active_labels] *= unit_factor
- labels_to_keep += active_labels
+ else: # elif orientation == 1:
+ for label in labels:
+ if label in data.index:
+ active_labels.append(label) # noqa: PERF401
- units = units.loc[labels_to_keep].to_frame()
+ if len(active_labels) > 0:
+ data.loc[np.array(active_labels), np.array(cols_to_scale)] *= (
+ unit_factor
+ )
- if orientation == 0:
- data = pd.concat([units.T, data], axis=0)
- data.sort_index(axis=1, inplace=True)
- else:
- data = pd.concat([units, data], axis=1)
- data.sort_index(inplace=True)
+ labels_to_keep += active_labels
- if log:
- log.msg('Unit conversion successful.', prepend_timestamp=False)
+ units_df = units.loc[labels_to_keep].to_frame()
- if use_simpleindex:
- # convert MultiIndex to regular index with '-' separators
- if isinstance(data.index, pd.MultiIndex):
- data = base.convert_to_SimpleIndex(data)
+ if orientation == 0:
+ data = pd.concat([units_df.T, data], axis=0)
+ data = data.sort_index(axis=1)
+ else:
+ data = pd.concat([units_df, data], axis=1)
+ data = data.sort_index()
- # same thing for the columns
- if isinstance(data.columns, pd.MultiIndex):
- data = base.convert_to_SimpleIndex(data, axis=1)
+ if log:
+ log.msg('Unit conversion successful.', prepend_timestamp=False)
- if filepath is not None:
- filepath = Path(filepath).resolve()
- if filepath.suffix == '.csv':
- # save the contents of the DataFrame into a csv
- data.to_csv(filepath)
+ assert isinstance(data, pd.DataFrame)
+ if use_simpleindex:
+ # convert MultiIndex to regular index with '-' separators
+ if isinstance(data.index, pd.MultiIndex):
+ data = base.convert_to_SimpleIndex(data)
- if log:
- log.msg('Data successfully saved to file.', prepend_timestamp=False)
+ # same thing for the columns
+ if isinstance(data.columns, pd.MultiIndex):
+ data = base.convert_to_SimpleIndex(data, axis=1)
- else:
- raise ValueError(
- f'ERROR: Unexpected file type received when trying '
- f'to save to csv: {filepath}'
- )
+ if filepath is not None:
+ if filepath.suffix == '.csv':
+ # save the contents of the DataFrame into a csv
+ data.to_csv(filepath)
- return None
+ if log:
+ log.msg('Data successfully saved to file.', prepend_timestamp=False)
- # at this line, filepath is None
- return data
+ else:
+ msg = (
+ f'Please use the `.csv` file extension. '
+ f'Received file name is `{filepath}`'
+ )
+ raise ValueError(msg)
- # at this line, data is None
- if log:
- log.msg('WARNING: Data was empty, no file saved.', prepend_timestamp=False)
- return None
+ return None
+
+ # at this line, filepath is None
+ return data
-def substitute_default_path(data_paths):
+def substitute_default_path(
+ data_paths: list[str | pd.DataFrame],
+) -> list[str | pd.DataFrame]:
"""
- Substitutes the default directory path in a list of data paths
- with a specified path.
+ Substitute the default directory path with a specified path.
This function iterates over a list of data paths and replaces
occurrences of the 'PelicunDefault/' substring with the path
@@ -274,7 +262,7 @@ def substitute_default_path(data_paths):
Parameters
----------
- data_paths : list of str
+ data_paths: list of str
A list containing the paths to data files. These paths may
include a placeholder directory 'PelicunDefault/' that needs
to be substituted with the actual path specified in
@@ -295,8 +283,8 @@ def substitute_default_path(data_paths):
- If a path in the input list does not contain 'PelicunDefault/',
it is added to the output list unchanged.
- Example
- -------
+ Examples
+ --------
>>> data_paths = ['PelicunDefault/data/file1.txt',
-    'data/file2.txt']
+    ...               'data/file2.txt']
>>> substitute_default_path(data_paths)
@@ -304,9 +292,9 @@ def substitute_default_path(data_paths):
'data/file2.txt']
"""
- updated_paths = []
+ updated_paths: list[str | pd.DataFrame] = []
for data_path in data_paths:
- if 'PelicunDefault/' in data_path:
+ if isinstance(data_path, str) and 'PelicunDefault/' in data_path:
path = data_path.replace(
'PelicunDefault/',
f'{base.pelicun_path}/resources/SimCenterDBDL/',
@@ -317,16 +305,17 @@ def substitute_default_path(data_paths):
return updated_paths
-def load_data(
- data_source,
- unit_conversion_factors,
- orientation=0,
- reindex=True,
- return_units=False,
- log=None,
-):
+def load_data( # noqa: C901
+ data_source: str | pd.DataFrame,
+ unit_conversion_factors: dict | None,
+ orientation: int = 0,
+ *,
+ reindex: bool = True,
+ return_units: bool = False,
+ log: base.Logger | None = None,
+) -> tuple[pd.DataFrame, pd.Series] | pd.DataFrame:
"""
- Loads data assuming it follows standard SimCenter tabular schema.
+ Load data assuming it follows standard SimCenter tabular schema.
The data is assumed to have a single header line and an index column. The
second line may start with 'Units' in the index and provide the units for
@@ -369,20 +358,20 @@ def load_data(
Raises
------
TypeError
- If `data_source` is neither a string nor a DataFrame, a TypeError is raised.
- ValueError
- If `unit_conversion_factors` contains keys that do not correspond to any units in the data, a ValueError may be raised during processing.
- """
+ If `data_source` is neither a string nor a DataFrame, a
+ TypeError is raised.
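+
+    Examples
+    --------
+    A minimal sketch (the file name and unit factor are illustrative):
+
+    >>> data = load_data('demands.csv', {'g': 9.80665})  # doctest: +SKIP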
+ """
if isinstance(data_source, pd.DataFrame):
-        # store it at proceed (copying is needed to avoid changing the
+        # store it and proceed (copying is needed to avoid changing the
# original)
- data = data_source.copy()
+ data = base.with_parsed_str_na_values(data_source.copy())
elif isinstance(data_source, str):
# otherwise, load the data from a file
data = load_from_file(data_source)
else:
- raise TypeError(f'Invalid data_source type: {type(data_source)}')
+ msg = f'Invalid data_source type: {type(data_source)}'
+ raise TypeError(msg)
# Define a dictionary to decide the axis based on the orientation
axis = {0: 1, 1: 0}
@@ -392,14 +381,16 @@ def load_data(
# and optionally apply conversions to all numeric values
if 'Units' in the_index:
units = data['Units'] if orientation == 1 else data.loc['Units']
- data.drop('Units', axis=orientation, inplace=True)
+ data = data.drop(['Units'], axis=orientation) # type: ignore
data = base.convert_dtypes(data)
if unit_conversion_factors is not None:
numeric_elements = (
- (data.select_dtypes(include=[np.number]).index)
+ (data.select_dtypes(include=[np.number]).index) # type: ignore
if orientation == 0
- else (data.select_dtypes(include=[np.number]).columns)
+ else (
+ data.select_dtypes(include=[np.number]).columns # type: ignore
+ )
)
if log:
@@ -407,18 +398,26 @@ def load_data(
conversion_factors = units.map(
lambda unit: (
- 1.00 if pd.isna(unit) else unit_conversion_factors.get(unit, 1.00)
+ 1.00
+ if pd.isna(unit)
+ else unit_conversion_factors.get(unit, 1.00)
)
)
if orientation == 1:
- data.loc[:, numeric_elements] = data.loc[:, numeric_elements].multiply(
- conversion_factors, axis=axis[orientation]
- )
+ data.loc[:, numeric_elements] = data.loc[
+ :, numeric_elements # type: ignore
+ ].multiply(
+ conversion_factors,
+ axis=axis[orientation], # type: ignore
+ ) # type: ignore
else:
- data.loc[numeric_elements, :] = data.loc[numeric_elements, :].multiply(
- conversion_factors, axis=axis[orientation]
- )
+ data.loc[numeric_elements, :] = data.loc[
+ numeric_elements, :
+ ].multiply(
+ conversion_factors,
+ axis=axis[orientation], # type: ignore
+ ) # type: ignore
if log:
log.msg('Unit conversion successful.', prepend_timestamp=False)
@@ -429,34 +428,31 @@ def load_data(
# convert columns or index to MultiIndex if needed
data = base.convert_to_MultiIndex(data, axis=1)
- data.sort_index(axis=1, inplace=True)
+ data = data.sort_index(axis=1)
# reindex the data, if needed
if reindex:
- data.index = np.arange(data.shape[0])
+ data.index = pd.RangeIndex(start=0, stop=data.shape[0], step=1)
else:
# convert index to MultiIndex if needed
data = base.convert_to_MultiIndex(data, axis=0)
- data.sort_index(inplace=True)
-
- if log:
- log.msg('Data successfully loaded from file.', prepend_timestamp=False)
+ data = data.sort_index()
if return_units:
if units is not None:
# convert index in units Series to MultiIndex if needed
- units = base.convert_to_MultiIndex(units, axis=0).dropna()
- units.sort_index(inplace=True)
+ units = base.convert_to_MultiIndex(units, axis=0).dropna() # type: ignore
+ units = units.sort_index()
output = data, units
else:
- output = data
+ output = data # type: ignore
- return output
+ return output # type: ignore
-def load_from_file(filepath, log=None):
+def load_from_file(filepath: str, log: base.Logger | None = None) -> pd.DataFrame:
"""
- Loads data from a file and stores it in a DataFrame.
+    Load data from a file and store it in a DataFrame.
Currently, only CSV files are supported, but the function is easily
extensible to support other file formats.
@@ -464,7 +460,9 @@ def load_from_file(filepath, log=None):
Parameters
----------
filepath: string
- The location of the source file
+ The location of the source file.
+ log: base.Logger, optional
+ Optional logger object.
Returns
-------
@@ -481,24 +479,26 @@ def load_from_file(filepath, log=None):
If the filepath is invalid.
ValueError
If the file is not a CSV.
- """
+ """
if log:
log.msg(f'Loading data from {filepath}...')
# check if the filepath is valid
- filepath = Path(filepath).resolve()
+ filepath_path = Path(filepath).resolve()
- if not filepath.is_file():
- raise FileNotFoundError(
- f"The filepath provided does not point to an existing " f"file: {filepath}"
+ if not filepath_path.is_file():
+ msg = (
+ f'The filepath provided does not point to an existing '
+ f'file: {filepath_path}'
)
+ raise FileNotFoundError(msg)
- if filepath.suffix == '.csv':
+ if filepath_path.suffix == '.csv':
# load the contents of the csv into a DataFrame
data = pd.read_csv(
- filepath,
+ filepath_path,
header=0,
index_col=0,
low_memory=False,
@@ -509,9 +509,10 @@ def load_from_file(filepath, log=None):
log.msg('File successfully opened.', prepend_timestamp=False)
else:
- raise ValueError(
- f'ERROR: Unexpected file type received when trying '
- f'to load from csv: {filepath}'
+ msg = (
+ f'Unexpected file type received when trying '
+ f'to load from csv: {filepath_path}'
)
+ raise ValueError(msg)
return data
diff --git a/pelicun/model/__init__.py b/pelicun/model/__init__.py
index fdb212f1d..41aa1fa1f 100644
--- a/pelicun/model/__init__.py
+++ b/pelicun/model/__init__.py
@@ -1,49 +1,48 @@
-"""
--*- coding: utf-8 -*-
-
-Copyright (c) 2018 Leland Stanford Junior University
-Copyright (c) 2018 The Regents of the University of California
-
-This file is part of pelicun.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
-this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
-this list of conditions and the following disclaimer in the documentation
-and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its contributors
-may be used to endorse or promote products derived from this software without
-specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-You should have received a copy of the BSD 3-Clause License along with
-pelicun. If not, see <http://www.opensource.org/licenses/>.
-
-Contributors:
-Adam Zsarnóczay
-"""
-
-# flake8: noqa
-
-from .pelicun_model import PelicunModel
-from .demand_model import DemandModel
-from .asset_model import AssetModel
-from .damage_model import DamageModel
-from .loss_model import LossModel
-from .loss_model import RepairModel
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+
+# This file is part of pelicun.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""Pelicun models."""
+
+from __future__ import annotations
+
+from pelicun.model.asset_model import AssetModel
+from pelicun.model.damage_model import DamageModel, DamageModel_DS
+from pelicun.model.demand_model import DemandModel
+from pelicun.model.loss_model import (
+ LossModel,
+ RepairModel_DS,
+ RepairModel_LF,
+)
+from pelicun.model.pelicun_model import PelicunModel
diff --git a/pelicun/model/asset_model.py b/pelicun/model/asset_model.py
index 5f3c18006..96a7f33a2 100644
--- a/pelicun/model/asset_model.py
+++ b/pelicun/model/asset_model.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,105 +37,73 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This file defines the AssetModel object and its methods.
-.. rubric:: Contents
+"""AssetModel object and methods."""
-.. autosummary::
-
- AssetModel
-
-"""
+from __future__ import annotations
from itertools import product
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable
+
import numpy as np
import pandas as pd
-from .pelicun_model import PelicunModel
-from .. import base
-from .. import uq
-from .. import file_io
+from pelicun import base, file_io, uq
+from pelicun.model.pelicun_model import PelicunModel
+
+if TYPE_CHECKING:
+ from pelicun.assessment import AssessmentBase
idx = base.idx
class AssetModel(PelicunModel):
- """
- Manages asset information used in assessments.
-
- Parameters
- ----------
-
- """
+ """Asset information used in assessments."""
- def __init__(self, assessment):
- super().__init__(assessment)
-
- self.cmp_marginal_params = None
- self.cmp_units = None
-
- self._cmp_RVs = None
- self._cmp_sample = None
+ __slots__ = ['_cmp_RVs', 'cmp_marginal_params', 'cmp_sample', 'cmp_units']
- @property
- def cmp_sample(self):
+ def __init__(self, assessment: AssessmentBase) -> None:
"""
- A property that gets or creates a DataFrame representing the
- component sample for the current assessment.
+ Initialize an Asset model.
- If the component sample has not been previously set or
- generated, this property will generate it by retrieving
- samples from the component random variables (_cmp_RVs),
- sorting the indexes, and converting the DataFrame to use a
- MultiIndex. The component sample is structured to include
- information on component ('cmp'), location ('loc'), direction
- ('dir'), and unique identifier ('uid').
-
- Returns
- -------
- DataFrame
- A DataFrame containing the component samples, indexed and
- sorted appropriately. The columns are multi-indexed to
- represent various dimensions of the component data.
+ Parameters
+ ----------
+ assessment: AssessmentBase
+ Parent assessment object.
"""
- if self._cmp_sample is None:
- cmp_sample = pd.DataFrame(self._cmp_RVs.RV_sample)
- cmp_sample.sort_index(axis=0, inplace=True)
- cmp_sample.sort_index(axis=1, inplace=True)
-
- cmp_sample = base.convert_to_MultiIndex(cmp_sample, axis=1)['CMP']
-
- cmp_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- self._cmp_sample = cmp_sample
+ super().__init__(assessment)
- else:
- cmp_sample = self._cmp_sample
+ self.cmp_marginal_params: pd.DataFrame | None = None
+ self.cmp_units: pd.Series | None = None
+ self.cmp_sample: pd.DataFrame | None = None
- return cmp_sample
+ self._cmp_RVs: uq.RandomVariableRegistry | None = None
- def save_cmp_sample(self, filepath=None, save_units=False):
+ def save_cmp_sample(
+ self, filepath: str | None = None, *, save_units: bool = False
+ ) -> pd.DataFrame | tuple[pd.DataFrame, pd.Series] | None:
"""
- Saves the component quantity sample to a CSV file or returns
- it as a DataFrame with optional units.
+ Save or retrieve component quantity sample.
- This method handles the storage of a sample of component
- quantities, which can either be saved directly to a file or
- returned as a DataFrame for further manipulation. When saving
- to a file, additional information such as unit conversion
- factors and column units can be included. If the data is not
- being saved to a file, the method can return the DataFrame
- with or without units as specified.
+ Saves the component quantity sample to a CSV file or returns
+ it as a DataFrame with optional units. This method handles
+ the storage of a sample of component quantities, which can
+ either be saved directly to a file or returned as a DataFrame
+ for further manipulation. When saving to a file, additional
+ information such as unit conversion factors and column units
+ can be included. If the data is not being saved to a file, the
+ method can return the DataFrame with or without units as
+ specified.
Parameters
----------
- filepath : str, optional
+ filepath: str, optional
The path to the file where the component quantity sample
should be saved. If not provided, the sample is not saved
to disk but returned.
- save_units : bool, default: False
+ save_units: bool, default: False
Indicates whether to include a row with unit information
in the returned DataFrame. This parameter is ignored if a
file path is provided.
@@ -147,62 +114,63 @@ def save_cmp_sample(self, filepath=None, save_units=False):
If `filepath` is provided, the function returns None after
saving the data.
If no `filepath` is specified, returns:
- - DataFrame containing the component quantity sample.
- - Optionally, a Series containing the units for each
- column if `save_units` is True.
-
- Raises
- ------
- IOError
- Raises an IOError if there is an issue saving the file to
- the specified `filepath`.
+ * DataFrame containing the component quantity sample.
+ * Optionally, a Series containing the units for each
+ column if `save_units` is True.
Notes
-----
The function utilizes internal logging to notify the start and
completion of the saving process. It adjusts index types and
handles unit conversions based on assessment configurations.
+
"""
- self.log_div()
+ self.log.div()
if filepath is not None:
- self.log_msg('Saving asset components sample...')
+ self.log.msg('Saving asset components sample...')
# prepare a units array
sample = self.cmp_sample
+ assert isinstance(sample, pd.DataFrame)
units = pd.Series(name='Units', index=sample.columns, dtype=object)
+ assert self.cmp_units is not None
for cmp_id, unit_name in self.cmp_units.items():
- units.loc[cmp_id, :] = unit_name
+ units.loc[cmp_id, :] = unit_name # type: ignore
res = file_io.save_to_csv(
sample,
- filepath,
+ Path(filepath) if filepath is not None else None,
units=units,
unit_conversion_factors=self._asmnt.unit_conversion_factors,
use_simpleindex=(filepath is not None),
log=self._asmnt.log,
)
-
if filepath is not None:
- self.log_msg(
+ self.log.msg(
'Asset components sample successfully saved.',
prepend_timestamp=False,
)
return None
# else:
- units = res.loc["Units"]
- res.drop("Units", inplace=True)
+
+ assert isinstance(res, pd.DataFrame)
+
+ units_part = res.loc['Units']
+ assert isinstance(units_part, pd.Series)
+ units = units_part
+
+ res = res.drop('Units')
if save_units:
return res.astype(float), units
return res.astype(float)
- def load_cmp_sample(self, filepath):
+ def load_cmp_sample(self, filepath: str) -> None:
"""
- Loads a component quantity sample from a specified CSV file
- into the system.
+ Load a component quantity sample from a specified CSV file.
This method reads a CSV file that contains component quantity
samples, setting up the necessary DataFrame structures within
@@ -212,10 +180,15 @@ def load_cmp_sample(self, filepath):
Parameters
----------
- filepath : str
+ filepath: str
The path to the CSV file from which to load the component
quantity sample.
+ Raises
+ ------
+ ValueError
+ If the columns have an invalid number of levels.
+
Notes
-----
Upon successful loading, the method sets the component sample
@@ -233,9 +206,10 @@ def load_cmp_sample(self, filepath):
>>> model.load_cmp_sample('path/to/component_sample.csv')
# This will load the component quantity sample into the model
# from the specified file.
+
"""
- self.log_div()
- self.log_msg('Loading asset components sample...')
+ self.log.div()
+ self.log.msg('Loading asset components sample...')
sample, units = file_io.load_data(
filepath,
@@ -243,21 +217,50 @@ def load_cmp_sample(self, filepath):
return_units=True,
log=self._asmnt.log,
)
+ assert isinstance(sample, pd.DataFrame)
+ assert isinstance(units, pd.Series)
+
+ # Check if a `uid` level was passed
+ num_levels = len(sample.columns.names)
+ num_levels_without_uid = 3
+ num_levels_with_uid = num_levels_without_uid + 1
+ if num_levels == num_levels_without_uid:
+ # No `uid`, add one.
+ sample.columns.names = ['cmp', 'loc', 'dir']
+ sample = base.dedupe_index(sample.T).T
+ elif num_levels == num_levels_with_uid:
+ sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
+ else:
+ msg = (
+ f'Invalid component sample: Column MultiIndex '
+ f'has an unexpected length: {num_levels}'
+ )
+ raise ValueError(msg)
- sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- self._cmp_sample = sample
+ self.cmp_sample = sample
self.cmp_units = units.groupby(level=0).first()
- self.log_msg(
+ # Add marginal parameters with Blocks information (later calls
+ # rely on that attribute being defined)
+ # Obviously we can't trace back the distributions and their
+ # parameters, those columns are left undefined.
+ cmp_marginal_params = pd.DataFrame(
+ self.cmp_sample.columns.to_list(), columns=self.cmp_sample.columns.names
+ ).astype(str)
+ cmp_marginal_params['Blocks'] = 1
+ cmp_marginal_params = cmp_marginal_params.set_index(
+ ['cmp', 'loc', 'dir', 'uid']
+ )
+ self.cmp_marginal_params = cmp_marginal_params
+
+ self.log.msg(
'Asset components sample successfully loaded.', prepend_timestamp=False
)
- def load_cmp_model(self, data_source):
+ def load_cmp_model(self, data_source: str | dict[str, pd.DataFrame]) -> None:
"""
- Loads the model describing component quantities in an asset
- from specified data sources.
+ Load the asset model from a specified data source.
This function is responsible for loading data related to the
component model of an asset. It supports loading from multiple
@@ -268,7 +271,7 @@ def load_cmp_model(self, data_source):
Parameters
----------
- data_source : str or dict
+ data_source: str or dict
The source from where to load the component model data. If
it's a string, it should be the prefix for three files:
one for marginal distributions (`_marginals.csv`),
@@ -303,187 +306,8 @@ def load_cmp_model(self, data_source):
>>> model.load_cmp_model(data_dict)
"""
-
- def get_locations(loc_str):
- """
- Parses a location string to determine specific sections of
- an asset to be processed.
-
- This function interprets various string formats to output
- a list of strings representing sections or parts of the
- asset. It can handle single numbers, ranges (e.g.,
- '3--7'), lists separated by commas (e.g., '1,2,5'), and
- special keywords like 'all', 'top', or 'roof'.
-
- Parameters
- ----------
- loc_str : str
- A string that describes the location or range of
- sections in the asset. It can be a single number, a
- range, a comma-separated list, 'all', 'top', or
- 'roof'.
-
- Returns
- -------
- numpy.ndarray
- An array of strings, each representing a section
- number. These sections are processed based on the
- input string, which can denote specific sections,
- ranges of sections, or special keywords.
-
- Raises
- ------
- ValueError
- If the location string cannot be parsed into any
- recognized format, a ValueError is raised with a
- message indicating the problematic string.
-
- Examples
- --------
- Given an asset with multiple sections:
-
- >>> get_locations('5')
- array(['5'])
-
- >>> get_locations('3--7')
- array(['3', '4', '5', '6', '7'])
-
- >>> get_locations('1,2,5')
- array(['1', '2', '5'])
-
- >>> get_locations('all')
- array(['1', '2', '3', ..., '10'])
-
- >>> get_locations('top')
- array(['10'])
-
- >>> get_locations('roof')
- array(['11'])
- """
- try:
- res = str(int(loc_str))
- return np.array([res])
-
- except ValueError as exc:
- stories = self._asmnt.stories
-
- if "--" in loc_str:
- s_low, s_high = loc_str.split('--')
- s_low = get_locations(s_low)
- s_high = get_locations(s_high)
- return np.arange(int(s_low[0]), int(s_high[0]) + 1).astype(str)
-
- if "," in loc_str:
- return np.array(loc_str.split(','), dtype=int).astype(str)
-
- if loc_str == "all":
- return np.arange(1, stories + 1).astype(str)
-
- if loc_str == "top":
- return np.array(
- [
- stories,
- ]
- ).astype(str)
-
- if loc_str == "roof":
- return np.array(
- [
- stories + 1,
- ]
- ).astype(str)
-
- raise ValueError(
- f"Cannot parse location string: " f"{loc_str}"
- ) from exc
-
- def get_directions(dir_str):
- """
- Parses a direction string to determine specific
- orientations or directions applicable within an asset.
-
- This function processes direction descriptions to output
- an array of strings, each representing a specific
- direction. It can handle single numbers, ranges (e.g.,
- '1--3'), lists separated by commas (e.g., '1,2,5'), and
- null values that default to '1'.
-
- Parameters
- ----------
- dir_str : str or None
- A string that describes the direction or range of
- directions in the asset. It can be a single number, a
- range, a comma-separated list, or it can be null,
- which defaults to representing a single default
- direction ('1').
-
- Returns
- -------
- numpy.ndarray
- An array of strings, each representing a
- direction. These directions are processed based on the
- input string, which can denote specific directions,
- ranges of directions, or a list.
-
- Raises
- ------
- ValueError
- If the direction string cannot be parsed into any
- recognized format, a ValueError is raised with a
- message indicating the problematic string.
-
- Examples
- --------
- Given an asset with multiple potential orientations:
-
- >>> get_directions(None)
- array(['1'])
-
- >>> get_directions('2')
- array(['2'])
-
- >>> get_directions('1--3')
- array(['1', '2', '3'])
-
- >>> get_directions('1,2,5')
- array(['1', '2', '5'])
- """
- if pd.isnull(dir_str):
- return np.ones(1).astype(str)
-
- # else:
- try:
- res = str(int(dir_str))
- return np.array(
- [
- res,
- ]
- )
-
- except ValueError as exc:
- if "," in dir_str:
- return np.array(dir_str.split(','), dtype=int).astype(str)
-
- if "--" in dir_str:
- d_low, d_high = dir_str.split('--')
- d_low = get_directions(d_low)
- d_high = get_directions(d_high)
- return np.arange(int(d_low[0]), int(d_high[0]) + 1).astype(str)
-
- # else:
- raise ValueError(
- f"Cannot parse direction string: " f"{dir_str}"
- ) from exc
-
- def get_attribute(attribute_str, dtype=float, default=np.nan):
- # pylint: disable=missing-return-doc
- # pylint: disable=missing-return-type-doc
- if pd.isnull(attribute_str):
- return default
- return dtype(attribute_str)
-
- self.log_div()
- self.log_msg('Loading component model...')
+ self.log.div()
+ self.log.msg('Loading component model...')
# Currently, we assume independent component distributions are defined
# throughout the building. Correlations may be added afterward or this
@@ -491,7 +315,7 @@ def get_attribute(attribute_str, dtype=float, default=np.nan):
# prepare the marginal data source variable to load the data
if isinstance(data_source, dict):
- marginal_data_source = data_source['marginals']
+ marginal_data_source: pd.DataFrame | str = data_source['marginals']
else:
marginal_data_source = data_source + '_marginals.csv'
@@ -503,13 +327,15 @@ def get_attribute(attribute_str, dtype=float, default=np.nan):
return_units=True,
log=self._asmnt.log,
)
+ assert isinstance(marginal_params, pd.DataFrame)
+ assert isinstance(units, pd.Series)
# group units by cmp id to avoid redundant entries
self.cmp_units = units.copy().groupby(level=0).first()
marginal_params = pd.concat([marginal_params, units], axis=1)
- cmp_marginal_param_dct = {
+ cmp_marginal_param_dct: dict[str, list[Any]] = {
'Family': [],
'Theta_0': [],
'Theta_1': [],
@@ -521,19 +347,17 @@ def get_attribute(attribute_str, dtype=float, default=np.nan):
}
index_list = []
for row in marginal_params.itertuples():
- locs = get_locations(row.Location)
- dirs = get_directions(row.Direction)
+ locs = self._get_locations(str(row.Location))
+ dirs = self._get_directions(str(row.Direction))
indices = list(product((row.Index,), locs, dirs))
num_vals = len(indices)
for col, cmp_marginal_param in cmp_marginal_param_dct.items():
if col == 'Blocks':
cmp_marginal_param.extend(
[
- get_attribute(
- getattr(row, 'Blocks', np.nan),
- dtype=int,
- default=1.0,
- )
+ int(row.Blocks) # type: ignore
+ if ('Blocks' in dir(row) and not pd.isna(row.Blocks))
+ else 1,
]
* num_vals
)
@@ -543,7 +367,7 @@ def get_attribute(attribute_str, dtype=float, default=np.nan):
cmp_marginal_param.extend([getattr(row, col, np.nan)] * num_vals)
else:
cmp_marginal_param.extend(
- [get_attribute(getattr(row, col, np.nan))] * num_vals
+ [str(getattr(row, col, np.nan))] * num_vals
)
index_list.extend(indices)
index = pd.MultiIndex.from_tuples(index_list, names=['cmp', 'loc', 'dir'])
@@ -560,82 +384,69 @@ def get_attribute(attribute_str, dtype=float, default=np.nan):
cmp_marginal_param_series = []
for col, cmp_marginal_param in cmp_marginal_param_dct.items():
cmp_marginal_param_series.append(
- pd.Series(cmp_marginal_param, dtype=dtypes[col], name=col, index=index)
+ pd.Series(
+ cmp_marginal_param, dtype=dtypes[col], name=col, index=index
+ )
)
cmp_marginal_params = pd.concat(cmp_marginal_param_series, axis=1)
- assert not cmp_marginal_params['Theta_0'].isnull().values.any()
+ assert not (
+ cmp_marginal_params['Theta_0'].isna().to_numpy().any() # type: ignore
+ )
- cmp_marginal_params.dropna(axis=1, how='all', inplace=True)
+ cmp_marginal_params = cmp_marginal_params.dropna(axis=1, how='all')
- self.log_msg(
- "Model parameters successfully parsed. "
- f"{cmp_marginal_params.shape[0]} performance groups identified",
+ self.log.msg(
+ 'Model parameters successfully parsed. '
+ f'{cmp_marginal_params.shape[0]} performance groups identified',
prepend_timestamp=False,
)
# Now we can take care of converting the values to base units
- self.log_msg(
- "Converting model parameters to internal units...",
+ self.log.msg(
+ 'Converting model parameters to internal units...',
prepend_timestamp=False,
)
# ensure that the index has unique entries by introducing an
# internal component uid
- base.dedupe_index(cmp_marginal_params)
+ cmp_marginal_params = base.dedupe_index(cmp_marginal_params)
- cmp_marginal_params = self.convert_marginal_params(
+ cmp_marginal_params = self._convert_marginal_params(
cmp_marginal_params, cmp_marginal_params['Units']
)
self.cmp_marginal_params = cmp_marginal_params.drop('Units', axis=1)
- self.log_msg("Model parameters successfully loaded.", prepend_timestamp=False)
+ self.log.msg(
+ 'Model parameters successfully loaded.', prepend_timestamp=False
+ )
- self.log_msg(
- "\nComponent model marginal distributions:\n" + str(cmp_marginal_params),
+ self.log.msg(
+ '\nComponent model marginal distributions:\n' + str(cmp_marginal_params),
prepend_timestamp=False,
)
# the empirical data and correlation files can be added later, if needed
- def _create_cmp_RVs(self):
- """
- Defines the RVs used for sampling component quantities.
+ def list_unique_component_ids(self) -> list[str]:
"""
+ Obtain unique component IDs.
- # initialize the registry
- RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
-
- # add a random variable for each component quantity variable
- for rv_params in self.cmp_marginal_params.itertuples():
- cmp = rv_params.Index
-
- # create a random variable and add it to the registry
- family = getattr(rv_params, "Family", 'deterministic')
- RV_reg.add_RV(
- uq.rv_class_map(family)(
- name=f'CMP-{cmp[0]}-{cmp[1]}-{cmp[2]}-{cmp[3]}',
- theta=[
- getattr(rv_params, f"Theta_{t_i}", np.nan) for t_i in range(3)
- ],
- truncation_limits=[
- getattr(rv_params, f"Truncate{side}", np.nan)
- for side in ("Lower", "Upper")
- ],
- )
- )
-
- self.log_msg(
- f"\n{self.cmp_marginal_params.shape[0]} random variables created.",
- prepend_timestamp=False,
- )
+ Returns
+ -------
+        list
+            Unique component IDs in the asset model.
- self._cmp_RVs = RV_reg
+ """
+ assert self.cmp_marginal_params is not None
+ return self.cmp_marginal_params.index.unique(level=0).to_list()
- def generate_cmp_sample(self, sample_size=None):
+ def generate_cmp_sample(self, sample_size: int | None = None) -> None:
"""
+ Generate a component sample.
+
Generates a sample of component quantity realizations based on
predefined model parameters and optionally specified sample
size. If no sample size is provided, the function attempts to
@@ -654,37 +465,80 @@ def generate_cmp_sample(self, sample_size=None):
If the model parameters are not loaded before sample
generation, or if neither sample size is specified nor can
be determined from the demand model.
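+
+        Examples
+        --------
+        A minimal sketch; `model` stands for an AssetModel instance
+        with loaded component parameters:
+
+        >>> model.generate_cmp_sample(sample_size=1000)  # doctest: +SKIP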
- """
+ """
if self.cmp_marginal_params is None:
- raise ValueError(
- 'Model parameters have not been specified. Load'
+ msg = (
+ 'Model parameters have not been specified. Load '
'parameters from a file before generating a '
'sample.'
)
+ raise ValueError(msg)
- self.log_div()
- self.log_msg('Generating sample from component quantity variables...')
+ self.log.div()
+ self.log.msg('Generating sample from component quantity variables...')
if sample_size is None:
if self._asmnt.demand.sample is None:
- raise ValueError(
+ msg = (
'Sample size was not specified, '
'and it cannot be determined from '
'the demand model.'
)
+ raise ValueError(msg)
sample_size = self._asmnt.demand.sample.shape[0]
self._create_cmp_RVs()
+ assert self._cmp_RVs is not None
+ assert self._asmnt.options.sampling_method is not None
self._cmp_RVs.generate_sample(
sample_size=sample_size, method=self._asmnt.options.sampling_method
)
- # replace the potentially existing sample with the generated one
- self._cmp_sample = None
+ cmp_sample = pd.DataFrame(self._cmp_RVs.RV_sample)
+ cmp_sample = cmp_sample.sort_index(axis=0)
+ cmp_sample = cmp_sample.sort_index(axis=1)
+ cmp_sample_mi = base.convert_to_MultiIndex(cmp_sample, axis=1)['CMP']
+ assert isinstance(cmp_sample_mi, pd.DataFrame)
+ cmp_sample = cmp_sample_mi
+ cmp_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
+ self.cmp_sample = cmp_sample
+
+ self.log.msg(
+ f'\nSuccessfully generated {sample_size} realizations.',
+ prepend_timestamp=False,
+ )
+
+ def _create_cmp_RVs(self) -> None: # noqa: N802
+ """Define the RVs used for sampling component quantities."""
+ # initialize the registry
+ rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
- self.log_msg(
- f"\nSuccessfully generated {sample_size} realizations.",
+ # add a random variable for each component quantity variable
+ assert self.cmp_marginal_params is not None
+ for rv_params in self.cmp_marginal_params.itertuples():
+ cmp = rv_params.Index
+
+ # create a random variable and add it to the registry
+ family = getattr(rv_params, 'Family', 'deterministic')
+ rv_reg.add_RV(
+ uq.rv_class_map(family)(
+ name=f'CMP-{cmp[0]}-{cmp[1]}-{cmp[2]}-{cmp[3]}', # type: ignore
+ theta=[ # type: ignore
+ getattr(rv_params, f'Theta_{t_i}', np.nan)
+ for t_i in range(3)
+ ],
+ truncation_limits=[
+ getattr(rv_params, f'Truncate{side}', np.nan)
+ for side in ('Lower', 'Upper')
+ ],
+ )
+ )
+
+ self.log.msg(
+ f'\n{self.cmp_marginal_params.shape[0]} random variables created.',
prepend_timestamp=False,
)
+
+ self._cmp_RVs = rv_reg
diff --git a/pelicun/model/damage_model.py b/pelicun/model/damage_model.py
index e5927da6a..827aabe39 100644
--- a/pelicun/model/damage_model.py
+++ b/pelicun/model/damage_model.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,61 +37,318 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This file defines the DamageModel object and its methods.
-.. rubric:: Contents
+"""DamageModel object and methods."""
-.. autosummary::
+from __future__ import annotations
- DamageModel
-
-"""
+from functools import partial
+from pathlib import Path
+from typing import TYPE_CHECKING
import numpy as np
import pandas as pd
+
+from pelicun import base, file_io, uq
+from pelicun.model.demand_model import (
+ _assemble_required_demand_data,
+ _get_required_demand_type,
+ _verify_edps_available,
+)
from pelicun.model.pelicun_model import PelicunModel
-from pelicun import base
-from pelicun import uq
-from pelicun import file_io
+if TYPE_CHECKING:
+ from pelicun.assessment import AssessmentBase
+ from pelicun.uq import RandomVariableRegistry
idx = base.idx
class DamageModel(PelicunModel):
- """
- Manages damage information used in assessments.
-
- This class contains the following methods:
-
- - save_sample()
- - load_sample()
- - load_damage_model()
- - calculate()
- - _get_pg_batches()
- - _generate_dmg_sample()
- - _create_dmg_rvs()
- - _get_required_demand_type()
- - _assemble_required_demand_data()
- - _evaluate_damage_state()
- - _prepare_dmg_quantities()
- - _perform_dmg_task()
- - _apply_dmg_funcitons()
+ """Manages damage information used in assessments."""
- Parameters
- ----------
+ __slots__ = ['ds_model', 'missing_components']
- """
+ def __init__(self, assessment: AssessmentBase) -> None:
+ """
+ Initialize a Damage model.
+
+ Parameters
+ ----------
+ assessment: AssessmentBase
+ The parent assessment object.
- def __init__(self, assessment):
+ """
super().__init__(assessment)
- self.damage_params = None
- self.sample = None
+ self.ds_model: DamageModel_DS = DamageModel_DS(assessment)
+ self.missing_components: list[str] = []
+
+ @property
+ def _damage_models(self) -> tuple[DamageModel_DS]:
+ """
+ Points to the damage model objects included in DamageModel.
+
+ Returns
+ -------
+ tuple
+ A tuple containing the damage models.
+
+ """
+ return (self.ds_model,)
+
+ def load_damage_model(
+ self, data_paths: list[str | pd.DataFrame], *, warn_missing: bool = False
+ ) -> None:
+ """."""
+ self.log.warning(
+ '`load_damage_model` is deprecated and will be '
+ 'dropped in future versions of pelicun. '
+ 'Please use `load_model_parameters` instead, '
+ 'like so: \n`cmp_set = set({your_assessment_obj}.'
+ 'asset.'
+ 'list_unique_component_ids())`, '
+ 'and then \n`{your_assessment_obj}.damage.'
+ 'load_model_parameters(data_paths, cmp_set)`.'
+ )
+ cmp_set = set(self._asmnt.asset.list_unique_component_ids())
+ self.load_model_parameters(data_paths, cmp_set, warn_missing=warn_missing)
+
+ @property
+ def sample(self) -> pd.DataFrame:
+ """
+ .
+
+ Returns
+ -------
+ pd.DataFrame
+ The damage sample of the `ds_model`.
+
+ """
+ self.log.warning(
+ '`{damage model}.sample` is deprecated and will be '
+ 'dropped in future versions of pelicun. '
+ 'Please use `{damage model}.ds_model.sample` instead. '
+ 'Now returning `{damage model}.ds_model.sample`.'
+ )
+ assert self.ds_model.sample is not None
+ return self.ds_model.sample
+
+ def load_model_parameters(
+ self,
+ data_paths: list[str | pd.DataFrame],
+ cmp_set: set[str],
+ *,
+ warn_missing: bool = False,
+ ) -> None:
+ """
+ Load damage model parameters.
+
+ Parameters
+ ----------
+ data_paths: list of (string | DataFrame)
+ List of paths to data or files with damage model
+ information. Default XY datasets can be accessed as
+ PelicunDefault/XY. Order matters. Parameters defined in
+ prior elements in the list take precedence over the same
+ parameters in subsequent data paths. I.e., place the
+ Default datasets in the back.
+ cmp_set: set
+ Set of component IDs that are present in the asset model.
+ Damage parameters in the input files for components
+ outside of that set are omitted for performance.
+ warn_missing: bool
+ Whether to check if there are components in the asset model
+ that do not have specified damage parameters. Should be
+ set to True if all components in the asset model are
+ damage state-driven, or if only a damage estimation is
+ performed, without a subsequent loss estimation.
+
+ Raises
+ ------
+ ValueError
+ If the method can't parse the damage parameters in the
+ specified paths.
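+
+        Examples
+        --------
+        A minimal sketch; the data path and component ID are
+        illustrative:
+
+        >>> damage_model.load_model_parameters(
+        ...     ['PelicunDefault/damage_DB_FEMA_P58_2nd.csv'],
+        ...     cmp_set={'B.10.31.001'},
+        ... )  # doctest: +SKIP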
+
+ """
+ self.log.div()
+ self.log.msg('Loading damage model...', prepend_timestamp=False)
+
+        #
+        # handle deprecated data paths
+        #
+ for i, path in enumerate(data_paths):
+            if isinstance(path, str) and 'fragility_DB' in path:
+ path = path.replace('fragility_DB', 'damage_DB') # noqa: PLW2901
+ self.log.warning(
+ '`fragility_DB` is deprecated and will '
+ 'be dropped in future versions of pelicun. '
+ 'Please use `damage_DB` instead.'
+ )
+ data_paths[i] = path
+
+ # replace default flag with default data path
+ data_paths = file_io.substitute_default_path(data_paths)
+
+ #
+ # load damage parameter data into the models
+ #
+
+ for data_path in data_paths:
+ data = file_io.load_data(
+ data_path, None, orientation=1, reindex=False, log=self._asmnt.log
+ )
+ # determine if the damage model parameters are for damage
+ # states
+ assert isinstance(data, pd.DataFrame)
+ if _is_for_ds_model(data):
+ self.ds_model.load_model_parameters(data)
+ else:
+ msg = f'Invalid damage model parameters: {data_path}'
+ raise ValueError(msg)
+
+ self.log.msg(
+ 'Damage model parameters loaded successfully.', prepend_timestamp=False
+ )
+
+ #
+ # remove items
+ #
+
+ self.log.msg(
+ 'Removing unused damage model parameters.', prepend_timestamp=False
+ )
+
+ for damage_model in self._damage_models:
+ # drop unused damage parameter definitions
+ damage_model.drop_unused_damage_parameters(cmp_set)
+ # remove components with incomplete damage parameters
+ damage_model.remove_incomplete_components()
+
+ #
+ # convert units
+ #
+
+ self.log.msg(
+ 'Converting damage model parameter units.', prepend_timestamp=False
+ )
+ for damage_model in self._damage_models:
+ damage_model.convert_damage_parameter_units()
+
+ #
+ # verify damage parameter availability
+ #
- def save_sample(self, filepath=None, save_units=False):
+ self.log.msg(
+ 'Checking damage model parameter '
+ 'availability for all components in the asset model.',
+ prepend_timestamp=False,
+ )
+ missing_components = self._ensure_damage_parameter_availability(
+ cmp_set, warn_missing=warn_missing
+ )
+
+ self.missing_components = missing_components
+
+ def calculate(
+ self,
+ dmg_process: dict | None = None,
+ block_batch_size: int = 1000,
+ scaling_specification: dict | None = None,
+ ) -> None:
"""
+ Calculate the damage of each component block.
+
+ Parameters
+ ----------
+ dmg_process: dict, optional
+ Allows simulating damage processes, where damage to some
+ component can alter the damage state of other components.
+ block_batch_size: int
+ Maximum number of components in each batch.
+ scaling_specification: dict, optional
+ A dictionary defining the shift in median.
+ Example: {'CMP-1-1': '*1.2', 'CMP-1-2': '/1.4'}
+ The keys are individual components that should be present
+ in the `capacity_sample`. The values should be strings
+ containing an operation followed by the value formatted as
+ a float. The operation can be '+' for addition, '-' for
+ subtraction, '*' for multiplication, and '/' for division.
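+
+        Examples
+        --------
+        A hypothetical damage process, where damage state 1 of
+        component `CMP.A` triggers damage state 1 of component
+        `CMP.B` (the component IDs are illustrative):
+
+        >>> dmg_process = {'1_CMP.A': {'DS1': 'CMP.B_DS1'}}
+        >>> damage_model.calculate(dmg_process=dmg_process)  # doctest: +SKIP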
+
+ """
+ self.log.div()
+ self.log.msg('Calculating damages...')
+
+ assert self._asmnt.asset.cmp_sample is not None
+ assert self._asmnt.asset.cmp_marginal_params is not None
+ self.log.msg(
+ f'Number of Performance Groups in Asset Model:'
+ f' {self._asmnt.asset.cmp_sample.shape[1]}',
+ prepend_timestamp=False,
+ )
+
+ # Instantiate `component_blocks`
+ if 'Blocks' in self._asmnt.asset.cmp_marginal_params.columns:
+ # If a `Blocks` column is available, use `cmp_marginals`
+ component_blocks = (
+ self._asmnt.asset.cmp_marginal_params['Blocks']
+ .to_frame()
+ .astype('int64')
+ )
+ else:
+ # Otherwise assume 1.00 for the number of blocks and
+ # initialize `component_blocks` using the columns of `cmp_sample`.
+ component_blocks = pd.DataFrame(
+ np.ones(self._asmnt.asset.cmp_sample.shape[1]),
+ index=self._asmnt.asset.cmp_sample.columns,
+ columns=['Blocks'],
+ dtype='int64',
+ )
+
+ # obtain damage states for applicable components
+ assert self._asmnt.demand.sample is not None
+ self.ds_model.obtain_ds_sample(
+ demand_sample=self._asmnt.demand.sample,
+ component_blocks=component_blocks,
+ block_batch_size=block_batch_size,
+ scaling_specification=scaling_specification,
+ missing_components=self.missing_components,
+ nondirectional_multipliers=self._asmnt.options.nondir_multi_dict,
+ )
+
+ # Apply the prescribed damage process, if any
+ if dmg_process is not None:
+ self.log.msg('Applying damage processes.')
+
+ # Sort the damage processes tasks
+ dmg_process = {key: dmg_process[key] for key in sorted(dmg_process)}
+
+ # Perform damage tasks in the sorted order
+ for task in dmg_process.items():
+ self.ds_model.perform_dmg_task(task)
+
+ self.log.msg(
+ 'Damage processes successfully applied.', prepend_timestamp=False
+ )
+
+ qnt_sample = self.ds_model.prepare_dmg_quantities(
+ self._asmnt.asset.cmp_sample,
+ self._asmnt.asset.cmp_marginal_params,
+ dropzero=False,
+ )
+
+ # If requested, extend the quantity table with all possible DSs
+ if self._asmnt.options.list_all_ds:
+ qnt_sample = self.ds_model.complete_ds_cols(qnt_sample)
+
+ self.ds_model.sample = qnt_sample
+
+ self.log.msg('Damage calculation completed.', prepend_timestamp=False)
+
+ def save_sample(
+ self, filepath: str | None = None, *, save_units: bool = False
+ ) -> pd.DataFrame | tuple[pd.DataFrame, pd.Series] | None:
+ """
+ Save or return the damage sample data.
+
Saves the damage sample data to a CSV file or returns it
directly with an option to include units.
@@ -104,11 +360,11 @@ def save_sample(self, filepath=None, save_units=False):
Parameters
----------
- filepath : str, optional
+ filepath: str, optional
The path to the file where the damage sample should be
saved. If not provided, the sample is not saved to disk
but returned.
- save_units : bool, default: False
+ save_units: bool, default: False
Indicates whether to include a row with unit information
in the returned DataFrame. This parameter is ignored if a
file path is provided.
@@ -121,19 +377,26 @@ def save_sample(self, filepath=None, save_units=False):
If no `filepath` is specified, returns:
- DataFrame containing the damage sample.
- Optionally, a Series containing the units for each
- column if `save_units` is True.
+ column if `save_units` is True.
+
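+        Examples
+        --------
+        Hypothetical usage (`dmg_model` is a placeholder name),
+        assuming a damage sample is available::
+
+            # save to disk
+            dmg_model.save_sample(filepath='damage_sample.csv')
+
+            # or keep in memory, together with the units
+            sample, units = dmg_model.save_sample(save_units=True)
+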
"""
- self.log_div()
- self.log_msg('Saving damage sample...')
+ self.log.div()
+ self.log.msg('Saving damage sample...')
+
+ if self.ds_model.sample is None:
+ return None
cmp_units = self._asmnt.asset.cmp_units
- qnt_units = pd.Series(index=self.sample.columns, name='Units', dtype='object')
+ qnt_units = pd.Series(
+ index=self.ds_model.sample.columns, name='Units', dtype='object'
+ )
+ assert cmp_units is not None
for cmp in cmp_units.index:
qnt_units.loc[cmp] = cmp_units.loc[cmp]
res = file_io.save_to_csv(
- self.sample,
- filepath,
+ self.ds_model.sample,
+ Path(filepath) if filepath is not None else None,
units=qnt_units,
unit_conversion_factors=self._asmnt.unit_conversion_factors,
use_simpleindex=(filepath is not None),
@@ -141,121 +404,473 @@ def save_sample(self, filepath=None, save_units=False):
)
if filepath is not None:
- self.log_msg('Damage sample successfully saved.', prepend_timestamp=False)
+ self.log.msg(
+ 'Damage sample successfully saved.', prepend_timestamp=False
+ )
return None
# else:
- units = res.loc["Units"]
- res.drop("Units", inplace=True)
+ assert isinstance(res, pd.DataFrame)
+ units = res.loc['Units']
+ assert isinstance(units, pd.Series)
+ res = res.drop('Units')
res.index = res.index.astype('int64')
+ res = res.astype(float)
+ assert isinstance(res, pd.DataFrame)
if save_units:
- return res.astype(float), units
+ return res, units
- return res.astype(float)
+ return res
- def load_sample(self, filepath):
- """
- Load damage state sample data.
+ def load_sample(self, filepath: str) -> None:
+ """Load damage state sample data."""
+ self.log.div()
+ self.log.msg('Loading damage sample...')
- """
- self.log_div()
- self.log_msg('Loading damage sample...')
-
- self.sample = file_io.load_data(
+ data = file_io.load_data(
filepath, self._asmnt.unit_conversion_factors, log=self._asmnt.log
)
+ assert isinstance(data, pd.DataFrame)
+ self.ds_model.sample = data
# set the names of the columns
- self.sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'ds']
+ self.ds_model.sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'ds']
- self.log_msg('Damage sample successfully loaded.', prepend_timestamp=False)
+ self.log.msg('Damage sample successfully loaded.', prepend_timestamp=False)
- def load_damage_model(self, data_paths):
+ def _ensure_damage_parameter_availability(
+ self, cmp_set: set[str], *, warn_missing: bool
+ ) -> list[str]:
"""
- Load limit state damage model parameters and damage state assignments
+ Make sure that all components have damage parameters.
Parameters
----------
- data_paths: list of string
- List of paths to data files with damage model information. Default
- XY datasets can be accessed as PelicunDefault/XY.
+        cmp_set: set
+            Set of component IDs in the asset model.
+ warn_missing: bool
+ Whether to issue a warning if missing components are found.
+
+ Returns
+ -------
+ list
+ List of component IDs with missing damage parameters.
+
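+        Examples
+        --------
+        A hypothetical outcome: if `cmp_set` is ``{'CMP.A', 'CMP.B'}``
+        but damage parameters are only available for `CMP.A`, the
+        method returns ``['CMP.B']`` and, when `warn_missing` is
+        True, issues a warning.
+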
"""
+ available_components = self._get_component_id_set()
+
+ missing_components = [
+ component
+ for component in cmp_set
+ if component not in available_components
+ ]
+
+ if missing_components and warn_missing:
+ self.log.warning(
+ f'The damage model does not provide '
+ f'damage information for the following component(s) '
+ f'in the asset model: {missing_components}.'
+ )
- self.log_div()
- self.log_msg('Loading damage model...')
+ return missing_components
- # replace default flag with default data path
- data_paths = file_io.substitute_default_path(data_paths)
+ def _get_component_id_set(self) -> set[str]:
+ """
+ Get a set of components with available damage parameters.
- data_list = []
- # load the data files one by one
- for data_path in data_paths:
- data = file_io.load_data(
- data_path, None, orientation=1, reindex=False, log=self._asmnt.log
- )
+ Returns
+ -------
+ set
+ Set of components with available damage parameters.
- data_list.append(data)
+ """
+ cmp_list = []
+ if self.ds_model.damage_params is not None:
+ cmp_list.extend(self.ds_model.damage_params.index.to_list())
+ return set(cmp_list)
- damage_params = pd.concat(data_list, axis=0)
+
+class DamageModel_Base(PelicunModel):
+ """Base class for damage models."""
+
+ __slots__ = ['damage_params', 'sample']
+
+ def __init__(self, assessment: AssessmentBase) -> None:
+ """
+ Initialize the object.
+
+ Parameters
+ ----------
+ assessment: AssessmentBase
+ Parent assessment object.
+
+ """
+ super().__init__(assessment)
+
+ self.damage_params: pd.DataFrame | None = None
+ self.sample: pd.DataFrame | None = None
+
+ def load_model_parameters(self, data: pd.DataFrame) -> None:
+ """
+ Load model parameters from a DataFrame.
+
+ Loads model parameters from a DataFrame, extending those
+ already available. Parameters already defined take precedence,
+ i.e. redefinitions of parameters are ignored.
+
+ Parameters
+ ----------
+ data: DataFrame
+ Data with damage model information.
+
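+        Examples
+        --------
+        A hypothetical call (`damage_params_df` is a placeholder
+        name for a DataFrame indexed by component ID)::
+
+            ds_model.load_model_parameters(damage_params_df)
+
+        If `damage_params_df` redefines a component that is already
+        loaded, the existing definition is kept and the redefinition
+        is ignored.
+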
+ """
+ if self.damage_params is not None:
+ data = pd.concat((self.damage_params, data), axis=0)
# drop redefinitions of components
- damage_params = damage_params.groupby(damage_params.index).first()
+ data = data.groupby(data.index).first()
- # get the component types defined in the asset model
- cmp_labels = self._asmnt.asset.cmp_sample.columns
+ # TODO(AZ): load defaults for Demand-Offset and Demand-Directional
- # only keep the damage model parameters for the components in the model
- cmp_unique = cmp_labels.unique(level=0)
- cmp_mask = damage_params.index.isin(cmp_unique, level=0)
+ self.damage_params = data
- damage_params = damage_params.loc[cmp_mask, :]
+ def convert_damage_parameter_units(self) -> None:
+ """Convert previously loaded damage parameters to base units."""
+ if self.damage_params is None:
+ return
- if np.sum(cmp_mask) != len(cmp_unique):
- cmp_list = cmp_unique[
- np.isin(cmp_unique, damage_params.index.values, invert=True)
- ].to_list()
+ units = self.damage_params['Demand', 'Unit']
+ self.damage_params = self.damage_params.drop(('Demand', 'Unit'), axis=1)
+ for ls_i in self.damage_params.columns.unique(level=0):
+ if ls_i.startswith('LS'):
+ params = self.damage_params.loc[:, ls_i].copy()
+ assert isinstance(params, pd.DataFrame)
+ self.damage_params.loc[:, ls_i] = self._convert_marginal_params(
+ params, units
+ ).to_numpy()
+
+ def remove_incomplete_components(self) -> None:
+ """
+        Remove components with incomplete damage parameter info.
- self.log_msg(
- "\nWARNING: The damage model does not provide "
- "vulnerability information for the following component(s) "
- f"in the asset model: {cmp_list}.\n",
- prepend_timestamp=False,
- )
+ Removes components that have incomplete damage model
+ definitions from the damage model parameters.
- # TODO: load defaults for Demand-Offset and Demand-Directional
+ """
+ if self.damage_params is None:
+ return
- # Now convert model parameters to base units
- for LS_i in damage_params.columns.unique(level=0):
- if LS_i.startswith('LS'):
- damage_params.loc[:, LS_i] = self.convert_marginal_params(
- damage_params.loc[:, LS_i].copy(),
- damage_params[('Demand', 'Unit')],
- ).values
+ if ('Incomplete', '') not in self.damage_params.columns:
+ return
- # check for components with incomplete damage model information
- cmp_incomplete_list = damage_params.loc[
- damage_params[('Incomplete', '')] == 1
+ cmp_incomplete_idx = self.damage_params.loc[
+ self.damage_params['Incomplete', ''] == 1
].index
- damage_params.drop(cmp_incomplete_list, inplace=True)
+ self.damage_params = self.damage_params.drop(cmp_incomplete_idx)
+
+ def drop_unused_damage_parameters(self, cmp_set: set[str]) -> None:
+ """
+        Remove info for nonexistent components.
+
+        Removes damage parameter definitions for component IDs not
+        present in the given set.
- if len(cmp_incomplete_list) > 0:
- self.log_msg(
- f"\nWARNING: Damage model information is incomplete for "
- f"the following component(s) {cmp_incomplete_list}. They "
- f"were removed from the analysis.\n",
- prepend_timestamp=False,
+ Parameters
+ ----------
+ cmp_set: set
+ Set of component IDs to be preserved in the damage
+ parameters.
+
+ """
+ if self.damage_params is None:
+ return
+ cmp_mask = self.damage_params.index.isin(cmp_set, level=0)
+ self.damage_params = self.damage_params.iloc[cmp_mask, :]
+
+ def _get_pg_batches(
+ self,
+ component_blocks: pd.DataFrame,
+ block_batch_size: int,
+ missing_components: list[str],
+ ) -> pd.DataFrame:
+ """
+ Group performance groups into batches for efficiency.
+
+        The method takes as input the `component_blocks` DataFrame,
+        which lists the number of blocks for each performance group
+        (defined from the 'Blocks' column of `cmp_marginal_params`
+        when available, and otherwise assigned a single block per
+        performance group in `calculate`), and the
+        `block_batch_size`, which specifies the maximum number of
+        blocks per batch.
+
+ The method then checks if the performance groups are available
+ in the damage parameters DataFrame, and removes any
+ performance groups that are not found in the damage
+ parameters. The method then groups the performance groups
+ based on the locations and directions of the components, and
+ calculates the cumulative sum of the blocks for each
+ group. The method then divides the performance groups into
+ batches of size specified by block_batch_size and assigns a
+ batch number to each group. Finally, the method groups the
+ performance groups by batch number, component, location, and
+ direction, and returns a DataFrame that shows the number of
+ blocks for each batch.
+
+ Parameters
+ ----------
+ component_blocks: pd.DataFrame
+ DataFrame containing a single column, `Blocks`, which lists
+ the number of blocks for each (`cmp`-`loc`-`dir`-`uid`).
+        block_batch_size: int
+            Maximum number of component blocks in each batch.
+        missing_components: list[str]
+            List of component IDs for which damage parameters are
+            unavailable. These components are ignored.
+
+ Returns
+ -------
+ DataFrame
+ A DataFrame indexed by batch number, component identifier,
+ location, direction, and unique ID, with a column
+ indicating the number of blocks assigned to each
+ batch. This DataFrame facilitates the management and
+ execution of damage assessment tasks by grouping
+ components into manageable batches based on the specified
+ block batch size.
+
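+        Examples
+        --------
+        A hypothetical outcome: with ``block_batch_size=2`` and
+        three single-block performance groups, the cumulative block
+        counts are 1, 2, and 3, so the first two groups are assigned
+        to batch 1 and the third to batch 2.
+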
+ """
+ # A warning has already been issued for components with
+ # missing damage parameters (in
+ # `DamageModel._ensure_damage_parameter_availability`).
+ component_blocks = component_blocks.drop(pd.Index(missing_components))
+
+ # It is safe to simply disregard components that are not
+ # present in the `damage_params` of *this* model, and let them
+ # be handled by another damage model.
+ assert self.damage_params is not None
+ available_components = self.damage_params.index.unique().to_list()
+ component_blocks = component_blocks.loc[
+ pd.IndexSlice[available_components, :, :, :], :
+ ]
+
+ # Sum up the number of blocks for each performance group
+ component_blocks = component_blocks.groupby(
+ ['loc', 'dir', 'cmp', 'uid']
+ ).sum()
+ component_blocks = component_blocks.sort_index(axis=0)
+
+ # Calculate cumulative sum of blocks
+ component_blocks['CBlocks'] = np.cumsum(
+ component_blocks['Blocks'].to_numpy().astype(int)
+ )
+ component_blocks['Batch'] = 0
+
+ # Group the performance groups into batches
+ for batch_i in range(1, component_blocks.shape[0] + 1):
+ # Find the mask for blocks that are less than the batch
+ # size and greater than 0
+ batch_mask = np.all(
+ np.array(
+ [
+ component_blocks['CBlocks'] <= block_batch_size,
+ component_blocks['CBlocks'] > 0,
+ ]
+ ),
+ axis=0,
)
- self.damage_params = damage_params
+ if np.sum(batch_mask) < 1:
+ batch_mask = np.full(batch_mask.shape, fill_value=False)
+ batch_mask[np.where(component_blocks['CBlocks'] > 0)[0][0]] = True
+
+ component_blocks.loc[batch_mask, 'Batch'] = batch_i
- self.log_msg(
- "Damage model parameters successfully parsed.", prepend_timestamp=False
+ # Decrement the cumulative block count by the max count in
+ # the current batch
+ component_blocks['CBlocks'] -= component_blocks.loc[
+ component_blocks['Batch'] == batch_i, 'CBlocks'
+ ].max()
+
+ # If the maximum cumulative block count is 0, exit the
+ # loop
+ if component_blocks['CBlocks'].max() == 0:
+ break
+
+ # Group the performance groups by batch, component, location,
+ # and direction, and keep only the number of blocks for each
+ # group
+ component_blocks = (
+ component_blocks.groupby(['Batch', 'cmp', 'loc', 'dir', 'uid'])
+ .sum()
+ .loc[:, 'Blocks']
+ .to_frame()
+ )
+ return component_blocks.sort_index(
+ level=['Batch', 'cmp', 'loc', 'dir', 'uid']
)
- def _handle_operation(self, initial_value, operation, other_value):
+
+class DamageModel_DS(DamageModel_Base):
+ """Damage model for components that have discrete Damage States (DS)."""
+
+ __slots__ = ['ds_sample']
+
+ def __init__(self, assessment: AssessmentBase) -> None:
+ """
+ Initialize the object.
+
+ Parameters
+ ----------
+ assessment: AssessmentBase
+ Parent assessment object.
+
+ """
+ super().__init__(assessment)
+ self.ds_sample: pd.DataFrame | None = None
+
+ def probabilities(self) -> pd.DataFrame:
+ """
+ Return the probability of each observed damage state.
+
+ Returns
+ -------
+ pd.DataFrame
+ DataFrame with the probability of each damage state for
+ each component block.
+
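+        Examples
+        --------
+        A hypothetical result, for a single component block that is
+        undamaged in 60% and in DS1 in 40% of the realizations::
+
+            Damage State              0    1
+            cmp   loc dir uid block
+            CMP.A 1   1   0   1     0.6  0.4
+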
"""
+ sample = self.ds_sample
+ assert sample is not None
+
+ probabilities = {}
+
+ for col in sample.columns:
+ values = sample[col]
+ # skip NA cases that are the result of damage processes
+ values = values[values != -1]
+ if len(values) == 0:
+ # can't determine without a sample
+ probabilities[col] = np.nan
+ else:
+ vcounts = values.value_counts() / len(values)
+ probabilities[col] = vcounts # type: ignore
+
+ return (
+ pd.DataFrame(probabilities)
+ .T.rename_axis(
+ index=['cmp', 'loc', 'dir', 'uid', 'block'], columns='Damage State'
+ )
+ .sort_index(axis=1)
+ .sort_index(axis=0)
+ )
+
+ def obtain_ds_sample(
+ self,
+ demand_sample: pd.DataFrame,
+ component_blocks: pd.DataFrame,
+ block_batch_size: int,
+ scaling_specification: dict | None,
+ missing_components: list[str],
+ nondirectional_multipliers: dict[str, float],
+ ) -> None:
+ """Obtain the damage state of each performance group."""
+ # Break up damage calculation and perform it by performance group.
+ # Compared to the simultaneous calculation of all PGs, this approach
+ # reduces demands on memory and increases the load on CPU. This leads
+ # to a more balanced workload on most machines for typical problems.
+ # It also allows for a straightforward extension with parallel
+ # computing.
+
+ sample_size = len(demand_sample)
+
+ component_blocks = self._get_pg_batches(
+ component_blocks, block_batch_size, missing_components
+ )
+ batches = component_blocks.index.get_level_values(0).unique()
+
+ self.log.msg(
+ f'Number of Component Blocks: {component_blocks["Blocks"].sum()}',
+ prepend_timestamp=False,
+ )
+
+ self.log.msg(
+ f'{len(batches)} batches of Performance Groups prepared '
+ 'for damage assessment',
+ prepend_timestamp=False,
+ )
+
+ # for PG_i in self._asmnt.asset.cmp_sample.columns:
+ ds_samples = []
+ for pgb_i in batches:
+ performance_group = component_blocks.loc[pgb_i]
+
+ self.log.msg(
+ f"Calculating damage states for PG batch {pgb_i} with "
+ f"{int(performance_group['Blocks'].sum())} blocks"
+ )
+
+ # Generate an array with component capacities for each block and
+ # generate a second array that assigns a specific damage state to
+ # each component limit state. The latter is primarily needed to
+ # handle limit states with multiple, mutually exclusive DS options
+ capacity_sample, lsds_sample = self._generate_dmg_sample(
+ sample_size, performance_group, scaling_specification
+ )
+
+ # Get the required demand types for the analysis
+ if self._asmnt.log.verbose:
+ self.log.msg(
+ 'Collecting required demand information...',
+ prepend_timestamp=True,
+ )
+ demand_offset = self._asmnt.options.demand_offset
+ assert self.damage_params is not None
+ required_edps = _get_required_demand_type(
+ self.damage_params, performance_group, demand_offset
+ )
+
+ available_edps = (
+ pd.DataFrame(index=demand_sample.columns)
+ .reset_index()
+ .groupby(['type', 'loc'])['dir']
+ .agg(lambda x: list(set(x)))
+ .to_dict()
+ )
+
+ # Raise an error if demand sample is missing necessary entries.
+ _verify_edps_available(available_edps, set(required_edps.keys()))
+
+ # Create the demand vector
+ if self._asmnt.log.verbose:
+ self.log.msg(
+ 'Assembling demand data for calculation...',
+ prepend_timestamp=True,
+ )
+ demand_dict = _assemble_required_demand_data(
+ set(required_edps.keys()), nondirectional_multipliers, demand_sample
+ )
+
+ # Evaluate the Damage State of each Component Block
+ ds_sample = self._evaluate_damage_state(
+ demand_dict, required_edps, capacity_sample, lsds_sample
+ )
+
+ ds_samples.append(ds_sample)
+
+ self.ds_sample = pd.concat(ds_samples, axis=1)
+
+ self.log.msg('Damage state calculation successful.', prepend_timestamp=False)
+
+ def _handle_operation( # noqa: PLR6301
+ self, initial_value: float, operation: str, other_value: float
+ ) -> float:
+ """
+ Handle a capacity adjustment operation.
+
This method is used in `_create_dmg_RVs` to apply capacity
adjustment operations whenever required. It is defined as a
safer alternative to directly using `eval`.
@@ -263,41 +878,273 @@ def _handle_operation(self, initial_value, operation, other_value):
Parameters
----------
initial_value: float
- Value before operation
+ Value before operation
operation: str
- Any of +, -, *, /
+ Any of `+`, `-`, `*`, `/`
other_value: float
- Value used to apply the operation
+ Value used to apply the operation
Returns
-------
float
The result of the operation
- Raises
- ------
- ValueError
- If the operation is invalid.
+ Raises
+ ------
+ ValueError
+ If the operation is invalid.
+
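+        Examples
+        --------
+        A minimal illustration::
+
+            self._handle_operation(2.0, '*', 1.2)  # returns 2.4
+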
+ """
+ if operation == '+':
+ return initial_value + other_value
+ if operation == '-':
+ return initial_value - other_value
+ if operation == '*':
+ return initial_value * other_value
+ if operation == '/':
+ return initial_value / other_value
+ msg = f'Invalid operation: `{operation}`'
+ raise ValueError(msg)
+
+ def _generate_dmg_sample(
+ self,
+ sample_size: int,
+ pgb: pd.DataFrame,
+ scaling_specification: dict | None = None,
+ ) -> tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Generate the damage sample.
+
+ Generates a damage sample by creating random variables (RVs)
+ for capacities and limit-state-damage-states (lsds), and then
+ sampling from these RVs. The sample size and performance group
+ batches (PGB) are specified as inputs. The method returns the
+ capacity sample and the lsds sample.
+
+ Parameters
+ ----------
+ sample_size: int
+ The number of realizations to generate.
+ pgb: DataFrame
+ A DataFrame that groups performance groups into batches
+ for efficient damage assessment.
+ scaling_specification: dict, optional
+ A dictionary defining the shift in median.
+ Example: {'CMP-1-1': '*1.2', 'CMP-1-2': '/1.4'}
+ The keys are individual components that should be present
+ in the `capacity_sample`. The values should be strings
+ containing an operation followed by the value formatted as
+ a float. The operation can be '+' for addition, '-' for
+ subtraction, '*' for multiplication, and '/' for division.
+
+ Returns
+ -------
+ capacity_sample: DataFrame
+ A DataFrame that represents the capacity sample.
+ lsds_sample: DataFrame
+            A DataFrame that represents the mapping between limit
+            states and damage states.
+
+ Raises
+ ------
+ ValueError
+ If the damage parameters have not been specified.
+
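+        Examples
+        --------
+        A hypothetical internal call, matching how `obtain_ds_sample`
+        uses this method for each performance group batch::
+
+            capacity_sample, lsds_sample = self._generate_dmg_sample(
+                sample_size, performance_group, scaling_specification
+            )
+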
+ """
+ # Check if damage model parameters have been specified
+ if self.damage_params is None:
+ msg = (
+ 'Damage model parameters have not been specified. '
+ 'Load parameters from the default damage model '
+ 'databases or provide your own damage model '
+ 'definitions before generating a sample.'
+ )
+ raise ValueError(msg)
+
+ # Create capacity and LSD RVs for each performance group
+ capacity_rvs, lsds_rvs = self._create_dmg_RVs(pgb, scaling_specification)
+
+ if self._asmnt.log.verbose:
+ self.log.msg('Sampling capacities...', prepend_timestamp=True)
+
+ # Generate samples for capacity RVs
+ assert self._asmnt.options.sampling_method is not None
+ capacity_rvs.generate_sample(
+ sample_size=sample_size, method=self._asmnt.options.sampling_method
+ )
+
+ # Generate samples for LSD RVs
+ lsds_rvs.generate_sample(
+ sample_size=sample_size, method=self._asmnt.options.sampling_method
+ )
+
+ if self._asmnt.log.verbose:
+ self.log.msg('Raw samples are available', prepend_timestamp=True)
+
+ # get the capacity and lsds samples
+ capacity_sample = (
+ pd.DataFrame(capacity_rvs.RV_sample)
+ .sort_index(axis=0)
+ .sort_index(axis=1)
+ )
+ capacity_sample_mi = base.convert_to_MultiIndex(capacity_sample, axis=1)[
+ 'FRG'
+ ]
+ assert isinstance(capacity_sample_mi, pd.DataFrame)
+ capacity_sample = capacity_sample_mi
+ capacity_sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
+
+ lsds_sample = (
+ pd.DataFrame(lsds_rvs.RV_sample)
+ .sort_index(axis=0)
+ .sort_index(axis=1)
+ .astype(int)
+ )
+ lsds_sample_mi = base.convert_to_MultiIndex(lsds_sample, axis=1)['LSDS']
+ assert isinstance(lsds_sample_mi, pd.DataFrame)
+ lsds_sample = lsds_sample_mi
+ lsds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
+
+ if self._asmnt.log.verbose:
+ self.log.msg(
+ f'Successfully generated {sample_size} realizations.',
+ prepend_timestamp=True,
+ )
+
+ return capacity_sample, lsds_sample
+
+ def _evaluate_damage_state(
+ self,
+ demand_dict: dict[str, np.ndarray],
+ required_edps: dict[str, list[tuple]],
+ capacity_sample: pd.DataFrame,
+ lsds_sample: pd.DataFrame,
+ ) -> pd.DataFrame:
+ """
+ Use the demand and LS capacity sample to evaluate damage states.
+
+ Parameters
+ ----------
+ demand_dict: dict
+ Dictionary containing the demand of each demand type.
+ required_edps: dict
+ Dictionary containing the EDPs assigned to each demand
+ type.
+ capacity_sample: DataFrame
+ Provides a sample of the capacity.
+ lsds_sample: DataFrame
+ Provides the mapping between limit states and damage
+ states.
+
+ Returns
+ -------
+ DataFrame
+ Assigns a Damage State to each component block in the
+ asset model.
+
+ """
+ if self._asmnt.log.verbose:
+ self.log.msg('Evaluating damage states...', prepend_timestamp=True)
+
+ # Create an empty DataFrame with columns and index taken from
+ # the input capacity sample
+ dmg_eval = pd.DataFrame(
+ columns=capacity_sample.columns, index=capacity_sample.index
+ )
+
+ # Initialize an empty list to store demand data
+ demand_df = []
+
+ # For each demand type in the demand dictionary
+ for demand_name, demand_vals in demand_dict.items():
+ # Get the list of PGs assigned to this demand type
+ pg_list = required_edps[demand_name]
+
+ # Create a list of columns for the demand data
+ # corresponding to each PG in the PG_list
+ pg_cols = pd.concat(
+ [dmg_eval.loc[:1, PG_i] for PG_i in pg_list], # type: ignore
+ axis=1,
+ keys=pg_list,
+ ).columns
+ pg_cols.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
+ # Create a DataFrame with demand values repeated for the
+ # number of PGs and assign the columns as PG_cols
+ demand_df.append(
+ pd.concat(
+ [pd.Series(demand_vals)] * len(pg_cols), axis=1, keys=pg_cols
+ )
+ )
+
+ # Concatenate all demand DataFrames into a single DataFrame
+ demand_df_concat = pd.concat(demand_df, axis=1)
+ # Sort the columns of the demand DataFrame
+ demand_df_concat = demand_df_concat.sort_index(axis=1)
+
+ # Evaluate the damage exceedance by subtracting demand from
+ # capacity and checking if the result is less than zero
+ dmg_eval = (capacity_sample - demand_df_concat) < 0
+
+ # Remove any columns with NaN values from the damage
+ # exceedance DataFrame
+ dmg_eval = dmg_eval.dropna(axis=1)
+
+ # initialize the DataFrames that store the damage states and
+ # quantities
+ ds_sample = pd.DataFrame(
+ 0, # fill value
+ columns=capacity_sample.columns.droplevel('ls').unique(),
+ index=capacity_sample.index,
+ dtype='int64',
+ )
+
+ # get a list of limit state ids among all components in the damage model
+ ls_list = dmg_eval.columns.get_level_values(5).unique()
+
+ # for each consecutive limit state...
+ for ls_id in ls_list:
+ # get all cmp - loc - dir - block where this limit state occurs
+ dmg_e_ls = dmg_eval.loc[
+ :, # type: ignore
+ idx[:, :, :, :, :, ls_id],
+ ].dropna(axis=1)
+
+ # Get the damage states corresponding to this limit state in each
+ # block
+ # Note that limit states with a set of mutually exclusive damage
+ # states options have their damage state picked here.
+ lsds = lsds_sample.loc[:, dmg_e_ls.columns] # type: ignore
+
+ # Drop the limit state level from the columns to make the damage
+ # exceedance DataFrame compatible with the other DataFrames in the
+ # following steps
+ dmg_e_ls.columns = dmg_e_ls.columns.droplevel(5)
+
+ # Same thing for the lsds DataFrame
+ lsds.columns = dmg_e_ls.columns
+
+ # Update the damage state in the result with the values from the
+ # lsds DF if the limit state was exceeded according to the
+ # dmg_e_ls DF.
+ # This one-liner updates the given Limit State exceedance in the
+ # entire damage model. If subsequent Limit States are also exceeded,
+ # those cells in the result matrix will get overwritten by higher
+ # damage states.
+ ds_sample.loc[:, dmg_e_ls.columns] = ds_sample.loc[
+ :, dmg_e_ls.columns # type: ignore
+ ].mask(dmg_e_ls, lsds)
- """
- if operation == '+':
- return initial_value + other_value
- if operation == '-':
- return initial_value - other_value
- if operation == '*':
- return initial_value * other_value
- if operation == '/':
- return initial_value / other_value
- raise ValueError(f'Invalid operation: {operation}')
+ return ds_sample
- def _create_dmg_RVs(self, PGB, scaling_specification=None):
+ def _create_dmg_RVs( # noqa: N802, C901
+ self, pgb: pd.DataFrame, scaling_specification: dict | None = None
+ ) -> tuple[uq.RandomVariableRegistry, uq.RandomVariableRegistry]:
"""
- Creates random variables required later for the damage calculation.
+ Create random variables for the damage calculation.
The method initializes two random variable registries,
capacity_RV_reg and lsds_RV_reg, and loops through each
performance group in the input performance group batch (PGB)
- dataframe. For each performance group, it retrieves the
+ DataFrame. For each performance group, it retrieves the
component sample and blocks and checks if the limit state is
defined for the component. If the limit state is defined, the
method gets the list of limit states and the parameters for
@@ -309,7 +1156,7 @@ def _create_dmg_RVs(self, PGB, scaling_specification=None):
Parameters
----------
- PGB : DataFrame
+ pgb: DataFrame
A DataFrame that groups performance groups into batches
for efficient damage assessment.
scaling_specification: dict, optional
@@ -333,12 +1180,21 @@ def _create_dmg_RVs(self, PGB, scaling_specification=None):
ValueError
Raises an error if the scaling specification is invalid or
if the input DataFrame does not meet the expected format.
- Also, raises errors if there are any issues with the types
- or formats of the data in the input DataFrame.
+ TypeError
+ If there are any issues with the types of the data in the
+ input DataFrame.
+
"""
- def assign_lsds(ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag):
+ def assign_lsds(
+ ds_weights: str | None,
+ ds_id: int,
+ lsds_rv_reg: RandomVariableRegistry,
+ lsds_rv_tag: str,
+ ) -> int:
"""
+ Assign limit states to damage states.
+
Assigns limit states to damage states using random
variables, updating the provided random variable registry.
This function either creates a deterministic random
@@ -347,19 +1203,19 @@ def assign_lsds(ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag):
Parameters
----------
- ds_weights : str or None
+ ds_weights: str or None
A string representing the weights of different damage
states associated with a limit state, separated by
'|'. If None, indicates that there is only one damage
state associated with the limit state.
- ds_id : int
+ ds_id: int
The starting index for damage state IDs. This ID helps
in mapping damage states to limit states.
- lsds_RV_reg : RandomVariableRegistry
+ lsds_rv_reg: RandomVariableRegistry
The registry where the newly created random variables
(for mapping limit states to damage states) will be
added.
- lsds_rv_tag : str
+ lsds_rv_tag: str
A unique identifier for the random variable being
created, typically including identifiers for
component, location, direction, and limit state.
@@ -378,39 +1234,42 @@ def assign_lsds(ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag):
probabilistic damage assessments. It dynamically adjusts
to the number of damage states specified and applies a
mapping function to correctly assign state IDs.
+
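+            Examples
+            --------
+            An illustrative case: with ``ds_weights='0.50 | 0.50'``
+            and ``ds_id=0``, a multinomial random variable that maps
+            its outcomes to damage states 1 and 2 is registered
+            under `lsds_rv_tag`, and the function returns ``2``.
+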
"""
# If the limit state has a single damage state assigned
# to it, we don't need random sampling
- if pd.isnull(ds_weights):
+ if pd.isna(ds_weights):
ds_id += 1
-
- lsds_RV_reg.add_RV(
+ lsds_rv_reg.add_RV(
uq.DeterministicRandomVariable(
name=lsds_rv_tag,
- theta=ds_id,
+ theta=np.array((ds_id,)),
)
)
# Otherwise, we create a multinomial random variable
else:
+ assert isinstance(ds_weights, str)
# parse the DS weights
- ds_weights = np.array(
- ds_weights.replace(" ", "").split('|'), dtype=float
+ ds_weights_np = np.array(
+ ds_weights.replace(' ', '').split('|'), dtype=float
)
- def map_ds(values, offset=int(ds_id + 1)):
+ def map_ds(values: np.ndarray, offset: int) -> np.ndarray:
"""
+ Map DS indices to damage states.
+
Maps an array of damage state indices to their
corresponding actual state IDs by applying an
offset.
Parameters
----------
- values : array-like
+ values: array-like
An array of indices representing damage
states. These indices are typically sequential
integers starting from zero.
- offset : int
+ offset: int
The value to be added to each element in
`values` to obtain the actual damage state
IDs.
@@ -421,27 +1280,28 @@ def map_ds(values, offset=int(ds_id + 1)):
An array where each original index from
`values` has been incremented by `offset` to
reflect its actual damage state ID.
+
"""
return values + offset
- lsds_RV_reg.add_RV(
+ lsds_rv_reg.add_RV(
uq.MultinomialRandomVariable(
name=lsds_rv_tag,
- theta=ds_weights,
- f_map=map_ds,
+ theta=ds_weights_np,
+ f_map=partial(map_ds, offset=ds_id + 1),
)
)
- ds_id += len(ds_weights)
+ ds_id += len(ds_weights_np)
return ds_id
if self._asmnt.log.verbose:
- self.log_msg('Generating capacity variables ...', prepend_timestamp=True)
+ self.log.msg('Generating capacity variables ...', prepend_timestamp=True)
# initialize the registry
- capacity_RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
- lsds_RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
+ capacity_rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
+ lsds_rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
# capacity adjustment:
# ensure the scaling_specification is a dictionary
@@ -455,20 +1315,23 @@ def map_ds(values, offset=int(ds_id + 1)):
for key, value in scaling_specification.items():
css = 'capacity adjustment specification'
if not isinstance(value, str):
- raise ValueError(
- f'Invalud entry in {css}: {value}. It has to be a string. '
+ msg = (
+ f'Invalid entry in {css}: {value}. It has to be a string. '
f'See docstring of DamageModel._create_dmg_RVs.'
)
+ raise TypeError(msg)
capacity_adjustment_operation = value[0]
number = value[1::]
- if capacity_adjustment_operation not in ('+', '-', '*', '/'):
- raise ValueError(
+ if capacity_adjustment_operation not in {'+', '-', '*', '/'}:
+ msg = (
f'Invalid operation in {css}: '
f'{capacity_adjustment_operation}'
)
+ raise ValueError(msg)
fnumber = base.float_or_None(number)
if fnumber is None:
- raise ValueError(f'Invalid number in {css}: {number}')
+ msg = f'Invalid number in {css}: {number}'
+ raise ValueError(msg)
parsed_scaling_specification[key] = (
capacity_adjustment_operation,
fnumber,
@@ -476,13 +1339,15 @@ def map_ds(values, offset=int(ds_id + 1)):
scaling_specification = parsed_scaling_specification
# get the component sample and blocks from the asset model
- for PG in PGB.index:
+ for pg in pgb.index:
# determine demand capacity adjustment operation, if required
- cmp_loc_dir = '-'.join(PG[0:3])
- capacity_adjustment_operation = scaling_specification.get(cmp_loc_dir, None)
+ cmp_loc_dir = '-'.join(pg[0:3])
+ capacity_adjustment_operation = scaling_specification.get( # type: ignore
+ cmp_loc_dir,
+ )
- cmp_id = PG[0]
- blocks = PGB.loc[PG, 'Blocks']
+ cmp_id = pg[0]
+ blocks = pgb.loc[pg, 'Blocks']
# Calculate the block weights
blocks = np.full(int(blocks), 1.0 / blocks)
@@ -497,541 +1362,145 @@ def map_ds(values, offset=int(ds_id + 1)):
for val in frg_params.index.get_level_values(0).unique():
if 'LS' in val:
- limit_states.append(val[2:])
+ limit_states.append(val[2:]) # noqa: PERF401
ds_id = 0
- frg_rv_set_tags = [[] for b in blocks]
- anchor_RVs = []
+ frg_rv_set_tags: list = [[] for b in blocks]
+ anchor_rvs: list = []
for ls_id in limit_states:
- frg_params_LS = frg_params[f'LS{ls_id}']
+ frg_params_ls = frg_params[f'LS{ls_id}']
+
+ theta_0 = frg_params_ls.get('Theta_0', np.nan)
+ family = frg_params_ls.get('Family', 'deterministic')
- theta_0 = frg_params_LS.get('Theta_0', np.nan)
- family = frg_params_LS.get('Family', 'deterministic')
- ds_weights = frg_params_LS.get('DamageStateWeights', np.nan)
+ # if `family` is defined but is `None`, we
+ # consider it to be `deterministic`
+ if not family:
+ family = 'deterministic'
+ ds_weights = frg_params_ls.get('DamageStateWeights', None)
# check if the limit state is defined for the component
if pd.isna(theta_0):
continue
theta = [
- frg_params_LS.get(f"Theta_{t_i}", np.nan) for t_i in range(3)
+ frg_params_ls.get(f'Theta_{t_i}', np.nan) for t_i in range(3)
]
if capacity_adjustment_operation:
- if family in {'normal', 'lognormal'}:
+ if family in {'normal', 'lognormal', 'deterministic'}:
theta[0] = self._handle_operation(
theta[0],
capacity_adjustment_operation[0],
- capacity_adjustment_operation[1],
+ float(capacity_adjustment_operation[1]),
)
else:
- self.log_msg(
- f'\nWARNING: Capacity adjustment is only supported '
+ self.log.warning(
+ f'Capacity adjustment is only supported '
f'for `normal` or `lognormal` distributions. '
- f'Ignoring: {cmp_loc_dir}, which is {family}',
- prepend_timestamp=False,
+ f'Ignoring: `{cmp_loc_dir}`, which is `{family}`'
)
tr_lims = [
- frg_params_LS.get(f"Truncate{side}", np.nan)
- for side in ("Lower", "Upper")
+ frg_params_ls.get(f'Truncate{side}', np.nan)
+ for side in ('Lower', 'Upper')
]
for block_i, _ in enumerate(blocks):
- frg_rv_tag = (
- 'FRG-'
- f'{PG[0]}-' # cmp_id
- f'{PG[1]}-' # loc
- f'{PG[2]}-' # dir
- f'{PG[3]}-' # uid
- f'{block_i+1}-' # block
- f'{ls_id}'
- )
-
- # Assign correlation between limit state random
- # variables
- # Note that we assume perfectly correlated limit
- # state random variables here. This approach is in
- # line with how mainstream PBE calculations are
- # performed. Assigning more sophisticated
- # correlations between limit state RVs is possible,
- # if needed. Please let us know through the
- # SimCenter Message Board if you are interested in
- # such a feature.
- # Anchor all other limit state random variables to
- # the first one to consider the perfect correlation
- # between capacities in each LS
- if ls_id == limit_states[0]:
- anchor = None
- else:
- anchor = anchor_RVs[block_i]
-
- # parse theta values for multilinear_CDF
- if family == 'multilinear_CDF':
- theta = np.column_stack(
- (
- np.array(
- theta[0].split('|')[0].split(','),
- dtype=float,
- ),
- np.array(
- theta[0].split('|')[1].split(','),
- dtype=float,
- ),
- )
- )
-
- RV = uq.rv_class_map(family)(
- name=frg_rv_tag,
- theta=theta,
- truncation_limits=tr_lims,
- anchor=anchor,
- )
-
- capacity_RV_reg.add_RV(RV)
-
- # add the RV to the set of correlated variables
- frg_rv_set_tags[block_i].append(frg_rv_tag)
-
- if ls_id == limit_states[0]:
- anchor_RVs.append(RV)
-
- # Now add the LS->DS assignments
- lsds_rv_tag = (
- 'LSDS-'
- f'{PG[0]}-' # cmp_id
- f'{PG[1]}-' # loc
- f'{PG[2]}-' # dir
- f'{PG[3]}-' # uid
- f'{block_i+1}-' # block
- f'{ls_id}'
- )
-
- ds_id_next = assign_lsds(
- ds_weights, ds_id, lsds_RV_reg, lsds_rv_tag
- )
-
- ds_id = ds_id_next
-
- if self._asmnt.log.verbose:
- rv_count = len(lsds_RV_reg.RV)
- self.log_msg(
- f"2x{rv_count} random variables created.", prepend_timestamp=False
- )
-
- return capacity_RV_reg, lsds_RV_reg
-
- def _generate_dmg_sample(self, sample_size, PGB, scaling_specification=None):
- """
- This method generates a damage sample by creating random
- variables (RVs) for capacities and limit-state-damage-states
- (lsds), and then sampling from these RVs. The sample size and
- performance group batches (PGB) are specified as inputs. The
- method returns the capacity sample and the lsds sample.
-
- Parameters
- ----------
- sample_size : int
- The number of realizations to generate.
- PGB : DataFrame
- A DataFrame that groups performance groups into batches
- for efficient damage assessment.
- scaling_specification: dict, optional
- A dictionary defining the shift in median.
- Example: {'CMP-1-1': '*1.2', 'CMP-1-2': '/1.4'}
- The keys are individual components that should be present
- in the `capacity_sample`. The values should be strings
- containing an operation followed by the value formatted as
- a float. The operation can be '+' for addition, '-' for
- subtraction, '*' for multiplication, and '/' for division.
-
- Returns
- -------
- capacity_sample : DataFrame
- A DataFrame that represents the capacity sample.
- lsds_sample : DataFrame
- A DataFrame that represents the .
-
- Raises
- ------
- ValueError
- If the damage parameters have not been specified.
-
- """
-
- # Check if damage model parameters have been specified
- if self.damage_params is None:
- raise ValueError(
- 'Damage model parameters have not been specified. '
- 'Load parameters from the default damage model '
- 'databases or provide your own damage model '
- 'definitions before generating a sample.'
- )
-
- # Create capacity and LSD RVs for each performance group
- capacity_RVs, lsds_RVs = self._create_dmg_RVs(PGB, scaling_specification)
-
- if self._asmnt.log.verbose:
- self.log_msg('Sampling capacities...', prepend_timestamp=True)
-
- # Generate samples for capacity RVs
- capacity_RVs.generate_sample(
- sample_size=sample_size, method=self._asmnt.options.sampling_method
- )
-
- # Generate samples for LSD RVs
- lsds_RVs.generate_sample(
- sample_size=sample_size, method=self._asmnt.options.sampling_method
- )
-
- if self._asmnt.log.verbose:
- self.log_msg("Raw samples are available", prepend_timestamp=True)
-
- # get the capacity and lsds samples
- capacity_sample = (
- pd.DataFrame(capacity_RVs.RV_sample).sort_index(axis=0).sort_index(axis=1)
- )
- capacity_sample = base.convert_to_MultiIndex(capacity_sample, axis=1)['FRG']
- capacity_sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
-
- lsds_sample = (
- pd.DataFrame(lsds_RVs.RV_sample)
- .sort_index(axis=0)
- .sort_index(axis=1)
- .astype(int)
- )
- lsds_sample = base.convert_to_MultiIndex(lsds_sample, axis=1)['LSDS']
- lsds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
-
- if self._asmnt.log.verbose:
- self.log_msg(
- f"Successfully generated {sample_size} realizations.",
- prepend_timestamp=True,
- )
-
- return capacity_sample, lsds_sample
-
- def _get_required_demand_type(self, PGB):
- """
- Returns the id of the demand needed to calculate damage to a
- component. We assume that a damage model sample is available.
-
- This method returns the demand type and its properties
- required to calculate the damage to a component. The
- properties include whether the demand is directional, the
- offset, and the type of the demand. The method takes as input
- a dataframe PGB that contains information about the component
- groups in the asset. For each component group PG in the PGB
- dataframe, the method retrieves the relevant damage parameters
- from the damage_params dataframe and parses the demand type
- into its properties. If the demand type has a subtype, the
- method splits it and adds the subtype to the demand type to
- form the EDP (engineering demand parameter) type. The method
- also considers the default offset for the demand type, if it
- is specified in the options attribute of the assessment, and
- adds the offset to the EDP. If the demand is directional, the
- direction is added to the EDP. The method collects all the
- unique EDPs for each component group and returns them as a
- dictionary where each key is an EDP and its value is a list of
- component groups that require that EDP.
-
- Parameters
- ----------
- `PGB`: pd.DataFrame
- A pandas DataFrame with the block information for
- each component
-
- Returns
- -------
- dict
- A dictionary of EDP requirements, where each key is the EDP
- string (e.g., "Peak Ground Acceleration-0-0"), and the
- corresponding value is a list of tuples (component_id,
- location, direction)
-
- """
-
- # Assign the damage_params attribute to a local variable `DP`
- DP = self.damage_params
-
- # Check if verbose logging is enabled in `self._asmnt.log`
- if self._asmnt.log.verbose:
- # If verbose logging is enabled, log a message indicating
- # that we are collecting demand information
- self.log_msg(
- 'Collecting required demand information...', prepend_timestamp=True
- )
-
- # Initialize an empty dictionary to store the unique EDP
- # requirements
- EDP_req = {}
-
- # Iterate over the index of the `PGB` DataFrame
- for PG in PGB.index:
- # Get the component name from the first element of the
- # `PG` tuple
- cmp = PG[0]
-
- # Get the directional, offset, and demand_type parameters
- # from the `DP` DataFrame
- directional, offset, demand_type = DP.loc[
- cmp,
- [
- ('Demand', 'Directional'),
- ('Demand', 'Offset'),
- ('Demand', 'Type'),
- ],
- ]
-
- # Parse the demand type
-
- # Check if there is a subtype included in the demand_type
- # string
- if '|' in demand_type:
- # If there is a subtype, split the demand_type string
- # on the '|' character
- demand_type, subtype = demand_type.split('|')
- # Convert the demand type to the corresponding EDP
- # type using `base.EDP_to_demand_type`
- demand_type = base.EDP_to_demand_type[demand_type]
- # Concatenate the demand type and subtype to form the
- # EDP type
- EDP_type = f'{demand_type}_{subtype}'
- else:
- # If there is no subtype, convert the demand type to
- # the corresponding EDP type using
- # `base.EDP_to_demand_type`
- demand_type = base.EDP_to_demand_type[demand_type]
- # Assign the EDP type to be equal to the demand type
- EDP_type = demand_type
-
- # Consider the default offset, if needed
- if demand_type in self._asmnt.options.demand_offset.keys():
- # If the demand type has a default offset in
- # `self._asmnt.options.demand_offset`, add the offset
- # to the default offset
- offset = int(offset + self._asmnt.options.demand_offset[demand_type])
- else:
- # If the demand type does not have a default offset in
- # `self._asmnt.options.demand_offset`, convert the
- # offset to an integer
- offset = int(offset)
-
- # Determine the direction
- if directional:
- # If the demand is directional, use the third element
- # of the `PG` tuple as the direction
- direction = PG[2]
- else:
- # If the demand is not directional, use '0' as the
- # direction
- direction = '0'
-
- # Concatenate the EDP type, offset, and direction to form
- # the EDP key
- EDP = f"{EDP_type}-{str(int(PG[1]) + offset)}-{direction}"
-
- # If the EDP key is not already in the `EDP_req`
- # dictionary, add it and initialize it with an empty list
- if EDP not in EDP_req:
- EDP_req.update({EDP: []})
-
- # Add the current PG (performance group) to the list of
- # PGs associated with the current EDP key
- EDP_req[EDP].append(PG)
-
- # Return the unique EDP requirements
- return EDP_req
-
- def _assemble_required_demand_data(self, EDP_req):
- """
- Assembles demand data for damage state determination.
-
- The method takes the maximum of all available directions for
- non-directional demand, scaling it using the non-directional
- multiplier specified in self._asmnt.options, and returning the
- result as a dictionary with keys in the format of
-        '<type>-<loc>-<dir>' and values as arrays of
- demand values. If demand data is not found, logs a warning
- message and skips the corresponding damages calculation.
-
- Parameters
- ----------
- EDP_req : dict
- A dictionary of unique EDP requirements
-
- Returns
- -------
- demand_dict : dict
- A dictionary of assembled demand data for calculation
-
- Raises
- ------
- KeyError
- If demand data for a given EDP cannot be found
-
- """
-
- if self._asmnt.log.verbose:
- self.log_msg(
- 'Assembling demand data for calculation...', prepend_timestamp=True
- )
-
- demand_source = self._asmnt.demand.sample
-
- demand_dict = {}
-
- for EDP in EDP_req.keys():
- EDP = EDP.split('-')
-
- # if non-directional demand is requested...
- if EDP[2] == '0':
- # assume that the demand at the given location is available
- try:
- # take the maximum of all available directions and scale it
- # using the nondirectional multiplier specified in the
- # self._asmnt.options (the default value is 1.2)
- demand = demand_source.loc[:, (EDP[0], EDP[1])].max(axis=1).values
- demand = demand * self._asmnt.options.nondir_multi(EDP[0])
-
- except KeyError:
- demand = None
-
- else:
- demand = demand_source[(EDP[0], EDP[1], EDP[2])].values
-
- if demand is None:
- self.log_msg(
- f'\nWARNING: Cannot find demand data for {EDP}. The '
- 'corresponding damages cannot be calculated.',
- prepend_timestamp=False,
- )
- else:
- demand_dict.update({f'{EDP[0]}-{EDP[1]}-{EDP[2]}': demand})
-
- return demand_dict
-
- def _evaluate_damage_state(
- self, demand_dict, EDP_req, capacity_sample, lsds_sample
- ):
- """
- Use the demand and LS capacity sample to evaluate damage states
-
- Parameters
- ----------
- demand_dict: dict
- Dictionary containing the demand of each demand type.
- EDP_req: dict
- Dictionary containing the EDPs assigned to each demand
- type.
- capacity_sample: DataFrame
- Provides a sample of the capacity.
- lsds_sample: DataFrame
- Provides the mapping between limit states and damage
- states.
-
- Returns
- -------
- DataFrame
- Assigns a Damage State to each component block in the
- asset model.
- """
-
- # Log a message indicating that damage states are being
- # evaluated
-
- if self._asmnt.log.verbose:
- self.log_msg('Evaluating damage states...', prepend_timestamp=True)
-
- # Create an empty dataframe with columns and index taken from
- # the input capacity sample
- dmg_eval = pd.DataFrame(
- columns=capacity_sample.columns, index=capacity_sample.index
- )
-
- # Initialize an empty list to store demand data
- demand_df = []
-
- # For each demand type in the demand dictionary
- for demand_name, demand_vals in demand_dict.items():
- # Get the list of PGs assigned to this demand type
- PG_list = EDP_req[demand_name]
-
- # Create a list of columns for the demand data
- # corresponding to each PG in the PG_list
- PG_cols = pd.concat(
- [dmg_eval.loc[:1, PG_i] for PG_i in PG_list], axis=1, keys=PG_list
- ).columns
- PG_cols.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ls']
- # Create a dataframe with demand values repeated for the
- # number of PGs and assign the columns as PG_cols
- demand_df.append(
- pd.concat([pd.Series(demand_vals)] * len(PG_cols), axis=1, keys=PG_cols)
- )
+ frg_rv_tag = (
+ 'FRG-'
+ f'{pg[0]}-' # cmp_id
+ f'{pg[1]}-' # loc
+ f'{pg[2]}-' # dir
+ f'{pg[3]}-' # uid
+ f'{block_i + 1}-' # block
+ f'{ls_id}'
+ )
- # Concatenate all demand dataframes into a single dataframe
- demand_df = pd.concat(demand_df, axis=1)
- # Sort the columns of the demand dataframe
- demand_df.sort_index(axis=1, inplace=True)
+ # Assign correlation between limit state random
+ # variables
+ # Note that we assume perfectly correlated limit
+ # state random variables here. This approach is in
+ # line with how mainstream PBE calculations are
+ # performed. Assigning more sophisticated
+ # correlations between limit state RVs is possible,
+ # if needed. Please let us know through the
+ # SimCenter Message Board if you are interested in
+ # such a feature.
+ # Anchor all other limit state random variables to
+ # the first one to consider the perfect correlation
+ # between capacities in each LS
+ if ls_id == limit_states[0]:
+ anchor = None
+ else:
+ anchor = anchor_rvs[block_i]
- # Evaluate the damage exceedance by subtracting demand from
- # capacity and checking if the result is less than zero
- dmg_eval = (capacity_sample - demand_df) < 0
+ # parse theta values for multilinear_CDF
+ if family == 'multilinear_CDF':
+ theta = np.column_stack( # type: ignore
+ (
+ np.array(
+ theta[0].split('|')[0].split(','),
+ dtype=float,
+ ),
+ np.array(
+ theta[0].split('|')[1].split(','),
+ dtype=float,
+ ),
+ )
+ )
- # Remove any columns with NaN values from the damage
- # exceedance dataframe
- dmg_eval.dropna(axis=1, inplace=True)
+ rv = uq.rv_class_map(family)( # type: ignore
+ name=frg_rv_tag,
+ theta=theta,
+ truncation_limits=tr_lims,
+ anchor=anchor,
+ )
- # initialize the DataFrames that store the damage states and
- # quantities
- ds_sample = pd.DataFrame(
- 0, # fill value
- columns=capacity_sample.columns.droplevel('ls').unique(),
- index=capacity_sample.index,
- dtype='int32',
- )
+ capacity_rv_reg.add_RV(rv) # type: ignore
- # get a list of limit state ids among all components in the damage model
- ls_list = dmg_eval.columns.get_level_values(5).unique()
+ # add the RV to the set of correlated variables
+ frg_rv_set_tags[block_i].append(frg_rv_tag)
- # for each consecutive limit state...
- for LS_id in ls_list:
- # get all cmp - loc - dir - block where this limit state occurs
- dmg_e_ls = dmg_eval.loc[:, idx[:, :, :, :, :, LS_id]].dropna(axis=1)
+ if ls_id == limit_states[0]:
+ anchor_rvs.append(rv)
- # Get the damage states corresponding to this limit state in each
- # block
- # Note that limit states with a set of mutually exclusive damage
- # states options have their damage state picked here.
- lsds = lsds_sample.loc[:, dmg_e_ls.columns]
+ # Now add the LS->DS assignments
+ lsds_rv_tag = (
+ 'LSDS-'
+ f'{pg[0]}-' # cmp_id
+ f'{pg[1]}-' # loc
+ f'{pg[2]}-' # dir
+ f'{pg[3]}-' # uid
+ f'{block_i + 1}-' # block
+ f'{ls_id}'
+ )
- # Drop the limit state level from the columns to make the damage
- # exceedance DataFrame compatible with the other DataFrames in the
- # following steps
- dmg_e_ls.columns = dmg_e_ls.columns.droplevel(5)
+ ds_id_next = assign_lsds(
+ ds_weights, ds_id, lsds_rv_reg, lsds_rv_tag
+ )
- # Same thing for the lsds DataFrame
- lsds.columns = dmg_e_ls.columns
+ ds_id = ds_id_next
- # Update the damage state in the result with the values from the
- # lsds DF if the limit state was exceeded according to the
- # dmg_e_ls DF.
- # This one-liner updates the given Limit State exceedance in the
- # entire damage model. If subsequent Limit States are also exceeded,
- # those cells in the result matrix will get overwritten by higher
- # damage states.
- ds_sample.loc[:, dmg_e_ls.columns] = ds_sample.loc[
- :, dmg_e_ls.columns
- ].mask(dmg_e_ls, lsds)
+ if self._asmnt.log.verbose:
+ rv_count = len(lsds_rv_reg.RV)
+ self.log.msg(
+ f'2x{rv_count} random variables created.', prepend_timestamp=False
+ )
- return ds_sample
+ return capacity_rv_reg, lsds_rv_reg
- def _prepare_dmg_quantities(self, damage_state_sample, dropzero=True):
+ def prepare_dmg_quantities(
+ self,
+ component_sample: pd.DataFrame,
+ component_marginal_parameters: pd.DataFrame | None,
+ *,
+ dropzero: bool = True,
+ ) -> pd.DataFrame:
"""
- Combine component quantity and damage state information in one
- DataFrame.
+ Combine component quantity and damage state information.
This method assumes that a component quantity sample is
available in the asset model and a damage state sample is
@@ -1039,9 +1508,10 @@ def _prepare_dmg_quantities(self, damage_state_sample, dropzero=True):
Parameters
----------
- damage_state_sample: DataFrame
- A DataFrame that assigns a damage state to each component
- block in the asset model.
+ component_sample: pd.DataFrame
+ Component quantity sample from the AssetModel.
+ component_marginal_parameters: pd.DataFrame
+ Component marginal parameters from the AssetModel.
dropzero: bool, optional, default: True
If True, the quantity of non-damaged components is not
saved.
@@ -1053,18 +1523,15 @@ def _prepare_dmg_quantities(self, damage_state_sample, dropzero=True):
damage state information.
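+
+        Examples
+        --------
+        A hypothetical call, mirroring how `DamageModel.calculate`
+        invokes this method::
+
+            qnt_sample = ds_model.prepare_dmg_quantities(
+                asset_model.cmp_sample,
+                asset_model.cmp_marginal_params,
+                dropzero=False,
+            )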
"""
+ # ('cmp', 'loc', 'dir', 'uid') -> component quantity series
+ component_quantities = component_sample.to_dict('series')
- # pylint: disable=missing-return-doc
if self._asmnt.log.verbose:
- self.log_msg('Calculating damage quantities...', prepend_timestamp=True)
+ self.log.msg('Calculating damage quantities...', prepend_timestamp=True)
# Retrieve the component quantity information and component
# marginal parameters from the asset model
- # ('cmp', 'loc', 'dir', 'uid') -> component quantity series
- component_quantities = self._asmnt.asset.cmp_sample.to_dict('series')
- component_marginal_parameters = self._asmnt.asset.cmp_marginal_params
-
if (component_marginal_parameters is not None) and (
'Blocks' in component_marginal_parameters.columns
):
@@ -1073,23 +1540,27 @@ def _prepare_dmg_quantities(self, damage_state_sample, dropzero=True):
# ('cmp', 'loc', 'dir', 'uid) -> number of blocks
num_blocks = component_marginal_parameters['Blocks'].to_dict()
- def get_num_blocks(key):
- # pylint: disable=missing-return-type-doc
+ def get_num_blocks(key: object) -> float:
return float(num_blocks[key])
else:
# otherwise assume 1 block regardless of
# ('cmp', 'loc', 'dir', 'uid) key
- def get_num_blocks(_):
- # pylint: disable=missing-return-type-doc
+ def get_num_blocks(key: object) -> float: # noqa: ARG001
return 1.00
# ('cmp', 'loc', 'dir', 'uid', 'block') -> damage state series
- damage_state_sample_dict = damage_state_sample.to_dict('series')
+ assert self.ds_sample is not None
+ damage_state_sample_dict = self.ds_sample.to_dict('series')
dmg_qnt_series_collection = {}
for key, damage_state_series in damage_state_sample_dict.items():
- component, location, direction, uid, block = key
+ component: str
+ location: str
+ direction: str
+ uid: str
+ block: str
+ component, location, direction, uid, block = key # type: ignore
damage_state_set = set(damage_state_series.values)
for ds in damage_state_set:
if ds == -1:
@@ -1097,18 +1568,20 @@ def get_num_blocks(_):
if dropzero and ds == 0:
continue
dmg_qnt_vals = np.where(
- damage_state_series.values == ds,
- component_quantities[component, location, direction, uid].values
+ damage_state_series.to_numpy() == ds,
+ component_quantities[
+ component, location, direction, uid
+ ].to_numpy()
/ get_num_blocks((component, location, direction, uid)),
0.00,
)
if -1 in damage_state_set:
dmg_qnt_vals = np.where(
- damage_state_series.values != -1, dmg_qnt_vals, np.nan
+ damage_state_series.to_numpy() != -1, dmg_qnt_vals, np.nan
)
dmg_qnt_series = pd.Series(dmg_qnt_vals)
dmg_qnt_series_collection[
- (component, location, direction, uid, block, str(ds))
+ component, location, direction, uid, block, str(ds)
] = dmg_qnt_series
damage_quantities = pd.concat(
@@ -1118,14 +1591,14 @@ def get_num_blocks(_):
)
damage_quantities.columns.names = ['cmp', 'loc', 'dir', 'uid', 'block', 'ds']
- # sum up block quantities
- damage_quantities = damage_quantities.groupby(
+        # min_count=1 is specified so that summing across all-NaN
+        # values results in NaN instead of zero.
+ # https://stackoverflow.com/questions/33448003/sum-across-all-nans-in-pandas-returns-zero
+ return damage_quantities.groupby( # type: ignore
level=['cmp', 'loc', 'dir', 'uid', 'ds'], axis=1
- ).sum()
+ ).sum(min_count=1)
- return damage_quantities
-
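A minimal pandas sketch of the min_count behavior the comment above refers to:

import numpy as np
import pandas as pd

s = pd.Series([np.nan, np.nan])
s.sum()             # 0.0: by default, an all-NaN sum collapses to zero
s.sum(min_count=1)  # nan: at least one non-NaN value is required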
- def _perform_dmg_task(self, task, ds_sample):
+ def perform_dmg_task(self, task: tuple) -> None: # noqa: C901
"""
Perform a task from a damage process.
@@ -1139,39 +1612,35 @@ def _perform_dmg_task(self, task, ds_sample):
Parameters
----------
- task : list
+ task: tuple
A tuple representing a task from the damage process. It
contains two elements:
- The first element is a string representing the source
- component, e.g., `'1_CMP_A'`. The number in the beginning
- is used to order the tasks and is not considered here.
+ component, e.g., `'1_CMP_A'`. The number in the beginning
+ is used to order the tasks and is not considered here.
- The second element is a dictionary representing the
- events triggered by the damage state of the source
- component. The keys of the dictionary are strings that
- represent the damage state of the source component,
- e.g., `'DS1'`. The values are lists of strings
- representing the target component(s) and event(s), e.g.,
- `['CMP_B.DS1', 'CMP_C.DS1']`. They could also be a
- single element instead of a list.
- Examples of a task:
- ['1_CMP.A', {'DS1': ['CMP.B_DS1', 'CMP.C_DS2']}]
- ['1_CMP.A', {'DS1': 'CMP.B_DS1', 'DS2': 'CMP.B_DS2'}]
- ['1_CMP.A-LOC', {'DS1': 'CMP.B_DS1'}]
- ds_sample : pandas DataFrame
- A DataFrame representing the damage state of the
- components. It is modified in place to represent the
- damage states of the components after the task has been
- performed.
+ events triggered by the damage state of the source
+ component. The keys of the dictionary are strings that
+ represent the damage state of the source component,
+ e.g., `'DS1'`. The values are lists of strings
+ representing the target component(s) and event(s), e.g.,
+ `['CMP_B.DS1', 'CMP_C.DS1']`. They could also be a
+ single element instead of a list.
+
+ Examples of a task:
+ ['1_CMP.A', {'DS1': ['CMP.B_DS1', 'CMP.C_DS2']}]
+ ['1_CMP.A', {'DS1': 'CMP.B_DS1', 'DS2': 'CMP.B_DS2'}]
+ ['1_CMP.A-LOC', {'DS1': 'CMP.B_DS1'}]
Raises
------
ValueError
Raises an error if the source or target event descriptions
do not follow expected formats.
- """
+ """
if self._asmnt.log.verbose:
- self.log_msg(f'Applying task {task}...', prepend_timestamp=True)
+ self.log.msg(f'Applying task {task}...', prepend_timestamp=True)
# parse task
source_cmp = task[0].split('_')[1] # source component
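A hedged usage sketch of the renamed public method, assuming `dmg_model` is a damage model whose `ds_sample` is already populated:

# Damage state 1 of CMP.A forces damage state 1 of CMP.B wherever it occurs.
dmg_process = {'1_CMP.A': {'DS1': 'CMP.B_DS1'}}
for task in sorted(dmg_process.items()):
    dmg_model.perform_dmg_task(task)  # modifies dmg_model.ds_sample in place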
@@ -1186,46 +1655,46 @@ def _perform_dmg_task(self, task, ds_sample):
match_locations = False
# check if the source component exists in the damage state
- # dataframe
- if source_cmp not in ds_sample.columns.get_level_values('cmp'):
- self.log_msg(
- f"WARNING: Source component {source_cmp} in the prescribed "
- "damage process not found among components in the damage "
- "sample. The corresponding part of the damage process is "
- "skipped.",
- prepend_timestamp=False,
+ # DataFrame
+ assert self.ds_sample is not None
+ if source_cmp not in self.ds_sample.columns.get_level_values('cmp'):
+ self.log.warning(
+ f'Source component `{source_cmp}` in the prescribed '
+ 'damage process not found among components in the damage '
+ 'sample. The corresponding part of the damage process is '
+ 'skipped.'
)
return
- # execute the events pres prescribed in the damage task
+ # execute the events prescribed in the damage task
for source_event, target_infos in events.items():
# events can only be triggered by damage state occurrence
if not source_event.startswith('DS'):
- raise ValueError(
- f"Unable to parse source event in damage "
- f"process: {source_event}"
+ msg = (
+ f'Unable to parse source event in damage '
+ f'process: `{source_event}`'
)
+ raise ValueError(msg)
# get the ID of the damage state that triggers the event
ds_source = int(source_event[2:])
# turn the target_infos into a list if it is a single
# argument, for consistency
if not isinstance(target_infos, list):
- target_infos = [target_infos]
+ target_infos = [target_infos] # noqa: PLW2901
for target_info in target_infos:
# get the target component and event type
target_cmp, target_event = target_info.split('_')
if (target_cmp != 'ALL') and (
- target_cmp not in ds_sample.columns.get_level_values('cmp')
+ target_cmp not in self.ds_sample.columns.get_level_values('cmp')
):
- self.log_msg(
- f"WARNING: Target component {target_cmp} in the prescribed "
- "damage process not found among components in the damage "
- "sample. The corresponding part of the damage process is "
- "skipped.",
- prepend_timestamp=False,
+ self.log.warning(
+ f'Target component `{target_cmp}` in the prescribed '
+ 'damage process not found among components in the damage '
+ 'sample. The corresponding part of the damage process is '
+ 'skipped.'
)
continue
@@ -1241,248 +1710,114 @@ def _perform_dmg_task(self, task, ds_sample):
# -1 stands for NaN (ints don't support NaN)
else:
- raise ValueError(
- f"Unable to parse target event in damage "
- f"process: {target_event}"
+ msg = (
+ f'Unable to parse target event in damage '
+ f'process: `{target_event}`'
)
+ raise ValueError(msg)
if match_locations:
self._perform_dmg_event_loc(
- ds_sample, source_cmp, ds_source, target_cmp, ds_target
+ source_cmp, ds_source, target_cmp, ds_target
)
else:
self._perform_dmg_event(
- ds_sample, source_cmp, ds_source, target_cmp, ds_target
+ source_cmp, ds_source, target_cmp, ds_target
)
if self._asmnt.log.verbose:
- self.log_msg(
+ self.log.msg(
'Damage process task successfully applied.', prepend_timestamp=False
)
def _perform_dmg_event(
- self, ds_sample, source_cmp, ds_source, target_cmp, ds_target
- ):
+ self, source_cmp: str, ds_source: int, target_cmp: str, ds_target: int
+ ) -> None:
"""
Perform a damage event.
- See `_perform_dmg_task`.
+ See `perform_dmg_task`.
"""
-
# affected rows
+ assert self.ds_sample is not None
row_selection = np.where(
# for many instances of source_cmp, we
# consider the highest damage state
- ds_sample[source_cmp].max(axis=1).values
+ self.ds_sample[source_cmp].max(axis=1).to_numpy() # type: ignore
== ds_source
)[0]
# affected columns
if target_cmp == 'ALL':
column_selection = np.where(
- ds_sample.columns.get_level_values('cmp') != source_cmp
+ self.ds_sample.columns.get_level_values('cmp') != source_cmp
)[0]
else:
column_selection = np.where(
- ds_sample.columns.get_level_values('cmp') == target_cmp
+ self.ds_sample.columns.get_level_values('cmp') == target_cmp
)[0]
- ds_sample.iloc[row_selection, column_selection] = ds_target
+ self.ds_sample.iloc[row_selection, column_selection] = ds_target
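A self-contained sketch of the row/column selection pattern used above, with a toy two-component damage state sample:

import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [('CMP.A', '1', '1', '0'), ('CMP.B', '1', '1', '0')],
    names=['cmp', 'loc', 'dir', 'uid'],
)
ds = pd.DataFrame([[1, 0], [0, 0]], columns=cols)
rows = np.where(ds['CMP.A'].max(axis=1).to_numpy() == 1)[0]             # -> [0]
cols_sel = np.where(ds.columns.get_level_values('cmp') == 'CMP.B')[0]   # -> [1]
ds.iloc[rows, cols_sel] = 1  # CMP.B follows CMP.A into DS1 in realization 0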
def _perform_dmg_event_loc(
- self, ds_sample, source_cmp, ds_source, target_cmp, ds_target
- ):
+ self, source_cmp: str, ds_source: int, target_cmp: str, ds_target: int
+ ) -> None:
"""
Perform a damage event matching locations.
- See `_perform_dmg_task`.
- """
+ Parameters
+ ----------
+ source_cmp: str
+ Source component, e.g., `'CMP.A'`. The order-number prefix
+ used in the damage process has already been stripped at
+ this point.
+ ds_source: int
+ Source damage state.
+ target_cmp: str
+ Target component, e.g., `'CMP.B'`. The component that
+ will be affected when `source_cmp` gets to `ds_source`.
+ ds_target: int
+ Target damage state, e.g., `1`. The damage state that
+ is assigned to `target_cmp` when `source_cmp` gets to
+ `ds_source`.
+ """
# get locations of source component
- source_locs = set(ds_sample[source_cmp].columns.get_level_values('loc'))
+ assert self.ds_sample is not None
+ source_locs = set(self.ds_sample[source_cmp].columns.get_level_values('loc'))
for loc in source_locs:
# apply damage task matching locations
row_selection = np.where(
# for many instances of source_cmp, we
# consider the highest damage state
- ds_sample[source_cmp, loc].max(axis=1).values
- == ds_source
+ self.ds_sample[source_cmp, loc].max(axis=1).to_numpy() == ds_source
)[0]
# affected columns
if target_cmp == 'ALL':
column_selection = np.where(
np.logical_and(
- ds_sample.columns.get_level_values('cmp') != source_cmp,
- ds_sample.columns.get_level_values('loc') == loc,
+ self.ds_sample.columns.get_level_values('cmp') != source_cmp,
+ self.ds_sample.columns.get_level_values('loc') == loc,
)
)[0]
else:
column_selection = np.where(
np.logical_and(
- ds_sample.columns.get_level_values('cmp') == target_cmp,
- ds_sample.columns.get_level_values('loc') == loc,
+ self.ds_sample.columns.get_level_values('cmp') == target_cmp,
+ self.ds_sample.columns.get_level_values('loc') == loc,
)
)[0]
- ds_sample.iloc[row_selection, column_selection] = ds_target
-
- def _get_pg_batches(self, block_batch_size):
- """
- Group performance groups into batches for efficient damage
- assessment.
-
- The method takes as input the block_batch_size, which
- specifies the maximum number of blocks per batch. The method
- first checks if performance groups have been defined in the
- cmp_marginal_params dataframe, and if so, it uses the 'Blocks'
- column as the performance group information. If performance
- groups have not been defined in cmp_marginal_params, the
- method uses the cmp_sample dataframe to define the performance
- groups, with each performance group having a single block.
-
- The method then checks if the performance groups are available
- in the damage parameters dataframe, and removes any
- performance groups that are not found in the damage
- parameters. The method then groups the performance groups
- based on the locations and directions of the components, and
- calculates the cumulative sum of the blocks for each
- group. The method then divides the performance groups into
- batches of size specified by block_batch_size and assigns a
- batch number to each group. Finally, the method groups the
- performance groups by batch number, component, location, and
- direction, and returns a dataframe that shows the number of
- blocks for each batch.
-
- Returns
- -------
- DataFrame
- A DataFrame indexed by batch number, component identifier,
- location, direction, and unique ID, with a column
- indicating the number of blocks assigned to each
- batch. This dataframe facilitates the management and
- execution of damage assessment tasks by grouping
- components into manageable batches based on the specified
- block batch size.
+ self.ds_sample.iloc[row_selection, column_selection] = ds_target
- Raises
- ------
- Warning
- Logs a warning if any performance groups do not have
- corresponding damage model information and are therefore
- excluded from the analysis.
+ def complete_ds_cols(self, dmg_sample: pd.DataFrame) -> pd.DataFrame:
"""
+ Complete damage state columns.
- # Get the marginal parameters for the components from the
- # asset model
- cmp_marginals = self._asmnt.asset.cmp_marginal_params
-
- # Initialize the batch dataframe
- pg_batch = None
-
- # If marginal parameters are available, use the 'Blocks'
- # column to initialize the batch dataframe
- if cmp_marginals is not None:
- # Check if the "Blocks" column exists in the component
- # marginal parameters
- if 'Blocks' in cmp_marginals.columns:
- pg_batch = cmp_marginals['Blocks'].to_frame()
-
- # If the "Blocks" column doesn't exist, create a new dataframe
- # with "Blocks" column filled with ones, using the component
- # sample as the index.
- if pg_batch is None:
- cmp_sample = self._asmnt.asset.cmp_sample
- pg_batch = pd.DataFrame(
- np.ones(cmp_sample.shape[1]),
- index=cmp_sample.columns,
- columns=['Blocks'],
- )
-
- # Check if the damage model information exists for each
- # performance group If not, remove the performance group from
- # the analysis and log a warning message.
- first_time = True
- for pg_i in pg_batch.index:
- if np.any(np.isin(pg_i, self.damage_params.index)):
- blocks_i = pg_batch.loc[pg_i, 'Blocks']
- pg_batch.loc[pg_i, 'Blocks'] = blocks_i
-
- else:
- pg_batch.drop(pg_i, inplace=True)
-
- if first_time:
- self.log_msg(
- "\nWARNING: Damage model information is "
- "incomplete for some of the performance groups "
- "and they had to be removed from the analysis:",
- prepend_timestamp=False,
- )
-
- first_time = False
-
- self.log_msg(f"{pg_i}", prepend_timestamp=False)
-
- # Convert the data types of the dataframe to be efficient
- pg_batch = pg_batch.convert_dtypes()
-
- # Sum up the number of blocks for each performance group
- pg_batch = pg_batch.groupby(['loc', 'dir', 'cmp', 'uid']).sum()
- pg_batch.sort_index(axis=0, inplace=True)
-
- # Calculate cumulative sum of blocks
- pg_batch['CBlocks'] = np.cumsum(pg_batch['Blocks'].values.astype(int))
- pg_batch['Batch'] = 0
-
- # Group the performance groups into batches
- for batch_i in range(1, pg_batch.shape[0] + 1):
- # Find the mask for blocks that are less than the batch
- # size and greater than 0
- batch_mask = np.all(
- np.array(
- [
- pg_batch['CBlocks'] <= block_batch_size,
- pg_batch['CBlocks'] > 0,
- ]
- ),
- axis=0,
- )
-
- if np.sum(batch_mask) < 1:
- batch_mask = np.full(batch_mask.shape, False)
- batch_mask[np.where(pg_batch['CBlocks'] > 0)[0][0]] = True
-
- pg_batch.loc[batch_mask, 'Batch'] = batch_i
-
- # Decrement the cumulative block count by the max count in
- # the current batch
- pg_batch['CBlocks'] -= pg_batch.loc[
- pg_batch['Batch'] == batch_i, 'CBlocks'
- ].max()
-
- # If the maximum cumulative block count is 0, exit the
- # loop
- if pg_batch['CBlocks'].max() == 0:
- break
-
- # Group the performance groups by batch, component, location,
- # and direction, and keep only the number of blocks for each
- # group
- pg_batch = (
- pg_batch.groupby(['Batch', 'cmp', 'loc', 'dir', 'uid'])
- .sum()
- .loc[:, 'Blocks']
- .to_frame()
- )
-
- return pg_batch
-
- def _complete_ds_cols(self, dmg_sample):
- """
- Completes the damage sample dataframe with all possible damage
+ Completes the damage sample DataFrame with all possible damage
states for each component.
Parameters
----------
- dmg_sample : DataFrame
+ dmg_sample: DataFrame
A DataFrame containing the damage state information for
each component block in the asset model. The columns are
MultiIndexed with levels corresponding to component
@@ -1508,22 +1843,36 @@ def _complete_ds_cols(self, dmg_sample):
"""
# get a shortcut for the damage model parameters
- DP = self.damage_params
+ dp = self.damage_params
+ assert dp is not None
# Get the header for the results that we can use to identify
# cmp-loc-dir-uid sets
- dmg_header = dmg_sample.groupby(level=[0, 1, 2, 3], axis=1).first().iloc[:2, :]
+ dmg_header = (
+ dmg_sample.groupby( # type: ignore
+ level=[0, 1, 2, 3],
+ axis=1,
+ )
+ .first()
+ .iloc[:2, :]
+ )
+ damaged_components = set(dmg_header.columns.get_level_values('cmp'))
# get the number of possible limit states
- ls_list = [col for col in DP.columns.unique(level=0) if 'LS' in col]
+ ls_list = [col for col in dp.columns.unique(level=0) if 'LS' in col]
- # initialize the result dataframe
+ # initialize the result DataFrame
res = pd.DataFrame()
+ # TODO(JVM): For the code below, store the number of damage states
+ # for each component ID as an attribute of the ds_model when
+ # loading the parameters, and then access it directly here,
+ # which is much faster than parsing the parameters again.
+
# walk through all components that have damage parameters provided
- for cmp_id in DP.index:
+ for cmp_id in dp.index:
# get the component-specific parameters
- cmp_data = DP.loc[cmp_id]
+ cmp_data = dp.loc[cmp_id]
# and initialize the damage state counter
ds_count = 0
@@ -1531,41 +1880,40 @@ def _complete_ds_cols(self, dmg_sample):
# walk through all limit states for the component
for ls in ls_list:
# check if the given limit state is defined
- if not pd.isna(cmp_data[(ls, 'Theta_0')]):
+ if not pd.isna(cmp_data[ls, 'Theta_0']):
# check if there is only one damage state
- if pd.isna(cmp_data[(ls, 'DamageStateWeights')]):
+ if pd.isna(cmp_data[ls, 'DamageStateWeights']):
ds_count += 1
else:
# or if there are more than one, how many
- ds_count += len(cmp_data[(ls, 'DamageStateWeights')].split('|'))
+ ds_count += len(
+ cmp_data[ls, 'DamageStateWeights'].split('|')
+ )
# get the list of valid cmp-loc-dir-uid sets
- cmp_header = dmg_header.loc[
- :,
- [
- cmp_id,
- ],
- ]
-
- # Create a dataframe where they are repeated ds_count times in the
+ if cmp_id not in damaged_components:
+ continue
+ cmp_header = dmg_header.loc[:, [cmp_id]]
+
+ # Create a DataFrame where they are repeated ds_count times in the
# columns. The keys put the DS id in the first level of the
# multiindexed column
cmp_headers = pd.concat(
[cmp_header for ds_i in range(ds_count + 1)],
- keys=[str(r) for r in range(0, ds_count + 1)],
+ keys=[str(r) for r in range(ds_count + 1)],
axis=1,
)
cmp_headers.columns.names = ['ds', *cmp_headers.columns.names[1::]]
- # add these new columns to the result dataframe
+ # add these new columns to the result DataFrame
res = pd.concat([res, cmp_headers], axis=1)
- # Fill the result dataframe with zeros and reorder its columns to have
+ # Fill the result DataFrame with zeros and reorder its columns to have
# the damage states at the lowest level - matching the dmg_sample input
res = pd.DataFrame(
0.0,
- columns=res.columns.reorder_levels([1, 2, 3, 4, 0]),
+ columns=res.columns.reorder_levels([1, 2, 3, 4, 0]), # type: ignore
index=dmg_sample.index,
)
@@ -1574,122 +1922,23 @@ def _complete_ds_cols(self, dmg_sample):
return res
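The damage state count per component follows from the limit state definitions: one DS per defined limit state, unless mutually exclusive DS weights are given. A small sketch with a hypothetical weights entry:

weights = '0.5|0.3|0.2'             # hypothetical DamageStateWeights entry
ds_count = len(weights.split('|'))  # -> 3 damage states for this limit state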
- def calculate(
- self,
- sample_size=None,
- dmg_process=None,
- block_batch_size=1000,
- scaling_specification=None,
- ):
- """
- Wrapper around the new calculate method that requires sample size.
- Exists for backwards compatibility
- """
- if not sample_size:
- # todo: Deprecation warning
- sample_size = self._asmnt.demand.sample.shape[0]
- self.calculate_internal(
- sample_size, dmg_process, block_batch_size, scaling_specification
- )
-
- def calculate_internal(
- self,
- sample_size,
- dmg_process=None,
- block_batch_size=1000,
- scaling_specification=None,
- ):
- """
- Calculate the damage state of each component block in the asset.
-
- """
-
- self.log_div()
- self.log_msg('Calculating damages...')
-
- # Break up damage calculation and perform it by performance group.
- # Compared to the simultaneous calculation of all PGs, this approach
- # reduces demands on memory and increases the load on CPU. This leads
- # to a more balanced workload on most machines for typical problems.
- # It also allows for a straightforward extension with parallel
- # computing.
-
- # get the list of performance groups
- self.log_msg(
- f'Number of Performance Groups in Asset Model:'
- f' {self._asmnt.asset.cmp_sample.shape[1]}',
- prepend_timestamp=False,
- )
-
- pg_batch = self._get_pg_batches(block_batch_size)
- batches = pg_batch.index.get_level_values(0).unique()
-
- self.log_msg(
- f'Number of Component Blocks: {pg_batch["Blocks"].sum()}',
- prepend_timestamp=False,
- )
-
- self.log_msg(
- f"{len(batches)} batches of Performance Groups prepared "
- "for damage assessment",
- prepend_timestamp=False,
- )
-
- # for PG_i in self._asmnt.asset.cmp_sample.columns:
- ds_samples = []
- for PGB_i in batches:
- performance_group = pg_batch.loc[PGB_i]
-
- self.log_msg(
- f"Calculating damage for PG batch {PGB_i} with "
- f"{int(performance_group['Blocks'].sum())} blocks"
- )
-
- # Generate an array with component capacities for each block and
- # generate a second array that assigns a specific damage state to
- # each component limit state. The latter is primarily needed to
- # handle limit states with multiple, mutually exclusive DS options
- capacity_sample, lsds_sample = self._generate_dmg_sample(
- sample_size, performance_group, scaling_specification
- )
-
- # Get the required demand types for the analysis
- EDP_req = self._get_required_demand_type(performance_group)
-
- # Create the demand vector
- demand_dict = self._assemble_required_demand_data(EDP_req)
-
- # Evaluate the Damage State of each Component Block
- ds_sample = self._evaluate_damage_state(
- demand_dict, EDP_req, capacity_sample, lsds_sample
- )
-
- ds_samples.append(ds_sample)
-
- ds_sample = pd.concat(ds_samples, axis=1)
- self.log_msg("Raw damage calculation successful.", prepend_timestamp=False)
- # Apply the prescribed damage process, if any
- if dmg_process is not None:
- self.log_msg("Applying damage processes...")
-
- # Sort the damage processes tasks
- dmg_process = {key: dmg_process[key] for key in sorted(dmg_process)}
-
- # Perform damage tasks in the sorted order
- for task in dmg_process.items():
- self._perform_dmg_task(task, ds_sample)
-
- self.log_msg(
- "Damage processes successfully applied.", prepend_timestamp=False
- )
+def _is_for_ds_model(data: pd.DataFrame) -> bool:
+ """
+ Check if data are for `ds_model`.
- qnt_sample = self._prepare_dmg_quantities(ds_sample, dropzero=False)
+ Determines if the specified damage model parameters are for
+ components modeled with discrete Damage States (DS).
- # If requested, extend the quantity table with all possible DSs
- if self._asmnt.options.list_all_ds:
- qnt_sample = self._complete_ds_cols(qnt_sample)
+ Parameters
+ ----------
+ data: pd.DataFrame
+ The data to check.
- self.sample = qnt_sample
+ Returns
+ -------
+ bool
+ Whether the data are for `ds_model`.
- self.log_msg('Damage calculation successfully completed.')
+ """
+ return 'LS1' in data.columns.get_level_values(0)
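A minimal check mirroring the helper above: damage parameters destined for the discrete damage-state model carry 'LS1' as a top-level column.

import pandas as pd

params = pd.DataFrame(
    columns=pd.MultiIndex.from_tuples([('LS1', 'Theta_0'), ('LS1', 'Family')])
)
assert 'LS1' in params.columns.get_level_values(0)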
diff --git a/pelicun/model/demand_model.py b/pelicun/model/demand_model.py
index 8707cddc8..75296b5e1 100644
--- a/pelicun/model/demand_model.py
+++ b/pelicun/model/demand_model.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,24 +37,25 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This file defines the DemandModel object and its methods.
-.. rubric:: Contents
+"""DemandModel object and associated methods."""
-.. autosummary::
+from __future__ import annotations
- DemandModel
-
-"""
+import re
+from collections import defaultdict
+from pathlib import Path
+from typing import TYPE_CHECKING, overload
+import numexpr as ne
import numpy as np
import pandas as pd
-from .pelicun_model import PelicunModel
-from .. import base
-from .. import uq
-from .. import file_io
+from pelicun import base, file_io, uq
+from pelicun.model.pelicun_model import PelicunModel
+
+if TYPE_CHECKING:
+ from pelicun.assessment import AssessmentBase
idx = base.idx
@@ -93,21 +93,50 @@ class DemandModel(PelicunModel):
"""
- def __init__(self, assessment):
+ __slots__ = [
+ '_RVs',
+ 'calibrated',
+ 'correlation',
+ 'empirical_data',
+ 'marginal_params',
+ 'sample',
+ 'user_units',
+ ]
+
+ def __init__(self, assessment: AssessmentBase) -> None:
+ """
+ Instantiate a DemandModel.
+
+ Parameters
+ ----------
+ assessment: Assessment
+ Parent assessment object.
+
+ """
super().__init__(assessment)
- self.marginal_params = None
- self.correlation = None
- self.empirical_data = None
- self.units = None
+ self.marginal_params: pd.DataFrame | None = None
+ self.correlation: pd.DataFrame | None = None
+ self.empirical_data: pd.DataFrame | None = None
+ self.user_units: pd.Series | None = None
self.calibrated = False
- self._RVs = None
- self.sample = None
+ self._RVs: uq.RandomVariableRegistry | None = None
+ self.sample: pd.DataFrame | None = None
+
+ @overload
+ def save_sample(
+ self, filepath: None = None, *, save_units: bool = False
+ ) -> tuple[pd.DataFrame, pd.Series] | pd.DataFrame: ...
- def save_sample(self, filepath=None, save_units=False):
+ @overload
+ def save_sample(self, filepath: str, *, save_units: bool = False) -> None: ...
+
+ def save_sample(
+ self, filepath: str | None = None, *, save_units: bool = False
+ ) -> None | tuple[pd.DataFrame, pd.Series] | pd.DataFrame:
"""
- Save demand sample to a csv file or return it in a DataFrame
+ Save demand sample to a csv file or return it in a DataFrame.
Returns
-------
@@ -119,33 +148,32 @@ def save_sample(self, filepath=None, save_units=False):
If `save_units` is True, it returns a tuple of the
DataFrame and a Series containing the units.
- Raises
- ------
- IOError
- Raises an IOError if there is an issue saving the file to
- the specified `filepath`.
"""
-
- self.log_div()
+ self.log.div()
if filepath is not None:
- self.log_msg('Saving demand sample...')
+ self.log.msg('Saving demand sample...')
+ assert self.sample is not None
res = file_io.save_to_csv(
self.sample,
- filepath,
- units=self.units,
+ Path(filepath) if filepath is not None else None,
+ units=self.user_units,
unit_conversion_factors=self._asmnt.unit_conversion_factors,
use_simpleindex=(filepath is not None),
log=self._asmnt.log,
)
-
if filepath is not None:
- self.log_msg('Demand sample successfully saved.', prepend_timestamp=False)
+ self.log.msg(
+ 'Demand sample successfully saved.', prepend_timestamp=False
+ )
return None
# else:
- units = res.loc["Units"]
- res.drop("Units", inplace=True)
+ assert isinstance(res, pd.DataFrame)
+
+ units = res.loc['Units']
+ res = res.drop('Units')
+ assert isinstance(units, pd.Series)
if save_units:
return res.astype(float), units
@@ -153,13 +181,13 @@ def save_sample(self, filepath=None, save_units=False):
# else:
return res.astype(float)
- def load_sample(self, filepath):
+ def load_sample(self, filepath: str | pd.DataFrame) -> None:
"""
Load demand sample data and parse it.
- Besides parsing the sample, the method also reads and saves the units
- specified for each demand variable. If no units are specified, Standard
- Units are assumed.
+ Besides parsing the sample, the method also reads and saves
+ the units specified for each demand variable. If no units are
+ specified, base units are assumed.
Parameters
----------
@@ -168,8 +196,10 @@ def load_sample(self, filepath):
"""
- def parse_header(raw_header):
+ def parse_header(raw_header: pd.Index[str]) -> pd.Index[str]:
"""
+ Parse and clean header.
+
Parses and cleans the header of a demand DataFrame from
raw multi-level index to a standardized format.
@@ -184,7 +214,7 @@ def parse_header(raw_header):
Parameters
----------
- raw_header : pd.MultiIndex
+ raw_header: pd.MultiIndex
The original multi-level index (header) of the
DataFrame, which may contain an optional event_ID and
might have excess whitespace in the labels.
@@ -197,30 +227,32 @@ def parse_header(raw_header):
index has three levels: 'type', 'loc', and 'dir',
representing the type of demand, location, and
direction, respectively.
+
"""
- old_MI = raw_header
+ old_mi = raw_header
# The first number (event_ID) in the demand labels is optional and
# currently not used. We remove it if it was in the raw data.
- if old_MI.nlevels == 4:
+ num_levels_with_event_id = 4
+ if old_mi.nlevels == num_levels_with_event_id:
if self._asmnt.log.verbose:
- self.log_msg(
+ self.log.msg(
'Removing event_ID from header...', prepend_timestamp=False
)
new_column_index_array = np.array(
- [old_MI.get_level_values(i) for i in range(1, 4)]
+ [old_mi.get_level_values(i) for i in range(1, 4)]
)
else:
new_column_index_array = np.array(
- [old_MI.get_level_values(i) for i in range(3)]
+ [old_mi.get_level_values(i) for i in range(3)]
)
# Remove whitespace to avoid ambiguity
if self._asmnt.log.verbose:
- self.log_msg(
+ self.log.msg(
'Removing whitespace from header...', prepend_timestamp=False
)
@@ -230,14 +262,12 @@ def parse_header(raw_header):
# Creating new, cleaned-up header
- new_MI = pd.MultiIndex.from_arrays(
+ return pd.MultiIndex.from_arrays(
new_column_index, names=['type', 'loc', 'dir']
)
- return new_MI
-
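A sketch of what the header cleanup produces, under assumed raw labels:

import pandas as pd

raw = pd.MultiIndex.from_arrays([['1'], ['PFA '], [' 1'], ['1']])  # with event_ID
# After dropping the event_ID level and stripping whitespace, the header
# becomes MultiIndex([('PFA', '1', '1')], names=['type', 'loc', 'dir']).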
- self.log_div()
- self.log_msg('Loading demand data...')
+ self.log.div()
+ self.log.msg('Loading demand data...')
demand_data, units = file_io.load_data(
filepath,
@@ -245,6 +275,8 @@ def parse_header(raw_header):
return_units=True,
log=self._asmnt.log,
)
+ assert isinstance(demand_data, pd.DataFrame)
+ assert isinstance(units, pd.Series)
parsed_data = demand_data.copy()
@@ -254,34 +286,50 @@ def parse_header(raw_header):
# Remove errors, if needed
if 'ERROR' in parsed_data.columns.get_level_values(0):
- self.log_msg(
+ self.log.msg(
'Removing errors from the raw data...', prepend_timestamp=False
)
- error_list = parsed_data.loc[:, idx['ERROR', :, :]].values.astype(bool)
+ error_list = (
+ parsed_data.loc[:, idx['ERROR', :, :]] # type: ignore
+ .to_numpy()
+ .astype(bool)
+ )
parsed_data = parsed_data.loc[~error_list, :].copy()
- parsed_data.drop('ERROR', level=0, axis=1, inplace=True)
+ parsed_data = parsed_data.drop('ERROR', level=0, axis=1)
- self.log_msg(
- "\nBased on the values in the ERROR column, "
- f"{np.sum(error_list)} demand samples were removed.\n",
+ self.log.msg(
+ '\nBased on the values in the ERROR column, '
+ f'{np.sum(error_list)} demand samples were removed.\n',
prepend_timestamp=False,
)
self.sample = parsed_data
- self.log_msg('Demand data successfully parsed.', prepend_timestamp=False)
+ self.log.msg('Demand data successfully parsed.', prepend_timestamp=False)
# parse the index for the units
units.index = parse_header(units.index)
- self.units = units
+ self.user_units = units
- self.log_msg('Demand units successfully parsed.', prepend_timestamp=False)
+ self.log.msg('Demand units successfully parsed.', prepend_timestamp=False)
- def estimate_RID(self, demands, params, method='FEMA P58'):
+ def estimate_RID( # noqa: N802
+ self,
+ demands: pd.DataFrame | pd.Series,
+ params: dict,
+ method: str = 'FEMA P58',
+ ) -> pd.DataFrame:
"""
+ Estimate residual inter-story drift (RID).
+
Estimates residual inter-story drift (RID) realizations based
on peak inter-story drift (PID) and other demand parameters
using specified methods.
@@ -294,16 +342,16 @@ def estimate_RID(self, demands, params, method='FEMA P58'):
Parameters
----------
- demands : DataFrame
+ demands: DataFrame
A DataFrame containing samples of demands, specifically
peak inter-story drift (PID) values for various
location-direction pairs required for the estimation
method.
- params : dict
+ params: dict
A dictionary containing parameters required for the
estimation method, such as 'yield_drift', which is the
drift at which yielding is expected to occur.
- method : str, optional
+ method: str, optional
The method used to estimate the RID values. Currently,
only 'FEMA P58' is implemented. Defaults to 'FEMA P58'.
@@ -329,56 +377,152 @@ def estimate_RID(self, demands, params, method='FEMA P58'):
RID values to model the inherent uncertainty. The method
ensures that the RID values do not exceed the corresponding
PID values.
+
"""
- if method == 'FEMA P58':
- # method is described in FEMA P-58 Volume 1 Section 5.4 & Appendix C
+ if method in {'FEMA P58', 'FEMA P-58'}:
+ # method is described in FEMA P-58 Volume 1 Section 5.4 &
+ # Appendix C
- # the provided demands shall be PID values at various loc-dir pairs
- PID = demands
+ # the provided demands shall be PID values at various
+ # loc-dir pairs
+ pid = demands
# there's only one parameter needed: the yield drift
yield_drift = params['yield_drift']
# three subdomains of demands are identified
- small = PID < yield_drift
- medium = PID < 4 * yield_drift
- large = PID >= 4 * yield_drift
+ small = yield_drift > pid
+ medium = 4 * yield_drift > pid
+ large = 4 * yield_drift <= pid
# convert PID to RID in each subdomain
- RID = PID.copy()
- RID[large] = PID[large] - 3 * yield_drift
- RID[medium] = 0.3 * (PID[medium] - yield_drift)
- RID[small] = 0.0
+ rid = pid.copy()
+ rid[large] = pid[large] - 3 * yield_drift
+ rid[medium] = 0.3 * (pid[medium] - yield_drift)
+ rid[small] = 0.0
# add extra uncertainty to nonzero values
rng = self._asmnt.options.rng
- eps = rng.normal(scale=0.2, size=RID.shape)
- RID[RID > 0] = np.exp(np.log(RID[RID > 0]) + eps)
-
- # finally, make sure the RID values are never larger than the PIDs
- RID = pd.DataFrame(
- np.minimum(PID.values, RID.values),
- columns=pd.DataFrame(
+ eps = rng.normal(scale=0.2, size=rid.shape)
+ rid[rid > 0] = np.exp(np.log(rid[rid > 0]) + eps) # type: ignore
+
+ # finally, make sure the RID values are never larger than
+ # the PIDs
+ rid = pd.DataFrame(
+ np.minimum(pid.values, rid.values), # type: ignore
+ columns=pd.DataFrame( # noqa: PD013
1,
- index=[
- 'RID',
- ],
- columns=PID.columns,
+ index=['RID'],
+ columns=pid.columns,
)
.stack(level=[0, 1])
.index,
- index=PID.index,
+ index=pid.index,
)
else:
- RID = None
+ msg = f'Invalid method: `{method}`.'
+ raise ValueError(msg)
+
+ return rid
+
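The piecewise PID-to-RID conversion above (before the added lognormal noise) can be traced by hand; a sketch with an assumed yield drift of 0.01:

yield_drift = 0.01
for pid in (0.005, 0.02, 0.05):
    if pid < yield_drift:
        rid = 0.0
    elif pid < 4 * yield_drift:
        rid = 0.3 * (pid - yield_drift)
    else:
        rid = pid - 3 * yield_drift
    print(pid, round(rid, 4))  # 0.005 -> 0.0, 0.02 -> 0.003, 0.05 -> 0.02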
+ def estimate_RID_and_adjust_sample( # noqa: N802
+ self, params: dict, method: str = 'FEMA P58'
+ ) -> None:
+ """
+ Estimate residual inter-story drift (RID) and adjust the sample.
+
+ Uses `self.estimate_RID` and adjusts the demand sample.
+ See the docstring of the `estimate_RID` method for details.
+
+ Parameters
+ ----------
+ params: dict
+ A dictionary containing parameters required for the
+ estimation method, such as 'yield_drift', which is the
+ drift at which yielding is expected to occur.
+ method: str, optional
+ The method used to estimate the RID values. Currently,
+ only 'FEMA P58' is implemented. Defaults to 'FEMA P58'.
- # return the generated drift realizations
- return RID
+ Raises
+ ------
+ ValueError
+ If the method is called before a sample is generated.
- def calibrate_model(self, config):
"""
- Calibrate a demand model to describe the raw demand data
+ if self.sample is None:
+ msg = 'Demand model does not have a sample yet.'
+ raise ValueError(msg)
+
+ sample_tuple = self.save_sample(save_units=True)
+ assert isinstance(sample_tuple, tuple)
+ demand_sample, demand_units = sample_tuple
+ assert isinstance(demand_sample, pd.DataFrame)
+ assert isinstance(demand_units, pd.Series)
+ pid = demand_sample['PID']
+ rid = self.estimate_RID(pid, params, method)
+ rid_units = pd.Series('unitless', index=rid.columns)
+ demand_sample_ext = pd.concat([demand_sample, rid], axis=1)
+ units_ext = pd.concat([demand_units, rid_units])
+ demand_sample_ext.loc['Units', :] = units_ext
+ self.load_sample(demand_sample_ext)
+
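A hedged usage sketch, assuming `demand_model` already holds a sample with PID columns:

demand_model.estimate_RID_and_adjust_sample(params={'yield_drift': 0.01})
# The sample now carries matching RID columns with 'unitless' units.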
+ def expand_sample(
+ self,
+ label: str,
+ value: float | np.ndarray,
+ unit: str,
+ location: str = '0',
+ direction: str = '1',
+ ) -> None:
+ """
+ Add an extra column to the demand sample.
+
+ The column contains repeated instances of `value`, is accessed
+ via the multi-index (`label`-`location`-`direction`), and has
+ units of `unit`.
+
+ Parameters
+ ----------
+ label: str
+ Label to use to extend the MultiIndex of the demand sample.
+ value: float | np.ndarray
+ Values to add to the rows of the additional column.
+ unit: str
+ Unit that corresponds to the additional column.
+ location: str, optional
+ Optional location, defaults to `0`.
+ direction: str, optional
+ Optional direction, defaults to `1`.
+
+ Raises
+ ------
+ ValueError
+ If the method is called before a sample is generated.
+ ValueError
+ If `value` is a numpy array of incorrect shape.
+
+ """
+ if self.sample is None:
+ msg = 'Demand model does not have a sample yet.'
+ raise ValueError(msg)
+ sample_tuple = self.save_sample(save_units=True)
+ assert isinstance(sample_tuple, tuple)
+ demand_sample, demand_units = sample_tuple
+ assert isinstance(demand_sample, pd.DataFrame)
+ assert isinstance(demand_units, pd.Series)
+ if isinstance(value, np.ndarray) and len(value) != len(demand_sample):
+ msg = 'Incompatible array length.'
+ raise ValueError(msg)
+ demand_sample[label, location, direction] = value
+ demand_units[label, location, direction] = unit
+ demand_sample.loc['Units', :] = demand_units
+ self.load_sample(demand_sample)
+
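A hedged usage sketch (label and value are hypothetical): tag every realization with a scalar intensity measure so downstream models can reference it.

demand_model.expand_sample(label='SA_1.0', value=0.45, unit='g')
# Adds a ('SA_1.0', '0', '1') column filled with 0.45 across all rows.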
+ def calibrate_model(self, config: dict) -> None: # noqa: C901
+ """
+ Calibrate a demand model to describe the raw demand data.
The raw data shall be parsed first to ensure that it follows the
schema expected by this method. The calibration settings define the
@@ -388,36 +532,35 @@ def calibrate_model(self, config):
Parameters
----------
config: dict
- A dictionary, typically read from a json file, that specifies the
+ A dictionary, typically read from a JSON file, that specifies the
distribution family, truncation and censoring limits, and other
settings for the calibration.
"""
-
if self.calibrated:
- self.log_msg(
- 'WARNING: DemandModel has been previously calibrated.',
- prepend_timestamp=False,
- )
+ self.log.warning('DemandModel has been previously calibrated.')
- def parse_settings(settings, demand_type):
- def parse_str_to_float(in_str, context_string):
- # pylint: disable = missing-return-type-doc
- # pylint: disable = missing-return-doc
+ def parse_settings( # noqa: C901
+ cal_df: pd.DataFrame, settings: dict, demand_type: str
+ ) -> None:
+ def parse_str_to_float(in_str: str, context_string: str) -> float:
try:
- out_float = float(in_str)
+ out_float = (
+ np.nan if base.check_if_str_is_na(in_str) else float(in_str)
+ )
except ValueError:
- self.log_msg(
- f"WARNING: Could not parse {in_str} provided as "
- f"{context_string}. Using NaN instead.",
- prepend_timestamp=False,
+ self.log.warning(
+ f'Could not parse {in_str} provided as '
+ f'{context_string}. Using NaN instead.'
)
out_float = np.nan
return out_float
+ demand_sample = self.save_sample()
+ assert isinstance(demand_sample, pd.DataFrame)
active_d_types = demand_sample.columns.get_level_values('type').unique()
if demand_type == 'ALL':
@@ -428,12 +571,12 @@ def parse_str_to_float(in_str, context_string):
for d_type in active_d_types:
if d_type.split('_')[0] == demand_type:
- cols_lst.append(d_type)
+ cols_lst.append(d_type) # noqa: PERF401
cols = tuple(cols_lst)
# load the distribution family
- cal_df.loc[idx[cols, :, :], 'Family'] = settings['DistributionFamily']
+ cal_df.loc[list(cols), 'Family'] = settings['DistributionFamily']
# load limits
for lim in (
@@ -442,13 +585,13 @@ def parse_str_to_float(in_str, context_string):
'TruncateLower',
'TruncateUpper',
):
- if lim in settings.keys():
+ if lim in settings:
val = parse_str_to_float(settings[lim], lim)
if not pd.isna(val):
- cal_df.loc[idx[cols, :, :], lim] = val
+ cal_df.loc[list(cols), lim] = val
# scale the censor and truncation limits, if needed
- scale_factor = self._asmnt.scale_factor(settings.get('Unit', None))
+ scale_factor = self._asmnt.scale_factor(settings.get('Unit'))
rows_to_scale = [
'CensorLower',
@@ -456,10 +599,10 @@ def parse_str_to_float(in_str, context_string):
'TruncateLower',
'TruncateUpper',
]
- cal_df.loc[idx[cols, :, :], rows_to_scale] *= scale_factor
+ cal_df.loc[idx[cols, :, :], rows_to_scale] *= scale_factor # type: ignore
# load the prescribed additional uncertainty
- if 'AddUncertainty' in settings.keys():
+ if 'AddUncertainty' in settings:
sig_increase = parse_str_to_float(
settings['AddUncertainty'], 'AddUncertainty'
)
@@ -468,11 +611,13 @@ def parse_str_to_float(in_str, context_string):
if settings['DistributionFamily'] == 'normal':
sig_increase *= scale_factor
- cal_df.loc[idx[cols, :, :], 'SigIncrease'] = sig_increase
+ cal_df.loc[list(cols), 'SigIncrease'] = sig_increase
- def get_filter_mask(lower_lims, upper_lims):
- # pylint: disable=missing-return-doc
- # pylint: disable=missing-return-type-doc
+ def get_filter_mask(
+ demand_sample: pd.DataFrame,
+ lower_lims: np.ndarray,
+ upper_lims: np.ndarray,
+ ) -> np.ndarray:
demands_of_interest = demand_sample.iloc[:, pd.notna(upper_lims)]
limits_of_interest = upper_lims[pd.notna(upper_lims)]
upper_mask = np.all(demands_of_interest < limits_of_interest, axis=1)
@@ -483,10 +628,11 @@ def get_filter_mask(lower_lims, upper_lims):
return np.all([lower_mask, upper_mask], axis=0)
- self.log_div()
- self.log_msg('Calibrating demand model...')
+ self.log.div()
+ self.log.msg('Calibrating demand model...')
demand_sample = self.sample
+ assert isinstance(demand_sample, pd.DataFrame)
# initialize a DataFrame that contains calibration information
cal_df = pd.DataFrame(
@@ -507,21 +653,21 @@ def get_filter_mask(lower_lims, upper_lims):
cal_df['Family'] = cal_df['Family'].astype(str)
# start by assigning the default option ('ALL') to every demand column
- parse_settings(config['ALL'], 'ALL')
+ parse_settings(cal_df, config['ALL'], demand_type='ALL')
# then parse the additional settings and make the necessary adjustments
- for demand_type in config.keys():
+ for demand_type in config: # noqa: PLC0206
if demand_type != 'ALL':
- parse_settings(config[demand_type], demand_type)
+ parse_settings(cal_df, config[demand_type], demand_type=demand_type)
if self._asmnt.log.verbose:
- self.log_msg(
- "\nCalibration settings successfully parsed:\n" + str(cal_df),
+ self.log.msg(
+ '\nCalibration settings successfully parsed:\n' + str(cal_df),
prepend_timestamp=False,
)
else:
- self.log_msg(
- "\nCalibration settings successfully parsed:\n",
+ self.log.msg(
+ '\nCalibration settings successfully parsed:\n',
prepend_timestamp=False,
)
@@ -532,18 +678,19 @@ def get_filter_mask(lower_lims, upper_lims):
# Currently, non-empirical demands are assumed to have some level of
# correlation, hence, a censored value in any demand triggers the
# removal of the entire sample from the population.
- upper_lims = cal_df.loc[:, 'CensorUpper'].values
- lower_lims = cal_df.loc[:, 'CensorLower'].values
+ upper_lims = cal_df.loc[:, 'CensorUpper'].to_numpy()
+ lower_lims = cal_df.loc[:, 'CensorLower'].to_numpy()
+ assert isinstance(demand_sample, pd.DataFrame)
if ~np.all(pd.isna(np.array([upper_lims, lower_lims]))):
- censor_mask = get_filter_mask(lower_lims, upper_lims)
+ censor_mask = get_filter_mask(demand_sample, lower_lims, upper_lims)
censored_count = np.sum(~censor_mask)
- demand_sample = demand_sample.loc[censor_mask, :]
+ demand_sample = pd.DataFrame(demand_sample.loc[censor_mask, :])
- self.log_msg(
- "\nBased on the provided censoring limits, "
- f"{censored_count} samples were censored.",
+ self.log.msg(
+ '\nBased on the provided censoring limits, '
+ f'{censored_count} samples were censored.',
prepend_timestamp=False,
)
else:
@@ -553,20 +700,21 @@ def get_filter_mask(lower_lims, upper_lims):
# If yes, that suggests an error either in the samples or the
# configuration. We handle such errors gracefully: the analysis is not
# terminated, but we show an error in the log file.
- upper_lims = cal_df.loc[:, 'TruncateUpper'].values
- lower_lims = cal_df.loc[:, 'TruncateLower'].values
+ upper_lims = cal_df.loc[:, 'TruncateUpper'].to_numpy()
+ lower_lims = cal_df.loc[:, 'TruncateLower'].to_numpy()
+ assert isinstance(demand_sample, pd.DataFrame)
if ~np.all(pd.isna(np.array([upper_lims, lower_lims]))):
- truncate_mask = get_filter_mask(lower_lims, upper_lims)
+ truncate_mask = get_filter_mask(demand_sample, lower_lims, upper_lims)
truncated_count = np.sum(~truncate_mask)
if truncated_count > 0:
- demand_sample = demand_sample.loc[truncate_mask, :]
+ demand_sample = pd.DataFrame(demand_sample.loc[truncate_mask, :])
- self.log_msg(
- "\nBased on the provided truncation limits, "
- f"{truncated_count} samples were removed before demand "
- "calibration.",
+ self.log.msg(
+ '\nBased on the provided truncation limits, '
+ f'{truncated_count} samples were removed before demand '
+ 'calibration.',
prepend_timestamp=False,
)
@@ -577,8 +725,9 @@ def get_filter_mask(lower_lims, upper_lims):
empirical_edps = []
for edp in cal_df.index:
if cal_df.loc[edp, 'Family'] == 'empirical':
- empirical_edps.append(edp)
+ empirical_edps.append(edp) # noqa: PERF401
+ assert isinstance(demand_sample, pd.DataFrame)
if empirical_edps:
self.empirical_data = demand_sample.loc[:, empirical_edps].copy()
@@ -589,29 +738,34 @@ def get_filter_mask(lower_lims, upper_lims):
cal_df = cal_df.drop(empirical_edps, axis=0)
if self._asmnt.log.verbose:
- self.log_msg(
- f"\nDemand data used for calibration:\n{demand_sample}",
+ self.log.msg(
+ f'\nDemand data used for calibration:\n{demand_sample}',
prepend_timestamp=False,
)
# fit the joint distribution
- self.log_msg(
- "\nFitting the prescribed joint demand distribution...",
+ self.log.msg(
+ '\nFitting the prescribed joint demand distribution...',
prepend_timestamp=False,
)
demand_theta, demand_rho = uq.fit_distribution_to_sample(
- raw_samples=demand_sample.values.T,
- distribution=cal_df.loc[:, 'Family'].values,
+ raw_sample=demand_sample.to_numpy().T,
+ distribution=cal_df.loc[:, 'Family'].values, # type: ignore
censored_count=censored_count,
- detection_limits=cal_df.loc[:, ['CensorLower', 'CensorUpper']].values,
- truncation_limits=cal_df.loc[:, ['TruncateLower', 'TruncateUpper']].values,
+ detection_limits=cal_df.loc[ # type: ignore
+ :,
+ ['CensorLower', 'CensorUpper'],
+ ].values,
+ truncation_limits=cal_df.loc[ # type: ignore
+ :, ['TruncateLower', 'TruncateUpper']
+ ].values,
multi_fit=False,
logger_object=self._asmnt.log,
)
# fit the joint distribution
- self.log_msg(
- "\nCalibration successful, processing results...",
+ self.log.msg(
+ '\nCalibration successful, processing results...',
prepend_timestamp=False,
)
@@ -620,12 +774,16 @@ def get_filter_mask(lower_lims, upper_lims):
# increase the variance of the marginal distributions, if needed
if ~np.all(pd.isna(model_params.loc[:, 'SigIncrease'].values)):
- self.log_msg("\nIncreasing demand variance...", prepend_timestamp=False)
+ self.log.msg('\nIncreasing demand variance...', prepend_timestamp=False)
- sig_inc = np.nan_to_num(model_params.loc[:, 'SigIncrease'].values)
- sig_0 = model_params.loc[:, 'Theta_1'].values
+ sig_inc = np.nan_to_num(
+ model_params.loc[:, 'SigIncrease'].values, # type: ignore
+ )
+ sig_0 = model_params.loc[:, 'Theta_1'].to_numpy()
- model_params.loc[:, 'Theta_1'] = np.sqrt(sig_0**2.0 + sig_inc**2.0)
+ model_params.loc[:, 'Theta_1'] = np.sqrt(
+ sig_0**2.0 + sig_inc**2.0, # type: ignore
+ )
# remove unneeded fields from model_params
for col in ('SigIncrease', 'CensorLower', 'CensorUpper'):
@@ -638,8 +796,9 @@ def get_filter_mask(lower_lims, upper_lims):
self.marginal_params = model_params
- self.log_msg(
- "\nCalibrated demand model marginal distributions:\n" + str(model_params),
+ self.log.msg(
+ '\nCalibrated demand model marginal distributions:\n'
+ + str(model_params),
prepend_timestamp=False,
)
@@ -648,60 +807,50 @@ def get_filter_mask(lower_lims, upper_lims):
demand_rho, columns=cal_df.index, index=cal_df.index
)
- self.log_msg(
- "\nCalibrated demand model correlation matrix:\n" + str(self.correlation),
+ self.log.msg(
+ '\nCalibrated demand model correlation matrix:\n'
+ + str(self.correlation),
prepend_timestamp=False,
)
self.calibrated = True
- def save_model(self, file_prefix):
- """
- Save parameters of the demand model to a set of csv files
-
- """
-
- self.log_div()
- self.log_msg('Saving demand model...')
+ def save_model(self, file_prefix: str) -> None:
+ """Save parameters of the demand model to a set of csv files."""
+ self.log.div()
+ self.log.msg('Saving demand model...')
# save the correlation and empirical data
- file_io.save_to_csv(self.correlation, file_prefix + '_correlation.csv')
+ file_io.save_to_csv(self.correlation, Path(file_prefix + '_correlation.csv'))
if self.empirical_data is not None:
file_io.save_to_csv(
self.empirical_data,
- file_prefix + '_empirical.csv',
- units=self.units,
+ Path(file_prefix + '_empirical.csv'),
+ units=self.user_units,
unit_conversion_factors=self._asmnt.unit_conversion_factors,
log=self._asmnt.log,
)
- # the log standard deviations in the marginal parameters need to be
- # scaled up before feeding to the saving method where they will be
- # scaled back down and end up being saved unscaled to the target file
-
- marginal_params = self.marginal_params.copy()
-
- log_rows = marginal_params['Family'] == 'lognormal'
- log_demands = marginal_params.loc[log_rows, :]
-
- for label in log_demands.index:
- if label in self.units.index:
- unit_factor = self._asmnt.calc_unit_scale_factor(self.units[label])
-
- marginal_params.loc[label, 'Theta_1'] *= unit_factor
+ # Converting the marginal parameters requires special
+ # treatment, so we can't rely on file_io's universal unit
+ # conversion functionality. We do it manually here instead.
+ assert isinstance(self.marginal_params, pd.DataFrame)
+ assert isinstance(self.user_units, pd.Series)
+ marginal_params_user_units = self._convert_marginal_params(
+ self.marginal_params.copy(), self.user_units, inverse_conversion=True
+ )
+ marginal_params_user_units['Units'] = self.user_units
file_io.save_to_csv(
- marginal_params,
- file_prefix + '_marginals.csv',
- units=self.units,
- unit_conversion_factors=self._asmnt.unit_conversion_factors,
+ marginal_params_user_units,
+ Path(file_prefix + '_marginals.csv'),
orientation=1,
log=self._asmnt.log,
)
- self.log_msg('Demand model successfully saved.', prepend_timestamp=False)
+ self.log.msg('Demand model successfully saved.', prepend_timestamp=False)
- def load_model(self, data_source):
+ def load_model(self, data_source: str | dict) -> None:
"""
Load the model that describes demands on the asset.
@@ -714,47 +863,64 @@ def load_model(self, data_source):
_correlation.csv. If dict, the data source is a dictionary
with the following optional keys: 'marginals', 'empirical', and
'correlation'. The value under each key shall be a DataFrame.
- """
- self.log_div()
- self.log_msg('Loading demand model...')
+ Raises
+ ------
+ TypeError
+ If the data source type is invalid.
+
+ """
+ self.log.div()
+ self.log.msg('Loading demand model...')
# prepare the marginal data source variable to load the data
if isinstance(data_source, dict):
marginal_data_source = data_source.get('marginals')
+ assert isinstance(marginal_data_source, pd.DataFrame)
empirical_data_source = data_source.get('empirical', None)
correlation_data_source = data_source.get('correlation', None)
- else:
+ elif isinstance(data_source, str):
marginal_data_source = data_source + '_marginals.csv'
empirical_data_source = data_source + '_empirical.csv'
correlation_data_source = data_source + '_correlation.csv'
+ else:
+ msg = f'Invalid data_source type: {type(data_source)}.'
+ raise TypeError(msg)
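The two accepted `data_source` forms, sketched (the path and DataFrame are hypothetical):

demand_model.load_model('output/demand')  # expects output/demand_marginals.csv, etc.
demand_model.load_model({'marginals': marginals_df})  # in-memory DataFrames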
if empirical_data_source is not None:
- self.empirical_data = file_io.load_data(
- empirical_data_source,
- self._asmnt.unit_conversion_factors,
- log=self._asmnt.log,
- )
- self.empirical_data.columns.names = ('type', 'loc', 'dir')
+ if (
+ isinstance(empirical_data_source, str)
+ and Path(empirical_data_source).exists()
+ ):
+ empirical_data = file_io.load_data(
+ empirical_data_source,
+ self._asmnt.unit_conversion_factors,
+ log=self._asmnt.log,
+ )
+ assert isinstance(empirical_data, pd.DataFrame)
+ self.empirical_data = empirical_data
+ self.empirical_data.columns.names = ['type', 'loc', 'dir']
else:
self.empirical_data = None
if correlation_data_source is not None:
- self.correlation = file_io.load_data(
+ correlation = file_io.load_data(
correlation_data_source,
self._asmnt.unit_conversion_factors,
reindex=False,
log=self._asmnt.log,
)
- self.correlation.index.set_names(['type', 'loc', 'dir'], inplace=True)
- self.correlation.columns.set_names(['type', 'loc', 'dir'], inplace=True)
+ assert isinstance(correlation, pd.DataFrame)
+ self.correlation = correlation
+ self.correlation.index = self.correlation.index.set_names(
+ ['type', 'loc', 'dir']
+ )
+ self.correlation.columns = self.correlation.columns.set_names(
+ ['type', 'loc', 'dir']
+ )
else:
self.correlation = None
- # the log standard deviations in the marginal parameters need to be
- # adjusted after getting the data from the loading method where they
- # were scaled according to the units of the corresponding variable
-
# Note that a data source without marginal information is not valid
marginal_params, units = file_io.load_data(
marginal_data_source,
@@ -764,62 +930,67 @@ def load_model(self, data_source):
return_units=True,
log=self._asmnt.log,
)
- marginal_params.index.set_names(['type', 'loc', 'dir'], inplace=True)
+ assert isinstance(marginal_params, pd.DataFrame)
+ assert isinstance(units, pd.Series)
- marginal_params = self.convert_marginal_params(marginal_params.copy(), units)
+ marginal_params.index = marginal_params.index.set_names(
+ ['type', 'loc', 'dir']
+ )
- self.marginal_params = marginal_params
- self.units = units
+ marginal_params = self._convert_marginal_params(
+ marginal_params.copy(), units
+ )
- self.log_msg('Demand model successfully loaded.', prepend_timestamp=False)
+ self.marginal_params = marginal_params
+ self.user_units = units
- def _create_RVs(self, preserve_order=False):
- """
- Create a random variable registry for the joint distribution of demands.
+ self.log.msg('Demand model successfully loaded.', prepend_timestamp=False)
- """
+ def _create_RVs(self, *, preserve_order: bool = False) -> None: # noqa: N802
+ """Create a random variable registry for the joint distribution of demands."""
+ assert self.marginal_params is not None
# initialize the registry
- RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
+ rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
# add a random variable for each demand variable
for rv_params in self.marginal_params.itertuples():
edp = rv_params.Index
- rv_tag = f'EDP-{edp[0]}-{edp[1]}-{edp[2]}'
- family = getattr(rv_params, "Family", 'deterministic')
+ rv_tag = f'EDP-{edp[0]}-{edp[1]}-{edp[2]}' # type: ignore
+ family = getattr(rv_params, 'Family', 'deterministic')
if family == 'empirical':
- if preserve_order:
- dist_family = 'coupled_empirical'
- else:
- dist_family = 'empirical'
+ dist_family = 'coupled_empirical' if preserve_order else 'empirical'
# empirical RVs need the data points
- RV_reg.add_RV(
+ rv_reg.add_RV(
uq.rv_class_map(dist_family)(
name=rv_tag,
- raw_samples=self.empirical_data.loc[:, edp].values,
+ theta=self.empirical_data.loc[ # type: ignore
+ :, # type: ignore
+ edp,
+ ].values,
)
)
else:
# all other RVs need parameters of their distributions
- RV_reg.add_RV(
+ rv_reg.add_RV(
uq.rv_class_map(family)(
name=rv_tag,
- theta=[
- getattr(rv_params, f"Theta_{t_i}", np.nan)
+ theta=[ # type: ignore
+ getattr(rv_params, f'Theta_{t_i}', np.nan)
for t_i in range(3)
],
truncation_limits=[
- getattr(rv_params, f"Truncate{side}", np.nan)
- for side in ("Lower", "Upper")
+ getattr(rv_params, f'Truncate{side}', np.nan)
+ for side in ('Lower', 'Upper')
],
)
)
- self.log_msg(
- f"\n{self.marginal_params.shape[0]} random variables created.",
+ self.log.msg(
+ f'\n{self.marginal_params.shape[0]} random variables created.',
prepend_timestamp=False,
)
@@ -827,32 +998,33 @@ def _create_RVs(self, preserve_order=False):
if self.correlation is not None:
rv_set_tags = [
f'EDP-{edp[0]}-{edp[1]}-{edp[2]}'
- for edp in self.correlation.index.values
+ for edp in self.correlation.index.to_numpy()
]
- RV_reg.add_RV_set(
+ rv_reg.add_RV_set(
uq.RandomVariableSet(
'EDP_set',
- list(RV_reg.RVs(rv_set_tags).values()),
+ list(rv_reg.RVs(rv_set_tags).values()),
self.correlation.values,
)
)
- self.log_msg(
- f"\nCorrelations between {len(rv_set_tags)} random variables "
- "successfully defined.",
+ self.log.msg(
+ f'\nCorrelations between {len(rv_set_tags)} random variables '
+ 'successfully defined.',
prepend_timestamp=False,
)
- self._RVs = RV_reg
+ self._RVs = rv_reg
- def clone_demands(self, demand_cloning):
+ def clone_demands(self, demand_cloning: dict) -> None:
"""
- Clones demands. This means copying over columns of the
- original demand sample and assigning given names to them. The
- columns to be copied over and the names to assign to the
- copies are defined as the keys and values of the
- `demand_cloning` dictionary, respectively.
+ Clone demands.
+
+ Copies over columns of the original demand sample and
+ assigns given names to them. The columns to be copied over
+ and the names to be assigned to the copies are defined as the keys
+ and values of the `demand_cloning` dictionary.
The method modifies `sample` inplace.
Parameters
@@ -873,7 +1045,6 @@ def clone_demands(self, demand_cloning):
In multiple instances of invalid demand_cloning entries.
"""
-
# it's impossible to have duplicate keys, because
# demand_cloning is a dictionary.
new_columns_list = demand_cloning.values()
@@ -888,12 +1059,11 @@ def clone_demands(self, demand_cloning):
for new_columns in new_columns_list:
flat_list.extend(new_columns)
if len(set(flat_list)) != len(flat_list):
- raise ValueError('Duplicate entries in demand cloning configuration.')
+ msg = 'Duplicate entries in demand cloning configuration.'
+ raise ValueError(msg)
# turn the config entries to tuples
- def turn_to_tuples(demand_cloning):
- # pylint: disable=missing-return-doc
- # pylint: disable=missing-return-type-doc
+ def turn_to_tuples(demand_cloning: dict) -> dict:
demand_cloning_tuples = {}
for key, values in demand_cloning.items():
demand_cloning_tuples[tuple(key.split('-'))] = [
@@ -903,19 +1073,19 @@ def turn_to_tuples(demand_cloning):
demand_cloning = turn_to_tuples(demand_cloning)
- # The demand cloning confuguration should not include
- # columns that are not present in the orignal sample.
+ # The demand cloning configuration should not include
+ # columns that are not present in the original sample.
warn_columns = []
+ assert self.sample is not None
for column in demand_cloning:
if column not in self.sample.columns:
- warn_columns.append(column)
+ warn_columns.append(column) # noqa: PERF401
if warn_columns:
warn_columns = ['-'.join(x) for x in warn_columns]
- self.log_msg(
- "\nWARNING: The demand cloning configuration lists "
+ self.log.warning(
+ 'The demand cloning configuration lists '
"columns that are not present in the original demand sample's "
- f"columns: {warn_columns}.\n",
- prepend_timestamp=False,
+ f'columns: {warn_columns}.'
)
# we iterate over the existing columns of the sample and try
@@ -940,11 +1110,14 @@ def turn_to_tuples(demand_cloning):
# update the column index
self.sample.columns = pd.MultiIndex.from_tuples(column_values)
# update units
- self.units = self.units.iloc[column_index]
- self.units.index = self.sample.columns
+ self.user_units = self.user_units.iloc[column_index] # type: ignore
+ assert self.user_units is not None
+ self.user_units.index = self.sample.columns
- def generate_sample(self, config):
+ def generate_sample(self, config: dict) -> None:
"""
+ Generate the demand sample.
+
Generates a sample of random variables (RVs) based on the
specified configuration for demand modeling.
@@ -957,15 +1130,15 @@ def generate_sample(self, config):
Parameters
----------
- config : dict
+ config: dict
A dictionary containing configuration options for the
sample generation. Key options include:
- - 'SampleSize': The number of samples to generate.
- - 'PreserveRawOrder': Boolean indicating whether to
- preserve the order of the raw data. Defaults to False.
- - 'DemandCloning': Specifies if and how demand cloning
- should be applied. Can be a boolean or a detailed
- configuration.
+ * 'SampleSize': The number of samples to generate.
+ * 'PreserveRawOrder': Boolean indicating whether to
+ preserve the order of the raw data. Defaults to False.
+ * 'DemandCloning': Specifies if and how demand cloning
+ should be applied. Can be a boolean or a detailed
+ configuration.
Raises
------
@@ -993,20 +1166,23 @@ def generate_sample(self, config):
>>> model.generate_sample(config)
# This will generate 1000 realizations of demand variables
# with the specified configuration.
- """
+ """
if self.marginal_params is None:
- raise ValueError(
- 'Model parameters have not been specified. Either'
+ msg = (
+ 'Model parameters have not been specified. Either '
'load parameters from a file or calibrate the '
'model using raw demand data.'
)
+ raise ValueError(msg)
- self.log_div()
- self.log_msg('Generating sample from demand variables...')
+ self.log.div()
+ self.log.msg('Generating sample from demand variables...')
self._create_RVs(preserve_order=config.get('PreserveRawOrder', False))
+ assert self._RVs is not None
+ assert self._asmnt.options.sampling_method is not None
sample_size = config['SampleSize']
self._RVs.generate_sample(
sample_size=sample_size, method=self._asmnt.options.sampling_method
@@ -1016,10 +1192,12 @@ def generate_sample(self, config):
assert self._RVs is not None
assert self._RVs.RV_sample is not None
sample = pd.DataFrame(self._RVs.RV_sample)
- sample.sort_index(axis=0, inplace=True)
- sample.sort_index(axis=1, inplace=True)
+ sample = sample.sort_index(axis=0)
+ sample = sample.sort_index(axis=1)
- sample = base.convert_to_MultiIndex(sample, axis=1)['EDP']
+ sample_mi = base.convert_to_MultiIndex(sample, axis=1)['EDP']
+ assert isinstance(sample_mi, pd.DataFrame)
+ sample = sample_mi
sample.columns.names = ['type', 'loc', 'dir']
self.sample = sample
@@ -1027,7 +1205,368 @@ def generate_sample(self, config):
if config.get('DemandCloning', False):
self.clone_demands(config['DemandCloning'])
- self.log_msg(
- f"\nSuccessfully generated {sample_size} realizations.",
+ self.log.msg(
+ f'\nSuccessfully generated {sample_size} realizations.',
prepend_timestamp=False,
)
+
+
+def _get_required_demand_type(
+ model_parameters: pd.DataFrame,
+ pgb: pd.DataFrame,
+ demand_offset: dict | None = None,
+) -> dict:
+ """
+ Get the required demand type for the components.
+
+    Returns the demand type and its properties required to calculate
+    the damage or loss of a component. The properties include
+    whether the demand is directional, the offset, and the type of the
+    demand. The function takes as input a dataframe `pgb` that
+    contains information about the component groups in the asset. For
+    each performance group PG in the `pgb` dataframe, the function
+    retrieves the relevant parameters from the `model_parameters`
+    dataframe and parses the demand type into its properties. If the
+    demand type has a subtype, the function splits it and adds the
+    subtype to the demand type to form the EDP type. The function
+    also considers the default offset for the demand type, if one is
+    specified in `demand_offset`, and adds it to the offset of the
+    EDP. If the demand is directional, the direction is added to the
+    EDP. The function collects all the unique EDPs for each component
+    group and returns them as a dictionary where each key is an EDP
+    and its value is a list of component groups that require that EDP.
+
+ Parameters
+ ----------
+ model_parameters: pd.DataFrame
+ Model parameters. Damage model parameters, or
+ loss-function loss model parameters.
+ pgb: pd.DataFrame
+ A pandas DataFrame with the block information for
+ each component
+ demand_offset: dict, optional
+ Specifies an additional location offset for specific
+ demand types. Example:
+ {'PFA': -1, 'PFV': +2}.
+
+ Returns
+ -------
+ dict
+ A dictionary of EDP requirements, where each key is the EDP
+ string (e.g., "PGA-0-1"), and the
+ corresponding value is a list of tuples (component_id,
+ location, direction)
+
+ Raises
+ ------
+ ValueError
+ When a negative value is used for `loc`. Currently not
+ supported.
+
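+    Examples
+    --------
+    A minimal sketch (illustrative; the component ID and the resulting
+    EDP key are assumed):
+
+    >>> required = _get_required_demand_type(
+    ...     model_parameters, pgb, demand_offset={'PFA': -1}
+    ... )
+    >>> # e.g. {(('PFA-0-1',), None): [('cmp.A', '1', '1', '0')]}
+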
+ """
+ model_parameters = model_parameters.sort_index(axis=1)
+
+ # Assign default demand_offset to empty dict.
+ if not demand_offset:
+ demand_offset = {}
+
+ required_edps = defaultdict(list)
+
+ for pg in pgb.index:
+ cmp = pg[0]
+
+ # Utility Demand: if there is an `Expression`, then load the
+ # rest of the demand types.
+ expression = model_parameters.loc[cmp, :].get(('Demand', 'Expression'))
+ if expression is not None:
+ # get the number of variables in the expression using
+ # the numexpr library
+ parsed_expr = ne.NumExpr(_clean_up_expression(expression))
+ num_terms = len(parsed_expr.input_names)
+ demand_parameters_list = []
+ for i in range(num_terms):
+ if i == 0:
+ index_lvl0 = 'Demand'
+ else:
+ index_lvl0 = f'Demand{i + 1}'
+ demand_parameters_list.append(
+ (
+ model_parameters.loc[cmp, (index_lvl0, 'Type')],
+ model_parameters.loc[cmp, (index_lvl0, 'Offset')],
+ model_parameters.loc[cmp, (index_lvl0, 'Directional')],
+ )
+ )
+ else:
+ demand_parameters_list = [
+ (
+ model_parameters.loc[cmp, ('Demand', 'Type')],
+ model_parameters.loc[cmp, ('Demand', 'Offset')],
+ model_parameters.loc[cmp, ('Demand', 'Directional')],
+ )
+ ]
+
+ # Parse the demand type
+
+ edps = []
+ for demand_parameters in demand_parameters_list:
+ demand_type = demand_parameters[0]
+ offset = demand_parameters[1]
+ directional = demand_parameters[2]
+
+ assert isinstance(demand_type, str)
+
+ # Check if there is a subtype included in the demand_type
+ # string
+ if '|' in demand_type:
+ # If there is a subtype, split the demand_type string
+ # on the '|' character
+ demand_type, subtype = demand_type.split('|')
+ # Convert the demand type to the corresponding EDP
+ # type using `base.EDP_to_demand_type`
+ demand_type = base.EDP_to_demand_type[demand_type]
+ # Concatenate the demand type and subtype to form the
+ # EDP type
+ edp_type = f'{demand_type}_{subtype}'
+ else:
+ # If there is no subtype, convert the demand type to
+ # the corresponding EDP type using
+ # `base.EDP_to_demand_type`
+ demand_type = base.EDP_to_demand_type[demand_type]
+ # Assign the EDP type to be equal to the demand type
+ edp_type = demand_type
+
+ # Consider the default offset, if needed
+ if demand_type in demand_offset:
+ # If the demand type has a default offset in
+ # `demand_offset`, add the offset
+ # to the default offset
+ offset = int(offset + demand_offset[demand_type]) # type: ignore
+ else:
+ # If the demand type does not have a default offset in
+ # `demand_offset`, convert the
+ # offset to an integer
+ offset = int(offset) # type: ignore
+
+ # Determine the direction
+ direction = pg[2] if directional else '0'
+
+ # Concatenate the EDP type, offset, and direction to form
+ # the EDP key
+ edp = f'{edp_type}-{int(pg[1]) + offset!s}-{direction}'
+
+ if int(pg[1]) + offset < 0:
+ msg = (
+ f'Negative location encountered for component '
+ f'(cmp, loc, dir, uid)=`{pg}`. Would require `{edp}`. '
+ f'Please update the location of the component.'
+ )
+ raise ValueError(msg)
+
+ edps.append(edp)
+
+ edps_t = tuple(edps) # makes it hashable
+
+ # Add the current PG (performance group) to the list of
+ # PGs associated with the current EDP key
+ required_edps[edps_t, expression].append(pg)
+
+ # Return the required EDPs
+ return required_edps
+
+
+def _assemble_required_demand_data(
+ required_edps: set, nondirectional_multipliers: dict, demand_sample: pd.DataFrame
+) -> dict:
+ """
+    Assemble demand data for damage state determination.
+
+    Takes the maximum of all available directions for non-directional
+    demand, scales it using the non-directional multiplier specified
+    in `nondirectional_multipliers`, and returns the result as a
+    dictionary whose keys are the elements of `required_edps` (tuples
+    of EDPs in the `<type>-<loc>-<dir>` format, paired with an
+    optional combination expression) and whose values are arrays of
+    demand values.
+
+ Parameters
+ ----------
+ required_edps: set
+ Set of required EDPs. The elements in the set should be
+ tuples. For each, the first element should be a tuple
+ containing EDPs in the `type`-`loc`-`dir` format. The second
+ should be an expression defining how the EDPs in the tuple
+ should be combined when it contains more than a single EDP.
+ nondirectional_multipliers: dict
+ Nondirectional components are sensitive to demands coming
+ in any direction. Results are typically available in two
+ orthogonal directions. FEMA P-58 suggests using the
+ formula `max(dir_1, dir_2) * 1.2` to estimate the demand
+ for such components. This parameter allows modifying the
+ 1.2 multiplier with a user-specified value. The change can
+ be applied to "ALL" EDPs, or for specific EDPs, such as
+ "PFA", "PFV", etc. Examples:
+ #. {'PFA': 1.2, 'PID': 1.00}
+ #. {'ALL': 1.0}
+ demand_sample: pd.DataFrame
+ Dataframe containing the demand sample, realizations of EDPs
+ (or/and IMs) that are used for damage and loss calculations.
+
+ Returns
+ -------
+ demand_dict: dict
+ A dictionary of assembled demand data for calculation
+
+ Raises
+ ------
+ ValueError
+ If demand data for a given EDP cannot be found
+
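+    Examples
+    --------
+    A minimal sketch (illustrative; the EDP key is assumed):
+
+    >>> demand_dict = _assemble_required_demand_data(
+    ...     {(('PFA-1-0',), None)}, {'ALL': 1.2}, demand_sample
+    ... )
+    >>> # the non-directional `PFA-1-0` entry holds the maximum of the
+    >>> # PFA values at location 1 over all directions, scaled by 1.2
+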
+ """
+ demand_dict = {}
+
+ for edps, expression in required_edps:
+ edp_values = {}
+
+ for edp in edps:
+ edp_type, location, direction = edp.split('-')
+
+ if direction == '0':
+ # non-directional
+ demand = (
+ demand_sample.loc[
+ :, # type: ignore
+ (edp_type, location),
+ ]
+ .max(axis=1)
+ .to_numpy()
+ )
+
+ if edp_type in nondirectional_multipliers:
+ multiplier = nondirectional_multipliers[edp_type]
+
+ elif 'ALL' in nondirectional_multipliers:
+ multiplier = nondirectional_multipliers['ALL']
+
+ else:
+ msg = (
+ f'Peak orthogonal EDP multiplier '
+ f'for non-directional demand '
+ f'calculation of `{edp_type}` not specified.'
+ )
+ raise ValueError(msg)
+
+ demand *= multiplier
+
+ else:
+ # directional
+ demand = demand_sample[edp_type, location, direction].to_numpy()
+
+ edp_values[edp] = demand
+
+ # evaluate expression
+ if expression is not None:
+ # build a dict of values
+ value_dict = {}
+ for i, edp_value in enumerate(edp_values.values()):
+ value_dict[f'X{i + 1}'] = edp_value
+ demand = ne.evaluate(
+ _clean_up_expression(expression), local_dict=value_dict
+ )
+ demand_dict[edps, expression] = demand
+
+ return demand_dict
+
+
+def _clean_up_expression(expression: str) -> str:
+ """
+ Clean up a mathematical expression in a string.
+
+ Cleans up the given mathematical expression by ensuring it
+ contains only allowed characters and replaces the caret (^)
+ exponentiation operator with the double asterisk (**) operator.
+
+ Parameters
+ ----------
+ expression: str
+ The mathematical expression to clean up.
+
+ Returns
+ -------
+ str
+ The cleaned-up mathematical expression.
+
+ Raises
+ ------
+ ValueError
+ If the expression contains invalid characters.
+
+ Examples
+ --------
+ >>> _clean_up_expression('3 + 5 * 2')
+ '3 + 5 * 2'
+ >>> _clean_up_expression('2^3')
+ '2**3'
+ >>> _clean_up_expression('2 ** 3')
+ '2 ** 3'
+ >>> _clean_up_expression(
+ ... "[o.fork() for (o,i) in "
+ ... "[(__import__('os'), __import__('itertools'))] "
+ ... "for x in i.repeat(0)]"
+ ... )
+ Traceback (most recent call last): ...
+
+ """
+ allowed_chars = re.compile(r'^[0-9a-zA-Z\^\+\-\*/\(\)\s]*$')
+ if not bool(allowed_chars.match(expression)):
+ msg = f'Invalid expression: {expression}'
+ raise ValueError(msg)
+    # Replace the `^` exponentiation operator with the native `**`, in
+    # case `^` was used. (Prefer `**` in new expressions.)
+ return expression.replace('^', '**')
+
+
+def _verify_edps_available(available_edps: dict, required: set) -> None:
+ """
+ Verify EDP availability.
+
+ Verifies that the required EDPs are available and raises
+ appropriate errors otherwise.
+
+ Parameters
+ ----------
+ available_edps: dict
+        Dictionary mapping (`edp_type`, `loc`) tuples to the list of
+        directions for which values are available.
+    required: set
+        Set of required EDPs. Each element pairs a tuple of EDPs,
+        expressed as `edp_type`-`loc`-`dir`, with an optional
+        combination expression. Direction `0` has special meaning: it
+        is used for non-directional demands.
+
+ Raises
+ ------
+ ValueError
+ When the verification fails.
+
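+    Examples
+    --------
+    A minimal sketch (illustrative):
+
+    >>> # passes silently, since ('PFA', '1') is available
+    >>> _verify_edps_available(
+    ...     {('PFA', '1'): ['1', '2']}, {(('PFA-1-1',), None)}
+    ... )
+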
+ """
+ # Verify that the required EDPs are available in the
+ # demand sample
+ for edps, _ in required:
+ for edp in edps:
+ edp_type, location, direction = edp.split('-')
+ if (edp_type, location) not in available_edps:
+ msg = (
+ f'Unable to locate `{edp_type}` at location '
+ f'{location} in demand sample.'
+ )
+ raise ValueError(msg)
+ # if non-directional demand is requested, ensure there
+ # are entries (all directions accepted)
+ num_entries = len(available_edps[edp_type, location])
+            if direction == '0' and num_entries == 0:
+ msg = (
+ f'Unable to locate any `{edp_type}` '
+ f'at location {location} and direction {direction}.'
+ )
+ raise ValueError(msg)
+            if direction != '0' and num_entries == 0:
+ msg = f'Unable to locate `{edp_type}-{location}-{direction}`.'
+ raise ValueError(msg)
diff --git a/pelicun/model/loss_model.py b/pelicun/model/loss_model.py
index 6940ca2af..4748c1990 100644
--- a/pelicun/model/loss_model.py
+++ b/pelicun/model/loss_model.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,56 +37,1646 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This file defines Loss model objects and their methods.
-.. rubric:: Contents
+"""Loss model objects and associated methods."""
-.. autosummary::
+from __future__ import annotations
- prep_constant_median_DV
- prep_bounded_multilinear_median_DV
+from abc import ABC, abstractmethod
+from collections import defaultdict
+from itertools import product
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, overload
- LossModel
- RepairModel
+import numpy as np
+import pandas as pd
+from scipy.interpolate import RegularGridInterpolator
+
+from pelicun import base, file_io, uq
+from pelicun.model.demand_model import (
+ _assemble_required_demand_data,
+ _get_required_demand_type,
+ _verify_edps_available,
+)
+from pelicun.model.pelicun_model import PelicunModel
+from pelicun.pelicun_warnings import InconsistentUnitsError
+
+if TYPE_CHECKING:
+ from collections.abc import Callable
+
+ from pelicun.assessment import AssessmentBase
+
+idx = base.idx
+
+
+class LossModel(PelicunModel):
+ """
+ Manages loss information used in assessments.
+
+ Contains a loss model for components with Damage States (DS) and
+ one for components with Loss Functions (LF).
+
+ """
+
+ __slots__ = ['ds_model', 'dv_units', 'lf_model']
+
+ def __init__(
+ self,
+ assessment: AssessmentBase,
+ decision_variables: tuple[str, ...] = ('Cost', 'Time'),
+ dv_units: dict[str, str] | None = None,
+ ) -> None:
+ """
+ Initialize LossModel objects.
+
+ Parameters
+ ----------
+ assessment: pelicun.AssessmentBase
+            Parent assessment.
+        decision_variables: tuple
+            Defines the decision variables to be included in the loss
+            calculations. Defaults to those supported, but fewer can be
+            used if desired. When fewer are used, the loss parameters for
+            those not used will not be required.
+        dv_units: dict, optional
+            Dictionary mapping decision variables to their units, if
+            known.
+
+ """
+ super().__init__(assessment)
+
+ self.ds_model: RepairModel_DS = RepairModel_DS(assessment)
+ self.lf_model: RepairModel_LF = RepairModel_LF(assessment)
+ self._loss_map = None
+ self.decision_variables = decision_variables
+ self.dv_units = dv_units
+
+ @property
+ def sample(self) -> pd.DataFrame | None:
+ """
+ Combines the samples of the ds_model and lf_model sub-models.
+
+ Returns
+ -------
+ pd.DataFrame
+ The combined loss sample.
+
+ """
+ # Handle `None` cases
+
+ if self.ds_model.sample is None and self.lf_model.sample is None:
+ return None
+
+ if self.ds_model.sample is None:
+ return self.lf_model.sample
+
+ if self.lf_model.sample is None:
+ return self.ds_model.sample
+
+ # If both are not None, combine
+
+ ds_model_levels = self.ds_model.sample.columns.names
+
+ # add a `ds` level to the lf_model sample
+ new_index = self.lf_model.sample.columns.to_frame(index=False)
+        # add a placeholder damage state level
+ new_index['ds'] = 'N/A'
+ # reorder
+ new_index = new_index[ds_model_levels]
+ new_multiindex = pd.MultiIndex.from_frame(new_index)
+ self.lf_model.sample.columns = new_multiindex
+
+ return pd.concat((self.ds_model.sample, self.lf_model.sample), axis=1)
+
+ @property
+ def decision_variables(self) -> tuple[str, ...]:
+ """
+ Retrieve the decision variables.
+
+ Returns
+ -------
+ tuple
+ Decision variables.
+
+ """
+ # pick the object from one of the models
+ # it's the same for the other(s).
+ return self.ds_model.decision_variables
+
+ @decision_variables.setter
+ def decision_variables(self, decision_variables: tuple[str, ...]) -> None:
+ """
+ Set the decision variables.
+
+ Supported: {`Cost`, `Time`, `Energy`, `Carbon`}.
+ Could also be any other string, as long as the provided loss
+ parameters contain that decision variable.
+
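+        Examples
+        --------
+        A minimal sketch (illustrative; `asmnt` is an assessment
+        object):
+
+        >>> asmnt.loss.decision_variables = ('Cost', 'Time')
+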
+ """
+ # assign the same DVs to the included loss models.
+ for model in self._loss_models:
+ model.decision_variables = decision_variables
+
+ def add_loss_map(
+ self,
+ loss_map_path: str | pd.DataFrame | None = None,
+ loss_map_policy: str | None = None,
+ ) -> None:
+ """
+ Add a loss map to the loss model.
+
+ A loss map defines what loss parameter definition should be
+ used for each component ID in the asset model.
+
+ Parameters
+ ----------
+ loss_map_path: str or pd.DataFrame or None
+ Path to a csv file or DataFrame object that maps
+            component IDs to their loss parameter definitions.
+ loss_map_policy: str or None
+ If None, does not modify the loss map.
+ If set to `fill`, each component ID that is present in
+ the asset model but not in the loss map is mapped to
+ itself, but `excessiveRID` is excluded.
+ If set to `fill_all`, each component ID that is present in
+ the asset model but not in the loss map is mapped to
+ itself without exceptions.
+
+ Raises
+ ------
+ ValueError
+ If both arguments are None.
+
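+        Examples
+        --------
+        A minimal sketch (illustrative; `asmnt` is an assessment object
+        and the component IDs are assumed):
+
+        >>> # map `cmp.A` to the loss parameters of `cmp.B` and fill in
+        >>> # the remaining components of the asset model
+        >>> loss_map = pd.DataFrame({'Repair': ['cmp.B']}, index=['cmp.A'])
+        >>> asmnt.loss.add_loss_map(loss_map, loss_map_policy='fill')
+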
+ """
+ self.log.msg('Loading loss map...')
+
+ # If no loss map is provided and no default is requested,
+ # there is no loss map and we can't proceed.
+ if loss_map_path is None and loss_map_policy is None:
+ msg = 'Please provide a loss map and/or a loss map extension policy.'
+ raise ValueError(msg)
+
+ # get a list of unique component IDs
+ cmp_set = set(self._asmnt.asset.list_unique_component_ids())
+
+ if loss_map_path is not None:
+ self.log.msg('Loss map is provided.', prepend_timestamp=False)
+ # Read the loss map into a variable
+ loss_map = file_io.load_data(
+ loss_map_path,
+ None,
+ orientation=1,
+ reindex=False,
+ log=self._asmnt.log,
+ )
+ assert isinstance(loss_map, pd.DataFrame)
+            # Handle the deprecated `DMG-` prefix in the loss map index.
+ if np.any(['DMG' in x for x in loss_map.index]): # type: ignore
+ self.log.warning(
+ 'The `DMG-` flag in the loss_map index is deprecated '
+ 'and no longer necessary. '
+ 'Please do not prepend `DMG-` before the component '
+ 'names in the loss map.'
+ )
+ loss_map.index = pd.Index([x[1] for x in loss_map.index])
+
+ else:
+ self.log.msg('Using default loss map.', prepend_timestamp=False)
+ # Instantiate an empty loss map.
+ loss_map = pd.DataFrame({'Repair': pd.Series(dtype='object')})
+ loss_map.index = loss_map.index.astype('object')
+
+ if loss_map_policy in {'fill', 'fill_all'}:
+ # Populate missing rows with cmp_id -> cmp_id
+ for component in cmp_set:
+ if component not in loss_map.index:
+ if loss_map_policy == 'fill' and component == 'excessiveRID':
+ continue
+ loss_map.loc[component, :] = component
+
+ elif loss_map_policy is None:
+ # Don't do anything.
+ pass
+
+ # TODO(AZ): add other loss map policies.
+ else:
+ msg = f'Unknown loss map policy: `{loss_map_policy}`.'
+ raise ValueError(msg)
+
+ # Assign the loss map to the available loss models
+ self._loss_map = loss_map
+
+ self.log.msg('Loss map loaded successfully.', prepend_timestamp=True)
+
+ def load_model(
+ self,
+ data_paths: list[str | pd.DataFrame],
+ loss_map: str | pd.DataFrame,
+ decision_variables: tuple[str, ...] | None = None,
+ ) -> None:
+ """."""
+ self.log.warning(
+ '`load_model` is deprecated and will be dropped in '
+ 'future versions of pelicun. '
+ 'Please use `load_model_parameters` instead.'
+ )
+ self.add_loss_map(loss_map)
+ self.load_model_parameters(data_paths, decision_variables)
+
+ def load_model_parameters(
+ self,
+ data_paths: list[str | pd.DataFrame],
+ decision_variables: tuple[str, ...] | None = None,
+ ) -> None:
+ """
+ Load loss model parameters.
+
+ Parameters
+ ----------
+ data_paths: list of (string | DataFrame)
+ List of paths to data or files with loss model
+ information. Default XY datasets can be accessed as
+ PelicunDefault/XY. Order matters. Parameters defined in
+ prior elements in the list take precedence over the same
+            parameters in subsequent data paths, i.e., place the
+            default datasets at the end of the list.
+ decision_variables: tuple
+ Defines the decision variables to be included in the loss
+ calculations. Defaults to those supported, but fewer can be
+ used if desired. When fewer are used, the loss parameters for
+ those not used will not be required.
+
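+        Examples
+        --------
+        A minimal sketch (illustrative; `asmnt` is an assessment object
+        and the file names are assumed):
+
+        >>> # user-defined parameters take precedence over the default
+        >>> # dataset listed after them
+        >>> asmnt.loss.load_model_parameters(
+        ...     ['custom_repair_db.csv', 'PelicunDefault/loss_repair_DB.csv']
+        ... )
+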
+ """
+ if decision_variables is not None:
+            # still applied for backwards compatibility
+ self.decision_variables = decision_variables
+ self.log.warning(
+ 'The `decision_variables` argument has been removed. '
+ 'Please set your desired decision variables like so: '
+ '{assessment object}.loss.decision_variables '
+ "= ('dv1', 'dv2', ...) before calling "
+ '{assessment object}.add_loss_map().'
+ )
+
+ self.log.div()
+ self.log.msg('Loading loss parameters...')
+
+ # replace `PelicunDefault/` flag with default data path
+ data_paths = file_io.substitute_default_path(data_paths)
+
+ #
+ # load loss parameter data into the models
+ #
+
+ for data_path in data_paths:
+ self._load_from_data_path(data_path)
+
+ self.log.msg(
+ 'Loss model parameters loaded successfully.', prepend_timestamp=False
+ )
+
+ #
+ # remove items
+ #
+
+ self.log.msg(
+ 'Removing unused loss model parameters.', prepend_timestamp=False
+ )
+
+ assert self._loss_map is not None
+ for loss_model in self._loss_models:
+ # drop unused loss parameter definitions
+ loss_model.drop_unused_loss_parameters(self._loss_map)
+ # remove components with incomplete loss parameters
+ loss_model.remove_incomplete_components()
+
+ # drop unused damage state columns
+ self.ds_model.drop_unused_damage_states()
+
+ #
+ # obtain DV units
+ #
+ dv_units: dict = {}
+ if self.ds_model.loss_params is not None:
+ dv_units.update(
+ self.ds_model.loss_params['DV', 'Unit']
+ .groupby(level=[1])
+ .first()
+ .to_dict()
+ )
+ if self.lf_model.loss_params is not None:
+ dv_units.update(
+ self.lf_model.loss_params['DV', 'Unit']
+ .groupby(level=[1])
+ .first()
+ .to_dict()
+ )
+ self.dv_units = dv_units
+
+ #
+ # convert units
+ #
+
+ self.log.msg(
+ 'Converting loss model parameter units.', prepend_timestamp=False
+ )
+ for loss_model in self._loss_models:
+ loss_model.convert_loss_parameter_units()
+
+ #
+ # verify loss parameter availability
+ #
+
+ self.log.msg(
+ 'Checking loss model parameter '
+ 'availability for all components in the asset model.',
+ prepend_timestamp=False,
+ )
+ self._ensure_loss_parameter_availability()
+
+ def _load_from_data_path(self, data_path: str | pd.DataFrame) -> None:
+ if 'bldg_repair_DB' in data_path:
+ data_path = data_path.replace('bldg_repair_DB', 'loss_repair_DB')
+ self.log.warning(
+ '`bldg_repair_DB` is deprecated and will '
+ 'be dropped in future versions of pelicun. '
+ 'Please use `loss_repair_DB` instead.'
+ )
+ data = file_io.load_data(
+ data_path, None, orientation=1, reindex=False, log=self._asmnt.log
+ )
+ assert isinstance(data, pd.DataFrame)
+
+ # Check for unit consistency
+ data.index.names = ['cmp', 'dv']
+ units_isolated = data.reset_index()[[('dv', ''), ('DV', 'Unit')]]
+ units_isolated.columns = pd.Index(['dv', 'Units'])
+ units_isolated_grp = units_isolated.groupby('dv')['Units']
+ unit_counts = units_isolated_grp.nunique()
+ more_than_one = unit_counts[unit_counts > 1]
+ if not more_than_one.empty:
+ raise InconsistentUnitsError
+
+ # determine if the loss model parameters are for damage
+ # states or loss functions
+ if _is_for_ds_model(data):
+ self.ds_model.load_model_parameters(data)
+ elif _is_for_lf_model(data):
+ self.lf_model.load_model_parameters(data)
+ else:
+ msg = f'Invalid loss model parameters: {data_path}'
+ raise ValueError(msg)
+
+ def calculate(self) -> None:
+ """
+ Calculate the loss of each component block.
+
+ Note: This method simply calculates the loss of each component
+ block without any special treatment to `replacement`
+ consequences. This can be done at a later step with the
+ `aggregate_losses` method.
+
+ Raises
+ ------
+ ValueError
+ If the size of the demand sample and the damage sample
+ don't match.
+
+ """
+ self.log.div()
+ self.log.msg('Calculating losses...')
+
+ # Get the damaged quantities in each damage state for each
+ # component of interest.
+ demand = self._asmnt.demand.sample
+ assert demand is not None
+ demand_offset = self._asmnt.options.demand_offset
+ assert demand_offset is not None
+ nondirectional_multipliers = self._asmnt.options.nondir_multi_dict
+ assert nondirectional_multipliers is not None
+ assert self._asmnt.asset.cmp_sample is not None
+ cmp_sample = self._asmnt.asset.cmp_sample.to_dict('series')
+ cmp_marginal_params = self._asmnt.asset.cmp_marginal_params
+ assert cmp_marginal_params is not None
+ if self._asmnt.damage.ds_model.sample is not None:
+ # TODO(JVM): FIND A WAY to avoid making a copy of this.
+ dmg_quantities = self._asmnt.damage.ds_model.sample.copy()
+ if len(demand) != len(dmg_quantities):
+ msg = (
+ f'The demand sample contains {len(demand)} realizations, '
+ f'but the damage sample contains {len(dmg_quantities)}. '
+ f'Loss calculation cannot proceed when '
+ f'these numbers are different. '
+ )
+ raise ValueError(msg)
+ self.ds_model.calculate(dmg_quantities)
+
+ self.lf_model.calculate(
+ demand,
+ cmp_sample,
+ cmp_marginal_params,
+ demand_offset,
+ nondirectional_multipliers,
+ )
+
+ self.log.msg('Loss calculation successful.')
+
+ def consequence_scaling(self, scaling_specification: str) -> None:
+ """
+ Apply scale factors to losses.
+
+ Applies scale factors to the loss sample according to the
+ given scaling specification. The scaling specification should
+ be a path to a CSV file. It should contain a `Decision
+ Variable` column with a specified decision variable in each
+ row. Other optional columns are `Component`, `Location`,
+ `Direction`. Each row acts as an independent scaling
+ operation, with the scale factor defined in the `Scale Factor`
+ column, which is required. If any value is missing in the
+ optional columns, it is assumed that the scale factor should
+ be applied to all entries of the loss sample where the other
+ column values match. For example, if the specification has a
+ single row with `Decision Variable` set to 'Cost', `Scale
+ Factor` set to 2.0, and no other columns, this will double the
+ 'Cost' DV. If instead `Location` was also set to `1`, it would
+ double the Cost of all components that have that location. The
+ columns `Location` and `Direction` can contain ranges, like
+ this: `1--3` means `1`, `2`, and `3`. If a range is used in
+ both `Location` and `Direction`, the factor of that row will
+ be applied once to all combinations.
+
+ Parameters
+ ----------
+ scaling_specification: str
+ Path to a CSV file containing the scaling specification.
+
+ Raises
+ ------
+ ValueError
+ If required columns are missing or contain NaNs.
+
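+        Examples
+        --------
+        A minimal scaling specification CSV (illustrative; the
+        component ID is assumed)::
+
+            Decision Variable,Component,Location,Direction,Scale Factor
+            Cost,,,,2.0
+            Time,cmp.A,1--3,,1.5
+
+        The first row doubles all 'Cost' losses; the second applies a
+        1.5 factor to the 'Time' losses of `cmp.A` at locations 1-3.
+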
+ """
+ # Specify expected dtypes from the start.
+ dtypes = {
+ 'Decision Variable': 'str',
+ 'Component': 'str',
+ 'Location': 'str',
+ 'Direction': 'str',
+ 'Scale Factor': 'float64',
+ }
+
+ scaling_specification_df = pd.read_csv(scaling_specification, dtype=dtypes)
+
+ if (
+ 'Decision Variable' not in scaling_specification_df.columns
+ or scaling_specification_df['Decision Variable'].isna().any()
+ ):
+ msg = (
+ 'The `Decision Variable` column is missing '
+ 'from the scaling specification or contains NaN values.'
+ )
+ raise ValueError(msg)
+ if (
+ 'Scale Factor' not in scaling_specification_df.columns
+ or scaling_specification_df['Scale Factor'].isna().any()
+ ):
+ msg = (
+ 'The `Scale Factor` column is missing '
+ 'from the scaling specification or contains NaN values.'
+ )
+ raise ValueError(msg)
+
+ # Add missing optional columns with NaN values
+ optional_cols = ['Component', 'Location', 'Direction']
+ for col in optional_cols:
+ if col not in scaling_specification_df.columns:
+ scaling_specification_df[col] = np.nan
+
+ # Rename the columns to the internally used values
+ name_map = {
+ 'Decision Variable': 'dv',
+ 'Component': 'dmg',
+ 'Location': 'loc',
+ 'Direction': 'dir',
+ 'Scale Factor': 'scaling',
+ }
+ scaling_specification_df = scaling_specification_df.rename(columns=name_map)
+
+ # Expand ranges in 'loc' and 'dir'
+ def _expand_range(col): # noqa: ANN001, ANN202
+ if pd.isna(col):
+ return [col]
+ if '--' in col:
+ start, end = (int(x) for x in col.split('--'))
+ return [str(x) for x in range(start, end + 1)]
+ return [col]
+
+ # Generate all combinations of loc and dir ranges
+ expanded_df = scaling_specification_df.apply( # type: ignore
+ lambda row: pd.DataFrame(
+ list(product(_expand_range(row['loc']), _expand_range(row['dir']))),
+ columns=['loc', 'dir'],
+ ).assign(dv=row['dv'], dmg=row['dmg'], scaling=row['scaling']),
+ axis=1,
+ )
+
+ expanded_df = pd.concat(expanded_df.values)
+
+ # Now, for each unique combination in expanded_df, apply
+ # consequence scaling
+ for _, row in expanded_df.iterrows():
+ scaling_conditions = {
+ k: row[k] for k in ('dv', 'dmg', 'loc', 'dir') if not pd.isna(row[k])
+ }
+ self._apply_consequence_scaling(
+ scaling_conditions, row['scaling'], raise_missing=False
+ )
+
+ def _apply_consequence_scaling(
+ self,
+ scaling_conditions: dict,
+ scale_factor: float,
+ *,
+ raise_missing: bool = True,
+ ) -> None:
+ """
+ Apply a scale factor to selected loss sample columns.
+
+ The scaling conditions are passed as a dictionary mapping
+ level names with their required value for the condition to be
+ met. It has to contain `dv` as one of its keys, defining the
+ decision variable where the factors should be applied. Other
+ valid levels include:
+ - `dmg`: containing a source component name,
+ - `loc`: containing a location,
+ - `dir`: containing a direction,
+ - `uid`: containing a Unique Component ID (UID).
+
+ If any of the keys is missing, it is assumed that the scaling
+ factor should be applied to all relevant consequences (those
+ matching the remaining values of the hierarchical index).
+
+ Parameters
+ ----------
+ scaling_conditions: dict
+ A dictionary mapping level names with a single value. Only the
+ rows where the index levels have the provided values will be
+ affected. The dictionary can be empty, in which case all rows
+ will be affected, or contain only some levels and values, in
+ which case only the matching rows will be affected.
+ scale_factor: float
+ Scale factor to use.
+ raise_missing: bool
+ Raise an error if no rows are matching the given conditions.
+
+ Raises
+ ------
+ ValueError
+ If the scaling_conditions dictionary does not contain a
+ `dv` key.
+
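+        Examples
+        --------
+        A minimal sketch (illustrative; `loss_model` is a LossModel
+        instance and the component ID is assumed):
+
+        >>> # double the 'Cost' losses of component `cmp.A` in
+        >>> # direction '1', wherever such columns exist
+        >>> loss_model._apply_consequence_scaling(
+        ...     {'dv': 'Cost', 'dmg': 'cmp.A', 'dir': '1'}, 2.0
+        ... )
+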
+ """
+ # make sure we won't apply the same factor to all DVs at once,
+ # highly unlikely anyone would actually want to do this.
+ if 'dv' not in scaling_conditions:
+ msg = (
+ 'The index of the `scaling_conditions` dictionary '
+ 'should contain a level named `dv` listing the '
+ 'relevant decision variable.'
+ )
+ raise ValueError(msg)
+
+ for model in self._loss_models:
+ # check if it's empty
+ if model.sample is None:
+ continue
+
+ # ensure the levels exist (but don't check if specified
+ # values exist yet)
+ for name in scaling_conditions:
+ if name not in model.sample.columns.names:
+ msg = (
+ f'`scaling_conditions` contains an unknown level: `{name}`.'
+ )
+ raise ValueError(msg)
+
+ # apply scale factors
+ base.multiply_factor_multiple_levels(
+ model.sample,
+ scaling_conditions,
+ scale_factor,
+ axis=1,
+ raise_missing=raise_missing,
+ )
+
+ def save_sample(
+ self, filepath: str | None = None, *, save_units: bool = False
+ ) -> None | pd.DataFrame | tuple[pd.DataFrame, pd.Series]:
+ """
+        Save the sample of the `ds_model` (deprecated).
+
+ Returns
+ -------
+ tuple
+ The output of {loss model}.ds_model.save_sample.
+
+ """
+ self.log.warning(
+ '`{loss model}.save_sample` is deprecated and will raise '
+ 'in future versions of pelicun. Please use '
+ '{loss model}.ds_model.save_sample instead.'
+ )
+ return self.ds_model.save_sample(filepath=filepath, save_units=save_units)
+
+ def load_sample(self, filepath: str | pd.DataFrame) -> None:
+ """
+        Load a sample into the `ds_model` (deprecated).
+
+ """
+ self.log.warning(
+ '`{loss model}.load_sample` is deprecated and will raise '
+ 'in future versions of pelicun. Please use '
+ '{loss model}.ds_model.load_sample instead.'
+ )
+ dv_units = self.ds_model.load_sample(filepath=filepath)
+ self.dv_units = dv_units
+
+ def aggregate_losses( # noqa: C901
+ self,
+ replacement_configuration: (
+ tuple[uq.RandomVariableRegistry, dict[str, float]] | None
+ ) = None,
+ loss_combination: dict | None = None,
+ *,
+ future: bool = False,
+ ) -> pd.DataFrame | tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Aggregate the losses produced by each component.
+
+ Parameters
+ ----------
+ replacement_configuration: Tuple, optional
+ Tuple containing a RandomVariableRegistry and a
+ dictionary. The RandomVariableRegistry is defining
+ building replacement consequence RVs for the active
+ decision variables. The dictionary defines exceedance
+ thresholds. If the aggregated value for a decision
+ variable (conditioned on no replacement) exceeds the
+ threshold, then replacement is triggered. This can happen
+ for multiple decision variables at the same
+ realization. The consequence keyword `replacement` is
+ reserved to represent exclusive triggering of the
+ replacement consequences, and other consequences are
+ ignored for those realizations where replacement is
+ triggered. When assigned to None, then `replacement` is
+ still treated as an exclusive consequence (other
+ consequences are set to zero when replacement is nonzero)
+ but it is not being additionally triggered by the
+ exceedance of any thresholds. The aggregated loss sample
+ contains an additional column with information on whether
+ replacement was already present or triggered by a
+ threshold exceedance for each realization.
+ loss_combination: dict, optional
+ Dictionary defining how losses for specific components
+ should be aggregated for a given decision variable. It has
+ the following structure: {`dv`: {(`c1`, `c2`): `arr`,
+ ...}, ...}, where `dv` is some decision variable, (`c1`,
+ `c2`) is a tuple defining a component pair, `arr` is a NxN
+ numpy array defining a combination table, and `...` means
+ that more key-value pairs with the same schema can exist
+ in the dictionaries. The loss sample is expected to
+ contain columns that include both `c1` and `c2` listed as
+ the component. The combination is applied to all pairs of
+ columns where the components are `c1` and `c2`, and all of
+ the rest of the multiindex levels match (`loc`, `dir`,
+ `uid`). This means, for example, that when combining wind
+ and flood losses, the asset model should contain both a
+ wind and a flood component defined at the same
+ location-direction. `arr` can also be an M-dimensional
+ numpy array where each dimension has length N (NxNx...xN).
+ This structure allows for the loss combination of M
+ components. In this case the (`c1`, `c2`) tuple should
+ contain M elements instead of two.
+ future: bool, optional
+ Defaults to False. When set to True, it enables the
+ updated return type.
+
+ Notes
+ -----
+ Regardless of the value of the arguments, this method does not
+ alter the state of the loss model, i.e., it does not modify
+ the values of the `.sample` attributes.
+
+ Returns
+ -------
+ dataframe or tuple
+ Dataframe with the aggregated loss of each realization,
+ and another boolean dataframe with information on which DV
+ thresholds were exceeded in each realization, triggering
+ replacement. If no thresholds are specified it only
+ contains False values. The second dataframe is only
+ returned with `future` set to True.
+
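+        Examples
+        --------
+        A minimal sketch (illustrative; `asmnt` is an assessment object
+        and the component IDs and combination array are assumed):
+
+        >>> arr = np.array([[0.0, 1.0], [1.0, 1.5]])
+        >>> agg, exceedance = asmnt.loss.aggregate_losses(
+        ...     loss_combination={'Cost': {('cmp.wind', 'cmp.flood'): arr}},
+        ...     future=True,
+        ... )
+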
+ """
+ # TODO(JVM): When we start working on the documentation,
+ # simplify the docstring above and point the relevant detailed
+ # section in the documentation.
+
+ # validate input
+ if replacement_configuration is not None:
+ self._validate_input_replacement_thresholds(replacement_configuration)
+ # validate loss_combination input
+ if loss_combination is not None:
+ self._validate_input_loss_combination(loss_combination)
+
+ #
+        # operate on copies of the loss samples to avoid altering them.
+ #
+
+ if self.ds_model.sample is not None:
+ ds_sample = self.ds_model.sample.copy()
+ else:
+ ds_sample = None
+ if self.lf_model.sample is not None:
+ lf_sample = self.lf_model.sample.copy()
+ else:
+ lf_sample = None
+
+ def _construct_columns() -> list[str]:
+ columns = [
+ f'repair_{x.lower()}' for x in self.decision_variables if x != 'Time'
+ ]
+ # Note: The `Time` DV gets special treatment.
+ # create the summary DF
+ if 'Time' in self.decision_variables:
+ columns.extend(('repair_time-sequential', 'repair_time-parallel'))
+ return columns
+
+ if ds_sample is None and lf_sample is None:
+ self.log.msg('There are no losses.')
+ return pd.DataFrame(0.00, index=[0], columns=_construct_columns())
+
+ #
+ # handle `replacement`, regardless of whether
+ # `replacement_thresholds` is empty. (if `replacement`
+        # occurs, we ignore the losses from other components)
+ #
+
+ if ds_sample is not None:
+ self._make_replacement_exclusive(ds_sample, lf_sample)
+
+ #
+ # combine samples
+ #
+
+ # levels to preserve (this aggregates `ds` for the ds_model)
+ column_levels = ['dv', 'loss', 'dmg', 'loc', 'dir', 'uid']
+ combined_sample = self.sample
+ sample = (
+ combined_sample.groupby(by=column_levels, axis=1) # type: ignore
+ .sum()
+ .sort_index(axis=1)
+ )
+
+ #
+ # perform loss combinations (special non-additive
+ # aggregations, e.g., Wind + Flood)
+ #
+
+ if loss_combination is not None:
+ sample = self._apply_loss_combinations(loss_combination, sample)
+
+ #
+ # consider replacement threshold values
+ #
+
+ sample, exceedance_bool_df = self._apply_replacement_thresholds(
+ sample, replacement_configuration
+ )
+
+ # Sum-up component losses
+ df_agg = self._aggregate_sample(sample, _construct_columns())
+
+ if not future:
+ self.log.warning(
+ '`aggregate_losses` has been expanded to support the '
+ 'consideration of the exceedance of loss threshold '
+ 'values leading to asset replacement '
+ '(like excessive repair costs). The new implementation '
+ 'returns a tuple where the first element is the '
+ 'aggregated losses and the second contains information '
+ 'on which decision variables triggered replacement '
+                'considering the specified replacement thresholds. '
+ 'To obtain the new output and silence this warning, '
+ 'please specify `future=True` as an argument to this method.'
+ )
+ return df_agg
+
+ return df_agg, exceedance_bool_df
+
+ def _validate_input_loss_combination(self, loss_combination: dict) -> None:
+ for dv, combinations in loss_combination.items():
+ if dv not in self.decision_variables:
+ msg = (
+ f'`loss_combination` contains the key '
+ f'`{dv}` which is not found in the active '
+ f'decision variables. These are: '
+ f'{self.decision_variables}.'
+ )
+ raise ValueError(msg)
+ for components, array in combinations.items():
+ if not isinstance(components, tuple):
+ msg = (
+ f'Invalid type for components in loss combination '
+ f'for `{dv}`: {type(components)}. It should be a tuple.'
+ )
+ raise TypeError(msg)
+ if not all(isinstance(c, str) for c in components):
+ msg = (
+ f'All elements of the components tuple in loss '
+ f'combination for `{dv}` should be strings.'
+ )
+ raise ValueError(msg)
+ if not isinstance(array, np.ndarray):
+ msg = (
+ f'Invalid type for array in loss combination '
+ f'for `{dv}`: {type(array)}. It should be a numpy array.'
+ )
+ raise TypeError(msg)
+
+ def _validate_input_replacement_thresholds(
+ self,
+ replacement_configuration: tuple[
+ uq.RandomVariableRegistry, dict[str, float]
+ ],
+ ) -> None:
+ replacement_consequence_rv_reg, replacement_ratios = (
+ replacement_configuration
+ )
+ if not isinstance(replacement_consequence_rv_reg, uq.RandomVariableRegistry):
+ msg = (
+ f'Invalid type for replacement consequence RV registry: '
+ f'{type(replacement_consequence_rv_reg)}. It should be '
+ f'uq.RandomVariableRegistry.'
+ )
+ raise TypeError(msg)
+ for key in replacement_consequence_rv_reg.RV:
+ if key not in self.decision_variables:
+ msg = (
+ f'`replacement_consequence_RV_reg` contains the key '
+ f'`{key}` which is not found in the active '
+ f'decision variables. These are: '
+ f'{self.decision_variables}.'
+ )
+ if self.query_error_setup(
+ 'Loss/ReplacementThreshold/RaiseOnUnknownKeys'
+ ):
+ raise ValueError(msg)
+ self.log.warning(msg)
+
+ for key in replacement_ratios:
+ if key not in self.decision_variables:
+ msg = (
+ f'`replacement_ratios` contains the key '
+ f'`{key}` which is not found in the active '
+ f'decision variables. These are: '
+ f'{self.decision_variables}.'
+ )
+ if self.query_error_setup(
+ 'Loss/ReplacementThreshold/RaiseOnUnknownKeys'
+ ):
+ raise ValueError(msg)
+ self.log.warning(msg)
+ # The replacement_consequence_RV_reg should contain an RV for
+ # all active DVs, regardless of whether there is a replacement
+        # threshold for that DV, because when replacement is
+ # triggered, we need to assign a consequence for all DVs.
+ for key in self.decision_variables:
+ if key not in replacement_consequence_rv_reg.RV:
+                msg = f'Missing replacement consequence RV for `{key}`.'
+ raise ValueError(msg)
+
+ def _apply_loss_combinations(
+ self, loss_combination: dict, sample: pd.DataFrame
+ ) -> pd.DataFrame:
+ """
+ Perform loss combinations of specified components.
+
+ This function deconstructs the loss combination arrays,
+ identifies the combinable components, and applies the
+ specified loss combinations to the sample data. The
+ transformed sample, including the combined columns, is
+ returned as a new DataFrame.
+
+ Parameters
+ ----------
+ loss_combination: dict
+ A dictionary containing the loss combination
+ information. The structure is nested dictionaries where
+ the outer keys are decision variables, inner keys are
+ components to combine, and the values are array objects
+ representing the combination data.
+
+ sample: pandas.DataFrame
+ The input DataFrame containing the sample data. The
+ columns are assumed to be a MultiIndex with at least the
+ levels (decision_variable, loss_id, component_id,
+ location, direction, uid).
+
+ Returns
+ -------
+ pandas.DataFrame
+ A new DataFrame with the combined loss data.
+
+ """
+ # deconstruct combination arrays to extract the input domains
+ loss_combination_converted = self._deconstruct_loss_combination_arrays(
+ loss_combination
+ )
+
+ # initialize variables
+
+ # sample as dictionary for fast column retrieval
+ dsample = {col: sample[col] for col in sample.columns}
+
+ # Transformed sample (includes the combined columns), as
+ # dictionary. Will be turned into a dataframe in the end.
+ # This avoids manipulating the original sample dataframe which
+ # would be slow.
+ dcsample: dict = {}
+
+ # add columns to the new sample dictionary.
+ # those that should be combined
+ self._loss_combination_add_combinable(
+ dsample, loss_combination_converted, dcsample
+ )
+ # and the remaining
+ for col, val in dsample.items():
+ dcsample[col] = val # noqa: PERF403
+
+ # turn into a dataframe
+ return pd.DataFrame(dcsample).rename_axis(columns=sample.columns.names)
+
+ def _loss_combination_add_combinable(
+ self, dsample: dict, loss_combination_converted: dict, dcsample: dict
+ ) -> None:
+ """
+ Add combinable loss data.
+
+ This function identifies groups of `loc`-`dir`-`uid` that can
+ be combined for each decision variable and computes the
+ combined loss using interpolation functions. It modifies the
+ given datasets `dsample` and `dcsample` in-place, removing
+ combinable columns from dsample and adding the combined losses
+ to dcsample.
+
+ Parameters
+ ----------
+ dsample: dict
+ A dictionary representing the loss sample data, where keys
+ are tuples of the form (decision_variable, loss_id,
+ component_id, location, direction, uid) and values are the
+ corresponding data arrays.
+
+ loss_combination_converted: dict
+ A dictionary containing loss combination data. The
+ structure is nested dictionaries where the outer keys are
+ decision variables, inner keys are components to combine,
+ and the values are tuples of combination parameters
+ (domains and reference values).
+
+ dcsample: dict
+ A dictionary to store the combined loss data, where keys
+ are tuples of the form (decision_variable, 'combination',
+ combined_component_string, location, direction, uid) and
+ values are the combined loss data arrays.
+
+ """
+ dmg_to_loss = self._map_component_ids_to_loss_ids(dsample)
+
+ # identify all `loc`-`dir`-`uid`s that can be grouped for each
+ # decision variable.
+ potential_groups = self._identify_potential_groups(dsample)
+
+        # cache already defined interpolation functions: each one is
+        # defined on the spot when first needed and reused afterwards.
+ interpolation_function_cache: dict = {}
+
+ for (
+ decision_variable,
+ combination_data,
+ ) in loss_combination_converted.items():
+ for (
+ components_to_combine,
+ combination_parameters,
+ ) in combination_data.items():
+ # determine if the components to combine are part of
+ # an available group
+ target_group: dict | None = None
+ for available_group in potential_groups[decision_variable]:
+ # check if `components_to_combine` is a subset of
+ # that available group
+ if frozenset(components_to_combine) <= available_group:
+ target_group = available_group
+ break
+ assert target_group is not None
+ # construct relevant loss sample columns
+ for loc_dir_uid in potential_groups[decision_variable][target_group]:
+ cols = [
+ (decision_variable, dmg_to_loss[x], x, *loc_dir_uid)
+ for x in target_group
+ ]
+ values = np.column_stack([dsample[col] for col in cols])
+ # define/get interpolation function
+ if (
+ interpolation_function_cache.get(components_to_combine)
+ is not None
+ ):
+ interp_func = interpolation_function_cache.get(
+ components_to_combine
+ )
+ else:
+ domains, reference_values = combination_parameters
+ interp_func = RegularGridInterpolator(
+ domains, reference_values
+ )
+ assert interp_func is not None
+ combined_loss = interp_func(values)
+ combined_loss_col = (
+ decision_variable,
+ 'combination',
+ '(' + ', '.join(components_to_combine) + ')',
+ *loc_dir_uid,
+ )
+ dcsample[combined_loss_col] = combined_loss
+ for col in cols:
+ dsample.pop(col)
+
+ def _identify_potential_groups(self, dsample: dict) -> dict: # noqa: PLR6301
+ """
+ Identify potential groups of `loc`-`dir`-`uid` for each DV.
+
+ This function identifies all combinations of `loc`-`dir`-`uid`
+ that can be grouped for each decision variable based on the
+ provided data sample.
+
+ Parameters
+ ----------
+ dsample: iterable
+ An iterable where each containing tuple contains
+ information about the components and their attributes. The
+ expected format of each tuple is (decision_variable,
+ loss_id, component_id, location, direction, uid).
+
+ Returns
+ -------
+ dict
+ A dictionary where keys are decision variables and values
+ are nested dictionaries. The nested dictionaries map
+ frozensets of component IDs to lists of (location,
+ direction, uid) tuples.
+
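+        Examples
+        --------
+        A minimal sketch (illustrative; `loss_model` is a LossModel
+        instance):
+
+        >>> # two components sharing ('1', '1', '0') form one group
+        >>> cols = {
+        ...     ('Cost', 'c1', 'c1', '1', '1', '0'): None,
+        ...     ('Cost', 'c2', 'c2', '1', '1', '0'): None,
+        ... }
+        >>> loss_model._identify_potential_groups(cols)
+        >>> # e.g. {'Cost': {frozenset({'c1', 'c2'}): [('1', '1', '0')]}}
+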
+ """
+ grouped: defaultdict = defaultdict(defaultdict(list).copy)
+ for col in dsample:
+ c_dv, _, c_dmg, c_loc, c_dir, c_uid = col
+ grouped[c_dv][c_loc, c_dir, c_uid].append(c_dmg)
+ # invert so that we have component sets mapped to
+ # `loc`-`dir`-`uid`s.
+ inverted: defaultdict = defaultdict(defaultdict(list).copy)
+ for c_dv in grouped:
+ for loc_dir_uid, component_set in grouped[c_dv].items():
+ inverted[c_dv][frozenset(component_set)].append(loc_dir_uid)
+ return inverted
+
+ def _map_component_ids_to_loss_ids(self, dsample: dict) -> dict: # noqa: PLR6301
+ """
+ Map component IDs to loss IDs.
+
+ This function maps components to losses based on the loss
+ sample's columns. It assumes that multiple component IDs can
+ have the same loss ID, but the same component ID cannot have
+ multiple loss IDs.
+
+ Parameters
+ ----------
+ dsample: tuple dictionary keys
+ Each tuple contains information about the components and
+ corresponding losses.
+
+ Returns
+ -------
+ dict
+ A dictionary where keys are component IDs and values are
+ loss IDs.
+
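+        Examples
+        --------
+        A minimal sketch (illustrative; `loss_model` is a LossModel
+        instance):
+
+        >>> # column: (dv, loss_id, component_id, loc, dir, uid)
+        >>> loss_model._map_component_ids_to_loss_ids(
+        ...     {('Cost', 'loss.X', 'cmp.A', '1', '1', '0'): None}
+        ... )
+        >>> # -> {'cmp.A': 'loss.X'}
+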
+ """
+ dmg_to_loss = {}
+ for col in dsample:
+ c_loss = col[1]
+ c_dmg = col[2]
+ dmg_to_loss[c_dmg] = c_loss
+ return dmg_to_loss
+
+ def _deconstruct_loss_combination_arrays(self, loss_combination: dict) -> dict: # noqa: PLR6301
+ """
+ Deconstruct loss combination arrays.
+
+ This function converts a nested dictionary of loss combination
+ arrays into a format suitable for further processing. It
+ extracts the array values and the index information from each
+ array.
+
+ Parameters
+ ----------
+ loss_combination: dict
+ A dictionary where keys are decision variables and values
+ are another dictionary. The inner dictionary has keys as
+ components to combine and values as numpy array
+ objects representing the combination data.
+
+ Returns
+ -------
+ dict
+ A dictionary with the same structure as the input
+ `loss_combination`. For each decision variable and
+ component combination, the array is replaced with a
+ tuple containing the combination domain and the combination
+ array itself.
+
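+        Examples
+        --------
+        A minimal sketch (illustrative; `loss_model` is a LossModel
+        instance): the first column and the first row of each 2D array
+        serve as the interpolation domains:
+
+        >>> arr = np.array([[0.0, 0.5, 1.0],
+        ...                 [0.5, 0.7, 1.0],
+        ...                 [1.0, 1.0, 1.2]])
+        >>> converted = loss_model._deconstruct_loss_combination_arrays(
+        ...     {'Cost': {('c1', 'c2'): arr}}
+        ... )
+        >>> # -> {'Cost': {('c1', 'c2'): ((arr[:, 0], arr[0, :]), arr)}}
+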
+ """
+ loss_combination_converted: dict = {}
+ for decision_variable, combination_data in loss_combination.items():
+ loss_combination_converted[decision_variable] = {}
+ for (
+ components_to_combine,
+ combination_array,
+ ) in combination_data.items():
+ combination_index = (
+ combination_array[:, 0],
+ combination_array[0, :],
+ )
+ loss_combination_converted[decision_variable][
+ components_to_combine
+ ] = (
+ combination_index,
+ combination_array,
+ )
+ return loss_combination_converted
+
+ def _aggregate_sample(self, sample: pd.DataFrame, columns: list) -> pd.DataFrame:
+ """
+ Sum up component losses.
+
+ Returns
+ -------
+ pd.DataFrame
+ Dataframe with the aggregated losses.
+
+ """
+ df_agg = pd.DataFrame(index=sample.index, columns=columns)
+ # group results by DV type and location
+ aggregated = sample.groupby(
+ level=['dv', 'loc'],
+ axis=1, # type: ignore
+ ).sum()
+
+ for decision_variable in self.decision_variables:
+ # Time
+ if (
+ decision_variable == 'Time'
+ and 'Time' in aggregated.columns.get_level_values('dv')
+ ):
+ df_agg['repair_time-sequential'] = aggregated['Time'].sum(axis=1)
+
+ df_agg['repair_time-parallel'] = aggregated['Time'].max(axis=1)
+ elif (
+ decision_variable == 'Time'
+ and 'Time' not in aggregated.columns.get_level_values('dv')
+ ):
+ df_agg = df_agg.drop(
+ ['repair_time-parallel', 'repair_time-sequential'], axis=1
+ )
+ # All other
+ elif decision_variable in aggregated.columns.get_level_values('dv'):
+ df_agg[f'repair_{decision_variable.lower()}'] = aggregated[
+ decision_variable
+ ].sum(axis=1)
+ else:
+ df_agg = df_agg.drop(f'repair_{decision_variable.lower()}', axis=1)
+
+        # Convert units.
+ column_measures = [
+ x.replace('repair_', '')
+ .replace('-sequential', '')
+ .replace('-parallel', '')
+ for x in df_agg.columns.get_level_values(0)
+ ]
+ assert self.dv_units is not None
+ column_units = [self.dv_units[x.title()] for x in column_measures]
+ dv_units = pd.Series(column_units, index=df_agg.columns, name='Units')
+ res = file_io.save_to_csv(
+ df_agg,
+ None,
+ units=dv_units,
+ unit_conversion_factors=self._asmnt.unit_conversion_factors,
+ use_simpleindex=False,
+ log=self._asmnt.log,
+ )
+ assert isinstance(res, pd.DataFrame)
+ df_agg = res
+ df_agg = df_agg.drop('Units')
+ df_agg = df_agg.astype(float)
+
+ df_agg_mi = base.convert_to_MultiIndex(df_agg, axis=1)
+ assert isinstance(df_agg_mi, pd.DataFrame)
+ df_agg = df_agg_mi
+ df_agg = df_agg.sort_index(axis=1)
+ df_agg = df_agg.reset_index(drop=True)
+ assert isinstance(df_agg, pd.DataFrame)
+ return df_agg
+
+ def _apply_replacement_thresholds( # noqa: PLR6301
+ self,
+ sample: pd.DataFrame,
+ replacement_configuration: (
+ tuple[uq.RandomVariableRegistry, dict[str, float]] | None
+ ),
+ ) -> tuple[pd.DataFrame, pd.DataFrame]:
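+        """
+        Trigger replacement where DV thresholds are exceeded.
+
+        For each decision variable, sums the non-replacement losses of
+        the realizations where replacement has not already occurred
+        and compares the sum against the sampled replacement
+        consequence scaled by the corresponding replacement ratio.
+        Where any threshold is exceeded, the sampled replacement
+        consequences are assigned for all decision variables and the
+        other losses of those realizations are zeroed out. Returns the
+        updated sample and a boolean dataframe flagging the
+        exceedances.
+        """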
+ # If there is no `replacement_configuration`, simply return.
+ if replacement_configuration is None:
+ # `exceedance_bool_df` is empty in this case.
+ exceedance_bool_df = pd.DataFrame(index=sample.index, dtype=bool)
+ return sample, exceedance_bool_df
+
+ replacement_consequence_rv_reg, replacement_ratios = (
+ replacement_configuration
+ )
+ exceedance_bool_df = pd.DataFrame( # type: ignore
+ data=False,
+ index=sample.index,
+ columns=replacement_consequence_rv_reg.RV.keys(),
+ dtype=bool,
+ )
+
+ # Sample replacement consequences from the registry
+ replacement_consequence_rv_reg.generate_sample(len(sample), 'MonteCarlo')
+
+ sample_dvs = replacement_consequence_rv_reg.RV.keys()
+ for sample_dv in sample_dvs:
+ sub_sample = sample.loc[:, sample_dv]
+ if 'replacement' in sub_sample.columns.get_level_values('loss'):
+ # If `replacement` already exists as a consequence,
+ # determine the realizations where it is non-zero.
+ no_replacement_mask = (
+ ~(sub_sample['replacement'] > 0.00).any(axis=1).to_numpy()
+ )
+ no_replacement_columns = (
+ sub_sample.columns.get_level_values('loss') != 'replacement'
+ )
+ else:
+ # Otherwise there is no row where we already have replacement
+ no_replacement_mask = np.full(len(sub_sample), fill_value=True)
+ no_replacement_columns = np.full(
+ len(sub_sample.columns), fill_value=True
+ )
+ # Get the sum to compare with the thresholds
+ consequence_sum_given_no_replacement = sub_sample.iloc[ # type: ignore
+ no_replacement_mask, no_replacement_columns
+ ].sum(axis=1)
+ if not consequence_sum_given_no_replacement.empty:
+ consequence_values = replacement_consequence_rv_reg.RV[
+ sample_dv
+ ].sample
+ assert consequence_values is not None
+ exceedance_mask = (
+ consequence_sum_given_no_replacement
+ > consequence_values[no_replacement_mask]
+ * replacement_ratios[sample_dv]
+ )
+ # Fill the remaining rows with False
+ if len(exceedance_mask) < len(sub_sample):
+ exceedance_mask = exceedance_mask.reindex(
+ sub_sample.index, fill_value=False
+ )
+ else:
+ exceedance_mask = pd.Series(
+ np.full(len(sub_sample), fill_value=False),
+ index=sub_sample.index,
+ )
+
+ # Monitor triggering of replacement
+ exceedance_bool_df[sample_dv] = exceedance_mask
+
+ # Assign replacement consequences where the threshold has been
+ # exceeded.
+ exceedance_realizations = exceedance_bool_df.any(axis=1)
+ # Assign replacement consequences: needs to include all DVs
+ for other_dv in replacement_consequence_rv_reg.RV:
+ col = (
+ other_dv,
+ 'replacement',
+ 'threshold_exceedance',
+ '0',
+ '1',
+ '0',
+ )
+ # If it doesn't exist, initialize and set to 0.00
+ if col not in sample:
+ sample[col] = 0.00
+ sample = sample.sort_index(axis=1)
+ # Assign replacement consequences
+ other_sample = replacement_consequence_rv_reg.RV[other_dv].sample
+ assert other_sample is not None
+ sample.loc[exceedance_realizations, col] = other_sample[
+ exceedance_realizations
+ ]
+ # Remove all other realized consequences from the realizations
+ # where the threshold was exceeded.
+ sample.loc[
+ exceedance_realizations,
+ sample.columns.get_level_values('dmg') != 'threshold_exceedance',
+ ] = 0.00
+
+ return sample, exceedance_bool_df
+
+ def _make_replacement_exclusive( # noqa: PLR6301
+ self, ds_sample: pd.DataFrame, lf_sample: pd.DataFrame | None
+ ) -> None:
+ """
+ Make the replacement consequence exclusive.
+
+ If `replacement` columns exist in `ds_sample`, this method
+ treats all nonzero loss values driven by `replacement` as
+ exclusive and zeroes out the loss values of all other columns
+ for the applicable rows.
+ """
+ # rows where replacement is non-zero
+ replacement_rows: list = []
+
+ # columns that correspond to the replacement consequence
+ replacement_columns = (
+ ds_sample.columns.get_level_values('loss') == 'replacement'
+ )
+ rows_df = ds_sample.iloc[:, replacement_columns]
+
+ if not rows_df.empty:
+ replacement_rows = (
+ np.argwhere(np.any(rows_df.to_numpy() > 0.0, axis=1))
+ .reshape(-1)
+ .tolist()
+ )
+ ds_sample.iloc[replacement_rows, ~replacement_columns] = 0.00 # type: ignore
+ if lf_sample is not None:
+ lf_sample.iloc[replacement_rows, :] = 0.00
+
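+ # A minimal sketch of the exclusivity pattern above, using plain
+ # pandas/numpy with hypothetical column labels:
+ #
+ #     cols = pd.MultiIndex.from_tuples(
+ #         [('replacement', '1'), ('repair', '1')], names=['loss', 'loc']
+ #     )
+ #     ds = pd.DataFrame([[0.0, 5.0], [2.0, 7.0]], columns=cols)
+ #     repl = ds.columns.get_level_values('loss') == 'replacement'
+ #     rows = np.argwhere(
+ #         np.any(ds.to_numpy()[:, repl] > 0.0, axis=1)
+ #     ).reshape(-1)
+ #     ds.iloc[rows, ~repl] = 0.0  # zero out non-replacement losses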
+ @property
+ def _loss_models(self) -> tuple[RepairModel_DS, RepairModel_LF]:
+ return (self.ds_model, self.lf_model)
+
+ @property
+ def _loss_map(self) -> pd.DataFrame | None:
+ """
+ Returns the loss map.
+
+ Returns
+ -------
+ pd.DataFrame
+ The loss map.
+
+ """
+ # Retrieve the DataFrame from one of the included loss models.
+ # We use a single loss map for all.
+ return self.ds_model.loss_map
+
+ @_loss_map.setter
+ def _loss_map(self, loss_map: pd.DataFrame) -> None:
+ """
+ Set the loss map.
+
+ Parameters
+ ----------
+ loss_map: pd.DataFrame
+ The loss map.
+
+ """
+ # Add the DataFrame to the included loss models.
+ # We use a single loss map for all.
+ for model in self._loss_models:
+ model.loss_map = loss_map
+
+ @property
+ def _missing(self) -> set[tuple[str, str]]:
+ """
+ Returns the missing components.
+
+ Returns
+ -------
+ set
+ Set containing tuples identifying missing loss parameter
+ definitions.
+
+ """
+ return self.ds_model.missing
-"""
+ @_missing.setter
+ def _missing(self, missing: set[tuple[str, str]]) -> None:
+ """
+ Assign missing parameter definitions to the loss models.
-import warnings
-import numpy as np
-import pandas as pd
-from .pelicun_model import PelicunModel
-from .. import base
-from .. import uq
-from .. import file_io
+ Parameters
+ ----------
+ missing: set
+ Set containing tuples identifying missing loss parameter
+ definitions.
+ """
+ for model in self._loss_models:
+ model.missing = missing
+
+ def _ensure_loss_parameter_availability(self) -> None:
+ """Make sure that all components have loss parameters."""
+ #
+ # Repair Models (currently the only type supported)
+ #
+
+ required = []
+ assert self._loss_map is not None
+ for dv in self.decision_variables:
+ required.extend(
+ [(component, dv) for component in self._loss_map['Repair']]
+ )
+ missing_set = set(required)
-idx = base.idx
+ for model in (self.ds_model, self.lf_model):
+ missing_set -= model.get_available()
+ if missing_set:
+ self.log.warning(
+ f'The loss model does not provide '
+ f'loss information for the following component(s) '
+ f'in the asset model: {sorted(missing_set)}.'
+ )
-class LossModel(PelicunModel):
- """
- Parent object for loss models.
+ self._missing = missing_set
- All loss assessment methods should be children of this class.
- Parameters
- ----------
+class RepairModel_Base(PelicunModel):
+ """Base class for loss models."""
- """
+ __slots__ = [
+ 'consequence',
+ 'decision_variables',
+ 'loss_map',
+ 'loss_params',
+ 'missing',
+ 'sample',
+ ]
+
+ def __init__(self, assessment: AssessmentBase) -> None:
+ """
+ Initialize RepairModel_Base objects.
- def __init__(self, assessment):
+ Parameters
+ ----------
+ assessment: pelicun.AssessmentBase
+ Parent assessment
+
+ """
super().__init__(assessment)
- self.sample = None
- self.loss_map = None
- self.loss_params = None
- self.loss_type = 'Generic'
+ self.loss_params: pd.DataFrame | None = None
+ self.sample: pd.DataFrame | None = None
+ self.consequence = 'Repair'
+ self.decision_variables: tuple[str, ...] = ()
+ self.loss_map: pd.DataFrame | None = None
+ self.missing: set = set()
+
+ def load_model_parameters(self, data: pd.DataFrame) -> None:
+ """
+ Load model parameters from a DataFrame.
+
+ Extends those already available. Parameters already defined
+ take precedence, i.e. redefinitions of parameters are ignored.
+
+ Parameters
+ ----------
+ data: DataFrame
+ Data with loss model information.
+
+ """
+ data.index.names = ['Loss Driver', 'Decision Variable']
+
+ if self.loss_params is not None:
+ data = pd.concat((self.loss_params, data), axis=0)
+
+ # drop redefinitions of components
+ data = (
+ data.groupby(level=[0, 1]).first().transform(lambda x: x.fillna(np.nan))
+ )
+ # note: .groupby introduces None entries. We replace them with
+ # NaN for consistency.
+
+ self.loss_params = data
+
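+ # The "first definition wins" behavior relies on `groupby().first()`
+ # over the (Loss Driver, Decision Variable) index; a minimal sketch:
+ #
+ #     mi = pd.MultiIndex.from_tuples([('cmp.A', 'Cost')])
+ #     old = pd.DataFrame({'Theta_0': [1.0]}, index=mi)
+ #     new = pd.DataFrame({'Theta_0': [9.0]}, index=mi)
+ #     merged = pd.concat((old, new)).groupby(level=[0, 1]).first()
+ #     # merged.loc[('cmp.A', 'Cost'), 'Theta_0'] -> 1.0, so the
+ #     # redefinition is ignored.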
+ def drop_unused_loss_parameters(self, loss_map: pd.DataFrame) -> None:
+ """
+ Remove loss parameter definitions.
+
+ Applicable to component IDs not present in the loss map.
+
+ Parameters
+ ----------
+ loss_map: pd.DataFrame
+ DataFrame that maps component IDs to their loss
+ parameter definitions. Components in the asset model
+ that are omitted from the provided loss map are mapped
+ to the same ID.
+
+ """
+ if self.loss_params is None:
+ return
+
+ # Handle the deprecated `BldgRepair` loss map column name.
+ if 'BldgRepair' in loss_map.columns:
+ loss_map['Repair'] = loss_map['BldgRepair']
+ loss_map = loss_map.drop('BldgRepair', axis=1)
+ self.log.warning(
+ '`BldgRepair` as a loss map column name is '
+ 'deprecated and will be dropped in '
+ 'future versions of pelicun. Please use `Repair` instead.'
+ )
+
+ # get a list of unique component IDs
+ cmp_set = set(loss_map['Repair'].unique())
+
+ cmp_mask = self.loss_params.index.get_level_values(0).isin(cmp_set, level=0)
+ self.loss_params = self.loss_params.iloc[cmp_mask, :]
+
+ def remove_incomplete_components(self) -> None:
+ """
+ Remove incomplete components.
+
+ Removes components that have incomplete loss model
+ definitions from the loss model parameters.
+
+ """
+ if self.loss_params is None:
+ return
+
+ if ('Incomplete', '') not in self.loss_params.columns:
+ return
+
+ cmp_incomplete_idx = self.loss_params.loc[
+ self.loss_params['Incomplete', ''] == 1
+ ].index
+
+ self.loss_params = self.loss_params.drop(cmp_incomplete_idx)
+
+ if len(cmp_incomplete_idx) > 0:
+ self.log.msg(
+ f'\n'
+ f'WARNING: Loss model information is incomplete for '
+ f'the following component(s) '
+ f'{cmp_incomplete_idx.to_list()}. They '
+ f'were removed from the analysis.'
+ f'\n',
+ prepend_timestamp=False,
+ )
+
+ def get_available(self) -> set:
+ """
+ Get a set of components with available loss parameters.
+
+ Returns
+ -------
+ set
+ Set of components with available loss parameters.
+ """
+ if self.loss_params is not None:
+ cmp_list = self.loss_params.index.to_list()
+ return set(cmp_list)
+ return set()
+
+ @abstractmethod
+ def convert_loss_parameter_units(self) -> None:
+ """Convert previously loaded loss parameters to base units."""
+
+
+class RepairModel_DS(RepairModel_Base):
+ """Repair consequences for components with damage states."""
- def save_sample(self, filepath=None, save_units=False):
+ __slots__ = ['RV_reg']
+
+ def save_sample(
+ self, filepath: str | None = None, *, save_units: bool = False
+ ) -> None | pd.DataFrame | tuple[pd.DataFrame, pd.Series]:
"""
- Saves the loss sample to a CSV file or returns it as a
- DataFrame with optional units.
+ Save or return the loss sample.
This method handles the storage of a sample of loss estimates,
which can either be saved directly to a file or returned as a
@@ -99,11 +1688,11 @@ def save_sample(self, filepath=None, save_units=False):
Parameters
----------
- filepath : str, optional
+ filepath: str, optional
The path to the file where the loss sample should be
saved. If not provided, the sample is not saved to disk
but returned.
- save_units : bool, default: False
+ save_units: bool, default: False
Indicates whether to include a row with unit information
in the returned DataFrame. This parameter is ignored if a
file path is provided.
@@ -114,548 +1703,580 @@ def save_sample(self, filepath=None, save_units=False):
If `filepath` is provided, the function returns None after
saving the data.
If no `filepath` is specified, returns:
- - DataFrame containing the loss sample.
- - Optionally, a Series containing the units for each
- column if `save_units` is True.
+ * DataFrame containing the loss sample.
+ * Optionally, a Series containing the units for each
+ column if `save_units` is True.
- Raises
- ------
- IOError
- Raises an IOError if there is an issue saving the file to
- the specified `filepath`.
"""
- self.log_div()
+ self.log.div()
if filepath is not None:
- self.log_msg('Saving loss sample...')
+ self.log.msg('Saving loss sample...')
- cmp_units = self.loss_params[('DV', 'Unit')]
- dv_units = pd.Series(index=self.sample.columns, name='Units', dtype='object')
+ assert self.sample is not None
+ assert self.loss_params is not None
+ cmp_units = self.loss_params['DV', 'Unit'].sort_index()
+ dv_units = pd.Series(
+ index=self.sample.columns, name='Units', dtype='object'
+ ).sort_index()
valid_dv_types = dv_units.index.unique(level=0)
valid_cmp_ids = dv_units.index.unique(level=1)
for cmp_id, dv_type in cmp_units.index:
if (dv_type in valid_dv_types) and (cmp_id in valid_cmp_ids):
- dv_units.loc[(dv_type, cmp_id)] = cmp_units.at[(cmp_id, dv_type)]
+ dv_units.loc[dv_type, cmp_id] = cmp_units.loc[cmp_id, dv_type]
res = file_io.save_to_csv(
self.sample,
- filepath,
+ Path(filepath) if filepath is not None else None,
units=dv_units,
unit_conversion_factors=self._asmnt.unit_conversion_factors,
use_simpleindex=(filepath is not None),
log=self._asmnt.log,
)
-
if filepath is not None:
- self.log_msg('Loss sample successfully saved.', prepend_timestamp=False)
+ self.log.msg('Loss sample successfully saved.', prepend_timestamp=False)
return None
- # else:
- units = res.loc["Units"]
- res.drop("Units", inplace=True)
-
- if save_units:
- return res.astype(float), units
-
- return res.astype(float)
+ assert isinstance(res, pd.DataFrame)
- def load_sample(self, filepath):
- """
- Load damage sample data.
-
- """
- self.log_div()
- self.log_msg('Loading loss sample...')
+ units = res.loc['Units']
+ res = res.drop('Units')
+ res = res.astype(float)
+ assert isinstance(res, pd.DataFrame)
+ assert isinstance(units, pd.Series)
- self.sample = file_io.load_data(
- filepath, self._asmnt.unit_conversion_factors, log=self._asmnt.log
- )
+ if save_units:
+ return res, units
- self.log_msg('Loss sample successfully loaded.', prepend_timestamp=False)
+ return res
- def load_model(self, data_paths, mapping_path, decision_variables=None):
+ def load_sample(self, filepath: str | pd.DataFrame) -> dict[str, str]:
"""
- Load the list of prescribed consequence models and their parameters
+ Load loss sample data.
Parameters
----------
- data_paths: list of string or DataFrame
- List of paths to data files with consequence model
- parameters. Default XY datasets can be accessed as
- PelicunDefault/XY. The list can also contain DataFrame
- objects, in which case that data is used directly.
- mapping_path: string
- Path to a csv file that maps drivers (i.e., damage or edp data) to
- loss models.
- decision_variables: list of string, optional
- List of decision variables to include in the analysis. If None,
- all variables provided in the consequence models are included. When
- a list is provided, only variables in the list will be included.
- """
-
- self.log_div()
- self.log_msg(f'Loading loss map for {self.loss_type}...')
-
- loss_map = file_io.load_data(
- mapping_path, None, orientation=1, reindex=False, log=self._asmnt.log
- )
-
- loss_map['Driver'] = loss_map.index.values
- loss_map['Consequence'] = loss_map[self.loss_type]
- loss_map.index = np.arange(loss_map.shape[0])
- loss_map = loss_map.loc[:, ['Driver', 'Consequence']]
- loss_map.dropna(inplace=True)
-
- self.loss_map = loss_map
+ filepath: str or pd.DataFrame
+ Path to an existing sample stored in a file, or a
+ DataFrame containing the existing sample.
- self.log_msg("Loss map successfully parsed.", prepend_timestamp=False)
-
- self.log_div()
- self.log_msg(f'Loading loss parameters for {self.loss_type}...')
-
- # replace default flag with default data path
- data_paths = file_io.substitute_default_path(data_paths)
-
- data_list = []
- # load the data files one by one
- for data_path in data_paths:
- data = file_io.load_data(
- data_path, None, orientation=1, reindex=False, log=self._asmnt.log
- )
+ Returns
+ -------
+ dict[str, str]
+ Dictionary mapping each decision variable to its assigned
+ unit.
- data_list.append(data)
+ Raises
+ ------
+ ValueError
+ If the columns have an invalid number of levels.
- loss_params = pd.concat(data_list, axis=0)
+ """
+ names = ['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid']
+ self.log.div()
+ self.log.msg('Loading loss sample...')
- # drop redefinitions of components
- loss_params = (
- loss_params.groupby(level=[0, 1])
- .first()
- .transform(lambda x: x.fillna(np.nan))
+ sample, units = file_io.load_data(
+ filepath,
+ self._asmnt.unit_conversion_factors,
+ log=self._asmnt.log,
+ return_units=True,
)
- # note: .groupby introduces None entries. We replace them with
- # NaN for consistency.
-
- # keep only the relevant data
- loss_cmp = np.unique(self.loss_map['Consequence'].values)
-
- available_cmp = loss_params.index.unique(level=0)
- missing_cmp = []
- for cmp in loss_cmp:
- if cmp not in available_cmp:
- missing_cmp.append(cmp)
-
- if len(missing_cmp) > 0:
- self.log_msg(
- "\nWARNING: The loss model does not provide "
- "consequence information for the following component(s) "
- f"in the loss map: {missing_cmp}. They are removed from "
- "further analysis\n",
- prepend_timestamp=False,
+ assert isinstance(sample, pd.DataFrame)
+ assert isinstance(units, pd.Series)
+ units.index.names = names
+ # Obtain the DV units
+ # Note: we don't need to check for consistency (all rows
+ # having the same unit) since the units are extracted from a
+ # single row in the CSV, affecting all subsequent rows.
+ units_isolated = (
+ units.reset_index()[['dv', 'Units']]
+ .set_index('dv')
+ .groupby('dv')['Units']
+ )
+ dv_units = units_isolated.first().to_dict()
+
+ # check if `uid` level was provided
+ num_levels = len(sample.columns.names)
+ num_levels_without_uid = 6
+ num_levels_with_uid = num_levels_without_uid + 1
+ if num_levels == num_levels_without_uid:
+ sample.columns.names = names[:-1]
+ sample = base.dedupe_index(sample.T).T
+ elif num_levels == num_levels_with_uid:
+ sample.columns.names = names
+ else:
+ msg = (
+ f'Invalid loss sample: Column MultiIndex '
+ f'has an unexpected length: {num_levels}'
)
+ raise ValueError(msg)
- self.loss_map = self.loss_map.loc[~loss_map['Consequence'].isin(missing_cmp)]
- loss_cmp = np.unique(self.loss_map['Consequence'].values)
+ self.sample = sample
- loss_params = loss_params.loc[idx[loss_cmp, :], :]
+ self.log.msg('Loss sample successfully loaded.', prepend_timestamp=False)
- # drop unused damage states
- DS_list = loss_params.columns.get_level_values(0).unique()
- DS_to_drop = []
- for DS in DS_list:
- if np.all(pd.isna(loss_params.loc[:, idx[DS, :]].values)) is True:
- DS_to_drop.append(DS)
+ return dv_units
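+ # Extracting one unit per decision variable from the units Series
+ # follows this pattern (a minimal sketch with a two-level index
+ # and hypothetical labels):
+ #
+ #     units = pd.Series(
+ #         ['USD_2011', 'USD_2011', 'worker_day'],
+ #         name='Units',
+ #         index=pd.MultiIndex.from_tuples(
+ #             [('Cost', 'A'), ('Cost', 'B'), ('Time', 'A')],
+ #             names=['dv', 'loss'],
+ #         ),
+ #     )
+ #     dv_units = (
+ #         units.reset_index()[['dv', 'Units']]
+ #         .set_index('dv')
+ #         .groupby('dv')['Units']
+ #         .first()
+ #         .to_dict()
+ #     )
+ #     # {'Cost': 'USD_2011', 'Time': 'worker_day'}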
- loss_params.drop(columns=DS_to_drop, level=0, inplace=True)
+ def calculate(self, dmg_quantities: pd.DataFrame) -> None: # noqa: C901
+ """
+ Calculate damage consequences.
- # convert values to internal base units
- for DS in loss_params.columns.unique(level=0):
- if DS.startswith('DS'):
- loss_params.loc[:, DS] = self.convert_marginal_params(
- loss_params.loc[:, DS].copy(),
- loss_params[('DV', 'Unit')],
- loss_params[('Quantity', 'Unit')],
- ).values
+ Parameters
+ ----------
+ dmg_quantities: DataFrame
+ A table with the quantity of damage experienced in each
+ damage state of each performance group at each location
+ and direction. You can use the prepare_dmg_quantities
+ method in the DamageModel to get such a DF.
- # check for components with incomplete loss information
- cmp_incomplete_list = loss_params.loc[
- loss_params[('Incomplete', '')] == 1
- ].index
+ """
+ assert self.loss_map is not None
- if len(cmp_incomplete_list) > 0:
- loss_params.drop(cmp_incomplete_list, inplace=True)
+ sample_size = len(dmg_quantities)
- self.log_msg(
- "\n"
- "WARNING: Loss information is incomplete for the "
- f"following component(s) {cmp_incomplete_list}. "
- "They were removed from the analysis."
- "\n",
+ # If everything is undamaged there are no losses
+ if set(dmg_quantities.columns.get_level_values('ds')) == {'0'}:
+ self.sample = None
+ self.log.msg(
+ 'There is no damage---DV sample is set to None.',
prepend_timestamp=False,
)
+ return
- # filter decision variables, if needed
- if decision_variables is not None:
- loss_params = loss_params.reorder_levels([1, 0])
-
- available_DVs = loss_params.index.unique(level=0)
- filtered_DVs = []
-
- for DV_i in decision_variables:
- if DV_i in available_DVs:
- filtered_DVs.append(DV_i)
-
- loss_params = loss_params.loc[filtered_DVs, :].reorder_levels([1, 0])
-
- self.loss_params = loss_params.sort_index(axis=1)
-
- self.log_msg("Loss parameters successfully parsed.", prepend_timestamp=False)
-
- def aggregate_losses(self):
- """
- This is placeholder method.
+ # calculate the quantities for economies of scale
+ self.log.msg('\nAggregating damage quantities...', prepend_timestamp=False)
- The method of aggregating the Decision Variable sample is specific to
- each DV and needs to be implemented in every child of the LossModel
- independently.
- """
- raise NotImplementedError
+ if self._asmnt.options.eco_scale['AcrossFloors']:
+ if self._asmnt.options.eco_scale['AcrossDamageStates']:
+ eco_levels = [0]
+ eco_columns = ['cmp']
- def _generate_DV_sample(self, dmg_quantities, sample_size):
- """
- This is placeholder method.
+ else:
+ eco_levels = [0, 4]
+ eco_columns = ['cmp', 'ds']
- The method of sampling decision variables in Decision
- Variable-specific and needs to be implemented in every child
- of the LossModel independently.
- """
- raise NotImplementedError
+ elif self._asmnt.options.eco_scale['AcrossDamageStates']:
+ eco_levels = [0, 1]
+ eco_columns = ['cmp', 'loc']
- def calculate(self, sample_size=None):
- """
- Calculate the consequences of each component block damage in
- the asset.
+ else:
+ eco_levels = [0, 1, 4]
+ eco_columns = ['cmp', 'loc', 'ds']
- """
- if not sample_size:
- sample_size = self._asmnt.demand.sample.shape[0]
- warnings.warn(
- 'Using default sample size is deprecated and will '
- 'be removed in future versions. '
- 'Please provide the `sample_size` explicitly.',
- DeprecationWarning,
- )
+ eco_group = dmg_quantities.groupby(level=eco_levels, axis=1) # type: ignore
+ eco_qnt = eco_group.sum().mask(eco_group.count() == 0, np.nan)
+ assert eco_qnt.columns.names == eco_columns
- self.log_div()
- self.log_msg("Calculating losses...")
+ self.log.msg(
+ 'Successfully aggregated damage quantities.', prepend_timestamp=False
+ )
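+ # A minimal sketch of this aggregation with `AcrossFloors=True` and
+ # `AcrossDamageStates=False` (grouping by ['cmp', 'ds']; labels are
+ # hypothetical):
+ #
+ #     cols = pd.MultiIndex.from_tuples(
+ #         [('cmp.A', '1', '1', '0', '1'), ('cmp.A', '2', '1', '0', '1')],
+ #         names=['cmp', 'loc', 'dir', 'uid', 'ds'],
+ #     )
+ #     dmg = pd.DataFrame([[1.0, 2.0]], columns=cols)
+ #     grp = dmg.groupby(level=[0, 4], axis=1)
+ #     eco_qnt = grp.sum().mask(grp.count() == 0, np.nan)
+ #     # single column ('cmp.A', '1') -> 3.0: both floors pooled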
- # First, get the damaged quantities in each damage state for
- # each component of interest.
- dmg_q = self._asmnt.damage.sample.copy()
+ # apply the median functions, if needed, to get median consequences for
+ # each realization
+ self.log.msg(
+ '\nCalculating the median repair consequences...',
+ prepend_timestamp=False,
+ )
- # Now sample random Decision Variables
- # Note that this method is DV-specific and needs to be
- # implemented in every child of the LossModel independently.
- self._generate_DV_sample(dmg_q, sample_size)
+ medians = self._calc_median_consequence(eco_qnt)
- self.log_msg("Loss calculation successful.")
+ self.log.msg(
+ 'Successfully determined median repair consequences.',
+ prepend_timestamp=False,
+ )
+ # combine the median consequences with the samples of deviation from the
+ # median to get the consequence realizations.
+ self.log.msg(
+ '\nConsidering deviations from the median values to obtain '
+ 'random DV sample...'
+ )
-class RepairModel(LossModel):
- """
- Manages building repair consequence assessments.
+ self.log.msg(
+ 'Preparing random variables for repair consequences...',
+ prepend_timestamp=False,
+ )
+ self.RV_reg = self._create_DV_RVs(dmg_quantities.columns) # type: ignore
- Parameters
- ----------
+ if self.RV_reg is not None:
+ assert self._asmnt.options.sampling_method is not None
+ self.RV_reg.generate_sample(
+ sample_size=sample_size, method=self._asmnt.options.sampling_method
+ )
- """
+ std_sample = base.convert_to_MultiIndex(
+ pd.DataFrame(self.RV_reg.RV_sample), axis=1
+ )
+ std_sample.columns.names = ['dv', 'cmp', 'ds', 'loc', 'dir', 'uid']
+ std_sample = std_sample.sort_index(axis=1)
- def __init__(self, assessment):
- super().__init__(assessment)
+ else:
+ std_sample = None
- self.loss_type = 'Repair'
+ self.log.msg(
+ f'\nSuccessfully generated {sample_size} realizations of '
+ 'deviation from the median consequences.',
+ prepend_timestamp=False,
+ )
- def _create_DV_RVs(self, case_list):
- """
- Prepare the random variables associated with decision
- variables, such as repair cost and time.
+ res_list = []
+ key_list: list[tuple[Any, ...]] = []
- Parameters
- ----------
- case_list: MultiIndex
- Index with cmp-loc-dir-ds descriptions that identify the
- RVs we need for the simulation.
+ dmg_quantities.columns = dmg_quantities.columns.reorder_levels( # type: ignore
+ ['cmp', 'ds', 'loc', 'dir', 'uid']
+ )
+ dmg_quantities = dmg_quantities.sort_index(axis=1)
- Returns
- -------
- RandomVariableRegistry or None
- A RandomVariableRegistry containing all the generated
- random variables necessary for the simulation. If no
- random variables are generated (due to missing parameters
- or conditions), returns None.
+ if std_sample is not None:
+ std_dvs = std_sample.columns.unique(level=0)
+ else:
+ std_dvs = []
- Raises
- ------
- ValueError
- If an unrecognized loss driver type is encountered,
- indicating a configuration or data input error.
+ for decision_variable in self.decision_variables: # noqa: PLR1702
+ if decision_variable in std_dvs:
+ assert isinstance(std_sample, pd.DataFrame)
+ prob_cmp_list = std_sample[decision_variable].columns.unique(level=0)
+ else:
+ prob_cmp_list = []
- """
+ cmp_list: list[tuple[Any, ...]] = []
- RV_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
- LP = self.loss_params
+ if decision_variable not in medians:
+ continue
+ for component in medians[decision_variable].columns.unique(level=0):
+ # check if there is damage in the component
+ consequence = self.loss_map.loc[component, 'Repair']
- # make ds the second level in the MultiIndex
- case_DF = pd.DataFrame(
- index=case_list.reorder_levels([0, 4, 1, 2, 3]),
- columns=[
- 0,
- ],
- )
- case_DF.sort_index(axis=0, inplace=True)
- driver_cmps = case_list.get_level_values(0).unique()
+ if component not in dmg_quantities.columns.get_level_values('cmp'):
+ continue
- rv_count = 0
+ ds_list = []
- # for each loss component
- for loss_cmp_id in self.loss_map.index.values:
- # load the corresponding parameters
- driver_type, driver_cmp_id = self.loss_map.loc[loss_cmp_id, 'Driver']
- conseq_cmp_id = self.loss_map.loc[loss_cmp_id, 'Consequence']
-
- # currently, we only support DMG-based loss calculations
- # but this will be extended in the very near future
- if driver_type != 'DMG':
- raise ValueError(f"Loss Driver type not recognized: " f"{driver_type}")
-
- # load the parameters
- # TODO: remove specific DV_type references and make the code below
- # generate parameters for any DV_types provided
- if (conseq_cmp_id, 'Cost') in LP.index:
- cost_params = LP.loc[(conseq_cmp_id, 'Cost'), :]
- else:
- cost_params = None
+ for ds in (
+ medians[decision_variable]
+ .loc[:, component]
+ .columns.unique(level=0)
+ ):
+ loc_list = []
- if (conseq_cmp_id, 'Time') in LP.index:
- time_params = LP.loc[(conseq_cmp_id, 'Time'), :]
- else:
- time_params = None
+ for loc_id, loc in enumerate(
+ dmg_quantities.loc[
+ :, (component, ds) # type: ignore
+ ].columns.unique(level=0)
+ ):
+ if (
+ self._asmnt.options.eco_scale['AcrossFloors'] is True
+ ) and (loc_id > 0):
+ break
- if (conseq_cmp_id, 'Carbon') in LP.index:
- carbon_params = LP.loc[(conseq_cmp_id, 'Carbon'), :]
- else:
- carbon_params = None
+ if self._asmnt.options.eco_scale['AcrossFloors'] is True:
+ median_i = medians[decision_variable].loc[
+ :, (component, ds)
+ ]
+ dmg_i = dmg_quantities.loc[
+ :, (component, ds) # type: ignore
+ ]
+
+ if component in prob_cmp_list:
+ assert std_sample is not None
+ std_i = std_sample.loc[
+ :,
+ (
+ decision_variable,
+ component,
+ ds,
+ ), # type: ignore
+ ]
+ else:
+ std_i = None
- if (conseq_cmp_id, 'Energy') in LP.index:
- energy_params = LP.loc[(conseq_cmp_id, 'Energy'), :]
- else:
- energy_params = None
+ else:
+ median_i = medians[decision_variable].loc[
+ :, (component, ds, loc)
+ ]
+ dmg_i = dmg_quantities.loc[
+ :, (component, ds, loc) # type: ignore
+ ]
+
+ if component in prob_cmp_list:
+ assert std_sample is not None
+ std_i = std_sample.loc[
+ :,
+ (
+ decision_variable,
+ component,
+ ds,
+ loc,
+ ), # type: ignore
+ ]
+ else:
+ std_i = None
- if driver_cmp_id not in driver_cmps:
- continue
+ if std_i is not None:
+ res_list.append(dmg_i.mul(median_i, axis=0) * std_i)
+ else:
+ res_list.append(dmg_i.mul(median_i, axis=0))
- for ds in case_DF.loc[driver_cmp_id, :].index.unique(level=0):
- if ds == '0':
- continue
+ loc_list.append(loc)
- if cost_params is not None:
- cost_params_DS = cost_params[f'DS{ds}']
+ if self._asmnt.options.eco_scale['AcrossFloors'] is True:
+ ds_list += [
+ ds,
+ ]
+ else:
+ ds_list += [(ds, loc) for loc in loc_list]
- cost_family = cost_params_DS.get('Family', np.nan)
- cost_theta = [
- cost_params_DS.get(f"Theta_{t_i}", np.nan) for t_i in range(3)
+ if self._asmnt.options.eco_scale['AcrossFloors'] is True:
+ cmp_list += [(consequence, component, ds) for ds in ds_list]
+ else:
+ cmp_list += [
+ (consequence, component, ds, loc) for ds, loc in ds_list
]
- # If the first parameter is controlled by a function, we use
- # 1.0 in its place and will scale the results in a later
- # step
- if '|' in str(cost_theta[0]):
- # if isinstance(cost_theta[0], str):
- cost_theta[0] = 1.0
-
- else:
- cost_family = np.nan
+ if self._asmnt.options.eco_scale['AcrossFloors'] is True:
+ key_list += [
+ (decision_variable, loss_cmp_i, dmg_cmp_i, ds)
+ for loss_cmp_i, dmg_cmp_i, ds in cmp_list
+ ]
+ else:
+ key_list += [
+ (decision_variable, loss_cmp_i, dmg_cmp_i, ds, loc)
+ for loss_cmp_i, dmg_cmp_i, ds, loc in cmp_list
+ ]
- if time_params is not None:
- time_params_DS = time_params[f'DS{ds}']
+ lvl_names = ['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid']
+ dv_sample = pd.concat(res_list, axis=1, keys=key_list, names=lvl_names)
- time_family = time_params_DS.get('Family', np.nan)
- time_theta = [
- time_params_DS.get(f"Theta_{t_i}", np.nan) for t_i in range(3)
- ]
+ dv_sample = dv_sample.fillna(0).convert_dtypes()
- # If the first parameter is controlled by a function, we use
- # 1.0 in its place and will scale the results in a later
- # step
- if '|' in str(time_theta[0]):
- # if isinstance(time_theta[0], str):
- time_theta[0] = 1.0
+ self.log.msg('Successfully obtained DV sample.', prepend_timestamp=False)
+ self.sample = dv_sample
- else:
- time_family = np.nan
+ def convert_loss_parameter_units(self) -> None:
+ """Convert previously loaded loss parameters to base units."""
+ if self.loss_params is None:
+ return
+ units = self.loss_params['DV', 'Unit']
+ arg_units = self.loss_params['Quantity', 'Unit']
+ for column in self.loss_params.columns.unique(level=0):
+ if not column.startswith('DS'):
+ continue
+ params = self.loss_params.loc[:, column].copy()
+ assert isinstance(params, pd.DataFrame)
+ self.loss_params.loc[:, column] = self._convert_marginal_params(
+ params, units, arg_units
+ ).to_numpy()
- if carbon_params is not None:
- carbon_params_DS = carbon_params[f'DS{ds}']
+ def drop_unused_damage_states(self) -> None:
+ """
+ Remove unused columns.
- carbon_family = carbon_params_DS.get('Family', np.nan)
- carbon_theta = [
- carbon_params_DS.get(f"Theta_{t_i}", np.nan) for t_i in range(3)
- ]
+ Remove columns from the loss model parameters corresponding
+ to unused damage states.
- # If the first parameter is controlled by a function, we use
- # 1.0 in its place and will scale the results in a later
- # step
- if '|' in str(carbon_theta[0]):
- # if isinstance(carbon_theta[0], str):
- carbon_theta[0] = 1.0
+ """
+ if self.loss_params is None:
+ return
+ first_level = self.loss_params.columns.get_level_values(0).unique().to_list()
+ ds_list = [x for x in first_level if x.startswith('DS')]
+ ds_to_drop = []
+ for damage_state in ds_list:
+ if (
+ np.all(
+ pd.isna(
+ self.loss_params.loc[
+ :, # type: ignore
+ idx[damage_state, :],
+ ].values
+ )
+ )
+ # Note: `np.all` returns a `numpy.bool_` rather than the
+ # Python `True` singleton, so appending `is True` to this
+ # condition makes the identity check evaluate to False
+ # even when the value is truthy. The plain truthiness
+ # check used here behaves correctly.
+ ds_to_drop.append(damage_state) # noqa: PERF401
+
+ self.loss_params = self.loss_params.drop(columns=ds_to_drop, level=0)
+
+ def _create_DV_RVs( # noqa: N802, C901
+ self, cases: pd.MultiIndex
+ ) -> uq.RandomVariableRegistry | None:
+ """
+ Prepare the random variables.
- else:
- carbon_family = np.nan
+ Prepare the random variables associated with decision
+ variables, such as repair cost and time.
- if energy_params is not None:
- energy_params_DS = energy_params[f'DS{ds}']
+ Parameters
+ ----------
+ cases: MultiIndex
+ Index with cmp-loc-dir-uid-ds descriptions that identify
+ the RVs we need for the simulation.
- energy_family = energy_params_DS.get('Family', np.nan)
- energy_theta = [
- energy_params_DS.get(f"Theta_{t_i}", np.nan) for t_i in range(3)
- ]
+ Returns
+ -------
+ RandomVariableRegistry or None
+ A RandomVariableRegistry containing all the generated
+ random variables necessary for the simulation. If no
+ random variables are generated (due to missing parameters
+ or conditions), returns None.
- # If the first parameter is controlled by a function, we use
- # 1.0 in its place and will scale the results in a later
- # step
- if '|' in str(energy_theta[0]):
- # if isinstance(energy_theta[0], str):
- energy_theta[0] = 1.0
+ """
+ # Convert the MultiIndex to a DataFrame
+ case_df = pd.DataFrame(index=cases).reset_index()
+ # maps `cmp` to array of damage states
+ damage_states = case_df.groupby('cmp')['ds'].unique().to_dict()
+ # maps `cmp`-`ds` to tuple of `loc`-`dir`-`uid` tuples
+ loc_dir_uids = (
+ case_df.groupby(['cmp', 'ds'])
+ .apply(
+ lambda x: tuple( # type: ignore
+ zip(
+ x['loc'],
+ x['dir'],
+ x['uid'],
+ )
+ )
+ )
+ .to_dict()
+ )
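+ # Grouping the case MultiIndex into lookup dicts follows this
+ # pattern (a minimal sketch with hypothetical labels):
+ #
+ #     cases = pd.MultiIndex.from_tuples(
+ #         [('cmp.A', '1', '1', '0', '1'), ('cmp.A', '1', '2', '0', '1')],
+ #         names=['cmp', 'loc', 'dir', 'uid', 'ds'],
+ #     )
+ #     case_df = pd.DataFrame(index=cases).reset_index()
+ #     case_df.groupby('cmp')['ds'].unique().to_dict()
+ #     # {'cmp.A': array(['1'], dtype=object)}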
+ damaged_components = set(cases.get_level_values('cmp'))
- else:
- energy_family = np.nan
-
- # If neither of the DV_types has a stochastic model assigned,
- # we do not need random variables for this DS
- if (
- (pd.isna(cost_family))
- and (pd.isna(time_family))
- and (pd.isna(carbon_family))
- and (pd.isna(energy_family))
- ):
- continue
+ rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
- # Otherwise, load the loc-dir cases
- loc_dir_uid = case_DF.loc[(driver_cmp_id, ds)].index.values
+ rv_count = 0
- for loc, direction, uid in loc_dir_uid:
- # assign cost RV
- if pd.isna(cost_family) is False:
- cost_rv_tag = f'Cost-{loss_cmp_id}-{ds}-{loc}-{direction}-{uid}'
+ # for each component in the loss map
+ assert isinstance(self.loss_map, pd.DataFrame)
+ for component, consequence in self.loss_map['Repair'].items():
+ # if that component does not have realized damage states,
+ # skip it (e.g., this can happen when there is only
+ # `collapse`).
+ if component not in damaged_components:
+ continue
- RV_reg.add_RV(
- uq.rv_class_map(cost_family)(
- name=cost_rv_tag,
- theta=cost_theta,
- truncation_limits=[0.0, np.nan],
- )
- )
- rv_count += 1
+ # for each DV
+ for decision_variable in self.decision_variables:
+ # If loss parameters are missing for that consequence,
+ # don't estimate losses for it. A warning has already
+ # been issued for what is missing.
+ if (consequence, decision_variable) in self.missing:
+ continue
- # assign time RV
- if pd.isna(time_family) is False:
- time_rv_tag = f'Time-{loss_cmp_id}-{ds}-{loc}-{direction}-{uid}'
+ # If the loss parameters for this consequence are not
+ # defined in this particular loss model, they are
+ # defined in the other loss model(s).
+ assert self.loss_params is not None
+ if (consequence, decision_variable) not in self.loss_params.index:
+ continue
- RV_reg.add_RV(
- uq.rv_class_map(time_family)(
- name=time_rv_tag,
- theta=time_theta,
- truncation_limits=[0.0, np.nan],
- )
- )
- rv_count += 1
+ # load the corresponding parameters
+ parameters = (
+ self.loss_params.loc[(consequence, decision_variable), :]
+ .dropna()
+ .to_dict()
+ )
- # assign time RV
- if pd.isna(carbon_family) is False:
- carbon_rv_tag = (
- f'Carbon-{loss_cmp_id}-{ds}-{loc}-{direction}-{uid}'
- )
+ for ds in damage_states[component]:
+ if ds == '0':
+ continue
- RV_reg.add_RV(
- uq.rv_class_map(carbon_family)(
- name=carbon_rv_tag,
- theta=carbon_theta,
- truncation_limits=[0.0, np.nan],
- )
- )
- rv_count += 1
+ ds_family = parameters.get((f'DS{ds}', 'Family'))
+ ds_theta = [
+ parameters.get((f'DS{ds}', f'Theta_{t_i}'), np.nan)
+ for t_i in range(3)
+ ]
- # assign time RV
- if pd.isna(energy_family) is False:
- energy_rv_tag = (
- f'Energy-{loss_cmp_id}-{ds}-{loc}-{direction}-{uid}'
- )
+ # If there is no RV family we don't need an RV
+ if ds_family is None:
+ continue
- RV_reg.add_RV(
- uq.rv_class_map(energy_family)(
- name=energy_rv_tag,
- theta=energy_theta,
+ # If the first parameter is controlled by a function, we use
+ # 1.0 in its place and will scale the results in a later
+ # step
+ if isinstance(ds_theta[0], str) and '|' in ds_theta[0]:
+ ds_theta[0] = 1.0
+
+ loc_dir_uid = loc_dir_uids[component, ds]
+
+ for loc, direction, uid in loc_dir_uid:
+ # assign RVs
+ rv_reg.add_RV(
+ uq.rv_class_map(ds_family)( # type: ignore
+ name=(
+ f'{decision_variable}-{component}-'
+ f'{ds}-{loc}-{direction}-{uid}'
+ ),
+ theta=ds_theta,
truncation_limits=[0.0, np.nan],
)
)
rv_count += 1
- # assign correlation between RVs across DV_types
- # TODO: add more DV_types and handle cases with only a
- # subset of them being defined
- if (
- (pd.isna(cost_family) is False)
- and (pd.isna(time_family) is False)
- and (self._asmnt.options.rho_cost_time != 0.0)
- ):
- rho = self._asmnt.options.rho_cost_time
-
- RV_reg.add_RV_set(
- uq.RandomVariableSet(
- f'DV-{loss_cmp_id}-{ds}-{loc}-{direction}-{uid}_set',
- list(RV_reg.RVs([cost_rv_tag, time_rv_tag]).values()),
- np.array([[1.0, rho], [rho, 1.0]]),
- )
+ # assign Time-Cost correlation whenever applicable
+ rho = self._asmnt.options.rho_cost_time
+ if rho != 0.0:
+ for rv_tag in rv_reg.RV:
+ if not rv_tag.startswith('Cost'):
+ continue
+ component = rv_tag.split('-')[1]
+ ds = rv_tag.split('-')[2]
+ loc = rv_tag.split('-')[3]
+ direction = rv_tag.split('-')[4]
+ uid = rv_tag.split('-')[5]
+ time_rv_tag = rv_tag.replace('Cost', 'Time')
+ if time_rv_tag in rv_reg.RV:
+ rv_reg.add_RV_set(
+ uq.RandomVariableSet(
+ f'DV-{component}-{ds}-{loc}-{direction}-{uid}_set',
+ list(rv_reg.RVs([rv_tag, time_rv_tag]).values()),
+ np.array([[1.0, rho], [rho, 1.0]]),
)
+ )
- self.log_msg(f"\n{rv_count} random variables created.", prepend_timestamp=False)
+ self.log.msg(
+ f'\n{rv_count} random variables created.', prepend_timestamp=False
+ )
if rv_count > 0:
- return RV_reg
- # else:
+ return rv_reg
return None
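+ # The Cost-Time coupling above imposes a 2x2 correlation matrix
+ # [[1, rho], [rho, 1]] on each matching (Cost, Time) RV pair. As a
+ # plain-numpy sketch of the intended dependence structure (not the
+ # registry machinery itself):
+ #
+ #     rho = 0.7
+ #     cov = np.array([[1.0, rho], [rho, 1.0]])
+ #     z = np.random.default_rng(0).multivariate_normal([0, 0], cov, 1000)
+ #     # z[:, 0] and z[:, 1] play the roles of the standard-normal
+ #     # scores shared by a paired Cost and Time variable.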
- def _calc_median_consequence(self, eco_qnt):
+ def _calc_median_consequence(self, eco_qnt: pd.DataFrame) -> dict: # noqa: C901
"""
- Calculates the median repair consequences for each loss
- component based on their quantities and the associated loss
- parameters.
+ Calculate median repair consequences.
- This function evaluates the median consequences for different
- types of decision variables (DV), such as repair costs or
- repair time, based on the provided loss parameters. It
- utilizes the eco_qnt DataFrame, which contains economic
- quantity realizations for various damage states and
- components, to compute the consequences.
+ Calculate the median repair consequences for each loss
+ component based on its quantity realizations and the
+ associated loss parameters.
Parameters
----------
- eco_qnt : DataFrame
- A DataFrame containing economic quantity realizations for
- various components and damage states, indexed or
- structured to align with the loss parameters.
+ eco_qnt: DataFrame
+ A DataFrame containing quantity realizations for various
+ components and damage states, appropriately grouped to
+ account for economies of scale.
+
Returns
-------
dict
- A dictionary where keys are the types of decision variables
- (DV) like 'COST' or 'TIME', and values are DataFrames
- containing the median consequences for each component and
- damage state. These DataFrames are structured with
- MultiIndex columns that may include 'cmp' (component),
+ A dictionary where keys are the types of decision
+ variables (DV) like 'COST' or 'TIME', and values are
+ DataFrames containing the median consequences for each
+ component and damage state. These DataFrames are structured
+ with MultiIndex columns that may include 'cmp' (component),
'ds' (damage state), and potentially 'loc' (location),
- depending on assessment options.
+ depending on the way economies of scale are handled.
Raises
------
@@ -663,37 +2284,26 @@ def _calc_median_consequence(self, eco_qnt):
If any loss driver types or distribution types are not
recognized, or if the parameters are incomplete or
unsupported.
- """
+ """
medians = {}
- DV_types = self.loss_params.index.unique(level=1)
-
- # for DV_type, DV_type_scase in zip(['COST', 'TIME'], ['Cost', 'Time']):
- for DV_type in DV_types:
+ for decision_variable in self.decision_variables:
cmp_list = []
median_list = []
- for loss_cmp_id in self.loss_map.index:
- driver_type, driver_cmp = self.loss_map.loc[loss_cmp_id, 'Driver']
- loss_cmp_name = self.loss_map.loc[loss_cmp_id, 'Consequence']
-
- # check if the given DV type is available as an output for the
- # selected component
- if (loss_cmp_name, DV_type) not in self.loss_params.index:
+ assert self.loss_map is not None
+ for loss_cmp_id, loss_cmp_name in self.loss_map['Repair'].items():
+ if (loss_cmp_name, decision_variable) in self.missing:
continue
- if driver_type != 'DMG':
- raise ValueError(
- f"Loss Driver type not recognized: " f"{driver_type}"
- )
-
- if driver_cmp not in eco_qnt.columns.get_level_values(0).unique():
+ if loss_cmp_id not in eco_qnt.columns.get_level_values(0).unique():
continue
ds_list = []
sub_medians = []
+ assert self.loss_params is not None
for ds in self.loss_params.columns.get_level_values(0).unique():
if not ds.startswith('DS'):
continue
@@ -703,37 +2313,40 @@ def _calc_median_consequence(self, eco_qnt):
if ds_id == '0':
continue
- loss_params_DS = self.loss_params.loc[(loss_cmp_name, DV_type), ds]
+ loss_params_ds = self.loss_params.loc[
+ (loss_cmp_name, decision_variable), ds
+ ]
# check if theta_0 is defined
- theta_0 = loss_params_DS.get('Theta_0', np.nan)
+ theta_0 = loss_params_ds.get('Theta_0', np.nan)
if pd.isna(theta_0):
continue
# check if the distribution type is supported
- family = loss_params_DS.get('Family', np.nan)
+ family = loss_params_ds.get('Family', np.nan)
if (not pd.isna(family)) and (
- family not in ['normal', 'lognormal', 'deterministic']
+ family not in {'normal', 'lognormal', 'deterministic'}
):
- raise ValueError(
- f"Loss Distribution of type {family} " f"not supported."
+ msg = f'Loss Distribution of type {family} not supported.'
+ raise ValueError(msg)
# If theta_0 is a scalar
try:
theta_0 = float(theta_0)
- if pd.isna(loss_params_DS.get('Family', np.nan)):
+ if pd.isna(loss_params_ds.get('Family', np.nan)):
# if theta_0 is constant, then use it directly
- f_median = prep_constant_median_DV(theta_0)
+ f_median = _prep_constant_median_DV(theta_0)
else:
# otherwise use a constant 1.0 as the median
# The random variable will be generated as a
# variation from this 1.0 and added in a later step.
- f_median = prep_constant_median_DV(1.0)
+ f_median = _prep_constant_median_DV(1.0)
except ValueError:
# otherwise, use the multilinear function
@@ -743,20 +2356,29 @@ def _calc_median_consequence(self, eco_qnt):
)
medns = all_vals[0]
qnts = all_vals[1]
- f_median = prep_bounded_multilinear_median_DV(medns, qnts)
+ f_median = _prep_bounded_multilinear_median_DV(medns, qnts)
# get the corresponding aggregate damage quantities
# to consider economies of scale
if 'ds' in eco_qnt.columns.names:
- avail_ds = eco_qnt.loc[:, driver_cmp].columns.unique(level=0)
+ avail_ds = eco_qnt.loc[
+ :, # type: ignore
+ loss_cmp_id,
+ ].columns.unique(level=0)
if ds_id not in avail_ds:
continue
- eco_qnt_i = eco_qnt.loc[:, (driver_cmp, ds_id)].copy()
+ eco_qnt_i = eco_qnt.loc[
+ :, # type: ignore
+ (loss_cmp_id, ds_id),
+ ].copy()
else:
- eco_qnt_i = eco_qnt.loc[:, driver_cmp].copy()
+ eco_qnt_i = eco_qnt.loc[
+ :, # type: ignore
+ loss_cmp_id,
+ ].copy()
if isinstance(eco_qnt_i, pd.Series):
eco_qnt_i = eco_qnt_i.to_frame()
@@ -764,7 +2386,7 @@ def _calc_median_consequence(self, eco_qnt):
eco_qnt_i.columns.name = 'del'
# generate the median values for each realization
- eco_qnt_i.loc[:, :] = f_median(eco_qnt_i.values)
+ eco_qnt_i.loc[:, :] = f_median(eco_qnt_i.values) # type: ignore
sub_medians.append(eco_qnt_i)
ds_list.append(ds_id)
@@ -783,388 +2405,419 @@ def _calc_median_consequence(self, eco_qnt):
result.columns = result.columns.droplevel('del')
# name the remaining column header levels
- if self._asmnt.options.eco_scale["AcrossFloors"] is True:
- result.columns.names = ['cmp', 'ds']
-
- else:
+ if eco_qnt.columns.names == ['cmp', 'ds', 'loc']:
result.columns.names = ['cmp', 'ds', 'loc']
+ else:
+ result.columns.names = ['cmp', 'ds']
# save the results to the returned dictionary
- medians.update({DV_type: result})
+ medians.update({decision_variable: result})
return medians
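+ # The bounded multilinear median function behaves like a clamped
+ # `np.interp` over (quantity, median) pairs; a minimal sketch,
+ # assuming a hypothetical 'medians|quantities' string '500,400|1,10':
+ #
+ #     medns, qnts = (
+ #         np.array(part.split(','), dtype=float)
+ #         for part in '500,400|1,10'.split('|')
+ #     )
+ #     np.interp(np.array([0.5, 5.0, 20.0]), qnts, medns)
+ #     # -> array([500., 455.55..., 400.]); values outside the
+ #     # domain are clamped to the endpoint medians.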
- def _generate_DV_sample(self, dmg_quantities, sample_size):
+
+class RepairModel_LF(RepairModel_Base):
+ """Repair consequences for components with loss functions."""
+
+ __slots__ = []
+
+ def calculate(
+ self,
+ demand_sample: pd.DataFrame,
+ cmp_sample: dict,
+ cmp_marginal_params: pd.DataFrame,
+ demand_offset: dict,
+ nondirectional_multipliers: dict,
+ ) -> None:
"""
- Generate a sample of repair costs and times.
+ Calculate repair consequences.
Parameters
----------
- dmg_quantities: DataFrame
- A table with the quantity of damage experienced in each damage state
- of each performance group at each location and direction. You can use
- the prepare_dmg_quantities method in the DamageModel to get such a
- DF.
- sample_size: integer
- The number of realizations to generate.
+ demand_sample: pd.DataFrame
+ The sample of the demand model to be used for the inputs
+ of the loss functions.
+ cmp_sample: dict
+ Dict mapping each `cmp`-`loc`-`dir`-`uid` to the component
+ quantity realizations in the asset model in the form of
+ pd.Series objects.
+ cmp_marginal_params: pd.DataFrame
+ Dataframe containing component marginal distribution
+ parameters.
+ demand_offset: dict
+ Dictionary specifying the demand offset.
+ nondirectional_multipliers: dict
+ Dictionary specifying the nondirectional multipliers used
+ to combine the directional demands.
+
- Raises
- ------
- ValueError
- When any Loss Driver is not recognized.
"""
+ if self.loss_params is None:
+ return
- # calculate the quantities for economies of scale
- self.log_msg("\nAggregating damage quantities...", prepend_timestamp=False)
+ assert self.loss_map is not None
+ loss_map = self.loss_map['Repair'].to_dict()
+ sample_size = len(demand_sample)
+
+ # TODO(JVM): this can be taken out and simply passed as blocks in
+ # the arguments, and cast to a dict in here. Index can be
+ # obtained from there.
+ index = [
+ x
+ for x in cmp_marginal_params.index.get_level_values(0)
+ if loss_map.get(x) in self.loss_params.index
+ ]
+ # If `Blocks` information is unspecified, add one block per
+ # component.
+ if 'Blocks' not in cmp_marginal_params.columns:
+ cmp_marginal_params['Blocks'] = 1
+ blocks = cmp_marginal_params.loc[index, 'Blocks'].to_dict()
+
+ performance_group_dict = {}
+ for (component, location, direction, uid), num_blocks in blocks.items():
+ for decision_variable in self.decision_variables:
+ if (component, decision_variable) in self.missing:
+ continue
+ performance_group_dict[
+ (component, decision_variable), location, direction, uid
+ ] = num_blocks
- # If everything is undamaged there are no losses
- if set(dmg_quantities.columns.get_level_values('ds')) == {'0'}:
- self.sample = None
- self.log_msg(
- "There is no damage---DV sample is set to None.",
+ if not performance_group_dict:
+ self.log.msg(
+ 'No loss function-driven components---LF sample is set to None.',
prepend_timestamp=False,
)
return
- if self._asmnt.options.eco_scale["AcrossFloors"]:
- if self._asmnt.options.eco_scale["AcrossDamageStates"]:
- eco_levels = [
- 0,
- ]
- eco_columns = [
- 'cmp',
- ]
-
- else:
- eco_levels = [0, 4]
- eco_columns = ['cmp', 'ds']
+ performance_group = pd.DataFrame( # type: ignore
+ performance_group_dict.values(),
+ index=performance_group_dict.keys(),
+ columns=['Blocks'],
+ )
+ performance_group.index.names = ['cmp', 'loc', 'dir', 'uid']
- elif self._asmnt.options.eco_scale["AcrossDamageStates"]:
- eco_levels = [0, 1]
- eco_columns = ['cmp', 'loc']
+ required_edps = base.invert_mapping(
+ _get_required_demand_type(
+ self.loss_params, performance_group, demand_offset
+ )
+ )
- else:
- eco_levels = [0, 1, 4]
- eco_columns = ['cmp', 'loc', 'ds']
+ available_edps = (
+ pd.DataFrame(index=demand_sample.columns)
+ .reset_index()
+ .groupby(['type', 'loc'])['dir']
+ .agg(lambda x: list(set(x)))
+ .to_dict()
+ )
- eco_group = dmg_quantities.groupby(level=eco_levels, axis=1)
- eco_qnt = eco_group.sum().mask(eco_group.count() == 0, np.nan)
- assert eco_qnt.columns.names == eco_columns
+ # Raise an error if demand sample is missing necessary entries.
+ _verify_edps_available(available_edps, set(required_edps.values()))
- self.log_msg(
- "Successfully aggregated damage quantities.", prepend_timestamp=False
+ demand_dict = _assemble_required_demand_data(
+ set(required_edps.values()),
+ nondirectional_multipliers,
+ demand_sample,
)
- # apply the median functions, if needed, to get median consequences for
- # each realization
- self.log_msg(
- "\nCalculating the median repair consequences...",
+ self.log.msg(
+ '\nCalculating the median repair consequences...',
prepend_timestamp=False,
)
- medians = self._calc_median_consequence(eco_qnt)
+ medians = self._calc_median_consequence(
+ performance_group, loss_map, required_edps, demand_dict, cmp_sample
+ )
- self.log_msg(
- "Successfully determined median repair consequences.",
+ self.log.msg(
+ 'Successfully determined median repair consequences.',
prepend_timestamp=False,
)
- # combine the median consequences with the samples of deviation from the
- # median to get the consequence realizations.
- self.log_msg(
- "\nConsidering deviations from the median values to obtain "
- "random DV sample..."
+ self.log.msg(
+ '\nConsidering deviations from the median values to obtain '
+ 'random DV sample...'
)
- self.log_msg(
- "Preparing random variables for repair cost and time...",
+ self.log.msg(
+ 'Preparing random variables for repair cost and time...',
prepend_timestamp=False,
)
- RV_reg = self._create_DV_RVs(dmg_quantities.columns)
- if RV_reg is not None:
- RV_reg.generate_sample(
+ rv_reg = self._create_DV_RVs(medians.columns) # type: ignore
+ if rv_reg is not None:
+ assert self._asmnt.options.sampling_method is not None
+ rv_reg.generate_sample(
sample_size=sample_size, method=self._asmnt.options.sampling_method
)
std_sample = base.convert_to_MultiIndex(
- pd.DataFrame(RV_reg.RV_sample), axis=1
- ).sort_index(axis=1)
- std_sample.columns.names = ['dv', 'cmp', 'ds', 'loc', 'dir', 'uid']
-
- # convert column names to int
- std_idx = std_sample.columns.levels
-
- std_sample.columns = std_sample.columns.set_levels(
- [
- std_idx[0],
- std_idx[1].astype(int),
- std_idx[2],
- std_idx[3],
- std_idx[4],
- std_idx[5],
- ]
+ pd.DataFrame(rv_reg.RV_sample), axis=1
)
-
- std_sample.sort_index(axis=1, inplace=True)
+ std_sample.columns.names = [
+ 'dv',
+ 'loss',
+ 'dmg',
+ 'loc',
+ 'dir',
+ 'uid',
+ 'block',
+ ]
+ std_sample = std_sample.sort_index(axis=1)
+ sample = (medians * std_sample).combine_first(medians)
else:
- std_sample = None
+ sample = medians
- self.log_msg(
- f"\nSuccessfully generated {sample_size} realizations of "
- "deviation from the median consequences.",
+ self.log.msg(
+ f'\nSuccessfully generated {sample_size} realizations of '
+ 'deviation from the median consequences.',
prepend_timestamp=False,
)
- res_list = []
- key_list = []
-
- dmg_quantities.columns = dmg_quantities.columns.reorder_levels([0, 4, 1, 2, 3])
- dmg_quantities.sort_index(axis=1, inplace=True)
-
- DV_types = self.loss_params.index.unique(level=1)
-
- if isinstance(std_sample, pd.DataFrame):
- std_DV_types = std_sample.columns.unique(level=0)
- else:
- std_DV_types = []
-
- # for DV_type, _ in zip(['COST', 'TIME'], ['Cost', 'Time']):
- for DV_type in DV_types:
- if DV_type in std_DV_types:
- prob_cmp_list = std_sample[DV_type].columns.unique(level=0)
- else:
- prob_cmp_list = []
-
- cmp_list = []
-
- if DV_type not in medians:
- continue
-
- for cmp_i in medians[DV_type].columns.unique(level=0):
- # check if there is damage in the component
- driver_type, dmg_cmp_i = self.loss_map.loc[cmp_i, 'Driver']
- loss_cmp_i = self.loss_map.loc[cmp_i, 'Consequence']
-
- if driver_type != 'DMG':
- raise ValueError(
- f"Loss Driver type not " f"recognized: {driver_type}"
- )
-
- if not (dmg_cmp_i in dmg_quantities.columns.unique(level=0)):
- continue
-
- ds_list = []
-
- for ds in medians[DV_type].loc[:, cmp_i].columns.unique(level=0):
- loc_list = []
-
- for loc_id, loc in enumerate(
- dmg_quantities.loc[:, (dmg_cmp_i, ds)].columns.unique(level=0)
- ):
- if (self._asmnt.options.eco_scale["AcrossFloors"] is True) and (
- loc_id > 0
- ):
- break
-
- if self._asmnt.options.eco_scale["AcrossFloors"] is True:
- median_i = medians[DV_type].loc[:, (cmp_i, ds)]
- dmg_i = dmg_quantities.loc[:, (dmg_cmp_i, ds)]
-
- if cmp_i in prob_cmp_list:
- std_i = std_sample.loc[:, (DV_type, cmp_i, ds)]
- else:
- std_i = None
-
- else:
- median_i = medians[DV_type].loc[:, (cmp_i, ds, loc)]
- dmg_i = dmg_quantities.loc[:, (dmg_cmp_i, ds, loc)]
-
- if cmp_i in prob_cmp_list:
- std_i = std_sample.loc[:, (DV_type, cmp_i, ds, loc)]
- else:
- std_i = None
-
- if std_i is not None:
- res_list.append(dmg_i.mul(median_i, axis=0) * std_i)
- else:
- res_list.append(dmg_i.mul(median_i, axis=0))
-
- loc_list.append(loc)
-
- if self._asmnt.options.eco_scale["AcrossFloors"] is True:
- ds_list += [
- ds,
- ]
- else:
- ds_list += [(ds, loc) for loc in loc_list]
-
- if self._asmnt.options.eco_scale["AcrossFloors"] is True:
- cmp_list += [(loss_cmp_i, dmg_cmp_i, ds) for ds in ds_list]
- else:
- cmp_list += [
- (loss_cmp_i, dmg_cmp_i, ds, loc) for ds, loc in ds_list
- ]
-
- if self._asmnt.options.eco_scale["AcrossFloors"] is True:
- key_list += [
- (DV_type, loss_cmp_i, dmg_cmp_i, ds)
- for loss_cmp_i, dmg_cmp_i, ds in cmp_list
- ]
- else:
- key_list += [
- (DV_type, loss_cmp_i, dmg_cmp_i, ds, loc)
- for loss_cmp_i, dmg_cmp_i, ds, loc in cmp_list
- ]
-
- lvl_names = ['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid']
- DV_sample = pd.concat(res_list, axis=1, keys=key_list, names=lvl_names)
-
- DV_sample = DV_sample.fillna(0).convert_dtypes()
- DV_sample.columns.names = lvl_names
-
- # Get the flags for replacement consequence trigger
- DV_sum = DV_sample.groupby(
- level=[
- 1,
- ],
- axis=1,
+ # sum up the block losses
+ sample = sample.groupby( # type: ignore
+ by=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid'], axis=1
).sum()
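+ # Summing over blocks collapses the trailing `block` column level;
+ # a minimal sketch with a hypothetical two-block component:
+ #
+ #     cols = pd.MultiIndex.from_tuples(
+ #         [('Cost', 'cmp.A', 'cmp.A', '1', '1', '0', b) for b in '01'],
+ #         names=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block'],
+ #     )
+ #     s = pd.DataFrame([[10.0, 15.0]], columns=cols)
+ #     s.groupby(by=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid'], axis=1).sum()
+ #     # one column ('Cost', 'cmp.A', 'cmp.A', '1', '1', '0') -> 25.0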
- if 'replacement' in DV_sum.columns:
- # When the 'replacement' consequence is triggered, all
- # local repair consequences are discarded. Note that
- # global consequences are assigned to location '0'.
-
- id_replacement = DV_sum['replacement'] > 0
- # get the list of non-zero locations
- locs = DV_sample.columns.get_level_values(4).unique().values
+ self.log.msg('Successfully obtained DV sample.', prepend_timestamp=False)
+ self.sample = sample
- locs = locs[locs != '0']
+ return
- DV_sample.loc[id_replacement, idx[:, :, :, :, locs]] = 0.0
+ def convert_loss_parameter_units(self) -> None:
+ """Convert previously loaded loss parameters to base units."""
+ if self.loss_params is None:
+ return
+ units = self.loss_params['DV', 'Unit']
+ arg_units = self.loss_params['Demand', 'Unit']
+ column = 'LossFunction'
+ params = self.loss_params[column].copy()
+ assert isinstance(params, pd.DataFrame)
+ self.loss_params.loc[:, column] = self._convert_marginal_params(
+ params,
+ units,
+ arg_units,
+ divide_units=False,
+ ).to_numpy()
+ return
+
+ def _calc_median_consequence(
+ self,
+ performance_group: pd.DataFrame,
+ loss_map: dict,
+ required_edps: dict,
+ demand_dict: dict,
+ cmp_sample: dict,
+ ) -> pd.DataFrame:
+ """
+ Calculate median repair consequences.
- self.sample = DV_sample
+ Calculates the median repair consequences for each loss
+ function-driven component based on its quantity realizations
+ and the associated loss parameters.
- self.log_msg("Successfully obtained DV sample.", prepend_timestamp=False)
+ Parameters
+ ----------
+ performance_group: pd.DataFrame
+ Dataframe with a single column `Blocks` containing an
+ integer for the number of blocks of each
+ (`cmp`-`decision_variable`)-`loc`-`dir`-`uid`.
+ loss_map: dict
+ Dictionary mapping component IDs, `cmp`, to their repair
+ consequences.
+ required_edps: dict
+ Dictionary mapping each (`cmp`-`decision_variable`)-`loc`-`dir`-`uid`
+ entry of the `performance_group`'s index to the
+ EDP name (e.g., `PFA-1-1`) that should be used as its loss
+ function input.
+ demand_dict: dict
+ Dictionary mapping each EDP name to the values in the
+ demand sample in the form of numpy arrays.
+ cmp_sample: dict
+ Dict mapping each `cmp`-`loc`-`dir`-`uid` to the component
+ quantity realizations in the asset model in the form of
+ pd.Series objects.
- def aggregate_losses(self):
- """
- Aggregates repair consequences across components.
Returns
-------
- DataFrame
- A DataFrame containing aggregated repair
- consequences. Columns include:
- - 'repair_cost': Total repair costs across all components.
- - 'repair_time-parallel': Minimum possible repair time
- assuming repairs are conducted in parallel.
- - 'repair_time-sequential': Maximum possible repair time
- assuming sequential repairs.
- - 'repair_carbon': Total carbon emissions associated with
- repairs.
- - 'repair_energy': Total energy usage associated with
- repairs.
- Each of these columns is summed or calculated based on the
- repair data available.
- """
-
- self.log_div()
- self.log_msg("Aggregating repair consequences...")
-
- DV = self.sample
-
- # group results by DV type and location
- DVG = DV.groupby(level=[0, 4], axis=1).sum()
-
- # create the summary DF
- df_agg = pd.DataFrame(
- index=DV.index,
- columns=[
- 'repair_cost',
- 'repair_time-parallel',
- 'repair_time-sequential',
- 'repair_carbon',
- 'repair_energy',
- ],
- )
-
- if 'Cost' in DVG.columns:
- df_agg['repair_cost'] = DVG['Cost'].sum(axis=1)
- else:
- df_agg = df_agg.drop('repair_cost', axis=1)
+ pd.DataFrame
+ Dataframe with median losses for loss function-driven
+ components.
- if 'Time' in DVG.columns:
- df_agg['repair_time-sequential'] = DVG['Time'].sum(axis=1)
+ Raises
+ ------
+ ValueError
+ If loss function interpolation fails.
- df_agg['repair_time-parallel'] = DVG['Time'].max(axis=1)
- else:
- df_agg = df_agg.drop(
- ['repair_time-parallel', 'repair_time-sequential'], axis=1
- )
+ """
+ medians_dict = {}
+
+ # for each component in the asset model
+ component: str
+ decision_variable: str
+ location: str
+ direction: str
+ uid: str
+ blocks: int
+ for ( # type: ignore
+ (component, decision_variable),
+ location,
+ direction,
+ uid,
+ ), blocks in performance_group['Blocks'].items():
+ consequence = loss_map[component]
+ edp = required_edps[
+ (consequence, decision_variable), location, direction, uid
+ ]
+ edp_values = demand_dict[edp]
+ assert self.loss_params is not None
+ loss_function_str = self.loss_params.loc[
+ (component, decision_variable), ('LossFunction', 'Theta_0')
+ ]
+ assert isinstance(loss_function_str, str)
+ try:
+ median_loss = base.stringterpolation(loss_function_str)(edp_values)
+ except ValueError as exc:
+ msg = (
+ f'Loss function interpolation for consequence '
+ f'`{consequence}-{decision_variable}` has failed. '
+ f'Ensure a sufficient interpolation domain '
+ f'for the X values (those after the `|` symbol) '
+ f'and verify the X-value and Y-value lengths match.'
+ )
+ raise ValueError(msg) from exc
+ for block in range(blocks):
+ medians_dict[
+ decision_variable,
+ consequence,
+ component,
+ location,
+ direction,
+ uid,
+ str(block),
+ ] = (
+ median_loss
+ * cmp_sample[component, location, direction, uid].to_numpy()
+ / float(blocks)
+ )
+
+ medians = pd.DataFrame(medians_dict)
+ medians.columns.names = ['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block']
+ return medians.sort_index(axis=1)
+
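
For context, the `('LossFunction', 'Theta_0')` strings interpolated above follow a `y1,y2,...|x1,x2,...` convention (the error message refers to the values after the `|` as the X values). A minimal sketch of that idea; `stringterpolation_sketch` is a hypothetical stand-in for `base.stringterpolation`:

```python
import numpy as np


def stringterpolation_sketch(spec: str):
    """Hypothetical stand-in for base.stringterpolation."""
    y_str, x_str = spec.split('|')
    y = np.array([float(v) for v in y_str.split(',')])
    x = np.array([float(v) for v in x_str.split(',')])

    def interpolate(edp_values: np.ndarray) -> np.ndarray:
        # np.interp clamps outside the X domain; the real function
        # raises instead, which the ValueError handler above relies on.
        return np.interp(edp_values, x, y)

    return interpolate


median_loss = stringterpolation_sketch('0.0,0.5,1.0|0.0,1.0,2.0')
median_loss(np.array([0.25, 1.5]))  # -> array([0.125, 0.75])
```
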
+ def _create_DV_RVs( # noqa: N802
+ self, cases: pd.MultiIndex
+ ) -> uq.RandomVariableRegistry | None:
+ """
+ Prepare the decision variable random variables.
- if 'Carbon' in DVG.columns:
- df_agg['repair_carbon'] = DVG['Carbon'].sum(axis=1)
- else:
- df_agg = df_agg.drop('repair_carbon', axis=1)
+ Prepares the random variables associated with decision
+ variables, such as repair cost and time.
- if 'Energy' in DVG.columns:
- df_agg['repair_energy'] = DVG['Energy'].sum(axis=1)
- else:
- df_agg = df_agg.drop('repair_energy', axis=1)
+ Parameters
+ ----------
+ cases: MultiIndex
+ Index with `dv`-`loss`-`dmg`-`loc`-`dir`-`uid`-`block`
+ entries that identify the RVs we need for the
+ simulation (columns of the `medians` dataframe).
- # convert units
+ Returns
+ -------
+ RandomVariableRegistry or None
+ A RandomVariableRegistry containing all the generated
+ random variables necessary for the simulation. If no
+ random variables are generated (due to missing parameters
+ or conditions), returns None.
- cmp_units = (
- self.loss_params[('DV', 'Unit')]
- .groupby(
- level=[
- 1,
- ]
- )
- .agg(lambda x: x.value_counts().index[0])
- )
+ """
+ rv_reg = uq.RandomVariableRegistry(self._asmnt.options.rng)
- dv_units = pd.Series(index=df_agg.columns, name='Units', dtype='object')
+ rv_count = 0
- if 'Cost' in DVG.columns:
- dv_units['repair_cost'] = cmp_units['Cost']
+ # for each component in the loss map
+ for (
+ decision_variable,
+ consequence,
+ component,
+ location,
+ direction,
+ uid,
+ block,
+ ) in cases:
+ # load the corresponding parameters
+ assert self.loss_params is not None
+ parameters = self.loss_params.loc[(consequence, decision_variable), :]
- if 'Time' in DVG.columns:
- dv_units['repair_time-parallel'] = cmp_units['Time']
- dv_units['repair_time-sequential'] = cmp_units['Time']
+ if ('LossFunction', 'Family') not in parameters:
+ # Everything is deterministic, no need to create RVs.
+ continue
+ family = parameters.loc['LossFunction', 'Family']
+ theta = [
+ parameters.get(('LossFunction', f'Theta_{t_i}'), np.nan)
+ for t_i in range(3)
+ ]
+
+ # If there is no RV family we don't need an RV
+ if pd.isna(family):
+ continue
- if 'Carbon' in DVG.columns:
- dv_units['repair_carbon'] = cmp_units['Carbon']
+ # Since the first parameter is controlled by a function,
+ # we use 1.0 in its place and will scale the results in a
+ # later step.
+ theta[0] = 1.0
+
+ # assign RVs
+ rv_reg.add_RV(
+ uq.rv_class_map(family)( # type: ignore
+ name=(
+ f'{decision_variable}-{consequence}-'
+ f'{component}-{location}-{direction}-{uid}-{block}'
+ ),
+ theta=theta,
+ truncation_limits=[0.0, np.nan],
+ )
+ )
+ rv_count += 1
- if 'Energy' in DVG.columns:
- dv_units['repair_energy'] = cmp_units['Energy']
+ # assign Time-Cost correlation whenever applicable
+ rho = self._asmnt.options.rho_cost_time
+ if rho != 0.0:
+ for rv_tag in rv_reg.RV:
+ if not rv_tag.startswith('Cost'):
+ continue
+ split = rv_tag.split('-')
+ consequence = split[1]
+ component = split[2]
+ location = split[3]
+ direction = split[4]
+ uid = split[5]
+ block = split[6]
+ time_rv_tag = rv_tag.replace('Cost', 'Time')
+ if time_rv_tag in rv_reg.RV:
+ rv_reg.add_RV_set(
+ uq.RandomVariableSet(
+ (
+ f'DV-{consequence}-{component}-'
+ f'{location}-{direction}-{uid}-{block}_set'
+ ),
+ list(rv_reg.RVs([rv_tag, time_rv_tag]).values()),
+ np.array([[1.0, rho], [rho, 1.0]]),
+ )
+ )
- df_agg = file_io.save_to_csv(
- df_agg,
- None,
- units=dv_units,
- unit_conversion_factors=self._asmnt.unit_conversion_factors,
- use_simpleindex=False,
- log=self._asmnt.log,
+ self.log.msg(
+ f'\n{rv_count} random variables created.', prepend_timestamp=False
)
- df_agg.drop("Units", inplace=True)
-
- # convert header
-
- df_agg = base.convert_to_MultiIndex(df_agg, axis=1)
-
- self.log_msg("Repair consequences successfully aggregated.")
-
- return df_agg.astype(float)
+ if rv_count > 0:
+ return rv_reg
+ return None
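
The `rho_cost_time` coupling above places each Cost RV and its Time counterpart in a set with a 2x2 correlation matrix. A sketch of the resulting effect under assumed lognormal marginals, using a Gaussian copula (an illustration of the idea, not pelicun's `uq` internals):

```python
import numpy as np
from scipy import stats

rho = 0.7  # assumed correlation for the sketch
corr = np.array([[1.0, rho], [rho, 1.0]])

rng = np.random.default_rng(0)
z = rng.multivariate_normal([0.0, 0.0], corr, size=10_000)
u = stats.norm.cdf(z)  # correlated uniforms in (0, 1)

# Assumed lognormal multipliers around a median of 1.0, mirroring the
# theta[0] = 1.0 placeholder that gets rescaled in a later step.
cost_mult = stats.lognorm(s=0.3, scale=1.0).ppf(u[:, 0])
time_mult = stats.lognorm(s=0.4, scale=1.0).ppf(u[:, 1])

print(np.corrcoef(cost_mult, time_mult)[0, 1])  # close to rho
```
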
-def prep_constant_median_DV(median):
+def _prep_constant_median_DV(median: float) -> Callable: # noqa: N802
"""
- Returns a constant median Decision Variable (DV) function.
+ Return a constant median Decision Variable (DV) function.
Parameters
----------
@@ -1176,20 +2829,20 @@ def prep_constant_median_DV(median):
callable
A function that returns the constant median DV for all component
quantities.
+
"""
- def f(*args):
- # pylint: disable=unused-argument
- # pylint: disable=missing-return-doc
- # pylint: disable=missing-return-type-doc
+ def f(*args): # noqa: ANN002, ANN202, ARG001
return median
return f
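
As a quick illustration, the returned closure deliberately ignores its arguments so that constant and quantity-dependent median functions share one call signature (assuming the function above is in scope):

```python
f = _prep_constant_median_DV(1000.0)
f()                # -> 1000.0
f(3.0, 'ignored')  # -> 1000.0; extra arguments are accepted but unused
```
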
-def prep_bounded_multilinear_median_DV(medians, quantities):
+def _prep_bounded_multilinear_median_DV( # noqa: N802
+ medians: np.ndarray, quantities: np.ndarray
+) -> Callable:
"""
- Returns a bounded multilinear median Decision Variable (DV) function.
+ Return a bounded multilinear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
@@ -1210,23 +2863,57 @@ def prep_bounded_multilinear_median_DV(medians, quantities):
callable
A function that returns the median DV given the quantity of damaged
components.
+
"""
- def f(quantity):
- # pylint: disable=missing-return-doc
- # pylint: disable=missing-return-type-doc
+ def f(quantity): # noqa: ANN001, ANN202
if quantity is None:
- raise ValueError(
+ msg = (
'A bounded linear median Decision Variable function called '
'without specifying the quantity of damaged components'
)
+ raise ValueError(msg)
q_array = np.asarray(quantity, dtype=np.float64)
# calculate the median consequence given the quantity of damaged
# components
- output = np.interp(q_array, quantities, medians)
-
- return output
+ return np.interp(q_array, quantities, medians)
return f
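
A short check of the bounded behavior described above, assuming the function is in scope; `np.interp` clamps to the end values outside the prescribed quantity bounds:

```python
import numpy as np

medians = np.array([100.0, 80.0, 60.0])
quantities = np.array([1.0, 5.0, 10.0])
f = _prep_bounded_multilinear_median_DV(medians, quantities)

# 0.5 is below the first bound, 20.0 above the last; both are clamped.
f(np.array([0.5, 3.0, 20.0]))  # -> array([100., 90., 60.])
```
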
+
+
+def _is_for_lf_model(data: pd.DataFrame) -> bool:
+ """
+ Determine if the data are for the lf_model.
+
+ Parameters
+ ----------
+ data: pd.DataFrame
+ Data to be checked.
+
+ Returns
+ -------
+ bool
+ Whether the data are for the lf_model.
+
+ """
+ return 'LossFunction' in data.columns.get_level_values(0)
+
+
+def _is_for_ds_model(data: pd.DataFrame) -> bool:
+ """
+ Determine if the data are for the ds_model.
+
+ Parameters
+ ----------
+ data: pd.DataFrame
+ Data to be checked.
+
+ Returns
+ -------
+ bool
+ Whether the data are for the ds_model.
+
+ """
+ return 'DS1' in data.columns.get_level_values(0)
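
These two predicates route a parameter table to the loss-function or damage-state model based on the first level of its column header. A quick illustration with a synthetic header (assuming the functions above are in scope):

```python
import pandas as pd

cols = pd.MultiIndex.from_tuples([('LossFunction', 'Theta_0'), ('DV', 'Unit')])
data = pd.DataFrame(columns=cols)

_is_for_lf_model(data)  # -> True: 'LossFunction' is in the first level
_is_for_ds_model(data)  # -> False: no 'DS1' column group
```
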
diff --git a/pelicun/model/pelicun_model.py b/pelicun/model/pelicun_model.py
index 8a11acb78..767a8e1ff 100644
--- a/pelicun/model/pelicun_model.py
+++ b/pelicun/model/pelicun_model.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,44 +37,58 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This file defines the PelicunModel object and its methods.
-.. rubric:: Contents
+"""PelicunModel object and associated methods."""
-.. autosummary::
+from __future__ import annotations
- PelicunModel
-
-"""
+from typing import TYPE_CHECKING, Any
import numpy as np
import pandas as pd
-from pelicun import base
-from pelicun import uq
+from pelicun import base, uq
+
+if TYPE_CHECKING:
+ from pelicun.assessment import AssessmentBase
+ from pelicun.base import Logger
idx = base.idx
class PelicunModel:
- """
- Generic model class to manage methods shared between all models in Pelicun.
+ """Generic model class to manage methods shared between all models in Pelicun."""
- """
+ __slots__ = ['_asmnt', 'log']
- def __init__(self, assessment):
- # link the PelicunModel object to its Assessment object
- self._asmnt = assessment
+ def __init__(self, assessment: AssessmentBase) -> None:
+ """
+ Instantiate PelicunModel objects.
+
+ Parameters
+ ----------
+ assessment: Assessment
+ Parent assessment object.
+
+ """
+ # link the PelicunModel object to its AssessmentBase object
+ self._asmnt: AssessmentBase = assessment
# link logging methods as attributes enabling more
# concise syntax
- self.log_msg = self._asmnt.log.msg
- self.log_div = self._asmnt.log.div
-
- def convert_marginal_params(self, marginal_params, units, arg_units=None):
+ self.log: Logger = self._asmnt.log
+
+ def _convert_marginal_params( # noqa: C901
+ self,
+ marginal_params: pd.DataFrame,
+ units: pd.Series,
+ arg_units: pd.Series | None = None,
+ *,
+ divide_units: bool = True,
+ inverse_conversion: bool = False,
+ ) -> pd.DataFrame:
"""
- Converts the parameters of marginal distributions in a model to SI units.
+ Convert the parameters of marginal distributions in a model to SI units.
Parameters
----------
@@ -97,6 +110,17 @@ def convert_marginal_params(self, marginal_params, units, arg_units=None):
skipped. This Series provides the units of the reference entities
for each component. Use '1 EA' if you want to skip such scaling for
select components but provide arg units for others.
+ divide_units: bool, defaults to True
+ Controls how parameters specified in SimCenter notation
+ are converted. Set it to True when the argument units
+ describe the reference quantity the primary parameters
+ are defined per (the unit factor is then divided by the
+ argument unit factor), and False otherwise.
+ inverse_conversion: bool
+ If False, converts from user-defined units to internal. If
+ True, converts from internal units to user-defined.
+ Defaults to False, since the method is mostly applied on
+ user-defined data.
Returns
-------
@@ -125,10 +149,10 @@ def convert_marginal_params(self, marginal_params, units, arg_units=None):
marginal_params[col_name] = np.nan
# get a list of unique units
- unique_units = units.unique()
+ unique_units = units.dropna().unique()
# for each unit
- for unit_name in unique_units:
+ for unit_name in unique_units: # noqa: PLR1702
# get the scale factor for converting from the source unit
unit_factor = self._asmnt.calc_unit_scale_factor(unit_name)
@@ -138,7 +162,7 @@ def convert_marginal_params(self, marginal_params, units, arg_units=None):
# for each variable
for row_id in unit_ids:
# pull the parameters of the marginal distribution
- family = marginal_params.at[row_id, 'Family']
+ family = marginal_params.loc[row_id, 'Family']
if family == 'empirical':
continue
@@ -146,10 +170,10 @@ def convert_marginal_params(self, marginal_params, units, arg_units=None):
# load the theta values
theta = marginal_params.loc[
row_id, ['Theta_0', 'Theta_1', 'Theta_2']
- ].values
+ ].to_numpy()
# for each theta
- args = []
+ args: list[Any] = []
for t_i, theta_i in enumerate(theta):
# if theta_i evaluates to NaN, it is considered undefined
if pd.isna(theta_i):
@@ -183,22 +207,34 @@ def convert_marginal_params(self, marginal_params, units, arg_units=None):
arg_unit_factor = 1.0
# check if there is a need to scale due to argument units
- if not (arg_units is None):
+ if arg_units is not None:
# get the argument unit for the given marginal
arg_unit = arg_units.get(row_id)
+ assert isinstance(arg_unit, str)
if arg_unit != '1 EA':
# get the scale factor
- arg_unit_factor = self._asmnt.calc_unit_scale_factor(arg_unit)
+ arg_unit_factor = self._asmnt.calc_unit_scale_factor(
+ arg_unit
+ )
# scale arguments, if needed
for a_i, arg in enumerate(args):
if isinstance(arg, np.ndarray):
args[a_i] = arg * arg_unit_factor
- # convert the distribution parameters to SI
- theta, tr_limits = uq.scale_distribution(
- unit_factor / arg_unit_factor, family, theta, tr_limits
+ # convert units
+ if divide_units:
+ conversion_factor = unit_factor / arg_unit_factor
+ else:
+ conversion_factor = unit_factor
+ if inverse_conversion:
+ conversion_factor = 1.00 / conversion_factor
+ theta, tr_limits = uq.scale_distribution( # type: ignore
+ conversion_factor,
+ family,
+ theta,
+ tr_limits, # type: ignore
)
# convert multilinear function parameters back into strings
@@ -207,18 +243,215 @@ def convert_marginal_params(self, marginal_params, units, arg_units=None):
theta[a_i] = '|'.join(
[
','.join([f'{val:g}' for val in vals])
- for vals in (theta[a_i], args[a_i])
+ for vals in (theta[a_i], arg)
]
)
# and update the values in the DF
- marginal_params.loc[row_id, ['Theta_0', 'Theta_1', 'Theta_2']] = theta
+ marginal_params.loc[row_id, ['Theta_0', 'Theta_1', 'Theta_2']] = (
+ theta
+ )
- marginal_params.loc[
- row_id, ['TruncateLower', 'TruncateUpper']
- ] = tr_limits
+ marginal_params.loc[row_id, ['TruncateLower', 'TruncateUpper']] = (
+ tr_limits
+ )
# remove the added columns
- marginal_params = marginal_params[original_cols]
+ return marginal_params[original_cols]
+
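
A worked sketch of the factor selection above, with assumed example factors (the actual numbers come from `calc_unit_scale_factor` and the assessment's unit registry):

```python
# Assumed example factors for illustration only.
unit_factor = 2.54       # scale factor for the parameter's unit
arg_unit_factor = 100.0  # scale factor for, e.g., a '100 EA' reference

# divide_units=True: parameters are defined per reference quantity.
conversion_factor = unit_factor / arg_unit_factor  # 0.0254

# divide_units=False: only the primary unit factor applies.
conversion_factor = unit_factor  # 2.54

# inverse_conversion=True: flip direction, internal -> user-defined.
conversion_factor = 1.0 / conversion_factor
```
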
+ def _get_locations(self, loc_str: str) -> np.ndarray:
+ """
+ Parse a location string.
+
+ Parses a location string to determine specific sections of an
+ asset to be processed.
+
+ This function interprets various string formats to output
+ a list of strings representing sections or parts of the
+ asset. It can handle single numbers, ranges (e.g.,
+ '3--7'), lists separated by commas (e.g., '1,2,5'), and
+ special keywords like 'all', 'top', or 'roof'.
+
+ Parameters
+ ----------
+ loc_str: str
+ A string that describes the location or range of
+ sections in the asset. It can be a single number, a
+ range, a comma-separated list, 'all', 'top', or
+ 'roof'.
+
+ Returns
+ -------
+ numpy.ndarray
+ An array of strings, each representing a section
+ number. These sections are processed based on the
+ input string, which can denote specific sections,
+ ranges of sections, or special keywords.
+
+ Raises
+ ------
+ ValueError
+ If the location string cannot be parsed into any
+ recognized format, a ValueError is raised with a
+ message indicating the problematic string.
+
+ Examples
+ --------
+ Given an asset with multiple sections:
+
+ >>> _get_locations('5')
+ array(['5'])
+
+ >>> _get_locations('3--7')
+ array(['3', '4', '5', '6', '7'])
+
+ >>> _get_locations('1,2,5')
+ array(['1', '2', '5'])
+
+ >>> _get_locations('all')
+ array(['1', '2', '3', ..., '10'])
+
+ >>> _get_locations('top')
+ array(['10'])
+
+ >>> _get_locations('roof')
+ array(['11'])
+
+ """
+ try:
+ res = str(int(float(loc_str)))
+ return np.array([res])
+
+ except ValueError as exc:
+ stories = self._asmnt.stories
- return marginal_params
+ if '--' in loc_str:
+ s_low, s_high = loc_str.split('--')
+ s_low = self._get_locations(s_low)[0]
+ s_high = self._get_locations(s_high)[0]
+ return np.arange(int(s_low), int(s_high) + 1).astype(str)
+
+ if ',' in loc_str:
+ return np.array(loc_str.split(','), dtype=int).astype(str)
+
+ if loc_str == 'all':
+ assert stories is not None
+ return np.arange(1, stories + 1).astype(str)
+
+ if loc_str == 'top':
+ assert stories is not None
+ return np.array([stories]).astype(str)
+
+ if loc_str == 'roof':
+ assert stories is not None
+ return np.array([stories + 1]).astype(str)
+
+ msg = f'Cannot parse location string: {loc_str}'
+ raise ValueError(msg) from exc
+
+ def _get_directions(self, dir_str: str | None) -> np.ndarray:
+ """
+ Parse a direction string.
+
+ Parses a direction string to determine specific orientations
+ or directions applicable within an asset.
+
+ This function processes direction descriptions to output
+ an array of strings, each representing a specific
+ direction. It can handle single numbers, ranges (e.g.,
+ '1--3'), lists separated by commas (e.g., '1,2,5'), and
+ null values that default to '1'.
+
+ Parameters
+ ----------
+ dir_str: str or None
+ A string that describes the direction or range of
+ directions in the asset. It can be a single number, a
+ range, a comma-separated list, or it can be null,
+ which defaults to representing a single default
+ direction ('1').
+
+ Returns
+ -------
+ numpy.ndarray
+ An array of strings, each representing a
+ direction. These directions are processed based on the
+ input string, which can denote specific directions,
+ ranges of directions, or a list.
+
+ Raises
+ ------
+ ValueError
+ If the direction string cannot be parsed into any
+ recognized format, a ValueError is raised with a
+ message indicating the problematic string.
+
+ Examples
+ --------
+ Given an asset with multiple potential orientations:
+
+ >>> _get_directions(None)
+ array(['1'])
+
+ >>> _get_directions('2')
+ array(['2'])
+
+ >>> _get_directions('1--3')
+ array(['1', '2', '3'])
+
+ >>> _get_directions('1,2,5')
+ array(['1', '2', '5'])
+
+ """
+ if pd.isna(dir_str):
+ return np.ones(1, dtype=int).astype(str)
+
+ try:
+ res = str(int(float(dir_str))) # type: ignore
+ return np.array([res])
+
+ except ValueError as exc:
+ if ',' in dir_str: # type: ignore
+ return np.array(
+ dir_str.split(','), # type: ignore
+ dtype=int,
+ ).astype(str) # type: ignore
+
+ if '--' in dir_str: # type: ignore
+ d_low, d_high = dir_str.split('--') # type: ignore
+ d_low = self._get_directions(d_low)[0]
+ d_high = self._get_directions(d_high)[0]
+ return np.arange(int(d_low), int(d_high) + 1).astype(str)
+
+ msg = f'Cannot parse direction string: {dir_str}'
+ raise ValueError(msg) from exc
+
+ def query_error_setup(self, path: str) -> str | bool:
+ """
+ Obtain error setup settings.
+
+ Obtain settings associated with the treatment of errors and
+ warnings.
+
+ Parameters
+ ----------
+ path: str
+ Path to a setting.
+
+ Returns
+ -------
+ str | bool
+ Value of the setting.
+
+ Raises
+ ------
+ KeyError
+ If the path does not point to a setting.
+
+ """
+ error_setup = self._asmnt.options.error_setup
+ value = base.get(error_setup, path)
+ if value is None:
+ raise KeyError
+ return value
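
`base.get` serves here as a nested lookup into the error-setup options; a minimal sketch of the assumed behavior, where the '/'-separated path and the option names are illustrative only:

```python
# Hypothetical illustration; the real path syntax lives in base.get.
error_setup = {'Loss': {'Warnings': {'RaiseOnUnknownKeys': True}}}


def get_sketch(settings: dict, path: str):
    value = settings
    for key in path.split('/'):
        if not isinstance(value, dict) or key not in value:
            return None  # query_error_setup turns this into a KeyError
        value = value[key]
    return value


get_sketch(error_setup, 'Loss/Warnings/RaiseOnUnknownKeys')  # -> True
get_sketch(error_setup, 'Loss/Missing')                      # -> None
```
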
diff --git a/pelicun/pelicun_warnings.py b/pelicun/pelicun_warnings.py
new file mode 100644
index 000000000..399f32ab7
--- /dev/null
+++ b/pelicun/pelicun_warnings.py
@@ -0,0 +1,83 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""Pelicun warning and error classes."""
+
+from __future__ import annotations
+
+
+class PelicunWarning(Warning):
+ """Custom warning for specific use in the Pelicun project."""
+
+
+class PelicunInvalidConfigError(Exception):
+ """
+ Exception raised for errors in the configuration of Pelicun.
+
+ Attributes
+ ----------
+ message: str
+ Explanation of the error.
+
+ """
+
+ def __init__(
+ self, message: str = 'Invalid options in configuration file.'
+ ) -> None:
+ """Instantiate the error."""
+ self.message = message
+ super().__init__(self.message)
+
+
+class InconsistentUnitsError(Exception):
+ """
+ Exception raised for inconsistent or invalid units.
+
+ Attributes
+ ----------
+ message: str
+ Explanation of the error.
+
+ """
+
+ def __init__(
+ self, message: str = 'Inconsistent units.', file: str | None = None
+ ) -> None:
+ """Instantiate the error."""
+ self.message: str
+
+ if file:
+ self.message = f'{message}\nFile: {file}'
+ else:
+ self.message = message
+ super().__init__(self.message)
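
A short usage sketch for the error classes defined above:

```python
try:
    raise InconsistentUnitsError(file='loss_repair_DB.csv')
except InconsistentUnitsError as exc:
    print(exc)
    # Inconsistent units.
    # File: loss_repair_DB.csv
```
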
diff --git a/pelicun/resources/SimCenterDBDL/combined_loss_matrices/Wind_Flood_Hazus_HU_bldg.csv b/pelicun/resources/SimCenterDBDL/combined_loss_matrices/Wind_Flood_Hazus_HU_bldg.csv
new file mode 100644
index 000000000..6abdd3dc3
--- /dev/null
+++ b/pelicun/resources/SimCenterDBDL/combined_loss_matrices/Wind_Flood_Hazus_HU_bldg.csv
@@ -0,0 +1,11 @@
+0.0,0.10,0.200,0.30,0.40,0.50,0.600,0.70,0.80,0.90,1.0
+0.1,0.19,0.285,0.37,0.46,0.55,0.640,0.73,0.82,0.91,1.0
+0.2,0.28,0.360,0.44,0.52,0.60,0.680,0.76,0.84,0.92,1.0
+0.3,0.37,0.440,0.51,0.58,0.65,0.720,0.79,0.86,0.93,1.0
+0.4,0.46,0.520,0.58,0.64,0.70,0.760,0.82,0.88,0.94,1.0
+0.5,0.55,0.600,0.65,0.70,0.75,0.806,0.85,0.90,0.95,1.0
+0.6,0.64,0.680,0.72,0.76,0.80,0.840,0.88,0.92,0.96,1.0
+0.7,0.73,0.760,0.79,0.82,0.85,0.880,0.91,0.94,0.97,1.0
+0.8,0.82,0.840,0.86,0.88,0.90,0.920,0.94,0.96,0.98,1.0
+0.9,0.91,0.920,0.93,0.94,0.95,0.960,0.97,0.98,0.99,1.0
+1.0,1.00,1.000,1.00,1.00,1.00,1.000,1.00,1.00,1.00,1.0
diff --git a/pelicun/resources/SimCenterDBDL/combined_loss_matrices/note.md b/pelicun/resources/SimCenterDBDL/combined_loss_matrices/note.md
new file mode 100644
index 000000000..f13af4c86
--- /dev/null
+++ b/pelicun/resources/SimCenterDBDL/combined_loss_matrices/note.md
@@ -0,0 +1 @@
+The convention will be that the first source of loss in the file name will correspond to the rows of the CSV file and the second to the columns.
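
Under that convention, the Wind_Flood matrix above is an 11x11 grid over implicit 0.0-1.0 loss-ratio axes in 0.1 steps, with wind on the rows and flood on the columns. A nearest-grid lookup sketch, assuming the layout shown (a production version would interpolate):

```python
import numpy as np
import pandas as pd

# The CSV has no header row; the 0.0-1.0 axes are implicit.
matrix = pd.read_csv('Wind_Flood_Hazus_HU_bldg.csv', header=None).to_numpy()
axis = np.linspace(0.0, 1.0, matrix.shape[0])


def combined_ratio(wind_lr: float, flood_lr: float) -> float:
    """Nearest-grid lookup of the combined loss ratio."""
    i = int(np.argmin(np.abs(axis - wind_lr)))
    j = int(np.argmin(np.abs(axis - flood_lr)))
    return float(matrix[i, j])


combined_ratio(0.5, 0.3)  # -> 0.65 in the matrix above
```
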
diff --git a/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_bldg.csv b/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_bldg.csv
index 4a880b735..c56104a50 100644
--- a/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_bldg.csv
+++ b/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_bldg.csv
@@ -1,169 +1,169 @@
ID,Incomplete,Quantity-Unit,DV-Unit,DS1-Theta_0,DS2-Theta_0,DS3-Theta_0,DS4-Theta_0,DS5-Theta_0
-STR.RES1-Cost,0,1 EA,loss_ratio,0.5,2.3,11.7,23.4,23.4
-STR.RES1-Time,0,1 EA,day,2.0,30.0,90.0,180.0,180.0
-STR.RES2-Cost,0,1 EA,loss_ratio,0.4,2.4,7.3,24.4,24.4
-STR.RES2-Time,0,1 EA,day,2.0,10.0,30.0,60.0,60.0
-STR.RES3-Cost,0,1 EA,loss_ratio,0.3,1.4,6.9,13.8,13.8
-STR.RES3-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.RES4-Cost,0,1 EA,loss_ratio,0.2,1.4,6.8,13.6,13.6
-STR.RES4-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.RES5-Cost,0,1 EA,loss_ratio,0.4,1.9,9.4,18.8,18.8
-STR.RES5-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.RES6-Cost,0,1 EA,loss_ratio,0.4,1.8,9.2,18.4,18.4
-STR.RES6-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.COM1-Cost,0,1 EA,loss_ratio,0.6,2.9,14.7,29.4,29.4
-STR.COM1-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM2-Cost,0,1 EA,loss_ratio,0.6,3.2,16.2,32.4,32.4
-STR.COM2-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM3-Cost,0,1 EA,loss_ratio,0.3,1.6,8.1,16.2,16.2
-STR.COM3-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM4-Cost,0,1 EA,loss_ratio,0.4,1.9,9.6,19.2,19.2
-STR.COM4-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.COM5-Cost,0,1 EA,loss_ratio,0.3,1.4,6.9,13.8,13.8
-STR.COM5-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM6-Cost,0,1 EA,loss_ratio,0.2,1.4,7.0,14.0,14.0
-STR.COM6-Time,0,1 EA,day,10.0,45.0,180.0,360.0,360.0
-STR.COM7-Cost,0,1 EA,loss_ratio,0.3,1.4,7.2,14.4,14.4
-STR.COM7-Time,0,1 EA,day,10.0,45.0,180.0,240.0,240.0
-STR.COM8-Cost,0,1 EA,loss_ratio,0.2,1.0,5.0,10.0,10.0
-STR.COM8-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM9-Cost,0,1 EA,loss_ratio,0.3,1.2,6.1,12.2,12.2
-STR.COM9-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.COM10-Cost,0,1 EA,loss_ratio,1.3,6.1,30.4,60.9,60.9
-STR.COM10-Time,0,1 EA,day,2.0,20.0,80.0,160.0,160.0
-STR.IND1-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND2-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND2-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND3-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND3-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND4-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND4-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND5-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND5-Time,0,1 EA,day,20.0,45.0,180.0,360.0,360.0
-STR.IND6-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND6-Time,0,1 EA,day,5.0,20.0,80.0,160.0,160.0
-STR.AGR1-Cost,0,1 EA,loss_ratio,0.8,4.6,23.1,46.2,46.2
-STR.AGR1-Time,0,1 EA,day,2.0,10.0,30.0,60.0,60.0
-STR.REL1-Cost,0,1 EA,loss_ratio,0.3,2.0,9.9,19.8,19.8
-STR.REL1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.GOV1-Cost,0,1 EA,loss_ratio,0.3,1.8,9.0,17.9,17.9
-STR.GOV1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.GOV2-Cost,0,1 EA,loss_ratio,0.3,1.5,7.7,15.3,15.3
-STR.GOV2-Time,0,1 EA,day,5.0,20.0,90.0,180.0,180.0
-STR.EDU1-Cost,0,1 EA,loss_ratio,0.4,1.9,9.5,18.9,18.9
-STR.EDU1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.EDU2-Cost,0,1 EA,loss_ratio,0.2,1.1,5.5,11.0,11.0
-STR.EDU2-Time,0,1 EA,day,10.0,45.0,180.0,360.0,360.0
-NSD.RES1-Cost,0,1 EA,loss_ratio,1.0,5.0,25.0,50.0,
-NSD.RES2-Cost,0,1 EA,loss_ratio,0.8,3.8,18.9,37.8,
-NSD.RES3-Cost,0,1 EA,loss_ratio,0.9,4.3,21.3,42.5,
-NSD.RES4-Cost,0,1 EA,loss_ratio,0.9,4.3,21.6,43.2,
-NSD.RES5-Cost,0,1 EA,loss_ratio,0.8,4.0,20.0,40.0,
-NSD.RES6-Cost,0,1 EA,loss_ratio,0.8,4.1,20.4,40.8,
-NSD.COM1-Cost,0,1 EA,loss_ratio,0.6,2.7,13.8,27.5,
-NSD.COM2-Cost,0,1 EA,loss_ratio,0.6,2.6,13.2,26.5,
-NSD.COM3-Cost,0,1 EA,loss_ratio,0.7,3.4,16.9,33.8,
-NSD.COM4-Cost,0,1 EA,loss_ratio,0.7,3.3,16.4,32.9,
-NSD.COM5-Cost,0,1 EA,loss_ratio,0.7,3.4,17.2,34.5,
-NSD.COM6-Cost,0,1 EA,loss_ratio,0.8,3.5,17.4,34.7,
-NSD.COM7-Cost,0,1 EA,loss_ratio,0.7,3.4,17.2,34.4,
-NSD.COM8-Cost,0,1 EA,loss_ratio,0.7,3.6,17.8,35.6,
-NSD.COM9-Cost,0,1 EA,loss_ratio,0.7,3.5,17.6,35.1,
-NSD.COM10-Cost,0,1 EA,loss_ratio,0.4,1.7,8.7,17.4,
-NSD.IND1-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND2-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND3-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND4-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND5-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND6-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.AGR1-Cost,0,1 EA,loss_ratio,0.0,0.8,3.8,7.7,
-NSD.REL1-Cost,0,1 EA,loss_ratio,0.8,3.3,16.3,32.6,
-NSD.GOV1-Cost,0,1 EA,loss_ratio,0.7,3.3,16.4,32.8,
-NSD.GOV2-Cost,0,1 EA,loss_ratio,0.7,3.4,17.1,34.2,
-NSD.EDU1-Cost,0,1 EA,loss_ratio,0.9,4.9,24.3,48.7,
-NSD.EDU2-Cost,0,1 EA,loss_ratio,1.2,6.0,30.0,60.0,
-NSA.RES1-Cost,0,1 EA,loss_ratio,0.5,2.7,8.0,26.6,
-NSA.RES2-Cost,0,1 EA,loss_ratio,0.8,3.8,11.3,37.8,
-NSA.RES3-Cost,0,1 EA,loss_ratio,0.8,4.3,13.1,43.7,
-NSA.RES4-Cost,0,1 EA,loss_ratio,0.9,4.3,13.0,43.2,
-NSA.RES5-Cost,0,1 EA,loss_ratio,0.8,4.1,12.4,41.2,
-NSA.RES6-Cost,0,1 EA,loss_ratio,0.8,4.1,12.2,40.8,
-NSA.COM1-Cost,0,1 EA,loss_ratio,0.8,4.4,12.9,43.1,
-NSA.COM2-Cost,0,1 EA,loss_ratio,0.8,4.2,12.4,41.1,
-NSA.COM3-Cost,0,1 EA,loss_ratio,1.0,5.0,15.0,50.0,
-NSA.COM4-Cost,0,1 EA,loss_ratio,0.9,4.8,14.4,47.9,
-NSA.COM5-Cost,0,1 EA,loss_ratio,1.0,5.2,15.5,51.7,
-NSA.COM6-Cost,0,1 EA,loss_ratio,1.0,5.1,15.4,51.3,
-NSA.COM7-Cost,0,1 EA,loss_ratio,1.0,5.2,15.3,51.2,
-NSA.COM8-Cost,0,1 EA,loss_ratio,1.1,5.4,16.3,54.4,
-NSA.COM9-Cost,0,1 EA,loss_ratio,1.0,5.3,15.8,52.7,
-NSA.COM10-Cost,0,1 EA,loss_ratio,0.3,2.2,6.5,21.7,
-NSA.IND1-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND2-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND3-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND4-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND5-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND6-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.AGR1-Cost,0,1 EA,loss_ratio,0.8,4.6,13.8,46.1,
-NSA.REL1-Cost,0,1 EA,loss_ratio,0.9,4.7,14.3,47.6,
-NSA.GOV1-Cost,0,1 EA,loss_ratio,1.0,4.9,14.8,49.3,
-NSA.GOV2-Cost,0,1 EA,loss_ratio,1.0,5.1,15.1,50.5,
-NSA.EDU1-Cost,0,1 EA,loss_ratio,0.7,3.2,9.7,32.4,
-NSA.EDU2-Cost,0,1 EA,loss_ratio,0.6,2.9,8.7,29.0,
-LF.RES1-Cost,0,1 EA,loss_ratio,2.0,10.0,44.7,100.0,100.0
-LF.RES1-Time,0,1 EA,day,2.0,30.0,90.0,180.0,180.0
-LF.RES2-Cost,0,1 EA,loss_ratio,2.0,10.0,37.5,100.0,100.0
-LF.RES2-Time,0,1 EA,day,2.0,10.0,30.0,60.0,60.0
-LF.RES3-Cost,0,1 EA,loss_ratio,2.0,10.0,41.3,100.0,100.0
-LF.RES3-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-LF.RES4-Cost,0,1 EA,loss_ratio,2.0,10.0,41.4,100.0,100.0
-LF.RES4-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-LF.RES5-Cost,0,1 EA,loss_ratio,2.0,10.0,41.8,100.0,100.0
-LF.RES5-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-LF.RES6-Cost,0,1 EA,loss_ratio,2.0,10.0,41.8,100.0,100.0
-LF.RES6-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-LF.COM1-Cost,0,1 EA,loss_ratio,2.0,10.0,41.4,100.0,100.0
-LF.COM1-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-LF.COM2-Cost,0,1 EA,loss_ratio,2.0,10.0,41.8,100.0,100.0
-LF.COM2-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-LF.COM3-Cost,0,1 EA,loss_ratio,2.0,10.0,40.0,100.0,100.0
-LF.COM3-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-LF.COM4-Cost,0,1 EA,loss_ratio,2.0,10.0,40.4,100.0,100.0
-LF.COM4-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-LF.COM5-Cost,0,1 EA,loss_ratio,2.0,10.0,39.6,100.0,100.0
-LF.COM5-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-LF.COM6-Cost,0,1 EA,loss_ratio,2.0,10.0,39.8,100.0,100.0
-LF.COM6-Time,0,1 EA,day,10.0,45.0,180.0,360.0,360.0
-LF.COM7-Cost,0,1 EA,loss_ratio,2.0,10.0,39.7,100.0,100.0
-LF.COM7-Time,0,1 EA,day,10.0,45.0,180.0,240.0,240.0
-LF.COM8-Cost,0,1 EA,loss_ratio,2.0,10.0,39.1,100.0,100.0
-LF.COM8-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-LF.COM9-Cost,0,1 EA,loss_ratio,2.0,10.0,39.5,100.0,100.0
-LF.COM9-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-LF.COM10-Cost,0,1 EA,loss_ratio,2.0,10.0,45.6,100.0,100.0
-LF.COM10-Time,0,1 EA,day,2.0,20.0,80.0,160.0,160.0
-LF.IND1-Cost,0,1 EA,loss_ratio,2.0,10.0,35.5,100.0,100.0
-LF.IND1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.IND2-Cost,0,1 EA,loss_ratio,2.0,10.0,35.5,100.0,100.0
-LF.IND2-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.IND3-Cost,0,1 EA,loss_ratio,2.0,10.0,35.5,100.0,100.0
-LF.IND3-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.IND4-Cost,0,1 EA,loss_ratio,2.0,10.0,35.5,100.0,100.0
-LF.IND4-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.IND5-Cost,0,1 EA,loss_ratio,2.0,10.0,35.5,100.0,100.0
-LF.IND5-Time,0,1 EA,day,20.0,45.0,180.0,360.0,360.0
-LF.IND6-Cost,0,1 EA,loss_ratio,2.0,10.0,35.5,100.0,100.0
-LF.IND6-Time,0,1 EA,day,5.0,20.0,80.0,160.0,160.0
-LF.AGR1-Cost,0,1 EA,loss_ratio,1.6,10.0,40.7,100.0,100.0
-LF.AGR1-Time,0,1 EA,day,2.0,10.0,30.0,60.0,60.0
-LF.REL1-Cost,0,1 EA,loss_ratio,2.0,10.0,40.5,100.0,100.0
-LF.REL1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.GOV1-Cost,0,1 EA,loss_ratio,2.0,10.0,40.2,100.0,100.0
-LF.GOV1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.GOV2-Cost,0,1 EA,loss_ratio,2.0,10.0,39.9,100.0,100.0
-LF.GOV2-Time,0,1 EA,day,5.0,20.0,90.0,180.0,180.0
-LF.EDU1-Cost,0,1 EA,loss_ratio,2.0,10.0,43.5,100.0,100.0
-LF.EDU1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-LF.EDU2-Cost,0,1 EA,loss_ratio,2.0,10.0,44.2,100.0,100.0
-LF.EDU2-Time,0,1 EA,day,10.0,45.0,180.0,360.0,360.0
+STR.RES1-Cost,0,1 EA,loss_ratio,0.005,0.023,0.117,0.234,0.234
+STR.RES1-Time,0,1 EA,day,2,30,90,180,180
+STR.RES2-Cost,0,1 EA,loss_ratio,0.004,0.024,0.073,0.244,0.244
+STR.RES2-Time,0,1 EA,day,2,10,30,60,60
+STR.RES3-Cost,0,1 EA,loss_ratio,0.003,0.014,0.069,0.138,0.138
+STR.RES3-Time,0,1 EA,day,5,30,120,240,240
+STR.RES4-Cost,0,1 EA,loss_ratio,0.002,0.014,0.068,0.136,0.136
+STR.RES4-Time,0,1 EA,day,5,30,120,240,240
+STR.RES5-Cost,0,1 EA,loss_ratio,0.004,0.019,0.094,0.188,0.188
+STR.RES5-Time,0,1 EA,day,5,30,120,240,240
+STR.RES6-Cost,0,1 EA,loss_ratio,0.004,0.018,0.092,0.184,0.184
+STR.RES6-Time,0,1 EA,day,5,30,120,240,240
+STR.COM1-Cost,0,1 EA,loss_ratio,0.006,0.029,0.147,0.294,0.294
+STR.COM1-Time,0,1 EA,day,5,30,90,180,180
+STR.COM2-Cost,0,1 EA,loss_ratio,0.006,0.032,0.162,0.324,0.324
+STR.COM2-Time,0,1 EA,day,5,30,90,180,180
+STR.COM3-Cost,0,1 EA,loss_ratio,0.003,0.016,0.081,0.162,0.162
+STR.COM3-Time,0,1 EA,day,5,30,90,180,180
+STR.COM4-Cost,0,1 EA,loss_ratio,0.004,0.019,0.096,0.192,0.192
+STR.COM4-Time,0,1 EA,day,5,30,120,240,240
+STR.COM5-Cost,0,1 EA,loss_ratio,0.003,0.014,0.069,0.138,0.138
+STR.COM5-Time,0,1 EA,day,5,30,90,180,180
+STR.COM6-Cost,0,1 EA,loss_ratio,0.002,0.014,0.070,0.140,0.140
+STR.COM6-Time,0,1 EA,day,10,45,180,360,360
+STR.COM7-Cost,0,1 EA,loss_ratio,0.003,0.014,0.072,0.144,0.144
+STR.COM7-Time,0,1 EA,day,10,45,180,240,240
+STR.COM8-Cost,0,1 EA,loss_ratio,0.002,0.010,0.050,0.100,0.100
+STR.COM8-Time,0,1 EA,day,5,30,90,180,180
+STR.COM9-Cost,0,1 EA,loss_ratio,0.003,0.012,0.061,0.122,0.122
+STR.COM9-Time,0,1 EA,day,5,30,120,240,240
+STR.COM10-Cost,0,1 EA,loss_ratio,0.013,0.061,0.304,0.609,0.609
+STR.COM10-Time,0,1 EA,day,2,20,80,160,160
+STR.IND1-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND1-Time,0,1 EA,day,10,30,120,240,240
+STR.IND2-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND2-Time,0,1 EA,day,10,30,120,240,240
+STR.IND3-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND3-Time,0,1 EA,day,10,30,120,240,240
+STR.IND4-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND4-Time,0,1 EA,day,10,30,120,240,240
+STR.IND5-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND5-Time,0,1 EA,day,20,45,180,360,360
+STR.IND6-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND6-Time,0,1 EA,day,5,20,80,160,160
+STR.AGR1-Cost,0,1 EA,loss_ratio,0.008,0.046,0.231,0.462,0.462
+STR.AGR1-Time,0,1 EA,day,2,10,30,60,60
+STR.REL1-Cost,0,1 EA,loss_ratio,0.003,0.020,0.099,0.198,0.198
+STR.REL1-Time,0,1 EA,day,10,30,120,240,240
+STR.GOV1-Cost,0,1 EA,loss_ratio,0.003,0.018,0.090,0.179,0.179
+STR.GOV1-Time,0,1 EA,day,10,30,120,240,240
+STR.GOV2-Cost,0,1 EA,loss_ratio,0.003,0.015,0.077,0.153,0.153
+STR.GOV2-Time,0,1 EA,day,5,20,90,180,180
+STR.EDU1-Cost,0,1 EA,loss_ratio,0.004,0.019,0.095,0.189,0.189
+STR.EDU1-Time,0,1 EA,day,10,30,120,240,240
+STR.EDU2-Cost,0,1 EA,loss_ratio,0.002,0.011,0.055,0.110,0.110
+STR.EDU2-Time,0,1 EA,day,10,45,180,360,360
+NSD.RES1-Cost,0,1 EA,loss_ratio,0.010,0.050,0.250,0.500,
+NSD.RES2-Cost,0,1 EA,loss_ratio,0.008,0.038,0.189,0.378,
+NSD.RES3-Cost,0,1 EA,loss_ratio,0.009,0.043,0.213,0.425,
+NSD.RES4-Cost,0,1 EA,loss_ratio,0.009,0.043,0.216,0.432,
+NSD.RES5-Cost,0,1 EA,loss_ratio,0.008,0.040,0.200,0.400,
+NSD.RES6-Cost,0,1 EA,loss_ratio,0.008,0.041,0.204,0.408,
+NSD.COM1-Cost,0,1 EA,loss_ratio,0.006,0.027,0.138,0.275,
+NSD.COM2-Cost,0,1 EA,loss_ratio,0.006,0.026,0.132,0.265,
+NSD.COM3-Cost,0,1 EA,loss_ratio,0.007,0.034,0.169,0.338,
+NSD.COM4-Cost,0,1 EA,loss_ratio,0.007,0.033,0.164,0.329,
+NSD.COM5-Cost,0,1 EA,loss_ratio,0.007,0.034,0.172,0.345,
+NSD.COM6-Cost,0,1 EA,loss_ratio,0.008,0.035,0.174,0.347,
+NSD.COM7-Cost,0,1 EA,loss_ratio,0.007,0.034,0.172,0.344,
+NSD.COM8-Cost,0,1 EA,loss_ratio,0.007,0.036,0.178,0.356,
+NSD.COM9-Cost,0,1 EA,loss_ratio,0.007,0.035,0.176,0.351,
+NSD.COM10-Cost,0,1 EA,loss_ratio,0.004,0.017,0.087,0.174,
+NSD.IND1-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND2-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND3-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND4-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND5-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND6-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.AGR1-Cost,0,1 EA,loss_ratio,0.000,0.008,0.038,0.077,
+NSD.REL1-Cost,0,1 EA,loss_ratio,0.008,0.033,0.163,0.326,
+NSD.GOV1-Cost,0,1 EA,loss_ratio,0.007,0.033,0.164,0.328,
+NSD.GOV2-Cost,0,1 EA,loss_ratio,0.007,0.034,0.171,0.342,
+NSD.EDU1-Cost,0,1 EA,loss_ratio,0.009,0.049,0.243,0.487,
+NSD.EDU2-Cost,0,1 EA,loss_ratio,0.012,0.060,0.300,0.600,
+NSA.RES1-Cost,0,1 EA,loss_ratio,0.005,0.027,0.080,0.266,
+NSA.RES2-Cost,0,1 EA,loss_ratio,0.008,0.038,0.113,0.378,
+NSA.RES3-Cost,0,1 EA,loss_ratio,0.008,0.043,0.131,0.437,
+NSA.RES4-Cost,0,1 EA,loss_ratio,0.009,0.043,0.130,0.432,
+NSA.RES5-Cost,0,1 EA,loss_ratio,0.008,0.041,0.124,0.412,
+NSA.RES6-Cost,0,1 EA,loss_ratio,0.008,0.041,0.122,0.408,
+NSA.COM1-Cost,0,1 EA,loss_ratio,0.008,0.044,0.129,0.431,
+NSA.COM2-Cost,0,1 EA,loss_ratio,0.008,0.042,0.124,0.411,
+NSA.COM3-Cost,0,1 EA,loss_ratio,0.010,0.050,0.150,0.500,
+NSA.COM4-Cost,0,1 EA,loss_ratio,0.009,0.048,0.144,0.479,
+NSA.COM5-Cost,0,1 EA,loss_ratio,0.010,0.052,0.155,0.517,
+NSA.COM6-Cost,0,1 EA,loss_ratio,0.010,0.051,0.154,0.513,
+NSA.COM7-Cost,0,1 EA,loss_ratio,0.010,0.052,0.153,0.512,
+NSA.COM8-Cost,0,1 EA,loss_ratio,0.011,0.054,0.163,0.544,
+NSA.COM9-Cost,0,1 EA,loss_ratio,0.010,0.053,0.158,0.527,
+NSA.COM10-Cost,0,1 EA,loss_ratio,0.003,0.022,0.065,0.217,
+NSA.IND1-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND2-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND3-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND4-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND5-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND6-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.AGR1-Cost,0,1 EA,loss_ratio,0.008,0.046,0.138,0.461,
+NSA.REL1-Cost,0,1 EA,loss_ratio,0.009,0.047,0.143,0.476,
+NSA.GOV1-Cost,0,1 EA,loss_ratio,0.010,0.049,0.148,0.493,
+NSA.GOV2-Cost,0,1 EA,loss_ratio,0.010,0.051,0.151,0.505,
+NSA.EDU1-Cost,0,1 EA,loss_ratio,0.007,0.032,0.097,0.324,
+NSA.EDU2-Cost,0,1 EA,loss_ratio,0.006,0.029,0.087,0.290,
+LF.RES1-Cost,0,1 EA,loss_ratio,0.020,0.100,0.447,1.000,1.000
+LF.RES1-Time,0,1 EA,day,2,30,90,180,180
+LF.RES2-Cost,0,1 EA,loss_ratio,0.020,0.100,0.375,1.000,1.000
+LF.RES2-Time,0,1 EA,day,2,10,30,60,60
+LF.RES3-Cost,0,1 EA,loss_ratio,0.020,0.100,0.413,1.000,1.000
+LF.RES3-Time,0,1 EA,day,5,30,120,240,240
+LF.RES4-Cost,0,1 EA,loss_ratio,0.020,0.100,0.414,1.000,1.000
+LF.RES4-Time,0,1 EA,day,5,30,120,240,240
+LF.RES5-Cost,0,1 EA,loss_ratio,0.020,0.100,0.418,1.000,1.000
+LF.RES5-Time,0,1 EA,day,5,30,120,240,240
+LF.RES6-Cost,0,1 EA,loss_ratio,0.020,0.100,0.418,1.000,1.000
+LF.RES6-Time,0,1 EA,day,5,30,120,240,240
+LF.COM1-Cost,0,1 EA,loss_ratio,0.020,0.100,0.414,1.000,1.000
+LF.COM1-Time,0,1 EA,day,5,30,90,180,180
+LF.COM2-Cost,0,1 EA,loss_ratio,0.020,0.100,0.418,1.000,1.000
+LF.COM2-Time,0,1 EA,day,5,30,90,180,180
+LF.COM3-Cost,0,1 EA,loss_ratio,0.020,0.100,0.400,1.000,1.000
+LF.COM3-Time,0,1 EA,day,5,30,90,180,180
+LF.COM4-Cost,0,1 EA,loss_ratio,0.020,0.100,0.404,1.000,1.000
+LF.COM4-Time,0,1 EA,day,5,30,120,240,240
+LF.COM5-Cost,0,1 EA,loss_ratio,0.020,0.100,0.396,1.000,1.000
+LF.COM5-Time,0,1 EA,day,5,30,90,180,180
+LF.COM6-Cost,0,1 EA,loss_ratio,0.020,0.100,0.398,1.000,1.000
+LF.COM6-Time,0,1 EA,day,10,45,180,360,360
+LF.COM7-Cost,0,1 EA,loss_ratio,0.020,0.100,0.397,1.000,1.000
+LF.COM7-Time,0,1 EA,day,10,45,180,240,240
+LF.COM8-Cost,0,1 EA,loss_ratio,0.020,0.100,0.391,1.000,1.000
+LF.COM8-Time,0,1 EA,day,5,30,90,180,180
+LF.COM9-Cost,0,1 EA,loss_ratio,0.020,0.100,0.395,1.000,1.000
+LF.COM9-Time,0,1 EA,day,5,30,120,240,240
+LF.COM10-Cost,0,1 EA,loss_ratio,0.020,0.100,0.456,1.000,1.000
+LF.COM10-Time,0,1 EA,day,2,20,80,160,160
+LF.IND1-Cost,0,1 EA,loss_ratio,0.020,0.100,0.355,1.000,1.000
+LF.IND1-Time,0,1 EA,day,10,30,120,240,240
+LF.IND2-Cost,0,1 EA,loss_ratio,0.020,0.100,0.355,1.000,1.000
+LF.IND2-Time,0,1 EA,day,10,30,120,240,240
+LF.IND3-Cost,0,1 EA,loss_ratio,0.020,0.100,0.355,1.000,1.000
+LF.IND3-Time,0,1 EA,day,10,30,120,240,240
+LF.IND4-Cost,0,1 EA,loss_ratio,0.020,0.100,0.355,1.000,1.000
+LF.IND4-Time,0,1 EA,day,10,30,120,240,240
+LF.IND5-Cost,0,1 EA,loss_ratio,0.020,0.100,0.355,1.000,1.000
+LF.IND5-Time,0,1 EA,day,20,45,180,360,360
+LF.IND6-Cost,0,1 EA,loss_ratio,0.020,0.100,0.355,1.000,1.000
+LF.IND6-Time,0,1 EA,day,5,20,80,160,160
+LF.AGR1-Cost,0,1 EA,loss_ratio,0.016,0.100,0.407,1.000,1.000
+LF.AGR1-Time,0,1 EA,day,2,10,30,60,60
+LF.REL1-Cost,0,1 EA,loss_ratio,0.020,0.100,0.405,1.000,1.000
+LF.REL1-Time,0,1 EA,day,10,30,120,240,240
+LF.GOV1-Cost,0,1 EA,loss_ratio,0.020,0.100,0.402,1.000,1.000
+LF.GOV1-Time,0,1 EA,day,10,30,120,240,240
+LF.GOV2-Cost,0,1 EA,loss_ratio,0.020,0.100,0.399,1.000,1.000
+LF.GOV2-Time,0,1 EA,day,5,20,90,180,180
+LF.EDU1-Cost,0,1 EA,loss_ratio,0.020,0.100,0.435,1.000,1.000
+LF.EDU1-Time,0,1 EA,day,10,30,120,240,240
+LF.EDU2-Cost,0,1 EA,loss_ratio,0.020,0.100,0.442,1.000,1.000
+LF.EDU2-Time,0,1 EA,day,10,45,180,360,360
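
Note that the Cost rows above were rescaled from percentages to ratios (consistent with the `loss_ratio` DV-Unit), while the Time rows only dropped the redundant decimals. A one-line consistency check on a sample value:

```python
# STR.RES1-Cost DS1-Theta_0: 0.5 (%) in the old rows, 0.005 in the new.
old_percent, new_ratio = 0.5, 0.005
assert abs(old_percent / 100.0 - new_ratio) < 1e-12
```
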
diff --git a/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_story.csv b/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_story.csv
index 26242efae..9e9957fbe 100644
--- a/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_story.csv
+++ b/pelicun/resources/SimCenterDBDL/loss_repair_DB_Hazus_EQ_story.csv
@@ -1,113 +1,113 @@
ID,Incomplete,Quantity-Unit,DV-Unit,DS1-Theta_0,DS2-Theta_0,DS3-Theta_0,DS4-Theta_0,DS5-Theta_0
-STR.RES1-Cost,0,1 EA,loss_ratio,0.5,2.3,11.7,23.4,23.4
-STR.RES1-Time,0,1 EA,day,2.0,30.0,90.0,180.0,180.0
-STR.RES2-Cost,0,1 EA,loss_ratio,0.4,2.4,7.3,24.4,24.4
-STR.RES2-Time,0,1 EA,day,2.0,10.0,30.0,60.0,60.0
-STR.RES3-Cost,0,1 EA,loss_ratio,0.3,1.4,6.9,13.8,13.8
-STR.RES3-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.RES4-Cost,0,1 EA,loss_ratio,0.2,1.4,6.8,13.6,13.6
-STR.RES4-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.RES5-Cost,0,1 EA,loss_ratio,0.4,1.9,9.4,18.8,18.8
-STR.RES5-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.RES6-Cost,0,1 EA,loss_ratio,0.4,1.8,9.2,18.4,18.4
-STR.RES6-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.COM1-Cost,0,1 EA,loss_ratio,0.6,2.9,14.7,29.4,29.4
-STR.COM1-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM2-Cost,0,1 EA,loss_ratio,0.6,3.2,16.2,32.4,32.4
-STR.COM2-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM3-Cost,0,1 EA,loss_ratio,0.3,1.6,8.1,16.2,16.2
-STR.COM3-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM4-Cost,0,1 EA,loss_ratio,0.4,1.9,9.6,19.2,19.2
-STR.COM4-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.COM5-Cost,0,1 EA,loss_ratio,0.3,1.4,6.9,13.8,13.8
-STR.COM5-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM6-Cost,0,1 EA,loss_ratio,0.2,1.4,7.0,14.0,14.0
-STR.COM6-Time,0,1 EA,day,10.0,45.0,180.0,360.0,360.0
-STR.COM7-Cost,0,1 EA,loss_ratio,0.3,1.4,7.2,14.4,14.4
-STR.COM7-Time,0,1 EA,day,10.0,45.0,180.0,240.0,240.0
-STR.COM8-Cost,0,1 EA,loss_ratio,0.2,1.0,5.0,10.0,10.0
-STR.COM8-Time,0,1 EA,day,5.0,30.0,90.0,180.0,180.0
-STR.COM9-Cost,0,1 EA,loss_ratio,0.3,1.2,6.1,12.2,12.2
-STR.COM9-Time,0,1 EA,day,5.0,30.0,120.0,240.0,240.0
-STR.COM10-Cost,0,1 EA,loss_ratio,1.3,6.1,30.4,60.9,60.9
-STR.COM10-Time,0,1 EA,day,2.0,20.0,80.0,160.0,160.0
-STR.IND1-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND2-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND2-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND3-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND3-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND4-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND4-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.IND5-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND5-Time,0,1 EA,day,20.0,45.0,180.0,360.0,360.0
-STR.IND6-Cost,0,1 EA,loss_ratio,0.4,1.6,7.8,15.7,15.7
-STR.IND6-Time,0,1 EA,day,5.0,20.0,80.0,160.0,160.0
-STR.AGR1-Cost,0,1 EA,loss_ratio,0.8,4.6,23.1,46.2,46.2
-STR.AGR1-Time,0,1 EA,day,2.0,10.0,30.0,60.0,60.0
-STR.REL1-Cost,0,1 EA,loss_ratio,0.3,2.0,9.9,19.8,19.8
-STR.REL1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.GOV1-Cost,0,1 EA,loss_ratio,0.3,1.8,9.0,17.9,17.9
-STR.GOV1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.GOV2-Cost,0,1 EA,loss_ratio,0.3,1.5,7.7,15.3,15.3
-STR.GOV2-Time,0,1 EA,day,5.0,20.0,90.0,180.0,180.0
-STR.EDU1-Cost,0,1 EA,loss_ratio,0.4,1.9,9.5,18.9,18.9
-STR.EDU1-Time,0,1 EA,day,10.0,30.0,120.0,240.0,240.0
-STR.EDU2-Cost,0,1 EA,loss_ratio,0.2,1.1,5.5,11.0,11.0
-STR.EDU2-Time,0,1 EA,day,10.0,45.0,180.0,360.0,360.0
-NSD.RES1-Cost,0,1 EA,loss_ratio,1.0,5.0,25.0,50.0,
-NSD.RES2-Cost,0,1 EA,loss_ratio,0.8,3.8,18.9,37.8,
-NSD.RES3-Cost,0,1 EA,loss_ratio,0.9,4.3,21.3,42.5,
-NSD.RES4-Cost,0,1 EA,loss_ratio,0.9,4.3,21.6,43.2,
-NSD.RES5-Cost,0,1 EA,loss_ratio,0.8,4.0,20.0,40.0,
-NSD.RES6-Cost,0,1 EA,loss_ratio,0.8,4.1,20.4,40.8,
-NSD.COM1-Cost,0,1 EA,loss_ratio,0.6,2.7,13.8,27.5,
-NSD.COM2-Cost,0,1 EA,loss_ratio,0.6,2.6,13.2,26.5,
-NSD.COM3-Cost,0,1 EA,loss_ratio,0.7,3.4,16.9,33.8,
-NSD.COM4-Cost,0,1 EA,loss_ratio,0.7,3.3,16.4,32.9,
-NSD.COM5-Cost,0,1 EA,loss_ratio,0.7,3.4,17.2,34.5,
-NSD.COM6-Cost,0,1 EA,loss_ratio,0.8,3.5,17.4,34.7,
-NSD.COM7-Cost,0,1 EA,loss_ratio,0.7,3.4,17.2,34.4,
-NSD.COM8-Cost,0,1 EA,loss_ratio,0.7,3.6,17.8,35.6,
-NSD.COM9-Cost,0,1 EA,loss_ratio,0.7,3.5,17.6,35.1,
-NSD.COM10-Cost,0,1 EA,loss_ratio,0.4,1.7,8.7,17.4,
-NSD.IND1-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND2-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND3-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND4-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND5-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.IND6-Cost,0,1 EA,loss_ratio,0.2,1.2,5.9,11.8,
-NSD.AGR1-Cost,0,1 EA,loss_ratio,0.0,0.8,3.8,7.7,
-NSD.REL1-Cost,0,1 EA,loss_ratio,0.8,3.3,16.3,32.6,
-NSD.GOV1-Cost,0,1 EA,loss_ratio,0.7,3.3,16.4,32.8,
-NSD.GOV2-Cost,0,1 EA,loss_ratio,0.7,3.4,17.1,34.2,
-NSD.EDU1-Cost,0,1 EA,loss_ratio,0.9,4.9,24.3,48.7,
-NSD.EDU2-Cost,0,1 EA,loss_ratio,1.2,6.0,30.0,60.0,
-NSA.RES1-Cost,0,1 EA,loss_ratio,0.5,2.7,8.0,26.6,
-NSA.RES2-Cost,0,1 EA,loss_ratio,0.8,3.8,11.3,37.8,
-NSA.RES3-Cost,0,1 EA,loss_ratio,0.8,4.3,13.1,43.7,
-NSA.RES4-Cost,0,1 EA,loss_ratio,0.9,4.3,13.0,43.2,
-NSA.RES5-Cost,0,1 EA,loss_ratio,0.8,4.1,12.4,41.2,
-NSA.RES6-Cost,0,1 EA,loss_ratio,0.8,4.1,12.2,40.8,
-NSA.COM1-Cost,0,1 EA,loss_ratio,0.8,4.4,12.9,43.1,
-NSA.COM2-Cost,0,1 EA,loss_ratio,0.8,4.2,12.4,41.1,
-NSA.COM3-Cost,0,1 EA,loss_ratio,1.0,5.0,15.0,50.0,
-NSA.COM4-Cost,0,1 EA,loss_ratio,0.9,4.8,14.4,47.9,
-NSA.COM5-Cost,0,1 EA,loss_ratio,1.0,5.2,15.5,51.7,
-NSA.COM6-Cost,0,1 EA,loss_ratio,1.0,5.1,15.4,51.3,
-NSA.COM7-Cost,0,1 EA,loss_ratio,1.0,5.2,15.3,51.2,
-NSA.COM8-Cost,0,1 EA,loss_ratio,1.1,5.4,16.3,54.4,
-NSA.COM9-Cost,0,1 EA,loss_ratio,1.0,5.3,15.8,52.7,
-NSA.COM10-Cost,0,1 EA,loss_ratio,0.3,2.2,6.5,21.7,
-NSA.IND1-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND2-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND3-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND4-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND5-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.IND6-Cost,0,1 EA,loss_ratio,1.4,7.2,21.8,72.5,
-NSA.AGR1-Cost,0,1 EA,loss_ratio,0.8,4.6,13.8,46.1,
-NSA.REL1-Cost,0,1 EA,loss_ratio,0.9,4.7,14.3,47.6,
-NSA.GOV1-Cost,0,1 EA,loss_ratio,1.0,4.9,14.8,49.3,
-NSA.GOV2-Cost,0,1 EA,loss_ratio,1.0,5.1,15.1,50.5,
-NSA.EDU1-Cost,0,1 EA,loss_ratio,0.7,3.2,9.7,32.4,
-NSA.EDU2-Cost,0,1 EA,loss_ratio,0.6,2.9,8.7,29.0,
+STR.RES1-Cost,0,1 EA,loss_ratio,0.005,0.023,0.117,0.234,0.234
+STR.RES1-Time,0,1 EA,day,2,30,90,180,180
+STR.RES2-Cost,0,1 EA,loss_ratio,0.004,0.024,0.073,0.244,0.244
+STR.RES2-Time,0,1 EA,day,2,10,30,60,60
+STR.RES3-Cost,0,1 EA,loss_ratio,0.003,0.014,0.069,0.138,0.138
+STR.RES3-Time,0,1 EA,day,5,30,120,240,240
+STR.RES4-Cost,0,1 EA,loss_ratio,0.002,0.014,0.068,0.136,0.136
+STR.RES4-Time,0,1 EA,day,5,30,120,240,240
+STR.RES5-Cost,0,1 EA,loss_ratio,0.004,0.019,0.094,0.188,0.188
+STR.RES5-Time,0,1 EA,day,5,30,120,240,240
+STR.RES6-Cost,0,1 EA,loss_ratio,0.004,0.018,0.092,0.184,0.184
+STR.RES6-Time,0,1 EA,day,5,30,120,240,240
+STR.COM1-Cost,0,1 EA,loss_ratio,0.006,0.029,0.147,0.294,0.294
+STR.COM1-Time,0,1 EA,day,5,30,90,180,180
+STR.COM2-Cost,0,1 EA,loss_ratio,0.006,0.032,0.162,0.324,0.324
+STR.COM2-Time,0,1 EA,day,5,30,90,180,180
+STR.COM3-Cost,0,1 EA,loss_ratio,0.003,0.016,0.081,0.162,0.162
+STR.COM3-Time,0,1 EA,day,5,30,90,180,180
+STR.COM4-Cost,0,1 EA,loss_ratio,0.004,0.019,0.096,0.192,0.192
+STR.COM4-Time,0,1 EA,day,5,30,120,240,240
+STR.COM5-Cost,0,1 EA,loss_ratio,0.003,0.014,0.069,0.138,0.138
+STR.COM5-Time,0,1 EA,day,5,30,90,180,180
+STR.COM6-Cost,0,1 EA,loss_ratio,0.002,0.014,0.070,0.140,0.140
+STR.COM6-Time,0,1 EA,day,10,45,180,360,360
+STR.COM7-Cost,0,1 EA,loss_ratio,0.003,0.014,0.072,0.144,0.144
+STR.COM7-Time,0,1 EA,day,10,45,180,240,240
+STR.COM8-Cost,0,1 EA,loss_ratio,0.002,0.010,0.050,0.100,0.100
+STR.COM8-Time,0,1 EA,day,5,30,90,180,180
+STR.COM9-Cost,0,1 EA,loss_ratio,0.003,0.012,0.061,0.122,0.122
+STR.COM9-Time,0,1 EA,day,5,30,120,240,240
+STR.COM10-Cost,0,1 EA,loss_ratio,0.013,0.061,0.304,0.609,0.609
+STR.COM10-Time,0,1 EA,day,2,20,80,160,160
+STR.IND1-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND1-Time,0,1 EA,day,10,30,120,240,240
+STR.IND2-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND2-Time,0,1 EA,day,10,30,120,240,240
+STR.IND3-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND3-Time,0,1 EA,day,10,30,120,240,240
+STR.IND4-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND4-Time,0,1 EA,day,10,30,120,240,240
+STR.IND5-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND5-Time,0,1 EA,day,20,45,180,360,360
+STR.IND6-Cost,0,1 EA,loss_ratio,0.004,0.016,0.078,0.157,0.157
+STR.IND6-Time,0,1 EA,day,5,20,80,160,160
+STR.AGR1-Cost,0,1 EA,loss_ratio,0.008,0.046,0.231,0.462,0.462
+STR.AGR1-Time,0,1 EA,day,2,10,30,60,60
+STR.REL1-Cost,0,1 EA,loss_ratio,0.003,0.020,0.099,0.198,0.198
+STR.REL1-Time,0,1 EA,day,10,30,120,240,240
+STR.GOV1-Cost,0,1 EA,loss_ratio,0.003,0.018,0.090,0.179,0.179
+STR.GOV1-Time,0,1 EA,day,10,30,120,240,240
+STR.GOV2-Cost,0,1 EA,loss_ratio,0.003,0.015,0.077,0.153,0.153
+STR.GOV2-Time,0,1 EA,day,5,20,90,180,180
+STR.EDU1-Cost,0,1 EA,loss_ratio,0.004,0.019,0.095,0.189,0.189
+STR.EDU1-Time,0,1 EA,day,10,30,120,240,240
+STR.EDU2-Cost,0,1 EA,loss_ratio,0.002,0.011,0.055,0.110,0.110
+STR.EDU2-Time,0,1 EA,day,10,45,180,360,360
+NSD.RES1-Cost,0,1 EA,loss_ratio,0.010,0.050,0.250,0.500,
+NSD.RES2-Cost,0,1 EA,loss_ratio,0.008,0.038,0.189,0.378,
+NSD.RES3-Cost,0,1 EA,loss_ratio,0.009,0.043,0.213,0.425,
+NSD.RES4-Cost,0,1 EA,loss_ratio,0.009,0.043,0.216,0.432,
+NSD.RES5-Cost,0,1 EA,loss_ratio,0.008,0.040,0.200,0.400,
+NSD.RES6-Cost,0,1 EA,loss_ratio,0.008,0.041,0.204,0.408,
+NSD.COM1-Cost,0,1 EA,loss_ratio,0.006,0.027,0.138,0.275,
+NSD.COM2-Cost,0,1 EA,loss_ratio,0.006,0.026,0.132,0.265,
+NSD.COM3-Cost,0,1 EA,loss_ratio,0.007,0.034,0.169,0.338,
+NSD.COM4-Cost,0,1 EA,loss_ratio,0.007,0.033,0.164,0.329,
+NSD.COM5-Cost,0,1 EA,loss_ratio,0.007,0.034,0.172,0.345,
+NSD.COM6-Cost,0,1 EA,loss_ratio,0.008,0.035,0.174,0.347,
+NSD.COM7-Cost,0,1 EA,loss_ratio,0.007,0.034,0.172,0.344,
+NSD.COM8-Cost,0,1 EA,loss_ratio,0.007,0.036,0.178,0.356,
+NSD.COM9-Cost,0,1 EA,loss_ratio,0.007,0.035,0.176,0.351,
+NSD.COM10-Cost,0,1 EA,loss_ratio,0.004,0.017,0.087,0.174,
+NSD.IND1-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND2-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND3-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND4-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND5-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.IND6-Cost,0,1 EA,loss_ratio,0.002,0.012,0.059,0.118,
+NSD.AGR1-Cost,0,1 EA,loss_ratio,0.000,0.008,0.038,0.077,
+NSD.REL1-Cost,0,1 EA,loss_ratio,0.008,0.033,0.163,0.326,
+NSD.GOV1-Cost,0,1 EA,loss_ratio,0.007,0.033,0.164,0.328,
+NSD.GOV2-Cost,0,1 EA,loss_ratio,0.007,0.034,0.171,0.342,
+NSD.EDU1-Cost,0,1 EA,loss_ratio,0.009,0.049,0.243,0.487,
+NSD.EDU2-Cost,0,1 EA,loss_ratio,0.012,0.060,0.300,0.600,
+NSA.RES1-Cost,0,1 EA,loss_ratio,0.005,0.027,0.080,0.266,
+NSA.RES2-Cost,0,1 EA,loss_ratio,0.008,0.038,0.113,0.378,
+NSA.RES3-Cost,0,1 EA,loss_ratio,0.008,0.043,0.131,0.437,
+NSA.RES4-Cost,0,1 EA,loss_ratio,0.009,0.043,0.130,0.432,
+NSA.RES5-Cost,0,1 EA,loss_ratio,0.008,0.041,0.124,0.412,
+NSA.RES6-Cost,0,1 EA,loss_ratio,0.008,0.041,0.122,0.408,
+NSA.COM1-Cost,0,1 EA,loss_ratio,0.008,0.044,0.129,0.431,
+NSA.COM2-Cost,0,1 EA,loss_ratio,0.008,0.042,0.124,0.411,
+NSA.COM3-Cost,0,1 EA,loss_ratio,0.010,0.050,0.150,0.500,
+NSA.COM4-Cost,0,1 EA,loss_ratio,0.009,0.048,0.144,0.479,
+NSA.COM5-Cost,0,1 EA,loss_ratio,0.010,0.052,0.155,0.517,
+NSA.COM6-Cost,0,1 EA,loss_ratio,0.010,0.051,0.154,0.513,
+NSA.COM7-Cost,0,1 EA,loss_ratio,0.010,0.052,0.153,0.512,
+NSA.COM8-Cost,0,1 EA,loss_ratio,0.011,0.054,0.163,0.544,
+NSA.COM9-Cost,0,1 EA,loss_ratio,0.010,0.053,0.158,0.527,
+NSA.COM10-Cost,0,1 EA,loss_ratio,0.003,0.022,0.065,0.217,
+NSA.IND1-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND2-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND3-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND4-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND5-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.IND6-Cost,0,1 EA,loss_ratio,0.014,0.072,0.218,0.725,
+NSA.AGR1-Cost,0,1 EA,loss_ratio,0.008,0.046,0.138,0.461,
+NSA.REL1-Cost,0,1 EA,loss_ratio,0.009,0.047,0.143,0.476,
+NSA.GOV1-Cost,0,1 EA,loss_ratio,0.010,0.049,0.148,0.493,
+NSA.GOV2-Cost,0,1 EA,loss_ratio,0.010,0.051,0.151,0.505,
+NSA.EDU1-Cost,0,1 EA,loss_ratio,0.007,0.032,0.097,0.324,
+NSA.EDU2-Cost,0,1 EA,loss_ratio,0.006,0.029,0.087,0.290,
diff --git a/pelicun/resources/SimCenterDBDL/loss_repair_DB_SimCenter_Hazus_FL_bldg.csv b/pelicun/resources/SimCenterDBDL/loss_repair_DB_SimCenter_Hazus_FL_bldg.csv
new file mode 100644
index 000000000..0dbbfde6b
--- /dev/null
+++ b/pelicun/resources/SimCenterDBDL/loss_repair_DB_SimCenter_Hazus_FL_bldg.csv
@@ -0,0 +1,1827 @@
+index,ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,DV-Unit,LossFunction-Theta_0
+0,RES1.FIA.contents.one_floor.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 35.0, 36.0, 38.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+1,RES1.FIA.contents.one_floor.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 20.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+2,RES1.FIA.contents.two_floors.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 19.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+3,RES1.FIA-Modified.contents.two_floors.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 18.0, 25.0, 29.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+4,RES1.FIA-Modified.contents.three_or_more_floors.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 15.0, 21.0, 22.0, 23.0, 25.0, 27.0, 30.0, 35.0, 40.0, 43.0, 45.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+5,RES1.FIA-Modified.contents.three_or_more_floors.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 22.0, 27.0, 28.0, 29.0, 30.0, 32.0, 35.0, 39.0, 43.0, 46.0, 47.0, 50.0, 52.0, 53.0, 55.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+6,RES1.FIA.contents.split_level.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 19.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+7,RES1.FIA-Modified.contents.split_level.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 18.0, 25.0, 29.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+8,RES1.FIA.contents.one_floor.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 17.0, 23.0, 29.0, 35.0, 40.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+9,RES1.FIA.contents.one_floor.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 20.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+10,RES1.FIA.contents.two_floors.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 9.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+11,RES1.FIA-Modified.contents.two_floors.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 17.0, 23.0, 28.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+12,RES1.FIA-Modified.contents.three_or_more_floors.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 11.0, 15.0, 19.0, 23.0, 26.0, 29.0, 32.0, 35.0, 41.0, 43.0, 45.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+13,RES1.FIA-Modified.contents.three_or_more_floors.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 14.0, 18.0, 22.0, 25.0, 29.0, 31.0, 34.0, 36.0, 39.0, 44.0, 46.0, 47.0, 50.0, 52.0, 53.0, 55.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+14,RES1.FIA.contents.split_level.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 9.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+15,RES1.FIA-Modified.contents.split_level.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 17.0, 23.0, 28.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+16,RES1.USACE_IWR.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 4.0, 16.0, 26.0, 36.0, 44.0, 52.0, 58.0, 64.0, 68.0, 72.0, 74.0, 76.0, 78.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+17,RES1.USACE_IWR.contents.two_or_more_stories.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.0, 10.0, 18.0, 24.0, 32.0, 38.0, 42.0, 48.0, 52.0, 56.0, 60.0, 64.0, 66.0, 70.0, 72.0, 72.0, 74.0, 74.0, 76.0, 76.0, 78.0, 78.0, 80.0, 80.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+18,RES1.USACE_IWR.contents.split_level.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 4.0, 6.0, 10.0, 16.0, 22.0, 30.0, 40.0, 50.0, 62.0, 72.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+19,RES1.USACE_Chicago.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+20,RES1.USACE_Chicago.contents.one_story.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 6.0, 7.0, 8.0, 15.0, 19.0, 22.0, 28.0, 33.0, 39.0, 43.0, 49.0, 54.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+21,RES1.USACE_Chicago.contents.split_level.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 19.0, 32.0, 41.0, 47.0, 51.0, 53.0, 55.0, 56.0, 62.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+22,RES1.USACE_Chicago.contents.split_level.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 18.0, 31.0, 44.0, 52.0, 58.0, 61.0, 63.0, 64.0, 66.0, 69.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+23,RES1.USACE_Chicago.contents.two_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 18.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 62.0, 66.0, 70.0, 74.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+24,RES1.USACE_Chicago.contents.two_story.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 6.0, 9.0, 11.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 49.0, 55.0, 61.0, 64.0, 71.0, 76.0, 78.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+25,RES1.USACE_Galveston.contents.one_&_1/2_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 22.0, 36.0, 45.0, 57.0, 66.0, 71.0, 77.0, 79.0, 82.0, 84.0, 86.0, 87.0, 89.0, 90.0, 91.0, 92.0, 92.0, 92.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+26,RES1.USACE_Galveston.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 42.0, 60.0, 71.0, 77.0, 82.0, 85.0, 86.0, 87.0, 88.0, 88.0, 88.0, 89.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+27,RES1.USACE_Galveston.contents.two_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 24.0, 34.0, 40.0, 47.0, 53.0, 56.0, 58.0, 58.0, 58.0, 61.0, 66.0, 68.0, 76.0, 81.0, 86.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+28,RES1.USACE_New-Orleans.contents.one_story.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 63.0, 82.0, 85.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+29,RES1.USACE_New-Orleans.contents.one_story.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+30,RES1.USACE_New-Orleans.contents.two_story.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 37.0, 51.0, 51.0, 55.0, 55.0, 55.0, 55.0, 55.0, 73.0, 81.0, 87.0, 90.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+31,RES1.USACE_New-Orleans.contents.two_story.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+32,RES1.USACE_St-Paul.contents.one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 44.0, 54.0, 63.0, 68.0, 73.0, 75.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+33,RES1.USACE_St-Paul.contents.two_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 44.0, 54.0, 63.0, 68.0, 73.0, 75.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+34,RES1.USACE_Wilmington.contents.one_&_1/2_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"6.0, 7.0, 8.0, 9.0, 9.0, 21.0, 31.0, 43.0, 54.0, 67.0, 70.0, 73.0, 76.0, 78.0, 81.0, 82.0, 84.0, 85.0, 86.0, 87.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+35,RES1.USACE_Wilmington.contents.one_&_1/2_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 9.0, 21.0, 31.0, 37.0, 43.0, 51.0, 57.0, 63.0, 68.0, 74.0, 80.0, 81.0, 83.0, 84.0, 86.0, 87.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+36,RES1.USACE_Wilmington.contents.one_&_1/2_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 6.0, 10.0, 15.0, 18.0, 21.0, 23.0, 24.0, 28.0, 34.0, 44.0, 54.0, 63.0, 73.0, 76.0, 79.0, 82.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+37,RES1.USACE_Wilmington.contents.one_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"9.0, 10.0, 13.0, 13.0, 14.0, 26.0, 38.0, 52.0, 64.0, 78.0, 81.0, 85.0, 88.0, 91.0, 95.0, 96.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+38,RES1.USACE_Wilmington.contents.one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 36.0, 43.0, 46.0, 52.0, 59.0, 66.0, 73.0, 80.0, 87.0, 88.0, 90.0, 92.0, 94.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+39,RES1.USACE_Wilmington.contents.one_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 11.0, 16.0, 20.0, 24.0, 28.0, 30.0, 32.0, 38.0, 46.0, 54.0, 63.0, 72.0, 82.0, 85.0, 89.0, 92.0, 95.0, 96.0, 97.0, 98.0, 98.0, 99.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+40,RES1.USACE_Wilmington.contents.one_story_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"10.0, 12.0, 13.0, 13.0, 20.0, 34.0, 48.0, 52.0, 64.0, 78.0, 81.0, 85.0, 88.0, 91.0, 95.0, 96.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+41,RES1.USACE_Wilmington.contents.split_level-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 15.0, 21.0, 25.0, 32.0, 44.0, 50.0, 55.0, 61.0, 66.0, 72.0, 75.0, 79.0, 83.0, 87.0, 91.0, 94.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+42,RES1.USACE_Wilmington.contents.two_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"6.0, 7.0, 8.0, 9.0, 10.0, 19.0, 29.0, 38.0, 45.0, 53.0, 55.0, 57.0, 59.0, 61.0, 63.0, 67.0, 71.0, 75.0, 78.0, 82.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+43,RES1.USACE_Wilmington.contents.two_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 18.0, 26.0, 30.0, 40.0, 50.0, 52.0, 53.0, 55.0, 56.0, 58.0, 63.0, 68.0, 72.0, 77.0, 82.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+44,RES1.USACE_Wilmington.contents.two_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 7.0, 10.0, 13.0, 15.0, 18.0, 19.0, 21.0, 25.0, 33.0, 40.0, 47.0, 54.0, 56.0, 59.0, 62.0, 65.0, 67.0, 71.0, 75.0, 79.0, 83.0, 87.0, 91.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+45,RES2.FIA.contents.mobile_home.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 49.0, 64.0, 70.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+46,RES2.FIA.contents.mobile_home.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 50.0, 65.0, 71.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+47,RES2.USACE_Chicago.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 49.0, 64.0, 70.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+48,RES2.USACE_Galveston.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 23.0, 36.0, 43.0, 55.0, 66.0, 78.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+49,RES2.USACE_New-Orleans.contents.mobile_home.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 39.0, 54.0, 75.0, 77.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+50,RES2.USACE_New-Orleans.contents.mobile_home.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+51,RES2.USACE_Wilmington.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 22.0, 32.0, 37.0, 50.0, 63.0, 74.0, 82.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+52,RES3.USACE_Chicago.contents.apartment_unit_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+53,RES3.USACE_Chicago.contents.apartment_unit_sub_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 6.0, 7.0, 8.0, 15.0, 19.0, 22.0, 28.0, 33.0, 39.0, 43.0, 49.0, 54.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+54,RES3.USACE_Galveston.contents.apartment.living_area_on_one_floor-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 34.0, 44.0, 55.0, 67.0, 77.0, 87.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+55,RES3.USACE_Galveston.contents.condominium.living_area_on_multiple_floors-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 24.0, 34.0, 40.0, 47.0, 53.0, 56.0, 58.0, 58.0, 58.0, 61.0, 66.0, 68.0, 76.0, 81.0, 86.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+56,RES4.USACE_Galveston.contents.average_hotel/motel.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 19.0, 25.0, 29.0, 34.0, 39.0, 44.0, 49.0, 56.0, 65.0, 74.0, 82.0, 88.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+57,RES4.USACE_Galveston.contents.hotel.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 22.0, 28.0, 33.0, 37.0, 41.0, 44.0, 46.0, 49.0, 54.0, 60.0, 69.0, 81.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+58,RES4.USACE_Galveston.contents.motel_unit.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 16.0, 21.0, 25.0, 30.0, 36.0, 43.0, 52.0, 63.0, 76.0, 88.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+59,RES5.USACE_Galveston.contents.average_institutional_dormitory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 60.0, 73.0, 81.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+60,RES6.USACE_Galveston.contents.nursing_home.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 60.0, 73.0, 81.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+61,COM1.USACE_Galveston.contents.average_retail_trade_equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 26.0, 42.0, 56.0, 68.0, 78.0, 83.0, 85.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 92.0, 92.0, 93.0, 93.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+62,COM1.USACE_Galveston.contents.antique.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 78.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+63,COM1.USACE_Galveston.contents.appliance_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 16.0, 28.0, 58.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+64,COM1.USACE_Galveston.contents.appliance_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 91.0, 94.0, 95.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+65,COM1.USACE_Galveston.contents.large_auto_dealer.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 50.0, 90.0, 95.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+66,COM1.USACE_Galveston.contents.bait_stand.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 7.0, 11.0, 16.0, 22.0, 29.0, 36.0, 44.0, 52.0, 60.0, 69.0, 79.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+67,COM1.USACE_Galveston.contents.bakery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 14.0, 35.0, 70.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+68,COM1.USACE_Galveston.contents.bakery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 55.0, 65.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+69,COM1.USACE_Galveston.contents.boat_sales/service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 41.0, 65.0, 83.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+70,COM1.USACE_Galveston.contents.boat_sales/service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.0, 24.0, 43.0, 82.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+71,COM1.USACE_Galveston.contents.book_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 14.0, 21.0, 27.0, 35.0, 42.0, 49.0, 57.0, 65.0, 73.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+72,COM1.USACE_Galveston.contents.camera_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 33.0, 65.0, 88.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+73,COM1.USACE_Galveston.contents.carpet_and_paint_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+74,COM1.USACE_Galveston.contents.carpet_and_paint_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 95.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+75,COM1.USACE_Galveston.contents.mens_clothing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 41.0, 60.0, 78.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+76,COM1.USACE_Galveston.contents.mens_clothing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 37.0, 50.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+77,COM1.USACE_Galveston.contents.crafts.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+78,COM1.USACE_Galveston.contents.crafts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 50.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+79,COM1.USACE_Galveston.contents.chain_drug_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+80,COM1.USACE_Galveston.contents.fabric_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+81,COM1.USACE_Galveston.contents.fabric_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 30.0, 50.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+82,COM1.USACE_Galveston.contents.feed_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+83,COM1.USACE_Galveston.contents.feed_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 15.0, 15.0, 15.0, 25.0, 25.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+84,COM1.USACE_Galveston.contents.florist.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 49.0, 60.0, 75.0, 78.0, 79.0, 82.0, 85.0, 89.0, 93.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+85,COM1.USACE_Galveston.contents.florist.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+86,COM1.USACE_Galveston.contents.fruit_stand.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+87,COM1.USACE_Galveston.contents.large_furniture_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+88,COM1.USACE_Galveston.contents.gas/butane_supply.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 46.0, 65.0, 75.0, 81.0, 86.0, 90.0, 94.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+89,COM1.USACE_Galveston.contents.gift_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 54.0, 63.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+90,COM1.USACE_Galveston.contents.greenhouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 34.0, 50.0, 66.0, 80.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+91,COM1.USACE_Galveston.contents.greenhouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 66.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+92,COM1.USACE_Galveston.contents.small_grocery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 20.0, 20.0, 20.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+93,COM1.USACE_Galveston.contents.small_grocery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 35.0, 50.0, 65.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+94,COM1.USACE_Galveston.contents.medium_grocery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 80.0, 90.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+95,COM1.USACE_Galveston.contents.medium_grocery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 22.0, 44.0, 74.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+96,COM1.USACE_Galveston.contents.gun_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 30.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+97,COM1.USACE_Galveston.contents.gun_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 39.0, 58.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+98,COM1.USACE_Galveston.contents.hardware.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+99,COM1.USACE_Galveston.contents.hardware.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+100,COM1.USACE_Galveston.contents.hobby_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 56.0, 87.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+101,COM1.USACE_Galveston.contents.hobby_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 49.0, 64.0, 77.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+102,COM1.USACE_Galveston.contents.lawnmower.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 24.0, 46.0, 57.0, 65.0, 72.0, 79.0, 86.0, 91.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+103,COM1.USACE_Galveston.contents.lawnmower.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+104,COM1.USACE_Galveston.contents.liquor_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 9.0, 16.0, 84.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+105,COM1.USACE_Galveston.contents.liquor_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 81.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+106,COM1.USACE_Galveston.contents.meat_market.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 78.0, 81.0, 84.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+107,COM1.USACE_Galveston.contents.meat_market.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+108,COM1.USACE_Galveston.contents.motorcycle_dealer.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+109,COM1.USACE_Galveston.contents.motorcycle_dealer.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 45.0, 65.0, 85.0, 85.0, 85.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+110,COM1.USACE_Galveston.contents.music_center.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 52.0, 72.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+111,COM1.USACE_Galveston.contents.music_center.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 66.0, 72.0, 75.0, 80.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+112,COM1.USACE_Galveston.contents.plant_nursery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 8.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+113,COM1.USACE_Galveston.contents.plant_nursery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 20.0, 79.0, 88.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+114,COM1.USACE_Galveston.contents.paint_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+115,COM1.USACE_Galveston.contents.paint_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 70.0, 73.0, 76.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+116,COM1.USACE_Galveston.contents.pawn_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 60.0, 70.0, 70.0, 70.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+117,COM1.USACE_Galveston.contents.pawn_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+118,COM1.USACE_Galveston.contents.remnant_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+119,COM1.USACE_Galveston.contents.remnant_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+120,COM1.USACE_Galveston.contents.service_station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 39.0, 59.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+121,COM1.USACE_Galveston.contents.service_station.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 42.0, 62.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+122,COM1.USACE_Galveston.contents.shoe_repair.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 28.0, 38.0, 41.0, 44.0, 47.0, 47.0, 47.0, 47.0, 47.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+123,COM1.USACE_Galveston.contents.shoe_repair.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 23.0, 35.0, 48.0, 60.0, 74.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+124,COM1.USACE_Galveston.contents.toy_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 40.0, 70.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+125,COM1.USACE_Galveston.contents.tractor_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 18.0, 28.0, 36.0, 44.0, 48.0, 52.0, 56.0, 60.0, 65.0, 70.0, 74.0, 79.0, 84.0, 88.0, 93.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+126,COM1.USACE_Galveston.contents.tractor_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 17.0, 29.0, 44.0, 59.0, 70.0, 77.0, 81.0, 84.0, 88.0, 92.0, 95.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+127,COM1.USACE_Galveston.contents.trailer_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 37.0, 60.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+128,COM1.USACE_Galveston.contents.trophy_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 35.0, 38.0, 56.0, 60.0, 60.0, 60.0, 60.0, 60.0, 61.0, 62.0, 64.0, 66.0, 68.0, 71.0, 76.0, 84.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+129,COM1.USACE_Galveston.contents.trophy_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 5.0, 12.0, 31.0, 66.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+130,COM1.USACE_Galveston.contents.upholstery_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+131,COM1.USACE_Galveston.contents.upholstery_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 30.0, 40.0, 45.0, 50.0, 53.0, 56.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+132,COM1.USACE_Galveston.contents.used_appliances/cloth.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+133,COM1.USACE_Galveston.contents.used_appliances/cloth.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 35.0, 40.0, 50.0, 60.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+134,COM1.USACE_Galveston.contents.used_furniture.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+135,COM1.USACE_Galveston.contents.used_furniture.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 65.0, 80.0, 80.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+136,COM1.USACE_Galveston.contents.vacuum_cleaner_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 40.0, 40.0, 60.0, 60.0, 60.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+137,COM1.USACE_Galveston.contents.vacuum_cleaner_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 70.0, 80.0, 90.0, 95.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+138,COM1.USACE_Galveston.contents.video_games.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 20.0, 30.0, 45.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+139,COM1.USACE_Galveston.contents.video_games.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+140,COM1.USACE_New-Orleans.contents.bakery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+141,COM1.USACE_New-Orleans.contents.bakery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+142,COM1.USACE_New-Orleans.contents.candy_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+143,COM1.USACE_New-Orleans.contents.candy_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+144,COM1.USACE_New-Orleans.contents.clothing_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+145,COM1.USACE_New-Orleans.contents.clothing_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+146,COM1.USACE_New-Orleans.contents.convenience_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+147,COM1.USACE_New-Orleans.contents.convenience_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+148,COM1.USACE_New-Orleans.contents.department_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+149,COM1.USACE_New-Orleans.contents.department_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+150,COM1.USACE_New-Orleans.contents.furniture_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+151,COM1.USACE_New-Orleans.contents.furniture_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+152,COM1.USACE_New-Orleans.contents.gas_stations.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+153,COM1.USACE_New-Orleans.contents.gas_stations.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+154,COM1.USACE_New-Orleans.contents.large_grocery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+155,COM1.USACE_New-Orleans.contents.large_grocery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+156,COM1.USACE_New-Orleans.contents.neighborhood_grocery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+157,COM1.USACE_New-Orleans.contents.neighborhood_grocery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+158,COM1.USACE_New-Orleans.contents.home_repair_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+159,COM1.USACE_New-Orleans.contents.home_repair_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+160,COM1.USACE_New-Orleans.contents.liquor_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+161,COM1.USACE_New-Orleans.contents.liquor_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+162,COM1.USACE_New-Orleans.contents.shoe_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+163,COM1.USACE_New-Orleans.contents.shoe_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+164,COM1.USACE_New-Orleans.contents.wine_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+165,COM1.USACE_New-Orleans.contents.wine_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+166,COM2.USACE_Galveston.contents.average_wholesale_trade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 16.0, 27.0, 36.0, 49.0, 57.0, 63.0, 69.0, 72.0, 76.0, 80.0, 82.0, 84.0, 86.0, 87.0, 87.0, 88.0, 89.0, 89.0, 89.0, 89.0, 89.0, 89.0, 89.0, 89.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+167,COM2.USACE_Galveston.contents.auto_parts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 30.0, 59.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+168,COM2.USACE_Galveston.contents.auto_parts/mufflers.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 40.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+169,COM2.USACE_Galveston.contents.heavy_equipment_storage.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 14.0, 17.0, 20.0, 23.0, 25.0, 29.0, 35.0, 38.0, 42.0, 51.0, 63.0, 77.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+170,COM2.USACE_Galveston.contents.food_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+171,COM2.USACE_Galveston.contents.highway_material_storage.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 5.0, 10.0, 10.0, 25.0, 25.0, 50.0, 50.0, 50.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+172,COM2.USACE_Galveston.contents.jewelry.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 15.0, 24.0, 33.0, 39.0, 45.0, 51.0, 56.0, 61.0, 65.0, 70.0, 74.0, 79.0, 83.0, 87.0, 92.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+173,COM2.USACE_Galveston.contents.lumber_yard.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 45.0, 60.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+174,COM2.USACE_Galveston.contents.medical_supplies.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+175,COM2.USACE_Galveston.contents.medical_supplies.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 20.0, 27.0, 35.0, 43.0, 50.0, 57.0, 65.0, 73.0, 80.0, 88.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+176,COM2.USACE_Galveston.contents.municipal_storage_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 40.0, 58.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+177,COM2.USACE_Galveston.contents.municipal_storage_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 16.0, 19.0, 21.0, 23.0, 28.0, 35.0, 47.0, 67.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+178,COM2.USACE_Galveston.contents.paper_products_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 42.0, 58.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+179,COM2.USACE_Galveston.contents.paper_products_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 27.0, 36.0, 44.0, 52.0, 59.0, 67.0, 73.0, 80.0, 90.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+180,COM2.USACE_Galveston.contents.safety_equipment.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 37.0, 50.0, 62.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+181,COM2.USACE_Galveston.contents.safety_equipment.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 37.0, 50.0, 62.0, 75.0, 85.0, 93.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+182,COM2.USACE_Galveston.contents.sporting_goods.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+183,COM2.USACE_Galveston.contents.sporting_goods.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 50.0, 53.0, 55.0, 57.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+184,COM2.USACE_Galveston.contents.sporting_goods_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 50.0, 63.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+185,COM2.USACE_Galveston.contents.sporting_goods_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 50.0, 63.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+186,COM2.USACE_Galveston.contents.storage_chemicals.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 15.0, 20.0, 25.0, 35.0, 45.0, 55.0, 65.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+187,COM2.USACE_Galveston.contents.storage_chemicals.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+188,COM2.USACE_Galveston.contents.storage_machine_parts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 50.0, 50.0, 75.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+189,COM2.USACE_Galveston.contents.t.v._station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 9.0, 9.0, 9.0, 9.0, 11.0, 13.0, 15.0, 18.0, 22.0, 26.0, 30.0, 35.0, 43.0, 54.0, 70.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+190,COM2.USACE_Galveston.contents.t.v._repair.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 10.0, 11.0, 13.0, 16.0, 21.0, 27.0, 34.0, 42.0, 52.0, 63.0, 74.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+191,COM2.USACE_Galveston.contents.t.v._repair.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+192,COM2.USACE_Galveston.contents.trailer_parts.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 50.0, 50.0, 50.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+193,COM2.USACE_Galveston.contents.trailer_parts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 16.0, 28.0, 32.0, 40.0, 43.0, 46.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+194,COM2.USACE_Galveston.contents.warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 16.0, 19.0, 21.0, 23.0, 28.0, 35.0, 47.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+195,COM2.USACE_Galveston.contents.beer_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+196,COM2.USACE_Galveston.contents.beer_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+197,COM2.USACE_Galveston.contents.bottled_gases_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 35.0, 50.0, 50.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+198,COM2.USACE_Galveston.contents.bottled_gases_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+199,COM2.USACE_Galveston.contents.cement_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+200,COM2.USACE_Galveston.contents.detergents_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 35.0, 35.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+201,COM2.USACE_Galveston.contents.detergents_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+202,COM2.USACE_Galveston.contents.heavy_machinery_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 11.0, 17.0, 20.0, 23.0, 25.0, 29.0, 35.0, 42.0, 51.0, 63.0, 77.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+203,COM2.USACE_Galveston.contents.heavy_machinery_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 25.0, 35.0, 40.0, 50.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+204,COM2.USACE_Galveston.contents.petroleum_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 35.0, 50.0, 50.0, 50.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+205,COM2.USACE_Galveston.contents.petroleum_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 40.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+206,COM2.USACE_Galveston.contents.western_auto.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 8.0, 16.0, 59.0, 65.0, 70.0, 73.0, 77.0, 81.0, 84.0, 87.0, 90.0, 93.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+207,COM2.USACE_Galveston.contents.western_auto.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 50.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+208,COM2.USACE_New-Orleans.contents.warehouse.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+209,COM2.USACE_New-Orleans.contents.warehouse.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+210,COM2.USACE_St-Paul.contents.warehouse.fresh_water-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 31.0, 43.0, 53.0, 61.0, 67.0, 71.0, 73.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+211,COM3.USACE_Galveston.contents.average_personal/repair_services.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 29.0, 46.0, 67.0, 79.0, 85.0, 91.0, 92.0, 92.0, 93.0, 94.0, 96.0, 96.0, 97.0, 97.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+212,COM3.USACE_Galveston.contents.auto_repair.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 56.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+213,COM3.USACE_Galveston.contents.auto_repair.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 50.0, 80.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+214,COM3.USACE_Galveston.contents.auto_service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 40.0, 60.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+215,COM3.USACE_Galveston.contents.barber_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 22.0, 29.0, 37.0, 48.0, 62.0, 78.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+216,COM3.USACE_Galveston.contents.barber_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 50.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+217,COM3.USACE_Galveston.contents.beauty_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 70.0, 87.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+218,COM3.USACE_Galveston.contents.beauty_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 31.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+219,COM3.USACE_Galveston.contents.boat_service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+220,COM3.USACE_Galveston.contents.boat_service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+221,COM3.USACE_Galveston.contents.car_wash.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 40.0, 50.0, 60.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+222,COM3.USACE_Galveston.contents.car_wash.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+223,COM3.USACE_Galveston.contents.cemetery_complex.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 34.0, 76.0, 88.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+224,COM3.USACE_Galveston.contents.cemetery_complex.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 92.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+225,COM3.USACE_Galveston.contents.cleaners.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 50.0, 75.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+226,COM3.USACE_Galveston.contents.cleaners_substation.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 10.0, 75.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+227,COM3.USACE_Galveston.contents.cleaners_substation.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 49.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+228,COM3.USACE_Galveston.contents.private_day_care.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+229,COM3.USACE_Galveston.contents.private_day_care.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+230,COM3.USACE_Galveston.contents.funeral_home.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 60.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+231,COM3.USACE_Galveston.contents.laundry.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 13.0, 16.0, 19.0, 22.0, 27.0, 33.0, 40.0, 47.0, 56.0, 65.0, 74.0, 84.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+232,COM3.USACE_Galveston.contents.photo_studio.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 60.0, 60.0, 60.0, 60.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+233,COM3.USACE_Galveston.contents.photo_studio.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+234,COM3.USACE_Galveston.contents.truck_mfg_&_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+235,COM3.USACE_Galveston.contents.truck_mfg_&_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 34.0, 50.0, 57.0, 65.0, 71.0, 76.0, 80.0, 88.0, 89.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+236,COM3.USACE_Galveston.contents.washateria.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 55.0, 78.0, 83.0, 86.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+237,COM3.USACE_New-Orleans.contents.auto_repair.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+238,COM3.USACE_New-Orleans.contents.auto_repair.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+239,COM3.USACE_New-Orleans.contents.barber_shop.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+240,COM3.USACE_New-Orleans.contents.barber_shop.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+241,COM3.USACE_New-Orleans.contents.beauty_salon.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+242,COM3.USACE_New-Orleans.contents.beauty_salon.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+243,COM3.USACE_New-Orleans.contents.funeral_home.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+244,COM3.USACE_New-Orleans.contents.funeral_home.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+245,COM3.USACE_New-Orleans.contents.laundromat.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+246,COM3.USACE_New-Orleans.contents.laundromat.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+247,COM3.USACE_New-Orleans.contents.reupholstery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+248,COM3.USACE_New-Orleans.contents.reupholstery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+249,COM3.USACE_New-Orleans.contents.watch_repair.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+250,COM3.USACE_New-Orleans.contents.watch_repair.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+251,COM4.USACE_Galveston.contents.average_prof/tech_services.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 18.0, 25.0, 35.0, 43.0, 49.0, 52.0, 55.0, 57.0, 58.0, 60.0, 65.0, 67.0, 68.0, 69.0, 70.0, 71.0, 71.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+252,COM4.USACE_Galveston.contents.airport.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+253,COM4.USACE_Galveston.contents.airport.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 40.0, 48.0, 55.0, 75.0, 78.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+254,COM4.USACE_Galveston.contents.boat_stalls.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 6.0, 8.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 29.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+255,COM4.USACE_Galveston.contents.boat_storage.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 4.0, 7.0, 12.0, 18.0, 24.0, 32.0, 40.0, 48.0, 54.0, 58.0, 63.0, 66.0, 68.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+256,COM4.USACE_Galveston.contents.business.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 6.0, 10.0, 15.0, 19.0, 24.0, 28.0, 33.0, 38.0, 44.0, 49.0, 55.0, 62.0, 69.0, 78.0, 86.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+257,COM4.USACE_Galveston.contents.import_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+258,COM4.USACE_Galveston.contents.import_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 65.0, 70.0, 75.0, 80.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+259,COM4.USACE_Galveston.contents.large_commercial_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 24.0, 25.0, 26.0, 28.0, 31.0, 36.0, 42.0, 50.0, 71.0, 84.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+260,COM4.USACE_Galveston.contents.real_estate_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 9.0, 42.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+261,COM4.USACE_Galveston.contents.real_estate_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 25.0, 43.0, 63.0, 70.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+262,COM4.USACE_Galveston.contents.transport_company.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 75.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+263,COM4.USACE_Galveston.contents.utility_company.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+264,COM4.USACE_Galveston.contents.utility_company.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 5.0, 7.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+265,COM4.USACE_Galveston.contents.water_supply.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+266,COM4.USACE_New-Orleans.contents.accounting_firm.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+267,COM4.USACE_New-Orleans.contents.accounting_firm.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+268,COM4.USACE_New-Orleans.contents.legal_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+269,COM4.USACE_New-Orleans.contents.legal_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+270,COM4.USACE_New-Orleans.contents.real_estate_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+271,COM4.USACE_New-Orleans.contents.real_estate_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+272,COM4.USACE_New-Orleans.contents.utility_company.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+273,COM4.USACE_New-Orleans.contents.utility_company.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+274,COM4.USACE_St-Paul.contents.professional.fresh_water-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 31.0, 43.0, 53.0, 61.0, 67.0, 71.0, 73.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+275,COM5.USACE_Galveston.contents.bank-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 74.0, 83.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+276,COM5.USACE_Galveston.contents.bank.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 60.0, 70.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+277,COM5.USACE_Galveston.contents.bank.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 87.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+278,COM5.USACE_New-Orleans.contents.bank.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+279,COM5.USACE_New-Orleans.contents.bank.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+280,COM6.USACE_Galveston.contents.hospital-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 65.0, 72.0, 78.0, 85.0, 95.0, 95.0, 95.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+281,COM6.USACE_Galveston.contents.hospital.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 95.0, 95.0, 95.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+282,COM6.USACE_Galveston.contents.hospital.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 80.0, 83.0, 86.0, 89.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+283,COM7.USACE_Galveston.contents.average_medical_office/clinic.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 51.0, 60.0, 63.0, 67.0, 71.0, 72.0, 74.0, 77.0, 81.0, 86.0, 92.0, 94.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+284,COM7.USACE_Galveston.contents.doctor's_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 12.0, 15.0, 16.0, 18.0, 22.0, 27.0, 34.0, 43.0, 57.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+285,COM7.USACE_Galveston.contents.dentist's_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+286,COM7.USACE_Galveston.contents.dentist's_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.0, 40.0, 55.0, 70.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+287,COM7.USACE_Galveston.contents.chiropractic_clinic.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 30.0, 30.0, 30.0, 31.0, 32.0, 35.0, 38.0, 42.0, 47.0, 54.0, 62.0, 72.0, 84.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+288,COM7.USACE_Galveston.contents.x_ray_service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+289,COM7.USACE_New-Orleans.contents.medical_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+290,COM7.USACE_New-Orleans.contents.medical_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+291,COM7.USACE_New-Orleans.contents.dentist's_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+292,COM7.USACE_New-Orleans.contents.dentist's_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+293,COM8.USACE_Galveston.contents.average_entertainment/recreation.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.0, 45.0, 55.0, 64.0, 73.0, 77.0, 80.0, 82.0, 83.0, 85.0, 87.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+294,COM8.USACE_Galveston.contents.fishing_party_boat.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 24.0, 24.0, 24.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+295,COM8.USACE_Galveston.contents.fishing_party_boat.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 40.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+296,COM8.USACE_Galveston.contents.bowling_alley.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 22.0, 25.0, 27.0, 27.0, 28.0, 30.0, 32.0, 35.0, 39.0, 45.0, 51.0, 58.0, 67.0, 76.0, 86.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+297,COM8.USACE_Galveston.contents.country_club.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 38.0, 41.0, 44.0, 47.0, 52.0, 56.0, 62.0, 67.0, 74.0, 80.0, 87.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+298,COM8.USACE_Galveston.contents.country_club.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 19.0, 27.0, 34.0, 42.0, 48.0, 55.0, 62.0, 68.0, 73.0, 78.0, 84.0, 89.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+299,COM8.USACE_Galveston.contents.physical_fitness.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 45.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+300,COM8.USACE_Galveston.contents.private_pool.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+301,COM8.USACE_Galveston.contents.private_club.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 45.0, 49.0, 52.0, 55.0, 59.0, 63.0, 68.0, 74.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+302,COM8.USACE_Galveston.contents.private_club.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 21.0, 28.0, 35.0, 41.0, 47.0, 54.0, 62.0, 71.0, 83.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+303,COM8.USACE_Galveston.contents.radio_station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+304,COM8.USACE_Galveston.contents.recreation_facilities.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 20.0, 30.0, 45.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+305,COM8.USACE_Galveston.contents.recreation_facilities.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+306,COM8.USACE_Galveston.contents.tavern.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 36.0, 62.0, 73.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+307,COM8.USACE_Galveston.contents.tavern.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 42.0, 53.0, 78.0, 92.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+308,COM8.USACE_Galveston.contents.telephone_exchange.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+309,COM8.USACE_Galveston.contents.ymca.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 35.0, 35.0, 35.0, 36.0, 36.0, 37.0, 37.0, 38.0, 38.0, 39.0, 39.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+310,COM8.USACE_Galveston.contents.cafeteria_restaurant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 57.0, 70.0, 71.0, 75.0, 82.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+311,COM8.USACE_Galveston.contents.cafeteria_restaurant.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 73.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+312,COM8.USACE_Galveston.contents.drive_in_restaurant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 82.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+313,COM8.USACE_Galveston.contents.drive_in_restaurant.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 52.0, 60.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+314,COM8.USACE_New-Orleans.contents.bowling_alley.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+315,COM8.USACE_New-Orleans.contents.bowling_alley.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+316,COM8.USACE_New-Orleans.contents.fast_food_restaurant.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+317,COM8.USACE_New-Orleans.contents.fast_food_restaurant.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+318,COM8.USACE_New-Orleans.contents.full_service_restaurant.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+319,COM8.USACE_New-Orleans.contents.full_service_restaurant.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+320,COM8.USACE_New-Orleans.contents.lounge.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+321,COM8.USACE_New-Orleans.contents.lounge.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+322,COM8.USACE_St-Paul.contents.recreation.fresh_water-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 31.0, 43.0, 53.0, 61.0, 67.0, 71.0, 73.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+323,COM9.USACE_Galveston.contents.theater.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 6.0, 8.0, 9.0, 10.0, 12.0, 17.0, 22.0, 30.0, 41.0, 57.0, 66.0, 73.0, 79.0, 84.0, 90.0, 97.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+324,COM9.USACE_Galveston.contents.private_hall.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 8.0, 10.0, 12.0, 14.0, 18.0, 24.0, 32.0, 44.0, 60.0, 85.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+325,COM9.USACE_Galveston.contents.indoor_theater.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 9.0, 12.0, 16.0, 22.0, 28.0, 37.0, 46.0, 57.0, 68.0, 80.0, 93.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+326,COM9.USACE_New-Orleans.contents.movie_theater.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+327,COM9.USACE_New-Orleans.contents.movie_theater.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+328,COM10.USACE_Galveston.contents.garage.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 17.0, 20.0, 23.0, 25.0, 29.0, 35.0, 42.0, 51.0, 63.0, 77.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+329,IND1.USACE_Galveston.contents.average_heavy_industrial.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 24.0, 34.0, 41.0, 47.0, 52.0, 57.0, 60.0, 63.0, 64.0, 66.0, 68.0, 69.0, 72.0, 73.0, 73.0, 73.0, 74.0, 74.0, 74.0, 74.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+330,IND1.USACE_Galveston.contents.boiler_building.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 10.0, 10.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+331,IND1.USACE_Galveston.contents.cabinet_shop_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+332,IND1.USACE_Galveston.contents.cabinet_shop_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 8.0, 10.0, 11.0, 12.0, 13.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+333,IND1.USACE_Galveston.contents.concrete_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 50.0, 51.0, 53.0, 54.0, 55.0, 55.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+334,IND1.USACE_Galveston.contents.door_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 45.0, 70.0, 80.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+335,IND1.USACE_Galveston.contents.door_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+336,IND1.USACE_Galveston.contents.engine_room.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 65.0, 65.0, 65.0, 65.0, 65.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+337,IND1.USACE_Galveston.contents.fabrication_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+338,IND1.USACE_Galveston.contents.heat_exchanger_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 22.0, 32.0, 40.0, 46.0, 52.0, 55.0, 58.0, 60.0, 63.0, 71.0, 80.0, 85.0, 91.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+339,IND1.USACE_Galveston.contents.heat_exchanger_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 15.0, 17.0, 19.0, 21.0, 23.0, 24.0, 26.0, 28.0, 29.0, 32.0, 34.0, 36.0, 38.0, 39.0, 41.0, 43.0, 45.0, 47.0, 49.0, 51.0, 53.0, 55.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+340,IND1.USACE_Galveston.contents.lock_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+341,IND1.USACE_Galveston.contents.lock_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+342,IND1.USACE_Galveston.contents.lumber_mill.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+343,IND1.USACE_Galveston.contents.heavy_machine_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 16.0, 24.0, 33.0, 41.0, 49.0, 58.0, 66.0, 74.0, 82.0, 91.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+344,IND1.USACE_Galveston.contents.heavy_machine_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+345,IND1.USACE_Galveston.contents.light_machine_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 18.0, 27.0, 35.0, 42.0, 42.0, 42.0, 55.0, 55.0, 55.0, 65.0, 65.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+346,IND1.USACE_Galveston.contents.metal_coatings_serv.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 50.0, 63.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+347,IND1.USACE_Galveston.contents.metal_coatings_serv.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+348,IND1.USACE_Galveston.contents.pipe_threader_facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 75.0, 75.0, 75.0, 75.0, 90.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+349,IND1.USACE_Galveston.contents.pressure_test_facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 25.0, 25.0, 30.0, 30.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+350,IND1.USACE_Galveston.contents.metal_recycling.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 20.0, 20.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+351,IND1.USACE_Galveston.contents.machine_research_lab.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 42.0, 44.0, 46.0, 48.0, 50.0, 50.0, 50.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+352,IND1.USACE_Galveston.contents.machine_research_lab.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+353,IND1.USACE_Galveston.contents.scale_bldg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 15.0, 25.0, 40.0, 50.0, 75.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+354,IND1.USACE_Galveston.contents.welding_machine.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 6.0, 15.0, 18.0, 20.0, 21.0, 22.0, 24.0, 27.0, 30.0, 33.0, 37.0, 41.0, 45.0, 49.0, 54.0, 59.0, 63.0, 68.0, 72.0, 76.0, 80.0, 84.0, 88.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+355,IND2.USACE_Galveston.contents.average_light_industrial.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 9.0, 23.0, 35.0, 44.0, 52.0, 58.0, 62.0, 65.0, 68.0, 70.0, 73.0, 74.0, 77.0, 78.0, 78.0, 79.0, 80.0, 80.0, 80.0, 80.0, 81.0, 81.0, 81.0, 81.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+356,IND2.USACE_Galveston.contents.battery_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+357,IND2.USACE_Galveston.contents.battery_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 15.0, 15.0, 20.0, 20.0, 25.0, 25.0, 30.0, 30.0, 30.0, 30.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+358,IND2.USACE_Galveston.contents.control_bldg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 75.0, 85.0, 85.0, 90.0, 90.0, 90.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+359,IND2.USACE_Galveston.contents.electronic_equip_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+360,IND2.USACE_Galveston.contents.electronic_equip_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+361,IND2.USACE_Galveston.contents.frame_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 45.0, 80.0, 88.0, 93.0, 95.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+362,IND2.USACE_Galveston.contents.furniture_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+363,IND2.USACE_Galveston.contents.furniture_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+364,IND2.USACE_Galveston.contents.instrument_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 13.0, 20.0, 27.0, 34.0, 42.0, 48.0, 57.0, 71.0, 80.0, 85.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+365,IND2.USACE_Galveston.contents.instrument_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 50.0, 61.0, 73.0, 82.0, 90.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+366,IND2.USACE_Galveston.contents.leather_goods_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0, 36.0, 39.0, 42.0, 45.0, 48.0, 51.0, 54.0, 57.0, 60.0, 63.0, 66.0, 69.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+367,IND2.USACE_Galveston.contents.leather_goods_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 12.0, 16.0, 20.0, 23.0, 26.0, 27.0, 29.0, 30.0, 30.0, 36.0, 39.0, 41.0, 44.0, 46.0, 49.0, 51.0, 54.0, 57.0, 60.0, 63.0, 66.0, 69.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+368,IND2.USACE_Galveston.contents.industrial_loading_dock.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 25.0, 40.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+369,IND2.USACE_Galveston.contents.industrial_loading_dock.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 20.0, 20.0, 20.0, 20.0, 20.0, 30.0, 30.0, 30.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+370,IND2.USACE_Galveston.contents.locker_bldg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.0, 25.0, 55.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+371,IND2.USACE_Galveston.contents.maint_bldg_mfg_facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 15.0, 20.0, 25.0, 35.0, 45.0, 45.0, 45.0, 45.0, 50.0, 50.0, 50.0, 55.0, 55.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+372,IND2.USACE_Galveston.contents.newspaper_print_plant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 8.0, 11.0, 13.0, 16.0, 20.0, 25.0, 31.0, 39.0, 48.0, 59.0, 70.0, 82.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+373,IND2.USACE_Galveston.contents.newspaper_sales_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 9.0, 18.0, 33.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+374,IND2.USACE_Galveston.contents.newspaper_sales_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 46.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+375,IND2.USACE_Galveston.contents.manuf_facility_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+376,IND2.USACE_Galveston.contents.manuf_facility_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 30.0, 40.0, 45.0, 50.0, 60.0, 75.0, 85.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+377,IND2.USACE_Galveston.contents.commercial_printing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+378,IND2.USACE_Galveston.contents.commercial_printing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 35.0, 50.0, 70.0, 73.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+379,IND3.USACE_Galveston.contents.average_food/drugs/chemicals.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 20.0, 41.0, 51.0, 62.0, 67.0, 71.0, 73.0, 76.0, 78.0, 79.0, 82.0, 83.0, 84.0, 86.0, 87.0, 87.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+380,IND3.USACE_Galveston.contents.chemical_plant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 44.0, 52.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+381,IND3.USACE_Galveston.contents.chemical_plant.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+382,IND3.USACE_Galveston.contents.chemical_plant_bonding.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+383,IND3.USACE_Galveston.contents.chemical_plant_bonding.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+384,IND3.USACE_Galveston.contents.chemical_refinery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 45.0, 60.0, 75.0, 90.0, 93.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+385,IND3.USACE_Galveston.contents.chemical_refinery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+386,IND3.USACE_Galveston.contents.deodorizer_bldg_chemical.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 15.0, 20.0, 20.0, 20.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+387,IND3.USACE_Galveston.contents.deodorizer_bldg_chemical.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 75.0, 90.0, 90.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+388,IND3.USACE_Galveston.contents.feed_mill.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+389,IND3.USACE_Galveston.contents.feed_mill.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+390,IND3.USACE_Galveston.contents.food_processor.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 27.0, 33.0, 40.0, 50.0, 60.0, 70.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+391,IND3.USACE_Galveston.contents.food_processor.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+392,IND3.USACE_Galveston.contents.chemical_laboratory.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 60.0, 70.0, 80.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+393,IND3.USACE_Galveston.contents.chemical_laboratory.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 43.0, 60.0, 60.0, 60.0, 60.0, 70.0, 70.0, 80.0, 80.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+394,IND3.USACE_Galveston.contents.detergent_manuf_facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 24.0, 25.0, 26.0, 28.0, 31.0, 36.0, 42.0, 50.0, 71.0, 84.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+395,IND3.USACE_Galveston.contents.detergent_manuf_facility.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+396,IND3.USACE_Galveston.contents.meat_packing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 30.0, 70.0, 75.0, 85.0, 90.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+397,IND3.USACE_Galveston.contents.meat_packing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+398,IND3.USACE_Galveston.contents.detergent_mixer_bldg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 25.0, 25.0, 25.0, 25.0, 25.0, 35.0, 35.0, 45.0, 45.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+399,IND3.USACE_Galveston.contents.detergent_mixer_bldg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+400,IND3.USACE_Galveston.contents.plastic_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+401,IND3.USACE_Galveston.contents.plastic_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 30.0, 40.0, 47.0, 55.0, 62.0, 67.0, 72.0, 77.0, 80.0, 92.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+402,IND3.USACE_Galveston.contents.caustic_materials_refinery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 50.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+403,IND3.USACE_Galveston.contents.caustic_materials_refinery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 30.0, 40.0, 40.0, 50.0, 50.0, 60.0, 60.0, 70.0, 70.0, 80.0, 80.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+404,IND4.USACE_Galveston.contents.average_metals/minerals_processing.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 20.0, 26.0, 31.0, 37.0, 40.0, 44.0, 48.0, 53.0, 56.0, 57.0, 60.0, 62.0, 63.0, 63.0, 63.0, 64.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+405,IND4.USACE_Galveston.contents.foundry.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 17.0, 24.0, 29.0, 34.0, 38.0, 43.0, 45.0, 50.0, 58.0, 62.0, 67.0, 70.0, 75.0, 78.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+406,IND4.USACE_Galveston.contents.foundry.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 11.0, 16.0, 19.0, 21.0, 23.0, 28.0, 35.0, 47.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+407,IND4.USACE_Galveston.contents.lead_refinery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+408,IND4.USACE_Galveston.contents.lead_refinery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 30.0, 40.0, 45.0, 50.0, 60.0, 75.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+409,IND4.USACE_Galveston.contents.sand_&_gravel.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 10.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+410,IND4.USACE_Galveston.contents.sand_&_gravel.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 3.0, 6.0, 10.0, 13.0, 16.0, 19.0, 24.0, 27.0, 30.0, 45.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+411,IND4.USACE_Galveston.contents.sheet_metal.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 24.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+412,IND4.USACE_Galveston.contents.sheet_metal.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+413,IND5.USACE_Galveston.contents.average_high_technology.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 20.0, 41.0, 51.0, 62.0, 67.0, 71.0, 73.0, 76.0, 78.0, 79.0, 82.0, 83.0, 84.0, 86.0, 87.0, 87.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+414,IND6.USACE_Galveston.contents.average_construction.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 35.0, 47.0, 56.0, 59.0, 66.0, 69.0, 71.0, 72.0, 78.0, 79.0, 80.0, 80.0, 81.0, 81.0, 81.0, 82.0, 82.0, 82.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+415,IND6.USACE_Galveston.contents.carpet_tile_flooring.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 42.0, 54.0, 65.0, 75.0, 85.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+416,IND6.USACE_Galveston.contents.carpet_tile_flooring.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+417,IND6.USACE_Galveston.contents.contractor_roofing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 27.0, 36.0, 43.0, 48.0, 50.0, 50.0, 50.0, 50.0, 50.0, 51.0, 51.0, 51.0, 52.0, 52.0, 52.0, 52.0, 53.0, 53.0, 53.0, 54.0, 54.0, 54.0, 55.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+418,IND6.USACE_Galveston.contents.contractor_roofing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 27.0, 36.0, 43.0, 48.0, 53.0, 57.0, 59.0, 60.0, 66.0, 69.0, 72.0, 76.0, 79.0, 83.0, 86.0, 90.0, 93.0, 97.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+419,IND6.USACE_Galveston.contents.contractor_electric.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 40.0, 40.0, 40.0, 40.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+420,IND6.USACE_Galveston.contents.pier_drilling_co.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 23.0, 39.0, 55.0, 55.0, 56.0, 56.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+421,IND6.USACE_Galveston.contents.plumbing_co.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 45.0, 55.0, 63.0, 70.0, 76.0, 82.0, 87.0, 92.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+422,IND6.USACE_Galveston.contents.plumbing_co.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.0, 25.0, 35.0, 44.0, 53.0, 61.0, 69.0, 77.0, 85.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+423,IND6.USACE_Galveston.contents.sandblasting_co.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 45.0, 68.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+424,IND6.USACE_Galveston.contents.water_well_service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+425,IND6.USACE_New-Orleans.contents.carpeting_service.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+426,IND6.USACE_New-Orleans.contents.carpeting_service.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+427,IND6.USACE_New-Orleans.contents.heating_&_air_conditioning_service.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+428,IND6.USACE_New-Orleans.contents.heating_&_air_conditioning_service.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+429,IND6.USACE_New-Orleans.contents.plumbing_services.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+430,IND6.USACE_New-Orleans.contents.plumbing_services.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+431,AGR1.USACE_Galveston.contents.average_agriculture.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 20.0, 43.0, 58.0, 65.0, 66.0, 66.0, 67.0, 70.0, 75.0, 76.0, 76.0, 76.0, 77.0, 77.0, 77.0, 78.0, 78.0, 78.0, 79.0, 79.0, 79.0, 79.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+432,AGR1.USACE_Galveston.contents.dairy_processing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+433,AGR1.USACE_Galveston.contents.dairy_processing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+434,AGR1.USACE_Galveston.contents.horse_stalls.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 6.0, 8.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 29.0, 31.0, 32.0, 33.0, 34.0, 34.0, 36.0, 37.0, 38.0, 39.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+435,AGR1.USACE_Galveston.contents.veterinary_clinic.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+436,AGR1.USACE_New-Orleans.contents.veterinary_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+437,AGR1.USACE_New-Orleans.contents.veterinary_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+438,REL1.USACE_Galveston.contents.church-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 52.0, 72.0, 85.0, 92.0, 95.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+439,REL1.USACE_Galveston.contents.church.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+440,REL1.USACE_Galveston.contents.church.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 28.0, 54.0, 70.0, 84.0, 90.0, 95.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+441,REL1.USACE_New-Orleans.contents.civic_association.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+442,REL1.USACE_New-Orleans.contents.civic_association.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+443,GOV1.USACE_Galveston.contents.average_govt_services.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 59.0, 74.0, 83.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+444,GOV1.USACE_Galveston.contents.city_hall.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 75.0, 85.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+445,GOV1.USACE_Galveston.contents.post_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 43.0, 63.0, 70.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+446,GOV1.USACE_New-Orleans.contents.government_facility.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+447,GOV1.USACE_New-Orleans.contents.government_facility.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+448,GOV2.USACE_Galveston.contents.average_emergency_response.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 20.0, 38.0, 55.0, 70.0, 81.0, 89.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+449,GOV2.USACE_Galveston.contents.fire_station.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 50.0, 75.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+450,GOV2.USACE_Galveston.contents.police_station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 15.0, 25.0, 35.0, 48.0, 62.0, 78.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+451,EDU1.USACE_Galveston.contents.average_school.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 38.0, 53.0, 64.0, 68.0, 70.0, 72.0, 75.0, 79.0, 83.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+452,EDU1.USACE_Galveston.contents.commercial_school.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 26.0, 30.0, 33.0, 35.0, 39.0, 44.0, 50.0, 58.0, 66.0, 76.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+453,EDU1.USACE_Galveston.contents.library.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 50.0, 75.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+454,EDU1.USACE_New-Orleans.contents.elementary_school.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+455,EDU1.USACE_New-Orleans.contents.elementary_school.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+456,EDU2.USACE_Galveston.contents.average_college/university.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 38.0, 53.0, 64.0, 68.0, 70.0, 72.0, 75.0, 79.0, 83.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+457,EDU2.USACE_New-Orleans.contents.college.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+458,EDU2.USACE_New-Orleans.contents.college.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+459,RES1.BCAR_Jan-201.contents.all_floors.slab_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+460,RES3A.BCAR_Jan-201.contents.1to2_stories.slab_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+461,RES3B.BCAR_Jan-201.contents.1to2_stories.slab_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+462,RES1.BCAR_Jan-201.contents.all_floors.wall_2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+463,RES3A.BCAR_Jan-201.contents.1to2_stories.wall_2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+464,RES3B.BCAR_Jan-201.contents.1to2_stories.wall_2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+465,RES1.BCAR_Jan-201.contents.all_floors.wall_3ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+466,RES3A.BCAR_Jan-201.contents.1to2_stories.wall_3ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+467,RES3B.BCAR_Jan-201.contents.1to2_stories.wall_3ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+468,RES2.BCAR_Jan-201.contents.manufactured_home_mobile.structure.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+469,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+470,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+471,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+472,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+473,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+474,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+475,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+476,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+477,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+478,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+479,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+480,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+481,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+482,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+483,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+484,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+485,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+486,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+487,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+488,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+489,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+490,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+491,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+492,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+493,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+494,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+495,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+496,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+497,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+498,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+499,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+500,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+501,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+502,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+503,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+504,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+505,RES3.USACE_Chicago.contents.apartment_unit_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+506,RES1.BCAR_Jan-201.contents.one_story.with_basement.b14-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 11.0, 13.0, 16.0, 19.0, 22.0, 25.0, 27.0, 30.0, 32.0, 35.0, 36.0, 38.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+507,AGR1.USACE-Sacramento.contents.usace_sacramento_farms.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 76.0, 76.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+508,AGR1.USACE-Sacramento.contents.usace_sacramento_farms.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 56.0, 56.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+509,AGR1.USACE-Sacramento.contents.usace_sacramento_farms.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 27.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+510,COM.USACE-NACCS.contents.naccs_2_commercial_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.5, 24.0, 33.25, 40.0, 42.75, 45.5, 52.75, 60.0, 61.66667, 63.33333, 65.0, 66.66667, 68.33333, 70.0, 71.66667, 73.33333, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+511,COM.USACE-NACCS.contents.naccs_2_commercial_engineered_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 15.0, 26.25, 38.5, 52.375, 66.25, 73.125, 80.0, 82.0, 84.0, 86.0, 88.0, 90.0, 92.0, 94.0, 96.0, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+512,COM.USACE-NACCS.contents.naccs_3_commercial_non/pre_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 21.0, 29.75, 44.5, 49.75, 55.0, 59.75, 64.5, 67.5, 70.5, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+513,COM.USACE-NACCS.contents.naccs_3_commercial_non/pre_engineered_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.5, 21.0, 33.75, 52.5, 67.5, 82.5, 91.25, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+514,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+515,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+516,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+517,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+518,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+519,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+520,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+521,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+522,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+523,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+524,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+525,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+526,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+527,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+528,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+529,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+530,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+531,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+532,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+533,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+534,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+535,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+536,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+537,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+538,COM1.FEMA-BCA-Toolkit.contents.convenience_store.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+539,COM1.FEMA-BCA-Toolkit.contents.convenience_store.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+540,COM1.FEMA-BCA-Toolkit.contents.grocery.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+541,COM1.FEMA-BCA-Toolkit.contents.grocery.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+542,COM1.FEMA-BCA-Toolkit.contents.retail_clothing.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+543,COM1.FEMA-BCA-Toolkit.contents.retail_clothing.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+544,COM1.FEMA-BCA-Toolkit.contents.retail_electronics.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+545,COM1.FEMA-BCA-Toolkit.contents.retail_electronics.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+546,COM1.FEMA-BCA-Toolkit.contents.retail_furniture.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+547,COM1.FEMA-BCA-Toolkit.contents.retail_furniture.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+548,COM1.FEMA-BCA-Toolkit.contents.service_station.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+549,COM1.FEMA-BCA-Toolkit.contents.service_station.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+550,COM1.USACE-New-Orleans.contents.usace_new_orleans_department_store.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.3, 48.2, 54.1, 54.3, 54.8, 54.8, 54.8, 54.8, 54.8, 98.9, 99.9, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+551,COM1.USACE-New-Orleans.contents.usace_new_orleans_department_store.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.5, 99.8, 99.9, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+552,COM1.USACE-New-Orleans.contents.usace_new_orleans_large_grocery.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 73.6, 81.4, 84.8, 87.6, 96.3, 96.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+553,COM1.USACE-New-Orleans.contents.usace_new_orleans_large_grocery.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 97.5, 99.1, 99.4, 99.7, 99.7, 99.7, 99.7, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+554,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 29.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+555,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+556,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 32.0, 89.0, 89.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+557,COM1.USACE-Sacramento.contents.usace_sacramento_retail.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+558,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 72.0, 72.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+559,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+560,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+561,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 27.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+562,COM1.USACE-Sacramento.contents.usace_sacramento_retail.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 19.0, 36.0, 36.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+563,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 28.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+564,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 78.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+565,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+566,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 87.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+567,COM1.USACE-Sacramento.contents.usace_sacramento_retail.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+568,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+569,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 38.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+570,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 47.0, 47.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+571,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+572,COM1.USACE-Sacramento.contents.usace_sacramento_retail.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 38.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+573,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.0, 46.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+574,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+575,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+576,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+577,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+578,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+579,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+580,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+581,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+582,COM2.USACE-New-Orleans.contents.usace_new_orleans_warehouse.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.2, 30.7, 39.7, 44.5, 48.8, 54.1, 58.3, 62.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+583,COM2.USACE-New-Orleans.contents.usace_new_orleans_warehouse.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 69.9, 79.9, 96.3, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+584,COM2.FEMA-BCA-Toolkit.contents.warehouse.non_refrig_default.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+585,COM2.FEMA-BCA-Toolkit.contents.warehouse.non_refrig_default.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+586,COM2.FEMA-BCA-Toolkit.contents.warehouse.refrig_default.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+587,COM2.FEMA-BCA-Toolkit.contents.warehouse.refrig_default.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+588,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 23.0, 69.0, 69.0, 96.0, 96.0, 96.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+589,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 38.0, 38.0, 48.0, 48.0, 48.0, 48.0, 48.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+590,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 84.0, 84.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+591,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 40.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+592,COM3.FEMA-BCA-Toolkit.contents.protective_services.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+593,COM3.FEMA-BCA-Toolkit.contents.protective_services.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+594,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 9.0, 10.0, 23.0, 23.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+595,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 8.0, 8.0, 19.0, 19.0, 37.0, 37.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+596,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 10.0, 74.0, 74.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+597,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.0, 5.0, 35.0, 35.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+598,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+599,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+600,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+601,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+602,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+603,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+604,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+605,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+606,COM4.FEMA-BCA-Toolkit.contents.office_one_story.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+607,COM4.FEMA-BCA-Toolkit.contents.office_one_story.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+608,COM4.USACE-New-Orleans.contents.usace_new_orleans_utility_company.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+609,COM4.USACE-New-Orleans.contents.usace_new_orleans_utility_company.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+610,COM4.USACE-Sacramento.contents.usace_sacramento_office.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+611,COM4.USACE-Sacramento.contents.usace_sacramento_office.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 29.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+612,COM4.USACE-Sacramento.contents.usace_sacramento_office.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 97.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+613,COM4.USACE-Sacramento.contents.usace_sacramento_office.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.0, 46.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+614,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+615,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+616,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+617,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+618,COM6.FEMA-BCA-Toolkit.contents.hospital.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+619,COM6.FEMA-BCA-Toolkit.contents.hospital.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+620,COM6.USACE-New-Orleans.contents.usace_new_orleans_medical_office.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.8, 45.7, 94.1, 96.9, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+621,COM6.USACE-New-Orleans.contents.usace_new_orleans_medical_office.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.5, 98.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+622,COM6.USACE-Sacramento.contents.usace_sacramento_medical.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 89.0, 89.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+623,COM6.USACE-Sacramento.contents.usace_sacramento_medical.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 28.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+624,COM6.USACE-Sacramento.contents.usace_sacramento_medical.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+625,COM6.USACE-Sacramento.contents.usace_sacramento_medical.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 36.0, 36.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+626,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+627,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+628,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+629,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+630,COM7.FEMA-BCA-Toolkit.contents.medical_office.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+631,COM7.FEMA-BCA-Toolkit.contents.medical_office.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+632,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+633,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+634,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+635,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+636,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+637,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+638,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+639,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+640,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+641,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+642,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+643,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+644,COM8.FEMA-BCA-Toolkit.contents.fast_food.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+645,COM8.FEMA-BCA-Toolkit.contents.fast_food.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+646,COM8.FEMA-BCA-Toolkit.contents.non_fast_food.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+647,COM8.FEMA-BCA-Toolkit.contents.non_fast_food.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+648,COM8.FEMA-BCA-Toolkit.contents.recreation.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+649,COM8.FEMA-BCA-Toolkit.contents.recreation.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+650,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+651,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 23.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+652,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 38.0, 95.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+653,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+654,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+655,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 32.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+656,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 91.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+657,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+658,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+659,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 44.0, 44.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+660,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+661,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 47.0, 47.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+662,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+663,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+664,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+665,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+666,EDU1.FEMA-BCA-Toolkit.contents.schools.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+667,EDU1.FEMA-BCA-Toolkit.contents.schools.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+668,EDU1.USACE-New-Orleans.contents.usace_new_orleans_elementary_school.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+669,EDU1.USACE-New-Orleans.contents.usace_new_orleans_elementary_school.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+670,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 22.0, 67.0, 67.0, 88.0, 88.0, 88.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+671,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 18.0, 37.0, 37.0, 44.0, 44.0, 44.0, 44.0, 44.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+672,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+673,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+674,EDU2.USACE-New-Orleans.contents.usace_new_orleans_college.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+675,EDU2.USACE-New-Orleans.contents.usace_new_orleans_college.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+676,GOV1.USACE-New-Orleans.contents.usace_new_orleans_government_facility.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+677,GOV1.USACE-New-Orleans.contents.usace_new_orleans_government_facility.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+678,GOV1.USACE-Sacramento.contents.usace_sacramento_government.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+679,GOV1.USACE-Sacramento.contents.usace_sacramento_government.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+680,GOV1.USACE-Sacramento.contents.usace_sacramento_government.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 97.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+681,GOV1.USACE-Sacramento.contents.usace_sacramento_government.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 45.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+682,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 16.0, 56.0, 56.0, 92.0, 92.0, 92.0, 92.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+683,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.0, 14.0, 31.0, 31.0, 46.0, 46.0, 46.0, 46.0, 46.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+684,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 77.0, 77.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+685,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 40.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+686,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+687,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+688,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+689,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+690,IND2.FEMA-BCA-Toolkit.contents.industrial_light.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+691,IND2.FEMA-BCA-Toolkit.contents.industrial_light.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+692,IND2.USACE-Sacramento.contents.usace_sacramento_light.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 75.0, 75.0, 96.0, 96.0, 96.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+693,IND2.USACE-Sacramento.contents.usace_sacramento_light.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 41.0, 41.0, 48.0, 48.0, 48.0, 48.0, 48.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+694,IND2.USACE-Sacramento.contents.usace_sacramento_light.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+695,IND2.USACE-Sacramento.contents.usace_sacramento_light.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+696,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+697,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+698,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+699,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+700,REL1.FEMA-BCA-Toolkit.contents.religious_facilities.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+701,REL1.FEMA-BCA-Toolkit.contents.religious_facilities.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+702,REL1.USACE-Sacramento.contents.usace_sacramento_churches.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 85.0, 85.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+703,REL1.USACE-Sacramento.contents.usace_sacramento_churches.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 28.0, 47.0, 47.0, 49.0, 49.0, 49.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+704,REL1.USACE-Sacramento.contents.usace_sacramento_churches.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 73.0, 73.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+705,REL1.USACE-Sacramento.contents.usace_sacramento_churches.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+706,RES1.USACE-NACCS.contents.naccs_5a_single_story_residence_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 60.0, 80.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+707,RES1.USACE-NACCS.contents.naccs_5a_single_story_residence_no_basement_wave_crawlspace-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+708,RES1.USACE-NACCS.contents.naccs_5b_two_story_residence_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.0, 25.0, 35.0, 45.0, 50.0, 55.0, 62.5, 70.0, 73.33333, 76.66667, 80.0, 83.33333, 86.66667, 90.0, 93.33333, 96.66667, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+709,RES1.USACE-NACCS.contents.naccs_5b_two_story_residence_no_basement_wave_crawlspace-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.0, 20.0, 35.0, 45.0, 94.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+710,RES1.USACE-NACCS.contents.naccs_6a_single_story_residence_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 15.0, 45.0, 64.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+711,RES1.USACE-NACCS.contents.naccs_6b_two_story_residence_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 20.0, 35.0, 40.0, 50.0, 55.0, 60.0, 65.0, 70.0, 76.66667, 83.33333, 90.0, 96.66667, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+712,RES1.USACE-NACCS.contents.naccs_7a_building_on_open_pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 1.0, 1.0, 10.0, 40.0, 50.0, 80.0, 89.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+713,RES1.USACE-NACCS.contents.naccs_7a_building_on_open_pile_foundation_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 12.5, 20.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+714,RES1.USACE-NACCS.contents.naccs_7b_building_on_pile_foundation_with_enclosure-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 9.0, 11.0, 20.0, 40.0, 75.0, 85.0, 92.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+715,RES1.USACE-NACCS.contents.naccs_7b_building_on_pile_foundation_with_enclosure_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 25.0, 40.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+716,RES1.USACE-Generic.contents.single_family_residential.1_story_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.5, 13.2, 16.0, 18.9, 21.8, 24.7, 27.4, 30.0, 32.4, 34.5, 36.3, 37.7, 38.6, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+717,RES1.USACE-Generic.contents.single_family_residential.2_or_more_stories_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.4, 10.1, 11.9, 13.8, 15.7, 17.7, 19.8, 22.0, 24.3, 26.7, 29.1, 31.7, 34.4, 37.2, 40.0, 43.0, 46.1, 49.3, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+718,RES1.USACE-Generic.contents.single_family_residential.split_level_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 7.3, 9.4, 11.6, 13.8, 16.1, 18.2, 20.2, 22.1, 23.6, 24.9, 25.8, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+719,RES1.USACE-Generic.contents.single_family_residential.1_story_with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.4, 8.1, 13.3, 17.9, 22.0, 25.7, 28.8, 31.5, 33.8, 35.7, 37.2, 38.4, 39.2, 39.7, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+720,RES1.USACE-Generic.contents.single_family_residential.2_or_more_stories_with_no_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 1.0, 5.0, 8.7, 12.2, 15.5, 18.5, 21.3, 23.9, 26.3, 28.4, 30.3, 32.0, 33.4, 34.7, 35.6, 36.4, 36.9, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+721,RES1.USACE-Generic.contents.single_family_residential.split_level_with_no_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.2, 2.9, 4.7, 7.5, 11.1, 15.3, 20.1, 25.2, 30.5, 35.7, 40.9, 45.8, 50.2, 54.1, 57.2, 59.4, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+722,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_slab_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+723,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_wall_2_feet_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+724,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_wall_3_feet_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+725,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+2_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+726,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+4_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+727,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+6_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+728,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+8_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+729,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+10_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+730,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+12_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+731,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+2_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+732,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+4_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+733,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+6_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+734,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+8_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 3.0, 3.0, 3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+735,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+10_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 3.0, 3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+736,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+12_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 3.0, 13.0, 68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+737,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_1_story_without_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.4, 8.1, 13.3, 17.9, 22.0, 25.7, 28.8, 31.5, 33.8, 35.7, 37.2, 38.4, 39.2, 39.7, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+738,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_2_story_without_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 5.0, 8.7, 12.2, 15.5, 18.5, 21.3, 23.9, 26.3, 28.4, 30.3, 32.0, 33.4, 34.7, 35.6, 36.4, 36.9, 37.2, 37.2, 37.2, 37.2, 37.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+739,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_split_level_without_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2, 2.9, 4.7, 7.5, 11.1, 15.3, 20.1, 25.2, 30.5, 35.7, 40.9, 45.8, 50.2, 54.1, 57.2, 59.4, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+740,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_1_story_with_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.5, 13.2, 16.0, 18.9, 21.8, 24.7, 27.4, 30.0, 32.4, 34.5, 36.3, 37.7, 38.6, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+741,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_2_story_with_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.4, 10.1, 11.9, 13.8, 15.7, 17.7, 19.8, 22.0, 24.3, 26.7, 29.1, 31.7, 34.4, 37.2, 40.0, 43.0, 46.1, 49.3, 52.6, 52.6, 52.6, 52.6, 52.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+742,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_split_level_with_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.3, 9.4, 11.6, 13.8, 16.1, 18.2, 20.2, 22.1, 23.6, 24.9, 25.8, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+743,RES1.FEMA-FIMA.contents.fema_fia_split_level_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 13.5, 19.5, 37.5, 40.5, 42.0, 49.5, 51.0, 61.5, 64.5, 67.5, 69.0, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+744,RES1.FEMA-FIMA.contents.fema_fia_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+745,RES1.FEMA-FIMA.contents.fema_fia_split_level_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 7.5, 9.0, 24.0, 28.5, 33.0, 40.5, 48.0, 52.5, 54.0, 66.0, 72.0, 75.0, 78.0, 81.0, 84.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+746,RES1.FEMA-FIMA.contents.fema_fia_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+747,RES1.FEMA-FIMA.contents.fema_fia_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 15.0, 23.0, 35.0, 50.0, 58.0, 63.0, 66.5, 69.5, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+748,RES1.FEMA-FIMA.contents.fema_fia_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+749,RES1.FEMA-FIMA.contents.fema_fia_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+750,RES1.FEMA-FIMA.contents.fema_fia_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 29.0, 37.0, 54.0, 60.5, 64.5, 68.0, 70.0, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+751,RES1.FEMA-FIMA.contents.fema_fia.split_level.with_basement_split_level-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 7.5, 9.0, 24.0, 28.5, 33.0, 40.5, 48.0, 52.5, 54.0, 66.0, 72.0, 75.0, 78.0, 81.0, 84.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+752,RES1.FEMA-FIMA.contents.fema_fia_default_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+753,RES1.FEMA-FIMA.contents.fema_fia.1_story.no_basement_one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+754,RES1.FEMA-FIMA.contents.fema_fia.2_story.no_basement_two_or_more_stories-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+755,RES1.FEMA-FIMA.contents.fema_fia_default_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+756,RES1.FEMA-FIMA.contents.fema_fia_default_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 29.0, 37.0, 54.0, 60.5, 64.5, 68.0, 70.0, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+757,RES1.FEMA-FIMA.contents.fema_fia_default_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+758,RES1.FEMA-FIMA.contents.fema_fia.1_story.with_basement_one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+759,RES1.FEMA-FIMA.contents.fema_fia_default_split_level_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 13.5, 19.5, 37.5, 40.5, 42.0, 49.5, 51.0, 61.5, 64.5, 67.5, 69.0, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+760,RES1.FEMA-FIMA.contents.fema_fia_default_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+761,RES1.FEMA-FIMA.contents.fema_fia_default_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 15.0, 23.0, 35.0, 50.0, 58.0, 63.0, 66.5, 69.5, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+762,RES1.FEMA-BCA-Toolkit.contents.1_story_single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+763,RES1.FEMA-BCA-Toolkit.contents.1_story_single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+764,RES1.FEMA-BCA-Toolkit.contents.split_level_single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 4.5, 7.5, 9.0, 24.0, 28.5, 33.0, 40.5, 48.0, 52.5, 54.0, 66.0, 72.0, 75.0, 78.0, 81.0, 84.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+765,RES1.FEMA-BCA-Toolkit.contents.split_level_single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.5, 13.5, 19.5, 37.5, 40.5, 42.0, 49.5, 51.0, 61.5, 64.5, 67.5, 69.0, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+766,RES1.FEMA-BCA-Toolkit.contents.2_or_more_story_single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+767,RES1.FEMA-BCA-Toolkit.contents.2_or_more_story_single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+768,RES1.FEMA-BCA-Toolkit.contents.single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 11.0, 24.0, 29.0, 37.0, 54.0, 60.5, 64.5, 68.0, 70.0, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+769,RES1.FEMA-BCA-Toolkit.contents.single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 6.0, 15.0, 23.0, 35.0, 50.0, 58.0, 63.0, 66.5, 69.5, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+770,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.pier_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 41.8, 62.9, 82.1, 84.6, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+771,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.pier_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+772,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.slab_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 41.8, 62.9, 82.1, 84.6, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+773,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.slab_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+774,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.pier_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 36.5, 50.8, 50.8, 55.3, 55.3, 55.3, 55.3, 55.3, 72.5, 80.7, 87.1, 90.1, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+775,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.pier_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+776,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.slab_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 36.5, 50.8, 50.8, 55.3, 55.3, 55.3, 55.3, 55.3, 72.5, 80.7, 87.1, 90.1, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+777,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.slab_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+778,RES1.USACE-Generic.contents.one_story.usace_generic.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.5, 13.2, 16.0, 18.9, 21.8, 24.7, 27.4, 30.0, 32.4, 34.5, 36.3, 37.7, 38.6, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+779,RES1.USACE-Generic.contents.one_story.usace_generic.with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.4, 8.1, 13.3, 17.9, 22.0, 25.7, 28.8, 31.5, 33.8, 35.7, 37.2, 38.4, 39.2, 39.7, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+780,RES1.USACE-Generic.contents.split_level.usace_generic.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 7.3, 9.4, 11.6, 13.8, 16.1, 18.2, 20.2, 22.1, 23.6, 24.9, 25.8, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+781,RES1.USACE-Generic.contents.split_level.usace_generic.with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.2, 2.9, 4.7, 7.5, 11.1, 15.3, 20.1, 25.2, 30.5, 35.7, 40.9, 45.8, 50.2, 54.1, 57.2, 59.4, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+782,RES1.USACE-Generic.contents.two_or_more_stories.usace_generic.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.4, 10.1, 11.9, 13.8, 15.7, 17.7, 19.8, 22.0, 24.3, 26.7, 29.1, 31.7, 34.4, 37.2, 40.0, 43.0, 46.1, 49.3, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+783,RES1.USACE-Generic.contents.two_or_more_stories.usace_generic.with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 1.0, 5.0, 8.7, 12.2, 15.5, 18.5, 21.3, 23.9, 26.3, 28.4, 30.3, 32.0, 33.4, 34.7, 35.6, 36.4, 36.9, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+784,RES2.FEMA-BCA-Toolkit.contents.bca_toolkit_manufactured_home_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+785,RES2.FEMA-BCA-Toolkit.contents.bca_toolkit_mobile_home_outside_caz_fema_fia_riverine_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+786,RES2.FEMA-FIMA.contents.fema_fia_mobile_home_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+787,RES2.FEMA-FIMA.contents.fema_fia_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+788,RES2.FEMA-FIMA.contents.fema_fia_default_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+789,RES2.FEMA-FIMA.contents.fema_fia_default_mobile_home_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+790,RES2.FEMA-BCA-Toolkit.contents.mobile_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+791,RES3.USACE-NACCS.contents.naccs_1a_1_apartments_1_story_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.5, 28.0, 45.0, 60.0, 70.5, 81.0, 90.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+792,RES3.USACE-NACCS.contents.naccs_1a_3_apartments_3_story_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 15.0, 20.0, 25.0, 27.5, 30.0, 32.5, 35.0, 38.33333, 41.66667, 45.0, 48.33333, 51.66667, 55.0, 58.33333, 61.66667, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+793,RES3.USACE-NACCS.contents.naccs_4a_urban_high_rise-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.375, 0.5, 4.0, 5.0, 7.0, 7.5, 8.75, 10.0, 10.5, 11.0, 11.33333, 11.66667, 12.0, 12.33333, 12.66667, 13.0, 13.33333, 13.66667, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+794,RES3.USACE-NACCS.contents.naccs_4b_beach_high_rise-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.5, 5.5, 6.25, 7.0, 7.75, 8.5, 8.66667, 8.83333, 9.0, 9.16667, 9.33333, 9.5, 9.66667, 9.83333, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+795,RES3.USACE-NACCS.contents.naccs_4b_beach_high_rise_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.5, 21.0, 33.75, 52.5, 67.5, 82.5, 91.25, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+796,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+797,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+798,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+799,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+800,RES3.FEMA-BCA-Toolkit.contents.apartment.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+801,RES3.FEMA-BCA-Toolkit.contents.apartment.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+802,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+803,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+804,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+805,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+806,RES4.FEMA-BCA-Toolkit.contents.hotel.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+807,RES4.FEMA-BCA-Toolkit.contents.hotel.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+808,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 23.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+809,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+810,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+811,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+812,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+813,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+814,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+815,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+816,RES5.FEMA-BCA-Toolkit.contents.correctional_facility.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+817,RES5.FEMA-BCA-Toolkit.contents.correctional_facility.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+818,RES1.FEMA-PFRA-2021.structural.sfh_1_story_crawlspace_without_openings_freshwater_moderate_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.41, 14.53, 29.50675, 37.45002, 42.55856, 48.09843, 52.6553, 58.05831, 61.17923, 64.72929, 68.04079, 69.90791, 72.11384, 73.95114, 75.85186, 77.67767, 79.25999, 80.59, 80.59, 80.59, 80.59, 80.59, 80.59, 80.59, 80.59, 80.59|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+819,RES1.FEMA-PFRA-2021.structural.sfh_1_story_crawlspace_without_openings_freshwater_high_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.52182, 22.12909, 41.19042, 51.30002, 57.8018, 64.85255, 70.6522, 77.52875, 81.50083, 86.0191, 90.23374, 92.61006, 95.41762, 97.756, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+820,RES1.FEMA-PFRA-2021.structural.sfh_2_story_crawlspace_without_openings_freshwater_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.27955, 11.49773, 21.70915, 27.12501, 30.60811, 34.38529, 37.49225, 41.17612, 43.30402, 45.72452, 47.98236, 49.25539, 50.75944, 52.01214, 53.30808, 54.55296, 55.63181, 56.53864, 56.53864, 56.53864, 56.53864, 56.53864, 56.53864, 56.53864, 56.53864, 56.53864|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+821,RES1.FEMA-PFRA-2021.structural.sfh_2_story_crawlspace_without_openings_freshwater_long_duration_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.3075, 12.1475, 23.38007, 29.33751, 33.16892, 37.32382, 40.74147, 44.79373, 47.13442, 49.79697, 52.2806, 53.68093, 55.33538, 56.71336, 58.13889, 59.50825, 60.69499, 61.6925, 61.6925, 61.6925, 61.6925, 61.6925, 61.6925, 61.6925, 61.6925, 61.6925|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+822,RES1.FEMA-PFRA-2021.structural.sfh_2_story_crawlspace_without_openings_freshwater_moderate_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.33545, 12.79727, 25.05098, 31.55001, 35.72973, 40.26235, 43.9907, 48.41134, 50.96482, 53.86942, 56.57883, 58.10647, 59.91133, 61.41457, 62.9697, 64.46355, 65.75817, 66.84636, 66.84636, 66.84636, 66.84636, 66.84636, 66.84636, 66.84636, 66.84636, 66.84636|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+823,RES1.FEMA-PFRA-2021.structural.sfh_2_story_crawlspace_without_openings_freshwater_high_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.41932, 19.74659, 35.06373, 43.18752, 48.41216, 54.07794, 58.73837, 64.26418, 67.45603, 71.08678, 74.47354, 76.38309, 78.63916, 80.51821, 82.46213, 84.32943, 85.94771, 87.30795, 87.30795, 87.30795, 87.30795, 87.30795, 87.30795, 87.30795, 87.30795, 87.30795|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+824,RES1.FEMA-PFRA-2021.structural.sfh_1_story_slab_on_grade_freshwater_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.16364, 19.77887, 27.00002, 31.64414, 36.68039, 40.823, 45.73482, 48.57202, 51.79936, 54.80981, 56.50719, 58.51258, 60.18286, 61.91078, 63.57061, 65.00908, 66.21818, 66.21818, 66.21818, 66.21818, 66.21818, 66.21818, 66.21818, 66.21818, 66.21818|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+825,RES1.FEMA-PFRA-2021.structural.sfh_1_story_slab_on_grade_freshwater_long_duration_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.03, 22.00675, 29.95002, 35.05856, 40.59843, 45.1553, 50.55831, 53.67923, 57.22929, 60.54079, 62.40791, 64.61384, 66.45114, 68.35186, 70.17767, 71.75999, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+826,RES1.FEMA-PFRA-2021.structural.sfh_1_story_slab_on_grade_freshwater_moderate_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.03, 22.00675, 29.95002, 35.05856, 40.59843, 45.1553, 50.55831, 53.67923, 57.22929, 60.54079, 62.40791, 64.61384, 66.45114, 68.35186, 70.17767, 71.75999, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09, 73.09|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+827,RES1.FEMA-PFRA-2021.structural.sfh_1_story_slab_on_grade_freshwater_high_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 9.62909, 28.69042, 38.80002, 45.3018, 52.35255, 58.1522, 65.02875, 69.00083, 73.5191, 77.73374, 80.11006, 82.91762, 85.256, 87.67509, 89.99885, 92.01271, 93.70545, 93.70545, 93.70545, 93.70545, 93.70545, 93.70545, 93.70545, 93.70545, 93.70545|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+828,RES1.FEMA-PFRA-2021.structural.sfh_2_story_slab_on_grade_freshwater_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.99773, 14.20915, 19.62501, 23.10811, 26.88529, 29.99225, 33.67612, 35.80402, 38.22452, 40.48236, 41.75539, 43.25944, 44.51214, 45.80808, 47.05296, 48.13181, 49.03864, 49.03864, 49.03864, 49.03864, 49.03864, 49.03864, 49.03864, 49.03864, 49.03864|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+829,RES1.FEMA-PFRA-2021.structural.sfh_2_story_slab_on_grade_freshwater_long_duration_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.6475, 15.88007, 21.83751, 25.66892, 29.82382, 33.24147, 37.29373, 39.63442, 42.29697, 44.7806, 46.18093, 47.83538, 49.21336, 50.63889, 52.00825, 53.19499, 54.1925, 54.1925, 54.1925, 54.1925, 54.1925, 54.1925, 54.1925, 54.1925, 54.1925|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+830,RES1.FEMA-PFRA-2021.structural.sfh_2_story_slab_on_grade_freshwater_moderate_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.29727, 17.55098, 24.05001, 28.22973, 32.76235, 36.4907, 40.91134, 43.46482, 46.36942, 49.07883, 50.60647, 52.41133, 53.91457, 55.4697, 56.96355, 58.25817, 59.34636, 59.34636, 59.34636, 59.34636, 59.34636, 59.34636, 59.34636, 59.34636, 59.34636|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+831,RES1.FEMA-PFRA-2021.structural.sfh_2_story_slab_on_grade_freshwater_high_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.24659, 22.56373, 30.68752, 35.91216, 41.57794, 46.23837, 51.76418, 54.95603, 58.58678, 61.97354, 63.88309, 66.13916, 68.01821, 69.96213, 71.82943, 73.44771, 74.80795, 74.80795, 74.80795, 74.80795, 74.80795, 74.80795, 74.80795, 74.80795, 74.80795|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+832,RES1.FEMA-PFRA-2021.structural.sfh_1_story_basement_freshwater_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 9.66667, 12.03939, 20.3303, 33.94553, 41.16668, 45.81081, 50.84706, 54.98967, 59.90149, 62.73869, 65.96603, 68.97648, 70.67385, 72.67925, 74.34952, 76.07745, 77.73727, 79.17575, 80.38485, 80.38485, 80.38485, 80.38485, 80.38485, 80.38485, 80.38485, 80.38485, 80.38485|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+833,RES1.FEMA-PFRA-2021.structural.sfh_1_story_basement_freshwater_long_duration_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.63333, 13.24333, 22.36333, 37.34009, 45.28335, 50.39189, 55.93177, 60.48863, 65.89164, 69.01256, 72.56263, 75.87413, 77.74124, 79.94718, 81.78448, 83.68519, 85.511, 87.09332, 88.42333, 88.42333, 88.42333, 88.42333, 88.42333, 88.42333, 88.42333, 88.42333, 88.42333|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+834,RES1.FEMA-PFRA-2021.structural.sfh_1_story_basement_freshwater_moderate_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.15, 12.64136, 21.34682, 35.64281, 43.22502, 48.10135, 53.38941, 57.73915, 62.89656, 65.87563, 69.26433, 72.4253, 74.20755, 76.31321, 78.067, 79.88132, 81.62414, 83.13453, 84.40409, 84.40409, 84.40409, 84.40409, 84.40409, 84.40409, 84.40409, 84.40409, 84.40409|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+835,RES1.FEMA-PFRA-2021.structural.sfh_1_story_basement_freshwater_high_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 13.05, 16.25318, 27.44591, 45.82647, 55.57502, 61.84459, 68.64353, 74.23605, 80.86701, 84.69723, 89.05413, 93.11825, 95.4097, 98.11699, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+836,RES1.FEMA-PFRA-2021.structural.sfh_2_story_basement_freshwater_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.66667, 9.94621, 16.16439, 26.37582, 31.79168, 35.27477, 39.05196, 42.15892, 45.84278, 47.97068, 50.39119, 52.64903, 53.92206, 55.4261, 56.67881, 57.97475, 59.21962, 60.29848, 61.2053, 61.2053, 61.2053, 61.2053, 61.2053, 61.2053, 61.2053, 61.2053, 61.2053|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+837,RES1.FEMA-PFRA-2021.structural.sfh_2_story_basement_freshwater_long_duration_inundation_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 9.53333, 10.94083, 17.78083, 29.0134, 34.97085, 38.80225, 42.95716, 46.37481, 50.42706, 52.76775, 55.4303, 57.91393, 59.31426, 60.96872, 62.34669, 63.77223, 65.14159, 66.32832, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+838,RES1.FEMA-PFRA-2021.structural.sfh_2_story_basement_freshwater_moderate_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 9.53333, 10.94083, 17.78083, 29.0134, 34.97085, 38.80225, 42.95716, 46.37481, 50.42706, 52.76775, 55.4303, 57.91393, 59.31426, 60.96872, 62.34669, 63.77223, 65.14159, 66.32832, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583, 67.32583|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+839,RES1.FEMA-PFRA-2021.structural.sfh_2_story_basement_freshwater_high_velocity_flooding-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 12.39333, 14.22308, 23.11508, 37.71742, 45.4621, 50.44293, 55.8443, 60.28725, 65.55518, 68.59808, 72.0594, 75.28811, 77.10854, 79.25933, 81.0507, 82.90389, 84.68406, 86.22682, 87.52358, 87.52358, 87.52358, 87.52358, 87.52358, 87.52358, 87.52358, 87.52358, 87.52358|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+840,RES1.USACE-NACCS.structural.naccs_5a_single_story_residence_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 18.0, 28.0, 33.0, 37.5, 42.0, 48.5, 55.0, 58.33333, 61.66667, 65.0, 68.33333, 71.66667, 75.0, 78.33333, 81.66667, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+841,RES1.USACE-NACCS.structural.naccs_5a_single_story_residence_no_basement_wave_crawlspace-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.0, 10.0, 40.0, 70.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+842,RES1.USACE-NACCS.structural.naccs_5b_two_story_residence_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.0, 15.0, 20.0, 25.0, 27.5, 30.0, 40.0, 50.0, 53.33333, 56.66667, 60.0, 63.33333, 66.66667, 70.0, 73.33333, 76.66667, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+843,RES1.USACE-NACCS.structural.naccs_5b_two_story_residence_no_basement_wave_crawlspace-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 20.0, 36.0, 50.0, 86.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+844,RES1.USACE-NACCS.structural.naccs_6a_single_story_residence_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 7.5, 10.0, 18.0, 30.0, 35.0, 40.0, 55.0, 70.0, 80.0, 90.0, 91.66667, 93.33333, 95.0, 96.66667, 98.33333, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+845,RES1.USACE-NACCS.structural.naccs_6b_two_story_residence_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.5, 10.0, 15.0, 25.0, 30.0, 35.0, 42.5, 50.0, 55.0, 60.0, 63.33333, 66.66667, 70.0, 73.33333, 76.66667, 80.0, 83.33333, 86.66667, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+846,RES1.USACE-NACCS.structural.naccs_7a_building_on_open_pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.75, 1.5, 5.0, 25.0, 40.0, 50.0, 55.0, 60.0, 67.5, 75.0, 76.66667, 78.33333, 80.0, 81.66667, 83.33333, 85.0, 86.66667, 88.33333, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+847,RES1.USACE-NACCS.structural.naccs_7a_building_on_open_pile_foundation_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 7.0, 10.0, 50.0, 70.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+848,RES1.USACE-NACCS.structural.naccs_7b_building_on_pile_foundation_with_enclosure-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.0, 12.0, 20.0, 35.0, 40.0, 60.0, 65.0, 70.0, 75.0, 80.0, 86.66667, 93.33333, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+849,RES1.USACE-NACCS.structural.naccs_7b_building_on_pile_foundation_with_enclosure_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 27.0, 40.0, 60.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+850,RES1.USACE-Generic.structural.single_family_residential_structures.1_story_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"5.2, 9.0, 13.8, 19.4, 25.5, 32.0, 38.7, 45.5, 52.2, 58.6, 64.5, 69.8, 74.2, 77.7, 80.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1, 81.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+851,RES1.USACE-Generic.structural.single_family_residential_structures.2_or_more_stories_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"4.7, 7.2, 10.2, 13.9, 17.9, 22.3, 27.0, 31.9, 36.9, 41.9, 46.9, 51.8, 56.4, 60.8, 64.8, 68.4, 71.4, 73.7, 75.4, 76.4, 76.4, 76.4, 76.4, 76.4, 76.4, 76.4, 76.4, 76.4, 76.4|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+852,RES1.USACE-Generic.structural.single_family_residential_structures.split_level_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"4.7, 10.4, 14.2, 18.5, 23.2, 28.2, 33.4, 38.6, 43.8, 48.8, 53.5, 57.8, 61.6, 64.8, 67.2, 68.8, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3, 69.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+853,RES1.USACE-Generic.structural.single_family_residential_structures.1_story_with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.5, 13.4, 23.3, 32.1, 40.1, 47.1, 53.2, 58.6, 63.2, 67.2, 70.5, 73.2, 75.4, 77.2, 78.5, 79.5, 80.2, 80.7, 80.7, 80.7, 80.7, 80.7, 80.7, 80.7, 80.7, 80.7|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+854,RES1.USACE-Generic.structural.single_family_residential_structures.2_or_more_stories_with_no_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 3.0, 9.3, 15.2, 20.9, 26.3, 31.4, 36.2, 40.7, 44.9, 48.8, 52.4, 55.7, 58.7, 61.4, 63.8, 65.9, 67.7, 69.2, 69.2, 69.2, 69.2, 69.2, 69.2, 69.2, 69.2, 69.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+855,RES1.USACE-Generic.structural.single_family_residential_structures.split_level_with_no_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 6.4, 7.2, 9.4, 12.9, 17.4, 22.8, 28.9, 35.5, 42.3, 49.2, 56.1, 62.6, 68.6, 73.9, 78.4, 81.7, 83.8, 84.4, 84.4, 84.4, 84.4, 84.4, 84.4, 84.4, 84.4, 84.4|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+856,RES2.FEMA-FIMA.structural.fema_fia_mobile_home_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 44.0, 63.0, 73.0, 78.0, 80.0, 81.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+857,RES2.FEMA-FIMA.structural.fema_fia_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 44.0, 63.0, 73.0, 78.0, 80.0, 81.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+858,RES2.FEMA-FIMA.structural.fema_fia_default_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 44.0, 63.0, 73.0, 78.0, 80.0, 81.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+859,RES2.FEMA-FIMA.structural.fema_fia_default_mobile_home_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 44.0, 63.0, 73.0, 78.0, 80.0, 81.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+860,RES2.FEMA-BCA-Toolkit.structural.bca_toolkit_manufactured_home_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 15.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+861,RES2.FEMA-BCA-Toolkit.structural.bca_toolkit_mobile_home_outside_caz_fema_fia_riverine_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 44.0, 63.0, 73.0, 78.0, 80.0, 81.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+862,RES2.USACE-Wilmington.structural.mobile_home.usace_wilmington_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 8.0, 27.2, 41.7, 60.0, 93.0, 96.0, 96.36, 96.72, 97.08, 97.44, 97.8, 98.0, 98.17, 98.33, 98.5, 98.67, 98.83, 98.83, 98.83, 98.83, 98.83, 98.83, 98.83, 98.83, 98.83|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+863,RES3.Coastal-Resilience-Center.structural.one_story_residential_building_on_a_slab_on_grade_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 24.24578, 39.6524, 46.0891, 51.15489, 56.29364, 61.60844, 67.16664, 72.89188, 78.47971, 83.55899, 87.86561, 91.30374, 93.91655, 95.82575, 97.17853, 98.11447, 98.75031, 99.17637, 99.45901, 99.64519, 99.76726, 99.8471, 99.89927, 99.93339|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+864,RES3.Coastal-Resilience-Center.structural.two_story_residential_building_on_a_slab_on_grade_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.92181, 28.75192, 31.4835, 32.64505, 33.37454, 34.47743, 36.80944, 40.92171, 46.73971, 53.66177, 60.90508, 67.80474, 73.94418, 79.14829, 83.41429, 86.83566, 89.54434, 91.67543, 93.34967, 94.66733, 95.70818, 96.53419, 97.19288, 97.72056|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+865,RES3.FEMA-BCA-Toolkit.structural.apartment.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 17.4026, 21.2987, 26.22078, 29.58442, 32.06494, 34.12987, 37.24675, 38.90909, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195, 41.05195|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+866,RES3.FEMA-BCA-Toolkit.structural.apartment.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.47619, 0.47619, 0.95238, 12.52381, 20.38095, 25.90476, 31.66667, 33.52381, 37.47619, 39.42857, 42.19048, 45.14286, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905, 46.61905|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+867,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_shallow_foundation_freshwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 9.9, 15.66, 19.17, 23.58, 26.64, 28.89, 30.69, 33.48, 35.01, 36.99, 38.97, 40.95, 42.93, 44.91, 46.89, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+868,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_shallow_foundation_saltwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.1, 19.14, 23.43, 28.82, 32.56, 35.31, 37.51, 40.92, 42.79, 45.21, 47.63, 50.05, 52.47, 54.89, 57.31, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+869,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_shallow_foundation_moderate_waves_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.3, 22.62, 27.69, 34.06, 38.48, 41.73, 44.33, 48.36, 50.57, 53.43, 56.29, 59.15, 62.01, 64.87, 67.73, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+870,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_shallow_foundation_high_waves_adjusted_source_naccs_1a_1-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.3, 22.0, 33.0, 42.35, 49.775, 57.2, 60.775, 64.35, 67.925, 71.5, 75.075, 78.65, 82.225, 85.8, 89.375, 92.95, 96.525, 96.525, 96.525, 96.525, 96.525, 96.525, 96.525, 96.525, 96.525|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+871,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_deep_foundation_freshwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 9.9, 15.66, 19.17, 23.58, 26.64, 28.89, 30.69, 33.48, 35.01, 36.99, 38.97, 40.95, 42.93, 44.91, 46.89, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87, 48.87|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+872,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_deep_foundation_saltwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.1, 19.14, 23.43, 28.82, 32.56, 35.31, 37.51, 40.92, 42.79, 45.21, 47.63, 50.05, 52.47, 54.89, 57.31, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73, 59.73|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+873,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_deep_foundation_moderate_waves_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.3, 22.62, 27.69, 34.06, 38.48, 41.73, 44.33, 48.36, 50.57, 53.43, 56.29, 59.15, 62.01, 64.87, 67.73, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59, 70.59|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+874,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_deep_foundation_high_waves_adjusted_source_naccs_1a_1-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.85, 19.0, 28.5, 36.575, 42.9875, 49.4, 52.4875, 55.575, 58.6625, 61.75, 64.8375, 67.925, 71.0125, 74.1, 77.1875, 80.275, 83.3625, 83.3625, 83.3625, 83.3625, 83.3625, 83.3625, 83.3625, 83.3625, 83.3625|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+875,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_freshwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"12.0, 12.0, 12.0, 12.0, 12.0, 21.9, 27.66, 31.17, 35.58, 38.64, 40.89, 42.69, 45.48, 47.01, 48.99, 50.97, 52.95, 54.93, 56.91, 58.89, 60.87, 60.87, 60.87, 60.87, 60.87, 60.87, 60.87, 60.87, 60.87|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+876,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_saltwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"12.0, 12.0, 12.0, 12.0, 12.0, 24.1, 31.14, 35.43, 40.82, 44.56, 47.31, 49.51, 52.92, 54.79, 57.21, 59.63, 62.05, 64.47, 66.89, 69.31, 71.73, 71.73, 71.73, 71.73, 71.73, 71.73, 71.73, 71.73, 71.73|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+877,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_moderate_waves_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"12.0, 12.0, 12.0, 12.0, 12.0, 26.3, 34.62, 39.69, 46.06, 50.48, 53.73, 56.33, 60.36, 62.57, 65.43, 68.29, 71.15, 74.01, 76.87, 79.73, 82.59, 82.59, 82.59, 82.59, 82.59, 82.59, 82.59, 82.59, 82.59|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+878,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_high_waves_adjusted_source_naccs_1a_1-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"12.0, 12.0, 12.0, 12.0, 14.61, 29.4, 38.1, 45.495, 51.3675, 57.24, 60.0675, 62.895, 65.7225, 68.55, 71.3775, 74.205, 77.0325, 79.86, 82.6875, 85.515, 88.3425, 88.3425, 88.3425, 88.3425, 88.3425, 88.3425, 88.3425, 88.3425, 88.3425|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+879,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_with_elevator_freshwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"15.0, 15.0, 15.0, 15.0, 15.0, 24.9, 30.66, 34.17, 38.58, 41.64, 43.89, 45.69, 48.48, 50.01, 51.99, 53.97, 55.95, 57.93, 59.91, 61.89, 63.87, 63.87, 63.87, 63.87, 63.87, 63.87, 63.87, 63.87, 63.87|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+880,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_with_elevator_saltwater_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"15.0, 15.0, 15.0, 15.0, 15.0, 27.1, 34.14, 38.43, 43.82, 47.56, 50.31, 52.51, 55.92, 57.79, 60.21, 62.63, 65.05, 67.47, 69.89, 72.31, 74.73, 74.73, 74.73, 74.73, 74.73, 74.73, 74.73, 74.73, 74.73|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+881,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_with_elevator_moderate_waves_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"15.0, 15.0, 15.0, 15.0, 15.0, 29.3, 37.62, 42.69, 49.06, 53.48, 56.73, 59.33, 63.36, 65.57, 68.43, 71.29, 74.15, 77.01, 79.87, 82.73, 85.59, 85.59, 85.59, 85.59, 85.59, 85.59, 85.59, 85.59, 85.59|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+882,RES3.FEMA-CPFRA-2021.structural.low_rise_apartment_basement_foundation_with_elevator_high_waves_adjusted_source_naccs_1a_1-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"15.0, 15.0, 15.0, 15.0, 17.61, 32.4, 41.1, 48.495, 54.3675, 60.24, 63.0675, 65.895, 68.7225, 71.55, 74.3775, 77.205, 80.0325, 82.86, 85.6875, 88.515, 91.3425, 91.3425, 91.3425, 91.3425, 91.3425, 91.3425, 91.3425, 91.3425, 91.3425|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+883,RES3.USACE-NACCS.structural.naccs_1a_1_apartments_1_story_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 35.0, 43.0, 51.5, 60.0, 63.75, 67.5, 71.25, 75.0, 78.75, 82.5, 86.25, 90.0, 93.75, 97.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+884,RES3.USACE-NACCS.structural.naccs_1a_3_apartments_3_story_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.0, 20.0, 28.0, 28.0, 33.0, 38.0, 42.0, 46.0, 47.33333, 48.66667, 50.0, 51.33333, 52.66667, 54.0, 55.33333, 56.66667, 58.0, 58.0, 58.0, 58.0, 58.0, 58.0, 58.0, 58.0, 58.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+885,RES3.USACE-NACCS.structural.naccs_4a_urban_high_rise-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 11.0, 13.0, 13.75, 15.5, 17.5, 19.0, 20.25, 21.5, 22.0, 22.5, 22.83333, 23.16667, 23.5, 23.83333, 24.16667, 24.5, 24.83333, 25.16667, 25.5, 25.5, 25.5, 25.5, 25.5, 25.5, 25.5, 25.5, 25.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+886,RES3.USACE-NACCS.structural.naccs_4b_beach_high_rise-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 7.0, 7.75, 9.625, 11.5, 12.125, 12.75, 14.0, 15.25, 16.5, 17.75, 19.0, 20.25, 21.5, 22.75, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0, 24.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+887,RES3.USACE-NACCS.structural.naccs_4b_beach_high_rise_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.5, 5.0, 7.5, 11.0, 12.5, 14.0, 15.0, 16.0, 17.16667, 18.33333, 19.5, 20.66667, 21.83333, 23.0, 24.16667, 25.33333, 26.5, 26.5, 26.5, 26.5, 26.5, 26.5, 26.5, 26.5, 26.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+888,RES4.FEMA-BCA-Toolkit.structural.hotel.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.14286, 10.17143, 15.28571, 19.22857, 23.6, 27.82857, 29.8, 32.51429, 35.6, 37.68571, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143, 39.77143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+889,RES4.FEMA-BCA-Toolkit.structural.hotel.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.53571, 0.53571, 1.25, 11.67857, 18.10714, 23.89286, 29.07143, 31.82143, 35.32143, 38.07143, 40.75, 44.39286, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429, 45.71429|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+890,RES5.FEMA-BCA-Toolkit.structural.correctional_facility.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.41223, 0.41223, 0.41223, 10.27052, 18.60638, 22.77964, 31.21695, 34.39666, 40.15957, 46.2481, 52.77432, 55.40426, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598, 58.3598|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+891,RES5.FEMA-BCA-Toolkit.structural.correctional_facility.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.64992, 0.64992, 0.84925, 10.84053, 19.96512, 24.84551, 33.96138, 36.49336, 43.17774, 49.59344, 56.28821, 59.54817, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025, 62.30025|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+0,RES1.FIA.contents.one_floor.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 35.0, 36.0, 38.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+1,RES1.FIA.contents.one_floor.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 20.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+2,RES1.FIA.contents.two_floors.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 19.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+3,RES1.FIA-Modified.contents.two_floors.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 18.0, 25.0, 29.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+4,RES1.FIA-Modified.contents.three_or_more_floors.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 15.0, 21.0, 22.0, 23.0, 25.0, 27.0, 30.0, 35.0, 40.0, 43.0, 45.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+5,RES1.FIA-Modified.contents.three_or_more_floors.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 22.0, 27.0, 28.0, 29.0, 30.0, 32.0, 35.0, 39.0, 43.0, 46.0, 47.0, 50.0, 52.0, 53.0, 55.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+6,RES1.FIA.contents.split_level.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 19.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+7,RES1.FIA-Modified.contents.split_level.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 18.0, 25.0, 29.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+8,RES1.FIA.contents.one_floor.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 17.0, 23.0, 29.0, 35.0, 40.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+9,RES1.FIA.contents.one_floor.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 20.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+10,RES1.FIA.contents.two_floors.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 9.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+11,RES1.FIA-Modified.contents.two_floors.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 17.0, 23.0, 28.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+12,RES1.FIA-Modified.contents.three_or_more_floors.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 11.0, 15.0, 19.0, 23.0, 26.0, 29.0, 32.0, 35.0, 41.0, 43.0, 45.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+13,RES1.FIA-Modified.contents.three_or_more_floors.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 14.0, 18.0, 22.0, 25.0, 29.0, 31.0, 34.0, 36.0, 39.0, 44.0, 46.0, 47.0, 50.0, 52.0, 53.0, 55.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+14,RES1.FIA.contents.split_level.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 9.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+15,RES1.FIA-Modified.contents.split_level.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 17.0, 23.0, 28.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+16,RES1.USACE_IWR.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 4.0, 16.0, 26.0, 36.0, 44.0, 52.0, 58.0, 64.0, 68.0, 72.0, 74.0, 76.0, 78.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+17,RES1.USACE_IWR.contents.two_or_more_stories.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.0, 10.0, 18.0, 24.0, 32.0, 38.0, 42.0, 48.0, 52.0, 56.0, 60.0, 64.0, 66.0, 70.0, 72.0, 72.0, 74.0, 74.0, 76.0, 76.0, 78.0, 78.0, 80.0, 80.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+18,RES1.USACE_IWR.contents.split_level.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 4.0, 6.0, 10.0, 16.0, 22.0, 30.0, 40.0, 50.0, 62.0, 72.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+19,RES1.USACE_Chicago.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+20,RES1.USACE_Chicago.contents.one_story.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 6.0, 7.0, 8.0, 15.0, 19.0, 22.0, 28.0, 33.0, 39.0, 43.0, 49.0, 54.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+21,RES1.USACE_Chicago.contents.split_level.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 19.0, 32.0, 41.0, 47.0, 51.0, 53.0, 55.0, 56.0, 62.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+22,RES1.USACE_Chicago.contents.split_level.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 18.0, 31.0, 44.0, 52.0, 58.0, 61.0, 63.0, 64.0, 66.0, 69.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+23,RES1.USACE_Chicago.contents.two_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 18.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 62.0, 66.0, 70.0, 74.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+24,RES1.USACE_Chicago.contents.two_story.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 6.0, 9.0, 11.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 49.0, 55.0, 61.0, 64.0, 71.0, 76.0, 78.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+25,RES1.USACE_Galveston.contents.one_&_1/2_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 22.0, 36.0, 45.0, 57.0, 66.0, 71.0, 77.0, 79.0, 82.0, 84.0, 86.0, 87.0, 89.0, 90.0, 91.0, 92.0, 92.0, 92.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+26,RES1.USACE_Galveston.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 42.0, 60.0, 71.0, 77.0, 82.0, 85.0, 86.0, 87.0, 88.0, 88.0, 88.0, 89.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+27,RES1.USACE_Galveston.contents.two_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 24.0, 34.0, 40.0, 47.0, 53.0, 56.0, 58.0, 58.0, 58.0, 61.0, 66.0, 68.0, 76.0, 81.0, 86.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+28,RES1.USACE_New-Orleans.contents.one_story.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 63.0, 82.0, 85.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+29,RES1.USACE_New-Orleans.contents.one_story.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+30,RES1.USACE_New-Orleans.contents.two_story.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 37.0, 51.0, 51.0, 55.0, 55.0, 55.0, 55.0, 55.0, 73.0, 81.0, 87.0, 90.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+31,RES1.USACE_New-Orleans.contents.two_story.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+32,RES1.USACE_St-Paul.contents.one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 44.0, 54.0, 63.0, 68.0, 73.0, 75.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+33,RES1.USACE_St-Paul.contents.two_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 44.0, 54.0, 63.0, 68.0, 73.0, 75.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+34,RES1.USACE_Wilmington.contents.one_&_1/2_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"6.0, 7.0, 8.0, 9.0, 9.0, 21.0, 31.0, 43.0, 54.0, 67.0, 70.0, 73.0, 76.0, 78.0, 81.0, 82.0, 84.0, 85.0, 86.0, 87.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+35,RES1.USACE_Wilmington.contents.one_&_1/2_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 9.0, 21.0, 31.0, 37.0, 43.0, 51.0, 57.0, 63.0, 68.0, 74.0, 80.0, 81.0, 83.0, 84.0, 86.0, 87.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+36,RES1.USACE_Wilmington.contents.one_&_1/2_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 6.0, 10.0, 15.0, 18.0, 21.0, 23.0, 24.0, 28.0, 34.0, 44.0, 54.0, 63.0, 73.0, 76.0, 79.0, 82.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+37,RES1.USACE_Wilmington.contents.one_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"9.0, 10.0, 13.0, 13.0, 14.0, 26.0, 38.0, 52.0, 64.0, 78.0, 81.0, 85.0, 88.0, 91.0, 95.0, 96.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+38,RES1.USACE_Wilmington.contents.one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 36.0, 43.0, 46.0, 52.0, 59.0, 66.0, 73.0, 80.0, 87.0, 88.0, 90.0, 92.0, 94.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+39,RES1.USACE_Wilmington.contents.one_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 11.0, 16.0, 20.0, 24.0, 28.0, 30.0, 32.0, 38.0, 46.0, 54.0, 63.0, 72.0, 82.0, 85.0, 89.0, 92.0, 95.0, 96.0, 97.0, 98.0, 98.0, 99.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+40,RES1.USACE_Wilmington.contents.one_story_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"10.0, 12.0, 13.0, 13.0, 20.0, 34.0, 48.0, 52.0, 64.0, 78.0, 81.0, 85.0, 88.0, 91.0, 95.0, 96.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+41,RES1.USACE_Wilmington.contents.split_level-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 15.0, 21.0, 25.0, 32.0, 44.0, 50.0, 55.0, 61.0, 66.0, 72.0, 75.0, 79.0, 83.0, 87.0, 91.0, 94.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+42,RES1.USACE_Wilmington.contents.two_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"6.0, 7.0, 8.0, 9.0, 10.0, 19.0, 29.0, 38.0, 45.0, 53.0, 55.0, 57.0, 59.0, 61.0, 63.0, 67.0, 71.0, 75.0, 78.0, 82.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+43,RES1.USACE_Wilmington.contents.two_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 18.0, 26.0, 30.0, 40.0, 50.0, 52.0, 53.0, 55.0, 56.0, 58.0, 63.0, 68.0, 72.0, 77.0, 82.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+44,RES1.USACE_Wilmington.contents.two_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 7.0, 10.0, 13.0, 15.0, 18.0, 19.0, 21.0, 25.0, 33.0, 40.0, 47.0, 54.0, 56.0, 59.0, 62.0, 65.0, 67.0, 71.0, 75.0, 79.0, 83.0, 87.0, 91.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+45,RES2.FIA.contents.mobile_home.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 49.0, 64.0, 70.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+46,RES2.FIA.contents.mobile_home.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 50.0, 65.0, 71.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+47,RES2.USACE_Chicago.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 49.0, 64.0, 70.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+48,RES2.USACE_Galveston.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 23.0, 36.0, 43.0, 55.0, 66.0, 78.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+49,RES2.USACE_New-Orleans.contents.mobile_home.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 39.0, 54.0, 75.0, 77.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+50,RES2.USACE_New-Orleans.contents.mobile_home.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+51,RES2.USACE_Wilmington.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 22.0, 32.0, 37.0, 50.0, 63.0, 74.0, 82.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+52,RES3.USACE_Chicago.contents.apartment_unit_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+53,RES3.USACE_Chicago.contents.apartment_unit_sub_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 6.0, 7.0, 8.0, 15.0, 19.0, 22.0, 28.0, 33.0, 39.0, 43.0, 49.0, 54.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+54,RES3.USACE_Galveston.contents.apartment.living_area_on_one_floor-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 34.0, 44.0, 55.0, 67.0, 77.0, 87.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+55,RES3.USACE_Galveston.contents.condominium.living_area_on_multiple_floors-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 24.0, 34.0, 40.0, 47.0, 53.0, 56.0, 58.0, 58.0, 58.0, 61.0, 66.0, 68.0, 76.0, 81.0, 86.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+56,RES4.USACE_Galveston.contents.average_hotel/motel.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 19.0, 25.0, 29.0, 34.0, 39.0, 44.0, 49.0, 56.0, 65.0, 74.0, 82.0, 88.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+57,RES4.USACE_Galveston.contents.hotel.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 22.0, 28.0, 33.0, 37.0, 41.0, 44.0, 46.0, 49.0, 54.0, 60.0, 69.0, 81.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+58,RES4.USACE_Galveston.contents.motel_unit.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 16.0, 21.0, 25.0, 30.0, 36.0, 43.0, 52.0, 63.0, 76.0, 88.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+59,RES5.USACE_Galveston.contents.average_institutional_dormitory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 60.0, 73.0, 81.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+60,RES6.USACE_Galveston.contents.nursing_home.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 60.0, 73.0, 81.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+61,COM1.USACE_Galveston.contents.average_retail_trade_equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 26.0, 42.0, 56.0, 68.0, 78.0, 83.0, 85.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 92.0, 92.0, 93.0, 93.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+62,COM1.USACE_Galveston.contents.antique.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 78.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+63,COM1.USACE_Galveston.contents.appliance_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 16.0, 28.0, 58.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+64,COM1.USACE_Galveston.contents.appliance_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 91.0, 94.0, 95.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+65,COM1.USACE_Galveston.contents.large_auto_dealer.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 50.0, 90.0, 95.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+66,COM1.USACE_Galveston.contents.bait_stand.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 7.0, 11.0, 16.0, 22.0, 29.0, 36.0, 44.0, 52.0, 60.0, 69.0, 79.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+67,COM1.USACE_Galveston.contents.bakery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 14.0, 35.0, 70.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+68,COM1.USACE_Galveston.contents.bakery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 55.0, 65.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+69,COM1.USACE_Galveston.contents.boat_sales/service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 41.0, 65.0, 83.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+70,COM1.USACE_Galveston.contents.boat_sales/service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.0, 24.0, 43.0, 82.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+71,COM1.USACE_Galveston.contents.book_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 14.0, 21.0, 27.0, 35.0, 42.0, 49.0, 57.0, 65.0, 73.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+72,COM1.USACE_Galveston.contents.camera_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 33.0, 65.0, 88.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+73,COM1.USACE_Galveston.contents.carpet_and_paint_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+74,COM1.USACE_Galveston.contents.carpet_and_paint_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 95.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+75,COM1.USACE_Galveston.contents.mens_clothing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 41.0, 60.0, 78.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+76,COM1.USACE_Galveston.contents.mens_clothing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 37.0, 50.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+77,COM1.USACE_Galveston.contents.crafts.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+78,COM1.USACE_Galveston.contents.crafts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 50.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+79,COM1.USACE_Galveston.contents.chain_drug_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+80,COM1.USACE_Galveston.contents.fabric_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+81,COM1.USACE_Galveston.contents.fabric_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 30.0, 50.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+82,COM1.USACE_Galveston.contents.feed_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+83,COM1.USACE_Galveston.contents.feed_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 15.0, 15.0, 15.0, 25.0, 25.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+84,COM1.USACE_Galveston.contents.florist.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 49.0, 60.0, 75.0, 78.0, 79.0, 82.0, 85.0, 89.0, 93.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+85,COM1.USACE_Galveston.contents.florist.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+86,COM1.USACE_Galveston.contents.fruit_stand.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+87,COM1.USACE_Galveston.contents.large_furniture_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+88,COM1.USACE_Galveston.contents.gas/butane_supply.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 46.0, 65.0, 75.0, 81.0, 86.0, 90.0, 94.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+89,COM1.USACE_Galveston.contents.gift_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 54.0, 63.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+90,COM1.USACE_Galveston.contents.greenhouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 34.0, 50.0, 66.0, 80.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+91,COM1.USACE_Galveston.contents.greenhouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 66.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+92,COM1.USACE_Galveston.contents.small_grocery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 20.0, 20.0, 20.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+93,COM1.USACE_Galveston.contents.small_grocery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 35.0, 50.0, 65.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+94,COM1.USACE_Galveston.contents.medium_grocery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 80.0, 90.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+95,COM1.USACE_Galveston.contents.medium_grocery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 22.0, 44.0, 74.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+96,COM1.USACE_Galveston.contents.gun_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 30.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+97,COM1.USACE_Galveston.contents.gun_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 39.0, 58.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+98,COM1.USACE_Galveston.contents.hardware.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+99,COM1.USACE_Galveston.contents.hardware.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+100,COM1.USACE_Galveston.contents.hobby_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 56.0, 87.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+101,COM1.USACE_Galveston.contents.hobby_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 49.0, 64.0, 77.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+102,COM1.USACE_Galveston.contents.lawnmower.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 24.0, 46.0, 57.0, 65.0, 72.0, 79.0, 86.0, 91.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+103,COM1.USACE_Galveston.contents.lawnmower.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+104,COM1.USACE_Galveston.contents.liquor_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 9.0, 16.0, 84.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+105,COM1.USACE_Galveston.contents.liquor_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 81.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+106,COM1.USACE_Galveston.contents.meat_market.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 78.0, 81.0, 84.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+107,COM1.USACE_Galveston.contents.meat_market.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+108,COM1.USACE_Galveston.contents.motorcycle_dealer.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+109,COM1.USACE_Galveston.contents.motorcycle_dealer.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 45.0, 65.0, 85.0, 85.0, 85.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+110,COM1.USACE_Galveston.contents.music_center.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 52.0, 72.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+111,COM1.USACE_Galveston.contents.music_center.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 66.0, 72.0, 75.0, 80.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+112,COM1.USACE_Galveston.contents.plant_nursery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 8.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+113,COM1.USACE_Galveston.contents.plant_nursery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 20.0, 79.0, 88.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+114,COM1.USACE_Galveston.contents.paint_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+115,COM1.USACE_Galveston.contents.paint_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 70.0, 73.0, 76.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+0,RES1.FIA.contents.one_floor.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 35.0, 36.0, 38.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+1,RES1.FIA.contents.one_floor.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 20.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+2,RES1.FIA.contents.two_floors.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 19.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+3,RES1.FIA-Modified.contents.two_floors.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 18.0, 25.0, 29.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+4,RES1.FIA-Modified.contents.three_or_more_floors.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 15.0, 21.0, 22.0, 23.0, 25.0, 27.0, 30.0, 35.0, 40.0, 43.0, 45.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+5,RES1.FIA-Modified.contents.three_or_more_floors.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 22.0, 27.0, 28.0, 29.0, 30.0, 32.0, 35.0, 39.0, 43.0, 46.0, 47.0, 50.0, 52.0, 53.0, 55.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+6,RES1.FIA.contents.split_level.no_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 19.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+7,RES1.FIA-Modified.contents.split_level.with_basement.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 16.0, 18.0, 25.0, 29.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+8,RES1.FIA.contents.one_floor.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 17.0, 23.0, 29.0, 35.0, 40.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+9,RES1.FIA.contents.one_floor.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 20.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+10,RES1.FIA.contents.two_floors.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 9.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+11,RES1.FIA-Modified.contents.two_floors.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 17.0, 23.0, 28.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+12,RES1.FIA-Modified.contents.three_or_more_floors.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 11.0, 15.0, 19.0, 23.0, 26.0, 29.0, 32.0, 35.0, 41.0, 43.0, 45.0, 48.0, 50.0, 52.0, 54.0, 56.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+13,RES1.FIA-Modified.contents.three_or_more_floors.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 14.0, 18.0, 22.0, 25.0, 29.0, 31.0, 34.0, 36.0, 39.0, 44.0, 46.0, 47.0, 50.0, 52.0, 53.0, 55.0, 57.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+14,RES1.FIA.contents.split_level.no_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 9.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+15,RES1.FIA-Modified.contents.split_level.with_basement.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 7.0, 8.0, 15.0, 17.0, 23.0, 28.0, 33.0, 37.0, 42.0, 46.0, 52.0, 55.0, 58.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+16,RES1.USACE_IWR.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 4.0, 16.0, 26.0, 36.0, 44.0, 52.0, 58.0, 64.0, 68.0, 72.0, 74.0, 76.0, 78.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+17,RES1.USACE_IWR.contents.two_or_more_stories.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.0, 10.0, 18.0, 24.0, 32.0, 38.0, 42.0, 48.0, 52.0, 56.0, 60.0, 64.0, 66.0, 70.0, 72.0, 72.0, 74.0, 74.0, 76.0, 76.0, 78.0, 78.0, 80.0, 80.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+18,RES1.USACE_IWR.contents.split_level.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 4.0, 6.0, 10.0, 16.0, 22.0, 30.0, 40.0, 50.0, 62.0, 72.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+19,RES1.USACE_Chicago.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+20,RES1.USACE_Chicago.contents.one_story.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 6.0, 7.0, 8.0, 15.0, 19.0, 22.0, 28.0, 33.0, 39.0, 43.0, 49.0, 54.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+21,RES1.USACE_Chicago.contents.split_level.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 19.0, 32.0, 41.0, 47.0, 51.0, 53.0, 55.0, 56.0, 62.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+22,RES1.USACE_Chicago.contents.split_level.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 18.0, 31.0, 44.0, 52.0, 58.0, 61.0, 63.0, 64.0, 66.0, 69.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0, 73.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+23,RES1.USACE_Chicago.contents.two_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 11.0, 18.0, 23.0, 28.0, 33.0, 39.0, 44.0, 50.0, 54.0, 58.0, 62.0, 66.0, 70.0, 74.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+24,RES1.USACE_Chicago.contents.two_story.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 5.0, 6.0, 9.0, 11.0, 17.0, 22.0, 28.0, 33.0, 39.0, 44.0, 49.0, 55.0, 61.0, 64.0, 71.0, 76.0, 78.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+25,RES1.USACE_Galveston.contents.one_&_1/2_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 22.0, 36.0, 45.0, 57.0, 66.0, 71.0, 77.0, 79.0, 82.0, 84.0, 86.0, 87.0, 89.0, 90.0, 91.0, 92.0, 92.0, 92.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+26,RES1.USACE_Galveston.contents.one_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 42.0, 60.0, 71.0, 77.0, 82.0, 85.0, 86.0, 87.0, 88.0, 88.0, 88.0, 89.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+27,RES1.USACE_Galveston.contents.two_story.no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 24.0, 34.0, 40.0, 47.0, 53.0, 56.0, 58.0, 58.0, 58.0, 61.0, 66.0, 68.0, 76.0, 81.0, 86.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+28,RES1.USACE_New-Orleans.contents.one_story.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 63.0, 82.0, 85.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0, 91.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+29,RES1.USACE_New-Orleans.contents.one_story.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+30,RES1.USACE_New-Orleans.contents.two_story.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 37.0, 51.0, 51.0, 55.0, 55.0, 55.0, 55.0, 55.0, 73.0, 81.0, 87.0, 90.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0, 93.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+31,RES1.USACE_New-Orleans.contents.two_story.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+32,RES1.USACE_St-Paul.contents.one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 44.0, 54.0, 63.0, 68.0, 73.0, 75.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+33,RES1.USACE_St-Paul.contents.two_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 44.0, 54.0, 63.0, 68.0, 73.0, 75.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0, 78.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+34,RES1.USACE_Wilmington.contents.one_&_1/2_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"6.0, 7.0, 8.0, 9.0, 9.0, 21.0, 31.0, 43.0, 54.0, 67.0, 70.0, 73.0, 76.0, 78.0, 81.0, 82.0, 84.0, 85.0, 86.0, 87.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+35,RES1.USACE_Wilmington.contents.one_&_1/2_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 9.0, 21.0, 31.0, 37.0, 43.0, 51.0, 57.0, 63.0, 68.0, 74.0, 80.0, 81.0, 83.0, 84.0, 86.0, 87.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+36,RES1.USACE_Wilmington.contents.one_&_1/2_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 6.0, 10.0, 15.0, 18.0, 21.0, 23.0, 24.0, 28.0, 34.0, 44.0, 54.0, 63.0, 73.0, 76.0, 79.0, 82.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+37,RES1.USACE_Wilmington.contents.one_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"9.0, 10.0, 13.0, 13.0, 14.0, 26.0, 38.0, 52.0, 64.0, 78.0, 81.0, 85.0, 88.0, 91.0, 95.0, 96.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+38,RES1.USACE_Wilmington.contents.one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 36.0, 43.0, 46.0, 52.0, 59.0, 66.0, 73.0, 80.0, 87.0, 88.0, 90.0, 92.0, 94.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+39,RES1.USACE_Wilmington.contents.one_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 11.0, 16.0, 20.0, 24.0, 28.0, 30.0, 32.0, 38.0, 46.0, 54.0, 63.0, 72.0, 82.0, 85.0, 89.0, 92.0, 95.0, 96.0, 97.0, 98.0, 98.0, 99.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+40,RES1.USACE_Wilmington.contents.one_story_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"10.0, 12.0, 13.0, 13.0, 20.0, 34.0, 48.0, 52.0, 64.0, 78.0, 81.0, 85.0, 88.0, 91.0, 95.0, 96.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+41,RES1.USACE_Wilmington.contents.split_level-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 15.0, 21.0, 25.0, 32.0, 44.0, 50.0, 55.0, 61.0, 66.0, 72.0, 75.0, 79.0, 83.0, 87.0, 91.0, 94.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+42,RES1.USACE_Wilmington.contents.two_story.pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"6.0, 7.0, 8.0, 9.0, 10.0, 19.0, 29.0, 38.0, 45.0, 53.0, 55.0, 57.0, 59.0, 61.0, 63.0, 67.0, 71.0, 75.0, 78.0, 82.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+43,RES1.USACE_Wilmington.contents.two_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 18.0, 26.0, 30.0, 40.0, 50.0, 52.0, 53.0, 55.0, 56.0, 58.0, 63.0, 68.0, 72.0, 77.0, 82.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+44,RES1.USACE_Wilmington.contents.two_story_with_1/2_living_area_below-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 7.0, 10.0, 13.0, 15.0, 18.0, 19.0, 21.0, 25.0, 33.0, 40.0, 47.0, 54.0, 56.0, 59.0, 62.0, 65.0, 67.0, 71.0, 75.0, 79.0, 83.0, 87.0, 91.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+45,RES2.FIA.contents.mobile_home.a_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 49.0, 64.0, 70.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+46,RES2.FIA.contents.mobile_home.v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 50.0, 65.0, 71.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+47,RES2.USACE_Chicago.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 27.0, 49.0, 64.0, 70.0, 76.0, 78.0, 79.0, 81.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+48,RES2.USACE_Galveston.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 23.0, 36.0, 43.0, 55.0, 66.0, 78.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+49,RES2.USACE_New-Orleans.contents.mobile_home.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 39.0, 54.0, 75.0, 77.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0, 85.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+50,RES2.USACE_New-Orleans.contents.mobile_home.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+51,RES2.USACE_Wilmington.contents.mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 22.0, 32.0, 37.0, 50.0, 63.0, 74.0, 82.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+52,RES3.USACE_Chicago.contents.apartment_unit_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+53,RES3.USACE_Chicago.contents.apartment_unit_sub_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 6.0, 7.0, 8.0, 15.0, 19.0, 22.0, 28.0, 33.0, 39.0, 43.0, 49.0, 54.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+54,RES3.USACE_Galveston.contents.apartment.living_area_on_one_floor-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 34.0, 44.0, 55.0, 67.0, 77.0, 87.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+55,RES3.USACE_Galveston.contents.condominium.living_area_on_multiple_floors-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 24.0, 34.0, 40.0, 47.0, 53.0, 56.0, 58.0, 58.0, 58.0, 61.0, 66.0, 68.0, 76.0, 81.0, 86.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+56,RES4.USACE_Galveston.contents.average_hotel/motel.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 19.0, 25.0, 29.0, 34.0, 39.0, 44.0, 49.0, 56.0, 65.0, 74.0, 82.0, 88.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+57,RES4.USACE_Galveston.contents.hotel.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 22.0, 28.0, 33.0, 37.0, 41.0, 44.0, 46.0, 49.0, 54.0, 60.0, 69.0, 81.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+58,RES4.USACE_Galveston.contents.motel_unit.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 16.0, 21.0, 25.0, 30.0, 36.0, 43.0, 52.0, 63.0, 76.0, 88.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+59,RES5.USACE_Galveston.contents.average_institutional_dormitory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 60.0, 73.0, 81.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+60,RES6.USACE_Galveston.contents.nursing_home.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 60.0, 73.0, 81.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+61,COM1.USACE_Galveston.contents.average_retail_trade_equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 26.0, 42.0, 56.0, 68.0, 78.0, 83.0, 85.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 92.0, 92.0, 93.0, 93.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+62,COM1.USACE_Galveston.contents.antique.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 78.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+63,COM1.USACE_Galveston.contents.appliance_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 16.0, 28.0, 58.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+64,COM1.USACE_Galveston.contents.appliance_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 91.0, 94.0, 95.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+65,COM1.USACE_Galveston.contents.large_auto_dealer.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 50.0, 90.0, 95.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+66,COM1.USACE_Galveston.contents.bait_stand.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 7.0, 11.0, 16.0, 22.0, 29.0, 36.0, 44.0, 52.0, 60.0, 69.0, 79.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+67,COM1.USACE_Galveston.contents.bakery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 14.0, 35.0, 70.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+68,COM1.USACE_Galveston.contents.bakery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 55.0, 65.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+69,COM1.USACE_Galveston.contents.boat_sales/service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 41.0, 65.0, 83.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+70,COM1.USACE_Galveston.contents.boat_sales/service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.0, 24.0, 43.0, 82.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+71,COM1.USACE_Galveston.contents.book_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 14.0, 21.0, 27.0, 35.0, 42.0, 49.0, 57.0, 65.0, 73.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+72,COM1.USACE_Galveston.contents.camera_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 33.0, 65.0, 88.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+73,COM1.USACE_Galveston.contents.carpet_and_paint_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+74,COM1.USACE_Galveston.contents.carpet_and_paint_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 95.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+75,COM1.USACE_Galveston.contents.mens_clothing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 41.0, 60.0, 78.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+76,COM1.USACE_Galveston.contents.mens_clothing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 37.0, 50.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+77,COM1.USACE_Galveston.contents.crafts.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+78,COM1.USACE_Galveston.contents.crafts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 50.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+79,COM1.USACE_Galveston.contents.chain_drug_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+80,COM1.USACE_Galveston.contents.fabric_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+81,COM1.USACE_Galveston.contents.fabric_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 30.0, 50.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+82,COM1.USACE_Galveston.contents.feed_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+83,COM1.USACE_Galveston.contents.feed_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 15.0, 15.0, 15.0, 25.0, 25.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+84,COM1.USACE_Galveston.contents.florist.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 49.0, 60.0, 75.0, 78.0, 79.0, 82.0, 85.0, 89.0, 93.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+85,COM1.USACE_Galveston.contents.florist.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+86,COM1.USACE_Galveston.contents.fruit_stand.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+87,COM1.USACE_Galveston.contents.large_furniture_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+88,COM1.USACE_Galveston.contents.gas/butane_supply.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 46.0, 65.0, 75.0, 81.0, 86.0, 90.0, 94.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+89,COM1.USACE_Galveston.contents.gift_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 54.0, 63.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+90,COM1.USACE_Galveston.contents.greenhouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 34.0, 50.0, 66.0, 80.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+91,COM1.USACE_Galveston.contents.greenhouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 66.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+92,COM1.USACE_Galveston.contents.small_grocery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 20.0, 20.0, 20.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+93,COM1.USACE_Galveston.contents.small_grocery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 35.0, 50.0, 65.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+94,COM1.USACE_Galveston.contents.medium_grocery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 80.0, 90.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+95,COM1.USACE_Galveston.contents.medium_grocery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 22.0, 44.0, 74.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+96,COM1.USACE_Galveston.contents.gun_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 30.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+97,COM1.USACE_Galveston.contents.gun_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 39.0, 58.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+98,COM1.USACE_Galveston.contents.hardware.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+99,COM1.USACE_Galveston.contents.hardware.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+100,COM1.USACE_Galveston.contents.hobby_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 56.0, 87.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+101,COM1.USACE_Galveston.contents.hobby_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 49.0, 64.0, 77.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+102,COM1.USACE_Galveston.contents.lawnmower.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 24.0, 46.0, 57.0, 65.0, 72.0, 79.0, 86.0, 91.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+103,COM1.USACE_Galveston.contents.lawnmower.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+104,COM1.USACE_Galveston.contents.liquor_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 9.0, 16.0, 84.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+105,COM1.USACE_Galveston.contents.liquor_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 81.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+106,COM1.USACE_Galveston.contents.meat_market.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 78.0, 81.0, 84.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+107,COM1.USACE_Galveston.contents.meat_market.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+108,COM1.USACE_Galveston.contents.motorcycle_dealer.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+109,COM1.USACE_Galveston.contents.motorcycle_dealer.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 45.0, 65.0, 85.0, 85.0, 85.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+110,COM1.USACE_Galveston.contents.music_center.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 52.0, 72.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+111,COM1.USACE_Galveston.contents.music_center.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 66.0, 72.0, 75.0, 80.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+112,COM1.USACE_Galveston.contents.plant_nursery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 8.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+113,COM1.USACE_Galveston.contents.plant_nursery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 20.0, 79.0, 88.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+114,COM1.USACE_Galveston.contents.paint_store.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+115,COM1.USACE_Galveston.contents.paint_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 70.0, 73.0, 76.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+116,COM1.USACE_Galveston.contents.pawn_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 60.0, 70.0, 70.0, 70.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+117,COM1.USACE_Galveston.contents.pawn_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+118,COM1.USACE_Galveston.contents.remnant_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+119,COM1.USACE_Galveston.contents.remnant_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+120,COM1.USACE_Galveston.contents.service_station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 39.0, 59.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+121,COM1.USACE_Galveston.contents.service_station.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 42.0, 62.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+122,COM1.USACE_Galveston.contents.shoe_repair.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 28.0, 38.0, 41.0, 44.0, 47.0, 47.0, 47.0, 47.0, 47.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+123,COM1.USACE_Galveston.contents.shoe_repair.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 23.0, 35.0, 48.0, 60.0, 74.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+124,COM1.USACE_Galveston.contents.toy_store.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 40.0, 70.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+125,COM1.USACE_Galveston.contents.tractor_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 18.0, 28.0, 36.0, 44.0, 48.0, 52.0, 56.0, 60.0, 65.0, 70.0, 74.0, 79.0, 84.0, 88.0, 93.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+126,COM1.USACE_Galveston.contents.tractor_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 17.0, 29.0, 44.0, 59.0, 70.0, 77.0, 81.0, 84.0, 88.0, 92.0, 95.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+127,COM1.USACE_Galveston.contents.trailer_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 37.0, 60.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+128,COM1.USACE_Galveston.contents.trophy_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 35.0, 38.0, 56.0, 60.0, 60.0, 60.0, 60.0, 60.0, 61.0, 62.0, 64.0, 66.0, 68.0, 71.0, 76.0, 84.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+129,COM1.USACE_Galveston.contents.trophy_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 5.0, 12.0, 31.0, 66.0, 82.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+130,COM1.USACE_Galveston.contents.upholstery_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+131,COM1.USACE_Galveston.contents.upholstery_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 30.0, 40.0, 45.0, 50.0, 53.0, 56.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+132,COM1.USACE_Galveston.contents.used_appliances/cloth.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+133,COM1.USACE_Galveston.contents.used_appliances/cloth.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 35.0, 40.0, 50.0, 60.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+134,COM1.USACE_Galveston.contents.used_furniture.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+135,COM1.USACE_Galveston.contents.used_furniture.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 65.0, 80.0, 80.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+136,COM1.USACE_Galveston.contents.vacuum_cleaner_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 40.0, 40.0, 60.0, 60.0, 60.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+137,COM1.USACE_Galveston.contents.vacuum_cleaner_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 70.0, 80.0, 90.0, 95.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+138,COM1.USACE_Galveston.contents.video_games.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 20.0, 30.0, 45.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+139,COM1.USACE_Galveston.contents.video_games.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+140,COM1.USACE_New-Orleans.contents.bakery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+141,COM1.USACE_New-Orleans.contents.bakery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+142,COM1.USACE_New-Orleans.contents.candy_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+143,COM1.USACE_New-Orleans.contents.candy_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+144,COM1.USACE_New-Orleans.contents.clothing_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+145,COM1.USACE_New-Orleans.contents.clothing_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+146,COM1.USACE_New-Orleans.contents.convenience_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+147,COM1.USACE_New-Orleans.contents.convenience_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+148,COM1.USACE_New-Orleans.contents.department_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+149,COM1.USACE_New-Orleans.contents.department_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+150,COM1.USACE_New-Orleans.contents.furniture_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+151,COM1.USACE_New-Orleans.contents.furniture_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+152,COM1.USACE_New-Orleans.contents.gas_stations.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+153,COM1.USACE_New-Orleans.contents.gas_stations.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+154,COM1.USACE_New-Orleans.contents.large_grocery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+155,COM1.USACE_New-Orleans.contents.large_grocery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+156,COM1.USACE_New-Orleans.contents.neighborhood_grocery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+157,COM1.USACE_New-Orleans.contents.neighborhood_grocery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+158,COM1.USACE_New-Orleans.contents.home_repair_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+159,COM1.USACE_New-Orleans.contents.home_repair_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+160,COM1.USACE_New-Orleans.contents.liquor_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+161,COM1.USACE_New-Orleans.contents.liquor_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+162,COM1.USACE_New-Orleans.contents.shoe_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+163,COM1.USACE_New-Orleans.contents.shoe_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+164,COM1.USACE_New-Orleans.contents.wine_store.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+165,COM1.USACE_New-Orleans.contents.wine_store.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 74.0, 81.0, 85.0, 88.0, 96.0, 96.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+166,COM2.USACE_Galveston.contents.average_wholesale_trade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.0, 16.0, 27.0, 36.0, 49.0, 57.0, 63.0, 69.0, 72.0, 76.0, 80.0, 82.0, 84.0, 86.0, 87.0, 87.0, 88.0, 89.0, 89.0, 89.0, 89.0, 89.0, 89.0, 89.0, 89.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+167,COM2.USACE_Galveston.contents.auto_parts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 30.0, 59.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+168,COM2.USACE_Galveston.contents.auto_parts/mufflers.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 40.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+169,COM2.USACE_Galveston.contents.heavy_equipment_storage.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 14.0, 17.0, 20.0, 23.0, 25.0, 29.0, 35.0, 38.0, 42.0, 51.0, 63.0, 77.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+170,COM2.USACE_Galveston.contents.food_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+171,COM2.USACE_Galveston.contents.highway_material_storage.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 5.0, 10.0, 10.0, 25.0, 25.0, 50.0, 50.0, 50.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+172,COM2.USACE_Galveston.contents.jewelry.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 15.0, 24.0, 33.0, 39.0, 45.0, 51.0, 56.0, 61.0, 65.0, 70.0, 74.0, 79.0, 83.0, 87.0, 92.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+173,COM2.USACE_Galveston.contents.lumber_yard.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 45.0, 60.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+174,COM2.USACE_Galveston.contents.medical_supplies.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+175,COM2.USACE_Galveston.contents.medical_supplies.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 20.0, 27.0, 35.0, 43.0, 50.0, 57.0, 65.0, 73.0, 80.0, 88.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+176,COM2.USACE_Galveston.contents.municipal_storage_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 40.0, 58.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+177,COM2.USACE_Galveston.contents.municipal_storage_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 16.0, 19.0, 21.0, 23.0, 28.0, 35.0, 47.0, 67.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+178,COM2.USACE_Galveston.contents.paper_products_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 42.0, 58.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+179,COM2.USACE_Galveston.contents.paper_products_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 27.0, 36.0, 44.0, 52.0, 59.0, 67.0, 73.0, 80.0, 90.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+180,COM2.USACE_Galveston.contents.safety_equipment.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 37.0, 50.0, 62.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+181,COM2.USACE_Galveston.contents.safety_equipment.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 25.0, 37.0, 50.0, 62.0, 75.0, 85.0, 93.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+182,COM2.USACE_Galveston.contents.sporting_goods.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+183,COM2.USACE_Galveston.contents.sporting_goods.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 50.0, 53.0, 55.0, 57.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+184,COM2.USACE_Galveston.contents.sporting_goods_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 50.0, 63.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+185,COM2.USACE_Galveston.contents.sporting_goods_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 50.0, 63.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+186,COM2.USACE_Galveston.contents.storage_chemicals.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 15.0, 20.0, 25.0, 35.0, 45.0, 55.0, 65.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+187,COM2.USACE_Galveston.contents.storage_chemicals.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 20.0, 30.0, 40.0, 50.0, 60.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+188,COM2.USACE_Galveston.contents.storage_machine_parts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 50.0, 50.0, 75.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+189,COM2.USACE_Galveston.contents.t.v._station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 9.0, 9.0, 9.0, 9.0, 11.0, 13.0, 15.0, 18.0, 22.0, 26.0, 30.0, 35.0, 43.0, 54.0, 70.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+190,COM2.USACE_Galveston.contents.t.v._repair.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.0, 10.0, 11.0, 13.0, 16.0, 21.0, 27.0, 34.0, 42.0, 52.0, 63.0, 74.0, 86.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+191,COM2.USACE_Galveston.contents.t.v._repair.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+192,COM2.USACE_Galveston.contents.trailer_parts.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 50.0, 50.0, 50.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+193,COM2.USACE_Galveston.contents.trailer_parts.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 16.0, 28.0, 32.0, 40.0, 43.0, 46.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+194,COM2.USACE_Galveston.contents.warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 16.0, 19.0, 21.0, 23.0, 28.0, 35.0, 47.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+195,COM2.USACE_Galveston.contents.beer_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+196,COM2.USACE_Galveston.contents.beer_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+197,COM2.USACE_Galveston.contents.bottled_gases_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 35.0, 50.0, 50.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+198,COM2.USACE_Galveston.contents.bottled_gases_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+199,COM2.USACE_Galveston.contents.cement_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+200,COM2.USACE_Galveston.contents.detergents_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 35.0, 35.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+201,COM2.USACE_Galveston.contents.detergents_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+202,COM2.USACE_Galveston.contents.heavy_machinery_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 11.0, 17.0, 20.0, 23.0, 25.0, 29.0, 35.0, 42.0, 51.0, 63.0, 77.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+203,COM2.USACE_Galveston.contents.heavy_machinery_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 25.0, 35.0, 40.0, 50.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 85.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+204,COM2.USACE_Galveston.contents.petroleum_warehouse.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 35.0, 50.0, 50.0, 50.0, 50.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+205,COM2.USACE_Galveston.contents.petroleum_warehouse.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 40.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+206,COM2.USACE_Galveston.contents.western_auto.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 8.0, 16.0, 59.0, 65.0, 70.0, 73.0, 77.0, 81.0, 84.0, 87.0, 90.0, 93.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+207,COM2.USACE_Galveston.contents.western_auto.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 50.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+208,COM2.USACE_New-Orleans.contents.warehouse.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+209,COM2.USACE_New-Orleans.contents.warehouse.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+210,COM2.USACE_St-Paul.contents.warehouse.fresh_water-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 31.0, 43.0, 53.0, 61.0, 67.0, 71.0, 73.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+211,COM3.USACE_Galveston.contents.average_personal/repair_services.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.0, 29.0, 46.0, 67.0, 79.0, 85.0, 91.0, 92.0, 92.0, 93.0, 94.0, 96.0, 96.0, 97.0, 97.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+212,COM3.USACE_Galveston.contents.auto_repair.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 56.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+213,COM3.USACE_Galveston.contents.auto_repair.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 50.0, 80.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+214,COM3.USACE_Galveston.contents.auto_service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 40.0, 60.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+215,COM3.USACE_Galveston.contents.barber_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 22.0, 29.0, 37.0, 48.0, 62.0, 78.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+216,COM3.USACE_Galveston.contents.barber_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 50.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+217,COM3.USACE_Galveston.contents.beauty_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 70.0, 87.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+218,COM3.USACE_Galveston.contents.beauty_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 31.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+219,COM3.USACE_Galveston.contents.boat_service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+220,COM3.USACE_Galveston.contents.boat_service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+221,COM3.USACE_Galveston.contents.car_wash.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 40.0, 50.0, 60.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+222,COM3.USACE_Galveston.contents.car_wash.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+223,COM3.USACE_Galveston.contents.cemetery_complex.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 34.0, 76.0, 88.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+224,COM3.USACE_Galveston.contents.cemetery_complex.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 92.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+225,COM3.USACE_Galveston.contents.cleaners.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 50.0, 75.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+226,COM3.USACE_Galveston.contents.cleaners_substation.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 10.0, 75.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+227,COM3.USACE_Galveston.contents.cleaners_substation.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 49.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+228,COM3.USACE_Galveston.contents.private_day_care.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+229,COM3.USACE_Galveston.contents.private_day_care.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+230,COM3.USACE_Galveston.contents.funeral_home.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 60.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+231,COM3.USACE_Galveston.contents.laundry.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 13.0, 16.0, 19.0, 22.0, 27.0, 33.0, 40.0, 47.0, 56.0, 65.0, 74.0, 84.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+232,COM3.USACE_Galveston.contents.photo_studio.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 60.0, 60.0, 60.0, 60.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+233,COM3.USACE_Galveston.contents.photo_studio.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+234,COM3.USACE_Galveston.contents.truck_mfg_&_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+235,COM3.USACE_Galveston.contents.truck_mfg_&_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 34.0, 50.0, 57.0, 65.0, 71.0, 76.0, 80.0, 88.0, 89.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+236,COM3.USACE_Galveston.contents.washateria.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 55.0, 78.0, 83.0, 86.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+237,COM3.USACE_New-Orleans.contents.auto_repair.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+238,COM3.USACE_New-Orleans.contents.auto_repair.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+239,COM3.USACE_New-Orleans.contents.barber_shop.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+240,COM3.USACE_New-Orleans.contents.barber_shop.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+241,COM3.USACE_New-Orleans.contents.beauty_salon.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+242,COM3.USACE_New-Orleans.contents.beauty_salon.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+243,COM3.USACE_New-Orleans.contents.funeral_home.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+244,COM3.USACE_New-Orleans.contents.funeral_home.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+245,COM3.USACE_New-Orleans.contents.laundromat.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+246,COM3.USACE_New-Orleans.contents.laundromat.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 48.0, 54.0, 54.0, 55.0, 55.0, 55.0, 55.0, 55.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+247,COM3.USACE_New-Orleans.contents.reupholstery.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+248,COM3.USACE_New-Orleans.contents.reupholstery.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+249,COM3.USACE_New-Orleans.contents.watch_repair.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+250,COM3.USACE_New-Orleans.contents.watch_repair.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 45.0, 60.0, 65.0, 86.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0, 94.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+251,COM4.USACE_Galveston.contents.average_prof/tech_services.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 18.0, 25.0, 35.0, 43.0, 49.0, 52.0, 55.0, 57.0, 58.0, 60.0, 65.0, 67.0, 68.0, 69.0, 70.0, 71.0, 71.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+252,COM4.USACE_Galveston.contents.airport.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+253,COM4.USACE_Galveston.contents.airport.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 40.0, 48.0, 55.0, 75.0, 78.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+254,COM4.USACE_Galveston.contents.boat_stalls.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 6.0, 8.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 29.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+255,COM4.USACE_Galveston.contents.boat_storage.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 4.0, 7.0, 12.0, 18.0, 24.0, 32.0, 40.0, 48.0, 54.0, 58.0, 63.0, 66.0, 68.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+256,COM4.USACE_Galveston.contents.business.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 6.0, 10.0, 15.0, 19.0, 24.0, 28.0, 33.0, 38.0, 44.0, 49.0, 55.0, 62.0, 69.0, 78.0, 86.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+257,COM4.USACE_Galveston.contents.import_sales.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+258,COM4.USACE_Galveston.contents.import_sales.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 65.0, 70.0, 75.0, 80.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+259,COM4.USACE_Galveston.contents.large_commercial_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 24.0, 25.0, 26.0, 28.0, 31.0, 36.0, 42.0, 50.0, 71.0, 84.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+260,COM4.USACE_Galveston.contents.real_estate_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 9.0, 42.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+261,COM4.USACE_Galveston.contents.real_estate_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 25.0, 43.0, 63.0, 70.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+262,COM4.USACE_Galveston.contents.transport_company.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 75.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+263,COM4.USACE_Galveston.contents.utility_company.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+264,COM4.USACE_Galveston.contents.utility_company.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 5.0, 7.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 15.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0, 16.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+265,COM4.USACE_Galveston.contents.water_supply.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+266,COM4.USACE_New-Orleans.contents.accounting_firm.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+267,COM4.USACE_New-Orleans.contents.accounting_firm.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+268,COM4.USACE_New-Orleans.contents.legal_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+269,COM4.USACE_New-Orleans.contents.legal_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+270,COM4.USACE_New-Orleans.contents.real_estate_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+271,COM4.USACE_New-Orleans.contents.real_estate_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+272,COM4.USACE_New-Orleans.contents.utility_company.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+273,COM4.USACE_New-Orleans.contents.utility_company.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0, 79.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+274,COM4.USACE_St-Paul.contents.professional.fresh_water-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 31.0, 43.0, 53.0, 61.0, 67.0, 71.0, 73.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+275,COM5.USACE_Galveston.contents.bank-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 74.0, 83.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+276,COM5.USACE_Galveston.contents.bank.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 60.0, 70.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+277,COM5.USACE_Galveston.contents.bank.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 50.0, 87.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+278,COM5.USACE_New-Orleans.contents.bank.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+279,COM5.USACE_New-Orleans.contents.bank.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+280,COM6.USACE_Galveston.contents.hospital-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 65.0, 72.0, 78.0, 85.0, 95.0, 95.0, 95.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+281,COM6.USACE_Galveston.contents.hospital.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 95.0, 95.0, 95.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+282,COM6.USACE_Galveston.contents.hospital.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 80.0, 83.0, 86.0, 89.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+283,COM7.USACE_Galveston.contents.average_medical_offic/clinic.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 51.0, 60.0, 63.0, 67.0, 71.0, 72.0, 74.0, 77.0, 81.0, 86.0, 92.0, 94.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+284,COM7.USACE_Galveston.contents.doctor's_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 12.0, 15.0, 16.0, 18.0, 22.0, 27.0, 34.0, 43.0, 57.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+285,COM7.USACE_Galveston.contents.dentist's_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+286,COM7.USACE_Galveston.contents.dentist's_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.0, 40.0, 55.0, 70.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+287,COM7.USACE_Galveston.contents.chiropractic_clinic.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 30.0, 30.0, 30.0, 31.0, 32.0, 35.0, 38.0, 42.0, 47.0, 54.0, 62.0, 72.0, 84.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+288,COM7.USACE_Galveston.contents.x_ray_service.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+289,COM7.USACE_New-Orleans.contents.medical_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+290,COM7.USACE_New-Orleans.contents.medical_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+291,COM7.USACE_New-Orleans.contents.dentist's_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+292,COM7.USACE_New-Orleans.contents.dentist's_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+293,COM8.USACE_Galveston.contents.average_entertainment/recreation.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.0, 45.0, 55.0, 64.0, 73.0, 77.0, 80.0, 82.0, 83.0, 85.0, 87.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0, 96.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+294,COM8.USACE_Galveston.contents.fishing_party_boat.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 24.0, 24.0, 24.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+295,COM8.USACE_Galveston.contents.fishing_party_boat.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 40.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+296,COM8.USACE_Galveston.contents.bowling_alley.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 22.0, 25.0, 27.0, 27.0, 28.0, 30.0, 32.0, 35.0, 39.0, 45.0, 51.0, 58.0, 67.0, 76.0, 86.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+297,COM8.USACE_Galveston.contents.country_club.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 38.0, 41.0, 44.0, 47.0, 52.0, 56.0, 62.0, 67.0, 74.0, 80.0, 87.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+298,COM8.USACE_Galveston.contents.country_club.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 19.0, 27.0, 34.0, 42.0, 48.0, 55.0, 62.0, 68.0, 73.0, 78.0, 84.0, 89.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+299,COM8.USACE_Galveston.contents.physical_fitness.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 45.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+300,COM8.USACE_Galveston.contents.private_pool.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+301,COM8.USACE_Galveston.contents.private_club.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 45.0, 49.0, 52.0, 55.0, 59.0, 63.0, 68.0, 74.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+302,COM8.USACE_Galveston.contents.private_club.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 21.0, 28.0, 35.0, 41.0, 47.0, 54.0, 62.0, 71.0, 83.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+303,COM8.USACE_Galveston.contents.radio_station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+304,COM8.USACE_Galveston.contents.recreation_facilities.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 20.0, 30.0, 45.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+305,COM8.USACE_Galveston.contents.recreation_facilities.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+306,COM8.USACE_Galveston.contents.tavern.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 36.0, 62.0, 73.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+307,COM8.USACE_Galveston.contents.tavern.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 42.0, 53.0, 78.0, 92.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+308,COM8.USACE_Galveston.contents.telephone_exchange.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+309,COM8.USACE_Galveston.contents.ymca.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 33.0, 35.0, 35.0, 35.0, 36.0, 36.0, 37.0, 37.0, 38.0, 38.0, 39.0, 39.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+310,COM8.USACE_Galveston.contents.cafeteria_restaurant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 57.0, 70.0, 71.0, 75.0, 82.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+311,COM8.USACE_Galveston.contents.cafeteria_restaurant.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 73.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+312,COM8.USACE_Galveston.contents.drive_in_restaurant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 82.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+313,COM8.USACE_Galveston.contents.drive_in_restaurant.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 52.0, 60.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+314,COM8.USACE_New-Orleans.contents.bowling_alley.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+315,COM8.USACE_New-Orleans.contents.bowling_alley.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+316,COM8.USACE_New-Orleans.contents.fast_food_restaurant.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+317,COM8.USACE_New-Orleans.contents.fast_food_restaurant.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+318,COM8.USACE_New-Orleans.contents.full_service_restaurant.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+319,COM8.USACE_New-Orleans.contents.full_service_restaurant.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+320,COM8.USACE_New-Orleans.contents.lounge.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+321,COM8.USACE_New-Orleans.contents.lounge.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+322,COM8.USACE_St-Paul.contents.recreation.fresh_water-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 31.0, 43.0, 53.0, 61.0, 67.0, 71.0, 73.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+323,COM9.USACE_Galveston.contents.theater.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 6.0, 8.0, 9.0, 10.0, 12.0, 17.0, 22.0, 30.0, 41.0, 57.0, 66.0, 73.0, 79.0, 84.0, 90.0, 97.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+324,COM9.USACE_Galveston.contents.private_hall.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 8.0, 10.0, 12.0, 14.0, 18.0, 24.0, 32.0, 44.0, 60.0, 85.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+325,COM9.USACE_Galveston.contents.indoor_theater.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 6.0, 6.0, 9.0, 12.0, 16.0, 22.0, 28.0, 37.0, 46.0, 57.0, 68.0, 80.0, 93.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+326,COM9.USACE_New-Orleans.contents.movie_theater.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 83.0, 88.0, 93.0, 94.0, 94.0, 94.0, 94.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+327,COM9.USACE_New-Orleans.contents.movie_theater.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 61.0, 81.0, 89.0, 90.0, 92.0, 92.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0, 98.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+328,COM10.USACE_Galveston.contents.garage.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 17.0, 20.0, 23.0, 25.0, 29.0, 35.0, 42.0, 51.0, 63.0, 77.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+329,IND1.USACE_Galveston.contents.average_heavy_industrial.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 24.0, 34.0, 41.0, 47.0, 52.0, 57.0, 60.0, 63.0, 64.0, 66.0, 68.0, 69.0, 72.0, 73.0, 73.0, 73.0, 74.0, 74.0, 74.0, 74.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+330,IND1.USACE_Galveston.contents.boiler_building.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 10.0, 10.0, 10.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+331,IND1.USACE_Galveston.contents.cabinet_shop_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+332,IND1.USACE_Galveston.contents.cabinet_shop_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 4.0, 8.0, 10.0, 11.0, 12.0, 13.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+333,IND1.USACE_Galveston.contents.concrete_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 50.0, 51.0, 53.0, 54.0, 55.0, 55.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+334,IND1.USACE_Galveston.contents.door_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 45.0, 70.0, 80.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+335,IND1.USACE_Galveston.contents.door_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+336,IND1.USACE_Galveston.contents.engine_room.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 65.0, 65.0, 65.0, 65.0, 65.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+337,IND1.USACE_Galveston.contents.fabrication_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+338,IND1.USACE_Galveston.contents.heat_exchanger_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 22.0, 32.0, 40.0, 46.0, 52.0, 55.0, 58.0, 60.0, 63.0, 71.0, 80.0, 85.0, 91.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+339,IND1.USACE_Galveston.contents.heat_exchanger_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 15.0, 17.0, 19.0, 21.0, 23.0, 24.0, 26.0, 28.0, 29.0, 32.0, 34.0, 36.0, 38.0, 39.0, 41.0, 43.0, 45.0, 47.0, 49.0, 51.0, 53.0, 55.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+340,IND1.USACE_Galveston.contents.lock_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 20.0, 29.0, 40.0, 50.0, 59.0, 67.0, 75.0, 84.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+341,IND1.USACE_Galveston.contents.lock_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 8.0, 33.0, 52.0, 70.0, 75.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+342,IND1.USACE_Galveston.contents.lumber_mill.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 20.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+343,IND1.USACE_Galveston.contents.heavy_machine_shop.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 16.0, 24.0, 33.0, 41.0, 49.0, 58.0, 66.0, 74.0, 82.0, 91.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+344,IND1.USACE_Galveston.contents.heavy_machine_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+345,IND1.USACE_Galveston.contents.light_machine_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 12.0, 18.0, 27.0, 35.0, 42.0, 42.0, 42.0, 55.0, 55.0, 55.0, 65.0, 65.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0, 70.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+346,IND1.USACE_Galveston.contents.metal_coatings_serv.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 50.0, 63.0, 75.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+347,IND1.USACE_Galveston.contents.metal_coatings_serv.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+348,IND1.USACE_Galveston.contents.pipe_threader_facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 75.0, 75.0, 75.0, 75.0, 90.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+349,IND1.USACE_Galveston.contents.pressure_test_facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 25.0, 25.0, 30.0, 30.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+350,IND1.USACE_Galveston.contents.metal_recycling.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 20.0, 20.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+351,IND1.USACE_Galveston.contents.machine_research_lab.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 25.0, 30.0, 35.0, 40.0, 42.0, 44.0, 46.0, 48.0, 50.0, 50.0, 50.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+352,IND1.USACE_Galveston.contents.machine_research_lab.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 40.0, 60.0, 80.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+353,IND1.USACE_Galveston.contents.scale_bldg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 15.0, 25.0, 40.0, 50.0, 75.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+354,IND1.USACE_Galveston.contents.welding_machine.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 6.0, 15.0, 18.0, 20.0, 21.0, 22.0, 24.0, 27.0, 30.0, 33.0, 37.0, 41.0, 45.0, 49.0, 54.0, 59.0, 63.0, 68.0, 72.0, 76.0, 80.0, 84.0, 88.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+355,IND2.USACE_Galveston.contents.average_light_industrial.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 9.0, 23.0, 35.0, 44.0, 52.0, 58.0, 62.0, 65.0, 68.0, 70.0, 73.0, 74.0, 77.0, 78.0, 78.0, 79.0, 80.0, 80.0, 80.0, 80.0, 81.0, 81.0, 81.0, 81.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+356,IND2.USACE_Galveston.contents.battery_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+357,IND2.USACE_Galveston.contents.battery_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 15.0, 15.0, 20.0, 20.0, 25.0, 25.0, 30.0, 30.0, 30.0, 30.0, 40.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+358,IND2.USACE_Galveston.contents.control_bldg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 75.0, 85.0, 85.0, 90.0, 90.0, 90.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+359,IND2.USACE_Galveston.contents.electronic_equip_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+360,IND2.USACE_Galveston.contents.electronic_equip_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+361,IND2.USACE_Galveston.contents.frame_shop.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 45.0, 80.0, 88.0, 93.0, 95.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+362,IND2.USACE_Galveston.contents.furniture_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+363,IND2.USACE_Galveston.contents.furniture_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+364,IND2.USACE_Galveston.contents.instrument_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 13.0, 20.0, 27.0, 34.0, 42.0, 48.0, 57.0, 71.0, 80.0, 85.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+365,IND2.USACE_Galveston.contents.instrument_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 50.0, 61.0, 73.0, 82.0, 90.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+366,IND2.USACE_Galveston.contents.leather_goods_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0, 36.0, 39.0, 42.0, 45.0, 48.0, 51.0, 54.0, 57.0, 60.0, 63.0, 66.0, 69.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+367,IND2.USACE_Galveston.contents.leather_goods_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 7.0, 12.0, 16.0, 20.0, 23.0, 26.0, 27.0, 29.0, 30.0, 30.0, 36.0, 39.0, 41.0, 44.0, 46.0, 49.0, 51.0, 54.0, 57.0, 60.0, 63.0, 66.0, 69.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+368,IND2.USACE_Galveston.contents.industrial_loading_dock.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 25.0, 40.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+369,IND2.USACE_Galveston.contents.industrial_loading_dock.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 20.0, 20.0, 20.0, 20.0, 20.0, 30.0, 30.0, 30.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+370,IND2.USACE_Galveston.contents.locker_bldg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.0, 25.0, 55.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+371,IND2.USACE_Galveston.contents.maint_bldg_mfg_facilility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 15.0, 20.0, 25.0, 35.0, 45.0, 45.0, 45.0, 45.0, 50.0, 50.0, 50.0, 55.0, 55.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+372,IND2.USACE_Galveston.contents.newspaper_print_plant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 8.0, 11.0, 13.0, 16.0, 20.0, 25.0, 31.0, 39.0, 48.0, 59.0, 70.0, 82.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+373,IND2.USACE_Galveston.contents.newspaper_sales_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 9.0, 18.0, 33.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+374,IND2.USACE_Galveston.contents.newspaper_sales_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 46.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+375,IND2.USACE_Galveston.contents.manuf_facility_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+376,IND2.USACE_Galveston.contents.manuf_facility_office.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 30.0, 40.0, 45.0, 50.0, 60.0, 75.0, 85.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+377,IND2.USACE_Galveston.contents.commercial_printing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+378,IND2.USACE_Galveston.contents.commercial_printing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 35.0, 50.0, 70.0, 73.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+379,IND3.USACE_Galveston.contents.average_food/drugs/chemicals.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 20.0, 41.0, 51.0, 62.0, 67.0, 71.0, 73.0, 76.0, 78.0, 79.0, 82.0, 83.0, 84.0, 86.0, 87.0, 87.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+380,IND3.USACE_Galveston.contents.chemical_plant.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 44.0, 52.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0, 92.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+381,IND3.USACE_Galveston.contents.chemical_plant.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+382,IND3.USACE_Galveston.contents.chemical_plant_bonding.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0, 69.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+383,IND3.USACE_Galveston.contents.chemical_plant_bonding.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+384,IND3.USACE_Galveston.contents.chemical_refinery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 45.0, 60.0, 75.0, 90.0, 93.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+385,IND3.USACE_Galveston.contents.chemical_refinery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+386,IND3.USACE_Galveston.contents.deodorizer_bldg_chemical.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 15.0, 20.0, 20.0, 20.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0, 25.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+387,IND3.USACE_Galveston.contents.deodorizer_bldg_chem.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 75.0, 90.0, 90.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+388,IND3.USACE_Galveston.contents.feed_mill.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+389,IND3.USACE_Galveston.contents.feed_mill.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0, 30.0, 30.0, 30.0, 30.0, 30.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+390,IND3.USACE_Galveston.contents.food_processor.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 27.0, 33.0, 40.0, 50.0, 60.0, 70.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+391,IND3.USACE_Galveston.contents.food_processor.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+392,IND3.USACE_Galveston.contents.chemical_laboratory.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 60.0, 70.0, 80.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+393,IND3.USACE_Galveston.contents.chemical_laboratory.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 43.0, 60.0, 60.0, 60.0, 60.0, 70.0, 70.0, 80.0, 80.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+394,IND3.USACE_Galveston.contents.detergent_manuf._facility.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 24.0, 25.0, 26.0, 28.0, 31.0, 36.0, 42.0, 50.0, 71.0, 84.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+395,IND3.USACE_Galveston.contents.detergent_manuf._facility.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 40.0, 55.0, 70.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+396,IND3.USACE_Galveston.contents.meat_packing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 30.0, 70.0, 75.0, 85.0, 90.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+397,IND3.USACE_Galveston.contents.meat_packing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+398,IND3.USACE_Galveston.contents.detergent_mixer_bldg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 25.0, 25.0, 25.0, 25.0, 25.0, 35.0, 35.0, 45.0, 45.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0, 55.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+399,IND3.USACE_Galveston.contents.detergent_mixer_bldg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+400,IND3.USACE_Galveston.contents.plastic_mfg.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+401,IND3.USACE_Galveston.contents.plastic_mfg.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 30.0, 40.0, 47.0, 55.0, 62.0, 67.0, 72.0, 77.0, 80.0, 92.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+402,IND3.USACE_Galveston.contents.caustic_materials_refinery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 50.0, 75.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+403,IND3.USACE_Galveston.contents.caustic_materials_refinery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 30.0, 40.0, 40.0, 50.0, 50.0, 60.0, 60.0, 70.0, 70.0, 80.0, 80.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+404,IND4.USACE_Galveston.contents.average_metals/minerals_processing.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 15.0, 20.0, 26.0, 31.0, 37.0, 40.0, 44.0, 48.0, 53.0, 56.0, 57.0, 60.0, 62.0, 63.0, 63.0, 63.0, 64.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+405,IND4.USACE_Galveston.contents.foundry.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 17.0, 24.0, 29.0, 34.0, 38.0, 43.0, 45.0, 50.0, 58.0, 62.0, 67.0, 70.0, 75.0, 78.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0, 82.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+406,IND4.USACE_Galveston.contents.foundry.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 11.0, 16.0, 19.0, 21.0, 23.0, 28.0, 35.0, 47.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+407,IND4.USACE_Galveston.contents.lead_refinery.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+408,IND4.USACE_Galveston.contents.lead_refinery.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 21.0, 30.0, 40.0, 45.0, 50.0, 60.0, 75.0, 85.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+409,IND4.USACE_Galveston.contents.sand_&_gravel.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 10.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+410,IND4.USACE_Galveston.contents.sand_&_gravel.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 3.0, 6.0, 10.0, 13.0, 16.0, 19.0, 24.0, 27.0, 30.0, 45.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+411,IND4.USACE_Galveston.contents.sheet_metal.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 17.0, 24.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+412,IND4.USACE_Galveston.contents.sheet_metal.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+413,IND5.USACE_Galveston.contents.average_high_technology.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 20.0, 41.0, 51.0, 62.0, 67.0, 71.0, 73.0, 76.0, 78.0, 79.0, 82.0, 83.0, 84.0, 86.0, 87.0, 87.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0, 88.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+414,IND6.USACE_Galveston.contents.average_construction.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 35.0, 47.0, 56.0, 59.0, 66.0, 69.0, 71.0, 72.0, 78.0, 79.0, 80.0, 80.0, 81.0, 81.0, 81.0, 82.0, 82.0, 82.0, 83.0, 83.0, 83.0, 83.0, 83.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+415,IND6.USACE_Galveston.contents.carpet_tile_flooring.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 42.0, 54.0, 65.0, 75.0, 85.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+416,IND6.USACE_Galveston.contents.carpet_tile_flooring.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 70.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+417,IND6.USACE_Galveston.contents.contractor_roofing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 27.0, 36.0, 43.0, 48.0, 50.0, 50.0, 50.0, 50.0, 50.0, 51.0, 51.0, 51.0, 52.0, 52.0, 52.0, 52.0, 53.0, 53.0, 53.0, 54.0, 54.0, 54.0, 55.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+418,IND6.USACE_Galveston.contents.contractor_roofing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 20.0, 27.0, 36.0, 43.0, 48.0, 53.0, 57.0, 59.0, 60.0, 66.0, 69.0, 72.0, 76.0, 79.0, 83.0, 86.0, 90.0, 93.0, 97.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+419,IND6.USACE_Galveston.contents.contractor_electric.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 10.0, 10.0, 40.0, 40.0, 40.0, 40.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+420,IND6.USACE_Galveston.contents.pier_drilling_co.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 23.0, 39.0, 55.0, 55.0, 56.0, 56.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+421,IND6.USACE_Galveston.contents.plumbing_co.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 45.0, 55.0, 63.0, 70.0, 76.0, 82.0, 87.0, 92.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+422,IND6.USACE_Galveston.contents.plumbing_co.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.0, 25.0, 35.0, 44.0, 53.0, 61.0, 69.0, 77.0, 85.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+423,IND6.USACE_Galveston.contents.sandblasting_co.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 15.0, 45.0, 68.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+424,IND6.USACE_Galveston.contents.water_well_service.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+425,IND6.USACE_New-Orleans.contents.carpeting_service.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+426,IND6.USACE_New-Orleans.contents.carpeting_service.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+427,IND6.USACE_New-Orleans.contents.heating_&_air_conditioning_service.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+428,IND6.USACE_New-Orleans.contents.heating_&_air_conditioning_service.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+429,IND6.USACE_New-Orleans.contents.plumbing_services.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 70.0, 80.0, 96.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+430,IND6.USACE_New-Orleans.contents.plumbing_services.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 31.0, 40.0, 45.0, 49.0, 54.0, 58.0, 63.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0, 72.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+431,AGR1.USACE_Galveston.contents.average_agriculture.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 6.0, 20.0, 43.0, 58.0, 65.0, 66.0, 66.0, 67.0, 70.0, 75.0, 76.0, 76.0, 76.0, 77.0, 77.0, 77.0, 78.0, 78.0, 78.0, 79.0, 79.0, 79.0, 79.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+432,AGR1.USACE_Galveston.contents.dairy_processing.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 60.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0, 80.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+433,AGR1.USACE_Galveston.contents.dairy_processing.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+434,AGR1.USACE_Galveston.contents.horse_stalls.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 6.0, 8.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 29.0, 31.0, 32.0, 33.0, 34.0, 34.0, 36.0, 37.0, 38.0, 39.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+435,AGR1.USACE_Galveston.contents.veterinary_clinic.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 25.0, 50.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+436,AGR1.USACE_New-Orleans.contents.veterinary_office.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+437,AGR1.USACE_New-Orleans.contents.veterinary_office.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 26.0, 46.0, 94.0, 97.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+438,REL1.USACE_Galveston.contents.church-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 52.0, 72.0, 85.0, 92.0, 95.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+439,REL1.USACE_Galveston.contents.church.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 75.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+440,REL1.USACE_Galveston.contents.church.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 28.0, 54.0, 70.0, 84.0, 90.0, 95.0, 97.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+441,REL1.USACE_New-Orleans.contents.civic_association.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+442,REL1.USACE_New-Orleans.contents.civic_association.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+443,GOV1.USACE_Galveston.contents.average_govt_services.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 59.0, 74.0, 83.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+444,GOV1.USACE_Galveston.contents.city_hall.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 75.0, 85.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+445,GOV1.USACE_Galveston.contents.post_office.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 43.0, 63.0, 70.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+446,GOV1.USACE_New-Orleans.contents.government_facility.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+447,GOV1.USACE_New-Orleans.contents.government_facility.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+448,GOV2.USACE_Galveston.contents.average_emergency_response.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 8.0, 20.0, 38.0, 55.0, 70.0, 81.0, 89.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+449,GOV2.USACE_Galveston.contents.fire_station.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 25.0, 50.0, 75.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+450,GOV2.USACE_Galveston.contents.police_station.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 15.0, 25.0, 35.0, 48.0, 62.0, 78.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+451,EDU1.USACE_Galveston.contents.average_school.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 38.0, 53.0, 64.0, 68.0, 70.0, 72.0, 75.0, 79.0, 83.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+452,EDU1.USACE_Galveston.contents.commercial_school.equipment-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 26.0, 30.0, 33.0, 35.0, 39.0, 44.0, 50.0, 58.0, 66.0, 76.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+453,EDU1.USACE_Galveston.contents.library.inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 50.0, 75.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+454,EDU1.USACE_New-Orleans.contents.elementary_school.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+455,EDU1.USACE_New-Orleans.contents.elementary_school.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+456,EDU2.USACE_Galveston.contents.average_college/university.equipment/inventory-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 38.0, 53.0, 64.0, 68.0, 70.0, 72.0, 75.0, 79.0, 83.0, 88.0, 94.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+457,EDU2.USACE_New-Orleans.contents.college.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+458,EDU2.USACE_New-Orleans.contents.college.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.0, 18.0, 50.0, 50.0, 52.0, 58.0, 58.0, 58.0, 58.0, 58.0, 59.0, 69.0, 70.0, 70.0, 79.0, 88.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+459,RES1.BCAR_Jan-201.contents.all_floors.slab_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+460,RES3A.BCAR_Jan-201.contents.1to2_stories.slab_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+461,RES3B.BCAR_Jan-201.contents.1to2_stories.slab_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+462,RES1.BCAR_Jan-201.contents.all_floors.wall_2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+463,RES3A.BCAR_Jan-201.contents.1to2_stories.wall_2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+464,RES3B.BCAR_Jan-201.contents.1to2_stories.wall_2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+465,RES1.BCAR_Jan-201.contents.all_floors.wall_3ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+466,RES3A.BCAR_Jan-201.contents.1to2_stories.wall_3ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+467,RES3B.BCAR_Jan-201.contents.1to2_stories.wall_3ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+468,RES2.BCAR_Jan-201.contents.manufactured_home_mobile.structure.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+469,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+470,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+471,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+472,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+473,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+474,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+475,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+476,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+477,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+478,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+479,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+480,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+481,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+482,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+483,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+484,RES1.BCAR_Jan-201.contents.all_floors.elevated_open+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+485,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_open+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+486,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_open+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+487,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+488,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+489,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+2ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+490,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+491,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+492,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+4ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+493,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+494,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+495,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+6ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+496,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+497,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+498,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+8ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+499,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+500,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+501,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+10ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+502,RES1.BCAR_Jan-201.contents.all_floors.elevated_obstr+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+503,RES3A.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+504,RES3B.BCAR_Jan-201.contents.1to2_stories.elevated_obstr+12ft_no_basement.coastal_a_or_v_zone-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+505,RES3.USACE_Chicago.contents.apartment_unit_grade-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 24.0, 33.0, 35.0, 37.0, 41.0, 45.0, 50.0, 55.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0, 60.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+506,RES1.BCAR_Jan-201.contents.one_story.with_basement.b14-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 11.0, 13.0, 16.0, 19.0, 22.0, 25.0, 27.0, 30.0, 32.0, 35.0, 36.0, 38.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0, 39.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+507,AGR1.USACE-Sacramento.contents.usace_sacramento_farms.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 76.0, 76.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+508,AGR1.USACE-Sacramento.contents.usace_sacramento_farms.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 56.0, 56.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+509,AGR1.USACE-Sacramento.contents.usace_sacramento_farms.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 27.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+510,COM.USACE-NACCS.contents.naccs_2_commercial_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.5, 24.0, 33.25, 40.0, 42.75, 45.5, 52.75, 60.0, 61.66667, 63.33333, 65.0, 66.66667, 68.33333, 70.0, 71.66667, 73.33333, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+511,COM.USACE-NACCS.contents.naccs_2_commercial_engineered_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 15.0, 26.25, 38.5, 52.375, 66.25, 73.125, 80.0, 82.0, 84.0, 86.0, 88.0, 90.0, 92.0, 94.0, 96.0, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5, 97.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+512,COM.USACE-NACCS.contents.naccs_3_commercial_non/pre_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 1.0, 21.0, 29.75, 44.5, 49.75, 55.0, 59.75, 64.5, 67.5, 70.5, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5, 90.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+513,COM.USACE-NACCS.contents.naccs_3_commercial_non/pre_engineered_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.5, 21.0, 33.75, 52.5, 67.5, 82.5, 91.25, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+514,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+515,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+516,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+517,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+518,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+519,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+520,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+521,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+522,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+523,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+524,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+525,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+526,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+527,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+528,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+529,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+530,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+531,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+532,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_furniture_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+533,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_electronics_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+534,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_retail_clothing_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+535,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_service_station_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+536,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_convenience_store_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+537,COM1.FEMA-BCA-Toolkit.contents.bca_toolkit_grocery_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+538,COM1.FEMA-BCA-Toolkit.contents.convenience_store.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+539,COM1.FEMA-BCA-Toolkit.contents.convenience_store.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.14286, 39.85714, 52.85714, 70.71429, 79.28571, 88.0, 94.14286, 95.71429, 97.14286, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+540,COM1.FEMA-BCA-Toolkit.contents.grocery.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+541,COM1.FEMA-BCA-Toolkit.contents.grocery.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.71429, 40.92857, 52.85714, 64.0, 75.42857, 87.28571, 98.85714, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+542,COM1.FEMA-BCA-Toolkit.contents.retail_clothing.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+543,COM1.FEMA-BCA-Toolkit.contents.retail_clothing.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 46.28571, 55.42857, 70.0, 79.0, 89.0, 95.71429, 97.85714, 97.85714, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+544,COM1.FEMA-BCA-Toolkit.contents.retail_electronics.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+545,COM1.FEMA-BCA-Toolkit.contents.retail_electronics.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 34.14286, 44.28571, 67.0, 77.71429, 86.71429, 95.42857, 97.42857, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+546,COM1.FEMA-BCA-Toolkit.contents.retail_furniture.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+547,COM1.FEMA-BCA-Toolkit.contents.retail_furniture.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.85714, 61.85714, 68.14286, 79.14286, 85.71429, 90.71429, 97.14286, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571, 99.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+548,COM1.FEMA-BCA-Toolkit.contents.service_station.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+549,COM1.FEMA-BCA-Toolkit.contents.service_station.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.35714, 16.42857, 28.92857, 40.85714, 57.71429, 63.28571, 70.71429, 79.28571, 84.28571, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286, 87.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+550,COM1.USACE-New-Orleans.contents.usace_new_orleans_department_store.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.3, 48.2, 54.1, 54.3, 54.8, 54.8, 54.8, 54.8, 54.8, 98.9, 99.9, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+551,COM1.USACE-New-Orleans.contents.usace_new_orleans_department_store.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 99.5, 99.8, 99.9, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+552,COM1.USACE-New-Orleans.contents.usace_new_orleans_large_grocery.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 73.6, 81.4, 84.8, 87.6, 96.3, 96.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3, 98.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+553,COM1.USACE-New-Orleans.contents.usace_new_orleans_large_grocery.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 97.5, 99.1, 99.4, 99.7, 99.7, 99.7, 99.7, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+554,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 29.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+555,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+556,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 32.0, 89.0, 89.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+557,COM1.USACE-Sacramento.contents.usace_sacramento_retail.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+558,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 72.0, 72.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+559,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+560,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+561,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.0, 27.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+562,COM1.USACE-Sacramento.contents.usace_sacramento_retail.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.0, 19.0, 36.0, 36.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+563,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 28.0, 40.0, 40.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+564,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 78.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+565,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+566,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 87.0, 87.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+567,COM1.USACE-Sacramento.contents.usace_sacramento_retail.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 80.0, 80.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+568,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+569,COM1.USACE-Sacramento.contents.usace_sacramento_food_stores.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 38.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+570,COM1.USACE-Sacramento.contents.usace_sacramento_furniture_retail.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 47.0, 47.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+571,COM1.USACE-Sacramento.contents.usace_sacramento_grocery_store.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+572,COM1.USACE-Sacramento.contents.usace_sacramento_retail.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 38.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+573,COM1.USACE-Sacramento.contents.usace_sacramento_shopping_centers.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.0, 46.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+574,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+575,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+576,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+577,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+578,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+579,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+580,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.refrig_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+581,COM2.FEMA-BCA-Toolkit.contents.bca_toolkit_warehouse.non_refrig_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+582,COM2.USACE-New-Orleans.contents.usace_new_orleans_warehouse.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.2, 30.7, 39.7, 44.5, 48.8, 54.1, 58.3, 62.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6, 71.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+583,COM2.USACE-New-Orleans.contents.usace_new_orleans_warehouse.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 53.0, 69.9, 79.9, 96.3, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0, 97.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+584,COM2.FEMA-BCA-Toolkit.contents.warehouse.non_refrig_default.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+585,COM2.FEMA-BCA-Toolkit.contents.warehouse.non_refrig_default.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.71429, 33.71429, 47.42857, 56.85714, 65.57143, 73.57143, 81.28571, 88.42857, 91.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+586,COM2.FEMA-BCA-Toolkit.contents.warehouse.refrig_default.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+587,COM2.FEMA-BCA-Toolkit.contents.warehouse.refrig_default.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.71429, 48.0, 59.14286, 65.71429, 74.28571, 79.71429, 84.0, 89.85714, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143, 93.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+588,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 23.0, 69.0, 69.0, 96.0, 96.0, 96.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+589,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 38.0, 38.0, 48.0, 48.0, 48.0, 48.0, 48.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+590,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 84.0, 84.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+591,COM2.USACE-Sacramento.contents.usace_sacramento_warehouse.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 40.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+592,COM3.FEMA-BCA-Toolkit.contents.protective_services.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+593,COM3.FEMA-BCA-Toolkit.contents.protective_services.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+594,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 9.0, 10.0, 23.0, 23.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+595,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 8.0, 8.0, 19.0, 19.0, 37.0, 37.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+596,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 10.0, 74.0, 74.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+597,COM3.USACE-Sacramento.contents.usace_sacramento_service_auto.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.0, 5.0, 35.0, 35.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+598,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+599,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+600,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+601,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+602,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+603,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+604,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_protective_services_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.28571, 24.71429, 36.71429, 46.57143, 55.28571, 62.85714, 74.42857, 82.71429, 84.42857, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0, 86.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+605,COM4.FEMA-BCA-Toolkit.contents.bca_toolkit_office_one_story_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+606,COM4.FEMA-BCA-Toolkit.contents.office_one_story.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+607,COM4.FEMA-BCA-Toolkit.contents.office_one_story.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.71429, 0.71429, 1.0, 20.0, 34.28571, 45.42857, 55.0, 63.85714, 73.28571, 76.42857, 83.42857, 89.28571, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857, 91.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+608,COM4.USACE-New-Orleans.contents.usace_new_orleans_utility_company.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+609,COM4.USACE-New-Orleans.contents.usace_new_orleans_utility_company.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+610,COM4.USACE-Sacramento.contents.usace_sacramento_office.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+611,COM4.USACE-Sacramento.contents.usace_sacramento_office.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.0, 29.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+612,COM4.USACE-Sacramento.contents.usace_sacramento_office.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 97.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+613,COM4.USACE-Sacramento.contents.usace_sacramento_office.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 46.0, 46.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+614,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+615,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+616,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+617,COM6.FEMA-BCA-Toolkit.contents.bca_toolkit_hospital_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+618,COM6.FEMA-BCA-Toolkit.contents.hospital.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+619,COM6.FEMA-BCA-Toolkit.contents.hospital.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.625, 27.0, 37.0, 53.375, 70.0, 79.125, 85.625, 92.5, 95.625, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25, 96.25|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+620,COM6.USACE-New-Orleans.contents.usace_new_orleans_medical_office.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.8, 45.7, 94.1, 96.9, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3, 99.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+621,COM6.USACE-New-Orleans.contents.usace_new_orleans_medical_office.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.5, 98.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+622,COM6.USACE-Sacramento.contents.usace_sacramento_medical.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 89.0, 89.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+623,COM6.USACE-Sacramento.contents.usace_sacramento_medical.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 28.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+624,COM6.USACE-Sacramento.contents.usace_sacramento_medical.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 75.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+625,COM6.USACE-Sacramento.contents.usace_sacramento_medical.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 36.0, 36.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+626,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+627,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+628,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+629,COM7.FEMA-BCA-Toolkit.contents.bca_toolkit_medical_office_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+630,COM7.FEMA-BCA-Toolkit.contents.medical_office.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+631,COM7.FEMA-BCA-Toolkit.contents.medical_office.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.25, 26.875, 40.375, 57.125, 67.25, 75.375, 82.25, 91.25, 96.25, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875, 96.875|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+632,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+633,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+634,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+635,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+636,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+637,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+638,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+639,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+640,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+641,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_fast_food_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+642,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_non_fast_food_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+643,COM8.FEMA-BCA-Toolkit.contents.bca_toolkit_recreation_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+644,COM8.FEMA-BCA-Toolkit.contents.fast_food.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+645,COM8.FEMA-BCA-Toolkit.contents.fast_food.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.28571, 38.57143, 52.71429, 62.57143, 73.0, 79.28571, 88.28571, 94.85714, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143, 98.57143|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+646,COM8.FEMA-BCA-Toolkit.contents.non_fast_food.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+647,COM8.FEMA-BCA-Toolkit.contents.non_fast_food.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 27.71429, 48.85714, 57.28571, 71.85714, 79.71429, 84.85714, 92.85714, 93.42857, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571, 94.28571|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+648,COM8.FEMA-BCA-Toolkit.contents.recreation.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+649,COM8.FEMA-BCA-Toolkit.contents.recreation.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.71429, 43.71429, 62.71429, 72.85714, 80.0, 84.0, 91.14286, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+650,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+651,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 23.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+652,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 38.0, 38.0, 95.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+653,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 25.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+654,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+655,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 32.0, 32.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+656,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 91.0, 91.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+657,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+658,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 98.0, 98.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+659,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 44.0, 44.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+660,COM8.USACE-Sacramento.contents.usace_sacramento_restaurant_fast_food.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+661,COM8.USACE-Sacramento.contents.usace_sacramento_recreation.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 47.0, 47.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+662,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+663,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+664,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+665,EDU1.FEMA-BCA-Toolkit.contents.bca_toolkit_schools_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+666,EDU1.FEMA-BCA-Toolkit.contents.schools.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+667,EDU1.FEMA-BCA-Toolkit.contents.schools.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+668,EDU1.USACE-New-Orleans.contents.usace_new_orleans_elementary_school.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+669,EDU1.USACE-New-Orleans.contents.usace_new_orleans_elementary_school.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+670,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 22.0, 22.0, 67.0, 67.0, 88.0, 88.0, 88.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+671,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 18.0, 18.0, 37.0, 37.0, 44.0, 44.0, 44.0, 44.0, 44.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+672,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+673,EDU1.USACE-Sacramento.contents.usace_sacramento_schools.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+674,EDU2.USACE-New-Orleans.contents.usace_new_orleans_college.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+675,EDU2.USACE-New-Orleans.contents.usace_new_orleans_college.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+676,GOV1.USACE-New-Orleans.contents.usace_new_orleans_government_facility.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 5.1, 17.5, 49.8, 50.1, 51.8, 57.6, 57.6, 57.6, 57.6, 57.8, 59.4, 69.2, 69.6, 69.7, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9, 78.9|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+677,GOV1.USACE-New-Orleans.contents.usace_new_orleans_government_facility.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 60.2, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+678,GOV1.USACE-Sacramento.contents.usace_sacramento_government.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+679,GOV1.USACE-Sacramento.contents.usace_sacramento_government.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+680,GOV1.USACE-Sacramento.contents.usace_sacramento_government.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 97.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+681,GOV1.USACE-Sacramento.contents.usace_sacramento_government.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 45.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+682,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.0, 16.0, 56.0, 56.0, 92.0, 92.0, 92.0, 92.0, 92.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+683,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 14.0, 14.0, 31.0, 31.0, 46.0, 46.0, 46.0, 46.0, 46.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+684,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 77.0, 77.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+685,IND1.USACE-Sacramento.contents.usace_sacramento_heavy.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 40.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+686,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+687,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+688,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+689,IND2.FEMA-BCA-Toolkit.contents.bca_toolkit_industrial_light_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+690,IND2.FEMA-BCA-Toolkit.contents.industrial_light.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+691,IND2.FEMA-BCA-Toolkit.contents.industrial_light.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 19.28571, 31.0, 42.28571, 52.28571, 60.71429, 72.0, 82.14286, 90.71429, 94.28571, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+692,IND2.USACE-Sacramento.contents.usace_sacramento_light.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 75.0, 75.0, 96.0, 96.0, 96.0, 96.0, 96.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+693,IND2.USACE-Sacramento.contents.usace_sacramento_light.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 30.0, 41.0, 41.0, 48.0, 48.0, 48.0, 48.0, 48.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+694,IND2.USACE-Sacramento.contents.usace_sacramento_light.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+695,IND2.USACE-Sacramento.contents.usace_sacramento_light.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+696,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+697,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+698,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+699,REL1.FEMA-BCA-Toolkit.contents.bca_toolkit_religious_facilities_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+700,REL1.FEMA-BCA-Toolkit.contents.religious_facilities.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+701,REL1.FEMA-BCA-Toolkit.contents.religious_facilities.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 29.28571, 48.42857, 60.0, 69.28571, 76.42857, 81.42857, 88.42857, 94.28571, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286, 97.14286|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+702,REL1.USACE-Sacramento.contents.usace_sacramento_churches.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 33.0, 33.0, 85.0, 85.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+703,REL1.USACE-Sacramento.contents.usace_sacramento_churches.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 28.0, 28.0, 47.0, 47.0, 49.0, 49.0, 49.0, 49.0, 49.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+704,REL1.USACE-Sacramento.contents.usace_sacramento_churches.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 73.0, 73.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+705,REL1.USACE-Sacramento.contents.usace_sacramento_churches.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 35.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+706,RES1.USACE-NACCS.contents.naccs_5a_single_story_residence_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 40.0, 60.0, 80.0, 85.0, 90.0, 95.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+707,RES1.USACE-NACCS.contents.naccs_5a_single_story_residence_no_basement_wave_crawlspace-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 60.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+708,RES1.USACE-NACCS.contents.naccs_5b_two_story_residence_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 5.0, 25.0, 35.0, 45.0, 50.0, 55.0, 62.5, 70.0, 73.33333, 76.66667, 80.0, 83.33333, 86.66667, 90.0, 93.33333, 96.66667, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+709,RES1.USACE-NACCS.contents.naccs_5b_two_story_residence_no_basement_wave_crawlspace-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 5.0, 20.0, 35.0, 45.0, 94.0, 97.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+710,RES1.USACE-NACCS.contents.naccs_6a_single_story_residence_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 15.0, 45.0, 64.0, 80.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+711,RES1.USACE-NACCS.contents.naccs_6b_two_story_residence_with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.0, 15.0, 20.0, 35.0, 40.0, 50.0, 55.0, 60.0, 65.0, 70.0, 76.66667, 83.33333, 90.0, 96.66667, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+712,RES1.USACE-NACCS.contents.naccs_7a_building_on_open_pile_foundation-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 1.0, 1.0, 10.0, 40.0, 50.0, 80.0, 89.0, 98.0, 99.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+713,RES1.USACE-NACCS.contents.naccs_7a_building_on_open_pile_foundation_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 12.5, 20.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+714,RES1.USACE-NACCS.contents.naccs_7b_building_on_pile_foundation_with_enclosure-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 9.0, 11.0, 20.0, 40.0, 75.0, 85.0, 92.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+715,RES1.USACE-NACCS.contents.naccs_7b_building_on_pile_foundation_with_enclosure_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 25.0, 40.0, 50.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+716,RES1.USACE-Generic.contents.single_family_residential.1_story_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.5, 13.2, 16.0, 18.9, 21.8, 24.7, 27.4, 30.0, 32.4, 34.5, 36.3, 37.7, 38.6, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+717,RES1.USACE-Generic.contents.single_family_residential.2_or_more_stories_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.4, 10.1, 11.9, 13.8, 15.7, 17.7, 19.8, 22.0, 24.3, 26.7, 29.1, 31.7, 34.4, 37.2, 40.0, 43.0, 46.1, 49.3, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+718,RES1.USACE-Generic.contents.single_family_residential.split_level_with_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 7.3, 9.4, 11.6, 13.8, 16.1, 18.2, 20.2, 22.1, 23.6, 24.9, 25.8, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+719,RES1.USACE-Generic.contents.single_family_residential.1_story_with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.4, 8.1, 13.3, 17.9, 22.0, 25.7, 28.8, 31.5, 33.8, 35.7, 37.2, 38.4, 39.2, 39.7, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+720,RES1.USACE-Generic.contents.single_family_residential.2_or_more_stories_with_no_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 1.0, 5.0, 8.7, 12.2, 15.5, 18.5, 21.3, 23.9, 26.3, 28.4, 30.3, 32.0, 33.4, 34.7, 35.6, 36.4, 36.9, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+721,RES1.USACE-Generic.contents.single_family_residential.split_level_with_no_basements-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.2, 2.9, 4.7, 7.5, 11.1, 15.3, 20.1, 25.2, 30.5, 35.7, 40.9, 45.8, 50.2, 54.1, 57.2, 59.4, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+722,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_slab_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 30.0, 45.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+723,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_wall_2_feet_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+724,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_wall_3_feet_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+725,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+2_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 45.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+726,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+4_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 35.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+727,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+6_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 30.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+728,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+8_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+729,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+10_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 25.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+730,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_open_+12_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 10.0, 65.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+731,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+2_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 3.0, 48.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+732,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+4_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 3.0, 3.0, 38.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+733,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+6_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 2.0, 3.0, 3.0, 3.0, 33.0, 93.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+734,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+8_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 3.0, 3.0, 3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+735,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+10_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 3.0, 3.0, 28.0, 78.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+736,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_elevated_obstr_+12_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 3.0, 13.0, 68.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+737,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_1_story_without_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.4, 8.1, 13.3, 17.9, 22.0, 25.7, 28.8, 31.5, 33.8, 35.7, 37.2, 38.4, 39.2, 39.7, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+738,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_2_story_without_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 5.0, 8.7, 12.2, 15.5, 18.5, 21.3, 23.9, 26.3, 28.4, 30.3, 32.0, 33.4, 34.7, 35.6, 36.4, 36.9, 37.2, 37.2, 37.2, 37.2, 37.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+739,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_split_level_without_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2, 2.9, 4.7, 7.5, 11.1, 15.3, 20.1, 25.2, 30.5, 35.7, 40.9, 45.8, 50.2, 54.1, 57.2, 59.4, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+740,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_1_story_with_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.5, 13.2, 16.0, 18.9, 21.8, 24.7, 27.4, 30.0, 32.4, 34.5, 36.3, 37.7, 38.6, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+741,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_2_story_with_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.4, 10.1, 11.9, 13.8, 15.7, 17.7, 19.8, 22.0, 24.3, 26.7, 29.1, 31.7, 34.4, 37.2, 40.0, 43.0, 46.1, 49.3, 52.6, 52.6, 52.6, 52.6, 52.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+742,RES1.FEMA-BCA-Toolkit.contents.bca_toolkit_split_level_with_basement_outside_caz_usace_generic_riverine-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.3, 9.4, 11.6, 13.8, 16.1, 18.2, 20.2, 22.1, 23.6, 24.9, 25.8, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+743,RES1.FEMA-FIMA.contents.fema_fia_split_level_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 13.5, 19.5, 37.5, 40.5, 42.0, 49.5, 51.0, 61.5, 64.5, 67.5, 69.0, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+744,RES1.FEMA-FIMA.contents.fema_fia_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+745,RES1.FEMA-FIMA.contents.fema_fia_split_level_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 7.5, 9.0, 24.0, 28.5, 33.0, 40.5, 48.0, 52.5, 54.0, 66.0, 72.0, 75.0, 78.0, 81.0, 84.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+746,RES1.FEMA-FIMA.contents.fema_fia_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+747,RES1.FEMA-FIMA.contents.fema_fia_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 15.0, 23.0, 35.0, 50.0, 58.0, 63.0, 66.5, 69.5, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+748,RES1.FEMA-FIMA.contents.fema_fia_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+749,RES1.FEMA-FIMA.contents.fema_fia_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+750,RES1.FEMA-FIMA.contents.fema_fia_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 29.0, 37.0, 54.0, 60.5, 64.5, 68.0, 70.0, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+751,RES1.FEMA-FIMA.contents.fema_fia.split_level.with_basement_split_level-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 7.5, 9.0, 24.0, 28.5, 33.0, 40.5, 48.0, 52.5, 54.0, 66.0, 72.0, 75.0, 78.0, 81.0, 84.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+752,RES1.FEMA-FIMA.contents.fema_fia_default_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+753,RES1.FEMA-FIMA.contents.fema_fia.1_story.no_basement_one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+754,RES1.FEMA-FIMA.contents.fema_fia.2_story.no_basement_two_or_more_stories-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+755,RES1.FEMA-FIMA.contents.fema_fia_default_one_story_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+756,RES1.FEMA-FIMA.contents.fema_fia_default_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 11.0, 24.0, 29.0, 37.0, 54.0, 60.5, 64.5, 68.0, 70.0, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+757,RES1.FEMA-FIMA.contents.fema_fia_default_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+758,RES1.FEMA-FIMA.contents.fema_fia.1_story.with_basement_one_story-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+759,RES1.FEMA-FIMA.contents.fema_fia_default_split_level_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.5, 13.5, 19.5, 37.5, 40.5, 42.0, 49.5, 51.0, 61.5, 64.5, 67.5, 69.0, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+760,RES1.FEMA-FIMA.contents.fema_fia_default_two_or_more_stories_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+761,RES1.FEMA-FIMA.contents.fema_fia_default_coastal_building_zone_v-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 6.0, 15.0, 23.0, 35.0, 50.0, 58.0, 63.0, 66.5, 69.5, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+762,RES1.FEMA-BCA-Toolkit.contents.1_story_single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+763,RES1.FEMA-BCA-Toolkit.contents.1_story_single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 13.5, 21.0, 33.0, 40.5, 43.5, 45.0, 60.0, 64.5, 66.0, 67.5, 69.0, 70.5, 72.0, 73.5, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0, 75.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+764,RES1.FEMA-BCA-Toolkit.contents.split_level_single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 4.5, 7.5, 9.0, 24.0, 28.5, 33.0, 40.5, 48.0, 52.5, 54.0, 66.0, 72.0, 75.0, 78.0, 81.0, 84.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+765,RES1.FEMA-BCA-Toolkit.contents.split_level_single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 4.5, 13.5, 19.5, 37.5, 40.5, 42.0, 49.5, 51.0, 61.5, 64.5, 67.5, 69.0, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5, 70.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+766,RES1.FEMA-BCA-Toolkit.contents.2_or_more_story_single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 6.0, 12.0, 16.5, 22.5, 30.0, 34.5, 42.0, 49.5, 57.0, 66.0, 73.5, 76.5, 79.5, 82.5, 85.5, 88.5, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+767,RES1.FEMA-BCA-Toolkit.contents.2_or_more_story_single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 7.5, 13.5, 19.5, 27.0, 30.0, 33.0, 36.0, 39.0, 43.5, 49.5, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0, 57.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+768,RES1.FEMA-BCA-Toolkit.contents.single_family_home.basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 11.0, 24.0, 29.0, 37.0, 54.0, 60.5, 64.5, 68.0, 70.0, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+769,RES1.FEMA-BCA-Toolkit.contents.single_family_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 6.0, 15.0, 23.0, 35.0, 50.0, 58.0, 63.0, 66.5, 69.5, 72.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 76.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0, 87.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+770,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.pier_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 41.8, 62.9, 82.1, 84.6, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+771,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.pier_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+772,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.slab_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 41.8, 62.9, 82.1, 84.6, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2, 91.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+773,RES1.USACE-New-Orleans.contents.one_story.usace_new_orleans_one_story.slab_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+774,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.pier_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 36.5, 50.8, 50.8, 55.3, 55.3, 55.3, 55.3, 55.3, 72.5, 80.7, 87.1, 90.1, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+775,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.pier_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+776,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.slab_foundation.structure.fresh_water.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 25.0, 36.5, 50.8, 50.8, 55.3, 55.3, 55.3, 55.3, 55.3, 72.5, 80.7, 87.1, 90.1, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5, 92.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+777,RES1.USACE-New-Orleans.contents.two_or_more.usace_new_orleans_two_story.slab_foundation.structure.salt_water.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+778,RES1.USACE-Generic.contents.one_story.usace_generic.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 10.5, 13.2, 16.0, 18.9, 21.8, 24.7, 27.4, 30.0, 32.4, 34.5, 36.3, 37.7, 38.6, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1, 39.1|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+779,RES1.USACE-Generic.contents.one_story.usace_generic.with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.4, 8.1, 13.3, 17.9, 22.0, 25.7, 28.8, 31.5, 33.8, 35.7, 37.2, 38.4, 39.2, 39.7, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0, 40.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+780,RES1.USACE-Generic.contents.split_level.usace_generic.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 7.3, 9.4, 11.6, 13.8, 16.1, 18.2, 20.2, 22.1, 23.6, 24.9, 25.8, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3, 26.3|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+781,RES1.USACE-Generic.contents.split_level.usace_generic.with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 2.2, 2.9, 4.7, 7.5, 11.1, 15.3, 20.1, 25.2, 30.5, 35.7, 40.9, 45.8, 50.2, 54.1, 57.2, 59.4, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+782,RES1.USACE-Generic.contents.two_or_more_stories.usace_generic.with_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 8.4, 10.1, 11.9, 13.8, 15.7, 17.7, 19.8, 22.0, 24.3, 26.7, 29.1, 31.7, 34.4, 37.2, 40.0, 43.0, 46.1, 49.3, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6, 52.6|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+783,RES1.USACE-Generic.contents.two_or_more_stories.usace_generic.with_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 1.0, 5.0, 8.7, 12.2, 15.5, 18.5, 21.3, 23.9, 26.3, 28.4, 30.3, 32.0, 33.4, 34.7, 35.6, 36.4, 36.9, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2, 37.2|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+784,RES2.FEMA-BCA-Toolkit.contents.bca_toolkit_manufactured_home_caz_and_v_zone_expert_panel-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 75.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+785,RES2.FEMA-BCA-Toolkit.contents.bca_toolkit_mobile_home_outside_caz_fema_fia_riverine_adjusted-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+786,RES2.FEMA-FIMA.contents.fema_fia_mobile_home_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+787,RES2.FEMA-FIMA.contents.fema_fia_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+788,RES2.FEMA-FIMA.contents.fema_fia_default_mobile_home-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+789,RES2.FEMA-FIMA.contents.fema_fia_default_mobile_home_zone_a-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+790,RES2.FEMA-BCA-Toolkit.contents.mobile_home.no_basement.fema_fia_original_source-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 12.0, 66.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+791,RES3.USACE-NACCS.contents.naccs_1a_1_apartments_1_story_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 3.5, 28.0, 45.0, 60.0, 70.5, 81.0, 90.5, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+792,RES3.USACE-NACCS.contents.naccs_1a_3_apartments_3_story_no_basement-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.0, 15.0, 20.0, 25.0, 27.5, 30.0, 32.5, 35.0, 38.33333, 41.66667, 45.0, 48.33333, 51.66667, 55.0, 58.33333, 61.66667, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0, 65.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+793,RES3.USACE-NACCS.contents.naccs_4a_urban_high_rise-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.375, 0.5, 4.0, 5.0, 7.0, 7.5, 8.75, 10.0, 10.5, 11.0, 11.33333, 11.66667, 12.0, 12.33333, 12.66667, 13.0, 13.33333, 13.66667, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0, 14.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+794,RES3.USACE-NACCS.contents.naccs_4b_beach_high_rise-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 4.5, 5.5, 6.25, 7.0, 7.75, 8.5, 8.66667, 8.83333, 9.0, 9.16667, 9.33333, 9.5, 9.66667, 9.83333, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+795,RES3.USACE-NACCS.contents.naccs_4b_beach_high_rise_waves-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 2.5, 21.0, 33.75, 52.5, 67.5, 82.5, 91.25, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+796,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+797,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+798,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+799,RES3.FEMA-BCA-Toolkit.contents.bca_toolkit_apartment_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+800,RES3.FEMA-BCA-Toolkit.contents.apartment.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+801,RES3.FEMA-BCA-Toolkit.contents.apartment.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 21.71429, 30.42857, 39.0, 45.0, 47.85714, 51.85714, 55.71429, 59.28571, 60.57143, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857, 63.42857|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+802,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+803,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+804,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+805,RES4.FEMA-BCA-Toolkit.contents.bca_toolkit_hotel_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+806,RES4.FEMA-BCA-Toolkit.contents.hotel.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+807,RES4.FEMA-BCA-Toolkit.contents.hotel.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286, 39.71429, 48.71429, 52.42857, 58.42857, 61.28571, 63.14286, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714, 64.85714|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+808,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.1_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 23.0, 23.0, 90.0, 90.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+809,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.2_story.short_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 20.0, 20.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 50.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+810,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.1_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 88.0, 88.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+811,RES4.USACE-Sacramento.contents.usace_sacramento_hotel_full_service.2_story.long_duration-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 42.0, 42.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 56.0, 67.0, 67.0, 67.0, 67.0, 67.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+812,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_non_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+813,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_generic_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+814,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+815,RES5.FEMA-BCA-Toolkit.contents.bca_toolkit_correctional_facility_default_non_generic_non_engineered_building-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+816,RES5.FEMA-BCA-Toolkit.contents.correctional_facility.engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
+817,RES5.FEMA-BCA-Toolkit.contents.correctional_facility.non_engineered-Cost,0,Peak Inundation Height,in,0,1,loss_ratio,"0.0, 0.0, 0.0, 0.0, 0.0, 13.125, 21.25, 31.0, 44.125, 53.0, 62.25, 69.5, 77.5, 83.75, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5, 87.5|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0"
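Each row above encodes a flood depth-damage function: the final quoted field holds the loss-ratio ordinates and the matching Peak Inundation Height abscissas, separated by '|'. A minimal sketch of evaluating such a curve, assuming linear interpolation between the tabulated points (the column semantics are inferred from the rows themselves, not from a documented schema):

import numpy as np

# Quoted field from a row above, shortened for brevity: "losses|heights".
field = (
    '0.0, 0.0, 0.0, 0.0, 0.0, 16.14286, 26.28571, 34.14286'
    '|-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0'
)
loss_part, height_part = field.split('|')
loss_ratios = [float(x) for x in loss_part.split(',')]
heights = [float(x) for x in height_part.split(',')]

# Linear interpolation; np.interp clamps to the end points outside the range.
peak_height = 1.5  # in
print(np.interp(peak_height, heights, loss_ratios))  # ~21.2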
diff --git a/pelicun/resources/auto/Hazus_Earthquake_CSM.py b/pelicun/resources/auto/Hazus_Earthquake_CSM.py
index 0816ea72f..5b53f3898 100644
--- a/pelicun/resources/auto/Hazus_Earthquake_CSM.py
+++ b/pelicun/resources/auto/Hazus_Earthquake_CSM.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 Leland Stanford Junior University
# Copyright (c) 2023 The Regents of the University of California
@@ -39,38 +38,38 @@
import pandas as pd
-ap_DesignLevel = {1940: "LC", 1975: "MC", 2100: "HC"}
+ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'}
# ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'}
-ap_DesignLevel_W1 = {0: "LC", 1975: "MC", 2100: "HC"}
+ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'}
# ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'}
ap_Occupancy = {
- "Other/Unknown": "RES3",
- "Residential - Single-Family": "RES1",
- "Residential - Town-Home": "RES3",
- "Residential - Multi-Family": "RES3",
- "Residential - Mixed Use": "RES3",
- "Office": "COM4",
- "Hotel": "RES4",
- "School": "EDU1",
- "Industrial - Light": "IND2",
- "Industrial - Warehouse": "IND2",
- "Industrial - Heavy": "IND1",
- "Retail": "COM1",
- "Parking": "COM10",
+ 'Other/Unknown': 'RES3',
+ 'Residential - Single-Family': 'RES1',
+ 'Residential - Town-Home': 'RES3',
+ 'Residential - Multi-Family': 'RES3',
+ 'Residential - Mixed Use': 'RES3',
+ 'Office': 'COM4',
+ 'Hotel': 'RES4',
+ 'School': 'EDU1',
+ 'Industrial - Light': 'IND2',
+ 'Industrial - Warehouse': 'IND2',
+ 'Industrial - Heavy': 'IND1',
+ 'Retail': 'COM1',
+ 'Parking': 'COM10',
}
convert_design_level = {
- "High-Code": "HC",
- "Moderate-Code": "MC",
- "Low-Code": "LC",
- "Pre-Code": "PC",
+ 'High-Code': 'HC',
+ 'Moderate-Code': 'MC',
+ 'Low-Code': 'LC',
+ 'Pre-Code': 'PC',
}
def convert_story_rise(structureType, stories):
- if structureType in ["W1", "W2", "S3", "PC1", "MH"]:
+ if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']:
# These archetypes have no rise information in their IDs
rise = None
@@ -79,101 +78,103 @@ def convert_story_rise(structureType, stories):
try:
stories = int(stories)
- except (ValueError, TypeError):
- raise ValueError(
+ except (ValueError, TypeError) as exc:
+ msg = (
'Missing "NumberOfStories" information, '
- "cannot infer `rise` attribute of archetype"
+ 'cannot infer `rise` attribute of archetype'
)
- if structureType == "RM1":
+ raise ValueError(msg) from exc
+
+ if structureType == 'RM1':
if stories <= 3:
- rise = "L"
+ rise = 'L'
else:
- rise = "M"
+ rise = 'M'
- elif structureType == "URM":
+ elif structureType == 'URM':
if stories <= 2:
- rise = "L"
+ rise = 'L'
else:
- rise = "M"
+ rise = 'M'
elif structureType in [
- "S1",
- "S2",
- "S4",
- "S5",
- "C1",
- "C2",
- "C3",
- "PC2",
- "RM2",
+ 'S1',
+ 'S2',
+ 'S4',
+ 'S5',
+ 'C1',
+ 'C2',
+ 'C3',
+ 'PC2',
+ 'RM2',
]:
if stories <= 3:
- rise = "L"
+ rise = 'L'
elif stories <= 7:
- rise = "M"
+ rise = 'M'
else:
- rise = "H"
+ rise = 'H'
return rise
-def auto_populate(AIM):
+def auto_populate(aim):
"""
Automatically creates a performance model for story EDP-based Hazus EQ analysis.
Parameters
----------
- AIM: dict
+ aim: dict
Asset Information Model - provides features of the asset that can be
used to infer attributes of the performance model.
Returns
-------
- GI_ap: dict
+ gi_ap: dict
Extended General Information - extends the GI from the input AIM with
additional inferred features. These features are typically used in
intermediate steps during the auto-population and are not required
for the performance assessment. They are returned to allow reviewing
how these latent variables affect the final results.
- DL_ap: dict
+ dl_ap: dict
Damage and Loss parameters - these define the performance model and
details of the calculation.
- CMP: DataFrame
+ comp: DataFrame
Component assignment - Defines the components (in rows) and their
location, direction, and quantity (in columns).
"""
# extract the General Information
- GI = AIM.get("GeneralInformation", None)
+ gi = aim.get('GeneralInformation', None)
- if GI is None:
+ if gi is None:
# TODO: show an error message
pass
- # initialize the auto-populated GI
- GI_ap = GI.copy()
+ # initialize the auto-populated gi
+ gi_ap = gi.copy()
- assetType = AIM["assetType"]
- ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"]
+ assetType = aim['assetType']
+ ground_failure = aim['Applications']['DL']['ApplicationData']['ground_failure']
- if assetType == "Buildings":
+ if assetType == 'Buildings':
# get the building parameters
- bt = GI["StructureType"] # building type
+ bt = gi['StructureType'] # building type
# get the design level
- dl = GI.get("DesignLevel", None)
+ dl = gi.get('DesignLevel', None)
if dl is None:
# If there is no DesignLevel provided, we assume that the YearBuilt is
# available
- year_built = GI["YearBuilt"]
+ year_built = gi['YearBuilt']
- if "W1" in bt:
+ if 'W1' in bt:
DesignL = ap_DesignLevel_W1
else:
DesignL = ap_DesignLevel
@@ -183,108 +184,108 @@ def auto_populate(AIM):
dl = DesignL[year]
break
- GI_ap["DesignLevel"] = dl
+ gi_ap['DesignLevel'] = dl
# get the number of stories / height
- stories = GI.get("NumberOfStories", None)
+ stories = gi.get('NumberOfStories', None)
# We assume that the structure type does not include height information
    # and we append it here based on the number-of-stories information
rise = convert_story_rise(bt, stories)
# get the number of stories / height
- stories = GI.get("NumberOfStories", None)
+ stories = gi.get('NumberOfStories', None)
if rise is None:
# To prevent STR.W2.None.LC
- FG_S = f"STR.{bt}.{dl}"
+ fg_s = f'STR.{bt}.{dl}'
else:
- FG_S = f"STR.{bt}.{rise}.{dl}"
- # FG_S = f"STR.{bt}.{dl}"
- FG_NSD = "NSD"
- FG_NSA = f"NSA.{dl}"
+ fg_s = f'STR.{bt}.{rise}.{dl}'
+ # fg_s = f"STR.{bt}.{dl}"
+ fg_nsd = 'NSD'
+ fg_nsa = f'NSA.{dl}'
- CMP = pd.DataFrame(
+ comp = pd.DataFrame(
{
- f"{FG_S}": [
- "ea",
+ f'{fg_s}': [
+ 'ea',
1,
1,
1,
- "N/A",
+ 'N/A',
],
- f"{FG_NSA}": [
- "ea",
+ f'{fg_nsa}': [
+ 'ea',
1,
0,
1,
- "N/A",
+ 'N/A',
],
- f"{FG_NSD}": [
- "ea",
+ f'{fg_nsd}': [
+ 'ea',
1,
1,
1,
- "N/A",
+ 'N/A',
],
},
- index=["Units", "Location", "Direction", "Theta_0", "Family"],
+ index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
).T
# if needed, add components to simulate damage from ground failure
if ground_failure:
- foundation_type = "S"
+ foundation_type = 'S'
# fmt: off
- FG_GF_H = f'GF.H.{foundation_type}' # noqa
- FG_GF_V = f'GF.V.{foundation_type}' # noqa
- CMP_GF = pd.DataFrame( # noqa
- {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'], # noqa
- f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ FG_GF_H = f'GF.H.{foundation_type}'
+ FG_GF_V = f'GF.V.{foundation_type}'
+ comp_gf = pd.DataFrame(
+ {f'{FG_GF_H}': [ 'ea', 1, 1, 1, 'N/A'], # noqa: E201, E241
+ f'{FG_GF_V}': [ 'ea', 1, 3, 1, 'N/A']}, # noqa: E201, E241
+ index = [ 'Units', 'Location', 'Direction', 'Theta_0', 'Family'] # noqa: E201, E251
+ ).T
# fmt: on
- CMP = pd.concat([CMP, CMP_GF], axis=0)
+ comp = pd.concat([comp, comp_gf], axis=0)
# get the occupancy class
- if GI["OccupancyClass"] in ap_Occupancy.keys():
- ot = ap_Occupancy[GI["OccupancyClass"]]
+ if gi['OccupancyClass'] in ap_Occupancy:
+ occupancy = ap_Occupancy[gi['OccupancyClass']]
else:
- ot = GI["OccupancyClass"]
+ occupancy = gi['OccupancyClass']
- plan_area = GI.get("PlanArea", 1.0)
+ plan_area = gi.get('PlanArea', 1.0)
repair_config = {
- "ConsequenceDatabase": "Hazus Earthquake - Buildings",
- "MapApproach": "Automatic",
- "DecisionVariables": {
- "Cost": True,
- "Carbon": False,
- "Energy": False,
- "Time": False,
+ 'ConsequenceDatabase': 'Hazus Earthquake - Buildings',
+ 'MapApproach': 'Automatic',
+ 'DecisionVariables': {
+ 'Cost': True,
+ 'Carbon': False,
+ 'Energy': False,
+ 'Time': False,
},
}
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Buildings",
- "NumberOfStories": f"{stories}",
- "OccupancyType": f"{ot}",
- "PlanArea": str(plan_area),
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Buildings',
+ 'NumberOfStories': f'{stories}',
+ 'OccupancyType': f'{occupancy}',
+ 'PlanArea': str(plan_area),
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
- "Losses": {"Repair": repair_config},
- "Options": {
- "NonDirectionalMultipliers": {"ALL": 1.0},
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
+ 'Losses': {'Repair': repair_config},
+ 'Options': {
+ 'NonDirectionalMultipliers': {'ALL': 1.0},
},
}
else:
print(
- f"AssetType: {assetType} is not supported "
- f"in Hazus Earthquake Capacity Spectrum Method-based DL method"
+ f'AssetType: {assetType} is not supported '
+ f'in Hazus Earthquake Capacity Spectrum Method-based DL method'
)
- return GI_ap, DL_ap, CMP
+ return gi_ap, dl_ap, comp
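For reference, the comp table returned by auto_populate above is a plain pandas DataFrame with one fragility group per row and the Units/Location/Direction/Theta_0/Family attributes as columns. A toy sketch reproducing that shape (the group IDs below are illustrative, not outputs of the real function):

import pandas as pd

comp = pd.DataFrame(
    {
        'STR.C2.M.MC': ['ea', 1, 1, 1, 'N/A'],  # structural group
        'NSA.MC': ['ea', 1, 0, 1, 'N/A'],       # acceleration-sensitive group
        'NSD': ['ea', 1, 1, 1, 'N/A'],          # drift-sensitive group
    },
    index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
).T
print(comp)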
diff --git a/pelicun/resources/auto/Hazus_Earthquake_IM.py b/pelicun/resources/auto/Hazus_Earthquake_IM.py
index fcd60289a..f67c52fd7 100644
--- a/pelicun/resources/auto/Hazus_Earthquake_IM.py
+++ b/pelicun/resources/auto/Hazus_Earthquake_IM.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 Leland Stanford Junior University
# Copyright (c) 2023 The Regents of the University of California
@@ -36,22 +35,28 @@
#
# Contributors:
# Adam Zsarnóczay
+"""Hazus Earthquake IM."""
+
+from __future__ import annotations
+
import json
+
import pandas as pd
+
import pelicun
-ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'}
+ap_design_level = {1940: 'LC', 1975: 'MC', 2100: 'HC'}
# original:
# ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'}
# Note that the duplicated key is ignored, and Python keeps the last
# entry.
-ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'}
+ap_design_level_w1 = {0: 'LC', 1975: 'MC', 2100: 'HC'}
# original:
# ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'}
# same thing applies
-ap_Occupancy = {
+ap_occupancy = {
'Other/Unknown': 'RES3',
'Residential - Single-Family': 'RES1',
'Residential - Town-Home': 'RES3',
@@ -70,6 +75,9 @@
# Convert common length units
def convertUnits(value, unit_in, unit_out):
+ """
+ Convert units.
+ """
aval_types = ['m', 'mm', 'cm', 'km', 'inch', 'ft', 'mile']
m = 1.0
mm = 0.001 * m
@@ -89,117 +97,116 @@ def convertUnits(value, unit_in, unit_out):
}
if (unit_in not in aval_types) or (unit_out not in aval_types):
print(
- f"The unit {unit_in} or {unit_out} "
- f"are used in auto_population but not supported"
+        f'The unit {unit_in} or {unit_out} '
+        f'is used in auto_population but not supported'
)
- return
- value = value * scale_map[unit_in] / scale_map[unit_out]
- return value
+ return None
+ return value * scale_map[unit_in] / scale_map[unit_out]
-def convertBridgeToHAZUSclass(AIM):
+def convertBridgeToHAZUSclass(aim): # noqa: C901
# TODO: replace labels in AIM with standard CamelCase versions
- structureType = AIM["BridgeClass"]
+ structure_type = aim['BridgeClass']
# if (
- # type(structureType) == str
- # and len(structureType) > 3
- # and structureType[:3] == "HWB"
- # and 0 < int(structureType[3:])
- # and 29 > int(structureType[3:])
+ # type(structure_type) == str
+ # and len(structure_type) > 3
+ # and structure_type[:3] == "HWB"
+ # and 0 < int(structure_type[3:])
+ # and 29 > int(structure_type[3:])
# ):
# return AIM["bridge_class"]
- state = AIM["StateCode"]
- yr_built = AIM["YearBuilt"]
- num_span = AIM["NumOfSpans"]
- len_max_span = AIM["MaxSpanLength"]
- len_unit = AIM["units"]["length"]
- len_max_span = convertUnits(len_max_span, len_unit, "m")
+ state = aim['StateCode']
+ yr_built = aim['YearBuilt']
+ num_span = aim['NumOfSpans']
+ len_max_span = aim['MaxSpanLength']
+ len_unit = aim['units']['length']
+ len_max_span = convertUnits(len_max_span, len_unit, 'm')
seismic = (int(state) == 6 and int(yr_built) >= 1975) or (
int(state) != 6 and int(yr_built) >= 1990
)
# Use a catch-all, other class by default
- bridge_class = "HWB28"
+ bridge_class = 'HWB28'
if len_max_span > 150:
if not seismic:
- bridge_class = "HWB1"
+ bridge_class = 'HWB1'
else:
- bridge_class = "HWB2"
+ bridge_class = 'HWB2'
elif num_span == 1:
if not seismic:
- bridge_class = "HWB3"
+ bridge_class = 'HWB3'
else:
- bridge_class = "HWB4"
+ bridge_class = 'HWB4'
- elif structureType in list(range(101, 107)):
+ elif structure_type in list(range(101, 107)):
if not seismic:
if state != 6:
- bridge_class = "HWB5"
+ bridge_class = 'HWB5'
else:
- bridge_class = "HWB6"
+ bridge_class = 'HWB6'
else:
- bridge_class = "HWB7"
+ bridge_class = 'HWB7'
- elif structureType in [205, 206]:
+ elif structure_type in [205, 206]:
if not seismic:
- bridge_class = "HWB8"
+ bridge_class = 'HWB8'
else:
- bridge_class = "HWB9"
+ bridge_class = 'HWB9'
- elif structureType in list(range(201, 207)):
+ elif structure_type in list(range(201, 207)):
if not seismic:
- bridge_class = "HWB10"
+ bridge_class = 'HWB10'
else:
- bridge_class = "HWB11"
+ bridge_class = 'HWB11'
- elif structureType in list(range(301, 307)):
+ elif structure_type in list(range(301, 307)):
if not seismic:
if len_max_span >= 20:
if state != 6:
- bridge_class = "HWB12"
+ bridge_class = 'HWB12'
else:
- bridge_class = "HWB13"
+ bridge_class = 'HWB13'
else:
if state != 6:
- bridge_class = "HWB24"
+ bridge_class = 'HWB24'
else:
- bridge_class = "HWB25"
+ bridge_class = 'HWB25'
else:
- bridge_class = "HWB14"
+ bridge_class = 'HWB14'
- elif structureType in list(range(402, 411)):
+ elif structure_type in list(range(402, 411)):
if not seismic:
if len_max_span >= 20:
- bridge_class = "HWB15"
+ bridge_class = 'HWB15'
elif state != 6:
- bridge_class = "HWB26"
+ bridge_class = 'HWB26'
else:
- bridge_class = "HWB27"
+ bridge_class = 'HWB27'
else:
- bridge_class = "HWB16"
+ bridge_class = 'HWB16'
- elif structureType in list(range(501, 507)):
+ elif structure_type in list(range(501, 507)):
if not seismic:
if state != 6:
- bridge_class = "HWB17"
+ bridge_class = 'HWB17'
else:
- bridge_class = "HWB18"
+ bridge_class = 'HWB18'
else:
- bridge_class = "HWB19"
+ bridge_class = 'HWB19'
- elif structureType in [605, 606]:
+ elif structure_type in [605, 606]:
if not seismic:
- bridge_class = "HWB20"
+ bridge_class = 'HWB20'
else:
- bridge_class = "HWB21"
+ bridge_class = 'HWB21'
- elif structureType in list(range(601, 608)):
+ elif structure_type in list(range(601, 608)):
if not seismic:
- bridge_class = "HWB22"
+ bridge_class = 'HWB22'
else:
- bridge_class = "HWB23"
+ bridge_class = 'HWB23'
# TODO: review and add HWB24-27 rules
# TODO: also double check rules for HWB10-11 and HWB22-23
@@ -207,30 +214,30 @@ def convertBridgeToHAZUSclass(AIM):
return bridge_class
-def convertTunnelToHAZUSclass(AIM):
- if ("Bored" in AIM["ConstructType"]) or ("Drilled" in AIM["ConstructType"]):
- return "HTU1"
- elif ("Cut" in AIM["ConstructType"]) or ("Cover" in AIM["ConstructType"]):
- return "HTU2"
+def convertTunnelToHAZUSclass(aim) -> str:
+ if ('Bored' in aim['ConstructType']) or ('Drilled' in aim['ConstructType']):
+ return 'HTU1'
+ elif ('Cut' in aim['ConstructType']) or ('Cover' in aim['ConstructType']):
+ return 'HTU2'
else:
- # Select HTU2 for unclassfied tunnels because it is more conservative.
- return "HTU2"
+ # Select HTU2 for unclassified tunnels because it is more conservative.
+ return 'HTU2'
-def convertRoadToHAZUSclass(AIM):
- if AIM["RoadType"] in ["Primary", "Secondary"]:
- return "HRD1"
+def convertRoadToHAZUSclass(aim) -> str:
+ if aim['RoadType'] in ['Primary', 'Secondary']:
+ return 'HRD1'
- elif AIM["RoadType"] == "Residential":
- return "HRD2"
+ elif aim['RoadType'] == 'Residential':
+ return 'HRD2'
else:
# many unclassified roads are urban roads
- return "HRD2"
+ return 'HRD2'
-def convert_story_rise(structureType, stories):
- if structureType in ['W1', 'W2', 'S3', 'PC1', 'MH']:
+def convert_story_rise(structure_type, stories):
+ if structure_type in ['W1', 'W2', 'S3', 'PC1', 'MH']:
# These archetypes have no rise information in their IDs
rise = None
@@ -240,26 +247,19 @@ def convert_story_rise(structureType, stories):
stories = int(stories)
except (ValueError, TypeError):
- raise ValueError(
+ msg = (
'Missing "NumberOfStories" information, '
'cannot infer `rise` attribute of archetype'
)
+ raise ValueError(msg) # noqa: B904
- if structureType == 'RM1':
- if stories <= 3:
- rise = "L"
+ if structure_type == 'RM1':
+ rise = 'L' if stories <= 3 else 'M'
- else:
- rise = "M"
+ elif structure_type == 'URM':
+ rise = 'L' if stories <= 2 else 'M'
- elif structureType == 'URM':
- if stories <= 2:
- rise = "L"
-
- else:
- rise = "M"
-
- elif structureType in [
+ elif structure_type in [
'S1',
'S2',
'S4',
@@ -271,18 +271,18 @@ def convert_story_rise(structureType, stories):
'RM2',
]:
if stories <= 3:
- rise = "L"
+ rise = 'L'
elif stories <= 7:
- rise = "M"
+ rise = 'M'
else:
- rise = "H"
+ rise = 'H'
return rise
-def auto_populate(AIM):
+def auto_populate(aim): # noqa: C901
"""
Automatically creates a performance model for PGA-based Hazus EQ analysis.
@@ -306,286 +306,278 @@ def auto_populate(AIM):
CMP: DataFrame
Component assignment - Defines the components (in rows) and their
location, direction, and quantity (in columns).
- """
+ """
# extract the General Information
- GI = AIM.get('GeneralInformation', None)
+ gi = aim.get('GeneralInformation', None)
- if GI is None:
+ if gi is None:
# TODO: show an error message
pass
# initialize the auto-populated GI
- GI_ap = GI.copy()
+ gi_ap = gi.copy()
- assetType = AIM["assetType"]
- ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"]
+ asset_type = aim['assetType']
+ ground_failure = aim['Applications']['DL']['ApplicationData']['ground_failure']
- if assetType == "Buildings":
+ if asset_type == 'Buildings':
# get the building parameters
- bt = GI['StructureType'] # building type
+ bt = gi['StructureType'] # building type
# get the design level
- dl = GI.get('DesignLevel', None)
+ dl = gi.get('DesignLevel', None)
if dl is None:
# If there is no DesignLevel provided, we assume that the YearBuilt is
# available
- year_built = GI['YearBuilt']
+ year_built = gi['YearBuilt']
- if 'W1' in bt:
- DesignL = ap_DesignLevel_W1
- else:
- DesignL = ap_DesignLevel
+ design_l = ap_design_level_w1 if 'W1' in bt else ap_design_level
- for year in sorted(DesignL.keys()):
+ for year in sorted(design_l.keys()):
if year_built <= year:
- dl = DesignL[year]
+ dl = design_l[year]
break
- GI_ap['DesignLevel'] = dl
+ gi_ap['DesignLevel'] = dl
# get the number of stories / height
- stories = GI.get('NumberOfStories', None)
+ stories = gi.get('NumberOfStories', None)
# We assume that the structure type does not include height information
    # and we append it here based on the number-of-stories information
rise = convert_story_rise(bt, stories)
if rise is not None:
- LF = f'LF.{bt}.{rise}.{dl}'
- GI_ap['BuildingRise'] = rise
+ lf = f'LF.{bt}.{rise}.{dl}'
+ gi_ap['BuildingRise'] = rise
else:
- LF = f'LF.{bt}.{dl}'
+ lf = f'LF.{bt}.{dl}'
# fmt: off
- CMP = pd.DataFrame( # noqa
- {f'{LF}': ['ea', 1, 1, 1, 'N/A']}, # noqa
- index = ['Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp = pd.DataFrame(
+ {f'{lf}': ['ea', 1, 1, 1, 'N/A']}, # noqa: E241
+ index = ['Units','Location','Direction','Theta_0','Family'] # noqa: E231, E251
+ ).T
# fmt: on
# if needed, add components to simulate damage from ground failure
if ground_failure:
foundation_type = 'S'
- FG_GF_H = f'GF.H.{foundation_type}'
- FG_GF_V = f'GF.V.{foundation_type}'
+ fg_gf_h = f'GF.H.{foundation_type}'
+ fg_gf_v = f'GF.V.{foundation_type}'
# fmt: off
- CMP_GF = pd.DataFrame( # noqa
- {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'], # noqa
- f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp_gf = pd.DataFrame(
+ {f'{fg_gf_h}':[ 'ea', 1, 1, 1, 'N/A'], # noqa: E201, E231, E241
+ f'{fg_gf_v}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa: E201, E231, E241
+ index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251
+ ).T
# fmt: on
- CMP = pd.concat([CMP, CMP_GF], axis=0)
+ comp = pd.concat([comp, comp_gf], axis=0)
# set the number of stories to 1
# there is only one component in a building-level resolution
stories = 1
# get the occupancy class
- if GI['OccupancyClass'] in ap_Occupancy.keys():
- ot = ap_Occupancy[GI['OccupancyClass']]
+ if gi['OccupancyClass'] in ap_occupancy:
+ occupancy_type = ap_occupancy[gi['OccupancyClass']]
else:
- ot = GI['OccupancyClass']
-
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Buildings",
- "NumberOfStories": f"{stories}",
- "OccupancyType": f"{ot}",
- "PlanArea": "1",
+ occupancy_type = gi['OccupancyClass']
+
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Buildings',
+ 'NumberOfStories': f'{stories}',
+ 'OccupancyType': f'{occupancy_type}',
+ 'PlanArea': '1',
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
- "Losses": {
- "Repair": {
- "ConsequenceDatabase": "Hazus Earthquake - Buildings",
- "MapApproach": "Automatic",
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
+ 'Losses': {
+ 'Repair': {
+ 'ConsequenceDatabase': 'Hazus Earthquake - Buildings',
+ 'MapApproach': 'Automatic',
}
},
- "Options": {
- "NonDirectionalMultipliers": {"ALL": 1.0},
+ 'Options': {
+ 'NonDirectionalMultipliers': {'ALL': 1.0},
},
}
- elif assetType == "TransportationNetwork":
- inf_type = GI["assetSubtype"]
+ elif asset_type == 'TransportationNetwork':
+ inf_type = gi['assetSubtype']
- if inf_type == "HwyBridge":
+ if inf_type == 'HwyBridge':
# get the bridge class
- bt = convertBridgeToHAZUSclass(GI)
- GI_ap['BridgeHazusClass'] = bt
+ bt = convertBridgeToHAZUSclass(gi)
+ gi_ap['BridgeHazusClass'] = bt
# fmt: off
- CMP = pd.DataFrame( # noqa
- {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp = pd.DataFrame(
+ {f'HWB.GS.{bt[3:]}': [ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E241
+ index = [ 'Units', 'Location', 'Direction', 'Theta_0', 'Family'] # noqa: E201, E251
+ ).T
# fmt: on
# if needed, add components to simulate damage from ground failure
if ground_failure:
-
# fmt: off
- CMP_GF = pd.DataFrame( # noqa
- {f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp_gf = pd.DataFrame(
+ {f'HWB.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E241, F541
+ index = [ 'Units', 'Location', 'Direction', 'Theta_0', 'Family'] # noqa: E201, E251
+ ).T
# fmt: on
- CMP = pd.concat([CMP, CMP_GF], axis=0)
+ comp = pd.concat([comp, comp_gf], axis=0)
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Transportation",
- "BridgeHazusClass": bt,
- "PlanArea": "1",
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Transportation',
+ 'BridgeHazusClass': bt,
+ 'PlanArea': '1',
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
- "Losses": {
- "Repair": {
- "ConsequenceDatabase": "Hazus Earthquake - Transportation",
- "MapApproach": "Automatic",
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
+ 'Losses': {
+ 'Repair': {
+ 'ConsequenceDatabase': 'Hazus Earthquake - Transportation',
+ 'MapApproach': 'Automatic',
}
},
- "Options": {
- "NonDirectionalMultipliers": {"ALL": 1.0},
+ 'Options': {
+ 'NonDirectionalMultipliers': {'ALL': 1.0},
},
}
- elif inf_type == "HwyTunnel":
+ elif inf_type == 'HwyTunnel':
# get the tunnel class
- tt = convertTunnelToHAZUSclass(GI)
- GI_ap['TunnelHazusClass'] = tt
+ tt = convertTunnelToHAZUSclass(gi)
+ gi_ap['TunnelHazusClass'] = tt
# fmt: off
- CMP = pd.DataFrame( # noqa
- {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp = pd.DataFrame(
+ {f'HTU.GS.{tt[3:]}': [ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E241
+ index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251
+ ).T
# fmt: on
# if needed, add components to simulate damage from ground failure
if ground_failure:
-
# fmt: off
- CMP_GF = pd.DataFrame( # noqa
- {f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp_gf = pd.DataFrame(
+ {f'HTU.GF': [ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E241, F541
+ index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251
+ ).T
# fmt: on
- CMP = pd.concat([CMP, CMP_GF], axis=0)
+ comp = pd.concat([comp, comp_gf], axis=0)
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Transportation",
- "TunnelHazusClass": tt,
- "PlanArea": "1",
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Transportation',
+ 'TunnelHazusClass': tt,
+ 'PlanArea': '1',
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
- "Losses": {
- "Repair": {
- "ConsequenceDatabase": "Hazus Earthquake - Transportation",
- "MapApproach": "Automatic",
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
+ 'Losses': {
+ 'Repair': {
+ 'ConsequenceDatabase': 'Hazus Earthquake - Transportation',
+ 'MapApproach': 'Automatic',
}
},
- "Options": {
- "NonDirectionalMultipliers": {"ALL": 1.0},
+ 'Options': {
+ 'NonDirectionalMultipliers': {'ALL': 1.0},
},
}
- elif inf_type == "Roadway":
+ elif inf_type == 'Roadway':
# get the road class
- rt = convertRoadToHAZUSclass(GI)
- GI_ap['RoadHazusClass'] = rt
+ rt = convertRoadToHAZUSclass(gi)
+ gi_ap['RoadHazusClass'] = rt
# fmt: off
- CMP = pd.DataFrame( # noqa
- {}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp = pd.DataFrame(
+ {},
+ index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251
+ ).T
# fmt: on
if ground_failure:
-
# fmt: off
- CMP_GF = pd.DataFrame( # noqa
- {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp_gf = pd.DataFrame(
+ {f'HRD.GF.{rt[3:]}':[ 'ea', 1, 1, 1, 'N/A']}, # noqa: E201, E231, E241
+ index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa: E201, E231, E251
+ ).T
# fmt: on
- CMP = pd.concat([CMP, CMP_GF], axis=0)
+ comp = pd.concat([comp, comp_gf], axis=0)
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Transportation",
- "RoadHazusClass": rt,
- "PlanArea": "1",
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Transportation',
+ 'RoadHazusClass': rt,
+ 'PlanArea': '1',
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
- "Losses": {
- "Repair": {
- "ConsequenceDatabase": "Hazus Earthquake - Transportation",
- "MapApproach": "Automatic",
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
+ 'Losses': {
+ 'Repair': {
+ 'ConsequenceDatabase': 'Hazus Earthquake - Transportation',
+ 'MapApproach': 'Automatic',
}
},
- "Options": {
- "NonDirectionalMultipliers": {"ALL": 1.0},
+ 'Options': {
+ 'NonDirectionalMultipliers': {'ALL': 1.0},
},
}
else:
- print("subtype not supported in HWY")
+ print('subtype not supported in HWY')
- elif assetType == "WaterDistributionNetwork":
+ elif asset_type == 'WaterDistributionNetwork':
pipe_material_map = {
- "CI": "B",
- "AC": "B",
- "RCC": "B",
- "DI": "D",
- "PVC": "D",
- "DS": "D",
- "BS": "B",
+ 'CI': 'B',
+ 'AC': 'B',
+ 'RCC': 'B',
+ 'DI': 'D',
+ 'PVC': 'D',
+ 'DS': 'D',
+ 'BS': 'B',
}
# GI = AIM.get("GeneralInformation", None)
# if GI==None:
# initialize the auto-populated GI
- wdn_element_type = GI_ap.get("type", "MISSING")
- asset_name = GI_ap.get("AIM_id", None)
+ wdn_element_type = gi_ap.get('type', 'MISSING')
+ asset_name = gi_ap.get('AIM_id', None)
- if wdn_element_type == "Pipe":
- pipe_construction_year = GI_ap.get("year", None)
- pipe_diameter = GI_ap.get("Diam", None)
+ if wdn_element_type == 'Pipe':
+ pipe_construction_year = gi_ap.get('year', None)
+ pipe_diameter = gi_ap.get('Diam', None)
            # diameter value is a fundamental part of hydraulic
# performance assessment
if pipe_diameter is None:
- raise ValueError(
- f"pipe diamater in asset type {assetType}, \
- asset id \"{asset_name}\" has no diameter \
- value."
- )
+ msg = f'pipe diameter in asset type {asset_type}, \
+ asset id "{asset_name}" has no diameter \
+ value.'
+ raise ValueError(msg)
- pipe_length = GI_ap.get("Len", None)
+ pipe_length = gi_ap.get('Len', None)
# length value is a fundamental part of hydraulic performance assessment
            if pipe_length is None:
- raise ValueError(
- f"pipe length in asset type {assetType}, \
- asset id \"{asset_name}\" has no diameter \
- value."
- )
+                msg = f'pipe length in asset type {asset_type}, \
+                    asset id "{asset_name}" has no length \
+                    value.'
+ raise ValueError(msg)
- pipe_material = GI_ap.get("material", None)
+ pipe_material = gi_ap.get('material', None)
            # pipe material may be unavailable or named "missing"; in
            # both cases, pipe flexibility will be set to "missing"
@@ -595,60 +587,62 @@ def auto_populate(AIM):
missing, if the pipe is smaller than or equal to 20
            inches, the material is Cast Iron (CI); otherwise the pipe
material is steel.
- If the material is steel (ST), either based on user specified
- input or the assumption due to the lack of the user-input, the year
- that the pipe is constructed define the flexibility status per HAZUS
- instructions. If the pipe is built in 1935 or after, it is, the pipe
- is Ductile Steel (DS), and otherwise it is Brittle Steel (BS).
- If the pipe is missing construction year and is built by steel,
- we assume consevatively that the pipe is brittle (i.e., BS)
+                If the material is steel (ST), either based on user-specified
+                input or an assumption made due to the lack of user input, the
+                year that the pipe was constructed defines the flexibility
+                status per HAZUS instructions. If the pipe was built in 1935 or
+                later, it is Ductile Steel (DS); otherwise it is Brittle Steel
+                (BS). If the pipe is missing its construction year and is built
+                of steel, we conservatively assume that the pipe is brittle
+                (i.e., BS).
"""
if pipe_material is None:
if pipe_diameter > 20 * 0.0254: # 20 inches in meter
print(
- f"Asset {asset_name} is missing material. Material is\
- assumed to be Cast Iron"
+ f'Asset {asset_name} is missing material. Material is\
+ assumed to be Cast Iron'
)
- pipe_material = "CI"
+ pipe_material = 'CI'
else:
print(
- f"Asset {asset_name} is missing material. Material is "
- f"assumed to be Steel (ST)"
+ f'Asset {asset_name} is missing material. Material is '
+ f'assumed to be Steel (ST)'
)
- pipe_material = "ST"
+ pipe_material = 'ST'
- if pipe_material == "ST":
+ if pipe_material == 'ST':
if (pipe_construction_year is not None) and (
pipe_construction_year >= 1935
):
print(
- f"Asset {asset_name} has material of \"ST\" is assumed to be\
- Ductile Steel"
+                        f'Asset {asset_name} has material of "ST" and is assumed to be\
+                        Ductile Steel'
)
- pipe_material = "DS"
+ pipe_material = 'DS'
else:
print(
                        f'Asset {asset_name} has material of "ST" and is assumed to be '
f'Brittle Steel'
)
- pipe_material = "BS"
+ pipe_material = 'BS'
- pipe_flexibility = pipe_material_map.get(pipe_material, "missing")
+ pipe_flexibility = pipe_material_map.get(pipe_material, 'missing')
- GI_ap["material flexibility"] = pipe_flexibility
- GI_ap["material"] = pipe_material
+ gi_ap['material flexibility'] = pipe_flexibility
+ gi_ap['material'] = pipe_material
# Pipes are broken into 20ft segments (rounding up) and
# each segment is represented by an individual entry in
- # the performance model, `CMP`. The damage capcity of each
+ # the performance model, `CMP`. The damage capacity of each
# segment is assumed to be independent and driven by the
# same EDP. We therefore replicate the EDP associated with
- # the pipe to the various locations assgined to the
+ # the pipe to the various locations assigned to the
# segments.
# Determine number of segments
- pipe_length_unit = GI_ap['units']['length']
+ pipe_length_unit = gi_ap['units']['length']
pipe_length_feet = pelicun.base.convert_units(
pipe_length, unit=pipe_length_unit, to_unit='ft', category='length'
)
@@ -659,19 +653,16 @@ def auto_populate(AIM):
else:
# In all other cases, round up.
num_segments = int(pipe_length_feet / reference_length) + 1
- if num_segments > 1:
- location_string = f'1--{num_segments}'
- else:
- location_string = '1'
+ location_string = f'1--{num_segments}' if num_segments > 1 else '1'
# Define performance model
# fmt: off
- CMP = pd.DataFrame( # noqa
- {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'], # noqa
- f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'], # noqa
- 'aggregate': ['ea', location_string, '0', 1, 'N/A']}, # noqa
- index = ['Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
+ comp = pd.DataFrame(
+ {f'PWP.{pipe_flexibility}.GS': ['ea', location_string, '0', 1, 'N/A'],
+ f'PWP.{pipe_flexibility}.GF': ['ea', location_string, '0', 1, 'N/A'],
+ 'aggregate': ['ea', location_string, '0', 1, 'N/A']},
+ index = ['Units','Location','Direction','Theta_0','Family'] # noqa: E231, E251
+ ).T
# fmt: on
# Set up the demand cloning configuration for the pipe
@@ -688,122 +679,119 @@ def auto_populate(AIM):
)
demand_cloning_config = {}
for edp in response_data.columns:
- tag, location, direction = edp
+ tag, location, direction = edp # noqa: F841
demand_cloning_config['-'.join(edp)] = [
f'{tag}-{x}-{direction}'
- for x in [f'{i+1}' for i in range(num_segments)]
+ for x in [f'{i + 1}' for i in range(num_segments)]
]
demand_config = {'DemandCloning': demand_cloning_config}
# Create damage process
dmg_process = {
- f"1_PWP.{pipe_flexibility}.GS-LOC": {"DS1": "aggregate_DS1"},
- f"2_PWP.{pipe_flexibility}.GF-LOC": {"DS1": "aggregate_DS1"},
- f"3_PWP.{pipe_flexibility}.GS-LOC": {"DS2": "aggregate_DS2"},
- f"4_PWP.{pipe_flexibility}.GF-LOC": {"DS2": "aggregate_DS2"},
+ f'1_PWP.{pipe_flexibility}.GS-LOC': {'DS1': 'aggregate_DS1'},
+ f'2_PWP.{pipe_flexibility}.GF-LOC': {'DS1': 'aggregate_DS1'},
+ f'3_PWP.{pipe_flexibility}.GS-LOC': {'DS2': 'aggregate_DS2'},
+ f'4_PWP.{pipe_flexibility}.GF-LOC': {'DS2': 'aggregate_DS2'},
}
dmg_process_filename = 'dmg_process.json'
with open(dmg_process_filename, 'w', encoding='utf-8') as f:
json.dump(dmg_process, f, indent=2)
# Define the auto-populated config
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Water",
- "Material Flexibility": pipe_flexibility,
- "PlanArea": "1", # Sina: does not make sense for water.
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Water',
+ 'Material Flexibility': pipe_flexibility,
+ 'PlanArea': '1', # Sina: does not make sense for water.
                    # Kept it here since it was also
# kept here for Transportation
},
- "Damage": {
- "DamageProcess": "User Defined",
- "DamageProcessFilePath": "dmg_process.json",
+ 'Damage': {
+ 'DamageProcess': 'User Defined',
+ 'DamageProcessFilePath': 'dmg_process.json',
},
- "Demands": demand_config,
+ 'Demands': demand_config,
}
- elif wdn_element_type == "Tank":
+ elif wdn_element_type == 'Tank':
tank_cmp_lines = {
- ("OG", "C", 1): {'PST.G.C.A.GS': ['ea', 1, 1, 1, 'N/A']},
- ("OG", "C", 0): {'PST.G.C.U.GS': ['ea', 1, 1, 1, 'N/A']},
- ("OG", "S", 1): {'PST.G.S.A.GS': ['ea', 1, 1, 1, 'N/A']},
- ("OG", "S", 0): {'PST.G.S.U.GS': ['ea', 1, 1, 1, 'N/A']},
+ ('OG', 'C', 1): {'PST.G.C.A.GS': ['ea', 1, 1, 1, 'N/A']},
+ ('OG', 'C', 0): {'PST.G.C.U.GS': ['ea', 1, 1, 1, 'N/A']},
+ ('OG', 'S', 1): {'PST.G.S.A.GS': ['ea', 1, 1, 1, 'N/A']},
+ ('OG', 'S', 0): {'PST.G.S.U.GS': ['ea', 1, 1, 1, 'N/A']},
# Anchored status and Wood is not defined for On Ground tanks
- ("OG", "W", 0): {'PST.G.W.GS': ['ea', 1, 1, 1, 'N/A']},
+ ('OG', 'W', 0): {'PST.G.W.GS': ['ea', 1, 1, 1, 'N/A']},
# Anchored status and Steel is not defined for Above Ground tanks
- ("AG", "S", 0): {'PST.A.S.GS': ['ea', 1, 1, 1, 'N/A']},
+ ('AG', 'S', 0): {'PST.A.S.GS': ['ea', 1, 1, 1, 'N/A']},
# Anchored status and Concrete is not defined for Buried tanks.
- ("B", "C", 0): {'PST.B.C.GF': ['ea', 1, 1, 1, 'N/A']},
+ ('B', 'C', 0): {'PST.B.C.GF': ['ea', 1, 1, 1, 'N/A']},
}
# The default values are assumed: material = Concrete (C),
# location= On Ground (OG), and Anchored = 1
- tank_material = GI_ap.get("material", "C")
- tank_location = GI_ap.get("location", "OG")
- tank_anchored = GI_ap.get("anchored", int(1))
+ tank_material = gi_ap.get('material', 'C')
+ tank_location = gi_ap.get('location', 'OG')
+ tank_anchored = gi_ap.get('anchored', 1)
- tank_material_allowable = {"C", "S"}
+ tank_material_allowable = {'C', 'S'}
if tank_material not in tank_material_allowable:
- raise ValueError(
- f"Tank's material = \"{tank_material}\" is \
+ msg = f'Tank\'s material = "{tank_material}" is \
not allowable in tank {asset_name}. The \
material must be either C for concrete or S \
- for steel."
- )
+ for steel.'
+ raise ValueError(msg)
- tank_location_allowable = {"AG", "OG", "B"}
+ tank_location_allowable = {'AG', 'OG', 'B'}
if tank_location not in tank_location_allowable:
- raise ValueError(
- f"Tank's location = \"{tank_location}\" is \
+ msg = f'Tank\'s location = "{tank_location}" is \
not allowable in tank {asset_name}. The \
- location must be either \"AG\" for Above \
- ground, \"OG\" for On Ground or \"BG\" for \
- Bellow Ground (burried) Tanks."
- )
+ location must be either "AG" for Above \
+ ground, "OG" for On Ground or "BG" for \
+ Below Ground (buried) Tanks.'
+ raise ValueError(msg)
- tank_anchored_allowable = {int(0), int(1)}
+ tank_anchored_allowable = {0, 1}
if tank_anchored not in tank_anchored_allowable:
- raise ValueError(
- f"Tank's anchored status = \"{tank_location}\
- \" is not allowable in tank {asset_name}. \
+            msg = f'Tank\'s anchored status = "{tank_anchored}\
+ " is not allowable in tank {asset_name}. \
The anchored status must be either integer\
- value 0 for unachored, or 1 for anchored"
- )
+                value 0 for unanchored, or 1 for anchored'
+ raise ValueError(msg)
- if tank_location == "AG" and tank_material == "C":
+ if tank_location == 'AG' and tank_material == 'C':
print(
- f"The tank {asset_name} is Above Ground (i.e., AG), but \
- the material type is Concrete (\"C\"). Tank type \"C\" is not \
- defiend for AG tanks. The tank is assumed to be Steel (\"S\")"
+ f'The tank {asset_name} is Above Ground (i.e., AG), but \
+ the material type is Concrete ("C"). Tank type "C" is not \
+ defined for AG tanks. The tank is assumed to be Steel ("S")'
)
- tank_material = "S"
+ tank_material = 'S'
- if tank_location == "AG" and tank_material == "W":
+ if tank_location == 'AG' and tank_material == 'W':
print(
- f"The tank {asset_name} is Above Ground (i.e., AG), but \
- the material type is Wood (\"W\"). Tank type \"W\" is not \
- defiend for AG tanks. The tank is assumed to be Steel (\"S\")"
+ f'The tank {asset_name} is Above Ground (i.e., AG), but \
+ the material type is Wood ("W"). Tank type "W" is not \
+ defined for AG tanks. The tank is assumed to be Steel ("S")'
)
- tank_material = "S"
+ tank_material = 'S'
- if tank_location == "B" and tank_material == "S":
+ if tank_location == 'B' and tank_material == 'S':
print(
- f"The tank {asset_name} is burried (i.e., B), but the\
- material type is Steel (\"S\"). \
- Tank type \"S\" is not defiend for\
- B tanks. The tank is assumed to be Concrete (\"C\")"
+ f'The tank {asset_name} is buried (i.e., B), but the\
+ material type is Steel ("S"). \
+ Tank type "S" is not defined for\
+ B tanks. The tank is assumed to be Concrete ("C")'
)
- tank_material = "C"
+ tank_material = 'C'
- if tank_location == "B" and tank_material == "W":
+ if tank_location == 'B' and tank_material == 'W':
print(
- f"The tank {asset_name} is burried (i.e., B), but the\
- material type is Wood (\"W\"). Tank type \"W\" is not defiend \
- for B tanks. The tank is assumed to be Concrete (\"C\")"
+ f'The tank {asset_name} is buried (i.e., B), but the\
+ material type is Wood ("W"). Tank type "W" is not defined \
+ for B tanks. The tank is assumed to be Concrete ("C")'
)
- tank_material = "C"
+ tank_material = 'C'
if tank_anchored == 1:
            # Since anchored status does not matter, there is no need to
@@ -811,40 +799,40 @@ def auto_populate(AIM):
tank_anchored = 0
cur_tank_cmp_line = tank_cmp_lines[
- (tank_location, tank_material, tank_anchored)
+ tank_location, tank_material, tank_anchored
]
- CMP = pd.DataFrame(
+ comp = pd.DataFrame(
cur_tank_cmp_line,
index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
).T
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Water",
- "Material": tank_material,
- "Location": tank_location,
- "Anchored": tank_anchored,
- "PlanArea": "1", # Sina: does not make sense for water.
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Water',
+ 'Material': tank_material,
+ 'Location': tank_location,
+ 'Anchored': tank_anchored,
+ 'PlanArea': '1', # Sina: does not make sense for water.
                    # Kept it here since it was also kept here for Transportation
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
}
else:
print(
- f"Water Distribution network element type {wdn_element_type} "
- f"is not supported in Hazus Earthquake IM DL method"
+ f'Water Distribution network element type {wdn_element_type} '
+ f'is not supported in Hazus Earthquake IM DL method'
)
- DL_ap = None
- CMP = None
+ dl_ap = None
+ comp = None
else:
print(
- f"AssetType: {assetType} is not supported "
- f"in Hazus Earthquake IM DL method"
+ f'AssetType: {asset_type} is not supported '
+ f'in Hazus Earthquake IM DL method'
)
- return GI_ap, DL_ap, CMP
+ return gi_ap, dl_ap, comp
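The Pipe branch above splits each pipe into 20 ft segments and clones the pipe's EDP to every segment location. A self-contained sketch of just that arithmetic (the exact-multiple tolerance check is an assumption, since that branch falls outside the hunk shown):

pipe_length_feet = 73.0
reference_length = 20.0  # ft

if pipe_length_feet % reference_length < 1e-2:
    # Lengths that are (almost) exact multiples avoid an extra segment.
    num_segments = int(pipe_length_feet / reference_length)
else:
    # In all other cases, round up.
    num_segments = int(pipe_length_feet / reference_length) + 1

location_string = f'1--{num_segments}' if num_segments > 1 else '1'
print(num_segments, location_string)  # 4 '1--4'

# Each original EDP column (tag, location, direction) is replicated to all
# segment locations, matching the demand-cloning loop above:
tag, direction = 'PGV', '1'
cloned = [f'{tag}-{i + 1}-{direction}' for i in range(num_segments)]
print(cloned)  # ['PGV-1-1', 'PGV-2-1', 'PGV-3-1', 'PGV-4-1']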
diff --git a/pelicun/resources/auto/Hazus_Earthquake_Story.py b/pelicun/resources/auto/Hazus_Earthquake_Story.py
index 74ff13465..e7f7597f5 100644
--- a/pelicun/resources/auto/Hazus_Earthquake_Story.py
+++ b/pelicun/resources/auto/Hazus_Earthquake_Story.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2023 Leland Stanford Junior University
# Copyright (c) 2023 The Regents of the University of California
@@ -37,15 +36,17 @@
# Contributors:
# Adam Zsarnóczay
+from __future__ import annotations
+
import pandas as pd
-ap_DesignLevel = {1940: 'LC', 1975: 'MC', 2100: 'HC'}
+ap_design_level = {1940: 'LC', 1975: 'MC', 2100: 'HC'}
# ap_DesignLevel = {1940: 'PC', 1940: 'LC', 1975: 'MC', 2100: 'HC'}
-ap_DesignLevel_W1 = {0: 'LC', 1975: 'MC', 2100: 'HC'}
+ap_design_level_w1 = {0: 'LC', 1975: 'MC', 2100: 'HC'}
# ap_DesignLevel_W1 = {0: 'PC', 0: 'LC', 1975: 'MC', 2100: 'HC'}
-ap_Occupancy = {
+ap_occupancy = {
'Other/Unknown': 'RES3',
'Residential - Single-Family': 'RES1',
'Residential - Town-Home': 'RES3',
@@ -69,7 +70,7 @@
}
-def story_scale(stories, comp_type):
+def story_scale(stories, comp_type): # noqa: C901
if comp_type == 'NSA':
if stories == 1:
return 1.00
@@ -107,11 +108,7 @@ def story_scale(stories, comp_type):
return 2.75
elif stories == 5:
return 3.00
- elif stories == 6:
- return 3.50
- elif stories == 7:
- return 3.50
- elif stories == 8:
+ elif stories in (6, 7, 8):
return 3.50
elif stories == 9:
return 4.50
@@ -121,9 +118,10 @@ def story_scale(stories, comp_type):
return 7.30
else:
return 1.0
+ return None
-def auto_populate(AIM):
+def auto_populate(aim):
"""
Automatically creates a performance model for story EDP-based Hazus EQ analysis.
@@ -147,73 +145,70 @@ def auto_populate(AIM):
CMP: DataFrame
Component assignment - Defines the components (in rows) and their
location, direction, and quantity (in columns).
- """
+ """
# extract the General Information
- GI = AIM.get('GeneralInformation', None)
+ gi = aim.get('GeneralInformation', None)
- if GI is None:
+ if gi is None:
# TODO: show an error message
pass
# initialize the auto-populated GI
- GI_ap = GI.copy()
+ gi_ap = gi.copy()
- assetType = AIM["assetType"]
- ground_failure = AIM["Applications"]["DL"]["ApplicationData"]["ground_failure"]
+ asset_type = aim['assetType']
+ ground_failure = aim['Applications']['DL']['ApplicationData']['ground_failure']
- if assetType == "Buildings":
+ if asset_type == 'Buildings':
# get the building parameters
- bt = GI['StructureType'] # building type
+ bt = gi['StructureType'] # building type
# get the design level
- dl = GI.get('DesignLevel', None)
+ dl = gi.get('DesignLevel', None)
if dl is None:
# If there is no DesignLevel provided, we assume that the YearBuilt is
# available
- year_built = GI['YearBuilt']
+ year_built = gi['YearBuilt']
- if 'W1' in bt:
- DesignL = ap_DesignLevel_W1
- else:
- DesignL = ap_DesignLevel
+ design_l = ap_design_level_w1 if 'W1' in bt else ap_design_level
- for year in sorted(DesignL.keys()):
+ for year in sorted(design_l.keys()):
if year_built <= year:
- dl = DesignL[year]
+ dl = design_l[year]
break
- GI_ap['DesignLevel'] = dl
+ gi_ap['DesignLevel'] = dl
# get the number of stories / height
- stories = GI.get('NumberOfStories', None)
+ stories = gi.get('NumberOfStories', None)
- FG_S = f'STR.{bt}.{dl}'
- FG_NSD = 'NSD'
- FG_NSA = f'NSA.{dl}'
+ fg_s = f'STR.{bt}.{dl}'
+ fg_nsd = 'NSD'
+ fg_nsa = f'NSA.{dl}'
- CMP = pd.DataFrame(
+ comp = pd.DataFrame(
{
- f'{FG_S}': [
+ f'{fg_s}': [
'ea',
'all',
'1, 2',
- f"{story_scale(stories, 'S')/stories/2.}",
+ f"{story_scale(stories, 'S') / stories / 2.}",
'N/A',
],
- f'{FG_NSA}': [
+ f'{fg_nsa}': [
'ea',
'all',
0,
- f"{story_scale(stories, 'NSA')/stories}",
+ f"{story_scale(stories, 'NSA') / stories}",
'N/A',
],
- f'{FG_NSD}': [
+ f'{fg_nsd}': [
'ea',
'all',
'1, 2',
- f"{story_scale(stories, 'NSD')/stories/2.}",
+ f"{story_scale(stories, 'NSD') / stories / 2.}",
'N/A',
],
},
@@ -224,57 +219,57 @@ def auto_populate(AIM):
if ground_failure:
foundation_type = 'S'
- # fmt: off
- FG_GF_H = f'GF.H.{foundation_type}' # noqa
- FG_GF_V = f'GF.V.{foundation_type}' # noqa
- CMP_GF = pd.DataFrame( # noqa
- {f'{FG_GF_H}':[ 'ea', 1, 1, 1, 'N/A'], # noqa
- f'{FG_GF_V}':[ 'ea', 1, 3, 1, 'N/A']}, # noqa
- index = [ 'Units','Location','Direction','Theta_0','Family'] # noqa
- ).T # noqa
- # fmt: on
+ FG_GF_H = f'GF.H.{foundation_type}'
+ FG_GF_V = f'GF.V.{foundation_type}'
+ CMP_GF = pd.DataFrame(
+ {
+ f'{FG_GF_H}': ['ea', 1, 1, 1, 'N/A'],
+ f'{FG_GF_V}': ['ea', 1, 3, 1, 'N/A'],
+ },
+ index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
+ ).T
- CMP = pd.concat([CMP, CMP_GF], axis=0)
+ comp = pd.concat([comp, CMP_GF], axis=0)
# get the occupancy class
- if GI['OccupancyClass'] in ap_Occupancy.keys():
- ot = ap_Occupancy[GI['OccupancyClass']]
+ if gi['OccupancyClass'] in ap_occupancy:
+ occupancy_type = ap_occupancy[gi['OccupancyClass']]
else:
- ot = GI['OccupancyClass']
+ occupancy_type = gi['OccupancyClass']
- plan_area = GI.get('PlanArea', 1.0)
+ plan_area = gi.get('PlanArea', 1.0)
repair_config = {
- "ConsequenceDatabase": "Hazus Earthquake - Stories",
- "MapApproach": "Automatic",
- "DecisionVariables": {
- "Cost": True,
- "Carbon": False,
- "Energy": False,
- "Time": False,
+ 'ConsequenceDatabase': 'Hazus Earthquake - Stories',
+ 'MapApproach': 'Automatic',
+ 'DecisionVariables': {
+ 'Cost': True,
+ 'Carbon': False,
+ 'Energy': False,
+ 'Time': False,
},
}
- DL_ap = {
- "Asset": {
- "ComponentAssignmentFile": "CMP_QNT.csv",
- "ComponentDatabase": "Hazus Earthquake - Stories",
- "NumberOfStories": f"{stories}",
- "OccupancyType": f"{ot}",
- "PlanArea": str(plan_area),
+ dl_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Earthquake - Stories',
+ 'NumberOfStories': f'{stories}',
+ 'OccupancyType': f'{occupancy_type}',
+ 'PlanArea': str(plan_area),
},
- "Damage": {"DamageProcess": "Hazus Earthquake"},
- "Demands": {},
- "Losses": {"Repair": repair_config},
- "Options": {
- "NonDirectionalMultipliers": {"ALL": 1.0},
+ 'Damage': {'DamageProcess': 'Hazus Earthquake'},
+ 'Demands': {},
+ 'Losses': {'Repair': repair_config},
+ 'Options': {
+ 'NonDirectionalMultipliers': {'ALL': 1.0},
},
}
else:
print(
- f"AssetType: {assetType} is not supported "
- f"in Hazus Earthquake Story-based DL method"
+ f'AssetType: {asset_type} is not supported '
+ f'in Hazus Earthquake Story-based DL method'
)
- return GI_ap, DL_ap, CMP
+ return gi_ap, dl_ap, comp
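In the Story-based model above, a single building-level quantity is spread over all stories and, for the directional groups, over two directions, which is why Theta_0 is story_scale(...) / stories / 2. A sketch with a placeholder scale function (the real per-archetype mapping is the story_scale table in Hazus_Earthquake_Story.py):

def story_scale(stories: int, comp_type: str) -> float:
    # Placeholder: assume a flat factor of 3.0; the real table depends on
    # both the story count and the component type.
    return 3.0

stories = 5
# structural ('S') and drift-sensitive ('NSD') groups span two directions
theta_0_str = story_scale(stories, 'S') / stories / 2.0  # 0.3 per story/dir
# acceleration-sensitive ('NSA') groups are non-directional (direction 0)
theta_0_nsa = story_scale(stories, 'NSA') / stories      # 0.6 per story
print(theta_0_str, theta_0_nsa)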
diff --git a/pelicun/settings/default_config.json b/pelicun/settings/default_config.json
index 47fe005aa..1d9da5e5c 100644
--- a/pelicun/settings/default_config.json
+++ b/pelicun/settings/default_config.json
@@ -24,7 +24,14 @@
"SampleSize": 1000,
"PreserveRawOrder": false
},
- "RepairCostAndTimeCorrelation": 0.0
+ "RepairCostAndTimeCorrelation": 0.0,
+ "ErrorSetup": {
+ "Loss": {
+ "ReplacementThreshold": {
+ "RaiseOnUnknownKeys": true
+ }
+ }
+ }
},
"DemandAssessment": {
"Calibration": {
diff --git a/pelicun/settings/default_units.json b/pelicun/settings/default_units.json
index fe9a3122f..1b387b4a4 100644
--- a/pelicun/settings/default_units.json
+++ b/pelicun/settings/default_units.json
@@ -37,7 +37,7 @@
"inchps": 0.0254,
"ftps": 0.3048
},
- "accelleration": {
+ "acceleration": {
"mps2": 1.0,
"inps2": 0.0254,
"inchps2": 0.0254,
diff --git a/pelicun/settings/input_schema.json b/pelicun/settings/input_schema.json
new file mode 100644
index 000000000..19f54ce6b
--- /dev/null
+++ b/pelicun/settings/input_schema.json
@@ -0,0 +1,645 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "GeneralInformation": {
+ "type": "object",
+ "properties": {
+ "AssetName": {
+ "type": "string"
+ },
+ "AssetType": {
+ "type": "string"
+ },
+ "Location": {
+ "type": "object",
+ "properties": {
+ "Latitude": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "Longitude": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ }
+ },
+ "units": {
+ "type": "object",
+ "properties": {
+ "length": {
+ "type": "string",
+ "examples": [
+ "in",
+ "m"
+ ]
+ }
+ },
+ "required": [
+ "length"
+ ]
+ }
+ },
+ "required": [
+ "units"
+ ]
+ },
+ "assetType": {
+ "type": "string",
+ "examples": [
+ "Buildings"
+ ]
+ },
+ "DL": {
+ "type": "object",
+ "properties": {
+ "Demands": {
+ "type": "object",
+ "properties": {
+ "DemandFilePath": {
+ "type": "string"
+ },
+ "SampleSize": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "CoupledDemands": {
+ "type": "boolean"
+ },
+ "Calibration": {
+ "type": "object"
+ },
+ "CollapseLimits": {
+ "type": "object",
+ "patternProperties": {
+ ".*": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ }
+ },
+ "InferResidualDrift": {
+ "type": "object",
+ "properties": {
+ "method": {
+ "type": "string"
+ },
+ "x-direction": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "y-direction": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "method"
+ ]
+ }
+ },
+ "required": [
+ "DemandFilePath"
+ ]
+ },
+ "Asset": {
+ "type": "object",
+ "properties": {
+ "ComponentAssignmentFile": {
+ "type": "string"
+ },
+ "NumberOfStories": {
+ "anyOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "examples": [
+ 1,
+ 5,
+ 10
+ ]
+ },
+ "ComponentSampleFile": {
+ "type": "string"
+ },
+ "ComponentDatabase": {
+ "type": "string"
+ },
+ "ComponentDatabasePath": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "ComponentAssignmentFile"
+ ]
+ },
+ "Damage": {
+ "type": "object",
+ "properties": {
+ "CollapseFragility": {
+ "type": "object",
+ "properties": {
+ "DemandType": {
+ "type": "string",
+ "examples": [
+ "SA",
+ "PFA",
+ "PGA"
+ ]
+ },
+ "CapacityDistribution": {
+ "type": "string"
+ },
+ "CapacityMedian": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "Theta_1": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "DemandType",
+ "CapacityDistribution",
+ "CapacityMedian",
+ "Theta_1"
+ ]
+ },
+ "DamageProcess": {
+ "type": "string"
+ },
+ "IrreparableDamage": {
+ "type": "object",
+ "properties": {
+ "DriftCapacityMedian": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "DriftCapacityLogStd": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "DriftCapacityMedian",
+ "DriftCapacityLogStd"
+ ]
+ }
+ },
+ "required": [
+ "DamageProcess"
+ ]
+ },
+ "Losses": {
+ "type": "object",
+ "properties": {
+ "Repair": {
+ "type": "object",
+ "properties": {
+ "ConsequenceDatabase": {
+ "type": "string"
+ },
+ "MapApproach": {
+ "type": "string"
+ },
+ "MapFilePath": {
+ "type": "string"
+ },
+ "DecisionVariables": {
+ "type": "object",
+ "properties": {
+ "Cost": {
+ "type": "boolean"
+ },
+ "Time": {
+ "type": "boolean"
+ },
+ "Carbon": {
+ "type": "boolean"
+ },
+ "Energy": {
+ "type": "boolean"
+ }
+ }
+ },
+ "ConsequenceDatabasePath": {
+ "type": "string"
+ },
+ "ReplacementEnergy": {
+ "type": "object",
+ "properties": {
+ "Unit": {
+ "type": "string"
+ },
+ "Median": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "Distribution": {
+ "type": "string"
+ },
+ "Theta_1": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "Unit",
+ "Median",
+ "Distribution",
+ "Theta_1"
+ ]
+ },
+ "ReplacementCarbon": {
+ "type": "object",
+ "properties": {
+ "Unit": {
+ "type": "string"
+ },
+ "Median": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "Distribution": {
+ "type": "string"
+ },
+ "Theta_1": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "Unit",
+ "Median",
+ "Distribution",
+ "Theta_1"
+ ]
+ },
+ "ReplacementTime": {
+ "type": "object",
+ "properties": {
+ "Unit": {
+ "type": "string"
+ },
+ "Median": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "Distribution": {
+ "type": "string"
+ },
+ "Theta_1": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "Unit",
+ "Median",
+ "Distribution",
+ "Theta_1"
+ ]
+ },
+ "ReplacementCost": {
+ "type": "object",
+ "properties": {
+ "Unit": {
+ "type": "string"
+ },
+ "Median": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "Distribution": {
+ "type": "string"
+ },
+ "Theta_1": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ "required": [
+ "Unit",
+ "Median",
+ "Distribution",
+ "Theta_1"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "Outputs": {
+ "type": "object",
+ "properties": {
+ "Demand": {
+ "type": "object",
+ "properties": {
+ "Sample": {
+ "type": "boolean"
+ },
+ "Statistics": {
+ "type": "boolean"
+ }
+ }
+ },
+ "Asset": {
+ "type": "object",
+ "properties": {
+ "Sample": {
+ "type": "boolean"
+ },
+ "Statistics": {
+ "type": "boolean"
+ }
+ }
+ },
+ "Damage": {
+ "type": "object",
+ "properties": {
+ "Sample": {
+ "type": "boolean"
+ },
+ "Statistics": {
+ "type": "boolean"
+ },
+ "GroupedSample": {
+ "type": "boolean"
+ },
+ "GroupedStatistics": {
+ "type": "boolean"
+ }
+ }
+ },
+ "Loss": {
+ "type": "object",
+ "properties": {
+ "Repair": {
+ "type": "object",
+ "properties": {
+ "Sample": {
+ "type": "boolean"
+ },
+ "Statistics": {
+ "type": "boolean"
+ },
+ "GroupedSample": {
+ "type": "boolean"
+ },
+ "GroupedStatistics": {
+ "type": "boolean"
+ },
+ "AggregateSample": {
+ "type": "boolean"
+ },
+ "AggregateStatistics": {
+ "type": "boolean"
+ }
+ }
+ }
+ }
+ },
+ "Format": {
+ "type": "object",
+ "properties": {
+ "CSV": {
+ "type": "boolean"
+ },
+ "JSON": {
+ "type": "boolean"
+ }
+ }
+ },
+ "Settings": {
+ "type": "object",
+ "properties": {
+ "CondenseDS": {
+ "type": "boolean"
+ },
+ "SimpleIndexInJSON": {
+ "type": "boolean"
+ },
+ "AggregateColocatedComponentResults": {
+ "type": "boolean"
+ }
+ }
+ }
+ }
+ },
+ "Options": {
+ "type": "object",
+ "properties": {
+ "Options": {
+ "type": "boolean"
+ },
+ "Seed": {
+ "type": "integer"
+ },
+ "LogShowMS": {
+ "type": "boolean"
+ },
+ "LogFile": {
+ "type": "string"
+ },
+ "UnitsFile": {
+ "type": "string"
+ },
+ "PrintLog": {
+ "type": "boolean"
+ },
+ "ShowWarnings": {
+ "type": "boolean"
+ },
+ "DemandOffset": {
+ "type": "object"
+ },
+ "ListAllDamageStates": {
+ "type": "boolean"
+ },
+ "NonDirectionalMultipliers": {
+ "type": "object"
+ },
+ "EconomiesOfScale": {
+ "type": "object",
+ "properties": {
+ "AcrossFlorrs": {
+ "type": "boolean"
+ },
+ "AcrossDamageStates": {
+ "type": "boolean"
+ }
+ }
+ },
+ "Sampling": {
+ "type": "object",
+ "properties": {
+ "SamplingMethod": {
+ "type": "string"
+ },
+ "SampleSize": {
+ "type": "integer"
+ },
+ "PreserveRawOrder": {
+ "type": "boolean"
+ }
+ }
+ },
+ "RepairCostAndTimeCorrelation": {
+ "anyOf": [
+ {
+ "type": "number"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ }
+ },
+ "DemandAssessment": {
+ "type": "object",
+ "properties": {
+ "Calibration": {
+ "type": "object",
+ "properties": {
+ "Marginals": {
+ "type": "object"
+ }
+ }
+ }
+ }
+ },
+ "ApplicationData": {
+ "type": "object",
+ "properties": {
+ "ground_failure": {
+ "type": "boolean"
+ }
+ }
+ }
+ }
+ },
+ "Applications": {
+ "type": "object"
+ },
+ "auto_script_path": {
+ "type": "string"
+ }
+ },
+ "required": [
+ "GeneralInformation"
+ ]
+}
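
The schema only requires `GeneralInformation` (which in turn requires `units.length`), so a minimal input is tiny. A sketch of validating one against the new file with the third-party `jsonschema` package (assumed to be available in the environment):

```python
import json

from jsonschema import ValidationError, validate

with open('pelicun/settings/input_schema.json') as f:
    schema = json.load(f)

minimal_input = {'GeneralInformation': {'units': {'length': 'in'}}}

try:
    validate(instance=minimal_input, schema=schema)
    print('input is valid')
except ValidationError as exc:
    print(f'invalid input: {exc.message}')
```

Note that most numeric fields are declared as `anyOf` number/string, so values arriving as strings from upstream tools still validate.
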
diff --git a/pelicun/tests/__init__.py b/pelicun/tests/__init__.py
new file mode 100644
index 000000000..1d9bf2ac7
--- /dev/null
+++ b/pelicun/tests/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""Pelicun Unit Tests."""
diff --git a/pelicun/tests/basic/__init__.py b/pelicun/tests/basic/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/basic/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/data/assessment/test_assessment_calc_unit_scale_factor/custom_units.json b/pelicun/tests/basic/data/assessment/test_assessment_calc_unit_scale_factor/custom_units.json
similarity index 100%
rename from pelicun/tests/data/assessment/test_assessment_calc_unit_scale_factor/custom_units.json
rename to pelicun/tests/basic/data/assessment/test_assessment_calc_unit_scale_factor/custom_units.json
diff --git a/pelicun/tests/data/base/test_parse_units/additional_units_a.json b/pelicun/tests/basic/data/base/test_parse_units/additional_units_a.json
similarity index 100%
rename from pelicun/tests/data/base/test_parse_units/additional_units_a.json
rename to pelicun/tests/basic/data/base/test_parse_units/additional_units_a.json
diff --git a/pelicun/tests/data/base/test_parse_units/duplicate.json b/pelicun/tests/basic/data/base/test_parse_units/duplicate.json
similarity index 98%
rename from pelicun/tests/data/base/test_parse_units/duplicate.json
rename to pelicun/tests/basic/data/base/test_parse_units/duplicate.json
index 2fcbf47ca..1baa810f2 100644
--- a/pelicun/tests/data/base/test_parse_units/duplicate.json
+++ b/pelicun/tests/basic/data/base/test_parse_units/duplicate.json
@@ -39,7 +39,7 @@
"inchps": 0.0254,
"ftps": 0.3048
},
- "accelleration": {
+ "acceleration": {
"mps2": 1.0,
"inps2": 0.0254,
"inchps2": 0.0254,
diff --git a/pelicun/tests/data/base/test_parse_units/duplicate2.json b/pelicun/tests/basic/data/base/test_parse_units/duplicate2.json
similarity index 98%
rename from pelicun/tests/data/base/test_parse_units/duplicate2.json
rename to pelicun/tests/basic/data/base/test_parse_units/duplicate2.json
index f0c492e9a..70e60e630 100644
--- a/pelicun/tests/data/base/test_parse_units/duplicate2.json
+++ b/pelicun/tests/basic/data/base/test_parse_units/duplicate2.json
@@ -38,7 +38,7 @@
"inchps": 0.0254,
"ftps": 0.3048
},
- "accelleration": {
+ "acceleration": {
"mps2": 1.0,
"inps2": 0.0254,
"inchps2": 0.0254,
diff --git a/pelicun/tests/data/base/test_parse_units/invalid.json b/pelicun/tests/basic/data/base/test_parse_units/invalid.json
similarity index 100%
rename from pelicun/tests/data/base/test_parse_units/invalid.json
rename to pelicun/tests/basic/data/base/test_parse_units/invalid.json
diff --git a/pelicun/tests/data/base/test_parse_units/not_dict.json b/pelicun/tests/basic/data/base/test_parse_units/not_dict.json
similarity index 100%
rename from pelicun/tests/data/base/test_parse_units/not_dict.json
rename to pelicun/tests/basic/data/base/test_parse_units/not_dict.json
diff --git a/pelicun/tests/data/base/test_parse_units/not_float.json b/pelicun/tests/basic/data/base/test_parse_units/not_float.json
similarity index 100%
rename from pelicun/tests/data/base/test_parse_units/not_float.json
rename to pelicun/tests/basic/data/base/test_parse_units/not_float.json
diff --git a/pelicun/tests/data/file_io/test_load_data/no_units.csv b/pelicun/tests/basic/data/file_io/test_load_data/no_units.csv
similarity index 100%
rename from pelicun/tests/data/file_io/test_load_data/no_units.csv
rename to pelicun/tests/basic/data/file_io/test_load_data/no_units.csv
diff --git a/pelicun/tests/data/file_io/test_load_data/orient_1.csv b/pelicun/tests/basic/data/file_io/test_load_data/orient_1.csv
similarity index 100%
rename from pelicun/tests/data/file_io/test_load_data/orient_1.csv
rename to pelicun/tests/basic/data/file_io/test_load_data/orient_1.csv
diff --git a/pelicun/tests/data/file_io/test_load_data/orient_1_units.csv b/pelicun/tests/basic/data/file_io/test_load_data/orient_1_units.csv
similarity index 100%
rename from pelicun/tests/data/file_io/test_load_data/orient_1_units.csv
rename to pelicun/tests/basic/data/file_io/test_load_data/orient_1_units.csv
diff --git a/pelicun/tests/data/file_io/test_load_data/units.csv b/pelicun/tests/basic/data/file_io/test_load_data/units.csv
similarity index 100%
rename from pelicun/tests/data/file_io/test_load_data/units.csv
rename to pelicun/tests/basic/data/file_io/test_load_data/units.csv
diff --git a/pelicun/tests/data/model/test_AssetModel/CMP_marginals.csv b/pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals.csv
similarity index 100%
rename from pelicun/tests/data/model/test_AssetModel/CMP_marginals.csv
rename to pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals.csv
diff --git a/pelicun/tests/data/model/test_AssetModel/CMP_marginals_2.csv b/pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_2.csv
similarity index 100%
rename from pelicun/tests/data/model/test_AssetModel/CMP_marginals_2.csv
rename to pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_2.csv
diff --git a/pelicun/tests/data/model/test_AssetModel/CMP_marginals_block_weights.csv b/pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_block_weights.csv
similarity index 100%
rename from pelicun/tests/data/model/test_AssetModel/CMP_marginals_block_weights.csv
rename to pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_block_weights.csv
diff --git a/pelicun/tests/data/model/test_AssetModel/CMP_marginals_invalid_dir.csv b/pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_invalid_dir.csv
similarity index 100%
rename from pelicun/tests/data/model/test_AssetModel/CMP_marginals_invalid_dir.csv
rename to pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_invalid_dir.csv
diff --git a/pelicun/tests/data/model/test_AssetModel/CMP_marginals_invalid_loc.csv b/pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_invalid_loc.csv
similarity index 100%
rename from pelicun/tests/data/model/test_AssetModel/CMP_marginals_invalid_loc.csv
rename to pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_invalid_loc.csv
diff --git a/pelicun/tests/basic/data/model/test_DamageModel/_complete_ds_cols/parameters.csv b/pelicun/tests/basic/data/model/test_DamageModel/_complete_ds_cols/parameters.csv
new file mode 100644
index 000000000..30591acb7
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_DamageModel/_complete_ds_cols/parameters.csv
@@ -0,0 +1,3 @@
+ID,Demand-Directional,Demand-Offset,Demand-Type,Incomplete-,LS1-DamageStateWeights,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS2-DamageStateWeights,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS3-DamageStateWeights,LS3-Family,LS3-Theta_0,LS3-Theta_1,LS4-DamageStateWeights,LS4-Family,LS4-Theta_0,LS4-Theta_1
+many.ds,0,0,Peak Floor Acceleration,0,0.350000 | 0.150000 | 0.500000,lognormal,29.41995,0.5,,,,,,,,,,,,
+single.ds,0,0,Peak Floor Acceleration,0,,lognormal,29.41995,0.5,,,,,,,,,,,,
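
In this fixture, `LS1-DamageStateWeights` splits a single limit state into mutually exclusive damage states, and the lognormal capacity is parameterized by a median (`LS1-Theta_0`) and a log standard deviation (`LS1-Theta_1`). A sketch of evaluating exceedance and per-DS probabilities for one demand value under that convention (the demand value itself is hypothetical):

```python
import numpy as np
from scipy.stats import norm

theta_0, theta_1 = 29.41995, 0.5  # median and log-std from parameters.csv
weights = [
    float(w) for w in '0.350000 | 0.150000 | 0.500000'.split('|')
]  # -> [0.35, 0.15, 0.50], summing to 1.0

demand = 30.0  # hypothetical peak floor acceleration
p_exceed = norm.cdf(np.log(demand / theta_0) / theta_1)

# probability of landing in each of the three damage states
p_ds = [p_exceed * w for w in weights]
```
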
diff --git a/pelicun/tests/basic/data/model/test_DamageModel/load_model_parameters/damage_db.csv b/pelicun/tests/basic/data/model/test_DamageModel/load_model_parameters/damage_db.csv
new file mode 100644
index 000000000..7472a569e
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_DamageModel/load_model_parameters/damage_db.csv
@@ -0,0 +1,5 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS1-DamageStateWeights,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS2-DamageStateWeights,LS3-Family,LS3-Theta_0,LS3-Theta_1,LS3-DamageStateWeights
+component.A,0,Peak Interstory Drift Ratio,unitless,0,1,lognormal,0.02,0.3,0.950000 | 0.050000,lognormal,0.04,0.3,,lognormal,0.08,0.3,
+component.B,0,Peak Floor Acceleration,g,0,0,lognormal,0.2,0.6,,lognormal,0.4,0.6,,,,,
+component.C,0,Peak Floor Acceleration,g,0,0,lognormal,0.2,0.6,,lognormal,0.4,0.6,,,,,
+component.incomplete,1,Peak Floor Acceleration,g,0,0,lognormal,0.2,0.6,,lognormal,0.4,0.6,,,,,
diff --git a/pelicun/tests/basic/data/model/test_DemandModel/_get_required_demand_type/damage_db_testing_single.csv b/pelicun/tests/basic/data/model/test_DemandModel/_get_required_demand_type/damage_db_testing_single.csv
new file mode 100644
index 000000000..ad9169036
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_DemandModel/_get_required_demand_type/damage_db_testing_single.csv
@@ -0,0 +1,2 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS1-DamageStateWeights,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS2-DamageStateWeights,LS3-Family,LS3-Theta_0,LS3-Theta_1,LS3-DamageStateWeights,LS4-Family,LS4-Theta_0,LS4-Theta_1,LS4-DamageStateWeights
+testing.component,0,Peak Interstory Drift Ratio,unitless,0,1,lognormal,0.04,0.4,0.950000 | 0.050000,lognormal,0.08,0.4,,lognormal,0.11,0.4,,,,,
diff --git a/pelicun/tests/basic/data/model/test_DemandModel/_get_required_demand_type/damage_db_testing_utility.csv b/pelicun/tests/basic/data/model/test_DemandModel/_get_required_demand_type/damage_db_testing_utility.csv
new file mode 100644
index 000000000..f3a6811fb
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_DemandModel/_get_required_demand_type/damage_db_testing_utility.csv
@@ -0,0 +1,2 @@
+ID,Incomplete,Demand-Expression,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,Demand2-Type,Demand2-Unit,Demand2-Offset,Demand2-Directional,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS1-DamageStateWeights,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS2-DamageStateWeights,LS3-Family,LS3-Theta_0,LS3-Theta_1,LS3-DamageStateWeights,LS4-Family,LS4-Theta_0,LS4-Theta_1,LS4-DamageStateWeights
+testing.component,0,sqrt(X1^2+X2^2),Peak Interstory Drift Ratio,unitless,0,1,Peak Floor Acceleration,unitless,0,1,lognormal,0.04,0.4,0.950000 | 0.050000,lognormal,0.08,0.4,,lognormal,0.11,0.4,,,,,
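
The `Demand-Expression` column collapses the two listed demands into a single controlling demand before the fragility is evaluated. A numpy sketch of what `sqrt(X1^2+X2^2)` computes over sampled realizations; the mapping of `X1`/`X2` to the first and second demand columns is an assumption from the column order:

```python
import numpy as np

x1 = np.array([0.010, 0.020, 0.015])  # e.g. sampled drift ratios
x2 = np.array([0.400, 0.800, 0.600])  # e.g. sampled accelerations

combined = np.sqrt(x1**2 + x2**2)  # resultant demand fed to the fragility
```
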
diff --git a/pelicun/tests/data/model/test_DemandModel_estimate_RID/demand_sample_A.csv b/pelicun/tests/basic/data/model/test_DemandModel/estimate_RID/demand_sample_A.csv
similarity index 100%
rename from pelicun/tests/data/model/test_DemandModel_estimate_RID/demand_sample_A.csv
rename to pelicun/tests/basic/data/model/test_DemandModel/estimate_RID/demand_sample_A.csv
diff --git a/pelicun/tests/data/model/test_DemandModel_generate_sample_with_demand_cloning/sample.csv b/pelicun/tests/basic/data/model/test_DemandModel/generate_sample_with_demand_cloning/sample.csv
similarity index 100%
rename from pelicun/tests/data/model/test_DemandModel_generate_sample_with_demand_cloning/sample.csv
rename to pelicun/tests/basic/data/model/test_DemandModel/generate_sample_with_demand_cloning/sample.csv
diff --git a/pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_A.csv b/pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_A.csv
similarity index 100%
rename from pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_A.csv
rename to pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_A.csv
diff --git a/pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_B.csv b/pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_B.csv
similarity index 100%
rename from pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_B.csv
rename to pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_B.csv
diff --git a/pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_C.csv b/pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_C.csv
similarity index 100%
rename from pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_C.csv
rename to pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_C.csv
diff --git a/pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_D.csv b/pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_D.csv
similarity index 100%
rename from pelicun/tests/data/model/test_DemandModel_load_sample/demand_sample_D.csv
rename to pelicun/tests/basic/data/model/test_DemandModel/load_sample/demand_sample_D.csv
diff --git a/pelicun/tests/basic/data/model/test_LossModel/CMP_marginals.csv b/pelicun/tests/basic/data/model/test_LossModel/CMP_marginals.csv
new file mode 100755
index 000000000..00e4c015f
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/CMP_marginals.csv
@@ -0,0 +1,7 @@
+,Units,Location,Direction,Theta_0,Blocks
+cmp.A,ea,0,0,1
+cmp.B,ea,0,1,1
+cmp.C,ea,0,2,1
+cmp.D,ea,1,1,1,4
+cmp.E,ea,1,1,2
+cmp.F,ea,1,1,2
diff --git a/pelicun/tests/basic/data/model/test_LossModel/loss_function_flood.csv b/pelicun/tests/basic/data/model/test_LossModel/loss_function_flood.csv
new file mode 100644
index 000000000..2023b44f4
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/loss_function_flood.csv
@@ -0,0 +1,2 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,DV-Unit,LossFunction-Theta_0
+flood.comp-Cost,0,Peak Inundation Height,in,0,0,loss_ratio,"0.00,1.00|0.00,10.00"
diff --git a/pelicun/tests/basic/data/model/test_LossModel/loss_function_wind.csv b/pelicun/tests/basic/data/model/test_LossModel/loss_function_wind.csv
new file mode 100644
index 000000000..68a9dede1
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/loss_function_wind.csv
@@ -0,0 +1,2 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,DV-Unit,LossFunction-Theta_0
+wind.comp-Cost,0,Peak Gust Wind Speed,mph,0,0,loss_ratio,"0.00,1.00|0.00,150.00"
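
The `LossFunction-Theta_0` strings in these two fixtures appear to encode a piecewise-linear curve as `"y1,y2|x1,x2"` — e.g. loss ratio rising from 0 to 1 as gust speed goes from 0 to 150 mph. A sketch that parses the string and interpolates with numpy; the ordering of the two halves is inferred from the values, so treat it as an assumption:

```python
import numpy as np

theta_0 = '0.00,1.00|0.00,150.00'
ys, xs = (
    np.array([float(v) for v in part.split(',')])
    for part in theta_0.split('|')
)

demand = np.array([50.0, 150.0, 200.0])  # peak gust wind speed, mph
loss_ratio = np.interp(demand, xs, ys)   # -> [0.333..., 1.0, 1.0]
```
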
diff --git a/pelicun/tests/basic/data/model/test_LossModel/scaled_losses_ds.csv b/pelicun/tests/basic/data/model/test_LossModel/scaled_losses_ds.csv
new file mode 100644
index 000000000..7db96f008
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/scaled_losses_ds.csv
@@ -0,0 +1,129 @@
+dv,loss,dmg,ds,loc,dir,uid,0,1,2
+Cost,cmp.A.consequence,cmp.A,DS1,1,1,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,1,1,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,1,2,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,1,2,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,2,1,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,2,1,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,2,2,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS1,2,2,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,1,1,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,1,1,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,1,2,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,1,2,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,2,1,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,2,1,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,2,2,uid1,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.A,DS2,2,2,uid2,2.0,2.0,2.0
+Cost,cmp.A.consequence,cmp.B,DS1,1,1,uid1,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS1,1,1,uid2,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS1,1,2,uid1,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS1,1,2,uid2,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS1,2,1,uid1,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS1,2,1,uid2,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS1,2,2,uid1,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS1,2,2,uid2,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS2,1,1,uid1,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS2,1,1,uid2,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS2,1,2,uid1,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS2,1,2,uid2,12.0,12.0,12.0
+Cost,cmp.A.consequence,cmp.B,DS2,2,1,uid1,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS2,2,1,uid2,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS2,2,2,uid1,3.0,3.0,3.0
+Cost,cmp.A.consequence,cmp.B,DS2,2,2,uid2,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.A,DS1,1,1,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,1,1,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,1,2,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,1,2,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,2,1,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,2,1,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,2,2,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS1,2,2,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,1,1,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,1,1,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,1,2,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,1,2,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,2,1,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,2,1,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,2,2,uid1,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.A,DS2,2,2,uid2,2.0,2.0,2.0
+Cost,cmp.B.consequence,cmp.B,DS1,1,1,uid1,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS1,1,1,uid2,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS1,1,2,uid1,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS1,1,2,uid2,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS1,2,1,uid1,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS1,2,1,uid2,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS1,2,2,uid1,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS1,2,2,uid2,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS2,1,1,uid1,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS2,1,1,uid2,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS2,1,2,uid1,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS2,1,2,uid2,12.0,12.0,12.0
+Cost,cmp.B.consequence,cmp.B,DS2,2,1,uid1,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS2,2,1,uid2,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS2,2,2,uid1,3.0,3.0,3.0
+Cost,cmp.B.consequence,cmp.B,DS2,2,2,uid2,3.0,3.0,3.0
+Carbon,cmp.A.consequence,cmp.A,DS1,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS1,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.A,DS2,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS1,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.A.consequence,cmp.B,DS2,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS1,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.A,DS2,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS1,2,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,1,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,1,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,1,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,1,2,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,2,1,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,2,1,uid2,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,2,2,uid1,1.0,1.0,1.0
+Carbon,cmp.B.consequence,cmp.B,DS2,2,2,uid2,1.0,1.0,1.0
diff --git a/pelicun/tests/basic/data/model/test_LossModel/scaled_losses_lf.csv b/pelicun/tests/basic/data/model/test_LossModel/scaled_losses_lf.csv
new file mode 100644
index 000000000..2cc5e997f
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/scaled_losses_lf.csv
@@ -0,0 +1,65 @@
+dv,loss,dmg,loc,dir,uid,0,1,2
+Cost,cmp.A.consequence,cmp.A,1,1,uid1,2,2,2
+Cost,cmp.A.consequence,cmp.A,1,1,uid2,2,2,2
+Cost,cmp.A.consequence,cmp.A,1,2,uid1,2,2,2
+Cost,cmp.A.consequence,cmp.A,1,2,uid2,2,2,2
+Cost,cmp.A.consequence,cmp.A,2,1,uid1,2,2,2
+Cost,cmp.A.consequence,cmp.A,2,1,uid2,2,2,2
+Cost,cmp.A.consequence,cmp.A,2,2,uid1,2,2,2
+Cost,cmp.A.consequence,cmp.A,2,2,uid2,2,2,2
+Cost,cmp.A.consequence,cmp.B,1,1,uid1,12,12,12
+Cost,cmp.A.consequence,cmp.B,1,1,uid2,12,12,12
+Cost,cmp.A.consequence,cmp.B,1,2,uid1,12,12,12
+Cost,cmp.A.consequence,cmp.B,1,2,uid2,12,12,12
+Cost,cmp.A.consequence,cmp.B,2,1,uid1,3,3,3
+Cost,cmp.A.consequence,cmp.B,2,1,uid2,3,3,3
+Cost,cmp.A.consequence,cmp.B,2,2,uid1,3,3,3
+Cost,cmp.A.consequence,cmp.B,2,2,uid2,3,3,3
+Cost,cmp.B.consequence,cmp.A,1,1,uid1,2,2,2
+Cost,cmp.B.consequence,cmp.A,1,1,uid2,2,2,2
+Cost,cmp.B.consequence,cmp.A,1,2,uid1,2,2,2
+Cost,cmp.B.consequence,cmp.A,1,2,uid2,2,2,2
+Cost,cmp.B.consequence,cmp.A,2,1,uid1,2,2,2
+Cost,cmp.B.consequence,cmp.A,2,1,uid2,2,2,2
+Cost,cmp.B.consequence,cmp.A,2,2,uid1,2,2,2
+Cost,cmp.B.consequence,cmp.A,2,2,uid2,2,2,2
+Cost,cmp.B.consequence,cmp.B,1,1,uid1,12,12,12
+Cost,cmp.B.consequence,cmp.B,1,1,uid2,12,12,12
+Cost,cmp.B.consequence,cmp.B,1,2,uid1,12,12,12
+Cost,cmp.B.consequence,cmp.B,1,2,uid2,12,12,12
+Cost,cmp.B.consequence,cmp.B,2,1,uid1,3,3,3
+Cost,cmp.B.consequence,cmp.B,2,1,uid2,3,3,3
+Cost,cmp.B.consequence,cmp.B,2,2,uid1,3,3,3
+Cost,cmp.B.consequence,cmp.B,2,2,uid2,3,3,3
+Carbon,cmp.A.consequence,cmp.A,1,1,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.A,1,1,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.A,1,2,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.A,1,2,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.A,2,1,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.A,2,1,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.A,2,2,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.A,2,2,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.B,1,1,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.B,1,1,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.B,1,2,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.B,1,2,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.B,2,1,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.B,2,1,uid2,1,1,1
+Carbon,cmp.A.consequence,cmp.B,2,2,uid1,1,1,1
+Carbon,cmp.A.consequence,cmp.B,2,2,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.A,1,1,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.A,1,1,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.A,1,2,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.A,1,2,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.A,2,1,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.A,2,1,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.A,2,2,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.A,2,2,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.B,1,1,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.B,1,1,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.B,1,2,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.B,1,2,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.B,2,1,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.B,2,1,uid2,1,1,1
+Carbon,cmp.B.consequence,cmp.B,2,2,uid1,1,1,1
+Carbon,cmp.B.consequence,cmp.B,2,2,uid2,1,1,1
diff --git a/pelicun/tests/basic/data/model/test_LossModel/scaling_specification.csv b/pelicun/tests/basic/data/model/test_LossModel/scaling_specification.csv
new file mode 100644
index 000000000..ca8caba5f
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/scaling_specification.csv
@@ -0,0 +1,5 @@
+Decision Variable,Component,Location,Direction,Scale Factor
+Cost,cmp.A,,,2
+Cost,cmp.B,1--2,,3
+Cost,cmp.B,1,,4
+Cost,missing,,,5
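
Each row of this fixture scales the losses of one (decision variable, component) pair, with an empty Location meaning "all" and `1--2` denoting an inclusive range. A hypothetical parser for that range syntax, written only to illustrate the fixture's conventions:

```python
def parse_locations(spec: str) -> list[int] | None:
    """Expand a location spec like '1--2'; empty means all locations."""
    if not spec:
        return None  # row applies everywhere
    if '--' in spec:
        start, end = (int(x) for x in spec.split('--'))
        return list(range(start, end + 1))
    return [int(spec)]

assert parse_locations('1--2') == [1, 2]
assert parse_locations('1') == [1]
assert parse_locations('') is None
```

The `missing` row exercises the case where a scaling entry references a component absent from the damage sample, presumably to test the new `ErrorSetup` behavior.
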
diff --git a/pelicun/tests/basic/data/model/test_LossModel/testing_damage_DB.csv b/pelicun/tests/basic/data/model/test_LossModel/testing_damage_DB.csv
new file mode 100644
index 000000000..01f6023c9
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/testing_damage_DB.csv
@@ -0,0 +1,5 @@
+ID,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,Incomplete-,LS1-DamageStateWeights,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS2-DamageStateWeights,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS3-DamageStateWeights,LS3-Family,LS3-Theta_0,LS3-Theta_1,LS4-DamageStateWeights,LS4-Family,LS4-Theta_0,LS4-Theta_1
+cmp.A,0,0,Peak Floor Acceleration,g,0,0.20 | 0.80,,4.00,,,,,,,,,,,,,
+cmp.B,1,0,Peak Floor Acceleration,g,0,,,4.00,,,,8.00,,,,,,,,,
+cmp.C,1,0,Peak Floor Acceleration,g,0,,,4.00,,,,,,,,,,,,,
+cmp.D,1,0,Peak Floor Acceleration,g,0,,,4.00,,,,,,,,,,,,,
diff --git a/pelicun/tests/basic/data/model/test_LossModel/testing_loss_function_DB.csv b/pelicun/tests/basic/data/model/test_LossModel/testing_loss_function_DB.csv
new file mode 100644
index 000000000..fb305573d
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/testing_loss_function_DB.csv
@@ -0,0 +1,5 @@
+-,DV-Unit,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,LossFunction-Theta_0,LossFunction-Theta_1,LossFunction-Family
+cmp.lf.A-Cost,USD_2011,0,0,Peak Floor Acceleration,g,"0.00,1.00|0.00,10.00",,
+cmp.lf.A-Time,worker_day,0,0,Peak Floor Acceleration,g,"0.00,1.00|0.00,10.00",,
+cmp.lf.B-Cost,USD_2011,0,0,Peak Floor Acceleration,g,"0.00,1.00|0.00,10.00",,
+cmp.lf.B-Time,worker_day,0,0,Peak Floor Acceleration,g,"0.00,1.00|0.00,10.00",,
diff --git a/pelicun/tests/basic/data/model/test_LossModel/testing_repair_DB.csv b/pelicun/tests/basic/data/model/test_LossModel/testing_repair_DB.csv
new file mode 100644
index 000000000..818e32689
--- /dev/null
+++ b/pelicun/tests/basic/data/model/test_LossModel/testing_repair_DB.csv
@@ -0,0 +1,25 @@
+ID,Incomplete,Quantity-Unit,DV-Unit,DS1-Family,DS1-Theta_0,DS1-Theta_1,DS1-LongLeadTime,DS2-Family,DS2-Theta_0,DS2-Theta_1,DS2-LongLeadTime
+cmp.A-Cost,0,1 EA,USD_2011,,1,,,,2,,
+cmp.A-Time,0,1 EA,worker_day,,1,,0,,2,,0
+cmp.A-Carbon,0,1 EA,kg,,1,,,,2,,
+cmp.A-Energy,0,1 EA,MJ,,1,,,,2,,
+cmp.B-Cost,0,1 EA,USD_2011,,1,,,,,,
+cmp.B-Time,0,1 EA,worker_day,,1,,0,,,,
+cmp.B-Carbon,0,1 EA,kg,,1,,,,,,
+cmp.B-Energy,0,1 EA,MJ,,1,,,,,,
+cmp.C-Cost,0,1 EA,USD_2011,,1,,,,,,
+cmp.C-Time,0,1 EA,worker_day,,1,,0,,,,
+cmp.C-Carbon,0,1 EA,kg,,1,,,,,,
+cmp.C-Energy,0,1 EA,MJ,,1,,,,,,
+cmp.D-Cost,0,1 EA,USD_2011,,1,,,,,,
+cmp.D-Time,0,1 EA,worker_day,,1,,0,,,,
+cmp.D-Carbon,0,1 EA,kg,,1,,,,,,
+cmp.D-Energy,0,1 EA,MJ,,1,,,,,,
+cmp.E-Cost,0,1 EA,USD_2011,,1,,,,,,
+cmp.E-Time,0,1 EA,worker_day,,1,,0,,,,
+cmp.E-Carbon,0,1 EA,kg,,1,,,,,,
+cmp.E-Energy,0,1 EA,MJ,,1,,,,,,
+cmp.F-Cost,0,1 EA,USD_2011,,1,,,,,,
+cmp.F-Time,0,1 EA,worker_day,,1,,0,,,,
+cmp.F-Carbon,0,1 EA,kg,,1,,,,,,
+cmp.F-Energy,0,1 EA,MJ,,1,,,,,,
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_1.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_1.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_1.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_1.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_10.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_10.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_10.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_10.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_11.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_11.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_11.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_11.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_12.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_12.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_12.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_12.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_13.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_13.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_13.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_13.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_14.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_14.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_14.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_14.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_15.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_15.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_15.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_15.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_16.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_16.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_16.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_16.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_17.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_17.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_17.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_17.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_18.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_18.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_18.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_18.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_2.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_2.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_2.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_2.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_3.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_3.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_3.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_3.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_4.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_4.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_4.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_4.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_5.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_5.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_5.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_5.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_6.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_6.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_6.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_6.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_7.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_7.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_7.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_7.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_8.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_8.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_8.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_8.pcl
diff --git a/pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_9.pcl b/pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_9.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample/test_9.pcl
rename to pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample/test_9.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_apply_correlation/test_1.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_apply_correlation/test_1.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_apply_correlation/test_1.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_apply_correlation/test_1.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_apply_correlation/test_2.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_apply_correlation/test_2.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_apply_correlation/test_2.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_apply_correlation/test_2.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_1.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_1.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_1.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_1.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_10.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_10.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_10.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_10.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_11.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_11.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_11.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_11.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_12.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_12.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_12.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_12.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_13.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_13.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_13.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_13.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_14.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_14.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_14.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_14.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_15.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_15.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_15.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_15.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_16.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_16.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_16.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_16.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_17.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_17.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_17.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_17.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_18.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_18.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_18.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_18.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_19.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_19.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_19.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_19.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_2.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_2.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_2.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_2.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_20.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_20.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_20.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_20.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_21.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_21.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_21.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_21.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_22.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_22.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_22.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_22.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_23.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_23.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_23.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_23.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_24.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_24.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_24.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_24.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_25.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_25.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_25.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_25.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_26.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_26.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_26.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_26.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_27.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_27.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_27.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_27.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_28.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_28.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_28.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_28.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_29.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_29.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_29.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_29.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_3.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_3.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_3.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_3.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_30.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_30.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_30.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_30.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_31.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_31.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_31.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_31.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_32.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_32.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_32.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_32.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_33.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_33.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_33.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_33.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_34.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_34.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_34.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_34.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_35.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_35.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_35.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_35.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_36.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_36.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_36.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_36.pcl
diff --git a/pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_4.pcl b/pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_4.pcl
similarity index 100%
rename from pelicun/tests/data/uq/test_random_variable_set_orthotope_density/test_4.pcl
rename to pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density/test_4.pcl
diff --git a/pelicun/tests/reset_tests.py b/pelicun/tests/basic/reset_tests.py
similarity index 82%
rename from pelicun/tests/reset_tests.py
rename to pelicun/tests/basic/reset_tests.py
index 9c7fc8512..8c7db171f 100644
--- a/pelicun/tests/reset_tests.py
+++ b/pelicun/tests/basic/reset_tests.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,23 +37,23 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-This file is used to reset all expected test result data.
-"""
+"""This file is used to reset all expected test result data."""
+
+from __future__ import annotations
-import os
-import re
-import glob
import ast
import importlib
+import os
+import re
+from pathlib import Path
-def reset_all_test_data(restore=True, purge=False):
+def reset_all_test_data(*, restore: bool = True, purge: bool = False) -> None: # noqa: C901
"""
Update the expected result pickle files with new results, accepting
the values obtained by executing the code as correct from now on.
- Warning: This function should never be used if tests are
+ CAUTION: This function should never be used if tests are
failing. Its only purpose is to aid the development of more tests
and to keep things tidy. If tests are failing, the specific tests
need to be investigated, and after rectifying the cause, new
@@ -65,7 +64,7 @@ def reset_all_test_data(restore=True, purge=False):
directory is the package root directory (`pelicun`). The code
assumes that the test data directory exists.
Data deletion only involves `.pcl` files that begin with `test_` and
- reside in /pelicun/tests/data.
+ reside in /pelicun/tests/basic/data.
Parameters
----------
@@ -77,7 +76,6 @@ def reset_all_test_data(restore=True, purge=False):
Raises
------
-
ValueError
If the test directory is not found.
@@ -87,17 +85,19 @@ def reset_all_test_data(restore=True, purge=False):
`pelicun` directory. Dangerous things may happen otherwise.
"""
-
- cwd = os.path.basename(os.getcwd())
+ # compare only the final path component; `Path.cwd()` returns the
+ # full path, which would never equal the bare string 'pelicun'
+ cwd = Path.cwd().name
if cwd != 'pelicun':
- raise OSError(
- 'Wrong directory. ' 'See the docstring of `reset_all_test_data`. Aborting'
+ msg = (
+ 'Wrong directory. '
+ 'See the docstring of `reset_all_test_data`. Aborting'
)
+ raise OSError(msg)
# where the test result data are stored
- testdir = os.path.join(*('tests', 'data'))
- if not os.path.exists(testdir):
- raise ValueError('pelicun/tests/data directory not found.')
+ testdir = Path('tests') / 'basic' / 'data'
+ if not testdir.exists():
+ msg = 'pelicun/tests/basic/data directory not found.'
+ raise ValueError(msg)
# clean up existing test result data
# only remove .pcl files that start with `test_`
@@ -106,18 +106,15 @@ def reset_all_test_data(restore=True, purge=False):
for root, _, files in os.walk('.'):
for filename in files:
if pattern.match(filename):
- full_name = os.path.join(root, filename)
- print(f'removing: {full_name}')
- file_path = full_name
- os.remove(file_path)
+ (Path(root) / filename).unlink()
# generate new data
if restore:
# get a list of all existing test files and iterate
- test_files = glob.glob('tests/*test*.py')
+ test_files = list((Path('tests') / 'basic').glob('*test*.py'))
for test_file in test_files:
# open the file and statically parse the code looking for functions
- with open(test_file, 'r', encoding='utf-8') as file:
+ with Path(test_file).open(encoding='utf-8') as file:
node = ast.parse(file.read())
functions = [n for n in node.body if isinstance(n, ast.FunctionDef)]
# iterate over the functions looking for test_ functions
@@ -129,7 +126,7 @@ def reset_all_test_data(restore=True, purge=False):
if 'reset' in arguments:
# we want to import it and run it with reset=True
# construct a module name, like 'tests.basic.test_uq'
- module_name = 'tests.' + os.path.basename(test_file).replace(
+ module_name = 'tests.basic.' + Path(test_file).name.replace(
'.py', ''
)
# import the module
@@ -137,5 +134,4 @@ def reset_all_test_data(restore=True, purge=False):
# get the function
func = getattr(module, function.name)
# run it to reset its expected test output data
- print(f'running: {function.name} from {module_name}')
func(reset=True)
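+
+
+if __name__ == '__main__':
+ # Illustrative entry point (an editorial sketch, not part of the
+ # original module): regenerate all expected results when the script
+ # is run directly from the `pelicun` package root.
+ reset_all_test_data(restore=True, purge=False)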
diff --git a/pelicun/tests/test_assessment.py b/pelicun/tests/basic/test_assessment.py
similarity index 74%
rename from pelicun/tests/test_assessment.py
rename to pelicun/tests/basic/test_assessment.py
index cd7b95484..04e1b7822 100644
--- a/pelicun/tests/test_assessment.py
+++ b/pelicun/tests/basic/test_assessment.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,52 +37,43 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-These are unit and integration tests on the assessment module of pelicun.
-"""
+"""These are unit and integration tests on the assessment module of pelicun."""
+
+from __future__ import annotations
import pytest
-from pelicun import base
-from pelicun import model
-from pelicun import assessment
-# pylint: disable=missing-function-docstring
+from pelicun import assessment
-def create_assessment_obj(config=None):
- if config:
- asmt = assessment.Assessment(config)
- else:
- asmt = assessment.Assessment({})
- return asmt
+def create_assessment_obj(config: dict | None = None) -> assessment.Assessment:
+ return assessment.Assessment(config) if config else assessment.Assessment({})
-def test_Assessment_init():
+def test_Assessment_init() -> None:
asmt = create_assessment_obj()
-
- assert asmt.stories is None
-
- assert asmt.options
- assert isinstance(asmt.options, base.Options)
-
- assert asmt.unit_conversion_factors
- assert isinstance(asmt.unit_conversion_factors, dict)
-
- assert asmt.log
- assert isinstance(asmt.log, base.Logger)
-
- # test attributes defined as properties
- assert asmt.demand
- assert isinstance(asmt.demand, model.DemandModel)
- assert asmt.asset
- assert isinstance(asmt.asset, model.AssetModel)
- assert asmt.damage
- assert isinstance(asmt.damage, model.DamageModel)
- assert asmt.repair
- assert isinstance(asmt.repair, model.RepairModel)
-
-
-def test_assessment_get_default_metadata():
+ # confirm attributes
+ for attribute in (
+ 'asset',
+ 'calc_unit_scale_factor',
+ 'damage',
+ 'demand',
+ 'get_default_data',
+ 'get_default_metadata',
+ 'log',
+ 'loss',
+ 'options',
+ 'scale_factor',
+ 'stories',
+ 'unit_conversion_factors',
+ ):
+ assert hasattr(asmt, attribute)
+ # confirm that creating an attribute on the fly is not allowed
+ with pytest.raises(AttributeError):
+ asmt.my_attribute = 2 # type: ignore
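+ # (The attribute guard is presumably implemented via __slots__ or a
+ # custom __setattr__; either way, undeclared attributes are rejected.)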
+
+
+def test_assessment_get_default_metadata() -> None:
asmt = create_assessment_obj()
data_sources = (
@@ -97,12 +87,12 @@ def test_assessment_get_default_metadata():
for data_source in data_sources:
# here we just test that we can load the data file, without
- # checking the contens.
+ # checking the contents.
asmt.get_default_data(data_source)
asmt.get_default_metadata(data_source)
-def test_assessment_calc_unit_scale_factor():
+def test_assessment_calc_unit_scale_factor() -> None:
# default unit file
asmt = create_assessment_obj()
@@ -118,7 +108,7 @@ def test_assessment_calc_unit_scale_factor():
asmt = create_assessment_obj(
{
'UnitsFile': (
- 'pelicun/tests/data/assessment/'
+ 'pelicun/tests/basic/data/assessment/'
'test_assessment_calc_unit_scale_factor/'
'custom_units.json'
)
@@ -136,7 +126,7 @@ def test_assessment_calc_unit_scale_factor():
# 1 smoot was 67 inches in 1958.
-def test_assessment_scale_factor():
+def test_assessment_scale_factor() -> None:
# default unit file
asmt = create_assessment_obj()
assert asmt.scale_factor('m') == 1.00
@@ -146,7 +136,7 @@ def test_assessment_scale_factor():
asmt = create_assessment_obj(
{
'UnitsFile': (
- 'pelicun/tests/data/assessment/'
+ 'pelicun/tests/basic/data/assessment/'
'test_assessment_calc_unit_scale_factor/'
'custom_units.json'
)
@@ -157,5 +147,5 @@ def test_assessment_scale_factor():
assert asmt.scale_factor('m') == 39.3701
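+ # (39.3701 is the number of inches in one meter, so the custom units
+ # file evidently uses inches as its base length unit.)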
# exceptions
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError, match='Unknown unit: helen'):
asmt.scale_factor('helen')
diff --git a/pelicun/tests/basic/test_asset_model.py b/pelicun/tests/basic/test_asset_model.py
new file mode 100644
index 000000000..eccde38d0
--- /dev/null
+++ b/pelicun/tests/basic/test_asset_model.py
@@ -0,0 +1,308 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""These are unit and integration tests on the asset model of pelicun."""
+
+from __future__ import annotations
+
+import tempfile
+from copy import deepcopy
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from pelicun import assessment
+from pelicun.base import ensure_value
+from pelicun.tests.basic.test_pelicun_model import TestPelicunModel
+
+if TYPE_CHECKING:
+ from pelicun.model.asset_model import AssetModel
+
+
+class TestAssetModel(TestPelicunModel):
+ @pytest.fixture
+ def asset_model(self, assessment_instance: assessment.Assessment) -> AssetModel:
+ return deepcopy(assessment_instance.asset)
+
+ def test_init_method(self, asset_model: AssetModel) -> None:
+ assert asset_model.log
+ assert asset_model.cmp_marginal_params is None
+ assert asset_model.cmp_units is None
+ assert asset_model._cmp_RVs is None
+ assert asset_model.cmp_sample is None
+
+ def test_save_cmp_sample(self, asset_model: AssetModel) -> None:
+ asset_model.cmp_sample = pd.DataFrame(
+ {
+ ('component_a', f'{i}', f'{j}', '0'): 8.0
+ for i in range(1, 3)
+ for j in range(1, 3)
+ },
+ index=range(10),
+ columns=pd.MultiIndex.from_tuples(
+ (
+ ('component_a', f'{i}', f'{j}', '0')
+ for i in range(1, 3)
+ for j in range(1, 3)
+ ),
+ names=('cmp', 'loc', 'dir', 'uid'),
+ ),
+ )
+
+ asset_model.cmp_units = pd.Series(
+ data=['ea'], index=['component_a'], name='Units'
+ )
+
+ res = asset_model.save_cmp_sample()
+ assert isinstance(res, pd.DataFrame)
+
+ temp_dir = tempfile.mkdtemp()
+ # save the sample there
+ asset_model.save_cmp_sample(f'{temp_dir}/temp.csv')
+
+ # load the component sample to a different AssetModel
+ asmt = assessment.Assessment()
+ asset_model = asmt.asset
+ asset_model.load_cmp_sample(f'{temp_dir}/temp.csv')
+
+ # also test saving the sample to variables, i.e. the returned
+ # values (but we don't inspect them)
+ asset_model.save_cmp_sample(save_units=False)
+ asset_model.save_cmp_sample(save_units=True)
+
+ def test_load_cmp_model_1(self, asset_model: AssetModel) -> None:
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals.csv',
+ index_col=0,
+ )
+ asset_model.load_cmp_model({'marginals': cmp_marginals})
+
+ expected_cmp_marginal_params = pd.DataFrame(
+ {
+ 'Theta_0': (8.0, 8.0, 8.0, 8.0, 8.0, 8.0),
+ 'Blocks': (1, 1, 1, 1, 1, 1),
+ },
+ index=pd.MultiIndex.from_tuples(
+ (
+ ('component_a', '0', '1', '0'),
+ ('component_a', '0', '2', '0'),
+ ('component_a', '1', '1', '0'),
+ ('component_a', '1', '2', '0'),
+ ('component_a', '2', '1', '0'),
+ ('component_a', '2', '2', '0'),
+ ),
+ names=('cmp', 'loc', 'dir', 'uid'),
+ ),
+ ).astype({'Theta_0': 'float64', 'Blocks': 'int64'})
+
+ pd.testing.assert_frame_equal(
+ expected_cmp_marginal_params,
+ ensure_value(asset_model.cmp_marginal_params),
+ check_index_type=False,
+ check_column_type=False,
+ check_dtype=False,
+ )
+
+ expected_cmp_units = pd.Series(
+ data=['ea'], index=['component_a'], name='Units'
+ )
+
+ pd.testing.assert_series_equal(
+ expected_cmp_units,
+ ensure_value(asset_model.cmp_units),
+ check_index_type=False,
+ )
+
+ def test_load_cmp_model_2(self, asset_model: AssetModel) -> None:
+ # component marginals utilizing the keywords '--', 'all', 'top', 'roof'
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals_2.csv',
+ index_col=0,
+ )
+ asset_model._asmnt.stories = 4
+ asset_model.load_cmp_model({'marginals': cmp_marginals})
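+ # The CSV relies on the location keywords '--', 'all', 'top' and
+ # 'roof'; judging by the expected parameters below, these expand to
+ # explicit story numbers once stories=4 is set, with level 5 entries
+ # only reachable via 'roof'.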
+
+ assert ensure_value(asset_model.cmp_marginal_params).to_dict() == {
+ 'Theta_0': {
+ ('component_a', '0', '1', '0'): 1.0,
+ ('component_a', '0', '2', '0'): 1.0,
+ ('component_a', '1', '1', '0'): 1.0,
+ ('component_a', '1', '2', '0'): 1.0,
+ ('component_a', '2', '1', '0'): 1.0,
+ ('component_a', '2', '2', '0'): 1.0,
+ ('component_a', '3', '1', '0'): 1.0,
+ ('component_a', '3', '2', '0'): 1.0,
+ ('component_b', '1', '1', '0'): 1.0,
+ ('component_b', '2', '1', '0'): 1.0,
+ ('component_b', '3', '1', '0'): 1.0,
+ ('component_b', '4', '1', '0'): 1.0,
+ ('component_c', '0', '1', '0'): 1.0,
+ ('component_c', '1', '1', '0'): 1.0,
+ ('component_c', '2', '1', '0'): 1.0,
+ ('component_d', '4', '1', '0'): 1.0,
+ ('component_e', '5', '1', '0'): 1.0,
+ },
+ 'Blocks': {
+ ('component_a', '0', '1', '0'): 1,
+ ('component_a', '0', '2', '0'): 1,
+ ('component_a', '1', '1', '0'): 1,
+ ('component_a', '1', '2', '0'): 1,
+ ('component_a', '2', '1', '0'): 1,
+ ('component_a', '2', '2', '0'): 1,
+ ('component_a', '3', '1', '0'): 1,
+ ('component_a', '3', '2', '0'): 1,
+ ('component_b', '1', '1', '0'): 1,
+ ('component_b', '2', '1', '0'): 1,
+ ('component_b', '3', '1', '0'): 1,
+ ('component_b', '4', '1', '0'): 1,
+ ('component_c', '0', '1', '0'): 1,
+ ('component_c', '1', '1', '0'): 1,
+ ('component_c', '2', '1', '0'): 1,
+ ('component_d', '4', '1', '0'): 1,
+ ('component_e', '5', '1', '0'): 1,
+ },
+ }
+
+ expected_cmp_units = pd.Series(
+ data=['ea'] * 5,
+ index=[f'component_{x}' for x in ('a', 'b', 'c', 'd', 'e')],
+ name='Units',
+ )
+
+ pd.testing.assert_series_equal(
+ expected_cmp_units,
+ ensure_value(asset_model.cmp_units),
+ check_index_type=False,
+ )
+
+ def test_load_cmp_model_csv(self, asset_model: AssetModel) -> None:
+ # load by directly specifying the csv file
+ cmp_marginals = 'pelicun/tests/basic/data/model/test_AssetModel/CMP'
+ asset_model.load_cmp_model(cmp_marginals)
+
+ def test_load_cmp_model_exceptions(self, asset_model: AssetModel) -> None:
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_AssetModel/'
+ 'CMP_marginals_invalid_loc.csv',
+ index_col=0,
+ )
+ asset_model._asmnt.stories = 4
+ with pytest.raises(
+ ValueError, match='Cannot parse location string: basement'
+ ):
+ asset_model.load_cmp_model({'marginals': cmp_marginals})
+
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_AssetModel/'
+ 'CMP_marginals_invalid_dir.csv',
+ index_col=0,
+ )
+ asset_model._asmnt.stories = 4
+ with pytest.raises(
+ ValueError, match='Cannot parse direction string: non-directional'
+ ):
+ asset_model.load_cmp_model({'marginals': cmp_marginals})
+
+ def test_generate_cmp_sample(self, asset_model: AssetModel) -> None:
+ asset_model.cmp_marginal_params = pd.DataFrame(
+ {'Theta_0': (8.0, 8.0, 8.0, 8.0), 'Blocks': (1.0, 1.0, 1.0, 1.0)},
+ index=pd.MultiIndex.from_tuples(
+ (
+ ('component_a', '1', '1', '0'),
+ ('component_a', '1', '2', '0'),
+ ('component_a', '2', '1', '0'),
+ ('component_a', '2', '2', '0'),
+ ),
+ names=('cmp', 'loc', 'dir', 'uid'),
+ ),
+ )
+
+ asset_model.cmp_units = pd.Series(
+ data=['ea'], index=['component_a'], name='Units'
+ )
+
+ asset_model.generate_cmp_sample(sample_size=10)
+
+ assert asset_model._cmp_RVs is not None
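+ # With only Theta_0 specified (no distribution family), sampling is
+ # deterministic: each of the 10 realizations below equals 8.0.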
+
+ expected_cmp_sample = pd.DataFrame(
+ {
+ ('component_a', f'{i}', f'{j}', '0'): 8.0
+ for i in range(1, 3)
+ for j in range(1, 3)
+ },
+ index=range(10),
+ columns=pd.MultiIndex.from_tuples(
+ (
+ ('component_a', f'{i}', f'{j}', '0')
+ for i in range(1, 3)
+ for j in range(1, 3)
+ ),
+ names=('cmp', 'loc', 'dir', 'uid'),
+ ),
+ )
+
+ pd.testing.assert_frame_equal(
+ expected_cmp_sample,
+ ensure_value(asset_model.cmp_sample),
+ check_index_type=False,
+ check_column_type=False,
+ )
+
+ def test_generate_cmp_sample_exceptions_1(self, asset_model: AssetModel) -> None:
+ # without marginal parameters
+ with pytest.raises(
+ ValueError, match='Model parameters have not been specified'
+ ):
+ asset_model.generate_cmp_sample(sample_size=10)
+
+ def test_generate_cmp_sample_exceptions_2(self, asset_model: AssetModel) -> None:
+ # without specifying sample size
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_AssetModel/CMP_marginals.csv',
+ index_col=0,
+ )
+ asset_model.load_cmp_model({'marginals': cmp_marginals})
+ with pytest.raises(ValueError, match='Sample size was not specified'):
+ asset_model.generate_cmp_sample()
+ # but it should work if a demand sample is available
+ asset_model._asmnt.demand.sample = pd.DataFrame(np.empty(shape=(10, 2)))
+ asset_model.generate_cmp_sample()
diff --git a/pelicun/tests/test_auto.py b/pelicun/tests/basic/test_auto.py
similarity index 78%
rename from pelicun/tests/test_auto.py
rename to pelicun/tests/basic/test_auto.py
index 5bf2f34f0..a91081cc2 100644
--- a/pelicun/tests/test_auto.py
+++ b/pelicun/tests/basic/test_auto.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,18 +37,16 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-These are unit and integration tests on the auto module of pelicun.
+"""These are unit and integration tests on the auto module of pelicun."""
-"""
+from __future__ import annotations
-import pytest
-from unittest.mock import patch
-from unittest.mock import MagicMock
-from pelicun.auto import auto_populate
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+import pytest
-# pylint: disable=missing-function-docstring
+from pelicun.auto import auto_populate
# The tests maintain the order of definitions of the `auto.py` file.
@@ -63,21 +60,21 @@
@pytest.fixture
-def setup_valid_config():
+def setup_valid_config() -> dict:
return {'GeneralInformation': {'someKey': 'someValue'}}
@pytest.fixture
-def setup_auto_script_path():
+def setup_auto_script_path() -> str:
return 'PelicunDefault/test_script'
@pytest.fixture
-def setup_expected_base_path():
+def setup_expected_base_path() -> str:
return '/expected/path/resources/auto/'
-def test_valid_inputs(setup_valid_config, setup_auto_script_path):
+def test_valid_inputs(setup_valid_config: dict, setup_auto_script_path: str) -> None:
with patch('pelicun.base.pelicun_path', '/expected/path'), patch(
'os.path.exists', return_value=True
), patch('importlib.__import__') as mock_import:
@@ -86,30 +83,32 @@ def test_valid_inputs(setup_valid_config, setup_auto_script_path):
)
mock_import.return_value.auto_populate = mock_auto_populate_ext
- config, cmp = auto_populate(setup_valid_config, setup_auto_script_path)
+ config, cmp = auto_populate(setup_valid_config, Path(setup_auto_script_path))
assert 'DL' in config
assert cmp == 'CMP'
-def test_missing_general_information():
- with pytest.raises(ValueError) as excinfo:
- auto_populate({}, 'some/path')
- assert "No Asset Information provided for the auto-population routine." in str(
- excinfo.value
- )
+def test_missing_general_information() -> None:
+ with pytest.raises(
+ ValueError,
+ match='No Asset Information provided for the auto-population routine.',
+ ):
+ auto_populate({}, Path('some/path'))
def test_pelicun_default_path_replacement(
- setup_auto_script_path, setup_expected_base_path
-):
+ setup_auto_script_path: str, setup_expected_base_path: str
+) -> None:
modified_path = setup_auto_script_path.replace(
'PelicunDefault/', setup_expected_base_path
)
assert modified_path.startswith(setup_expected_base_path)
-def test_auto_population_script_execution(setup_valid_config, setup_auto_script_path):
+def test_auto_population_script_execution(
+ setup_valid_config: dict, setup_auto_script_path: str
+) -> None:
with patch('pelicun.base.pelicun_path', '/expected/path'), patch(
'os.path.exists', return_value=True
), patch('importlib.__import__') as mock_import:
@@ -118,5 +117,5 @@ def test_auto_population_script_execution(setup_valid_config, setup_auto_script_
)
mock_import.return_value.auto_populate = mock_auto_populate_ext
- auto_populate(setup_valid_config, setup_auto_script_path)
+ auto_populate(setup_valid_config, Path(setup_auto_script_path))
mock_import.assert_called_once()
diff --git a/pelicun/tests/basic/test_base.py b/pelicun/tests/basic/test_base.py
new file mode 100644
index 000000000..327ef9c36
--- /dev/null
+++ b/pelicun/tests/basic/test_base.py
@@ -0,0 +1,1016 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""These are unit and integration tests on the base module of pelicun."""
+
+from __future__ import annotations
+
+import argparse
+import io
+import platform
+import re
+import subprocess # noqa: S404
+import tempfile
+from contextlib import redirect_stdout
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from pelicun import base
+from pelicun.base import ensure_value
+
+# The tests maintain the order of definitions of the `base.py` file.
+
+
+def test_options_init() -> None:
+ temp_dir = tempfile.mkdtemp()
+
+ # Create a sample user_config_options dictionary
+ user_config_options = {
+ 'Verbose': False,
+ 'Seed': None,
+ 'LogShowMS': False,
+ 'LogFile': f'{temp_dir}/test_log_file',
+ 'PrintLog': False,
+ 'DemandOffset': {'PFA': -1, 'PFV': -1},
+ 'Sampling': {
+ 'SamplingMethod': 'MonteCarlo',
+ 'SampleSize': 1000,
+ 'PreserveRawOrder': False,
+ },
+ 'SamplingMethod': 'MonteCarlo',
+ 'NonDirectionalMultipliers': {'ALL': 1.2},
+ 'EconomiesOfScale': {'AcrossFloors': True, 'AcrossDamageStates': True},
+ 'RepairCostAndTimeCorrelation': 0.7,
+ }
+
+ # Create an Options object using the user_config_options
+ # dictionary
+ options = base.Options(user_config_options)
+
+ # Check that the Options object was created successfully
+ assert options is not None
+
+ # Check that the values of the Options object attributes match the
+ # values in the user_config_options dictionary
+ assert options.sampling_method == 'MonteCarlo'
+ assert options.units_file is None
+ assert options.demand_offset == {'PFA': -1, 'PFV': -1}
+ assert options.nondir_multi_dict == {'ALL': 1.2}
+ assert options.rho_cost_time == 0.7
+ assert options.eco_scale == {'AcrossFloors': True, 'AcrossDamageStates': True}
+
+ # Check that the Logger object attribute of the Options object is
+ # initialized with the correct parameters
+ assert options.log.verbose is False
+ assert options.log.log_show_ms is False
+ assert Path(ensure_value(options.log.log_file)).name == 'test_log_file'
+ assert options.log.print_log is False
+
+ # test seed property and setter
+ options.seed = 42
+ assert options.seed == 42
+
+ # test rng
+ assert isinstance(options.rng, np.random._generator.Generator)
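+ # (options.rng is presumably seeded from options.seed, making any
+ # downstream sampling reproducible once a seed is set.)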
+
+
+def test_nondir_multi() -> None:
+ options = base.Options({'NonDirectionalMultipliers': {'PFA': 1.5, 'PFV': 1.00}})
+ assert options.nondir_multi_dict == {'PFA': 1.5, 'PFV': 1.0, 'ALL': 1.2}
+
+
+def test_logger_init() -> None:
+ # Test that the Logger object is initialized with the correct
+ # attributes based on the input configuration
+
+ temp_dir = tempfile.mkdtemp()
+
+ log_config = {
+ 'verbose': True,
+ 'log_show_ms': False,
+ 'log_file': f'{temp_dir}/log.txt',
+ 'print_log': True,
+ }
+ log = base.Logger(**log_config) # type: ignore
+ assert log.verbose is True
+ assert log.log_show_ms is False
+ assert Path(ensure_value(log.log_file)).name == 'log.txt'
+ assert log.print_log is True
+
+ # test exceptions
+ log_config = {
+ 'verbose': True,
+ 'log_show_ms': False,
+ 'log_file': '/',
+ 'print_log': True,
+ }
+ with pytest.raises((IsADirectoryError, FileExistsError, FileNotFoundError)):
+ log = base.Logger(**log_config) # type: ignore
+
+
+def test_logger_msg() -> None:
+ temp_dir = tempfile.mkdtemp()
+
+ # Test that the msg method prints the correct message to the
+ # console and log file
+ log_config = {
+ 'verbose': True,
+ 'log_show_ms': True,
+ 'log_file': f'{temp_dir}/log.txt',
+ 'print_log': True,
+ }
+ log = base.Logger(**log_config) # type: ignore
+ # Check that the message is printed to the console
+ with io.StringIO() as buf, redirect_stdout(buf):
+ log.msg('This is a message')
+ output = buf.getvalue()
+ assert 'This is a message' in output
+ # Check that the message is written to the log file
+ with Path(f'{temp_dir}/log.txt').open(encoding='utf-8') as f:
+ assert 'This is a message' in f.read()
+
+ # Check if timestamp is printed
+ with io.StringIO() as buf, redirect_stdout(buf):
+ log.msg(
+ ('This is a message\nSecond line'),
+ prepend_timestamp=True,
+ )
+ output = buf.getvalue()
+ pattern = r'(\d{2}:\d{2}:\d{2})'
+ assert re.search(pattern, output) is not None
+
+
+def test_logger_div() -> None:
+ temp_dir = tempfile.mkdtemp()
+
+ # We test the divider with and without the timestamp
+ prepend_timestamp_args = (True, False)
+ patterns = (
+ r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9]:[0-9][0-9][0-9][0-9][0-9][0-9]\s-+',
+ r'\s+-+',
+ )
+ for case, pattern_str in zip(prepend_timestamp_args, patterns):
+ pattern = re.compile(pattern_str)
+ # Test that the div method adds a divider as intended
+ log_config = {
+ 'verbose': True,
+ 'log_show_ms': True,
+ 'log_file': f'{temp_dir}/log.txt',
+ 'print_log': True,
+ }
+ log = base.Logger(**log_config) # type: ignore
+
+ # check console output
+ with io.StringIO() as buf, redirect_stdout(buf):
+ log.div(prepend_timestamp=case)
+ output = buf.getvalue()
+ assert pattern.match(output)
+ # check log file
+ with Path(f'{temp_dir}/log.txt').open(encoding='utf-8') as f:
+ # simply check that it is not empty
+ assert f.read()
+
+
+@pytest.mark.skipif(
+ platform.system() == 'Windows',
+ reason='Skipping test on Windows due to path handling issues.',
+)
+def test_logger_exception() -> None:
+ # Create a temporary directory for log files
+ temp_dir = tempfile.mkdtemp()
+
+ # Create a sample Python script that will raise an exception
+ test_script = Path(temp_dir) / 'test_script.py'
+ test_script_content = f"""
+from pathlib import Path
+from pelicun.base import Logger
+
+log_file_A = Path("{temp_dir}") / 'log_A.txt'
+log_file_B = Path("{temp_dir}") / 'log_B.txt'
+
+log_A = Logger(
+ log_file=log_file_A,
+ verbose=True,
+ log_show_ms=True,
+ print_log=True,
+)
+log_B = Logger(
+ log_file=log_file_B,
+ verbose=True,
+ log_show_ms=True,
+ print_log=True,
+)
+
+raise ValueError('Test exception in subprocess')
+"""
+
+ # Write the test script to the file
+ test_script.write_text(test_script_content)
+
+ # Use subprocess to run the script
+ process = subprocess.run( # noqa: S603
+ ['python', str(test_script)], # noqa: S607
+ capture_output=True,
+ text=True,
+ check=False,
+ )
+
+ # Check that the process exited with an error
+ assert process.returncode == 1
+
+ # Check the stdout/stderr for the expected output
+ assert 'Test exception in subprocess' in process.stdout
+
+ # Check that the exception was logged in the log file
+ log_files = (
+ Path(temp_dir) / 'log_A.txt',
+ Path(temp_dir) / 'log_B.txt',
+ )
+ for log_file in log_files:
+ assert log_file.exists(), 'Log file was not created'
+ log_content = log_file.read_text()
+ assert 'Test exception in subprocess' in log_content
+ assert 'Traceback' in log_content
+ assert 'ValueError' in log_content
+
+
+def test_split_file_name() -> None:
+ file_path = 'example.file.name.txt'
+ name, extension = base.split_file_name(file_path)
+ assert name == 'example.file.name'
+ assert extension == '.txt'
+
+ file_path = 'example'
+ name, extension = base.split_file_name(file_path)
+ assert name == 'example'
+ assert extension == '' # noqa: PLC1901
+
+
+def test_print_system_info() -> None:
+ temp_dir = tempfile.mkdtemp()
+
+ # create a logger object
+ log_config = {
+ 'verbose': True,
+ 'log_show_ms': True,
+ 'log_file': f'{temp_dir}/log.txt',
+ 'print_log': True,
+ }
+ log = base.Logger(**log_config) # type: ignore
+
+ # run print_system_info and get the console output
+ with io.StringIO() as buf, redirect_stdout(buf):
+ log.print_system_info()
+ output = buf.getvalue()
+
+ # verify the contents of the output
+ assert 'System Information:\n' in output
+
+
+def test_update_vals() -> None:
+ primary = {'b': {'c': 4, 'd': 5}, 'g': 7}
+ update = {'a': 1, 'b': {'c': 3, 'd': 5}, 'f': 6}
+ base.update_vals(update, primary, 'update', 'primary')
+ assert primary == {'b': {'c': 4, 'd': 5}, 'g': 7} # unchanged
+ assert update == {'a': 1, 'b': {'c': 3, 'd': 5}, 'f': 6, 'g': 7} # updated
+ # note: key 'g' created, 'f' left there, 'c', 'd' updated, as intended
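+ # in other words, update_vals fills `update` in place with defaults
+ # from `primary`, but never overwrites keys that `update` already
+ # defines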
+
+ primary = {'a': {'b': {'c': 4}}}
+ update = {'a': {'b': {'c': 3}}}
+ base.update_vals(update, primary, 'update', 'primary')
+ assert primary == {'a': {'b': {'c': 4}}} # unchanged
+ assert update == {'a': {'b': {'c': 3}}} # updated
+
+ primary = {'a': {'b': 4}}
+ update = {'a': {'b': {'c': 3}}}
+ with pytest.raises(ValueError, match='should not map to a dictionary'):
+ base.update_vals(update, primary, 'update', 'primary')
+
+ primary = {'a': {'b': 3}}
+ update = {'a': 1, 'b': 2}
+ with pytest.raises(ValueError, match='should map to a dictionary'):
+ base.update_vals(update, primary, 'update', 'primary')
+
+
+def test_merge_default_config() -> None:
+ # Test merging an empty user config with the default config
+ user_config: dict[str, object] | None = {}
+ merged_config = base.merge_default_config(user_config)
+ assert merged_config == base.load_default_options()
+
+ user_config = None # same as {}
+ merged_config = base.merge_default_config(user_config)
+ assert merged_config == base.load_default_options()
+
+ # Test merging a user config with a single option set
+ user_config = {'Verbose': True}
+ merged_config = base.merge_default_config(user_config)
+ assert merged_config == {**base.load_default_options(), **user_config}
+
+ # Test merging a user config with multiple options set
+ user_config = {'Verbose': True, 'Seed': 12345}
+ merged_config = base.merge_default_config(user_config)
+ assert merged_config == {**base.load_default_options(), **user_config}
+
+ # Test merging a user config with a nested option set
+ user_config = {'NonDirectionalMultipliers': {'PFA': 1.5}}
+ merged_config = base.merge_default_config(user_config)
+ assert merged_config == {**base.load_default_options(), **user_config}
+
+ # Test merging a user config with a nested option set and a top-level option set
+ user_config = {'Verbose': True, 'NonDirectionalMultipliers': {'PFA': 1.5}}
+ merged_config = base.merge_default_config(user_config)
+ assert merged_config == {**base.load_default_options(), **user_config}
+
+
+def test_convert_dtypes() -> None:
+ # All columns able to be converted
+
+ # Input DataFrame
+ df_input = pd.DataFrame({'a': ['1', '2', '3'], 'b': ['4.0', '5.5', '6.75']})
+
+ # Expected DataFrame
+ df_expected = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.5, 6.75]}).astype(
+ {'a': 'int64', 'b': 'float64'}
+ )
+
+ # Convert data types
+ df_result = base.convert_dtypes(df_input)
+
+ pd.testing.assert_frame_equal(
+ df_result, df_expected, check_index_type=False, check_column_type=False
+ )
+
+ # No columns that can be converted
+
+ df_input = pd.DataFrame(
+ {'a': ['foo', 'bar', 'baz'], 'b': ['2021-01-01', '2021-01-02', '2021-01-03']}
+ )
+ df_expected = df_input.copy()
+ df_result = base.convert_dtypes(df_input)
+ pd.testing.assert_frame_equal(
+ df_result, df_expected, check_index_type=False, check_column_type=False
+ )
+
+ # Columns with mixed types
+
+ df_input = pd.DataFrame(
+ {
+ 'a': ['1', '2', 'three'],
+ 'b': ['4.0', '5.5', 'six'],
+ 'c': ['7', 'eight', '9'],
+ }
+ )
+ df_result = base.convert_dtypes(df_input)
+ pd.testing.assert_frame_equal(
+ df_result, df_input, check_index_type=False, check_column_type=False
+ )
+
+ # None values present
+
+ df_input = pd.DataFrame({'a': [None, '2', '3'], 'b': ['4.0', None, '6.75']})
+ df_expected = pd.DataFrame({'a': [np.nan, 2, 3], 'b': [4.0, np.nan, 6.75]})
+ df_result = base.convert_dtypes(df_input)
+ pd.testing.assert_frame_equal(
+ df_result,
+ df_expected,
+ check_dtype=False,
+ check_index_type=False,
+ check_column_type=False,
+ )
+
+ # Empty DataFrame
+
+ df_input = pd.DataFrame({})
+ df_expected = pd.DataFrame({})
+ df_result = base.convert_dtypes(df_input)
+ pd.testing.assert_frame_equal(
+ df_result, df_expected, check_index_type=False, check_column_type=False
+ )
+
+
+def test_convert_to_SimpleIndex() -> None:
+ # Test conversion of a multiindex to a simple index following the
+ # SimCenter dash convention
+ index = pd.MultiIndex.from_tuples((('a', 'b'), ('c', 'd')))
+ data = pd.DataFrame([[1, 2], [3, 4]], index=index)
+ data.index.names = ['name_1', 'name_2']
+ data_simple = base.convert_to_SimpleIndex(data, axis=0)
+ assert data_simple.index.tolist() == ['a-b', 'c-d']
+ assert data_simple.index.name == '-'.join(data.index.names)
+
+ # Test inplace modification
+ df_inplace = data.copy()
+ base.convert_to_SimpleIndex(df_inplace, axis=0, inplace=True)
+ assert df_inplace.index.tolist() == ['a-b', 'c-d']
+ assert df_inplace.index.name == '-'.join(data.index.names)
+
+ # Test conversion of columns
+ index = pd.MultiIndex.from_tuples((('a', 'b'), ('c', 'd')))
+ data = pd.DataFrame([[1, 2], [3, 4]], columns=index)
+ data.columns.names = ['name_1', 'name_2']
+ data_simple = base.convert_to_SimpleIndex(data, axis=1)
+ assert data_simple.columns.tolist() == ['a-b', 'c-d']
+ assert data_simple.columns.name == '-'.join(data.columns.names)
+
+ # Test inplace modification
+ df_inplace = data.copy()
+ base.convert_to_SimpleIndex(df_inplace, axis=1, inplace=True)
+ assert df_inplace.columns.tolist() == ['a-b', 'c-d']
+ assert df_inplace.columns.name == '-'.join(data.columns.names)
+
+ # Test invalid axis parameter
+ with pytest.raises(ValueError, match='Invalid axis parameter: 2'):
+ base.convert_to_SimpleIndex(data, axis=2)
+
+
+def test_convert_to_MultiIndex() -> None:
+ # Test a case where the index needs to be converted to a MultiIndex
+ data = pd.DataFrame({'A': (1, 2, 3), 'B': (4, 5, 6)})
+ data.index = pd.Index(['A-1', 'B-1', 'C-1'])
+ data_converted = base.convert_to_MultiIndex(data, axis=0, inplace=False)
+ expected_index = pd.MultiIndex.from_arrays((('A', 'B', 'C'), ('1', '1', '1')))
+ assert data_converted.index.equals(expected_index)
+ # original data should not have changed
+ assert data.index.equals(pd.Index(('A-1', 'B-1', 'C-1')))
+
+ # Test a case where the index is already a MultiIndex
+ data_converted = pd.DataFrame(
+ base.convert_to_MultiIndex(data_converted, axis=0, inplace=False)
+ )
+ assert data_converted.index.equals(expected_index)
+
+ # Test a case where the columns need to be converted to a MultiIndex
+ data = pd.DataFrame({'A-1': (1, 2, 3), 'B-1': (4, 5, 6)})
+ data_converted = base.convert_to_MultiIndex(data, axis=1, inplace=False)
+ expected_columns = pd.MultiIndex.from_arrays((('A', 'B'), ('1', '1')))
+ assert data_converted.columns.equals(expected_columns)
+ # original data should not have changed
+ assert data.columns.equals(pd.Index(('A-1', 'B-1')))
+
+ # Test a case where the columns are already a MultiIndex
+ data_converted = pd.DataFrame(
+ base.convert_to_MultiIndex(data_converted, axis=1, inplace=False)
+ )
+ assert data_converted.columns.equals(expected_columns)
+
+ # Test an invalid axis parameter
+ with pytest.raises(ValueError, match='Invalid axis parameter: 2'):
+ base.convert_to_MultiIndex(data_converted, axis=2, inplace=False)
+
+ # inplace=True
+ data = pd.DataFrame({'A': (1, 2, 3), 'B': (4, 5, 6)})
+ data.index = pd.Index(['A-1', 'B-1', 'C-1'])
+ base.convert_to_MultiIndex(data, axis=0, inplace=True)
+ expected_index = pd.MultiIndex.from_arrays((('A', 'B', 'C'), ('1', '1', '1')))
+ assert data.index.equals(expected_index)
+
+
+def test_show_matrix() -> None:
+ # Test with a simple 2D array
+ arr = np.array(((1, 2, 3), (4, 5, 6)))
+ base.show_matrix(arr)
+
+ # Test with a DataFrame
+ data = pd.DataFrame(((1, 2, 3), (4, 5, 6)), columns=('a', 'b', 'c'))
+ base.show_matrix(data)
+
+ # Test with use_describe=True
+ base.show_matrix(arr, use_describe=True)
+
+
+def test_multiply_factor_multiple_levels() -> None:
+ # Original DataFrame definition
+ data = pd.DataFrame(
+ np.full((5, 3), 1.00),
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('A', 'X', 'K'),
+ ('A', 'X', 'L'),
+ ('A', 'Y', 'M'),
+ ('B', 'X', 'K'),
+ ('B', 'Y', 'M'),
+ ],
+ names=['lv1', 'lv2', 'lv3'],
+ ),
+ columns=['col1', 'col2', 'col3'],
+ )
+
+ # Test 1: Basic multiplication on rows
+ result_df = pd.DataFrame(
+ np.array(
+ [
+ [2.0, 2.0, 2.0],
+ [2.0, 2.0, 2.0],
+ [1.0, 1.0, 1.0],
+ [1.0, 1.0, 1.0],
+ [1.0, 1.0, 1.0],
+ ]
+ ),
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('A', 'X', 'K'),
+ ('A', 'X', 'L'),
+ ('A', 'Y', 'M'),
+ ('B', 'X', 'K'),
+ ('B', 'Y', 'M'),
+ ],
+ names=['lv1', 'lv2', 'lv3'],
+ ),
+ columns=['col1', 'col2', 'col3'],
+ )
+ test_df = data.copy()
+ base.multiply_factor_multiple_levels(test_df, {'lv1': 'A', 'lv2': 'X'}, 2)
+ pd.testing.assert_frame_equal(
+ test_df,
+ result_df,
+ )
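+ # Only rows whose index matches every given level condition
+ # (lv1 == 'A' and lv2 == 'X') are scaled; all other rows keep
+ # their original values.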
+
+ # Test 2: Multiplication on all rows
+ result_df_all = pd.DataFrame(
+ np.full((5, 3), 3.00),
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('A', 'X', 'K'),
+ ('A', 'X', 'L'),
+ ('A', 'Y', 'M'),
+ ('B', 'X', 'K'),
+ ('B', 'Y', 'M'),
+ ],
+ names=['lv1', 'lv2', 'lv3'],
+ ),
+ columns=['col1', 'col2', 'col3'],
+ )
+ test_df = data.copy()
+ base.multiply_factor_multiple_levels(test_df, {}, 3)
+ pd.testing.assert_frame_equal(test_df, result_df_all)
+
+ # Original DataFrame definition for columns test
+ df_columns = pd.DataFrame(
+ np.ones((3, 5)),
+ index=['row1', 'row2', 'row3'],
+ columns=pd.MultiIndex.from_tuples(
+ [
+ ('A', 'X', 'K'),
+ ('A', 'X', 'L'),
+ ('A', 'Y', 'M'),
+ ('B', 'X', 'K'),
+ ('B', 'Y', 'M'),
+ ],
+ names=['lv1', 'lv2', 'lv3'],
+ ),
+ )
+
+ # Test 3: Multiplication on columns
+ result_df_columns = pd.DataFrame(
+ np.array(
+ [
+ [2.0, 2.0, 1.0, 2.0, 1.0],
+ [2.0, 2.0, 1.0, 2.0, 1.0],
+ [2.0, 2.0, 1.0, 2.0, 1.0],
+ ]
+ ),
+ index=['row1', 'row2', 'row3'],
+ columns=pd.MultiIndex.from_tuples(
+ [
+ ('A', 'X', 'K'),
+ ('A', 'X', 'L'),
+ ('A', 'Y', 'M'),
+ ('B', 'X', 'K'),
+ ('B', 'Y', 'M'),
+ ],
+ names=['lv1', 'lv2', 'lv3'],
+ ),
+ )
+ test_df = df_columns.copy()
+ base.multiply_factor_multiple_levels(test_df, {'lv2': 'X'}, 2, axis=1)
+ pd.testing.assert_frame_equal(
+ test_df,
+ result_df_columns,
+ )
+
+ # Test 4: Multiplication with no matching conditions
+ with pytest.raises(
+ ValueError, match="No rows found matching the conditions: `{'lv1': 'C'}`"
+ ):
+ base.multiply_factor_multiple_levels(data.copy(), {'lv1': 'C'}, 2)
+
+ # Test 5: Invalid axis
+ with pytest.raises(ValueError, match='Invalid axis: `2`'):
+ base.multiply_factor_multiple_levels(data.copy(), {'lv1': 'A'}, 2, axis=2)
+
+ # Test 6: Empty conditions affecting all rows
+ result_df_empty = pd.DataFrame(
+ np.full((5, 3), 4.00),
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('A', 'X', 'K'),
+ ('A', 'X', 'L'),
+ ('A', 'Y', 'M'),
+ ('B', 'X', 'K'),
+ ('B', 'Y', 'M'),
+ ],
+ names=['lv1', 'lv2', 'lv3'],
+ ),
+ columns=['col1', 'col2', 'col3'],
+ )
+ testing_df = data.copy()
+ base.multiply_factor_multiple_levels(testing_df, {}, 4)
+ pd.testing.assert_frame_equal(testing_df, result_df_empty)
+
+
+def test_describe() -> None:
+ expected_idx: pd.Index = pd.Index(
+ (
+ 'count',
+ 'mean',
+ 'std',
+ 'log_std',
+ 'min',
+ '0.1%',
+ '2.3%',
+ '10%',
+ '15.9%',
+ '50%',
+ '84.1%',
+ '90%',
+ '97.7%',
+ '99.9%',
+ 'max',
+ ),
+ dtype='object',
+ )
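+ # (The nonstandard percentiles mirror a normal distribution at
+ # +/-1, 2, and 3 standard deviations: 15.9/84.1, 2.3/97.7, 0.1/99.9.)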
+
+ # case 1:
+ # passing a DataFrame
+
+ data = pd.DataFrame(
+ ((1.00, 2.00, 3.00), (4.00, 5.00, 6.00)), columns=['A', 'B', 'C']
+ )
+ desc = base.describe(data)
+ assert np.all(desc.index == expected_idx)
+ assert np.all(desc.columns == pd.Index(('A', 'B', 'C'), dtype='object'))
+
+ # case 2:
+ # passing a series
+
+ sr = pd.Series((1.00, 2.00, 3.00), name='A')
+ desc = base.describe(sr)
+ assert np.all(desc.index == expected_idx)
+ assert np.all(desc.columns == pd.Index(('A',), dtype='object'))
+
+ # case 3:
+ # passing a 2D numpy array
+
+ desc = base.describe(np.array(((1.00, 2.00, 3.00), (4.00, 5.00, 6.00))))
+ assert np.all(desc.index == expected_idx)
+ assert np.all(desc.columns == pd.Index((0, 1, 2), dtype='object'))
+
+ # case 4:
+ # passing a 1D numpy array
+
+ desc = base.describe(np.array((1.00, 2.00, 3.00)))
+ assert np.all(desc.index == expected_idx)
+ assert np.all(desc.columns == pd.Index((0,), dtype='object'))
+
+
+def test_str2bool() -> None:
+ assert base.str2bool('True') is True
+ assert base.str2bool('False') is False
+ assert base.str2bool('yes') is True
+ assert base.str2bool('no') is False
+ assert base.str2bool('t') is True
+ assert base.str2bool('f') is False
+ assert base.str2bool('1') is True
+ assert base.str2bool('0') is False
+ assert base.str2bool(v=True) is True
+ assert base.str2bool(v=False) is False
+ with pytest.raises(argparse.ArgumentTypeError):
+ base.str2bool('In most cases, it depends..')
+
+
+def test_float_or_None() -> None:
+ # Test with a string that can be converted to a float
+ assert base.float_or_None('123.00') == 123.00
+
+ # Test with a string that represents an integer
+ assert base.float_or_None('42') == 42.0
+
+ # Test with a string that represents a negative number
+ assert base.float_or_None('-123.00') == -123.00
+
+ # Test with a string that can't be converted to a float
+ assert base.float_or_None('hello') is None
+
+ # Test with an empty string
+ assert base.float_or_None('') is None
+
+
+def test_int_or_None() -> None:
+ # Test the case when the string can be converted to int
+ assert base.int_or_None('123') == 123
+ assert base.int_or_None('-456') == -456
+ assert base.int_or_None('0') == 0
+ assert base.int_or_None('+789') == 789
+
+ # Test the case when the string cannot be converted to int
+ assert base.int_or_None('abc') is None
+ assert base.int_or_None('123a') is None
+ assert base.int_or_None(' ') is None
+ assert base.int_or_None('') is None
+
+
+def test_check_if_str_is_na() -> None:
+ data = ['N/A', 'foo', 'NaN', '', 'bar', np.nan]
+
+ res = [base.check_if_str_is_na(x) for x in data]
+
+ assert res == [True, False, True, True, False, False]
+
+
+def test_with_parsed_str_na_values() -> None:
+ data = pd.DataFrame(
+ {
+ 'A': [1.00, 2.00, 'N/A', 4.00, 5.00],
+ 'B': ['foo', 'bar', 'NA', 'baz', 'qux'],
+ 'C': [1, 2, 3, 4, 5],
+ }
+ )
+
+ res = base.with_parsed_str_na_values(data)
+ pd.testing.assert_frame_equal(
+ res,
+ pd.DataFrame(
+ {
+ 'A': [1.00, 2.00, np.nan, 4.00, 5.00],
+ 'B': ['foo', 'bar', np.nan, 'baz', 'qux'],
+ 'C': [1, 2, 3, 4, 5],
+ }
+ ),
+ )
+
+
+def test_run_input_specs() -> None:
+ assert Path(base.pelicun_path).name == 'pelicun'
+
+
+def test_dedupe_index() -> None:
+ tuples = [('A', '1'), ('A', '1'), ('B', '2'), ('B', '3')]
+ index = pd.MultiIndex.from_tuples(tuples, names=['L1', 'L2'])
+ data = np.full((4, 1), 0.00)
+ data_pd = pd.DataFrame(data, index=index)
+ data_pd = base.dedupe_index(data_pd)
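+ # dedupe_index appends a counter level to disambiguate duplicates:
+ # the repeated ('A', '1') rows become ('A', '1', '0') and
+ # ('A', '1', '1'), while unique rows simply gain a '0'.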
+ assert data_pd.to_dict() == {
+ 0: {
+ ('A', '1', '0'): 0.0,
+ ('A', '1', '1'): 0.0,
+ ('B', '2', '0'): 0.0,
+ ('B', '3', '0'): 0.0,
+ }
+ }
+
+
+def test_dict_raise_on_duplicates() -> None:
+ res = base.dict_raise_on_duplicates([('A', '1'), ('B', '2')])
+ assert res == {'A': '1', 'B': '2'}
+ with pytest.raises(ValueError, match='duplicate key: A'):
+ base.dict_raise_on_duplicates([('A', '1'), ('A', '2')])
+
+
+def test_parse_units() -> None:
+ # Test the default units are parsed correctly
+ units = base.parse_units()
+ assert isinstance(units, dict)
+ expect = {
+ 'sec': 1.0,
+ 'minute': 60.0,
+ 'hour': 3600.0,
+ 'day': 86400.0,
+ 'm': 1.0,
+ 'mm': 0.001,
+ 'cm': 0.01,
+ 'km': 1000.0,
+ 'in': 0.0254,
+ 'inch': 0.0254,
+ 'ft': 0.3048,
+ 'mile': 1609.344,
+ 'm2': 1.0,
+ 'mm2': 1e-06,
+ 'cm2': 0.0001,
+ 'km2': 1000000.0,
+ 'in2': 0.00064516,
+ 'inch2': 0.00064516,
+ 'ft2': 0.09290304,
+ 'mile2': 2589988.110336,
+ 'm3': 1.0,
+ 'in3': 1.6387064e-05,
+ 'inch3': 1.6387064e-05,
+ 'ft3': 0.028316846592,
+ 'cmps': 0.01,
+ 'mps': 1.0,
+ 'mph': 0.44704,
+ 'inps': 0.0254,
+ 'inchps': 0.0254,
+ 'ftps': 0.3048,
+ 'mps2': 1.0,
+ 'inps2': 0.0254,
+ 'inchps2': 0.0254,
+ 'ftps2': 0.3048,
+ 'g': 9.80665,
+ 'kg': 1.0,
+ 'ton': 1000.0,
+ 'lb': 0.453592,
+ 'N': 1.0,
+ 'kN': 1000.0,
+ 'lbf': 4.4482179868,
+ 'kip': 4448.2179868,
+ 'kips': 4448.2179868,
+ 'Pa': 1.0,
+ 'kPa': 1000.0,
+ 'MPa': 1000000.0,
+ 'GPa': 1000000000.0,
+ 'psi': 6894.751669043338,
+ 'ksi': 6894751.669043338,
+ 'Mpsi': 6894751669.043338,
+ 'A': 1.0,
+ 'V': 1.0,
+ 'kV': 1000.0,
+ 'ea': 1.0,
+ 'unitless': 1.0,
+ 'rad': 1.0,
+ 'C': 1.0,
+ 'USD_2011': 1.0,
+ 'USD': 1.0,
+ 'loss_ratio': 1.0,
+ 'worker_day': 1.0,
+ 'EA': 1.0,
+ 'SF': 0.09290304,
+ 'LF': 0.3048,
+ 'TN': 1000.0,
+ 'AP': 1.0,
+ 'CF': 0.0004719474432,
+ 'KV': 1000.0,
+ 'J': 1.0,
+ 'MJ': 1000000.0,
+ 'test_two': 2.00,
+ 'test_three': 3.00,
+ }
+ for thing, value in units.items():
+ assert thing in expect
+ assert value == expect[thing]
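+ # Note the asymmetry of this check: every parsed unit must appear
+ # in `expect` with a matching factor, while entries present only in
+ # `expect` go unflagged.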
+
+ # Test that additional units are parsed correctly
+ additional_units_file = (
+ 'pelicun/tests/basic/data/base/test_parse_units/additional_units_a.json'
+ )
+ units = base.parse_units(additional_units_file)
+ assert isinstance(units, dict)
+ assert 'year' in units
+ assert units['year'] == 1.00
+
+ # Test that an exception is raised if the additional units file is not found
+ with pytest.raises(FileNotFoundError):
+ units = base.parse_units('invalid/file/path.json')
+
+ # Test that an exception is raised if the additional units file is
+ # not a valid JSON file
+ invalid_json_file = 'pelicun/tests/basic/data/base/test_parse_units/invalid.json'
+ with pytest.raises(
+ ValueError,
+ match='not a valid JSON file.',
+ ):
+ units = base.parse_units(invalid_json_file)
+
+ # Test that an exception is raised if a unit is defined twice in
+ # the additional units file
+ duplicate_units_file = (
+ 'pelicun/tests/basic/data/base/test_parse_units/duplicate2.json'
+ )
+ with pytest.raises(
+ ValueError,
+ match='sec defined twice',
+ ):
+ units = base.parse_units(duplicate_units_file)
+
+ # Test that an exception is raised if a unit conversion factor is not a float
+ invalid_units_file = (
+ 'pelicun/tests/basic/data/base/test_parse_units/not_float.json'
+ )
+ with pytest.raises(TypeError):
+ units = base.parse_units(invalid_units_file)
+
+ # Test that we get an error if some first-level key does not point
+ # to a dictionary
+ invalid_units_file = (
+ 'pelicun/tests/basic/data/base/test_parse_units/not_dict.json'
+ )
+ with pytest.raises(
+ (ValueError, TypeError),
+ match="contains first-level keys that don't point to a dictionary",
+ ):
+ units = base.parse_units(invalid_units_file)
+
+
+def test_unit_conversion() -> None:
+ # Test scalar conversion from feet to meters
+ assert base.convert_units(1.00, 'ft', 'm') == 0.3048
+
+ # Test list conversion from feet to meters
+ feet_values_list = [1.0, 2.0, 3.0]
+ meter_values_list = [0.3048, 0.6096, 0.9144]
+ np.testing.assert_array_almost_equal(
+ base.convert_units(feet_values_list, 'ft', 'm'), meter_values_list
+ )
+
+ # Test numpy array conversion from feet to meters
+ feet_values_array = np.array([1.0, 2.0, 3.0])
+ meter_values_array = np.array([0.3048, 0.6096, 0.9144])
+ np.testing.assert_array_almost_equal(
+ base.convert_units(feet_values_array, 'ft', 'm'), meter_values_array
+ )
+
+ # Test conversion with explicit category
+ assert base.convert_units(1.00, 'ft', 'm', category='length') == 0.3048
+
+ # Test error handling for invalid input type
+ with pytest.raises(TypeError) as excinfo:
+ base.convert_units('one', 'ft', 'm') # type: ignore
+ assert str(excinfo.value) == 'Invalid input type for `values`'
+
+ # Test error handling for unknown unit
+ with pytest.raises(ValueError, match='Unknown unit `xyz`'):
+ base.convert_units(1.00, 'xyz', 'm')
+
+ # Test error handling for mismatched category
+ with pytest.raises(ValueError, match='Unknown unit: `ft`'):
+ base.convert_units(1.00, 'ft', 'm', category='volume')
+
+ # Test error handling unknown category
+ with pytest.raises(ValueError, match='Unknown category: `unknown_category`'):
+ base.convert_units(1.00, 'ft', 'm', category='unknown_category')
+
+ # Test error handling different categories
+ with pytest.raises(
+ ValueError,
+ match='`lb` is a `mass` unit, but `m` is not specified in that category.',
+ ):
+ base.convert_units(1.00, 'lb', 'm')
+
+
+def test_stringterpolation() -> None:
+ func = base.stringterpolation('1,2,3|4,5,6')
+ x_new = np.array([4, 4.5, 5])
+ expected = np.array([1, 1.5, 2])
+ np.testing.assert_array_almost_equal(func(x_new), expected)
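+ # The first comma-separated list supplies the y-values and the
+ # second the x-values: interpolating at x=4.5 returns 1.5, i.e.
+ # linear interpolation between the (4, 1) and (5, 2) points.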
+
+
+def test_invert_mapping() -> None:
+ original_dict = {'a': [1, 2], 'b': [3]}
+ expected = {1: 'a', 2: 'a', 3: 'b'}
+ assert base.invert_mapping(original_dict) == expected
+
+ # with duplicates, raises an error
+ original_dict = {'a': [1, 2], 'b': [2]}
+ with pytest.raises(
+ ValueError, match='Cannot invert mapping with duplicate values.'
+ ):
+ base.invert_mapping(original_dict)
diff --git a/pelicun/tests/basic/test_damage_model.py b/pelicun/tests/basic/test_damage_model.py
new file mode 100644
index 000000000..a2bd9fe88
--- /dev/null
+++ b/pelicun/tests/basic/test_damage_model.py
@@ -0,0 +1,1085 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""These are unit and integration tests on the damage model of pelicun."""
+
+from __future__ import annotations
+
+import warnings
+from copy import deepcopy
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from pelicun import base, uq
+from pelicun.base import ensure_value
+from pelicun.model.damage_model import (
+ DamageModel,
+ DamageModel_Base,
+ DamageModel_DS,
+ _is_for_ds_model,
+)
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tests.basic.test_pelicun_model import TestPelicunModel
+
+if TYPE_CHECKING:
+ from pelicun.assessment import Assessment
+
+
+class TestDamageModel(TestPelicunModel):
+ @pytest.fixture
+ def damage_model(self, assessment_instance: Assessment) -> DamageModel:
+ return deepcopy(assessment_instance.damage)
+
+ def test___init__(self, damage_model: DamageModel) -> None:
+ assert damage_model.log
+ assert damage_model.ds_model
+ with pytest.raises(AttributeError):
+ damage_model.xyz = 123 # type: ignore
+
+ assert damage_model.ds_model.damage_params is None
+ assert damage_model.ds_model.sample is None
+
+ assert len(damage_model._damage_models) == 1
+
+ def test_damage_models(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel(assessment_instance)
+ assert damage_model._damage_models is not None
+ assert len(damage_model._damage_models) == 1
+ assert isinstance(damage_model._damage_models[0], DamageModel_DS)
+
+ def test_load_model_parameters(self, damage_model: DamageModel) -> None:
+ path = (
+ 'pelicun/tests/basic/data/model/test_DamageModel/'
+ 'load_model_parameters/damage_db.csv'
+ )
+ # The file defines the parameters for four components:
+ # component.A, component.B, component.C, and component.incomplete
+ # component.incomplete is flagged incomplete.
+ cmp_set = {'component.A', 'component.B', 'component.incomplete'}
+ # (Omit component.C)
+ with warnings.catch_warnings(record=True) as w:
+ damage_model.load_model_parameters([path], cmp_set, warn_missing=True)
+ assert len(w) == 1
+ assert (
+ 'The damage model does not provide damage information '
+ 'for the following component(s) in the asset model: '
+ "['component.incomplete']."
+ ) in str(w[0].message)
+ damage_parameters = damage_model.ds_model.damage_params
+ assert damage_parameters is not None
+ assert 'component.A' in damage_parameters.index
+ assert 'component.B' in damage_parameters.index
+ assert 'component.C' not in damage_parameters.index
+ assert 'component.incomplete' not in damage_parameters.index
+
+ # make sure unit conversions were done correctly.
+ # component.A: unitless, 3 limit states
+ # component.B: from g -> m/s^2, 2 limit states
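+        # e.g., 0.2 g * 9.80665 (m/s2 per g) = 1.96133 m/s2, and
+        # 0.4 g * 9.80665 = 3.92266 m/s2, matching the assertions below.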
+ assert damage_parameters['LS1']['Theta_0']['component.A'] == 0.02
+ assert damage_parameters['LS2']['Theta_0']['component.A'] == 0.04
+ assert damage_parameters['LS3']['Theta_0']['component.A'] == 0.08
+ assert damage_parameters['LS1']['Theta_0']['component.B'] == 1.96133
+ assert damage_parameters['LS2']['Theta_0']['component.B'] == 3.92266
+ assert pd.isna(damage_parameters['LS3']['Theta_0']['component.B'])
+
+ # If a component is in the set but does not have damage
+ # parameters, no damage parameters are loaded for it.
+ cmp_set = {'not.exist'}
+ with warnings.catch_warnings(record=True) as w:
+ damage_model.load_model_parameters([path], cmp_set, warn_missing=True)
+ assert len(w) == 1
+ assert (
+ 'The damage model does not provide damage '
+ 'information for the following component(s) '
+ "in the asset model: ['not.exist']."
+ ) in str(w[0].message)
+ assert ensure_value(damage_model.ds_model.damage_params).empty
+
+ def test_calculate(self) -> None:
+ # User-facing methods are coupled with other assessment objects
+ # and are tested in the verification examples.
+ pass
+
+ def test_save_sample(self) -> None:
+ # User-facing methods are coupled with other assessment objects
+ # and are tested in the verification examples.
+ pass
+
+ def test_load_sample(self) -> None:
+ # User-facing methods are coupled with other assessment objects
+ # and are tested in the verification examples.
+ pass
+
+ def test__get_component_id_set(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel(assessment_instance)
+
+ damage_model.ds_model.damage_params = pd.DataFrame(
+ {
+ ('LS1', 'Theta_0'): [0.1, 0.2, 0.3],
+ ('LS2', 'Theta_0'): [0.2, 0.3, 0.4],
+ },
+ index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3'], name='ID'),
+ )
+
+ component_id_set = damage_model._get_component_id_set()
+
+ expected_set = {'cmp.1', 'cmp.2', 'cmp.3'}
+
+ assert component_id_set == expected_set
+
+ def test__ensure_damage_parameter_availability(
+ self, assessment_instance: Assessment
+ ) -> None:
+ damage_model = DamageModel(assessment_instance)
+
+ damage_model.ds_model.damage_params = pd.DataFrame(
+ {
+ ('LS1', 'Theta_0'): [0.1, 0.2, 0.3],
+ ('LS2', 'Theta_0'): [0.2, 0.3, 0.4],
+ },
+ index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3'], name='ID'),
+ )
+
+ cmp_set = {'cmp.1', 'cmp.2', 'cmp.3', 'cmp.4'}
+
+ expected_missing_components = ['cmp.4']
+
+ with pytest.warns(PelicunWarning) as record:
+ missing_components = damage_model._ensure_damage_parameter_availability(
+ cmp_set, warn_missing=True
+ )
+ assert missing_components == expected_missing_components
+ assert len(record) == 1
+ assert 'cmp.4' in str(record[0].message)
+
+
+class TestDamageModel_Base(TestPelicunModel):
+ def test___init__(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_Base(assessment_instance)
+ with pytest.raises(AttributeError):
+ damage_model.xyz = 123 # type: ignore
+
+ def test__load_model_parameters(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_Base(assessment_instance)
+
+ damage_model.damage_params = pd.DataFrame(
+ {
+ ('Demand', 'Type'): ['Type1', 'Type2'],
+ ('LS1', 'Theta_0'): [0.1, 0.2],
+ },
+ index=pd.Index(['cmp.1', 'cmp.2'], name='ID'),
+ )
+
+        # New data to be loaded, which redefines cmp.1 and introduces a
+        # new component, cmp.3. Existing entries take precedence, so the
+        # redefinition of cmp.1 is ignored, as asserted below.
+ new_data = pd.DataFrame(
+ {
+ ('Demand', 'Type'): ['Type3', 'Type4'],
+ ('LS1', 'Theta_0'): [0.3, 0.4],
+ },
+ index=pd.Index(['cmp.1', 'cmp.3'], name='ID'),
+ )
+
+ damage_model.load_model_parameters(new_data)
+
+ pd.testing.assert_frame_equal(
+ damage_model.damage_params,
+ pd.DataFrame(
+ {
+ ('Demand', 'Type'): ['Type1', 'Type2', 'Type4'],
+ ('LS1', 'Theta_0'): [0.1, 0.2, 0.4],
+ },
+ index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3'], name='ID'),
+ ),
+ )
+
+ def test__convert_damage_parameter_units(
+ self, assessment_instance: Assessment
+ ) -> None:
+ damage_model = DamageModel_Base(assessment_instance)
+
+ # should have no effect when damage_params is None
+ damage_model.convert_damage_parameter_units()
+
+        # converting units from 'g' to 'm/s2' (1 g = 9.80665 m/s2)
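+        # e.g., 0.5 g * 9.80665 = 4.903325 m/s2, which is the value
+        # expected in the converted frame below.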
+
+ damage_model.damage_params = pd.DataFrame(
+ {
+ ('Demand', 'Unit'): ['g', 'g'],
+ ('LS1', 'Theta_0'): [0.5, 0.2], # Values in g's
+ },
+ index=pd.Index(['cmp.1', 'cmp.2'], name='ID'),
+ )
+
+ damage_model.convert_damage_parameter_units()
+
+ pd.testing.assert_frame_equal(
+ damage_model.damage_params,
+ pd.DataFrame(
+ {
+ ('LS1', 'Theta_0'): [
+ 0.5 * 9.80665,
+ 0.2 * 9.80665,
+ ],
+ },
+ index=pd.Index(['cmp.1', 'cmp.2'], name='ID'),
+ ),
+ )
+
+ def test__remove_incomplete_components(
+ self, assessment_instance: Assessment
+ ) -> None:
+ damage_model = DamageModel_Base(assessment_instance)
+
+ # with damage_model.damage_params set to None this should have
+ # no effect.
+ damage_model.remove_incomplete_components()
+
+ damage_model.damage_params = pd.DataFrame(
+ {
+ ('Demand', 'Type'): ['Type1', 'Type2', 'Type3', 'Type4'],
+ ('Incomplete', ''): [0, 1, 0, 1],
+ },
+ index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3', 'cmp.4'], name='ID'),
+ )
+
+ damage_model.remove_incomplete_components()
+
+ pd.testing.assert_frame_equal(
+ damage_model.damage_params,
+ pd.DataFrame(
+ {
+ ('Demand', 'Type'): ['Type1', 'Type3'],
+ # Only complete components remain
+ ('Incomplete', ''): [0, 0],
+ },
+ index=pd.Index(['cmp.1', 'cmp.3'], name='ID'),
+ ),
+ )
+
+        # when the ('Incomplete', '') column is absent, the method
+        # should likewise have no effect.
+        damage_model.damage_params = damage_model.damage_params.drop(
+            ('Incomplete', ''), axis=1
+        )
+ before = damage_model.damage_params.copy()
+ damage_model.remove_incomplete_components()
+ pd.testing.assert_frame_equal(before, damage_model.damage_params)
+
+ def test__drop_unused_damage_parameters(
+ self, assessment_instance: Assessment
+ ) -> None:
+ damage_model = DamageModel_Base(assessment_instance)
+
+ damage_model.damage_params = pd.DataFrame(
+ index=pd.Index(['cmp.1', 'cmp.2', 'cmp.3', 'cmp.4'], name='ID')
+ )
+
+ cmp_set = {'cmp.1', 'cmp.3'}
+
+ damage_model.drop_unused_damage_parameters(cmp_set)
+
+ pd.testing.assert_frame_equal(
+ damage_model.damage_params,
+ pd.DataFrame(index=pd.Index(['cmp.1', 'cmp.3'], name='ID')),
+ )
+
+ def test__get_pg_batches(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_Base(assessment_instance)
+
+ component_blocks = pd.DataFrame(
+ {'Blocks': [1, 1, 2, 1, 3, 4]},
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.1', '1', '1', '1'),
+ ('cmp.2', '2', '2', '2'),
+ ('cmp.3', '1', '1', '1'),
+ ('cmp.4', '3', '3', '3'),
+ ('cmp.5', '2', '2', '2'),
+ ('cmp.6', '1', '1', '1'),
+ ],
+ names=['cmp', 'loc', 'dir', 'uid'],
+ ),
+ )
+
+ block_batch_size = 3
+
+ missing_components = ['cmp.4', 'cmp.5', 'cmp.6']
+
+ # Attach a mocked damage_params DataFrame to the damage model
+ # instance to simulate the available
+ # components. `_get_pg_batches` doesn't need any other
+ # information from that attribute.
+ damage_model.damage_params = pd.DataFrame(index=['cmp.1', 'cmp.2', 'cmp.3'])
+
+ resulting_batches = damage_model._get_pg_batches(
+ component_blocks, block_batch_size, missing_components
+ )
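+        # A sketch of the expected grouping: the missing components are
+        # dropped first, and the remaining blocks are packed into batches
+        # of at most `block_batch_size` = 3 blocks, i.e. batch 1 holds
+        # cmp.1 (1 block) and cmp.3 (2 blocks), and batch 2 holds cmp.2
+        # (1 block).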
+ pd.testing.assert_frame_equal(
+ resulting_batches,
+ pd.DataFrame(
+ {'Blocks': [1, 2, 1]},
+ index=pd.MultiIndex.from_tuples(
+ [
+ (1, 'cmp.1', '1', '1', '1'),
+ (1, 'cmp.3', '1', '1', '1'),
+ (2, 'cmp.2', '2', '2', '2'),
+ ],
+ names=['Batch', 'cmp', 'loc', 'dir', 'uid'],
+ ),
+ ),
+ )
+
+
+class TestDamageModel_DS(TestDamageModel_Base):
+ def test__obtain_ds_sample(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_DS(assessment_instance)
+
+ demand_sample = pd.DataFrame(
+ {
+ ('PFA', '0', '1'): [5.00, 5.00], # m/s2
+ ('PFA', '0', '2'): [5.00, 5.00],
+ },
+ index=[0, 1],
+ ).rename_axis(columns=['type', 'loc', 'dir'])
+
+ component_blocks = pd.DataFrame(
+ {'Blocks': [1, 2, 1]},
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.1', '1', '1', '1'),
+ ('cmp.2', '1', '1', '1'),
+ ('cmp.3', '1', '1', '1'),
+ ],
+ names=['cmp', 'loc', 'dir', 'uid'],
+ ),
+ )
+
+ block_batch_size = 2
+ scaling_specification = None
+ nondirectional_multipliers = {'ALL': 1.2}
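+        # Since the demands are nondirectional, the 5.00 m/s2 PFA values
+        # are amplified by the 1.2 multiplier to 6.00 m/s2, which exceeds
+        # the 1.0 m/s2 LS1 capacity of cmp.1 and cmp.2 (-> DS1) but not
+        # the 10.0 m/s2 capacity of cmp.3 (-> DS0), as asserted below.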
+
+ damage_model.damage_params = pd.DataFrame(
+ {
+ ('Demand', 'Directional'): [0, 0, 0],
+ ('Demand', 'Offset'): [0, 0, 0],
+ ('Demand', 'Type'): [
+ 'Peak Floor Acceleration',
+ 'Peak Floor Acceleration',
+ 'Peak Floor Acceleration',
+ ],
+ ('Incomplete', ''): [0, 0, 0],
+ ('LS1', 'DamageStateWeights'): [None, None, None],
+ ('LS1', 'Family'): [None, None, None],
+ ('LS1', 'Theta_0'): [1.0, 1.0, 10.0], # m/s2
+ ('LS1', 'Theta_1'): [None, None, None],
+ },
+ index=['cmp.1', 'cmp.2', 'cmp.3'],
+ ).rename_axis('ID')
+
+ damage_model.obtain_ds_sample(
+ demand_sample,
+ component_blocks,
+ block_batch_size,
+ scaling_specification,
+ [],
+ nondirectional_multipliers,
+ )
+ pd.testing.assert_frame_equal(
+ ensure_value(damage_model.ds_sample),
+ pd.DataFrame(
+ {
+ ('cmp.1', '1', '1', '1', '1'): [1, 1],
+ ('cmp.2', '1', '1', '1', '1'): [1, 1],
+ ('cmp.2', '1', '1', '1', '2'): [1, 1],
+ ('cmp.3', '1', '1', '1', '1'): [0, 0],
+ },
+ dtype='int64',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block']),
+ )
+
+ def test__handle_operation(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_DS(assessment_instance)
+
+ assert damage_model._handle_operation(1.00, '+', 1.00) == 2.00
+ assert damage_model._handle_operation(1.00, '-', 1.00) == 0.00
+ assert damage_model._handle_operation(1.00, '*', 4.00) == 4.00
+ assert damage_model._handle_operation(8.00, '/', 8.00) == 1.00
+
+ with pytest.raises(ValueError, match='Invalid operation: `%`'):
+ damage_model._handle_operation(1.00, '%', 1.00)
+
+ def test__generate_dmg_sample(self, assessment_instance: Assessment) -> None:
+ # Create an instance of the damage model
+ damage_model = DamageModel_DS(assessment_instance)
+
+ pgb = pd.DataFrame(
+ {'Blocks': [1]},
+ index=pd.MultiIndex.from_tuples(
+ [('cmp.test', '1', '2', '3')],
+ names=['cmp', 'loc', 'dir', 'uid'],
+ ),
+ )
+
+ damage_params = pd.DataFrame(
+ {
+ ('Demand', 'Directional'): [0.0],
+ ('Demand', 'Offset'): [0.0],
+ ('Demand', 'Type'): ['None Specified'],
+ ('Incomplete', ''): [0],
+ ('LS1', 'DamageStateWeights'): [None], # No randomness
+ ('LS1', 'Family'): [None], # No specific family of distribution
+ ('LS1', 'Theta_0'): [1.0], # Constant value for simplicity
+ ('LS1', 'Theta_1'): [None], # No randomness
+ },
+ index=['cmp.test'],
+ ).rename_axis('ID')
+
+ damage_model.damage_params = damage_params
+
+ scaling_specification = None
+ sample_size = 2
+
+ capacity_sample, lsds_sample = damage_model._generate_dmg_sample(
+ sample_size, pgb, scaling_specification
+ )
+
+ pd.testing.assert_frame_equal(
+ capacity_sample,
+ pd.DataFrame(
+ {
+ ('cmp.test', '1', '2', '3', '1', '1'): [1.0, 1.0],
+ }
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block', 'ls']),
+ )
+
+ pd.testing.assert_frame_equal(
+ lsds_sample.astype('int32'),
+ pd.DataFrame(
+ {
+ ('cmp.test', '1', '2', '3', '1', '1'): [1, 1],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block', 'ls']),
+ )
+
+ def test__create_dmg_RVs(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_DS(assessment_instance)
+
+ pgb = pd.DataFrame(
+ {'Blocks': [1]},
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.A', '1', '2', '3'),
+ ],
+ names=['cmp', 'loc', 'dir', 'uid'],
+ ),
+ )
+
+ damage_params = pd.DataFrame(
+ {
+ ('Demand', 'Directional'): [0.0],
+ ('Demand', 'Offset'): [0.0],
+ ('Demand', 'Type'): ['Peak Floor Acceleration'],
+ ('Incomplete', ''): [0],
+ ('LS1', 'DamageStateWeights'): [
+ '0.40 | 0.10 | 0.50',
+ ],
+ ('LS1', 'Family'): ['lognormal'],
+ ('LS1', 'Theta_0'): [30.00],
+ ('LS1', 'Theta_1'): [0.5],
+ },
+ index=['cmp.A'],
+ ).rename_axis('ID')
+
+ # Attach this DataFrame to the damage model instance
+ damage_model.damage_params = damage_params
+
+ # Define a scaling specification
+ scaling_specification = {'cmp.A-1-2': '*1.20'}
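+        # As exercised here, the scaling specification is keyed on
+        # `cmp-loc-dir` and presumably multiplies the median capacity
+        # (Theta_0) of the matching fragility RVs by 1.20.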
+
+ # Execute the method under test
+ capacity_rv_reg, lsds_rv_reg = damage_model._create_dmg_RVs(
+ pgb, scaling_specification
+ )
+
+ # Now we need to verify the outputs in the registries
+ # This will include checks to ensure random variables were
+ # created correctly.
+ # Example check for presence and properties of a
+ # RandomVariable in the registry:
+ assert 'FRG-cmp.A-1-2-3-1-1' in capacity_rv_reg.RV
+ assert isinstance(
+ capacity_rv_reg.RV['FRG-cmp.A-1-2-3-1-1'],
+ uq.LogNormalRandomVariable,
+ )
+
+ assert 'LSDS-cmp.A-1-2-3-1-1' in lsds_rv_reg.RV
+ assert isinstance(
+ lsds_rv_reg.RV['LSDS-cmp.A-1-2-3-1-1'],
+ uq.MultinomialRandomVariable,
+ )
+
+ def test__evaluate_damage_state(self, assessment_instance: Assessment) -> None:
+ # We define a single component with 3 limit states.
+ # The last limit state can have two damage states, DS3 and DS4.
+ # We test that the damage state assignments are correct.
+ # We intend to have the following DS realizations: 0, 1, 2, 3, 4.
+
+ damage_model = DamageModel_DS(assessment_instance)
+
+ demand_dict = {'edp': np.array([1.00, 3.00, 5.00, 7.00, 7.00])}
+
+ # component, loc, dir, uid, block
+ required_edps = {'edp': [('component.A', '0', '1', '1', '1')]}
+
+ capacity_sample = pd.DataFrame(
+ {
+ # component, loc, dir, uid, block, limit state
+ ('component.A', '0', '1', '1', '1', '1'): np.full(5, 2.00),
+ ('component.A', '0', '1', '1', '1', '2'): np.full(5, 4.00),
+ ('component.A', '0', '1', '1', '1', '3'): np.full(5, 6.00),
+ },
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block', 'ls'])
+
+ lsds_sample = pd.DataFrame(
+ {
+ # component, loc, dir, uid, block, limit state
+ ('component.A', '0', '1', '1', '1', '1'): [1, 1, 1, 1, 1],
+ ('component.A', '0', '1', '1', '1', '2'): [2, 2, 2, 2, 2],
+ ('component.A', '0', '1', '1', '1', '3'): [3, 3, 3, 3, 4],
+ },
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block', 'ls'])
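+        # With limit state capacities of (2, 4, 6), the EDP realizations
+        # map to damage states as follows: 1.00 exceeds nothing -> DS0;
+        # 3.00 exceeds LS1 -> DS1; 5.00 also exceeds LS2 -> DS2; 7.00
+        # exceeds LS3 -> DS3, or DS4 in the last realization, where
+        # `lsds_sample` assigns DS4 to LS3.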
+
+ res = damage_model._evaluate_damage_state(
+ demand_dict, required_edps, capacity_sample, lsds_sample
+ )
+ pd.testing.assert_frame_equal(
+ res,
+ pd.DataFrame(
+ {
+ ('component.A', '0', '1', '1', '1'): [0, 1, 2, 3, 4],
+ },
+ dtype='int64',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block']),
+ )
+
+ def test__prepare_dmg_quantities(self, assessment_instance: Assessment) -> None:
+ #
+ # A case with blocks
+ #
+
+ damage_model = DamageModel_DS(assessment_instance)
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('A', '0', '1', '0', '1'): [-1, 0, 1, 2, 3], # block 1
+ ('A', '0', '1', '0', '2'): [3, -1, 0, 1, 2], # block 2
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block'])
+
+ component_sample = pd.DataFrame(
+ {
+ ('A', '0', '1', '0'): [2.0, 4.0, 6.0, 8.0, 10.0],
+ },
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ component_marginal_parameters = pd.DataFrame(
+ {
+ 'Blocks': [2.00],
+ },
+ index=pd.MultiIndex.from_tuples([('A', '0', '1', '0')]),
+ ).rename_axis(index=['cmp', 'loc', 'dir', 'uid'])
+
+ res = damage_model.prepare_dmg_quantities(
+ component_sample,
+ component_marginal_parameters,
+ dropzero=True,
+ )
+
+ # Each block takes half the quantity.
+ # Realization 0: Expect q=1 at DS3 from block 2
+ # Realization 1: Expect zeros
+ # Realization 2: Expect q=6/2=3 at DS1 from block 1
+ # Realization 3: Expect q=8/2 at DSs 1 and 2
+ # Realization 4: Expect q=10/2 at DSs 2 and 3
+ pd.testing.assert_frame_equal(
+ res,
+ pd.DataFrame(
+ {
+ ('A', '0', '1', '0', '1'): [0.0, 0.0, 3.0, 4.0, 0.0],
+ ('A', '0', '1', '0', '2'): [0.0, 0.0, 0.0, 4.0, 5.0],
+ ('A', '0', '1', '0', '3'): [1.0, 0.0, 0.0, 0.0, 5.0],
+ }
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']),
+ )
+
+ #
+ # A case without blocks
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('A', '0', '1', '0', '1'): [-1, 0, 1, 2, 3],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block'])
+
+ component_sample = pd.DataFrame(
+ {
+ ('A', '0', '1', '0'): [2.0, 4.0, 6.0, 8.0, 10.0],
+ },
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ res = damage_model.prepare_dmg_quantities(
+ component_sample,
+ None,
+ dropzero=True,
+ )
+
+ # Realization 0: Expect NaNs
+ # Realization 1: Expect zeros
+ # Realization 2: Expect q=6 at DS1
+ # Realization 3: Expect q=8 at DS2
+ # Realization 4: Expect q=10 at DS3
+ pd.testing.assert_frame_equal(
+ res,
+ pd.DataFrame(
+ {
+ ('A', '0', '1', '0', '1'): [np.nan, 0.0, 6.0, 0.0, 0.0],
+ ('A', '0', '1', '0', '2'): [np.nan, 0.0, 0.0, 8.0, 0.0],
+ ('A', '0', '1', '0', '3'): [np.nan, 0.0, 0.0, 0.0, 10.0],
+ }
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']),
+ )
+
+ #
+ # Test `dropzero`
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('A', '0', '1', '0', '1'): [-1, 0],
+ ('A', '0', '1', '1', '1'): [1, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'block'])
+
+ component_sample = pd.DataFrame(
+ {
+ ('A', '0', '1', '0'): [2.0, 4.0],
+ ('A', '0', '1', '1'): [6.0, 8.0],
+ },
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ res = damage_model.prepare_dmg_quantities(
+ component_sample,
+ None,
+ dropzero=True,
+ )
+
+ pd.testing.assert_frame_equal(
+ res,
+ pd.DataFrame(
+ {
+ ('A', '0', '1', '1', '1'): [6.0, 0.0],
+ }
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']),
+ )
+
+ res = damage_model.prepare_dmg_quantities(
+ component_sample,
+ None,
+ dropzero=False,
+ )
+
+ pd.testing.assert_frame_equal(
+ res,
+ pd.DataFrame(
+ {
+                    ('A', '0', '1', '0', '0'): [np.nan, 4.0],  # DS0 kept: dropzero=False
+                    ('A', '0', '1', '1', '0'): [0.0, 8.0],  # DS0 kept: dropzero=False
+ ('A', '0', '1', '1', '1'): [6.0, 0.0],
+ }
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']),
+ )
+
+ def test__perform_dmg_task(self, assessment_instance: Assessment) -> None: # noqa: C901
+ damage_model = DamageModel_DS(assessment_instance)
+
+ #
+ # when CMP.B reaches DS1, CMP.A should be DS4
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 0],
+ ('CMP.A', '1', '1', '1'): [0, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '1', '1', '1'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.B': {'DS1': 'CMP.A_DS4'}}
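+        # Damage process task syntax, as used throughout this test: the
+        # key is '<priority>_<source component>' and the value maps a
+        # source damage state to a '<target component>_<target event>'
+        # directive, e.g. 'CMP.A_DS4' forces CMP.A into DS4.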
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [4, 0, 4],
+ ('CMP.A', '1', '1', '1'): [4, 0, 4],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '1', '1', '1'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # when CMP.B reaches DS1, CMP.A should be NA (-1)
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 0],
+ ('CMP.A', '1', '1', '1'): [0, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '1', '1', '1'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.B': {'DS1': 'CMP.A_NA'}}
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [-1, 0, -1],
+ ('CMP.A', '1', '1', '1'): [-1, 0, -1],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '1', '1', '1'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # `-LOC` keyword
+ # when CMP.B reaches DS1, CMP.A should be DS4
+ # matching locations
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 0],
+ ('CMP.A', '2', '1', '0'): [0, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '2', '1', '0'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.B-LOC': {'DS1': 'CMP.A_DS4'}}
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 4],
+ ('CMP.A', '2', '1', '0'): [4, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '2', '1', '0'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # ALL keyword
+ #
+ # Whenever CMP.A reaches DS1, all other components should be
+ # set to DS2.
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [1, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 0],
+ ('CMP.C', '1', '1', '0'): [0, 0, 0],
+ ('CMP.D', '1', '1', '0'): [0, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.A': {'DS1': 'ALL_DS2'}}
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [1, 0, 0],
+ ('CMP.B', '1', '1', '0'): [2, 0, 0],
+ ('CMP.C', '1', '1', '0'): [2, 0, 0],
+ ('CMP.D', '1', '1', '0'): [2, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # NA keyword
+ #
+ # NA translates to -1 representing nan
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 0],
+ ('CMP.A', '1', '1', '1'): [0, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '1', '1', '1'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.B': {'DS1': 'CMP.A_NA'}}
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [-1, 0, -1],
+ ('CMP.A', '1', '1', '1'): [-1, 0, -1],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '1', '1', '1'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # NA keyword combined with `-LOC`
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 0],
+ ('CMP.A', '2', '1', '0'): [0, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '2', '1', '0'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.B-LOC': {'DS1': 'CMP.A_NA'}}
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, -1],
+ ('CMP.A', '2', '1', '0'): [-1, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 1],
+ ('CMP.B', '2', '1', '0'): [1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # NA keyword combined with `-LOC` and `ALL`
+ #
+
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 1],
+ ('CMP.A', '2', '1', '0'): [1, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, 0],
+ ('CMP.B', '2', '1', '0'): [0, 0, 0],
+ ('CMP.C', '1', '1', '0'): [0, 0, 0],
+ ('CMP.C', '2', '1', '0'): [0, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.A-LOC': {'DS1': 'ALL_NA'}}
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+
+ pd.testing.assert_frame_equal(
+ damage_model.ds_sample,
+ pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 1],
+ ('CMP.A', '2', '1', '0'): [1, 0, 0],
+ ('CMP.B', '1', '1', '0'): [0, 0, -1],
+ ('CMP.B', '2', '1', '0'): [-1, 0, 0],
+ ('CMP.C', '1', '1', '0'): [0, 0, -1],
+ ('CMP.C', '2', '1', '0'): [-1, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid']),
+ )
+
+ #
+ # Test warnings: Source component not found
+ #
+ damage_model.ds_sample = pd.DataFrame(
+ {
+ ('CMP.A', '1', '1', '0'): [0, 0, 0],
+ ('CMP.B', '1', '1', '1'): [0, 0, 0],
+ },
+ dtype='int32',
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid'])
+
+ dmg_process = {'1_CMP.C': {'DS1': 'CMP.A_DS4'}}
+ with pytest.warns(PelicunWarning) as record:
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+ assert (
+ 'Source component `CMP.C` in the prescribed damage process not found'
+ ) in str(record.list[0].message)
+
+ #
+ # Test warnings: Target component not found
+ #
+ dmg_process = {'1_CMP.A': {'DS1': 'CMP.C_DS4'}}
+ with pytest.warns(PelicunWarning) as record:
+ for task in dmg_process.items():
+ damage_model.perform_dmg_task(task)
+ assert (
+ 'Target component `CMP.C` in the prescribed damage process not found'
+ ) in str(record.list[0].message)
+
+ #
+ # Test Error: Unable to parse source event
+ #
+ dmg_process = {'1_CMP.A': {'XYZ': 'CMP.B_DS1'}}
+ for task in dmg_process.items():
+ with pytest.raises(
+ ValueError,
+ match='Unable to parse source event in damage process: `XYZ`',
+ ):
+ damage_model.perform_dmg_task(task)
+ dmg_process = {'1_CMP.A': {'DS1': 'CMP.B_ABC'}}
+ for task in dmg_process.items():
+ with pytest.raises(
+ ValueError,
+ match='Unable to parse target event in damage process: `ABC`',
+ ):
+ damage_model.perform_dmg_task(task)
+
+ def test__complete_ds_cols(self, assessment_instance: Assessment) -> None:
+ damage_model = DamageModel_DS(assessment_instance)
+ # the method needs damage parameters
+ damage_model.damage_params = base.convert_to_MultiIndex(
+ pd.read_csv(
+ (
+ 'pelicun/tests/basic/data/model/test_DamageModel/'
+ '_complete_ds_cols/parameters.csv'
+ ),
+ index_col=0,
+ ),
+ axis=1,
+ )
+        # Set up one realization, with 100 units of `many.ds` in damage
+        # state 2 and 100 units of `single.ds` in damage state 1.
+ dmg_sample = pd.DataFrame(
+ {
+ ('many.ds', '0', '0', '0', '2'): [100.00],
+ ('single.ds', '0', '0', '0', '1'): [100.00],
+ },
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds'])
+ out = damage_model.complete_ds_cols(dmg_sample)
+ pd.testing.assert_frame_equal(
+ out,
+ pd.DataFrame(
+ {
+ ('many.ds', '0', '0', '0', '0'): [0.00],
+ ('many.ds', '0', '0', '0', '1'): [0.00],
+ ('many.ds', '0', '0', '0', '2'): [100.00],
+ ('many.ds', '0', '0', '0', '3'): [0.00],
+ ('single.ds', '0', '0', '0', '0'): [0.00],
+ ('single.ds', '0', '0', '0', '1'): [100.00],
+ }
+ ).rename_axis(columns=['cmp', 'loc', 'dir', 'uid', 'ds']),
+ )
+
+
+def test__is_for_ds_model() -> None:
+ data_with_ls1 = pd.DataFrame(
+ {
+ ('LS1', 'Theta_0'): [0.5],
+ ('LS2', 'Theta_0'): [0.6],
+ },
+ index=pd.Index(['cmp.1'], name='ID'),
+ )
+
+ data_without_ls1 = pd.DataFrame(
+ {
+ ('Demand', 'Type'): ['Type1'],
+ ('LS2', 'Theta_0'): [0.6],
+ },
+ index=pd.Index(['cmp.1'], name='ID'),
+ )
+
+ result_with_ls1 = _is_for_ds_model(data_with_ls1)
+ assert result_with_ls1 is True
+
+ result_without_ls1 = _is_for_ds_model(data_without_ls1)
+ assert result_without_ls1 is False
diff --git a/pelicun/tests/basic/test_demand_model.py b/pelicun/tests/basic/test_demand_model.py
new file mode 100644
index 000000000..020084313
--- /dev/null
+++ b/pelicun/tests/basic/test_demand_model.py
@@ -0,0 +1,637 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""These are unit and integration tests on the demand model of pelicun."""
+
+from __future__ import annotations
+
+import tempfile
+import warnings
+from collections import defaultdict
+from copy import deepcopy
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from pelicun.base import ensure_value
+from pelicun.model.demand_model import (
+ DemandModel,
+ _assemble_required_demand_data,
+ _get_required_demand_type,
+)
+from pelicun.tests.basic.test_model import TestModelModule
+
+if TYPE_CHECKING:
+ from pelicun.assessment import Assessment
+
+
+class TestDemandModel(TestModelModule): # noqa: PLR0904
+ @pytest.fixture
+ def demand_model(self, assessment_instance: Assessment) -> DemandModel:
+ return deepcopy(assessment_instance.demand)
+
+ @pytest.fixture
+ def demand_model_with_sample(
+ self, assessment_instance: Assessment
+ ) -> DemandModel:
+ mdl = assessment_instance.demand
+ mdl.load_sample(
+ 'pelicun/tests/basic/data/model/'
+ 'test_DemandModel/load_sample/demand_sample_A.csv'
+ )
+ model_copy = deepcopy(mdl)
+ assert isinstance(model_copy, DemandModel)
+ return model_copy
+
+ @pytest.fixture
+ def calibrated_demand_model(
+ self, demand_model_with_sample: DemandModel
+ ) -> DemandModel:
+ config = {
+ 'ALL': {
+ 'DistributionFamily': 'normal',
+ 'AddUncertainty': 0.00,
+ },
+ 'PID': {
+ 'DistributionFamily': 'lognormal',
+ 'TruncateUpper': '0.06',
+ },
+ 'SA': {
+ 'DistributionFamily': 'empirical',
+ },
+ }
+ demand_model_with_sample.calibrate_model(config)
+ model_copy = deepcopy(demand_model_with_sample)
+ assert isinstance(model_copy, DemandModel)
+ return model_copy
+
+ @pytest.fixture
+ def demand_model_with_sample_b(
+ self, assessment_instance: Assessment
+ ) -> DemandModel:
+ mdl = assessment_instance.demand
+ mdl.load_sample(
+ 'pelicun/tests/basic/data/model/'
+ 'test_DemandModel/load_sample/demand_sample_B.csv'
+ )
+ model_copy = deepcopy(mdl)
+ assert isinstance(model_copy, DemandModel)
+ return model_copy
+
+ @pytest.fixture
+ def demand_model_with_sample_c(
+ self, assessment_instance: Assessment
+ ) -> DemandModel:
+ mdl = assessment_instance.demand
+ mdl.load_sample(
+ 'pelicun/tests/basic/data/model/'
+ 'test_DemandModel/load_sample/demand_sample_C.csv'
+ )
+ model_copy = deepcopy(mdl)
+ assert isinstance(model_copy, DemandModel)
+ return model_copy
+
+ @pytest.fixture
+ def demand_model_with_sample_d(
+ self, assessment_instance: Assessment
+ ) -> DemandModel:
+ mdl = assessment_instance.demand
+ mdl.load_sample(
+ 'pelicun/tests/basic/data/model/'
+ 'test_DemandModel/load_sample/demand_sample_D.csv'
+ )
+ model_copy = deepcopy(mdl)
+ assert isinstance(model_copy, DemandModel)
+ return model_copy
+
+ def test_init(self, demand_model: DemandModel) -> None:
+ assert demand_model.log
+
+ assert demand_model.marginal_params is None
+ assert demand_model.correlation is None
+ assert demand_model.empirical_data is None
+ assert demand_model.user_units is None
+ assert demand_model._RVs is None
+ assert demand_model.sample is None
+
+ def test_save_sample(self, demand_model_with_sample: DemandModel) -> None:
+        # create a temporary directory
+ temp_dir = tempfile.mkdtemp()
+ # save the sample there
+ demand_model_with_sample.save_sample(f'{temp_dir}/temp.csv')
+ with Path(f'{temp_dir}/temp.csv').open(encoding='utf-8') as f:
+ contents = f.read()
+ assert contents == (
+ ',PFA-0-1,PFA-1-1,PID-1-1,SA_0.23-0-1\n'
+ 'Units,inps2,inps2,rad,inps2\n'
+ '0,158.62478,397.04389,0.02672,342.149\n'
+ )
+ res = demand_model_with_sample.save_sample()
+ assert isinstance(res, pd.DataFrame)
+ assert res.to_dict() == {
+ ('PFA', '0', '1'): {0: 158.62478},
+ ('PFA', '1', '1'): {0: 397.04389},
+ ('PID', '1', '1'): {0: 0.02672},
+ ('SA_0.23', '0', '1'): {0: 342.149},
+ }
+
+ def test_load_sample(
+ self,
+ demand_model_with_sample: DemandModel,
+ demand_model_with_sample_b: DemandModel,
+ ) -> None:
+ # retrieve the loaded sample and units
+ obtained_sample = ensure_value(demand_model_with_sample.sample)
+ obtained_units = ensure_value(demand_model_with_sample.user_units)
+
+ obtained_sample_2 = ensure_value(demand_model_with_sample_b.sample)
+ obtained_units_2 = ensure_value(demand_model_with_sample_b.user_units)
+
+        # demand_sample_A.csv and demand_sample_B.csv differ only in
+        # their headers, where the first includes a tag for the hazard
+        # level. Therefore, the two files are expected to result in the
+        # same `obtained_sample`.
+
+ pd.testing.assert_frame_equal(
+ obtained_sample,
+ obtained_sample_2,
+ check_index_type=False,
+ check_column_type=False,
+ )
+ pd.testing.assert_series_equal(
+ obtained_units,
+ obtained_units_2,
+ check_index_type=False,
+ )
+
+ # compare against the expected values for the sample
+ expected_sample = pd.DataFrame(
+ {
+ ('PFA', '0', '1'): [4.029069],
+ ('PFA', '1', '1'): [10.084915],
+ ('PID', '1', '1'): [0.02672],
+ ('SA_0.23', '0', '1'): [8.690585],
+ },
+ index=[0],
+ ).rename_axis(columns=['type', 'loc', 'dir'])
+ pd.testing.assert_frame_equal(
+ expected_sample,
+ obtained_sample,
+ check_index_type=False,
+ check_column_type=False,
+ )
+
+ # compare against the expected values for the units
+ expected_units = pd.Series(
+ ('inps2', 'inps2', 'rad', 'inps2'),
+ index=pd.MultiIndex.from_tuples(
+ (
+ ('PFA', '0', '1'),
+ ('PFA', '1', '1'),
+ ('PID', '1', '1'),
+ ('SA_0.23', '0', '1'),
+ ),
+ names=['type', 'loc', 'dir'],
+ ),
+ name='Units',
+ )
+ pd.testing.assert_series_equal(
+ expected_units,
+ obtained_units,
+ check_index_type=False,
+ )
+
+ def test_estimate_RID(self, demand_model_with_sample: DemandModel) -> None:
+ demands = ensure_value(demand_model_with_sample.sample)['PID']
+ params = {'yield_drift': 0.01}
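+        # estimate_RID maps PIDs to residual drifts; by default this
+        # presumably follows the piecewise FEMA P-58 relationship:
+        # RID = 0 below the yield drift, 0.3 * (PID - yield_drift) up to
+        # four times the yield drift, and PID - 3 * yield_drift beyond.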
+ res = demand_model_with_sample.estimate_RID(demands, params)
+ assert list(res.columns) == [('RID', '1', '1')]
+ with pytest.raises(ValueError, match='Invalid method: `xyz`'):
+ demand_model_with_sample.estimate_RID(demands, params, method='xyz')
+
+ def test_expand_sample_float(
+ self, demand_model_with_sample: DemandModel
+ ) -> None:
+ sample_before = ensure_value(demand_model_with_sample.sample).copy()
+ demand_model_with_sample.expand_sample('test_lab', 1.00, 'unitless')
+ sample_after = ensure_value(demand_model_with_sample.sample).copy()
+ pd.testing.assert_frame_equal(
+ sample_before, sample_after.drop('test_lab', axis=1)
+ )
+ assert sample_after.loc[0, ('test_lab', '0', '1')] == 1.0
+
+ def test_expand_sample_numpy(
+ self, demand_model_with_sample: DemandModel
+ ) -> None:
+ sample_before = ensure_value(demand_model_with_sample.sample).copy()
+        # exercise the numpy-array code path; the float path is covered
+        # by `test_expand_sample_float` above
+        demand_model_with_sample.expand_sample(
+            'test_lab', np.array((1.00,)), 'unitless'
+        )
+ sample_after = ensure_value(demand_model_with_sample.sample).copy()
+ pd.testing.assert_frame_equal(
+ sample_before, sample_after.drop('test_lab', axis=1)
+ )
+ assert sample_after.loc[0, ('test_lab', '0', '1')] == 1.0
+
+ def test_expand_sample_error_no_sample(self, demand_model: DemandModel) -> None:
+ with pytest.raises(
+ ValueError, match='Demand model does not have a sample yet.'
+ ):
+ demand_model.expand_sample('test_lab', np.array((1.00,)), 'unitless')
+
+ def test_expand_sample_error_wrong_shape(
+ self, demand_model_with_sample: DemandModel
+ ) -> None:
+ with pytest.raises(ValueError, match='Incompatible array length.'):
+ demand_model_with_sample.expand_sample(
+ 'test_lab', np.array((1.00, 1.00)), 'unitless'
+ )
+
+ def test_calibrate_model(
+ self,
+ calibrated_demand_model: DemandModel,
+ ) -> None:
+ assert ensure_value(calibrated_demand_model.marginal_params)[
+ 'Family'
+ ].to_list() == [
+ 'normal',
+ 'normal',
+ 'lognormal',
+ 'empirical',
+ ]
+ assert (
+ ensure_value(calibrated_demand_model.marginal_params).loc[
+ ('PID', '1', '1'), 'TruncateUpper'
+ ]
+ == 0.06
+ )
+
+ def test_calibrate_model_censoring(
+ self,
+ demand_model_with_sample_c: DemandModel,
+ ) -> None:
+ # with a config featuring censoring the RIDs
+ config = {
+ 'ALL': {
+ 'DistributionFamily': 'normal',
+ 'AddUncertainty': 0.00,
+ },
+ 'PID': {
+ 'DistributionFamily': 'lognormal',
+ 'CensorUpper': '0.05',
+ },
+ }
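+        # Note the distinction from the truncation test below: censoring
+        # (as configured here) treats demands above the limit as censored
+        # observations during calibration, whereas truncation fits a
+        # distribution whose upper bound is the specified limit.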
+ demand_model_with_sample_c.calibrate_model(config)
+
+ def test_calibrate_model_truncation(
+ self,
+ demand_model_with_sample_c: DemandModel,
+ ) -> None:
+ # with a config that specifies a truncation limit smaller than
+ # the samples
+ config = {
+ 'ALL': {
+ 'DistributionFamily': 'normal',
+ 'AddUncertainty': 0.00,
+ },
+ 'PID': {
+ 'DistributionFamily': 'lognormal',
+ 'TruncateUpper': '0.04',
+ },
+ }
+ demand_model_with_sample_c.calibrate_model(config)
+
+ def test_save_load_model_with_empirical(
+ self, calibrated_demand_model: DemandModel, assessment_instance: Assessment
+ ) -> None:
+ # a model that has empirical marginal parameters
+ temp_dir = tempfile.mkdtemp()
+ calibrated_demand_model.save_model(f'{temp_dir}/temp')
+ assert Path(f'{temp_dir}/temp_marginals.csv').exists()
+ assert Path(f'{temp_dir}/temp_empirical.csv').exists()
+ assert Path(f'{temp_dir}/temp_correlation.csv').exists()
+
+ # Load model to a different DemandModel instance to verify
+ new_demand_model = assessment_instance.demand
+ new_demand_model.load_model(f'{temp_dir}/temp')
+ pd.testing.assert_frame_equal(
+ ensure_value(calibrated_demand_model.marginal_params),
+ ensure_value(new_demand_model.marginal_params),
+ atol=1e-4,
+ check_index_type=False,
+ check_column_type=False,
+ )
+ pd.testing.assert_frame_equal(
+ ensure_value(calibrated_demand_model.correlation),
+ ensure_value(new_demand_model.correlation),
+ atol=1e-4,
+ check_index_type=False,
+ check_column_type=False,
+ )
+ pd.testing.assert_frame_equal(
+ ensure_value(calibrated_demand_model.empirical_data),
+ ensure_value(new_demand_model.empirical_data),
+ atol=1e-4,
+ check_index_type=False,
+ check_column_type=False,
+ )
+
+ def test_save_load_model_without_empirical(
+ self,
+ demand_model_with_sample_c: DemandModel,
+ assessment_instance: Assessment,
+ ) -> None:
+ # a model that does not have empirical marginal parameters
+ temp_dir = tempfile.mkdtemp()
+ config = {
+ 'ALL': {
+ 'DistributionFamily': 'normal',
+ 'AddUncertainty': 0.00,
+ },
+ 'PID': {
+ 'DistributionFamily': 'lognormal',
+ 'TruncateUpper': '0.04',
+ },
+ }
+ demand_model_with_sample_c.calibrate_model(config)
+ demand_model_with_sample_c.save_model(f'{temp_dir}/temp')
+ assert Path(f'{temp_dir}/temp_marginals.csv').exists()
+ assert Path(f'{temp_dir}/temp_correlation.csv').exists()
+
+ # Load model to a different DemandModel instance to verify
+ new_demand_model = assessment_instance.demand
+ new_demand_model.load_model(f'{temp_dir}/temp')
+ pd.testing.assert_frame_equal(
+ ensure_value(demand_model_with_sample_c.marginal_params),
+ ensure_value(new_demand_model.marginal_params),
+ )
+ pd.testing.assert_frame_equal(
+ ensure_value(demand_model_with_sample_c.correlation),
+ ensure_value(new_demand_model.correlation),
+ )
+ assert demand_model_with_sample_c.empirical_data is None
+ assert new_demand_model.empirical_data is None
+
+ def test_generate_sample_exceptions(self, demand_model: DemandModel) -> None:
+ # generating a sample from a non calibrated model should fail
+ with pytest.raises(
+ ValueError, match='Model parameters have not been specified'
+ ):
+ demand_model.generate_sample(
+ {'SampleSize': 3, 'PreserveRawOrder': False}
+ )
+
+ def test_generate_sample(self, calibrated_demand_model: DemandModel) -> None:
+ calibrated_demand_model.generate_sample(
+ {'SampleSize': 3, 'PreserveRawOrder': False}
+ )
+
+ # get the generated demand sample
+ res = calibrated_demand_model.save_sample(save_units=True)
+ assert isinstance(res, tuple)
+
+ obtained_sample, obtained_units = res
+
+ # compare against the expected values for the sample
+ expected_sample = pd.DataFrame(
+ {
+ ('PFA', '0', '1'): [158.624160, 158.624160, 158.624160],
+ ('PFA', '1', '1'): [397.042985, 397.042985, 397.042985],
+ ('PID', '1', '1'): [0.02672, 0.02672, 0.02672],
+ ('SA_0.23', '0', '1'): [342.148783, 342.148783, 342.148783],
+ },
+ index=pd.Index([0, 1, 2], dtype='object'),
+ ).rename_axis(columns=['type', 'loc', 'dir'])
+ pd.testing.assert_frame_equal(
+ expected_sample,
+ obtained_sample,
+ check_exact=False,
+ atol=1e-4,
+ check_index_type=False,
+ check_column_type=False,
+ )
+
+ # compare against the expected values for the units
+ expected_units = pd.Series(
+ ('inps2', 'inps2', 'rad', 'inps2'),
+ index=pd.MultiIndex.from_tuples(
+ (
+ ('PFA', '0', '1'),
+ ('PFA', '1', '1'),
+ ('PID', '1', '1'),
+ ('SA_0.23', '0', '1'),
+ ),
+ names=('type', 'loc', 'dir'),
+ ),
+ name='Units',
+ )
+ pd.testing.assert_series_equal(
+ expected_units,
+ obtained_units,
+ check_index_type=False,
+ )
+
+ def test_generate_sample_with_demand_cloning(
+ self, assessment_instance: Assessment
+ ) -> None:
+        demand_model = assessment_instance.demand
+        # contains PGV-0-1, PGV-1-1, PGV-2-1, and PGA-0-1
+        # PGA-0-1 is not cloned.
+        demand_model.load_sample(
+ 'pelicun/tests/basic/data/model/'
+ 'test_DemandModel/generate_sample_with_demand_cloning/sample.csv'
+ )
+ demand_model.calibrate_model(
+ {
+ 'ALL': {
+ 'DistributionFamily': 'lognormal',
+ },
+ }
+ )
+ with warnings.catch_warnings(record=True) as w:
+ demand_model.generate_sample(
+ {
+ 'SampleSize': 1000,
+ 'DemandCloning': {
+ 'PGV-0-1': ['PGV-0-1', 'PGV-0-2', 'PGV-0-3'],
+ 'PGV-1-1': ['PGV-1-1', 'PGV-1-2', 'PGV-1-3'],
+ 'PGV-2-1': ['PGV-2-1', 'PGV-2-2', 'PGV-2-3'],
+ 'not_present': ['X-0-0', 'Y-0-0', 'Z-0-0'],
+ },
+ }
+ )
+ assert len(w) == 1
+ assert (
+ 'The demand cloning configuration lists columns '
+ "that are not present in the original demand sample's "
+ "columns: ['not_present']."
+ ) in str(w[0].message)
+ # we'll just get a warning for the `not_present` entry
+ assert ensure_value(demand_model.sample).columns.to_list() == [
+ ('PGA', '0', '1'),
+ ('PGV', '0', '1'),
+ ('PGV', '0', '2'),
+ ('PGV', '0', '3'),
+ ('PGV', '1', '1'),
+ ('PGV', '1', '2'),
+ ('PGV', '1', '3'),
+ ('PGV', '2', '1'),
+ ('PGV', '2', '2'),
+ ('PGV', '2', '3'),
+ ]
+ assert np.array_equal(
+ demand_model.sample['PGV', '0', '1'].values, # type: ignore
+ demand_model.sample['PGV', '0', '3'].values, # type: ignore
+ )
+ # exceptions
+ # Duplicate entries in demand cloning configuration
+ with pytest.raises(
+ ValueError, match='Duplicate entries in demand cloning configuration.'
+ ):
+ demand_model.generate_sample(
+ {
+ 'SampleSize': 1000,
+ 'DemandCloning': {
+ 'PGV-0-1': ['PGV-0-1', 'PGV-0-2', 'PGV-0-3'],
+ 'PGV-1-1': ['PGV-0-1', 'PGV-1-2', 'PGV-1-3'],
+ 'PGV-2-1': ['PGV-0-1', 'PGV-2-2', 'PGV-2-3'],
+ },
+ }
+ )
+
+ def test__get_required_demand_type(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # Simple case: single demand
+ damage_model = assessment_instance.damage
+ cmp_set = {'testing.component'}
+ damage_model.load_model_parameters(
+ [
+ 'pelicun/tests/basic/data/model/test_DemandModel/'
+ '_get_required_demand_type/damage_db_testing_single.csv'
+ ],
+ cmp_set,
+ )
+ pgb = pd.DataFrame(
+ {('testing.component', '1', '1', '1'): [1]}, index=['Blocks']
+ ).T.rename_axis(index=['cmp', 'loc', 'dir', 'uid'])
+ demand_offset = {'PFA': 0}
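+        # The demand offset presumably shifts the location from which a
+        # demand of the given type is taken; an offset of 0 keeps the
+        # component's own location.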
+ required = _get_required_demand_type(
+ ensure_value(damage_model.ds_model.damage_params), pgb, demand_offset
+ )
+ expected = defaultdict(
+ list,
+ {(('PID-1-1',), None): [('testing.component', '1', '1', '1')]},
+ )
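+        # Each key pairs the tuple of required demands with an optional
+        # expression for combining them (None when a single demand
+        # suffices); the values list the component blocks that need them.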
+ assert required == expected
+
+ # Utility demand case: two demands are required
+ damage_model = assessment_instance.damage
+ cmp_set = {'testing.component'}
+ damage_model.load_model_parameters(
+ [
+ 'pelicun/tests/basic/data/model/test_DemandModel/'
+ '_get_required_demand_type/damage_db_testing_utility.csv'
+ ],
+ cmp_set,
+ )
+ pgb = pd.DataFrame(
+ {('testing.component', '1', '1', '1'): [1]}, index=['Blocks']
+ ).T.rename_axis(index=['cmp', 'loc', 'dir', 'uid'])
+ demand_offset = {'PFA': 0}
+ required = _get_required_demand_type(
+ ensure_value(damage_model.ds_model.damage_params), pgb, demand_offset
+ )
+ expected = defaultdict(
+ list,
+ {
+ (('PID-1-1', 'PFA-1-1'), 'sqrt(X1^2+X2^2)'): [ # type: ignore
+ ('testing.component', '1', '1', '1')
+ ]
+ },
+ )
+ assert required == expected
+
+ def test__assemble_required_demand_data(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # Utility demand case: two demands are required
+ damage_model = assessment_instance.damage
+ cmp_set = {'testing.component'}
+ damage_model.load_model_parameters(
+ [
+ 'pelicun/tests/basic/data/model/test_DemandModel/'
+ '_get_required_demand_type/damage_db_testing_single.csv'
+ ],
+ cmp_set,
+ )
+ required_edps = defaultdict(
+ list,
+ {
+ (('PID-1-1', 'PFA-1-1'), 'sqrt(X1^2+X2^2)'): [
+ ('testing.component', '1', '1', '1')
+ ]
+ },
+ )
+ nondirectional_multipliers = {'ALL': 1.00}
+ demand_sample = pd.DataFrame(
+ {
+ ('PID', '1', '1'): np.full(5, 3.00),
+ ('PFA', '1', '1'): np.full(5, 4.00),
+ }
+ )
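+        # With PID = 3.00 and PFA = 4.00, the utility expression yields
+        # sqrt(3^2 + 4^2) = 5.00 for every realization.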
+ demand_data = _assemble_required_demand_data(
+ required_edps, # type: ignore
+ nondirectional_multipliers,
+ demand_sample,
+ )
+ expected = {
+ (('PID-1-1', 'PFA-1-1'), 'sqrt(X1^2+X2^2)'): np.array(
+ [5.0, 5.0, 5.0, 5.0, 5.0]
+ )
+ }
+ assert demand_data.keys() == expected.keys()
+ for key in demand_data:
+ assert np.all(demand_data[key] == expected[key])
diff --git a/pelicun/tests/test_file_io.py b/pelicun/tests/basic/test_file_io.py
similarity index 60%
rename from pelicun/tests/test_file_io.py
rename to pelicun/tests/basic/test_file_io.py
index d2241d306..06444a707 100644
--- a/pelicun/tests/test_file_io.py
+++ b/pelicun/tests/basic/test_file_io.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -38,57 +37,57 @@
# Adam Zsarnóczay
# John Vouvakis Manousakis
-"""
-These are unit and integration tests on the file_io module of pelicun.
-"""
+"""These are unit and integration tests on the file_io module of pelicun."""
+
+from __future__ import annotations
import tempfile
-import os
-import pytest
+from pathlib import Path
+
import numpy as np
import pandas as pd
-from pelicun import file_io
+import pytest
-# pylint: disable=missing-function-docstring
-# pylint: disable=missing-class-docstring
+from pelicun import base, file_io
+from pelicun.pelicun_warnings import PelicunWarning
# The tests maintain the order of definitions of the `file_io.py` file.
-def test_save_to_csv():
+def test_save_to_csv() -> None:
# Test saving with orientation 0
- data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]})
- units = pd.Series(["meters", "meters"], index=["A", "B"])
- unit_conversion_factors = {"meters": 0.001}
+ data = pd.DataFrame({'A': [1e-3, 2e-3, 3e-3], 'B': [4e-3, 5e-3, 6e-3]})
+ units = pd.Series(['meters', 'meters'], index=['A', 'B'])
+ unit_conversion_factors = {'meters': 0.001}
# Save to a temporary file
with tempfile.TemporaryDirectory() as tmpdir:
- filepath = os.path.join(tmpdir, 'foo.csv')
+ filepath = Path(tmpdir) / 'foo.csv'
file_io.save_to_csv(
data, filepath, units, unit_conversion_factors, orientation=0
)
- assert os.path.isfile(filepath)
+ assert Path(filepath).is_file()
# Check that the file contains the expected data
- with open(filepath, 'r', encoding='utf-8') as f:
+ with Path(filepath).open(encoding='utf-8') as f:
contents = f.read()
assert contents == (
',A,B\n0,meters,meters\n0,1.0,4.0' '\n1,2.0,5.0\n2,3.0,6.0\n'
)
# Test saving with orientation 1
- data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]})
- units = pd.Series(["meters", "meters"], index=["A", "B"])
- unit_conversion_factors = {"meters": 0.001}
+ data = pd.DataFrame({'A': [1e-3, 2e-3, 3e-3], 'B': [4e-3, 5e-3, 6e-3]})
+ units = pd.Series(['meters', 'meters'], index=['A', 'B'])
+ unit_conversion_factors = {'meters': 0.001}
# Save to a temporary file
with tempfile.TemporaryDirectory() as tmpdir:
- filepath = os.path.join(tmpdir, 'bar.csv')
+ filepath = Path(tmpdir) / 'bar.csv'
file_io.save_to_csv(
data, filepath, units, unit_conversion_factors, orientation=1
)
- assert os.path.isfile(filepath)
+ assert Path(filepath).is_file()
# Check that the file contains the expected data
- with open(filepath, 'r', encoding='utf-8') as f:
+ with Path(filepath).open(encoding='utf-8') as f:
contents = f.read()
assert contents == (
',0,A,B\n0,,0.001,0.004\n1,,0.002,' '0.005\n2,,0.003,0.006\n'
@@ -98,57 +97,56 @@ def test_save_to_csv():
# edge cases
#
- data = pd.DataFrame({"A": [1e-3, 2e-3, 3e-3], "B": [4e-3, 5e-3, 6e-3]})
- units = pd.Series(["meters", "meters"], index=["A", "B"])
+ data = pd.DataFrame({'A': [1e-3, 2e-3, 3e-3], 'B': [4e-3, 5e-3, 6e-3]})
+ units = pd.Series(['meters', 'meters'], index=['A', 'B'])
# units given, without unit conversion factors
- unit_conversion_factors = None
- with pytest.raises(ValueError):
- with tempfile.TemporaryDirectory() as tmpdir:
- filepath = os.path.join(tmpdir, 'foo.csv')
- file_io.save_to_csv(
- data, filepath, units, unit_conversion_factors, orientation=0
- )
+    with tempfile.TemporaryDirectory() as tmpdir, pytest.raises(
+        ValueError,
+        match='When `units` is not None, `unit_conversion_factors` must be provided.',
+    ):
+        # build the path inside the context so the directory exists
+        filepath = Path(tmpdir) / 'foo.csv'
+        file_io.save_to_csv(
+            data, filepath, units, unit_conversion_factors=None, orientation=0
+        )
- unit_conversion_factors = {"meters": 0.001}
+ unit_conversion_factors = {'meters': 0.001}
# not csv extension
- with pytest.raises(ValueError):
- with tempfile.TemporaryDirectory() as tmpdir:
- filepath = os.path.join(tmpdir, 'foo.xyz')
- file_io.save_to_csv(
- data, filepath, units, unit_conversion_factors, orientation=0
- )
+    with tempfile.TemporaryDirectory() as tmpdir, pytest.raises(
+        ValueError,
+        match=('Please use the `.csv` file extension. Received file name is '),
+    ):
+        filepath = Path(tmpdir) / 'foo.xyz'
+        file_io.save_to_csv(
+            data, filepath, units, unit_conversion_factors, orientation=0
+        )
# no data, log a complaint
- # Logger object used for a single test
- class Logger:
- def __init__(self):
- self.logs = []
-
- def msg(self, text, **kwargs):
- # Keep track of the contents of the logging calls
- self.logs.append((text, kwargs))
-
- mylogger = Logger()
- data = None
+ mylogger = base.Logger(
+ log_file=None, verbose=True, log_show_ms=False, print_log=True
+ )
with tempfile.TemporaryDirectory() as tmpdir:
- filepath = os.path.join(tmpdir, 'foo.csv')
- file_io.save_to_csv(
- data,
- filepath,
- units,
- unit_conversion_factors,
- orientation=0,
- log=mylogger,
- )
- assert mylogger.logs[-1][0] == 'WARNING: Data was empty, no file saved.'
+ filepath = Path(tmpdir) / 'foo.csv'
+ with pytest.warns(PelicunWarning) as record:
+ file_io.save_to_csv(
+ None,
+ filepath,
+ units,
+ unit_conversion_factors,
+ orientation=0,
+ log=mylogger,
+ )
+ assert 'Data was empty, no file saved.' in str(record.list[0].message)
-def test_substitute_default_path():
+def test_substitute_default_path() -> None:
prior_path = file_io.base.pelicun_path
- file_io.base.pelicun_path = 'some_path'
- input_paths = ['PelicunDefault/data/file1.txt', '/data/file2.txt']
+ file_io.base.pelicun_path = Path('some_path')
+ input_paths: list[str | pd.DataFrame] = [
+ 'PelicunDefault/data/file1.txt',
+ '/data/file2.txt',
+ ]
expected_paths = [
'some_path/resources/SimCenterDBDL/data/file1.txt',
'/data/file2.txt',
@@ -158,29 +156,31 @@ def test_substitute_default_path():
file_io.base.pelicun_path = prior_path
-def test_load_data():
+def test_load_data() -> None:
# test loading data with orientation 0
- filepath = 'pelicun/tests/data/file_io/test_load_data/units.csv'
- unit_conversion_factors = {"inps2": 0.0254, "rad": 1.00}
+ filepath = 'pelicun/tests/basic/data/file_io/test_load_data/units.csv'
+ unit_conversion_factors = {'inps2': 0.0254, 'rad': 1.00}
data = file_io.load_data(filepath, unit_conversion_factors)
- assert np.array_equal(data.index.values, np.array(range(6)))
- assert data.shape == (6, 19)
- assert isinstance(data.columns, pd.core.indexes.multi.MultiIndex)
- assert data.columns.nlevels == 4
+ assert np.array_equal(data.index.values, np.array(range(6))) # type: ignore
+ assert data.shape == (6, 19) # type: ignore
+ assert isinstance(data.columns, pd.core.indexes.multi.MultiIndex) # type: ignore
+ assert data.columns.nlevels == 4 # type: ignore
- _, units = file_io.load_data(filepath, unit_conversion_factors, return_units=True)
+ _, units = file_io.load_data(
+ filepath, unit_conversion_factors, return_units=True
+ )
for item in unit_conversion_factors:
- assert item in units.unique()
+ assert item in units.unique() # type: ignore
- filepath = 'pelicun/tests/data/file_io/test_load_data/no_units.csv'
+ filepath = 'pelicun/tests/basic/data/file_io/test_load_data/no_units.csv'
data_nounits = file_io.load_data(filepath, {})
assert isinstance(data_nounits, pd.DataFrame)
# test loading data with orientation 1
- filepath = 'pelicun/tests/data/file_io/test_load_data/orient_1.csv'
+ filepath = 'pelicun/tests/basic/data/file_io/test_load_data/orient_1.csv'
data = file_io.load_data(
filepath, unit_conversion_factors, orientation=1, reindex=False
)
@@ -189,8 +189,8 @@ def test_load_data():
assert data.index.nlevels == 4
# with convert=None
- filepath = 'pelicun/tests/data/file_io/test_load_data/orient_1_units.csv'
- unit_conversion_factors = {"g": 1.00, "rad": 1.00}
+ filepath = 'pelicun/tests/basic/data/file_io/test_load_data/orient_1_units.csv'
+ unit_conversion_factors = {'g': 1.00, 'rad': 1.00}
data = file_io.load_data(
filepath, unit_conversion_factors, orientation=1, reindex=False
)
@@ -202,7 +202,7 @@ def test_load_data():
data = file_io.load_data(
filepath, unit_conversion_factors, orientation=1, reindex=True
)
- assert np.array_equal(data.index.values, np.array(range(10)))
+ assert np.array_equal(data.index.values, np.array(range(10))) # type: ignore
#
# edge cases
@@ -212,7 +212,10 @@ def test_load_data():
with pytest.raises(FileNotFoundError):
file_io.load_from_file('/')
# exception: not a .csv file
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match='Unexpected file type received when trying to load from csv',
+ ):
file_io.load_from_file('pelicun/base.py')
diff --git a/pelicun/tests/basic/test_loss_model.py b/pelicun/tests/basic/test_loss_model.py
new file mode 100644
index 000000000..d4aab645f
--- /dev/null
+++ b/pelicun/tests/basic/test_loss_model.py
@@ -0,0 +1,1214 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""These are unit and integration tests on the loss model of pelicun."""
+
+from __future__ import annotations
+
+import re
+from copy import deepcopy
+from itertools import product
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from pelicun import model, uq
+from pelicun.base import ensure_value
+from pelicun.model.loss_model import (
+ LossModel,
+ RepairModel_DS,
+ RepairModel_LF,
+ _is_for_ds_model,
+ _is_for_lf_model,
+)
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tests.basic.test_pelicun_model import TestPelicunModel
+
+if TYPE_CHECKING:
+ from pelicun.assessment import Assessment
+ from pelicun.model.asset_model import AssetModel
+
+
+class TestLossModel(TestPelicunModel):
+ @pytest.fixture
+ def loss_model(self, assessment_instance: Assessment) -> LossModel:
+ return deepcopy(assessment_instance.loss)
+
+ @pytest.fixture
+ def asset_model_empty(self, assessment_instance: Assessment) -> AssetModel:
+ return deepcopy(assessment_instance.asset)
+
+ @pytest.fixture
+ def asset_model_a(self, asset_model_empty: AssetModel) -> AssetModel:
+ asset = deepcopy(asset_model_empty)
+ asset.cmp_marginal_params = pd.DataFrame(
+ {
+ ('Theta_0'): [1.0, 1.0, 1.0],
+ ('Blocks'): [1, 1, 1],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.A', '1', '1', '0'),
+ ('cmp.B', '1', '1', '0'),
+ ('cmp.C', '1', '1', '0'),
+ ]
+ ),
+ ).rename_axis(index=['cmp', 'loc', 'dir', 'uid'])
+ asset.generate_cmp_sample(sample_size=10)
+ return asset
+
+ @pytest.fixture
+ def loss_model_with_ones(self, assessment_instance: Assessment) -> LossModel:
+ loss_model = assessment_instance.loss
+
+ # add artificial values to the samples
+ data_ds = {}
+ for (
+ decision_variable,
+ consequence,
+ component,
+ damage_state,
+ location,
+ direction,
+ uid,
+ ) in product(
+ ('Cost', 'Carbon'),
+ ('cmp.A.consequence', 'cmp.B.consequence'),
+ ('cmp.A', 'cmp.B'),
+ ('DS1', 'DS2'),
+ ('1', '2'), # loc
+ ('1', '2'), # dir
+ ('uid1', 'uid2'),
+ ):
+ data_ds[
+ decision_variable,
+ consequence,
+ component,
+ damage_state,
+ location,
+ direction,
+ uid,
+ ] = [1.00, 1.00, 1.00]
+ loss_model.ds_model.sample = pd.DataFrame(data_ds).rename_axis(
+ columns=['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid']
+ )
+ data_lf = {}
+ for (
+ decision_variable,
+ consequence,
+ component,
+ location,
+ direction,
+ uid,
+ ) in product(
+ ('Cost', 'Carbon'),
+ ('cmp.A.consequence', 'cmp.B.consequence'),
+ ('cmp.A', 'cmp.B'),
+ ('1', '2'), # loc
+ ('1', '2'), # dir
+ ('uid1', 'uid2'),
+ ):
+ data_lf[
+ decision_variable,
+ consequence,
+ component,
+ location,
+ direction,
+ uid,
+ ] = [1.00, 1.00, 1.00]
+ loss_model.lf_model.sample = pd.DataFrame(data_lf).rename_axis(
+ columns=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid']
+ )
+
+ return loss_model
+
+ def test___init__(self, loss_model: LossModel) -> None:
+ assert loss_model.log
+ assert loss_model.ds_model
+ with pytest.raises(AttributeError):
+ loss_model.xyz = 123 # type: ignore
+
+ assert loss_model.ds_model.loss_params is None
+ assert loss_model.ds_model.sample is None
+
+ assert len(loss_model._loss_models) == 2
+
+ def test_decision_variables(self, loss_model: LossModel) -> None:
+ dvs = ('Cost', 'Time')
+ assert loss_model.decision_variables == dvs
+ assert loss_model.ds_model.decision_variables == dvs
+ assert loss_model.lf_model.decision_variables == dvs
+
+ def test_add_loss_map(
+ self, loss_model: LossModel, asset_model_a: AssetModel
+ ) -> None:
+ loss_model._asmnt.asset = asset_model_a
+
+ loss_map = pd.DataFrame(
+ {
+ 'Repair': ['consequence.A', 'consequence.B'],
+ },
+ index=['cmp.A', 'cmp.B'],
+ )
+ loss_model.add_loss_map(loss_map)
+ pd.testing.assert_frame_equal(ensure_value(loss_model._loss_map), loss_map)
+ for contained_model in loss_model._loss_models:
+ pd.testing.assert_frame_equal(
+ ensure_value(contained_model.loss_map), loss_map
+ )
+
+ def test_load_model_parameters(
+ self, loss_model: LossModel, asset_model_a: AssetModel
+ ) -> None:
+ loss_model._asmnt.asset = asset_model_a
+ loss_model.decision_variables = ('my_RV',)
+ loss_map = pd.DataFrame(
+ {
+ 'Repair': ['consequence.A', 'consequence.B', 'consequence.F'],
+ },
+ index=['cmp.A', 'cmp.B', 'cmp.F'],
+ )
+ loss_model.add_loss_map(loss_map)
+ # consequence.A will be for the DS model
+ # consequence.B will be for the LF model
+ # consequence.D should be removed from the DS parameters
+ # consequence.E should be removed from the LF parameters
+ # consequence.F should be missing
+ ds_loss_parameters = pd.DataFrame(
+ {
+ ('Quantity', 'Unit'): ['1 EA'] * 2,
+ ('DV', 'Unit'): ['1 EA'] * 2,
+ ('DS1', 'Theta_0'): ['0.00,1.00|0.00,1.00'] * 2,
+ },
+ index=pd.MultiIndex.from_tuples(
+ [('consequence.A', 'my_RV'), ('consequence.D', 'my_RV')]
+ ),
+ )
+ lf_loss_parameters = pd.DataFrame(
+ {
+ ('Quantity', 'Unit'): ['1 EA'] * 2,
+ ('DV', 'Unit'): ['1 EA'] * 2,
+ ('Demand', 'Unit'): ['1 EA'] * 2,
+ ('LossFunction', 'Theta_0'): ['0.00,1.00|0.00,1.00'] * 2,
+ },
+ index=pd.MultiIndex.from_tuples(
+ [('consequence.B', 'my_RV'), ('consequence.E', 'my_RV')]
+ ),
+ )
+ with pytest.warns(PelicunWarning) as record:
+ loss_model.load_model_parameters(
+ [ds_loss_parameters, lf_loss_parameters]
+ )
+
+ # assert len(record) == 1
+ # TODO(JVM): re-enable the line above once we address other
+ # warnings, and change indexing to [0] below.
+
+ assert (
+ 'The loss model does not provide loss information '
+ 'for the following component(s) in the asset '
+ "model: [('consequence.F', 'my_RV')]."
+ ) in str(record[-1].message)
+
+ def test__loss_models(self, loss_model: LossModel) -> None:
+ models = loss_model._loss_models
+ assert len(models) == 2
+ assert isinstance(models[0], RepairModel_DS)
+ assert isinstance(models[1], RepairModel_LF)
+
+ def test__loss_map(self, loss_model: LossModel) -> None:
+ loss_map = pd.DataFrame(
+ {
+ 'Repair': ['consequence_A', 'consequence_B'],
+ },
+ index=['cmp_A', 'cmp_B'],
+ )
+ # test setter
+ loss_model._loss_map = loss_map
+ # test getter
+ pd.testing.assert_frame_equal(ensure_value(loss_model._loss_map), loss_map)
+ for contained_model in loss_model._loss_models:
+ pd.testing.assert_frame_equal(
+ ensure_value(contained_model.loss_map), loss_map
+ )
+
+ def test__missing(self, loss_model: LossModel) -> None:
+ missing = {
+ ('missing.component', 'Time'),
+ ('missing.component', 'Energy'),
+ }
+ # test setter
+ loss_model._missing = missing
+ # test getter
+ assert loss_model._missing == missing
+ for contained_model in loss_model._loss_models:
+ assert contained_model.missing == missing
+
+ def test__ensure_loss_parameter_availability(
+ self, assessment_instance: Assessment
+ ) -> None:
+ loss_model = LossModel(assessment_instance)
+
+ # Only consider `DecisionVariableXYZ`
+ loss_model.decision_variables = ('DecisionVariableXYZ',)
+
+ # A, B should be in the ds model
+ # C, D should be in the lf model
+ # E should be missing
+
+ loss_map = pd.DataFrame(
+ {
+ 'Repair': [f'consequence_{x}' for x in ('A', 'B', 'C', 'D', 'E')],
+ },
+ index=[f'cmp_{x}' for x in ('A', 'B', 'C', 'D', 'E')],
+ )
+
+ loss_model._loss_map = loss_map
+
+ loss_model.ds_model.loss_params = pd.DataFrame(
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('consequence_A', 'DecisionVariableXYZ'),
+ ('consequence_B', 'DecisionVariableXYZ'),
+ ]
+ )
+ )
+ loss_model.lf_model.loss_params = pd.DataFrame(
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('consequence_C', 'DecisionVariableXYZ'),
+ ('consequence_D', 'DecisionVariableXYZ'),
+ ]
+ )
+ )
+
+ with pytest.warns(PelicunWarning) as record:
+ loss_model._ensure_loss_parameter_availability()
+ missing = loss_model._missing
+ assert missing == {('consequence_E', 'DecisionVariableXYZ')}
+ assert len(record) == 1
+ assert (
+ 'The loss model does not provide loss information '
+ 'for the following component(s) in the asset model: '
+ "[('consequence_E', 'DecisionVariableXYZ')]"
+ ) in str(record[0].message)
+
+ def test_aggregate_losses_when_no_loss(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # tests that aggregate losses works when there is no loss.
+ loss_model = LossModel(assessment_instance)
+ loss_model.decision_variables = ('Cost', 'Time', 'Carbon', 'Energy')
+ df_agg = loss_model.aggregate_losses()
+ assert isinstance(df_agg, pd.DataFrame)
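+ # Note: the 'Time' DV is reported through two aggregates
+ # ('repair_time-sequential' and 'repair_time-parallel'); with no loss
+ # sample present, every aggregate defaults to zero.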
+ pd.testing.assert_frame_equal(
+ df_agg,
+ pd.DataFrame(
+ {
+ 'repair_cost': 0.00,
+ 'repair_carbon': 0.0,
+ 'repair_energy': 0.00,
+ 'repair_time-sequential': 0.00,
+ 'repair_time-parallel': 0.00,
+ },
+ index=[0],
+ ),
+ )
+
+ def test__apply_consequence_scaling(
+ self, loss_model_with_ones: LossModel
+ ) -> None:
+ # When only `dv` is provided
+ scaling_conditions = {'dv': 'Cost'}
+ scaling_factor = 2.00
+
+ loss_model_with_ones._apply_consequence_scaling(
+ scaling_conditions, scaling_factor
+ )
+
+ for loss_model in loss_model_with_ones._loss_models:
+ assert loss_model.sample is not None
+ mask = loss_model.sample.columns.get_level_values('dv') == 'Cost'
+ assert np.all(loss_model.sample.iloc[:, mask] == 2.00)
+ assert np.all(loss_model.sample.iloc[:, ~mask] == 1.00)
+ loss_model.sample.iloc[:, :] = 1.00
+
+ scaling_conditions = {'dv': 'Carbon', 'loc': '1', 'uid': 'uid2'}
+ scaling_factor = 2.00
+ loss_model_with_ones._apply_consequence_scaling(
+ scaling_conditions, scaling_factor
+ )
+
+ for loss_model in loss_model_with_ones._loss_models:
+ assert loss_model.sample is not None
+ mask = np.full(len(loss_model.sample.columns), fill_value=True)
+ mask &= loss_model.sample.columns.get_level_values('dv') == 'Carbon'
+ mask &= loss_model.sample.columns.get_level_values('loc') == '1'
+ mask &= loss_model.sample.columns.get_level_values('uid') == 'uid2'
+ assert np.all(loss_model.sample.iloc[:, mask] == 2.00)
+ assert np.all(loss_model.sample.iloc[:, ~mask] == 1.00)
+
+ def test_aggregate_losses_combination(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # The test sets up a very simple loss calculation from
+ # scratch, only defining essential parameters.
+
+ # demand
+ sample_size = 5
+ demand_marginal_parameters = pd.DataFrame(
+ {
+ ('PIH', '0', '1'): ['in', 7.00],
+ ('PWS', '0', '1'): ['mph', 50.0],
+ },
+ index=['Units', 'Theta_0'],
+ ).T
+ perfect_corr = pd.DataFrame(
+ np.ones((2, 2)),
+ columns=demand_marginal_parameters.index,
+ index=demand_marginal_parameters.index,
+ )
+ assessment_instance.demand.load_model(
+ {'marginals': demand_marginal_parameters, 'correlation': perfect_corr}
+ )
+ assessment_instance.demand.generate_sample({'SampleSize': sample_size})
+
+ # asset
+ assessment_instance.asset.cmp_marginal_params = pd.DataFrame(
+ {
+ 'Theta_0': (1.0, 1.0),
+ },
+ index=pd.MultiIndex.from_tuples(
+ (('wind.comp', '0', '1', '0'), ('flood.comp', '0', '1', '0')),
+ names=('cmp', 'loc', 'dir', 'uid'),
+ ),
+ )
+ assessment_instance.asset.generate_cmp_sample()
+
+ # no damage estimation needed since we only use loss functions
+
+ # loss
+
+ assessment_instance.loss.decision_variables = ('Cost',)
+ assessment_instance.loss.add_loss_map(loss_map_policy='fill')
+ assessment_instance.loss.load_model_parameters(
+ [
+ (
+ 'pelicun/tests/basic/data/model/'
+ 'test_LossModel/loss_function_wind.csv'
+ ),
+ (
+ 'pelicun/tests/basic/data/model/'
+ 'test_LossModel/loss_function_flood.csv'
+ ),
+ ]
+ )
+
+ assessment_instance.loss.calculate()
+
+ # individual losses
+ l1, l2 = ensure_value(assessment_instance.loss.lf_model.sample).iloc[0, :]
+ # combined loss, result of interpolation
+ l_comb = 0.904
+
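+ # Layout of the combination matrix, as inferred from the manual checks
+ # further below: the first row and first column hold the axis values
+ # for the two individual losses, and the interior holds the combined
+ # losses that are interpolated between the bracketing grid points.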
+ combination_array = pd.read_csv(
+ (
+ 'pelicun/resources/SimCenterDBDL/combined_loss_matrices/'
+ 'Wind_Flood_Hazus_HU_bldg.csv'
+ ),
+ index_col=None,
+ header=None,
+ ).to_numpy()
+ loss_combination = {
+ 'Cost': {
+ ('wind.comp', 'flood.comp'): combination_array,
+ },
+ }
+
+ agg_df, _ = assessment_instance.loss.aggregate_losses(
+ loss_combination=loss_combination, future=True
+ )
+ assert isinstance(agg_df, pd.DataFrame)
+ pd.testing.assert_frame_equal(
+ agg_df, pd.DataFrame([l_comb] * 5, columns=['repair_cost'])
+ )
+
+ # verify interpolation with some manual checks
+ lower, higher = combination_array[8:10, 4]
+ assert lower <= l_comb <= higher
+ assert l2 == combination_array[0, 4]
+ assert combination_array[8, 0] <= l1 <= combination_array[9, 0]
+
+ def test_aggregate_losses_thresholds(
+ self, loss_model_with_ones: LossModel
+ ) -> None:
+ # Row 0 has the value of 1.0 in all columns.
+ # Adjust rows 1 and 2 to have the values 2.0 and 3.0, for
+ # testing.
+ assert loss_model_with_ones.ds_model.sample is not None
+ assert loss_model_with_ones.lf_model.sample is not None
+ loss_model_with_ones.decision_variables = ('Cost', 'Carbon')
+ loss_model_with_ones.dv_units = {'Cost': 'USD_2011', 'Carbon': 'kg'}
+ loss_model_with_ones.ds_model.sample.iloc[1, :] = 2.00
+ loss_model_with_ones.ds_model.sample.iloc[2, :] = 3.00
+ loss_model_with_ones.lf_model.sample.iloc[1, :] = 2.00
+ loss_model_with_ones.lf_model.sample.iloc[2, :] = 3.00
+ # Instantiate a RandomVariableRegistry to pass as an argument
+ # to the method.
+ rv_reg = uq.RandomVariableRegistry(loss_model_with_ones._asmnt.options.rng)
+ # Add a threshold for `Cost`
+ rv_reg.add_RV(
+ uq.rv_class_map('deterministic')(name='Cost', theta=np.array((400.00,))) # type: ignore
+ )
+ # Add a threshold for `Carbon`
+ rv_reg.add_RV(
+ uq.rv_class_map('deterministic')(
+ name='Carbon',
+ theta=np.array((100.00,)), # type: ignore
+ )
+ )
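+ # With all-ones samples, the row sums per DV are 96, 192, and 288.
+ # The ratios in `replacement_configuration` appear to scale the
+ # thresholds that trigger replacement (Cost: 0.50 * 400 = 200,
+ # Carbon: 1.00 * 100 = 100), and once any DV triggers, all DVs are
+ # set to their full replacement values (400 and 100), which is what
+ # the expected frames below reflect.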
+ df_agg, exceedance_bool_df = loss_model_with_ones.aggregate_losses(
+ replacement_configuration=(rv_reg, {'Cost': 0.50, 'Carbon': 1.00}),
+ future=True,
+ )
+ assert isinstance(df_agg, pd.DataFrame)
+ assert isinstance(exceedance_bool_df, pd.DataFrame)
+ df_agg_expected = pd.DataFrame(
+ {
+ 'repair_carbon': [96.00, 100.00, 100.00],
+ 'repair_cost': [96.00, 400.00, 400.00],
+ }
+ )
+ exceedance_bool_df_expected = pd.DataFrame(
+ {'Cost': [False, False, True], 'Carbon': [False, True, True]}
+ )
+ pd.testing.assert_frame_equal(df_agg, df_agg_expected)
+ pd.testing.assert_frame_equal(
+ exceedance_bool_df, exceedance_bool_df_expected
+ )
+
+ def test_consequence_scaling(self, loss_model_with_ones: LossModel) -> None:
+ loss_model_with_ones.consequence_scaling(
+ 'pelicun/tests/basic/data/model/test_LossModel/scaling_specification.csv'
+ )
+
+ expected_ds = (
+ pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_LossModel/scaled_losses_ds.csv',
+ dtype={
+ 'dv': str,
+ 'loss': str,
+ 'dmg': str,
+ 'ds': str,
+ 'loc': str,
+ 'dir': str,
+ 'uid': str,
+ },
+ )
+ .set_index(['dv', 'loss', 'dmg', 'ds', 'loc', 'dir', 'uid'])
+ .T.astype(float)
+ )
+ expected_ds.index = pd.RangeIndex(len(expected_ds))
+ pd.testing.assert_frame_equal(
+ loss_model_with_ones.ds_model.sample, # type: ignore
+ expected_ds,
+ )
+
+ expected_lf = (
+ pd.read_csv(
+ 'pelicun/tests/basic/data/model/test_LossModel/scaled_losses_lf.csv',
+ dtype={
+ 'dv': str,
+ 'loss': str,
+ 'dmg': str,
+ 'loc': str,
+ 'dir': str,
+ 'uid': str,
+ },
+ )
+ .set_index(['dv', 'loss', 'dmg', 'loc', 'dir', 'uid'])
+ .T.astype(float)
+ )
+ expected_lf.index = pd.RangeIndex(len(expected_lf))
+ pd.testing.assert_frame_equal(
+ loss_model_with_ones.lf_model.sample, # type: ignore
+ expected_lf,
+ )
+
+
+class TestRepairModel_Base(TestPelicunModel):
+ def test___init__(self, assessment_instance: Assessment) -> None:
+ repair_model = RepairModel_DS(assessment_instance)
+ with pytest.raises(AttributeError):
+ repair_model.xyz = 123 # type: ignore
+
+ def test_drop_unused_loss_parameters(
+ self, assessment_instance: Assessment
+ ) -> None:
+ base_model = RepairModel_DS(assessment_instance)
+ loss_map = pd.DataFrame(
+ {
+ 'Repair': ['consequence_A', 'consequence_B'],
+ },
+ index=['cmp_A', 'cmp_B'],
+ )
+ # without loss_params, it should do nothing
+ base_model.drop_unused_loss_parameters(loss_map)
+ base_model.loss_params = pd.DataFrame(
+ index=[f'consequence_{x}' for x in ('A', 'B', 'C', 'D')]
+ )
+ base_model.drop_unused_loss_parameters(loss_map)
+ pd.testing.assert_frame_equal(
+ base_model.loss_params,
+ pd.DataFrame(index=[f'consequence_{x}' for x in ('A', 'B')]),
+ )
+
+ def test__remove_incomplete_components(
+ self, assessment_instance: Assessment
+ ) -> None:
+ base_model = RepairModel_DS(assessment_instance)
+ # without loss_params, it should do nothing
+ base_model.remove_incomplete_components()
+ # without incomplete, it should do nothing
+ loss_params = pd.DataFrame(
+ index=[f'consequence_{x}' for x in ('A', 'B', 'C', 'D')]
+ )
+ base_model.loss_params = loss_params
+ base_model.remove_incomplete_components()
+ pd.testing.assert_frame_equal(
+ base_model.loss_params,
+ loss_params,
+ )
+ base_model.loss_params = pd.DataFrame(
+ {('Incomplete', ''): [0, 0, 0, 1]},
+ index=[f'consequence_{x}' for x in ('A', 'B', 'C', 'D')],
+ )
+ # Now entry D should be gone
+ base_model.remove_incomplete_components()
+ pd.testing.assert_frame_equal(
+ base_model.loss_params,
+ pd.DataFrame(
+ {('Incomplete', ''): [0, 0, 0]},
+ index=[f'consequence_{x}' for x in ('A', 'B', 'C')],
+ ),
+ )
+
+ def test__get_available(self, assessment_instance: Assessment) -> None:
+ base_model = RepairModel_DS(assessment_instance)
+ base_model.loss_params = pd.DataFrame(index=['cmp.A', 'cmp.B', 'cmp.C'])
+ assert base_model.get_available() == {'cmp.A', 'cmp.B', 'cmp.C'}
+
+
+class TestRepairModel_DS(TestRepairModel_Base):
+ def test_convert_loss_parameter_units(
+ self, assessment_instance: Assessment
+ ) -> None:
+ ds_model = RepairModel_DS(assessment_instance)
+ ds_model.loss_params = pd.DataFrame(
+ {
+ ('Quantity', 'Unit'): ['1 test_two', '1 EA'],
+ ('DV', 'Unit'): ['test_three', 'test_three'],
+ ('DS1', 'Theta_0'): ['200.00,100.00|10.00,20.00', '100.00'],
+ ('DS1', 'Theta_1'): [0.20, None],
+ ('DS1', 'Family'): ['lognormal', None],
+ },
+ index=pd.MultiIndex.from_tuples([('cmpA', 'Cost'), ('cmpB', 'Cost')]),
+ )
+
+ ds_model.convert_loss_parameter_units()
+
+ # DVs are scaled by 3/2, quantities by 2
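+ # e.g. '300,150' = '200,100' * 3.00 / 2.00 (test_three / test_two) and
+ # '20,40' = '10,20' * 2.00 (test_two); cmpB's scalar 100.00 becomes
+ # 300.0 (* 3.00 only, since its quantity unit is '1 EA').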
+ pd.testing.assert_frame_equal(
+ ds_model.loss_params,
+ pd.DataFrame(
+ {
+ ('Quantity', 'Unit'): ['1 test_two', '1 EA'],
+ ('DV', 'Unit'): ['test_three', 'test_three'],
+ ('DS1', 'Theta_0'): ['300,150|20,40', 300.0],
+ ('DS1', 'Theta_1'): [0.20, None],
+ ('DS1', 'Family'): ['lognormal', None],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [('cmpA', 'Cost'), ('cmpB', 'Cost')]
+ ),
+ ),
+ )
+
+ def test__drop_unused_damage_states(
+ self, assessment_instance: Assessment
+ ) -> None:
+ ds_model = RepairModel_DS(assessment_instance)
+ loss_params = pd.DataFrame(
+ {
+ ('DS1', 'Theta_0'): [1.0, 1.0, 1.0, 1.0],
+ ('DS2', 'Theta_0'): [1.0, 1.0, 1.0, None],
+ ('DS3', 'Theta_0'): [1.0, 1.0, None, None],
+ ('DS4', 'Theta_0'): [1.0, None, None, None],
+ ('DS5', 'Theta_0'): [None, None, None, None],
+ ('DS6', 'Theta_0'): [None, None, None, None],
+ ('DS7', 'Theta_0'): [None, None, None, None],
+ }
+ )
+ ds_model.loss_params = loss_params
+ ds_model.drop_unused_damage_states()
+ pd.testing.assert_frame_equal(ds_model.loss_params, loss_params.iloc[:, 0:4])
+
+ def test__create_DV_RVs(self, assessment_instance: Assessment) -> None:
+ assessment_instance.options.rho_cost_time = 0.30
+ ds_model = RepairModel_DS(assessment_instance)
+ ds_model.decision_variables = ('Cost', 'Time')
+ ds_model.missing = {('cmp.B', 'Cost'), ('cmp.B', 'Time')}
+ ds_model.loss_map = pd.DataFrame(
+ {
+ 'Repair': ['cmp.A', 'cmp.B', 'cmp.C', 'cmp.D', 'cmp.E'],
+ },
+ index=['cmp.A', 'cmp.B', 'cmp.C', 'cmp.D', 'cmp.E'],
+ )
+ # cmp.B is marked as missing, cmp.C is intended for the LF
+ # model.
+ # cmp.D has `|` in Theta_0 which should be treated as 1.00
+ # cmp.E has deterministic loss.
+ ds_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA', '1 EA', '1 EA', '1 EA'],
+ ('Quantity', 'Unit'): ['1 EA', '1 EA', '1 EA', '1 EA'],
+ ('DS1', 'Family'): ['normal', 'normal', 'normal', None],
+ ('DS1', 'Theta_0'): [1.00, 1.00, '4.0,2.0|5.0,1.0', 1.00],
+ ('DS1', 'Theta_1'): [1.00, 1.00, 1.00, None],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.A', 'Cost'),
+ ('cmp.A', 'Time'),
+ ('cmp.D', 'Cost'),
+ ('cmp.E', 'Cost'),
+ ]
+ ),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+
+ cases = pd.MultiIndex.from_tuples(
+ [
+ ('cmp.A', '0', '1', '0', '1'),
+ ('cmp.B', '0', '1', '0', '1'), # marked as missing
+ ('cmp.C', '0', '1', '0', '1'), # no loss parameters
+ ('cmp.D', '0', '1', '0', '1'), # `|` in Theta_0
+ ('cmp.E', '0', '1', '0', '1'), # Deterministic loss
+ ],
+ names=['cmp', 'loc', 'dir', 'uid', 'ds'],
+ )
+ rv_reg = ds_model._create_DV_RVs(cases)
+ assert rv_reg is not None
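+ # The RV names appear to follow the pattern
+ # <dv>-<cmp>-<ds>-<loc>-<dir>-<uid>, matching the `cases` index above.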
+ for key in (
+ 'Cost-cmp.A-1-0-1-0',
+ 'Time-cmp.A-1-0-1-0',
+ 'Cost-cmp.D-1-0-1-0',
+ ):
+ assert key in rv_reg.RV
+ assert len(rv_reg.RV) == 3
+ assert isinstance(rv_reg.RV['Cost-cmp.A-1-0-1-0'], uq.NormalRandomVariable)
+ assert isinstance(rv_reg.RV['Time-cmp.A-1-0-1-0'], uq.NormalRandomVariable)
+ assert isinstance(rv_reg.RV['Cost-cmp.D-1-0-1-0'], uq.NormalRandomVariable)
+ assert np.all(
+ rv_reg.RV['Cost-cmp.A-1-0-1-0'].theta[0:2] == np.array((1.0, 1.0))
+ )
+ assert np.all(
+ rv_reg.RV['Time-cmp.A-1-0-1-0'].theta[0:2] == np.array((1.0, 1.0))
+ )
+ assert np.all(
+ rv_reg.RV['Cost-cmp.D-1-0-1-0'].theta[0:2] == np.array([1.0, 1.0])
+ )
+ assert 'DV-cmp.A-1-0-1-0_set' in rv_reg.RV_set
+ assert np.all(
+ rv_reg.RV_set['DV-cmp.A-1-0-1-0_set'].Rho()
+ == np.array(((1.0, 0.3), (0.3, 1.0)))
+ )
+ assert len(rv_reg.RV_set) == 1
+
+ def test__create_DV_RVs_all_deterministic(
+ self, assessment_instance: Assessment
+ ) -> None:
+ ds_model = RepairModel_DS(assessment_instance)
+ ds_model.decision_variables = ('myRV',)
+ ds_model.missing = set()
+ ds_model.loss_map = pd.DataFrame(
+ {'Repair': ['cmp.A']},
+ index=['cmp.A'],
+ )
+ ds_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA'],
+ ('Quantity', 'Unit'): ['1 EA'],
+ ('DS1', 'Family'): [None],
+ ('DS1', 'Theta_0'): [1.00],
+ },
+ index=pd.MultiIndex.from_tuples([('cmp.A', 'myRV')]),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+
+ cases = pd.MultiIndex.from_tuples(
+ [('cmp.A', '0', '1', '0', '1')],
+ names=['cmp', 'loc', 'dir', 'uid', 'ds'],
+ )
+ rv_reg = ds_model._create_DV_RVs(cases)
+
+ assert rv_reg is None
+
+ def test__calc_median_consequence_no_locs(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # Test the method when the eco_qnt dataframe's columns do not
+ # contain `loc` information.
+
+ ds_model = RepairModel_DS(assessment_instance)
+ eco_qnt = pd.DataFrame(
+ {
+ ('cmp.A', '0'): [0.00, 0.00, 1.00],
+ ('cmp.A', '1'): [1.00, 0.00, 0.00],
+ ('cmp.A', '2'): [0.00, 1.00, 0.00],
+ ('cmp.B', '1'): [0.00, 1.00, 0.00],
+ ('cmp.B', '2'): [1.00, 0.00, 0.00],
+ }
+ ).rename_axis(columns=['cmp', 'ds'])
+ ds_model.decision_variables = ('my_DV',)
+ # cmp.A should be available and we should get medians.
+ # missing_cmp will be marked as missing
+ # is_for_LF_model represents a component->consequence pair
+ # that is intended for processing by the loss function model
+ # and should be ignored by the damage state model.
+ ds_model.loss_map = pd.DataFrame(
+ {
+ 'Repair': ['cmp.A', 'cmp.B', 'missing_cmp', 'is_for_LF_model'],
+ },
+ index=['cmp.A', 'cmp.B', 'missing_consequence', 'LF_consequence'],
+ )
+
+ # DS3 is in the loss parameters but has not been triggered.
+ ds_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA', '1 EA'],
+ ('Quantity', 'Unit'): ['1 EA', '1 EA'],
+ ('DS1', 'Family'): [None, 'normal'],
+ ('DS1', 'Theta_0'): [100.00, 12345.00],
+ ('DS1', 'Theta_1'): [None, 0.30],
+ ('DS2', 'Family'): [None, 'normal'],
+ ('DS2', 'Theta_0'): [200.00, '2.00,1.00|5.00,10.00'],
+ ('DS2', 'Theta_1'): [None, 0.30],
+ ('DS3', 'Family'): [None, 'normal'],
+ ('DS3', 'Theta_0'): [200.00, '2.00,1.00|5.00,10.00'],
+ ('DS3', 'Theta_1'): [None, 0.30],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [('cmp.A', 'my_DV'), ('cmp.B', 'my_DV')]
+ ),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+ ds_model.missing = {('missing_cmp', 'my_DV')}
+ medians = ds_model._calc_median_consequence(eco_qnt)
+ assert len(medians) == 1
+ assert 'my_DV' in medians
+ pd.testing.assert_frame_equal(
+ medians['my_DV'],
+ pd.DataFrame(
+ {
+ ('cmp.A', '1'): [100.00, 100.00, 100.00],
+ ('cmp.A', '2'): [200.00, 200.00, 200.00],
+ ('cmp.B', '1'): [1.00, 1.00, 1.00],
+ ('cmp.B', '2'): [2.00, 2.00, 2.00],
+ }
+ ).rename_axis(columns=['cmp', 'ds']),
+ )
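+ # Two observations on cmp.B: the DS1 median is 1.00, presumably
+ # because the scalar Theta_0 (12345.00) of a parametric (normal)
+ # consequence is carried by the corresponding RV instead of the
+ # median; the DS2 median comes from the multilinear string
+ # ('2.00,1.00|5.00,10.00'), which clamps to 2.00 for the quantities
+ # of at most 1.00 used here.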
+
+ #
+ # edge cases
+ #
+
+ # random variable not supported
+ ds_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA'],
+ ('Quantity', 'Unit'): ['1 EA'],
+ ('DS1', 'Family'): ['multilinear_CDF'],
+ ('DS1', 'Theta_0'): ['0.00,1.00|0.00,1.00'],
+ ('DS1', 'Theta_1'): [0.30],
+ },
+ index=pd.MultiIndex.from_tuples([('cmp.A', 'my_DV')]),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+ with pytest.raises(
+ ValueError,
+ match='Loss Distribution of type multilinear_CDF not supported.',
+ ):
+ ds_model._calc_median_consequence(eco_qnt)
+
+ def test__calc_median_consequence_locs(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # Test the method when the eco_qnt dataframe's columns contain
+ # `loc` information.
+
+ ds_model = RepairModel_DS(assessment_instance)
+ eco_qnt = pd.DataFrame(
+ {
+ ('cmp.A', '0', '1'): [0.00, 0.00, 1.00],
+ ('cmp.A', '1', '1'): [1.00, 0.00, 0.00],
+ }
+ ).rename_axis(columns=['cmp', 'ds', 'loc'])
+ ds_model.decision_variables = ('my_DV',)
+ # cmp.A should be available and we should get medians.
+ ds_model.loss_map = pd.DataFrame(
+ {
+ 'Repair': ['cmp.A'],
+ },
+ index=['cmp.A'],
+ )
+
+ # DS3 is in the loss parameters but has not been triggered.
+ ds_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA'],
+ ('Quantity', 'Unit'): ['1 EA'],
+ ('DS1', 'Family'): [None],
+ ('DS1', 'Theta_0'): [100.00],
+ ('DS1', 'Theta_1'): [None],
+ ('DS2', 'Family'): [None],
+ ('DS2', 'Theta_0'): [200.00],
+ ('DS2', 'Theta_1'): [None],
+ ('DS3', 'Family'): [None],
+ ('DS3', 'Theta_0'): [200.00],
+ ('DS3', 'Theta_1'): [None],
+ },
+ index=pd.MultiIndex.from_tuples([('cmp.A', 'my_DV')]),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+ ds_model.missing = set()
+ medians = ds_model._calc_median_consequence(eco_qnt)
+ assert len(medians) == 1
+ assert 'my_DV' in medians
+ pd.testing.assert_frame_equal(
+ medians['my_DV'],
+ pd.DataFrame(
+ {
+ ('cmp.A', '1', '1'): [100.00, 100.00, 100.00],
+ }
+ ).rename_axis(columns=['cmp', 'ds', 'loc']),
+ )
+
+
+class TestRepairModel_LF(TestRepairModel_Base):
+ def test_convert_loss_parameter_units(
+ self, assessment_instance: Assessment
+ ) -> None:
+ lf_model = RepairModel_LF(assessment_instance)
+ lf_model.loss_params = pd.DataFrame(
+ {
+ ('Demand', 'Unit'): ['inps2', 'g'],
+ ('DV', 'Unit'): ['test_three', 'test_three'],
+ ('LossFunction', 'Theta_0'): [
+ '1.00,1.00|1.00,1.00',
+ '1.00,1.00|1.00,1.00',
+ ],
+ },
+ index=pd.MultiIndex.from_tuples([('cmpA', 'Cost'), ('cmpB', 'Cost')]),
+ )
+
+ lf_model.convert_loss_parameter_units()
+
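+ # Loss (Y) values are scaled by the DV unit (test_three = 3.00), and
+ # demand (X) values by the demand unit: inps2 = 0.0254, g = 9.80665.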
+ pd.testing.assert_frame_equal(
+ lf_model.loss_params,
+ pd.DataFrame(
+ {
+ ('Demand', 'Unit'): ['inps2', 'g'],
+ ('DV', 'Unit'): ['test_three', 'test_three'],
+ ('LossFunction', 'Theta_0'): [
+ '3,3|0.0254,0.0254',
+ '3,3|9.80665,9.80665',
+ ],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [('cmpA', 'Cost'), ('cmpB', 'Cost')]
+ ),
+ ),
+ )
+
+ def test__calc_median_consequence(self, assessment_instance: Assessment) -> None:
+ lf_model = RepairModel_LF(assessment_instance)
+
+ performance_group = pd.DataFrame(
+ {
+ 'Blocks': [1],
+ },
+ index=pd.MultiIndex.from_tuples([(('cmp.A', 'dv.A'), '0', '1', '0')]),
+ )
+ loss_map = {'cmp.A': 'cmp.A'}
+ required_edps = {(('cmp.A', 'dv.A'), '0', '1', '0'): 'PFA-1-1'}
+ demand_dict = {'PFA-1-1': np.array((1.00, 2.00, 3.00))}
+ cmp_sample = {
+ ('cmp.A', '0', '1', '0'): pd.Series(
+ np.array((10.00, 20.00, 30.00)), name=('cmp.A', '0', '1', '0')
+ )
+ }
+ lf_model.loss_params = pd.DataFrame(
+ {
+ ('LossFunction', 'Theta_0'): ['0.00,1.00|0.00,10.00'],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [('cmp.A', 'dv.A')], names=['Loss Driver', 'Decision Variable']
+ ),
+ )
+ medians = lf_model._calc_median_consequence(
+ performance_group, loss_map, required_edps, demand_dict, cmp_sample
+ )
+ pd.testing.assert_frame_equal(
+ medians,
+ pd.DataFrame(
+ {('dv.A', 'cmp.A', 'cmp.A', '0', '1', '0', '0'): [1.0, 4.0, 9.0]},
+ ).rename_axis(
+ columns=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block']
+ ),
+ )
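+ # Worked numbers: the loss function maps a demand d to the ratio
+ # d / 10 (0.1, 0.2, 0.3 here), which is multiplied by the component
+ # sample (10, 20, 30) to yield the medians 1.0, 4.0, 9.0.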
+ # test the error raised when the interpolation domain is too small
+ demand_dict = {'PFA-1-1': np.array((1.00, 2.00, 1e3))}
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ 'Loss function interpolation for consequence '
+ '`cmp.A-dv.A` has failed. Ensure a sufficient '
+ 'interpolation domain for the X values '
+ '(those after the `|` symbol) and verify '
+ 'the X-value and Y-value lengths match.'
+ ),
+ ):
+ lf_model._calc_median_consequence(
+ performance_group, loss_map, required_edps, demand_dict, cmp_sample
+ )
+
+ def test__create_DV_RVs(self, assessment_instance: Assessment) -> None:
+ assessment_instance.options.rho_cost_time = 0.50
+ lf_model = RepairModel_LF(assessment_instance)
+ lf_model.decision_variables = ('Cost', 'Time')
+ lf_model.missing = set()
+ lf_model.loss_map = pd.DataFrame(
+ {
+ 'Repair': ['cmp.A', 'cmp.B'],
+ },
+ index=['cmp.A', 'cmp.B'],
+ )
+ lf_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA', '1 EA', '1 EA'],
+ ('Quantity', 'Unit'): ['1 EA', '1 EA', '1 EA'],
+ ('LossFunction', 'Family'): ['normal', 'normal', None],
+ ('LossFunction', 'Theta_0'): [
+ '0.0,1.0|0.0,1.0',
+ '0.0,1.0|0.0,1.0',
+ '0.0,1.0|0.0,1.0',
+ ],
+ ('LossFunction', 'Theta_1'): [0.3, 0.3, None],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.A', 'Cost'),
+ ('cmp.A', 'Time'),
+ ('cmp.B', 'Cost'),
+ ]
+ ),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+
+ cases = pd.MultiIndex.from_tuples(
+ [
+ ('Cost', 'cmp.A', 'cmp.A', '0', '1', '0', '1'),
+ ('Time', 'cmp.A', 'cmp.A', '0', '1', '0', '1'),
+ ('Cost', 'cmp.B', 'cmp.B', '0', '1', '0', '1'),
+ ],
+ names=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block'],
+ )
+ rv_reg = lf_model._create_DV_RVs(cases)
+ assert rv_reg is not None
+ for key in (
+ 'Cost-cmp.A-cmp.A-0-1-0-1',
+ 'Time-cmp.A-cmp.A-0-1-0-1',
+ ):
+ assert key in rv_reg.RV
+ assert len(rv_reg.RV) == 2
+ assert isinstance(
+ rv_reg.RV['Cost-cmp.A-cmp.A-0-1-0-1'], uq.NormalRandomVariable
+ )
+ assert isinstance(
+ rv_reg.RV['Time-cmp.A-cmp.A-0-1-0-1'], uq.NormalRandomVariable
+ )
+ assert np.all(
+ rv_reg.RV['Cost-cmp.A-cmp.A-0-1-0-1'].theta[0:2] == np.array((1.0, 0.3))
+ )
+ assert np.all(
+ rv_reg.RV['Time-cmp.A-cmp.A-0-1-0-1'].theta[0:2] == np.array((1.0, 0.3))
+ )
+ assert 'DV-cmp.A-cmp.A-0-1-0-1_set' in rv_reg.RV_set
+ assert np.all(
+ rv_reg.RV_set['DV-cmp.A-cmp.A-0-1-0-1_set'].Rho()
+ == np.array(((1.0, 0.5), (0.5, 1.0)))
+ )
+ assert len(rv_reg.RV_set) == 1
+
+ def test__create_DV_RVs_no_rv_case(
+ self, assessment_instance: Assessment
+ ) -> None:
+ # Special case where there is no need for RVs
+
+ lf_model = RepairModel_LF(assessment_instance)
+ lf_model.decision_variables = ('Cost', 'Time')
+ lf_model.missing = set()
+ lf_model.loss_map = pd.DataFrame(
+ {
+ 'Repair': ['cmp.B'],
+ },
+ index=['cmp.B'],
+ )
+ lf_model.loss_params = pd.DataFrame(
+ {
+ ('DV', 'Unit'): ['1 EA'],
+ ('Quantity', 'Unit'): ['1 EA'],
+ ('LossFunction', 'Family'): [None],
+ ('LossFunction', 'Theta_0'): ['0.0,1.0|0.0,1.0'],
+ ('LossFunction', 'Theta_1'): [None],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('cmp.B', 'Cost'),
+ ]
+ ),
+ ).rename_axis(index=['Loss Driver', 'Decision Variable'])
+
+ cases = pd.MultiIndex.from_tuples(
+ [
+ ('Cost', 'cmp.B', 'cmp.B', '0', '1', '0', '1'),
+ ],
+ names=['dv', 'loss', 'dmg', 'loc', 'dir', 'uid', 'block'],
+ )
+ rv_reg = lf_model._create_DV_RVs(cases)
+ assert rv_reg is None
+
+
+def test__prep_constant_median_DV() -> None:
+ median = 10.00
+ constant_median_dv = model.loss_model._prep_constant_median_DV(median)
+ assert constant_median_dv() == median
+ values = (1.0, 2.0, 3.0, 4.0, 5.0)
+ for value in values:
+ assert constant_median_dv(value) == 10.00
+
+
+def test__prep_bounded_multilinear_median_DV() -> None:
+ medians = np.array((1.00, 2.00, 3.00, 4.00, 5.00))
+ quantities = np.array((0.00, 1.00, 2.00, 3.00, 4.00))
+ f = model.loss_model._prep_bounded_multilinear_median_DV(medians, quantities)
+
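+ # The returned f interpolates linearly between the (quantity, median)
+ # pairs and clamps to the boundary medians outside [0.00, 4.00], as
+ # the checks below confirm.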
+ result = f(2.5)
+ expected = 3.5
+ assert result == expected
+
+ result = f(0.00)
+ expected = 1.00
+ assert result == expected
+
+ result = f(4.00)
+ expected = 5.0
+ assert result == expected
+
+ result = f(-1.00)
+ expected = 1.00
+ assert result == expected
+
+ result = f(5.00)
+ expected = 5.00
+ assert result == expected
+
+ result_list = f([2.5, 3.5])
+ expected_list = [3.5, 4.5]
+ assert np.allclose(result_list, expected_list)
+
+ with pytest.raises(
+ ValueError,
+ match=(
+ 'A bounded linear median Decision Variable function '
+ 'called without specifying the quantity '
+ 'of damaged components'
+ ),
+ ):
+ f(None)
+
+
+def test__is_for_lf_model() -> None:
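+ # Classification is driven by the parameter columns: a
+ # ('LossFunction', ...) header marks loss-function data, while
+ # ('DSi', ...) headers mark damage-state data.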
+ positive_case = pd.DataFrame(
+ {
+ ('LossFunction', 'Theta_0'): [0.5],
+ },
+ index=pd.Index(['cmp.1'], name='ID'),
+ )
+
+ negative_case = pd.DataFrame(
+ {
+ ('DS1', 'Theta_0'): [0.50],
+ },
+ index=pd.Index(['cmp.1'], name='ID'),
+ )
+
+ assert _is_for_lf_model(positive_case) is True
+ assert _is_for_lf_model(negative_case) is False
+
+
+def test__is_for_ds_model() -> None:
+ positive_case = pd.DataFrame(
+ {
+ ('DS1', 'Theta_0'): [0.50],
+ },
+ index=pd.Index(['cmp.1'], name='ID'),
+ )
+
+ negative_case = pd.DataFrame(
+ {
+ ('LossFunction', 'Theta_0'): [0.5],
+ },
+ index=pd.Index(['cmp.1'], name='ID'),
+ )
+
+ assert _is_for_ds_model(positive_case) is True
+ assert _is_for_ds_model(negative_case) is False
diff --git a/pelicun/tests/basic/test_model.py b/pelicun/tests/basic/test_model.py
new file mode 100644
index 000000000..af878f089
--- /dev/null
+++ b/pelicun/tests/basic/test_model.py
@@ -0,0 +1,64 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""This file defines a class used by the model unit tests."""
+
+from __future__ import annotations
+
+from copy import deepcopy
+from typing import Callable
+
+import pytest
+
+from pelicun import assessment
+
+
+class TestModelModule:
+ @pytest.fixture
+ def assessment_factory(self) -> Callable:
+ def create_instance(*, verbose: bool) -> assessment.Assessment:
+ x = assessment.Assessment()
+ x.log.verbose = verbose
+ return x
+
+ return create_instance
+
+ @pytest.fixture(params=[True, False])
+ def assessment_instance(self, request, assessment_factory) -> assessment.Assessment: # noqa: ANN001
+ return deepcopy(assessment_factory(verbose=request.param))
diff --git a/pelicun/tests/basic/test_pelicun_model.py b/pelicun/tests/basic/test_pelicun_model.py
new file mode 100644
index 000000000..6b37fd77d
--- /dev/null
+++ b/pelicun/tests/basic/test_pelicun_model.py
@@ -0,0 +1,217 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""These are unit and integration tests on the PelicunModel class."""
+
+from __future__ import annotations
+
+from copy import deepcopy
+from typing import TYPE_CHECKING
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from pelicun import model
+from pelicun.tests.basic.test_model import TestModelModule
+
+if TYPE_CHECKING:
+ from pelicun.assessment import Assessment
+ from pelicun.model.pelicun_model import PelicunModel
+
+
+class TestPelicunModel(TestModelModule):
+ @pytest.fixture
+ def pelicun_model(self, assessment_instance: Assessment) -> PelicunModel:
+ return deepcopy(model.PelicunModel(assessment_instance))
+
+ def test_init(self, pelicun_model: PelicunModel) -> None:
+ assert pelicun_model.log
+
+ def test__convert_marginal_params(self, pelicun_model: PelicunModel) -> None:
+ # one row, only Theta_0, no conversion
+ marginal_params = pd.DataFrame(
+ [['1.0']],
+ columns=['Theta_0'],
+ index=pd.MultiIndex.from_tuples(
+ (('A', '0', '1'),), names=('cmp', 'loc', 'dir')
+ ),
+ )
+ units = pd.Series(['ea'], index=marginal_params.index)
+ arg_units = None
+ res = pelicun_model._convert_marginal_params(
+ marginal_params, units, arg_units
+ )
+
+ # >>> res
+ # Theta_0
+ # cmp loc dir
+ # A 0 1 1.0
+
+ assert 'Theta_0' in res.columns
+ assert res.to_dict() == {'Theta_0': {('A', '0', '1'): 1.0}}
+
+ # many rows, with conversions
+ marginal_params = pd.DataFrame(
+ [
+ [np.nan, 1.0, np.nan, np.nan, np.nan, np.nan],
+ ['normal', np.nan, 1.0, np.nan, -0.50, 0.50],
+ ['lognormal', 1.0, 0.5, np.nan, 0.50, 1.50],
+ ['uniform', 0.0, 10.0, np.nan, np.nan, np.nan],
+ ],
+ columns=[
+ 'Family',
+ 'Theta_0',
+ 'Theta_1',
+ 'Theta_2',
+ 'TruncateLower',
+ 'TruncateUpper',
+ ],
+ index=pd.MultiIndex.from_tuples(
+ (
+ ('A', '0', '1'),
+ ('B', '0', '1'),
+ ('C', '0', '1'),
+ ('D', '0', '1'),
+ ),
+ names=('cmp', 'loc', 'dir'),
+ ),
+ )
+ units = pd.Series(['ea', 'ft', 'in', 'in2'], index=marginal_params.index)
+ arg_units = None
+ res = pelicun_model._convert_marginal_params(
+ marginal_params, units, arg_units
+ )
+
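+ # Assuming the standard SI factors (ft = 0.3048, in = 0.0254,
+ # in2 = 0.00064516): e.g. TruncateLower -0.50 ft -> -0.1524 and the
+ # uniform Theta_1 10.0 in2 -> 0.0064516.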
+ expected_df = pd.DataFrame(
+ {
+ 'Family': [np.nan, 'normal', 'lognormal', 'uniform'],
+ 'Theta_0': [1.0000, np.nan, 0.0254, 0.0000],
+ 'Theta_1': [np.nan, 1.000000, 0.500000, 0.0064516],
+ 'Theta_2': [np.nan, np.nan, np.nan, np.nan],
+ 'TruncateLower': [np.nan, -0.1524, 0.0127, np.nan],
+ 'TruncateUpper': [np.nan, 0.1524, 0.0381, np.nan],
+ },
+ index=pd.MultiIndex.from_tuples(
+ (
+ ('A', '0', '1'),
+ ('B', '0', '1'),
+ ('C', '0', '1'),
+ ('D', '0', '1'),
+ ),
+ names=('cmp', 'loc', 'dir'),
+ ),
+ )
+
+ pd.testing.assert_frame_equal(
+ expected_df, res, check_index_type=False, check_column_type=False
+ )
+
+ # a case with arg_units
+ marginal_params = pd.DataFrame(
+ [['500.0,400.00|20,10']],
+ columns=['Theta_0'],
+ index=pd.MultiIndex.from_tuples(
+ (('A', '0', '1'),), names=('cmp', 'loc', 'dir')
+ ),
+ )
+ units = pd.Series(['test_three'], index=marginal_params.index)
+ arg_units = pd.Series(['test_two'], index=marginal_params.index)
+ res = pelicun_model._convert_marginal_params(
+ marginal_params, units, arg_units
+ )
+
+ # >>> res
+ # Theta_0
+ # cmp loc dir
+ # A 0 1 750,600|40,20
+
+ # note: '40,20' = '20,10' * 2.00 (test_two)
+ # note: '750,600' = '500,400' * 3.00 / 2.00 (test_three/test_two)
+
+ expected_df = pd.DataFrame(
+ {
+ 'Theta_0': ['750,600|40,20'],
+ },
+ index=pd.MultiIndex.from_tuples(
+ (('A', '0', '1'),),
+ names=('cmp', 'loc', 'dir'),
+ ),
+ )
+ pd.testing.assert_frame_equal(
+ expected_df, res, check_index_type=False, check_column_type=False
+ )
+
+ # a case with arg_units where we don't divide
+ marginal_params = pd.DataFrame(
+ [['1.00,2.00|1.00,4.00']],
+ columns=['Theta_0'],
+ index=[1],
+ )
+ units = pd.Series(['test_three'], index=marginal_params.index)
+ arg_units = pd.Series(['test_two'], index=marginal_params.index)
+ res = pelicun_model._convert_marginal_params(
+ marginal_params, units, arg_units, divide_units=False
+ )
+
+ # Theta_0
+ # 1 3,6|2,8
+
+ # note: '3,6' = '1,2' * 3.00 (test_three)
+ # note: '2,8' = '1,4' * 2.00 (test_two)
+
+ expected_df = pd.DataFrame(
+ {
+ 'Theta_0': ['3,6|2,8'],
+ },
+ index=[1],
+ )
+ pd.testing.assert_frame_equal(
+ expected_df, res, check_index_type=False, check_column_type=False
+ )
+
+ def test_query_error_setup(self, pelicun_model: PelicunModel) -> None:
+ assert (
+ pelicun_model.query_error_setup(
+ 'Loss/ReplacementThreshold/RaiseOnUnknownKeys'
+ )
+ is True
+ )
+ with pytest.raises(KeyError):
+ pelicun_model.query_error_setup('some/invalid/path')
diff --git a/pelicun/tests/test_uq.py b/pelicun/tests/basic/test_uq.py
similarity index 61%
rename from pelicun/tests/test_uq.py
rename to pelicun/tests/basic/test_uq.py
index 3d4814a93..f75c56969 100644
--- a/pelicun/tests/test_uq.py
+++ b/pelicun/tests/basic/test_uq.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -46,16 +45,23 @@
reset from the `reset_all_test_data` function in `reset_tests.py`.
"""
+from __future__ import annotations
+
+import math
+import re
import warnings
-import pytest
+
import numpy as np
-from scipy.stats import norm
-from scipy.stats import lognorm
-from pelicun import uq
-from pelicun.tests.util import import_pickle
-from pelicun.tests.util import export_pickle
+import pytest
+from scipy.stats import (
+ lognorm, # type: ignore
+ norm, # type: ignore
+ weibull_min, # type: ignore
+)
-# pylint: disable=missing-function-docstring
+from pelicun import uq
+from pelicun.base import ensure_value
+from pelicun.tests.util import export_pickle, import_pickle
# The tests maintain the order of definitions of the `uq.py` file.
@@ -68,42 +74,65 @@
# The following tests verify the functions of the module.
-def test_scale_distribution():
+def test_scale_distribution() -> None:
# used in all cases
theta = np.array((-1.00, 1.00))
trunc = np.array((-2.00, 2.00))
# case 1:
# normal distribution, factor of two
- res = uq.scale_distribution(2.00, 'normal', theta, trunc)
+ theta_new, truncation_limits = uq.scale_distribution(
+ 2.00, 'normal', theta, trunc
+ )
+ assert truncation_limits is not None
+ assert np.allclose(theta_new, np.array((-2.00, 1.00)))
+ assert np.allclose(truncation_limits, np.array((-4.00, 4.00)))
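+ # For 'normal' (and 'normal_cov') theta_1 is a dimensionless COV, so
+ # only the mean and the truncation limits scale; 'normal_std' (case 2)
+ # scales the standard deviation as well.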
+
+ # case 2:
+ # normal_std distribution, factor of two
+ res = uq.scale_distribution(2.00, 'normal_std', theta, trunc)
+ assert res[1] is not None
+ assert np.allclose(res[0], np.array((-2.00, 2.00))) # theta_new
+ assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits
+
+ # case 3:
+ # normal_cov distribution, factor of two
+ res = uq.scale_distribution(2.00, 'normal_cov', theta, trunc)
+ assert res[1] is not None
assert np.allclose(res[0], np.array((-2.00, 1.00))) # theta_new
assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits
- # case 2:
+ # case 4:
# lognormal distribution, factor of two
res = uq.scale_distribution(2.00, 'lognormal', theta, trunc)
+ assert res[1] is not None
assert np.allclose(res[0], np.array((-2.00, 1.00))) # theta_new
assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits
- # case 3:
+ # case 5:
# uniform distribution, factor of two
res = uq.scale_distribution(2.00, 'uniform', theta, trunc)
+ assert res[1] is not None
assert np.allclose(res[0], np.array((-2.00, 2.00))) # theta_new
assert np.allclose(res[1], np.array((-4.00, 4.00))) # truncation_limits
- # case 4: unsupported distribution
- with pytest.raises(ValueError):
+ # case 6: unsupported distribution
+ with pytest.raises(
+ ValueError, match='Unsupported distribution: benktander-weibull'
+ ):
uq.scale_distribution(0.50, 'benktander-weibull', np.array((1.00, 10.00)))
-def test_mvn_orthotope_density():
+def test_mvn_orthotope_density() -> None:
# case 1:
# zero-width slice should result in a value of zero.
mu_val = 0.00
cov_val = 1.00
lower_val = -1.00
upper_val = -1.00
- res = uq.mvn_orthotope_density(mu_val, cov_val, lower_val, upper_val)
+ res = uq.mvn_orthotope_density(
+ mu_val, np.atleast_2d([cov_val]), lower_val, upper_val
+ )
assert np.allclose(res, np.array((0.00, 2.00e-16)))
# case 2:
@@ -112,7 +141,9 @@ def test_mvn_orthotope_density():
cov_val = 1.00
lower_val = np.nan
upper_val = 0.00
- res = uq.mvn_orthotope_density(mu_val, cov_val, lower_val, upper_val)
+ res = uq.mvn_orthotope_density(
+ mu_val, np.atleast_2d([cov_val]), lower_val, upper_val
+ )
assert np.allclose(res, np.array((0.50, 2.00e-16)))
# case 3:
@@ -121,7 +152,9 @@ def test_mvn_orthotope_density():
cov_val = 1.00
lower_val = 0.00
upper_val = np.nan
- res = uq.mvn_orthotope_density(mu_val, cov_val, lower_val, upper_val)
+ res = uq.mvn_orthotope_density(
+ mu_val, np.atleast_2d([cov_val]), lower_val, upper_val
+ )
assert np.allclose(res, np.array((0.50, 2.00e-16)))
# case 4:
@@ -154,28 +187,36 @@ def test_mvn_orthotope_density():
assert np.allclose(res, np.array((1.00 / 8.00, 2.00e-16)))
-def test__get_theta():
- # evaluate uq._get_theta() for some valid inputs
+def test__get_theta() -> None:
+ # Evaluate uq._get_theta() for some valid inputs
res = uq._get_theta(
- np.array(((1.00, 1.00), (1.00, 0.5))),
- np.array(((0.00, 1.00), (1.00, 0.5))),
- ['normal', 'lognormal'],
+ np.array(((1.00, 1.00), (1.00, 0.5), (0.00, 0.3), (1.50, 0.2))),
+ np.array(((0.00, 1.00), (1.00, 0.5), (0.00, 0.3), (1.00, 0.2))),
+ np.array(['normal', 'lognormal', 'normal_std', 'normal_cov']),
)
- # check that the expected output is obtained
- assert np.allclose(
- res, np.array(((2.71828183, 2.71828183), (1.82436064, 0.82436064)))
+ # Check that the expected output is obtained for each distribution type
+ expected_res = np.array(
+ ((1.00, 2.00), (2.00, 0.82436064), (0.00, 0.60), (1.36642083, 0.24428055))
)
- # check that it failes for invalid inputs
- with pytest.raises(ValueError):
- uq._get_theta(np.array((1.00,)), np.array((1.00,)), 'not_a_distribution')
+ assert np.allclose(res, expected_res)
+
+ # Check that it fails for invalid inputs
+ with pytest.raises(
+ ValueError, match='Unsupported distribution: not_a_distribution'
+ ):
+ uq._get_theta(
+ np.array((1.00,)), np.array((1.00,)), np.array(['not_a_distribution'])
+ )
-def test__get_limit_probs():
+def test__get_limit_probs() -> None:
# verify that it works for valid inputs
- res = uq._get_limit_probs(np.array((0.10, 0.20)), 'normal', np.array((0.15, 1.00)))
+ res = uq._get_limit_probs(
+ np.array((0.10, 0.20)), 'normal', np.array((0.15, 1.00))
+ )
assert np.allclose(res, np.array((0.4800611941616275, 0.5199388058383725)))
res = uq._get_limit_probs(
@@ -215,7 +256,9 @@ def test__get_limit_probs():
# verify that it fails for invalid inputs
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError, match='Unsupported distribution: not_a_distribution'
+ ):
uq._get_limit_probs(
np.array((1.00,)),
'not_a_distribution',
@@ -223,7 +266,7 @@ def test__get_limit_probs():
)
-def test__get_std_samples():
+def test__get_std_samples() -> None:
# test that it works with valid inputs
# case 1:
@@ -233,7 +276,7 @@ def test__get_std_samples():
tr_limits = np.array(((np.nan, np.nan),))
dist_list = np.array(('normal',))
res = uq._get_std_samples(samples, theta, tr_limits, dist_list)
- assert np.allclose(res, np.array(((1.00, 2.00, 3.00))))
+ assert np.allclose(res, np.array((1.00, 2.00, 3.00)))
# case 2:
# multivariate samples
@@ -272,8 +315,9 @@ def test__get_std_samples():
)
# test that it fails for invalid inputs
-
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError, match='Unsupported distribution: some_unsupported_distribution'
+ ):
uq._get_std_samples(
np.array(((1.00, 2.00, 3.00),)),
np.array(((0.00, 1.0),)),
@@ -282,43 +326,50 @@ def test__get_std_samples():
)
-def test__get_std_corr_matrix():
+def test__get_std_corr_matrix() -> None:
# test that it works with valid inputs
# case 1:
std_samples = np.array(((1.00,),))
res = uq._get_std_corr_matrix(std_samples)
+ assert res is not None
assert np.allclose(res, np.array(((1.00,),)))
# case 2:
std_samples = np.array(((1.00, 0.00), (0.00, 1.00)))
res = uq._get_std_corr_matrix(std_samples)
+ assert res is not None
assert np.allclose(res, np.array(((1.00, 0.00), (0.00, 1.00))))
# case 3:
std_samples = np.array(((1.00, 0.00), (0.00, -1.00)))
res = uq._get_std_corr_matrix(std_samples)
+ assert res is not None
assert np.allclose(res, np.array(((1.00, 0.00), (0.00, 1.00))))
# case 4:
std_samples = np.array(((1.00, 1.00), (1.00, 1.00)))
res = uq._get_std_corr_matrix(std_samples)
+ assert res is not None
assert np.allclose(res, np.array(((1.00, 1.00), (1.00, 1.00))))
# case 5:
std_samples = np.array(((1.00, 1e50), (-1.00, -1.00)))
res = uq._get_std_corr_matrix(std_samples)
+ assert res is not None
assert np.allclose(res, np.array(((1.00, 0.00), (0.00, 1.00))))
# test that it fails for invalid inputs
for bad_item in (np.nan, np.inf, -np.inf):
- with pytest.raises(ValueError):
- x = np.array(((1.00, bad_item), (-1.00, -1.00)))
+ x = np.array(((1.00, bad_item), (-1.00, -1.00)))
+ with pytest.raises(
+ ValueError, match='std_samples array must not contain inf or NaN values'
+ ):
uq._get_std_corr_matrix(x)
-def test__mvn_scale():
+def test__mvn_scale() -> None:
# case 1:
np.random.seed(40)
sample = np.random.normal(0.00, 1.00, size=(2, 5)).T
@@ -334,8 +385,8 @@ def test__mvn_scale():
assert np.allclose(res, np.array((0.0, 0.0, 0.0, 0.0, 0.0)))
-def test__neg_log_likelihood():
- # Parameters not whithin the pre-defined bounds should yield a
+def test__neg_log_likelihood() -> None:
+ # Parameters not within the pre-defined bounds should yield a
# large value to discourage the optimization algorithm from going
# in that direction.
res = uq._neg_log_likelihood(
@@ -349,9 +400,9 @@ def test__neg_log_likelihood():
(1.10, 0.30),
),
),
- dist_list=['normal', 'normal'],
- tr_limits=[None, None],
- det_limits=[None, None],
+ dist_list=np.array(('normal', 'normal')),
+ tr_limits=np.array((np.nan, np.nan)),
+ det_limits=[np.array((np.nan, np.nan))],
censored_count=0,
enforce_bounds=True,
)
@@ -362,17 +413,17 @@ def test__neg_log_likelihood():
res = uq._neg_log_likelihood(
np.array((np.nan, 0.20)),
np.array((1.00, 0.20)),
- 0.00,
- 20.00,
+ np.atleast_1d((0.00,)),
+ np.atleast_1d((20.00,)),
np.array(
(
(0.90, 0.10),
(1.10, 0.30),
),
),
- ['normal', 'normal'],
- [-np.inf, np.inf],
- [np.nan, np.nan],
+ np.array(('normal', 'normal')),
+ np.array((-np.inf, np.inf)),
+ [np.array((np.nan, np.nan))],
0,
enforce_bounds=False,
)
@@ -380,7 +431,7 @@ def test__neg_log_likelihood():
assert res == 1e10
-def test_fit_distribution_to_sample_univariate():
+def test_fit_distribution_to_sample_univariate() -> None:
# a single value in the sample
sample_vec = np.array((1.00,))
res = uq.fit_distribution_to_sample(sample_vec, 'normal')
@@ -396,21 +447,29 @@ def test_fit_distribution_to_sample_univariate():
assert np.isclose(res[0][0, 0], np.mean(sample_vec))
assert np.isclose(res[0][0, 1], np.inf)
assert np.isclose(res[1][0, 0], 1.00)
- res = uq.fit_distribution_to_sample(sample_vec, 'normal-stdev')
+ res = uq.fit_distribution_to_sample(sample_vec, 'normal_std')
assert np.isclose(res[0][0, 0], np.mean(sample_vec))
assert np.isclose(res[0][0, 1], 2.0)
assert np.isclose(res[1][0, 0], 1.00)
+ res = uq.fit_distribution_to_sample(sample_vec, 'normal_cov')
+ assert np.isclose(res[0][0, 0], np.mean(sample_vec))
+ assert np.isclose(res[0][0, 1], np.inf)
+ assert np.isclose(res[1][0, 0], 1.00)
# baseline case where the cov=mu/sigma is defined
sample_vec += 10.00
- res = uq.fit_distribution_to_sample(sample_vec, 'normal')
+ res = uq.fit_distribution_to_sample(sample_vec, 'normal_cov')
assert np.isclose(res[0][0, 0], np.mean(sample_vec))
assert np.isclose(res[0][0, 1], np.std(sample_vec) / np.mean(sample_vec))
assert np.isclose(res[1][0, 0], 1.00)
- res = uq.fit_distribution_to_sample(sample_vec, 'normal-stdev')
+ res = uq.fit_distribution_to_sample(sample_vec, 'normal_std')
assert np.isclose(res[0][0, 0], np.mean(sample_vec))
assert np.isclose(res[0][0, 1], np.std(sample_vec))
assert np.isclose(res[1][0, 0], 1.00)
+ res = uq.fit_distribution_to_sample(sample_vec, 'normal')
+ assert np.isclose(res[0][0, 0], np.mean(sample_vec))
+ assert np.isclose(res[0][0, 1], np.std(sample_vec) / np.mean(sample_vec))
+ assert np.isclose(res[1][0, 0], 1.00)
# lognormal
log_sample_vec = np.log(sample_vec)
@@ -453,12 +512,27 @@ def test_fit_distribution_to_sample_univariate():
usable_sample = usable_sample.reshape((1, -1))
res_a = uq.fit_distribution_to_sample(
usable_sample,
- 'normal',
+ 'normal_cov',
censored_count=c_count,
- detection_limits=[c_lower, c_upper],
+ detection_limits=(c_lower, c_upper),
)
compare_a = (
- np.array(((1.13825975, 0.46686491))),
+ np.array((1.13825975, 0.46686491)),
+ np.array(
+ ((1.00,)),
+ ),
+ )
+ assert np.allclose(res_a[0], compare_a[0])
+ assert np.allclose(res_a[1], compare_a[1])
+
+ res_a = uq.fit_distribution_to_sample(
+ usable_sample,
+ 'normal_std',
+ censored_count=c_count,
+ detection_limits=(c_lower, c_upper),
+ )
+ compare_a = (
+ np.array((1.13825975, 0.53141375)),
np.array(
((1.00,)),
),
@@ -477,12 +551,12 @@ def test_fit_distribution_to_sample_univariate():
usable_sample = usable_sample.reshape((1, -1))
res_b = uq.fit_distribution_to_sample(
usable_sample,
- 'normal',
+ 'normal_cov',
censored_count=c_count,
- detection_limits=[c_lower, c_upper],
+ detection_limits=(c_lower, c_upper),
)
compare_b = (
- np.array(((-1.68598848, 1.75096914))),
+ np.array((-1.68598848, 1.75096914)),
np.array(
((1.00,)),
),
@@ -501,12 +575,12 @@ def test_fit_distribution_to_sample_univariate():
usable_sample = usable_sample.reshape((1, -1))
res_c = uq.fit_distribution_to_sample(
usable_sample,
- 'normal',
+ 'normal_cov',
censored_count=c_count,
- detection_limits=[c_lower, c_upper],
+ detection_limits=(c_lower, c_upper),
)
compare_c = (
- np.array(((1.68598845, 1.75096921))),
+ np.array((1.68598845, 1.75096921)),
np.array(
((1.00,)),
),
@@ -524,9 +598,12 @@ def test_fit_distribution_to_sample_univariate():
sample_vec = np.array((-3.00, -2.00, -1.00, 0.00, 1.00, 2.00, 3.00)).reshape(
(1, -1)
)
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match='One or more sample values lie outside of the specified truncation limits.',
+ ):
res = uq.fit_distribution_to_sample(
- sample_vec, 'normal', truncation_limits=[t_lower, t_upper]
+ sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper)
)
# truncated data, only lower, expect failure
@@ -535,9 +612,15 @@ def test_fit_distribution_to_sample_univariate():
sample_vec = np.array((-3.00, -2.00, -1.00, 0.00, 1.00, 2.00, 3.00)).reshape(
(1, -1)
)
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match=(
+ 'One or more sample values lie '
+ 'outside of the specified truncation limits.'
+ ),
+ ):
res = uq.fit_distribution_to_sample(
- sample_vec, 'normal', truncation_limits=[t_lower, t_upper]
+ sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper)
)
# truncated data, only upper, expect failure
@@ -546,9 +629,15 @@ def test_fit_distribution_to_sample_univariate():
sample_vec = np.array((-3.00, -2.00, -1.00, 0.00, 1.00, 2.00, 3.00)).reshape(
(1, -1)
)
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match=(
+ 'One or more sample values lie '
+ 'outside of the specified truncation limits.'
+ ),
+ ):
res = uq.fit_distribution_to_sample(
- sample_vec, 'normal', truncation_limits=[t_lower, t_upper]
+ sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper)
)
# truncated data, lower and upper
@@ -557,10 +646,10 @@ def test_fit_distribution_to_sample_univariate():
t_upper = +4.50
sample_vec = np.array((0.00, 1.00, 2.00, 3.00, 4.00)).reshape((1, -1))
res_a = uq.fit_distribution_to_sample(
- sample_vec, 'normal', truncation_limits=[t_lower, t_upper]
+ sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper)
)
compare_a = (
- np.array(((1.99999973, 2.2639968))),
+ np.array((1.99999973, 2.2639968)),
np.array(
((1.00,)),
),
@@ -576,9 +665,9 @@ def test_fit_distribution_to_sample_univariate():
(1, -1)
)
res_b = uq.fit_distribution_to_sample(
- sample_vec, 'normal', truncation_limits=[t_lower, t_upper]
+ sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper)
)
- compare_b = (np.array(((-0.09587816, 21.95601487))), np.array(((1.00,))))
+ compare_b = (np.array((-0.09587816, 21.95601487)), np.array((1.00,)))
assert np.allclose(res_b[0], compare_b[0])
assert np.allclose(res_b[1], compare_b[1])
@@ -590,10 +679,10 @@ def test_fit_distribution_to_sample_univariate():
(1, -1)
)
res_c = uq.fit_distribution_to_sample(
- sample_vec, 'normal', truncation_limits=[t_lower, t_upper]
+ sample_vec, 'normal_cov', truncation_limits=(t_lower, t_upper)
)
compare_c = (
- np.array(((0.09587811, 21.95602574))),
+ np.array((0.09587811, 21.95602574)),
np.array(
((1.00,)),
),
@@ -606,7 +695,7 @@ def test_fit_distribution_to_sample_univariate():
assert np.isclose(res_b[0][0, 1], res_c[0][0, 1])
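+
+
+# A minimal sketch of the truncation handling verified above, assuming the
+# textbook formulation (not pelicun's exact code): with truncation limits
+# (a, b), each observation's log-density is renormalized by the probability
+# mass inside the limits, log f(x) - log(F(b) - F(a)).
+def _truncated_normal_nll_sketch(
+    params: np.ndarray, sample: np.ndarray, a: float, b: float
+) -> float:
+    mu, sigma = params
+    mass = norm.cdf(b, loc=mu, scale=sigma) - norm.cdf(a, loc=mu, scale=sigma)
+    return float(-np.sum(norm.logpdf(sample, loc=mu, scale=sigma) - np.log(mass)))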
-def test_fit_distribution_to_sample_multivariate():
+def test_fit_distribution_to_sample_multivariate() -> None:
# uncorrelated, normal
np.random.seed(40)
sample = np.random.multivariate_normal(
@@ -615,7 +704,7 @@ def test_fit_distribution_to_sample_multivariate():
np.random.seed(40)
# note: distribution can be specified once, implying that it is
# the same for all random variables.
- res = uq.fit_distribution_to_sample(sample, ['normal'])
+ res = uq.fit_distribution_to_sample(sample, ['normal_cov'])
compare = (
np.array(((0.9909858, 1.01732669), (0.99994493, 0.99588164))),
np.array(((1.00, 0.0092258), (0.0092258, 1.00))),
@@ -629,7 +718,7 @@ def test_fit_distribution_to_sample_multivariate():
(1.00, 1.00), np.array(((1.00, 0.70), (0.70, 1.00))), size=10000
).T
np.random.seed(40)
- res = uq.fit_distribution_to_sample(sample, ['normal', 'normal'])
+ res = uq.fit_distribution_to_sample(sample, ['normal_cov', 'normal_cov'])
compare = (
np.array(((1.00833201, 1.0012552), (1.00828936, 0.99477853))),
np.array(((1.00, 0.70623679), (0.70623679, 1.00))),
@@ -645,9 +734,9 @@ def test_fit_distribution_to_sample_multivariate():
np.random.seed(40)
res = uq.fit_distribution_to_sample(
sample,
- ['normal', 'normal'],
- truncation_limits=np.array((-5.00, 6.00)),
- detection_limits=np.array((0.20, 1.80)),
+ ['normal_cov', 'normal_cov'],
+ truncation_limits=(-5.00, 6.00),
+ detection_limits=(0.20, 1.80),
)
compare = (
np.array(((1.00833201, 1.0012552), (1.00828936, 0.99477853))),
@@ -660,12 +749,12 @@ def test_fit_distribution_to_sample_multivariate():
np.random.seed(40)
sample = np.full(
(2, 10),
- 3.14,
+ 123.00,
)
np.random.seed(40)
- res = uq.fit_distribution_to_sample(sample, ['normal', 'normal'])
+ res = uq.fit_distribution_to_sample(sample, ['normal_cov', 'normal_cov'])
compare = (
- np.array(((3.14, 1.0e-6), (3.14, 1.0e-6))),
+ np.array(((123.00, 1.0e-6), (123.00, 1.0e-6))),
np.array(((1.00, 0.00), (0.00, 1.00))),
)
assert np.allclose(res[0], compare[0])
@@ -680,7 +769,7 @@ def test_fit_distribution_to_sample_multivariate():
)
np.random.seed(40)
res = uq.fit_distribution_to_sample(
- sample, ['lognormal', 'lognormal'], detection_limits=np.array((1e-8, 5.00))
+ sample, ['lognormal', 'lognormal'], detection_limits=(1e-8, 5.00)
)
compare = (
np.array(((4.60517598e00, 2.18581908e-04), (4.60517592e00, 2.16575944e-04))),
@@ -694,11 +783,11 @@ def test_fit_distribution_to_sample_multivariate():
np.random.seed(40)
sample = np.full(
(1, 10),
- 3.14,
+ math.pi,
)
np.random.seed(40)
with pytest.raises(IndexError):
- res = uq.fit_distribution_to_sample(sample, ['normal', 'normal'])
+ res = uq.fit_distribution_to_sample(sample, ['normal_cov', 'normal_cov'])
# extreme examples:
# for these we just ensure that the function works without
@@ -712,14 +801,14 @@ def test_fit_distribution_to_sample_multivariate():
).T
sample = np.exp(sample)
sample += np.random.uniform(-10.00, 10.00, size=sample.shape)
- res = uq.fit_distribution_to_sample(sample, ['normal', 'normal'])
+ res = uq.fit_distribution_to_sample(sample, ['normal_cov', 'normal_cov'])
for res_i in res:
assert not np.any(np.isinf(res_i))
assert not np.any(np.isnan(res_i))
# 2) very noisy input data, normal fit
sample = np.random.uniform(-10.00, 10.00, size=sample.shape)
- res = uq.fit_distribution_to_sample(sample, ['normal', 'normal'])
+ res = uq.fit_distribution_to_sample(sample, ['normal_cov', 'normal_cov'])
for res_i in res:
assert not np.any(np.isinf(res_i))
assert not np.any(np.isnan(res_i))
@@ -739,14 +828,16 @@ def test_fit_distribution_to_sample_multivariate():
sample = np.concatenate(
(np.random.normal(0.00, 1.00, size=100000), np.array((np.inf,)))
)
- with pytest.raises(ValueError):
- uq.fit_distribution_to_sample(sample, ['normal'])
+ with pytest.raises(
+ ValueError, match='Conversion to standard normal space was unsuccessful'
+ ):
+ uq.fit_distribution_to_sample(sample, ['normal_cov'])
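+
+
+# A minimal sketch of the censoring handled above, assuming the standard
+# censored-likelihood term (not pelicun's exact code): with detection limits
+# (a, b) and `c` samples censored outside them, the negative log-likelihood
+# gains a term -c * log(1 - (F(b) - F(a))).
+def _censored_normal_nll_sketch(
+    params: np.ndarray, sample: np.ndarray, a: float, b: float, c: int
+) -> float:
+    mu, sigma = params
+    p_inside = norm.cdf(b, loc=mu, scale=sigma) - norm.cdf(a, loc=mu, scale=sigma)
+    nll = -np.sum(norm.logpdf(sample, loc=mu, scale=sigma))
+    if c > 0:
+        nll -= c * np.log(1.0 - p_inside)
+    return float(nll)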
-def test_fit_distribution_to_percentiles():
+def test_fit_distribution_to_percentiles() -> None:
# normal, mean of 20 and standard deviation of 10
- percentiles = np.linspace(0.01, 0.99, num=10000)
- values = norm.ppf(percentiles, loc=20, scale=10)
+ percentiles = np.linspace(0.01, 0.99, num=10000).tolist()
+ values = norm.ppf(percentiles, loc=20, scale=10).tolist()
res = uq.fit_distribution_to_percentiles(
values, percentiles, ['normal', 'lognormal']
)
@@ -754,7 +845,7 @@ def test_fit_distribution_to_percentiles():
assert np.allclose(res[1], np.array((20.00, 10.00)))
# lognormal, median of 20 and beta of 0.4
- ln_values = lognorm.ppf(percentiles, s=0.40, scale=20.00)
+ ln_values = lognorm.ppf(percentiles, s=0.40, scale=20.00).tolist()
res = uq.fit_distribution_to_percentiles(
ln_values, percentiles, ['normal', 'lognormal']
)
@@ -762,17 +853,19 @@ def test_fit_distribution_to_percentiles():
assert np.allclose(res[1], np.array((20.0, 0.40)))
# unrecognized distribution family
- percentiles = np.linspace(0.01, 0.99, num=10000)
- values = norm.ppf(percentiles, loc=20, scale=10)
- with pytest.raises(ValueError):
+ percentiles = np.linspace(0.01, 0.99, num=10000).tolist()
+ values = norm.ppf(percentiles, loc=20, scale=10).tolist()
+ with pytest.raises(
+ ValueError, match='Distribution family not recognized: birnbaum-saunders'
+ ):
uq.fit_distribution_to_percentiles(
values, percentiles, ['lognormal', 'birnbaum-saunders']
)
-def test__OLS_percentiles():
+def test__OLS_percentiles() -> None:
# normal: negative standard deviation
- params = np.array((2.50, -0.10))
+ params = (2.50, -0.10)
perc = np.linspace(1e-2, 1.00 - 1e-2, num=5)
values = norm.ppf(perc, loc=20, scale=10)
family = 'normal'
@@ -780,7 +873,7 @@ def test__OLS_percentiles():
assert res == 10000000000.0
# lognormal: negative median
- params = np.array((-1.00, 0.40))
+ params = (-1.00, 0.40)
perc = np.linspace(1e-2, 1.00 - 1e-2, num=5)
values = lognorm.ppf(perc, s=0.40, scale=20.00)
family = 'lognormal'
@@ -797,35 +890,64 @@ def test__OLS_percentiles():
# The following tests verify the methods of the objects of the module.
-def test_NormalRandomVariable():
+def test_NormalRandomVariable() -> None:
rv = uq.NormalRandomVariable('rv_name', theta=np.array((0.00, 1.00)))
assert rv.name == 'rv_name'
np.testing.assert_allclose(rv.theta, np.array((0.00, 1.00)))
assert np.all(np.isnan(rv.truncation_limits))
assert rv.RV_set is None
assert rv.sample_DF is None
+ # confirm that creating an attribute on the fly is not allowed
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
+
+
+def test_Normal_STD() -> None:
+ rv = uq.Normal_STD('rv_name', theta=np.array((0.00, 1.00)))
+ assert rv.name == 'rv_name'
+ np.testing.assert_allclose(rv.theta, np.array((0.00, 1.00)))
+ assert np.all(np.isnan(rv.truncation_limits))
+ assert rv.RV_set is None
+ assert rv.sample_DF is None
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
+
+
+def test_Normal_COV() -> None:
+ with pytest.raises(
+ ValueError, match='The mean of Normal_COV RVs cannot be zero.'
+ ):
+ rv = uq.Normal_COV('rv_name', theta=np.array((0.00, 1.00)))
+ rv = uq.Normal_COV('rv_name', theta=np.array((2.00, 1.00)))
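+    # The assertion below suggests that Normal_COV stores theta as
+    # (mean, std) with std = mean * cov, so (2.00, 1.00) becomes (2.00, 2.00).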
+ assert rv.name == 'rv_name'
+ np.testing.assert_allclose(rv.theta, np.array((2.00, 2.00)))
+ assert np.all(np.isnan(rv.truncation_limits))
+ assert rv.RV_set is None
+ assert rv.sample_DF is None
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
-def test_NormalRandomVariable_cdf():
+def test_NormalRandomVariable_cdf() -> None:
# test CDF method
rv = uq.NormalRandomVariable(
'test_rv',
- theta=(1.0, 1.0),
+ theta=np.array((1.0, 1.0)),
truncation_limits=np.array((0.00, np.nan)),
)
# evaluate CDF at different points
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
# assert that CDF values are correct
assert np.allclose(cdf, (0.0, 0.0, 0.1781461, 0.40571329, 0.81142658), rtol=1e-5)
# repeat without truncation limits
- rv = uq.NormalRandomVariable('test_rv', theta=(1.0, 1.0))
+ rv = uq.NormalRandomVariable('test_rv', theta=np.array((1.0, 1.0)))
# evaluate CDF at different points
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
# assert that CDF values are correct
@@ -834,49 +956,80 @@ def test_NormalRandomVariable_cdf():
)
-def test_NormalRandomVariable_inverse_transform():
+def test_Normal_STD_cdf() -> None:
+ rv = uq.Normal_STD(
+ 'test_rv',
+ theta=np.array((1.0, 1.0)),
+ truncation_limits=np.array((0.00, np.nan)),
+ )
+
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
+ cdf = rv.cdf(x)
+ assert np.allclose(cdf, (0.0, 0.0, 0.1781461, 0.40571329, 0.81142658), rtol=1e-5)
+
+
+def test_Normal_COV_cdf() -> None:
+ rv = uq.Normal_COV(
+ 'test_rv',
+ theta=np.array((1.0, 1.0)),
+ truncation_limits=np.array((0.00, np.nan)),
+ )
+
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
+ cdf = rv.cdf(x)
+ assert np.allclose(cdf, (0.0, 0.0, 0.1781461, 0.40571329, 0.81142658), rtol=1e-5)
+
+
+def test_NormalRandomVariable_inverse_transform() -> None:
samples = np.array((0.10, 0.20, 0.30))
- rv = uq.NormalRandomVariable('test_rv', theta=(1.0, 0.5))
+ rv = uq.NormalRandomVariable('test_rv', theta=np.array((1.0, 0.5)))
rv.uni_sample = samples
rv.inverse_transform_sampling()
inverse_transform = rv.sample
+ assert inverse_transform is not None
assert np.allclose(
inverse_transform, np.array((0.35922422, 0.57918938, 0.73779974)), rtol=1e-5
)
- rv = uq.NormalRandomVariable('test_rv', theta=(1.0, 0.5))
- with pytest.raises(ValueError):
+ rv = uq.NormalRandomVariable('test_rv', theta=np.array((1.0, 0.5)))
+ with pytest.raises(ValueError, match='No available uniform sample.'):
rv.inverse_transform_sampling()
# with truncation limits
rv = uq.NormalRandomVariable(
- 'test_rv', theta=(1.0, 0.5), truncation_limits=(np.nan, 1.20)
+ 'test_rv',
+ theta=np.array((1.0, 0.5)),
+ truncation_limits=np.array((np.nan, 1.20)),
)
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(
inverse_transform, np.array((0.24508018, 0.43936, 0.57313359)), rtol=1e-5
)
rv = uq.NormalRandomVariable(
- 'test_rv', theta=(1.0, 0.5), truncation_limits=(0.80, np.nan)
+ 'test_rv',
+ theta=np.array((1.0, 0.5)),
+ truncation_limits=np.array((0.80, np.nan)),
)
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(
inverse_transform, np.array((0.8863824, 0.96947866, 1.0517347)), rtol=1e-5
)
rv = uq.NormalRandomVariable(
- 'test_rv', theta=(1.0, 0.5), truncation_limits=(0.80, 1.20)
+ 'test_rv',
+ theta=np.array((1.0, 0.5)),
+ truncation_limits=np.array((0.80, 1.20)),
)
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(
inverse_transform, np.array((0.84155378, 0.88203946, 0.92176503)), rtol=1e-5
)
@@ -887,48 +1040,87 @@ def test_NormalRandomVariable_inverse_transform():
# normal with problematic truncation limits
rv = uq.NormalRandomVariable(
- 'test_rv', theta=(1.0, 0.5), truncation_limits=(1e8, 2e8)
+ 'test_rv', theta=np.array((1.0, 0.5)), truncation_limits=np.array((1e8, 2e8))
)
rv.uni_sample = samples
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match=(
+ 'The probability mass within the truncation '
+ 'limits is too small and the truncated '
+ 'distribution cannot be sampled with '
+ 'sufficiently high accuracy. This is most probably '
+ 'due to incorrect truncation limits set '
+ 'for the distribution.'
+ ),
+ ):
rv.inverse_transform_sampling()
-def test_LogNormalRandomVariable_cdf():
+def test_Normal_STD_inverse_transform() -> None:
+ samples = np.array((0.10, 0.20, 0.30))
+ rv = uq.Normal_STD('test_rv', theta=np.array((1.0, 0.5)))
+ rv.uni_sample = samples
+ rv.inverse_transform_sampling()
+ inverse_transform = ensure_value(rv.sample)
+ assert np.allclose(
+ inverse_transform, np.array((0.35922422, 0.57918938, 0.73779974)), rtol=1e-5
+ )
+
+
+def test_Normal_COV_inverse_transform() -> None:
+ samples = np.array((0.10, 0.20, 0.30))
+ rv = uq.Normal_COV('test_rv', theta=np.array((1.0, 0.5)))
+ rv.uni_sample = samples
+ rv.inverse_transform_sampling()
+ inverse_transform = ensure_value(rv.sample)
+ assert np.allclose(
+ inverse_transform, np.array((0.35922422, 0.57918938, 0.73779974)), rtol=1e-5
+ )
+
+
+def test_LogNormalRandomVariable_cdf() -> None:
# lower truncation
rv = uq.LogNormalRandomVariable(
'test_rv',
- theta=(1.0, 1.0),
+ theta=np.array((1.0, 1.0)),
truncation_limits=np.array((0.10, np.nan)),
)
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ # confirm that creating an attribute on the fly is not allowed
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
- assert np.allclose(cdf, (0.0, 0.0, 0.23597085, 0.49461712, 0.75326339), rtol=1e-5)
+ assert np.allclose(
+ cdf, (0.0, 0.0, 0.23597085, 0.49461712, 0.75326339), rtol=1e-5
+ )
# upper truncation
rv = uq.LogNormalRandomVariable(
'test_rv',
- theta=(1.0, 1.0),
+ theta=np.array((1.0, 1.0)),
truncation_limits=np.array((np.nan, 5.00)),
)
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
- assert np.allclose(cdf, (0.00, 0.00, 0.25797755, 0.52840734, 0.79883714), rtol=1e-5)
+ assert np.allclose(
+ cdf, (0.00, 0.00, 0.25797755, 0.52840734, 0.79883714), rtol=1e-5
+ )
# no truncation
- rv = uq.LogNormalRandomVariable('test_rv', theta=(1.0, 1.0))
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ rv = uq.LogNormalRandomVariable('test_rv', theta=np.array((1.0, 1.0)))
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
assert np.allclose(cdf, (0.0, 0.0, 0.2441086, 0.5, 0.7558914), rtol=1e-5)
-def test_LogNormalRandomVariable_inverse_transform():
+def test_LogNormalRandomVariable_inverse_transform() -> None:
samples = np.array((0.10, 0.20, 0.30))
- rv = uq.LogNormalRandomVariable('test_rv', theta=(1.0, 0.5))
+ rv = uq.LogNormalRandomVariable('test_rv', theta=np.array((1.0, 0.5)))
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(
inverse_transform, np.array((0.52688352, 0.65651442, 0.76935694)), rtol=1e-5
@@ -940,12 +1132,12 @@ def test_LogNormalRandomVariable_inverse_transform():
rv = uq.LogNormalRandomVariable(
'test_rv',
- theta=(1.0, 0.5),
+ theta=np.array((1.0, 0.5)),
truncation_limits=np.array((0.50, np.nan)),
)
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(
inverse_transform, np.array((0.62614292, 0.73192471, 0.83365823)), rtol=1e-5
)
@@ -955,81 +1147,84 @@ def test_LogNormalRandomVariable_inverse_transform():
#
# lognormal without values to sample from
- rv = uq.LogNormalRandomVariable('test_rv', theta=(1.0, 0.5))
- with pytest.raises(ValueError):
+ rv = uq.LogNormalRandomVariable('test_rv', theta=np.array((1.0, 0.5)))
+ with pytest.raises(ValueError, match='No available uniform sample.'):
rv.inverse_transform_sampling()
-def test_UniformRandomVariable_cdf():
+def test_UniformRandomVariable_cdf() -> None:
# uniform, both theta values
- rv = uq.UniformRandomVariable('test_rv', theta=(0.0, 1.0))
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.0, 1.0)))
+ # confirm that creating an attribute on the fly is not allowed
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
assert np.allclose(cdf, (0.0, 0.0, 0.5, 1.0, 1.0), rtol=1e-5)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
# uniform, only upper theta value ( -inf implied )
- rv = uq.UniformRandomVariable('test_rv', theta=(np.nan, 100.00))
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((np.nan, 100.00)))
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
assert np.all(np.isnan(cdf))
# uniform, only lower theta value ( +inf implied )
- rv = uq.UniformRandomVariable('test_rv', theta=(0.00, np.nan))
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.00, np.nan)))
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
assert np.allclose(cdf, (0.0, 0.0, 0.0, 0.0, 0.0), rtol=1e-5)
# uniform, with truncation limits
rv = uq.UniformRandomVariable(
'test_rv',
- theta=(0.0, 10.0),
+ theta=np.array((0.0, 10.0)),
truncation_limits=np.array((0.00, 1.00)),
)
- x = (-1.0, 0.0, 0.5, 1.0, 2.0)
+ x = np.array((-1.0, 0.0, 0.5, 1.0, 2.0))
cdf = rv.cdf(x)
assert np.allclose(cdf, (0.0, 0.0, 0.5, 1.0, 1.0), rtol=1e-5)
-def test_UniformRandomVariable_inverse_transform():
- rv = uq.UniformRandomVariable('test_rv', theta=(0.0, 1.0))
+def test_UniformRandomVariable_inverse_transform() -> None:
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.0, 1.0)))
samples = np.array((0.10, 0.20, 0.30))
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(inverse_transform, samples, rtol=1e-5)
#
# uniform with unspecified bounds
#
- rv = uq.UniformRandomVariable('test_rv', theta=(np.nan, 1.0))
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((np.nan, 1.0)))
samples = np.array((0.10, 0.20, 0.30))
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.all(np.isnan(inverse_transform))
- rv = uq.UniformRandomVariable('test_rv', theta=(0.00, np.nan))
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.00, np.nan)))
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.all(np.isinf(inverse_transform))
rv = uq.UniformRandomVariable(
'test_rv',
- theta=(0.00, 1.00),
+ theta=np.array((0.00, 1.00)),
truncation_limits=np.array((0.20, 0.80)),
)
rv.uni_sample = samples
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(inverse_transform, np.array((0.26, 0.32, 0.38)), rtol=1e-5)
# sample as a pandas series, with a log() map
rv.f_map = np.log
- assert rv.sample_DF.to_dict() == {
+ assert ensure_value(rv.sample_DF).to_dict() == {
0: -1.3470736479666092,
1: -1.1394342831883646,
2: -0.9675840262617056,
@@ -1040,63 +1235,161 @@ def test_UniformRandomVariable_inverse_transform():
#
# uniform without values to sample from
- rv = uq.UniformRandomVariable('test_rv', theta=(0.0, 1.0))
- with pytest.raises(ValueError):
+ rv = uq.UniformRandomVariable('test_rv', theta=np.array((0.0, 1.0)))
+ with pytest.raises(ValueError, match='No available uniform sample.'):
rv.inverse_transform_sampling()
-def test_MultinomialRandomVariable():
+def test_WeibullRandomVariable() -> None:
+ rv = uq.WeibullRandomVariable('rv_name', theta=np.array((1.5, 2.0)))
+ assert rv.name == 'rv_name'
+ np.testing.assert_allclose(rv.theta, np.array((1.5, 2.0)))
+ assert np.all(np.isnan(rv.truncation_limits))
+ assert rv.RV_set is None
+ assert rv.sample_DF is None
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
+
+
+def test_WeibullRandomVariable_cdf() -> None:
+ rv = uq.WeibullRandomVariable(
+ 'test_rv',
+ theta=np.array((1.5, 2.0)),
+ truncation_limits=np.array((0.5, 2.5)),
+ )
+
+ x = np.array((0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
+ cdf = rv.cdf(x)
+
+ expected_cdf = np.array([0.0, 0.0, 0.30463584, 0.63286108, 0.87169261, 1.0, 1.0])
+ assert np.allclose(cdf, expected_cdf, rtol=1e-5)
+
+ rv = uq.WeibullRandomVariable('test_rv', theta=np.array((1.5, 2.0)))
+ cdf = rv.cdf(x)
+ expected_cdf_no_trunc = weibull_min.cdf(x, 2.0, scale=1.5)
+ assert np.allclose(cdf, expected_cdf_no_trunc, rtol=1e-5)
+
+
+def test_WeibullRandomVariable_inverse_transform() -> None:
+ samples = np.array((0.10, 0.20, 0.30))
+
+ rv = uq.WeibullRandomVariable('test_rv', theta=np.array((1.5, 2.0)))
+ rv.uni_sample = samples
+ rv.inverse_transform_sampling()
+ inverse_transform = ensure_value(rv.sample)
+ expected_samples = weibull_min.ppf(samples, 2.0, scale=1.5)
+ assert np.allclose(inverse_transform, expected_samples, rtol=1e-5)
+
+ rv = uq.WeibullRandomVariable(
+ 'test_rv', theta=np.array((1.5, 2.0)), truncation_limits=np.array((0.5, 2.5))
+ )
+ rv.uni_sample = samples
+ rv.inverse_transform_sampling()
+ inverse_transform = ensure_value(rv.sample)
+ truncated_samples = weibull_min.ppf(
+ samples
+ * (
+ weibull_min.cdf(2.5, 2.0, scale=1.5)
+ - weibull_min.cdf(0.5, 2.0, scale=1.5)
+ )
+ + weibull_min.cdf(0.5, 2.0, scale=1.5),
+ 2.0,
+ scale=1.5,
+ )
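+    # The rescaling above is the standard truncated inverse-CDF map:
+    # u' = F(a) + u * (F(b) - F(a)) confines each uniform sample to the CDF
+    # mass between the truncation limits (a, b) = (0.5, 2.5), and ppf(u')
+    # then inverts the untruncated CDF at the rescaled value.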
+ assert np.allclose(inverse_transform, truncated_samples, rtol=1e-5)
+
+
+def test_MultinomialRandomVariable() -> None:
# multinomial with invalid p values provided in the theta vector
- with pytest.raises(ValueError):
- uq.MultinomialRandomVariable('rv_invalid', np.array((0.20, 0.70, 0.10, 42.00)))
+ with pytest.raises(
+ ValueError,
+ match=re.escape(
+ 'The set of p values provided for a multinomial '
+ 'distribution shall sum up to less than or equal to 1.0. '
+ 'The provided values sum up to 43.0. '
+ 'p = [ 0.2 0.7 0.1 42. ] .'
+ ),
+ ):
+ uq.MultinomialRandomVariable(
+ 'rv_invalid', np.array((0.20, 0.70, 0.10, 42.00))
+ )
-def test_MultilinearCDFRandomVariable():
+def test_MultilinearCDFRandomVariable() -> None:
# multilinear CDF: cases that should fail
x_values = (0.00, 1.00, 2.00, 3.00, 4.00)
y_values = (100.00, 0.20, 0.20, 0.80, 1.00)
values = np.column_stack((x_values, y_values))
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match='For multilinear CDF random variables, y_1 should be set to 0.00',
+ ):
uq.MultilinearCDFRandomVariable('test_rv', theta=values)
x_values = (0.00, 1.00, 2.00, 3.00, 4.00)
y_values = (0.00, 0.20, 0.20, 0.80, 0.80)
values = np.column_stack((x_values, y_values))
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match='For multilinear CDF random variables, y_n should be set to 1.00',
+ ):
uq.MultilinearCDFRandomVariable('test_rv', theta=values)
x_values = (0.00, 3.00, 1.00, 2.00, 4.00)
y_values = (0.00, 0.25, 0.50, 0.75, 1.00)
values = np.column_stack((x_values, y_values))
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match='For multilinear CDF random variables, Xs should be specified in ascending order',
+ ):
uq.MultilinearCDFRandomVariable('test_rv', theta=values)
x_values = (0.00, 1.00, 2.00, 3.00, 4.00)
y_values = (0.00, 0.75, 0.50, 0.25, 1.00)
values = np.column_stack((x_values, y_values))
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match='For multilinear CDF random variables, Ys should be specified in ascending order',
+ ):
uq.MultilinearCDFRandomVariable('test_rv', theta=values)
x_values = (0.00, 1.00, 2.00, 3.00, 4.00)
y_values = (0.00, 0.50, 0.50, 0.50, 1.00)
values = np.column_stack((x_values, y_values))
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match=(
+ 'For multilinear CDF random variables, '
+ 'Ys should be specified in strictly ascending order'
+ ),
+ ):
uq.MultilinearCDFRandomVariable('test_rv', theta=values)
x_values = (0.00, 2.00, 2.00, 3.00, 4.00)
y_values = (0.00, 0.20, 0.40, 0.50, 1.00)
values = np.column_stack((x_values, y_values))
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError,
+ match=(
+ 'For multilinear CDF random variables, '
+ 'Xs should be specified in strictly ascending order'
+ ),
+ ):
uq.MultilinearCDFRandomVariable('test_rv', theta=values)
-def test_MultilinearCDFRandomVariable_cdf():
+def test_MultilinearCDFRandomVariable_cdf() -> None:
x_values = (0.00, 1.00, 2.00, 3.00, 4.00)
y_values = (0.00, 0.20, 0.30, 0.80, 1.00)
values = np.column_stack((x_values, y_values))
rv = uq.MultilinearCDFRandomVariable('test_rv', theta=values)
- x = (-100.00, 0.00, 0.50, 1.00, 1.50, 2.00, 2.50, 3.00, 3.50, 4.00, 100.00)
+ # confirm that creating an attribute on the fly is not allowed
+ with pytest.raises(AttributeError):
+ rv.xyz = 123 # type: ignore
+ x = np.array(
+ (-100.00, 0.00, 0.50, 1.00, 1.50, 2.00, 2.50, 3.00, 3.50, 4.00, 100.00)
+ )
cdf = rv.cdf(x)
assert np.allclose(
@@ -1106,7 +1399,7 @@ def test_MultilinearCDFRandomVariable_cdf():
)
-def test_MultilinearCDFRandomVariable_inverse_transform():
+def test_MultilinearCDFRandomVariable_inverse_transform() -> None:
x_values = (0.00, 1.00, 2.00, 3.00, 4.00)
y_values = (0.00, 0.20, 0.30, 0.80, 1.00)
values = np.column_stack((x_values, y_values))
@@ -1114,7 +1407,7 @@ def test_MultilinearCDFRandomVariable_inverse_transform():
rv.uni_sample = np.array((0.00, 0.1, 0.2, 0.5, 0.8, 0.9, 1.00))
rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ inverse_transform = ensure_value(rv.sample)
assert np.allclose(
inverse_transform,
np.array((0.00, 0.50, 1.00, 2.40, 3.00, 3.50, 4.00)),
@@ -1122,76 +1415,95 @@ def test_MultilinearCDFRandomVariable_inverse_transform():
)
-def test_EmpiricalRandomVariable_inverse_transform():
+def test_EmpiricalRandomVariable_inverse_transform() -> None:
samples = np.array((0.10, 0.20, 0.30))
- rv = uq.EmpiricalRandomVariable('test_rv', raw_samples=(1.00, 2.00, 3.00, 4.00))
+ rv_empirical = uq.EmpiricalRandomVariable(
+ 'test_rv_empirical', theta=np.array((1.00, 2.00, 3.00, 4.00))
+ )
+ # confirm that creating an attribute on the fly is not allowed
+ with pytest.raises(AttributeError):
+ rv_empirical.xyz = 123 # type: ignore
samples = np.array((0.10, 0.50, 0.90))
- rv.uni_sample = samples
- rv.inverse_transform_sampling()
- inverse_transform = rv.sample
+ rv_empirical.uni_sample = samples
+ rv_empirical.inverse_transform_sampling()
+ inverse_transform = ensure_value(rv_empirical.sample)
assert np.allclose(inverse_transform, np.array((1.00, 3.00, 4.00)), rtol=1e-5)
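+    # These values are consistent with an empirical inverse transform of the
+    # form theta[int(u * n)] (assumed mechanism): with n = 4, the uniforms
+    # (0.10, 0.50, 0.90) select indices (0, 2, 3), i.e. (1.00, 3.00, 4.00).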
- rv = uq.CoupledEmpiricalRandomVariable(
- 'test_rv',
- raw_samples=np.array((1.00, 2.00, 3.00, 4.00)),
+ rv_coupled = uq.CoupledEmpiricalRandomVariable(
+ 'test_rv_coupled',
+ theta=np.array((1.00, 2.00, 3.00, 4.00)),
)
- rv.inverse_transform_sampling(sample_size=6)
- inverse_transform = rv.sample
+ rv_coupled.inverse_transform_sampling(sample_size=6)
+ inverse_transform = ensure_value(rv_coupled.sample)
assert np.allclose(
inverse_transform, np.array((1.00, 2.00, 3.00, 4.00, 1.00, 2.00)), rtol=1e-5
)
-def test_DeterministicRandomVariable_inverse_transform():
+def test_DeterministicRandomVariable_inverse_transform() -> None:
rv = uq.DeterministicRandomVariable('test_rv', theta=np.array((0.00,)))
rv.inverse_transform_sampling(4)
- inverse_transform = rv.sample
- assert np.allclose(inverse_transform, np.array((0.00, 0.00, 0.00, 0.00)), rtol=1e-5)
+ inverse_transform = ensure_value(rv.sample)
+ assert np.allclose(
+ inverse_transform, np.array((0.00, 0.00, 0.00, 0.00)), rtol=1e-5
+ )
-def test_RandomVariable_Set():
+def test_RandomVariable_Set() -> None:
# a set of two random variables
- rv_1 = uq.NormalRandomVariable('rv1', theta=(1.0, 1.0))
- rv_2 = uq.NormalRandomVariable('rv2', theta=(1.0, 1.0))
- rv_set = uq.RandomVariableSet( # noqa: F841
- 'test_set', (rv_1, rv_2), np.array(((1.0, 0.50), (0.50, 1.0)))
+ rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0)))
+ rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((1.0, 1.0)))
+ rv_set = uq.RandomVariableSet(
+ 'test_set', [rv_1, rv_2], np.array(((1.0, 0.50), (0.50, 1.0)))
)
# size of the set
assert rv_set.size == 2
# a set with only one random variable
- rv_1 = uq.NormalRandomVariable('rv1', theta=(1.0, 1.0))
- rv_set = uq.RandomVariableSet( # noqa: F841
- 'test_set', (rv_1,), np.array(((1.0, 0.50),))
- )
+ rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0)))
+ rv_set = uq.RandomVariableSet('test_set', [rv_1], np.array(((1.0, 0.50),)))
-def test_RandomVariable_Set_apply_correlation(reset=False):
- data_dir = 'pelicun/tests/data/uq/test_random_variable_set_apply_correlation'
- file_incr = 0
+def test_RandomVariable_perfect_correlation() -> None:
+ # Test that the `.anchor` attribute is propagated correctly
+ rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0)))
+ rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((1.0, 1.0)), anchor=rv_1)
+ rv_1.uni_sample = np.random.random(size=10)
+ assert np.all(rv_2.uni_sample == rv_1.uni_sample)
+
+ rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((1.0, 1.0)))
+ rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((1.0, 1.0)))
+ rv_1.uni_sample = np.random.random(size=10)
+ assert rv_2.uni_sample is None
+
+
+def test_RandomVariable_Set_apply_correlation(*, reset: bool = False) -> None:
+ data_dir = (
+ 'pelicun/tests/basic/data/uq/test_random_variable_set_apply_correlation'
+ )
# correlated, uniform
np.random.seed(40)
- rv_1 = uq.UniformRandomVariable(name='rv1', theta=(-5.0, 5.0))
- rv_2 = uq.UniformRandomVariable(name='rv2', theta=(-5.0, 5.0))
+ rv_1 = uq.UniformRandomVariable(name='rv1', theta=np.array((-5.0, 5.0)))
+ rv_2 = uq.UniformRandomVariable(name='rv2', theta=np.array((-5.0, 5.0)))
rv_1.uni_sample = np.random.random(size=100)
rv_2.uni_sample = np.random.random(size=100)
rvs = uq.RandomVariableSet(
- name='test_set', RV_list=[rv_1, rv_2], Rho=np.array(((1.0, 0.5), (0.5, 1.0)))
+ name='test_set', rv_list=[rv_1, rv_2], rho=np.array(((1.0, 0.5), (0.5, 1.0)))
)
rvs.apply_correlation()
- for rv in (rv_1, rv_2):
- res = rv.uni_sample
- file_incr += 1
+ for i, rv in enumerate((rv_1, rv_2)):
+ res = ensure_value(rv.uni_sample)
+ file_incr = i + 1
filename = f'{data_dir}/test_{file_incr}.pcl'
if reset:
export_pickle(filename, res)
@@ -1203,13 +1515,13 @@ def test_RandomVariable_Set_apply_correlation(reset=False):
rv_1.inverse_transform_sampling()
rv_2.inverse_transform_sampling()
rvset_sample = rvs.sample
- assert set(rvset_sample.keys()) == set(('rv1', 'rv2'))
+ assert set(rvset_sample.keys()) == {'rv1', 'rv2'}
vals = list(rvset_sample.values())
assert np.all(vals[0] == rv_1.sample)
assert np.all(vals[1] == rv_2.sample)
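+
+
+# A minimal sketch of what apply_correlation is expected to do, inferred
+# from the tests rather than copied from pelicun: correlate the uniform
+# samples through a Gaussian copula by mapping to standard normal space,
+# multiplying by a Cholesky factor of rho, and mapping back.
+def _apply_correlation_sketch(u: np.ndarray, rho: np.ndarray) -> np.ndarray:
+    z = norm.ppf(u)  # (n_vars, n_samples) in standard normal space
+    z_corr = np.linalg.cholesky(rho) @ z  # impose the target correlation
+    return norm.cdf(z_corr)  # back to correlated uniform samples
+
+
+# Note that np.linalg.cholesky would fail for the non-positive-semidefinite
+# matrices exercised in the next test, which is presumably why the library
+# handles those cases separately.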
-def test_RandomVariable_Set_apply_correlation_special():
+def test_RandomVariable_Set_apply_correlation_special() -> None:
# This function tests the apply_correlation method of the
# RandomVariableSet class when given special input conditions.
# The first test checks that the method works when given a non
@@ -1225,8 +1537,8 @@ def test_RandomVariable_Set_apply_correlation_special():
# non positive semidefinite correlation matrix
rho = np.array(((1.00, 0.50), (0.50, -1.00)))
- rv_1 = uq.NormalRandomVariable('rv1', theta=[5.0, 0.1])
- rv_2 = uq.NormalRandomVariable('rv2', theta=[5.0, 0.1])
+ rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((5.0, 0.1)))
+ rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((5.0, 0.1)))
rv_1.uni_sample = np.random.random(size=100)
rv_2.uni_sample = np.random.random(size=100)
rv_set = uq.RandomVariableSet('rv_set', [rv_1, rv_2], rho)
@@ -1234,8 +1546,8 @@ def test_RandomVariable_Set_apply_correlation_special():
# non full rank matrix
rho = np.array(((0.00, 0.00), (0.0, 0.0)))
- rv_1 = uq.NormalRandomVariable('rv1', theta=[5.0, 0.1])
- rv_2 = uq.NormalRandomVariable('rv2', theta=[5.0, 0.1])
+ rv_1 = uq.NormalRandomVariable('rv1', theta=np.array((5.0, 0.1)))
+ rv_2 = uq.NormalRandomVariable('rv2', theta=np.array((5.0, 0.1)))
rv_1.uni_sample = np.random.random(size=100)
rv_2.uni_sample = np.random.random(size=100)
rv_set = uq.RandomVariableSet('rv_set', [rv_1, rv_2], rho)
@@ -1245,21 +1557,23 @@ def test_RandomVariable_Set_apply_correlation_special():
)
-def test_RandomVariable_Set_orthotope_density(reset=False):
- data_dir = 'pelicun/tests/data/uq/test_random_variable_set_orthotope_density'
+def test_RandomVariable_Set_orthotope_density(*, reset: bool = False) -> None:
+ data_dir = (
+ 'pelicun/tests/basic/data/uq/test_random_variable_set_orthotope_density'
+ )
# create some random variables
- rv_1 = uq.NormalRandomVariable(
- 'rv1', theta=[5.0, 0.1], truncation_limits=np.array((np.nan, 10.0))
+ rv_1 = uq.Normal_COV(
+ 'rv1', theta=np.array((5.0, 0.1)), truncation_limits=np.array((np.nan, 10.0))
)
- rv_2 = uq.LogNormalRandomVariable('rv2', theta=[10.0, 0.2])
- rv_3 = uq.UniformRandomVariable('rv3', theta=[13.0, 17.0])
- rv_4 = uq.UniformRandomVariable('rv4', theta=[0.0, 1.0])
- rv_5 = uq.UniformRandomVariable('rv5', theta=[0.0, 1.0])
+ rv_2 = uq.LogNormalRandomVariable('rv2', theta=np.array((10.0, 0.2)))
+ rv_3 = uq.UniformRandomVariable('rv3', theta=np.array((13.0, 17.0)))
+ rv_4 = uq.UniformRandomVariable('rv4', theta=np.array((0.0, 1.0)))
+ rv_5 = uq.UniformRandomVariable('rv5', theta=np.array((0.0, 1.0)))
# create a random variable set
rv_set = uq.RandomVariableSet(
- 'rv_set', (rv_1, rv_2, rv_3, rv_4, rv_5), np.identity(5)
+ 'rv_set', [rv_1, rv_2, rv_3, rv_4, rv_5], np.identity(5)
)
# define test cases
@@ -1268,7 +1582,7 @@ def test_RandomVariable_Set_orthotope_density(reset=False):
(
np.array([4.0, 9.0, 14.0, np.nan]),
np.array([6.0, 11.0, 16.0, 0.80]),
- ('rv1', 'rv2', 'rv3', 'rv4'),
+ ['rv1', 'rv2', 'rv3', 'rv4'],
),
(
np.array([4.0, 9.0, 14.0, np.nan, 0.20]),
@@ -1293,7 +1607,7 @@ def test_RandomVariable_Set_orthotope_density(reset=False):
res = rv_set.orthotope_density(lower, upper, var_subset=var_subset)
# check that the density is equal to the expected value
# construct a filepath for the results
- filename = f'{data_dir}/test_{i+1}.pcl'
+ filename = f'{data_dir}/test_{i + 1}.pcl'
# overwrite results if needed
if reset:
export_pickle(filename, res)
@@ -1303,8 +1617,10 @@ def test_RandomVariable_Set_orthotope_density(reset=False):
assert np.allclose(res, compare)
-def test_RandomVariableRegistry_generate_sample(reset=False):
- data_dir = 'pelicun/tests/data/uq/test_RandomVariableRegistry_generate_sample'
+def test_RandomVariableRegistry_generate_sample(*, reset: bool = False) -> None:
+ data_dir = (
+ 'pelicun/tests/basic/data/uq/test_RandomVariableRegistry_generate_sample'
+ )
file_incr = 0
for method in ('LHS_midpoint', 'LHS', 'MonteCarlo'):
@@ -1316,14 +1632,14 @@ def test_RandomVariableRegistry_generate_sample(reset=False):
rng = np.random.default_rng(0)
rv_registry_single = uq.RandomVariableRegistry(rng)
# create the random variable and add it to the registry
- RV = uq.NormalRandomVariable('x', theta=[1.0, 1.0])
- rv_registry_single.add_RV(RV)
+ rv = uq.NormalRandomVariable('x', theta=np.array((1.0, 1.0)))
+ rv_registry_single.add_RV(rv)
# Generate a sample
sample_size = 1000
rv_registry_single.generate_sample(sample_size, method)
- res = rv_registry_single.RV_sample['x']
+ res = ensure_value(rv_registry_single.RV_sample['x'])
assert len(res) == sample_size
file_incr += 1
@@ -1345,13 +1661,15 @@ def test_RandomVariableRegistry_generate_sample(reset=False):
# create a random variable registry and add some random variables to it
rng = np.random.default_rng(4)
rv_registry = uq.RandomVariableRegistry(rng)
- rv_1 = uq.NormalRandomVariable('rv1', theta=[5.0, 0.1])
- rv_2 = uq.LogNormalRandomVariable('rv2', theta=[10.0, 0.2])
- rv_3 = uq.UniformRandomVariable('rv3', theta=[13.0, 17.0])
+ rv_1 = uq.Normal_COV('rv1', theta=np.array((5.0, 0.1)))
+ rv_2 = uq.LogNormalRandomVariable('rv2', theta=np.array((10.0, 0.2)))
+ rv_3 = uq.UniformRandomVariable('rv3', theta=np.array((13.0, 17.0)))
rv_registry.add_RV(rv_1)
rv_registry.add_RV(rv_2)
rv_registry.add_RV(rv_3)
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError, match='RV rv3 already exists in the registry.'
+ ):
rv_registry.add_RV(rv_3)
# create a random variable set and add it to the registry
@@ -1361,16 +1679,16 @@ def test_RandomVariableRegistry_generate_sample(reset=False):
rv_registry.add_RV_set(rv_set)
# add some more random variables that are not part of the set
- rv_4 = uq.NormalRandomVariable('rv4', theta=[14.0, 0.30])
- rv_5 = uq.NormalRandomVariable('rv5', theta=[15.0, 0.50])
+ rv_4 = uq.Normal_COV('rv4', theta=np.array((14.0, 0.30)))
+ rv_5 = uq.Normal_COV('rv5', theta=np.array((15.0, 0.50)))
rv_registry.add_RV(rv_4)
rv_registry.add_RV(rv_5)
rv_registry.generate_sample(10, method=method)
# verify that all samples have been generated as expected
- for rv_name in (f'rv{i+1}' for i in range(5)):
- res = rv_registry.RV_sample[rv_name]
+ for rv_name in (f'rv{i + 1}' for i in range(5)):
+ res = ensure_value(rv_registry.RV_sample[rv_name])
file_incr += 1
filename = f'{data_dir}/test_{file_incr}.pcl'
if reset:
@@ -1379,17 +1697,19 @@ def test_RandomVariableRegistry_generate_sample(reset=False):
assert np.allclose(res, compare)
# obtain multiple RVs from the registry
- rv_dictionary = rv_registry.RVs(('rv1', 'rv2'))
+ rv_dictionary = rv_registry.RVs(['rv1', 'rv2'])
assert 'rv1' in rv_dictionary
assert 'rv2' in rv_dictionary
assert 'rv3' not in rv_dictionary
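+
+
+# A minimal sketch of the 'LHS_midpoint' option exercised above, assuming
+# the usual construction implied by the name (not pelicun's exact code):
+# Latin Hypercube Sampling with midpoints places one sample at the center
+# of each of n equal-probability strata and shuffles the order.
+def _lhs_midpoint_sketch(rng: np.random.Generator, n: int) -> np.ndarray:
+    u = (np.arange(n) + 0.5) / n  # stratum midpoints in (0, 1)
+    rng.shuffle(u)  # random ordering so variables pair up randomly
+    return u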
-def test_rv_class_map():
- rv_class = uq.rv_class_map('normal')
- assert rv_class.__name__ == 'NormalRandomVariable'
+def test_rv_class_map() -> None:
+ rv_class = uq.rv_class_map('normal_std')
+ assert rv_class.__name__ == 'Normal_STD'
- with pytest.raises(ValueError):
+ with pytest.raises(
+ ValueError, match=re.escape('Unsupported distribution: ')
+ ):
uq.rv_class_map('')
diff --git a/pelicun/tests/code_repetition_checker.py b/pelicun/tests/code_repetition_checker.py
index 5fa15794e..d877e4b2a 100644
--- a/pelicun/tests/code_repetition_checker.py
+++ b/pelicun/tests/code_repetition_checker.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -43,21 +42,28 @@
Python test files.
"""
-from glob2 import glob
+from __future__ import annotations
+from pathlib import Path
-def main(file):
+from glob2 import glob # type: ignore
+
+
+def main(file: str) -> None:
"""
Identifies and displays repeated consecutive line blocks within a
file, including their line numbers.
- Args:
- file: Path to the file to be checked for duplicates.
+ Parameters
+ ----------
+ file: str
+ Path to the file to be checked for duplicates.
+
"""
# file = 'tests/test_uq.py'
     group = 15  # find repeated blocks of this many lines
- with open(file, 'r', encoding='utf-8') as f:
+ with Path(file).open(encoding='utf-8') as f:
contents = f.readlines()
num_lines = len(contents)
for i in range(0, num_lines, group):
@@ -65,22 +71,22 @@ def main(file):
for j in range(i + 1, num_lines):
jlines = contents[j : j + group]
if glines == jlines:
- print(f'{i, j}: ')
+ print(f'{i, j}: ') # noqa: T201
for k in range(group):
- print(f' {jlines[k]}', end='')
- print()
+ print(f' {jlines[k]}', end='') # noqa: T201
+ print() # noqa: T201
-def all_test_files():
+def all_test_files() -> None:
"""
Searches for all Python test files in the 'tests' directory and
runs the main function to find and print repeated line blocks in each file.
"""
test_files = glob('tests/*.py')
for file in test_files:
- print()
- print(file)
- print()
+ print() # noqa: T201
+ print(file) # noqa: T201
+ print() # noqa: T201
main(file)
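+
+
+if __name__ == '__main__':
+    # Hypothetical entry point, added here for illustration only (not part
+    # of the original module): scan every test file for repeated blocks.
+    all_test_files()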
diff --git a/pelicun/tests/data/model/test_DamageModel__evaluate_damage_state_and_prepare_dmg_quantities/demand_sample.csv b/pelicun/tests/data/model/test_DamageModel__evaluate_damage_state_and_prepare_dmg_quantities/demand_sample.csv
deleted file mode 100644
index c0168e43b..000000000
--- a/pelicun/tests/data/model/test_DamageModel__evaluate_damage_state_and_prepare_dmg_quantities/demand_sample.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-,1-PFA-0-1,1-PFA-0-2,1-PFA-1-1,1-PFA-1-2,1-PID-1-1,1-PID-1-2,1-PID-2-1,1-PID-2-2,1-SA_0.23-0-1
-Units,inps2,inps2,inps2,inps2,rad,rad,rad,rad,inps2
-0,158.62478,158.62478,397.04389,397.04389,0.02672,0.02672,0.02672,0.02672,342.149
diff --git a/pelicun/tests/data/model/test_DamageModel_assemble_required_demand_data/demand_sample.csv b/pelicun/tests/data/model/test_DamageModel_assemble_required_demand_data/demand_sample.csv
deleted file mode 100644
index 149b88ff9..000000000
--- a/pelicun/tests/data/model/test_DamageModel_assemble_required_demand_data/demand_sample.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-,1-PFA-0-1,1-PFA-0-2,1-PFA-1-1,1-PFA-1-2,1-PID-1-1,1-PID-1-2,1-PID-2-1,1-PID-2-2,1-SA_0.23-0-1
-Units,inps2,inps2,inps2,inps2,rad,rad,rad,rad,inps2
-0,100.00,105.00,110.00,115.00,0.001,0.002,0.003,0.004,120.00
diff --git a/pelicun/tests/data/model/test_DamageModel_calculate_multilinear_CDF/damage_model.csv b/pelicun/tests/data/model/test_DamageModel_calculate_multilinear_CDF/damage_model.csv
deleted file mode 100644
index bef4cd8d4..000000000
--- a/pelicun/tests/data/model/test_DamageModel_calculate_multilinear_CDF/damage_model.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-DamageStateWeights
-test_component,0,Peak Ground Velocity,inps2,0,1,multilinear_CDF,"0.0,0.4,0.8,1.2,1.6,2.0|0.00,0.20,0.40,0.60,0.80,1.0",
diff --git a/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/CMP_marginals.csv b/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/CMP_marginals.csv
deleted file mode 100755
index b38e30dd6..000000000
--- a/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/CMP_marginals.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-,Units,Location,Direction,Theta_0,Blocks
-CMP.A,ea,1,1,1,1
-CMP.B,ea,1,2,1,1
diff --git a/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/CMP_marginals_2.csv b/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/CMP_marginals_2.csv
deleted file mode 100755
index 46aed067a..000000000
--- a/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/CMP_marginals_2.csv
+++ /dev/null
@@ -1,5 +0,0 @@
-,Units,Location,Direction,Theta_0,Blocks
-CMP.A,ea,1,1,1,1
-CMP.A,ea,2,1,1,1
-CMP.B,ea,1,2,1,1
-CMP.B,ea,2,2,1,1
diff --git a/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/fragility_DB_test.csv b/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/fragility_DB_test.csv
deleted file mode 100644
index 881172f23..000000000
--- a/pelicun/tests/data/model/test_DamageModel_perform_dmg_task/fragility_DB_test.csv
+++ /dev/null
@@ -1,3 +0,0 @@
-ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS1-DamageStateWeights
-CMP.A,0,Peak Interstory Drift Ratio,ea,0,1,,0.04,,0.95|0.05
-CMP.B,0,Peak Interstory Drift Ratio,ea,0,1,,0.04,,0.95|0.05
diff --git a/pelicun/tests/dl_calculation/__init__.py b/pelicun/tests/dl_calculation/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e1/8000-AIM.json b/pelicun/tests/dl_calculation/e1/8000-AIM.json
new file mode 100644
index 000000000..b78114f01
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1/8000-AIM.json
@@ -0,0 +1,21 @@
+{
+ "GeneralInformation": {
+ "NumberOfStories": 1,
+ "YearBuilt": 1900,
+ "StructureType": "C1",
+ "OccupancyClass": "EDU1",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "DL": {
+ "ApplicationData": {
+ "ground_failure": false
+ }
+ }
+ }
+}
diff --git a/pelicun/tests/dl_calculation/e1/__init__.py b/pelicun/tests/dl_calculation/e1/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e1/response.csv b/pelicun/tests/dl_calculation/e1/response.csv
new file mode 100644
index 000000000..3034fb85b
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1/response.csv
@@ -0,0 +1,101 @@
+,1-PGA-1-1
+0.000000000000000000e+00,6.849029933956130911e+00
+1.000000000000000000e+00,1.237134226051886543e+01
+2.000000000000000000e+00,1.972131621939452018e+01
+3.000000000000000000e+00,9.152152931865703778e+00
+4.000000000000000000e+00,4.762545330142875954e+00
+5.000000000000000000e+00,5.119594082152485903e+00
+6.000000000000000000e+00,8.985259370480688901e+00
+7.000000000000000000e+00,5.800429585653212428e+00
+8.000000000000000000e+00,6.721154036788936637e+00
+9.000000000000000000e+00,1.180442967680789756e+01
+1.000000000000000000e+01,1.757573848172475195e+01
+1.100000000000000000e+01,2.408892253534509109e+01
+1.200000000000000000e+01,5.366956767518109572e+00
+1.300000000000000000e+01,6.689460282908749278e+00
+1.400000000000000000e+01,9.837892431968002782e+00
+1.500000000000000000e+01,3.108201101576595793e+00
+1.600000000000000000e+01,7.969306015954598976e+00
+1.700000000000000000e+01,6.239227951703175457e+00
+1.800000000000000000e+01,1.265503535460110918e+01
+1.900000000000000000e+01,5.152699658386873161e+00
+2.000000000000000000e+01,8.945841984822743953e+00
+2.100000000000000000e+01,5.271650608692700857e+00
+2.200000000000000000e+01,1.217883862654925764e+01
+2.300000000000000000e+01,4.565064448801355645e+00
+2.400000000000000000e+01,2.827185963889101927e+01
+2.500000000000000000e+01,8.360633657235220895e+00
+2.600000000000000000e+01,1.771275449651086475e+01
+2.700000000000000000e+01,1.353470186586244495e+01
+2.800000000000000000e+01,1.585083782737845048e+01
+2.900000000000000000e+01,7.743412977553257193e+00
+3.000000000000000000e+01,1.673262791659537640e+01
+3.100000000000000000e+01,1.125514571474493408e+01
+3.200000000000000000e+01,6.706408356999864928e+00
+3.300000000000000000e+01,7.770377729315534943e+00
+3.400000000000000000e+01,9.665443132282717897e+00
+3.500000000000000000e+01,1.723980753309739811e+01
+3.600000000000000000e+01,1.291221957249968177e+01
+3.700000000000000000e+01,5.012730578330669040e+00
+3.800000000000000000e+01,1.143700619441758626e+01
+3.900000000000000000e+01,1.252350066480562596e+01
+4.000000000000000000e+01,6.383801705492390788e+00
+4.100000000000000000e+01,8.314748267032781470e+00
+4.200000000000000000e+01,8.332817023133479495e+00
+4.300000000000000000e+01,7.344179823126522066e+00
+4.400000000000000000e+01,1.645381636611937282e+01
+4.500000000000000000e+01,1.204983450209533657e+01
+4.600000000000000000e+01,1.165673673862969828e+01
+4.700000000000000000e+01,7.513555832366318299e+00
+4.800000000000000000e+01,2.024437216200684730e+01
+4.900000000000000000e+01,8.920834603836945931e+00
+5.000000000000000000e+01,1.080188057233199572e+01
+5.100000000000000000e+01,6.199736427905365943e+00
+5.200000000000000000e+01,1.372165205481582362e+01
+5.300000000000000000e+01,7.684718998480578378e+00
+5.400000000000000000e+01,5.171043463741079371e+00
+5.500000000000000000e+01,2.111664537027713706e+01
+5.600000000000000000e+01,1.019006581042710913e+01
+5.700000000000000000e+01,4.275116867520939223e+00
+5.800000000000000000e+01,1.899195644078132261e+01
+5.900000000000000000e+01,1.213049449493617438e+01
+6.000000000000000000e+01,4.832690227828255303e+00
+6.100000000000000000e+01,3.791254293305350132e+00
+6.200000000000000000e+01,8.030002531225640894e+00
+6.300000000000000000e+01,1.163245027176597013e+01
+6.400000000000000000e+01,2.171715232778592508e+01
+6.500000000000000000e+01,7.206784171136174422e+00
+6.600000000000000000e+01,4.534544778468278636e+00
+6.700000000000000000e+01,8.180354349576319350e+00
+6.800000000000000000e+01,2.096016461144264653e+01
+6.900000000000000000e+01,1.020904287768493468e+01
+7.000000000000000000e+01,3.578274393687075783e+00
+7.100000000000000000e+01,1.207216137873451700e+01
+7.200000000000000000e+01,1.899808690168648084e+01
+7.300000000000000000e+01,1.447931410453573520e+01
+7.400000000000000000e+01,4.652951138159096445e+00
+7.500000000000000000e+01,9.959855894302648949e+00
+7.600000000000000000e+01,4.533117400993131874e+00
+7.700000000000000000e+01,7.248896994573899910e+00
+7.800000000000000000e+01,8.842184263094427621e+00
+7.900000000000000000e+01,4.215076299759047629e+00
+8.000000000000000000e+01,1.529557773187450032e+01
+8.100000000000000000e+01,4.175349145066248546e+00
+8.200000000000000000e+01,1.009543326050395251e+01
+8.300000000000000000e+01,8.690186504818180779e+00
+8.400000000000000000e+01,1.547032449114289854e+01
+8.500000000000000000e+01,7.042901121410118925e+00
+8.600000000000000000e+01,4.125904243435575935e+00
+8.700000000000000000e+01,1.376223081867616571e+01
+8.800000000000000000e+01,7.388148474784416386e+00
+8.900000000000000000e+01,1.453892207787288093e+01
+9.000000000000000000e+01,1.299062984361380124e+01
+9.100000000000000000e+01,6.391741821717948469e+00
+9.200000000000000000e+01,8.088304936390127153e+00
+9.300000000000000000e+01,2.300799414165177481e+01
+9.400000000000000000e+01,1.575348473932006321e+01
+9.500000000000000000e+01,1.019287352203471109e+01
+9.600000000000000000e+01,1.786715222266329661e+01
+9.700000000000000000e+01,1.328402105666898869e+01
+9.800000000000000000e+01,5.457579231912252915e+00
+9.900000000000000000e+01,5.857004480627420406e+00
diff --git a/pelicun/tests/dl_calculation/e1/test_e1.py b/pelicun/tests/dl_calculation/e1/test_e1.py
new file mode 100644
index 000000000..1db78a984
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1/test_e1.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""DL Calculation Example 1."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
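+    """Yield the test directory and a temp directory; restore the CWD at teardown."""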
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial directory; otherwise tests that run
+    # afterwards could be affected.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_1(obtain_temp_dir: tuple[str, str]) -> None:
+    this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+    os.chdir(this_dir)
+    # copy input files
+ for file_name in ('8000-AIM.json', 'response.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+    # change to the temporary directory
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='8000-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '8000-AIM.json',
+ '8000-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e1_no_autopop/8000-AIM.json b/pelicun/tests/dl_calculation/e1_no_autopop/8000-AIM.json
new file mode 100644
index 000000000..f255daef2
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1_no_autopop/8000-AIM.json
@@ -0,0 +1,87 @@
+{
+ "GeneralInformation": {
+ "NumberOfStories": 1,
+ "YearBuilt": 1900,
+ "StructureType": "C1",
+ "OccupancyClass": "EDU1",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ },
+ "DesignLevel": "LC",
+ "BuildingRise": "L"
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "DL": {
+ "ApplicationData": {
+ "ground_failure": false
+ }
+ }
+ },
+ "DL": {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "Hazus Earthquake - Buildings",
+ "NumberOfStories": "1",
+ "OccupancyType": "EDU1",
+ "PlanArea": "1"
+ },
+ "Damage": {
+ "DamageProcess": "Hazus Earthquake",
+ "ScalingSpecification": {"LF.C1.L.LC-1-1": "*1.2", "collapse-0-1": "*1.2"}
+ },
+ "Demands": {
+ "DemandFilePath": "response.csv",
+ "SampleSize": "100",
+ "CoupledDemands": true
+ },
+ "Losses": {
+ "Repair": {
+ "ConsequenceDatabase": "Hazus Earthquake - Buildings",
+ "MapApproach": "Automatic"
+ }
+ },
+ "Options": {
+ "NonDirectionalMultipliers": {
+ "ALL": 1.0
+ }
+ },
+ "Outputs": {
+ "Demand": {
+ "Sample": true,
+ "Statistics": false
+ },
+ "Asset": {
+ "Sample": true,
+ "Statistics": false
+ },
+ "Damage": {
+ "Sample": false,
+ "Statistics": false,
+ "GroupedSample": true,
+ "GroupedStatistics": true
+ },
+ "Loss": {
+ "Repair": {
+ "Sample": true,
+ "Statistics": true,
+ "GroupedSample": true,
+ "GroupedStatistics": false,
+ "AggregateSample": true,
+ "AggregateStatistics": true
+ }
+ },
+ "Format": {
+ "CSV": false,
+ "JSON": true
+ },
+ "Settings": {
+ "CondenseDS": true,
+ "SimpleIndexInJSON": true,
+ "AggregateColocatedComponentResults": true
+ }
+ }
+ }
+}
diff --git a/pelicun/tests/dl_calculation/e1_no_autopop/CMP_QNT.csv b/pelicun/tests/dl_calculation/e1_no_autopop/CMP_QNT.csv
new file mode 100644
index 000000000..8ecf8aaef
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1_no_autopop/CMP_QNT.csv
@@ -0,0 +1,2 @@
+,Units,Location,Direction,Theta_0,Family
+LF.C1.L.LC,ea,1,1,1,N/A
diff --git a/pelicun/tests/dl_calculation/e1_no_autopop/__init__.py b/pelicun/tests/dl_calculation/e1_no_autopop/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1_no_autopop/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e1_no_autopop/response.csv b/pelicun/tests/dl_calculation/e1_no_autopop/response.csv
new file mode 100644
index 000000000..3034fb85b
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1_no_autopop/response.csv
@@ -0,0 +1,101 @@
+,1-PGA-1-1
+0.000000000000000000e+00,6.849029933956130911e+00
+1.000000000000000000e+00,1.237134226051886543e+01
+2.000000000000000000e+00,1.972131621939452018e+01
+3.000000000000000000e+00,9.152152931865703778e+00
+4.000000000000000000e+00,4.762545330142875954e+00
+5.000000000000000000e+00,5.119594082152485903e+00
+6.000000000000000000e+00,8.985259370480688901e+00
+7.000000000000000000e+00,5.800429585653212428e+00
+8.000000000000000000e+00,6.721154036788936637e+00
+9.000000000000000000e+00,1.180442967680789756e+01
+1.000000000000000000e+01,1.757573848172475195e+01
+1.100000000000000000e+01,2.408892253534509109e+01
+1.200000000000000000e+01,5.366956767518109572e+00
+1.300000000000000000e+01,6.689460282908749278e+00
+1.400000000000000000e+01,9.837892431968002782e+00
+1.500000000000000000e+01,3.108201101576595793e+00
+1.600000000000000000e+01,7.969306015954598976e+00
+1.700000000000000000e+01,6.239227951703175457e+00
+1.800000000000000000e+01,1.265503535460110918e+01
+1.900000000000000000e+01,5.152699658386873161e+00
+2.000000000000000000e+01,8.945841984822743953e+00
+2.100000000000000000e+01,5.271650608692700857e+00
+2.200000000000000000e+01,1.217883862654925764e+01
+2.300000000000000000e+01,4.565064448801355645e+00
+2.400000000000000000e+01,2.827185963889101927e+01
+2.500000000000000000e+01,8.360633657235220895e+00
+2.600000000000000000e+01,1.771275449651086475e+01
+2.700000000000000000e+01,1.353470186586244495e+01
+2.800000000000000000e+01,1.585083782737845048e+01
+2.900000000000000000e+01,7.743412977553257193e+00
+3.000000000000000000e+01,1.673262791659537640e+01
+3.100000000000000000e+01,1.125514571474493408e+01
+3.200000000000000000e+01,6.706408356999864928e+00
+3.300000000000000000e+01,7.770377729315534943e+00
+3.400000000000000000e+01,9.665443132282717897e+00
+3.500000000000000000e+01,1.723980753309739811e+01
+3.600000000000000000e+01,1.291221957249968177e+01
+3.700000000000000000e+01,5.012730578330669040e+00
+3.800000000000000000e+01,1.143700619441758626e+01
+3.900000000000000000e+01,1.252350066480562596e+01
+4.000000000000000000e+01,6.383801705492390788e+00
+4.100000000000000000e+01,8.314748267032781470e+00
+4.200000000000000000e+01,8.332817023133479495e+00
+4.300000000000000000e+01,7.344179823126522066e+00
+4.400000000000000000e+01,1.645381636611937282e+01
+4.500000000000000000e+01,1.204983450209533657e+01
+4.600000000000000000e+01,1.165673673862969828e+01
+4.700000000000000000e+01,7.513555832366318299e+00
+4.800000000000000000e+01,2.024437216200684730e+01
+4.900000000000000000e+01,8.920834603836945931e+00
+5.000000000000000000e+01,1.080188057233199572e+01
+5.100000000000000000e+01,6.199736427905365943e+00
+5.200000000000000000e+01,1.372165205481582362e+01
+5.300000000000000000e+01,7.684718998480578378e+00
+5.400000000000000000e+01,5.171043463741079371e+00
+5.500000000000000000e+01,2.111664537027713706e+01
+5.600000000000000000e+01,1.019006581042710913e+01
+5.700000000000000000e+01,4.275116867520939223e+00
+5.800000000000000000e+01,1.899195644078132261e+01
+5.900000000000000000e+01,1.213049449493617438e+01
+6.000000000000000000e+01,4.832690227828255303e+00
+6.100000000000000000e+01,3.791254293305350132e+00
+6.200000000000000000e+01,8.030002531225640894e+00
+6.300000000000000000e+01,1.163245027176597013e+01
+6.400000000000000000e+01,2.171715232778592508e+01
+6.500000000000000000e+01,7.206784171136174422e+00
+6.600000000000000000e+01,4.534544778468278636e+00
+6.700000000000000000e+01,8.180354349576319350e+00
+6.800000000000000000e+01,2.096016461144264653e+01
+6.900000000000000000e+01,1.020904287768493468e+01
+7.000000000000000000e+01,3.578274393687075783e+00
+7.100000000000000000e+01,1.207216137873451700e+01
+7.200000000000000000e+01,1.899808690168648084e+01
+7.300000000000000000e+01,1.447931410453573520e+01
+7.400000000000000000e+01,4.652951138159096445e+00
+7.500000000000000000e+01,9.959855894302648949e+00
+7.600000000000000000e+01,4.533117400993131874e+00
+7.700000000000000000e+01,7.248896994573899910e+00
+7.800000000000000000e+01,8.842184263094427621e+00
+7.900000000000000000e+01,4.215076299759047629e+00
+8.000000000000000000e+01,1.529557773187450032e+01
+8.100000000000000000e+01,4.175349145066248546e+00
+8.200000000000000000e+01,1.009543326050395251e+01
+8.300000000000000000e+01,8.690186504818180779e+00
+8.400000000000000000e+01,1.547032449114289854e+01
+8.500000000000000000e+01,7.042901121410118925e+00
+8.600000000000000000e+01,4.125904243435575935e+00
+8.700000000000000000e+01,1.376223081867616571e+01
+8.800000000000000000e+01,7.388148474784416386e+00
+8.900000000000000000e+01,1.453892207787288093e+01
+9.000000000000000000e+01,1.299062984361380124e+01
+9.100000000000000000e+01,6.391741821717948469e+00
+9.200000000000000000e+01,8.088304936390127153e+00
+9.300000000000000000e+01,2.300799414165177481e+01
+9.400000000000000000e+01,1.575348473932006321e+01
+9.500000000000000000e+01,1.019287352203471109e+01
+9.600000000000000000e+01,1.786715222266329661e+01
+9.700000000000000000e+01,1.328402105666898869e+01
+9.800000000000000000e+01,5.457579231912252915e+00
+9.900000000000000000e+01,5.857004480627420406e+00
diff --git a/pelicun/tests/dl_calculation/e1_no_autopop/test_e1.py b/pelicun/tests/dl_calculation/e1_no_autopop/test_e1.py
new file mode 100644
index 000000000..8f823c815
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e1_no_autopop/test_e1.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""DL Calculation Example 1."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
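+    """Yield the test directory and a temp directory; restore the CWD at teardown."""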
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial directory; otherwise tests that run
+    # afterwards could be affected.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_1(obtain_temp_dir: tuple[str, str]) -> None:
+    this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+    os.chdir(this_dir)
+    # copy input files
+ for file_name in ('8000-AIM.json', 'response.csv', 'CMP_QNT.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+    # change to the temporary directory
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='8000-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path=None,
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 18
+
+ # Verify their names
+ files = {
+ '8000-AIM.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e2/1-AIM.json b/pelicun/tests/dl_calculation/e2/1-AIM.json
new file mode 100644
index 000000000..9f53ffea1
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e2/1-AIM.json
@@ -0,0 +1,157 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "1",
+ "location": {
+ "latitude": 61.21700019,
+ "longitude": -149.9083196
+ },
+ "Latitude": 61.21700019,
+ "Longitude": -149.9083196,
+ "NumberOfStories": 2,
+ "YearBuilt": 1983,
+ "OccupancyClass": "RES1",
+ "PlanArea": 3059,
+ "StructureType": "W1",
+ "Footprint": "{\"type\":\"Feature\",\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-149.908450,61.217076],[-149.908450,61.217100],[-149.908506,61.217100],[-149.908506,61.217076],[-149.908450,61.217076]]]},\"properties\":{}}",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Examples/R2D/00_Built_ins/E2MDOFBuildingResponse/input_data/records",
+ "units": {
+ "TH_file": "mps2"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "MDOF-LU",
+ "ApplicationData": {}
+ },
+ "EDP": {
+ "Application": "StandardEarthquakeEDP",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "OpenSees-Simulation",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "Dakota-UQ",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH EQ Story",
+ "Realizations": 1000,
+ "auto_script": "PelicunDefault/Hazus_Earthquake_Story.py",
+ "coupled_EDP": false,
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {
+ "hazusData": "HazusData.txt",
+ "pathToHazusFile": "input_data",
+ "stdDamping": 0.1,
+ "stdStiffness": 0.2
+ },
+ "Simulation": {
+ "Application": "OpenSees-Simulation",
+ "algorithm": "Newton",
+ "analysis": "Transient -numSubLevels 2 -numSubSteps 10",
+ "convergenceTest": "NormUnbalance 1.0e-2 10",
+ "dampingModel": "Rayleigh Damping",
+ "firstMode": 1,
+ "integration": "Newmark 0.5 0.25",
+ "modalRayleighTangentRatio": 0,
+ "numModesModal": 1,
+ "rayleighTangent": "Initial",
+ "secondMode": -1,
+ "solver": "Umfpack"
+ },
+ "UQ": {
+ "parallelExecution": true,
+ "samplingMethodData": {
+ "method": "LHS",
+ "samples": 10,
+ "seed": 100
+ },
+ "saveWorkDir": true,
+ "uqType": "Forward Propagation"
+ },
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "site_2716x00000",
+ 1.0
+ ],
+ [
+ "site_8040x00001",
+ 1.0
+ ],
+ [
+ "site_8040x00002",
+ 1.0
+ ],
+ [
+ "site_8040x00003",
+ 1.0
+ ]
+ ],
+ "type": "timeHistory"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e2/__init__.py b/pelicun/tests/dl_calculation/e2/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e2/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e2/response.csv b/pelicun/tests/dl_calculation/e2/response.csv
new file mode 100644
index 000000000..8a0b9a596
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e2/response.csv
@@ -0,0 +1,11 @@
+%eval_id,interface,kFactor,dampFactor,eventID,1-PFA-0-1,1-PFA-0-2,1-PFA-1-1,1-PFA-1-2,1-PFD-1-1,1-PFD-1-2,1-PID-1-1,1-PID-1-2,1-PFA-2-1,1-PFA-2-2,1-PFD-2-1,1-PFD-2-2,1-PID-2-1,1-PID-2-2,1-PRD-1-1,1-PRD-1-2
+1,NO_ID,1.026465942,0.9956333887,site_8040x00001,215.043,168.716,76.252,56.8133,4.49167,3.22806,0.380295,0.273309,56.8146,48.3789,6.03711,4.36916,0.181566,0.115603,0.255571,0.18496
+2,NO_ID,0.850802135,0.9378316228,site_8040x00002,214.971,168.65,76.9281,52.415,4.96696,3.41827,0.420536,0.289415,55.0095,44.665,7.09337,4.63415,0.206361,0.131427,0.300287,0.196179
+3,NO_ID,0.9710487994,0.7747865923,site_8040x00002,215.006,168.708,76.9783,55.3668,5.11266,3.40528,0.432871,0.288313,56.8278,45.9009,7.05925,4.63904,0.209247,0.126186,0.298841,0.196387
+4,NO_ID,0.9181644673,1.011372976,site_8040x00003,215.037,168.844,74.0719,54.6637,4.61283,3.31155,0.390553,0.280378,56.0866,46.582,6.45243,4.47795,0.193768,0.122622,0.273153,0.189567
+5,NO_ID,1.113528118,1.131871685,site_2716x00000,215.728,220.759,71.5915,75.604,3.24263,6.64242,0.274543,0.562392,50.7769,78.835,4.64295,8.40443,0.12973,0.159593,0.196551,0.355787
+6,NO_ID,0.8068747036,1.028889898,site_8040x00002,215.024,168.594,79.0883,51.5823,4.88878,3.40033,0.413915,0.287895,54.2241,44.6102,7.09944,4.5938,0.201193,0.130674,0.300543,0.194471
+7,NO_ID,0.6791699963,1.079610422,site_2716x00000,215.834,221.631,64.1503,64.8678,4.00295,8.12018,0.338916,0.687509,43.8816,56.2641,5.82254,10.2027,0.188821,0.183259,0.246488,0.431914
+8,NO_ID,1.096051367,0.8818221076,site_2716x00000,215.517,221.027,72.4134,82.4459,3.5833,7.60049,0.303387,0.643508,49.9268,72.8163,5.10915,9.50482,0.143343,0.170842,0.216287,0.40237
+9,NO_ID,1.168661598,1.106919607,site_8040x00003,214.992,168.774,76.6332,58.3448,4.08816,3.00079,0.346132,0.254067,56.044,51.6227,5.3709,4.07461,0.153875,0.104099,0.227368,0.172491
+10,NO_ID,1.275305886,0.9535125809,site_8040x00001,214.965,168.732,78.0955,60.2986,4.29777,3.06692,0.363877,0.259665,57.0108,52.4049,5.56991,4.1414,0.155576,0.102947,0.235793,0.175319
diff --git a/pelicun/tests/dl_calculation/e2/test_e2.py b/pelicun/tests/dl_calculation/e2/test_e2.py
new file mode 100644
index 000000000..d9ed638e5
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e2/test_e2.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""DL Calculation Example 2."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
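+    """Yield the test directory and a temp directory; restore the CWD at teardown."""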
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial directory; otherwise tests that run
+    # afterwards could be affected.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_2(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+    os.chdir(this_dir)
+    # copy input files
+ for file_name in ('1-AIM.json', 'response.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='1-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='PelicunDefault/Hazus_Earthquake_Story.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '1-AIM.json',
+ '1-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e3/170-AIM.json b/pelicun/tests/dl_calculation/e3/170-AIM.json
new file mode 100644
index 000000000..7d74c69ac
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e3/170-AIM.json
@@ -0,0 +1,158 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "170",
+ "location": {
+ "latitude": 37.8720785,
+ "longitude": -122.271291
+ },
+ "Latitude": 37.8720785,
+ "Longitude": -122.271291,
+ "ReplacementCost": 32923670.0,
+ "PlanArea": 253259.0,
+ "YearBuilt": 1987,
+ "NumberOfStories": 5,
+ "OccupancyClass": "RES3",
+ "StructureType": "RM1",
+ "Footprint": "{\"type\":\"Feature\",\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-122.271843,37.872178],[-122.271797,37.871878],[-122.270739,37.871979],[-122.270785,37.872279],[-122.271843,37.872178]]]},\"properties\":{}}",
+ "units": {
+ "force": "N",
+ "length": "m",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "N",
+ "length": "m",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Examples/R2D/00_Built_ins/E3PhysicsBasedGroundMotions/input_data/SW4_filtered",
+ "units": {
+ "TH_file": "mps2"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "MDOF-LU",
+ "ApplicationData": {}
+ },
+ "EDP": {
+ "Application": "StandardEarthquakeEDP",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "OpenSees-Simulation",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "Dakota-UQ",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH EQ Story",
+ "Realizations": 1000,
+ "auto_script": "PelicunDefault/Hazus_Earthquake_Story.py",
+ "coupled_EDP": false,
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {
+ "hazusData": "HazusData.txt",
+ "pathToHazusFile": "/Users/adamzs/Examples/R2D/00_Built_ins/E3PhysicsBasedGroundMotions/input_data",
+ "stdDamping": 0.1,
+ "stdStiffness": 0.1
+ },
+ "Simulation": {
+ "Application": "OpenSees-Simulation",
+ "algorithm": "Newton",
+ "analysis": "Transient -numSubLevels 2 -numSubSteps 10",
+ "convergenceTest": "NormUnbalance 1.0e-2 10",
+ "dampingModel": "Rayleigh Damping",
+ "firstMode": 1,
+ "integration": "Newmark 0.5 0.25",
+ "modalRayleighTangentRatio": 0,
+ "numModesModal": 1,
+ "rayleighTangent": "Initial",
+ "secondMode": -1,
+ "solver": "Umfpack"
+ },
+ "UQ": {
+ "parallelExecution": true,
+ "samplingMethodData": {
+ "method": "LHS",
+ "samples": 10,
+ "seed": 2
+ },
+ "saveWorkDir": true,
+ "uqType": "Forward Propagation"
+ },
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "S_20_20x00000",
+ 1.0
+ ],
+ [
+ "S_21_20x00001",
+ 1.0
+ ],
+ [
+ "S_20_20x00002",
+ 1.0
+ ],
+ [
+ "S_20_20x00003",
+ 1.0
+ ]
+ ],
+ "type": "timeHistory"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e3/__init__.py b/pelicun/tests/dl_calculation/e3/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e3/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e3/response.csv b/pelicun/tests/dl_calculation/e3/response.csv
new file mode 100644
index 000000000..f5450664f
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e3/response.csv
@@ -0,0 +1,11 @@
+%eval_id,interface,kFactor,dampFactor,eventID,1-PFA-0-1,1-PFA-0-2,1-PFA-1-1,1-PFA-1-2,1-PFD-1-1,1-PFD-1-2,1-PID-1-1,1-PID-1-2,1-PFA-2-1,1-PFA-2-2,1-PFD-2-1,1-PFD-2-2,1-PID-2-1,1-PID-2-2,1-PFA-3-1,1-PFA-3-2,1-PFD-3-1,1-PFD-3-2,1-PID-3-1,1-PID-3-2,1-PFA-4-1,1-PFA-4-2,1-PFD-4-1,1-PFD-4-2,1-PID-4-1,1-PID-4-2,1-PFA-5-1,1-PFA-5-2,1-PFD-5-1,1-PFD-5-2,1-PID-5-1,1-PID-5-2,1-PRD-1-1,1-PRD-1-2
+1,NO_ID,0.8817522814,1.066983211,S_20_20x00000,16.8582,33.1454,17.9583,26.3537,0.102333,0.190679,0.0284257,0.0529664,15.9064,26.1489,0.170668,0.301302,0.0206839,0.0371189,16.1886,21.0559,0.213332,0.391667,0.0145917,0.0305021,11.3821,15.2439,0.23443,0.476214,0.0124983,0.0235541,9.27622,12.5316,0.240095,0.521471,0.00883049,0.0166364,0.0133386,0.0289706
+2,NO_ID,0.9886876314,0.9229375803,S_20_20x00002,16.8582,33.1454,19.0145,27.6361,0.101138,0.192878,0.028094,0.0535773,17.034,26.4289,0.170838,0.301898,0.0209905,0.0354014,16.9067,21.4484,0.213383,0.39401,0.0150769,0.0300369,11.201,15.2395,0.235273,0.479983,0.0122908,0.0240372,9.0027,13.6248,0.241533,0.529824,0.00783707,0.0169535,0.0134185,0.0294347
+3,NO_ID,0.8289637764,1.094113724,S_21_20x00001,21.4178,29.7214,26.0836,28.9411,0.15291,0.226224,0.0424751,0.06284,25.7287,25.4034,0.235994,0.334483,0.0267993,0.0354025,21.4589,24.4428,0.295284,0.397812,0.0203297,0.0256835,14.2942,16.607,0.350466,0.430446,0.0186296,0.0150488,10.412,13.4647,0.383205,0.451574,0.012696,0.00839198,0.0212891,0.0250875
+4,NO_ID,1.149126203,0.9904420057,S_21_20x00001,21.4178,29.7214,27.0508,29.1394,0.124586,0.203982,0.0346071,0.0566616,29.0617,25.5925,0.209125,0.288435,0.0254111,0.033464,23.4012,22.5772,0.272395,0.337902,0.0207331,0.0241908,16.0629,18.2045,0.323891,0.374038,0.0164086,0.0142068,12.144,13.8227,0.357021,0.400927,0.0125357,0.00771287,0.0198345,0.0222737
+5,NO_ID,1.023984053,1.033176188,S_20_20x00002,16.8582,33.1454,19.0051,28.3928,0.0968352,0.190333,0.0268987,0.0528702,17.4989,25.6891,0.163041,0.29316,0.0199254,0.0339042,16.9643,21.5639,0.202793,0.378937,0.0143506,0.0282022,11.2404,15.346,0.222792,0.4563,0.0114616,0.0223485,8.91781,13.2588,0.228647,0.500532,0.00697738,0.0159657,0.0127026,0.0278073
+6,NO_ID,1.100286411,1.003007167,S_20_20x00003,16.8582,33.1454,18.1755,29.2081,0.0935455,0.192511,0.0259849,0.0534753,18.2734,24.966,0.158488,0.289701,0.0195301,0.0322815,16.7467,21.1354,0.197327,0.377802,0.0141887,0.0267308,10.8399,15.2204,0.216019,0.451104,0.0108707,0.0214652,9.04108,13.4751,0.222274,0.494721,0.00609555,0.0156971,0.0123486,0.0274845
+7,NO_ID,0.9728892271,0.8801918606,S_20_20x00002,16.8582,33.1454,18.749,27.2682,0.102701,0.193424,0.028528,0.053729,16.7965,26.6722,0.173756,0.304865,0.0214078,0.0360892,16.8282,21.3298,0.21754,0.401037,0.0153533,0.0308758,11.1945,15.0646,0.240062,0.490105,0.0126211,0.0247425,9.08684,13.7124,0.246792,0.542388,0.00811314,0.0173462,0.0137107,0.0301327
+8,NO_ID,1.036791646,0.8162030555,S_20_20x00003,16.8582,33.1454,18.9857,28.0586,0.101309,0.196953,0.0281415,0.054709,17.55,25.981,0.172995,0.305844,0.0215047,0.0348475,17.0242,21.4269,0.216626,0.403625,0.0156283,0.0299812,11.1826,15.4938,0.239731,0.490476,0.0124514,0.0241655,9.04254,13.8515,0.246843,0.546054,0.0076043,0.0177906,0.0137135,0.0303363
+9,NO_ID,0.9167603602,0.9667845426,S_21_20x00001,21.4178,29.7214,25.8863,29.6129,0.149522,0.221778,0.041534,0.0616051,26.8127,25.0341,0.234634,0.319563,0.0269895,0.0369312,22.1552,24.31,0.295412,0.378919,0.0206867,0.0261987,14.947,17.4409,0.353733,0.413318,0.0180886,0.0150603,11.1587,13.1472,0.388497,0.436477,0.0130409,0.00849169,0.0215832,0.0242487
+10,NO_ID,1.066969822,1.179000945,S_20_20x00000,16.8582,33.1454,17.4239,29.1214,0.0907618,0.185076,0.0252116,0.05141,18.0617,25.2039,0.152335,0.280539,0.0185366,0.0321872,16.7174,21.4768,0.188743,0.361134,0.0133264,0.0261369,10.9262,14.9956,0.206001,0.427841,0.0103238,0.0203554,9.19306,12.9921,0.211544,0.466035,0.00602739,0.0147734,0.0117525,0.0258909
diff --git a/pelicun/tests/dl_calculation/e3/test_e3.py b/pelicun/tests/dl_calculation/e3/test_e3.py
new file mode 100644
index 000000000..11745c4d4
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e3/test_e3.py
@@ -0,0 +1,130 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""DL Calculation Example 3."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
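+    """Yield the test directory and a temp directory; restore the CWD at teardown."""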
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial directory; otherwise tests that run
+    # afterwards could be affected.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_3(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+ os.chdir(this_dir)
+ for file_name in ('170-AIM.json', 'response.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='170-AIM.json',
+ output_path=None,
+ coupled_edp=False,
+ realizations=100,
+ auto_script_path='PelicunDefault/Hazus_Earthquake_Story.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '170-AIM.json',
+ '170-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e4/0-AIM.json b/pelicun/tests/dl_calculation/e4/0-AIM.json
new file mode 100644
index 000000000..ee65ede33
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e4/0-AIM.json
@@ -0,0 +1,154 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "0",
+ "location": {
+ "latitude": 37.8860429,
+ "longitude": -122.3006004
+ },
+ "Latitude": 37.8860429,
+ "Longitude": -122.3006004,
+ "NumberOfStories": 3,
+ "YearBuilt": 2000,
+ "OccupancyClass": "RES3",
+ "StructureType": "W2",
+ "PlanArea": 9332,
+ "ReplacementCost": 1,
+ "Footprint": "{\"type\":\"Feature\",\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-122.300575,37.886210],[-122.300738,37.886181],[-122.300611,37.885734],[-122.300447,37.885763],[-122.300575,37.886210]]]},\"properties\":{}}",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Examples/R2D/00_Built_ins/E4OpenSeesPyFEM/input_data/records",
+ "units": {
+ "TH_file": "g",
+ "factor": "scalar"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "OpenSeesPyInput",
+ "ApplicationData": {
+ "dofMap": "1,2,3",
+ "filePath": "/Users/adamzs/Examples/R2D/00_Built_ins/E4OpenSeesPyFEM/input_data/model",
+ "mainScript": "cantilever_light.py",
+ "modelPath": "",
+ "ndm": 3
+ }
+ },
+ "EDP": {
+ "Application": "StandardEarthquakeEDP",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "OpenSeesPy-Simulation",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "Dakota-UQ",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH EQ Story",
+ "Realizations": 500,
+ "auto_script": "PelicunDefault/Hazus_Earthquake_Story.py",
+ "coupled_EDP": true,
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {},
+ "Simulation": {
+ "Application": "OpenSeesPy-Simulation",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "parallelExecution": true,
+ "samplingMethodData": {
+ "method": "LHS",
+ "samples": 5,
+ "seed": 3
+ },
+ "saveWorkDir": true,
+ "uqType": "Forward Propagation"
+ },
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "RSN1447x00000",
+ 3.919191919191919
+ ],
+ [
+ "RSN6392x00001",
+ 16.582828282828284
+ ],
+ [
+ "RSN64x00002",
+ 14.974747474747474
+ ],
+ [
+ "RSN1061x00003",
+ 2.713131313131313
+ ],
+ [
+ "RSN6141x00004",
+ 2.914141414141414
+ ]
+ ],
+ "type": "timeHistory"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e4/__init__.py b/pelicun/tests/dl_calculation/e4/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e4/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e4/response.csv b/pelicun/tests/dl_calculation/e4/response.csv
new file mode 100644
index 000000000..7a87ee016
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e4/response.csv
@@ -0,0 +1,6 @@
+,interface,eventID,1-PFA-0-1,1-PFA-0-2,1-PFA-1-1,1-PFA-1-2,1-PFD-1-1,1-PFD-1-2,1-PID-1-1,1-PID-1-2,1-PFA-2-1,1-PFA-2-2,1-PFD-2-1,1-PFD-2-2,1-PID-2-1,1-PID-2-2,1-PFA-3-1,1-PFA-3-2,1-PFD-3-1,1-PFD-3-2,1-PID-3-1,1-PID-3-2,1-PRD-1-1,1-PRD-1-2
+0,NO_ID,RSN1447x00000,4.98933,5.46146,8.01783,6.99425,0,0,0.00138306,0.000934419,12.1014,7.91662,0,0,0.00138306,0.000934419,14.2045,9.63215,0,0,0.00138306,0.000934419,0,0
+1,NO_ID,RSN6392x00001,4.83526,5.94428,9.58926,12.7484,0,0,0.00173334,0.00221061,15.2544,19.2729,0,0,0.00173334,0.00221061,18.2143,22.9734,0,0,0.00173334,0.00221061,0,0
+2,NO_ID,RSN64x00002,12.0587,10.5266,9.02844,7.96309,0,0,0.00110625,0.000758379,9.74886,7.23194,0,0,0.00110625,0.000758379,12.1982,9.85511,0,0,0.00110625,0.000758379,0,0
+3,NO_ID,RSN1061x00003,6.33161,4.69372,7.71302,6.14005,0,0,0.00141245,0.000640956,12.2413,5.46212,0,0,0.00141245,0.000640956,17.5873,8.18094,0,0,0.00141245,0.000640956,0,0
+4,NO_ID,RSN6141x00004,5.64286,5.91934,4.73417,7.69997,0,0,0.000615579,0.00120173,5.71894,10.6722,0,0,0.000615579,0.00120173,8.97946,14.9338,0,0,0.000615579,0.00120173,0,0
diff --git a/pelicun/tests/dl_calculation/e4/test_e4.py b/pelicun/tests/dl_calculation/e4/test_e4.py
new file mode 100644
index 000000000..45544beeb
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e4/test_e4.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
+
+"""DL Calculation Example 4."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
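+    """Yield the test directory and a temp directory; restore the CWD at teardown."""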
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial directory; otherwise tests that run
+    # afterwards could be affected.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_4(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+    os.chdir(this_dir)
+    # copy input files
+ for file_name in ('0-AIM.json', 'response.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+    # change to the temporary directory
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='0-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='PelicunDefault/Hazus_Earthquake_Story.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '0-AIM.json',
+ '0-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e5/1-AIM.json b/pelicun/tests/dl_calculation/e5/1-AIM.json
new file mode 100644
index 000000000..efc8f9a76
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e5/1-AIM.json
@@ -0,0 +1,4117 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "1",
+ "location": {
+ "latitude": 37.7709536,
+ "longitude": -122.2939517
+ },
+ "Latitude": 37.7709536,
+ "Longitude": -122.2939517,
+ "NumberOfStories": 1,
+ "YearBuilt": 1953,
+ "OccupancyClass": "IND2",
+ "StructureType": "W1",
+ "PlanArea": 18615.30048,
+ "ReplacementCost": 38781.876,
+ "Population": 1,
+ "SoilType": "B",
+ "Footprint": "{\"type\":\"Feature\",\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-122.293878,37.771004],[-122.293973,37.771029],[-122.294025,37.770903],[-122.293931,37.770878],[-122.293878,37.771004]]]},\"properties\":{}}",
+ "units": {
+ "force": "kips",
+ "length": "inch",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "kips",
+ "length": "inch",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Examples/R2D/00_Built_ins/E5GroundShakingAndLiquefaction/input_data/IMs",
+ "units": {
+ "PGA": "inchps2",
+ "PGD_h": "inch",
+ "PGD_v": "inch"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "IMasEDP",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH EQ IM",
+ "Realizations": 1000,
+ "auto_script": "PelicunDefault/Hazus_Earthquake_IM.py",
+ "coupled_EDP": true,
+ "detailed_results": false,
+ "ground_failure": true,
+ "log_file": true,
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {},
+ "Simulation": {
+ "type": "IMasEDP"
+ },
+ "UQ": {},
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "S_097_2018.csvx0x00000",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx1x00001",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx2x00002",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx3x00003",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx4x00004",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx5x00005",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx6x00006",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx7x00007",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx8x00008",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx9x00009",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx10x00010",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx11x00011",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx12x00012",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx13x00013",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx14x00014",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx15x00015",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx16x00016",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx17x00017",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx18x00018",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx19x00019",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx20x00020",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx21x00021",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx22x00022",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx23x00023",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx24x00024",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx25x00025",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx26x00026",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx27x00027",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx28x00028",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx29x00029",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx30x00030",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx31x00031",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx32x00032",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx33x00033",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx34x00034",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx35x00035",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx36x00036",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx37x00037",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx38x00038",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx39x00039",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx40x00040",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx41x00041",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx42x00042",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx43x00043",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx44x00044",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx45x00045",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx46x00046",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx47x00047",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx48x00048",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx49x00049",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx50x00050",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx51x00051",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx52x00052",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx53x00053",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx54x00054",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx55x00055",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx56x00056",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx57x00057",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx58x00058",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx59x00059",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx60x00060",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx61x00061",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx62x00062",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx63x00063",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx64x00064",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx65x00065",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx66x00066",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx67x00067",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx68x00068",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx69x00069",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx70x00070",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx71x00071",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx72x00072",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx73x00073",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx74x00074",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx75x00075",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx76x00076",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx77x00077",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx78x00078",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx79x00079",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx80x00080",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx81x00081",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx82x00082",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx83x00083",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx84x00084",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx85x00085",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx86x00086",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx87x00087",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx88x00088",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx89x00089",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx90x00090",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx91x00091",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx92x00092",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx93x00093",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx94x00094",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx95x00095",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx96x00096",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx97x00097",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx98x00098",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx99x00099",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx100x00100",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx101x00101",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx102x00102",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx103x00103",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx104x00104",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx105x00105",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx106x00106",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx107x00107",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx108x00108",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx109x00109",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx110x00110",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx111x00111",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx112x00112",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx113x00113",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx114x00114",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx115x00115",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx116x00116",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx117x00117",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx118x00118",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx119x00119",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx120x00120",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx121x00121",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx122x00122",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx123x00123",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx124x00124",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx125x00125",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx126x00126",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx127x00127",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx128x00128",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx129x00129",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx130x00130",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx131x00131",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx132x00132",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx133x00133",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx134x00134",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx135x00135",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx136x00136",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx137x00137",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx138x00138",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx139x00139",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx140x00140",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx141x00141",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx142x00142",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx143x00143",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx144x00144",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx145x00145",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx146x00146",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx147x00147",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx148x00148",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx149x00149",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx150x00150",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx151x00151",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx152x00152",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx153x00153",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx154x00154",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx155x00155",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx156x00156",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx157x00157",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx158x00158",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx159x00159",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx160x00160",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx161x00161",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx162x00162",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx163x00163",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx164x00164",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx165x00165",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx166x00166",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx167x00167",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx168x00168",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx169x00169",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx170x00170",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx171x00171",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx172x00172",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx173x00173",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx174x00174",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx175x00175",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx176x00176",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx177x00177",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx178x00178",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx179x00179",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx180x00180",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx181x00181",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx182x00182",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx183x00183",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx184x00184",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx185x00185",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx186x00186",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx187x00187",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx188x00188",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx189x00189",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx190x00190",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx191x00191",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx192x00192",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx193x00193",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx194x00194",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx195x00195",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx196x00196",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx197x00197",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx198x00198",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx199x00199",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx200x00200",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx201x00201",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx202x00202",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx203x00203",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx204x00204",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx205x00205",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx206x00206",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx207x00207",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx208x00208",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx209x00209",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx210x00210",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx211x00211",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx212x00212",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx213x00213",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx214x00214",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx215x00215",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx216x00216",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx217x00217",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx218x00218",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx219x00219",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx220x00220",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx221x00221",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx222x00222",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx223x00223",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx224x00224",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx225x00225",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx226x00226",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx227x00227",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx228x00228",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx229x00229",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx230x00230",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx231x00231",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx232x00232",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx233x00233",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx234x00234",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx235x00235",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx236x00236",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx237x00237",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx238x00238",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx239x00239",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx240x00240",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx241x00241",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx242x00242",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx243x00243",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx244x00244",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx245x00245",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx246x00246",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx247x00247",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx248x00248",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx249x00249",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx250x00250",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx251x00251",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx252x00252",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx253x00253",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx254x00254",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx255x00255",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx256x00256",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx257x00257",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx258x00258",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx259x00259",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx260x00260",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx261x00261",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx262x00262",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx263x00263",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx264x00264",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx265x00265",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx266x00266",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx267x00267",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx268x00268",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx269x00269",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx270x00270",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx271x00271",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx272x00272",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx273x00273",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx274x00274",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx275x00275",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx276x00276",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx277x00277",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx278x00278",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx279x00279",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx280x00280",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx281x00281",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx282x00282",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx283x00283",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx284x00284",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx285x00285",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx286x00286",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx287x00287",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx288x00288",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx289x00289",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx290x00290",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx291x00291",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx292x00292",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx293x00293",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx294x00294",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx295x00295",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx296x00296",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx297x00297",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx298x00298",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx299x00299",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx300x00300",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx301x00301",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx302x00302",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx303x00303",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx304x00304",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx305x00305",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx306x00306",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx307x00307",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx308x00308",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx309x00309",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx310x00310",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx311x00311",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx312x00312",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx313x00313",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx314x00314",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx315x00315",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx316x00316",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx317x00317",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx318x00318",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx319x00319",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx320x00320",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx321x00321",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx322x00322",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx323x00323",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx324x00324",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx325x00325",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx326x00326",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx327x00327",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx328x00328",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx329x00329",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx330x00330",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx331x00331",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx332x00332",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx333x00333",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx334x00334",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx335x00335",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx336x00336",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx337x00337",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx338x00338",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx339x00339",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx340x00340",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx341x00341",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx342x00342",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx343x00343",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx344x00344",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx345x00345",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx346x00346",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx347x00347",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx348x00348",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx349x00349",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx350x00350",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx351x00351",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx352x00352",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx353x00353",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx354x00354",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx355x00355",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx356x00356",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx357x00357",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx358x00358",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx359x00359",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx360x00360",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx361x00361",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx362x00362",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx363x00363",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx364x00364",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx365x00365",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx366x00366",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx367x00367",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx368x00368",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx369x00369",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx370x00370",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx371x00371",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx372x00372",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx373x00373",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx374x00374",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx375x00375",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx376x00376",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx377x00377",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx378x00378",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx379x00379",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx380x00380",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx381x00381",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx382x00382",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx383x00383",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx384x00384",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx385x00385",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx386x00386",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx387x00387",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx388x00388",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx389x00389",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx390x00390",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx391x00391",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx392x00392",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx393x00393",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx394x00394",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx395x00395",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx396x00396",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx397x00397",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx398x00398",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx399x00399",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx400x00400",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx401x00401",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx402x00402",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx403x00403",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx404x00404",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx405x00405",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx406x00406",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx407x00407",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx408x00408",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx409x00409",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx410x00410",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx411x00411",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx412x00412",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx413x00413",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx414x00414",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx415x00415",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx416x00416",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx417x00417",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx418x00418",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx419x00419",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx420x00420",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx421x00421",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx422x00422",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx423x00423",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx424x00424",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx425x00425",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx426x00426",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx427x00427",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx428x00428",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx429x00429",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx430x00430",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx431x00431",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx432x00432",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx433x00433",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx434x00434",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx435x00435",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx436x00436",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx437x00437",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx438x00438",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx439x00439",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx440x00440",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx441x00441",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx442x00442",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx443x00443",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx444x00444",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx445x00445",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx446x00446",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx447x00447",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx448x00448",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx449x00449",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx450x00450",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx451x00451",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx452x00452",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx453x00453",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx454x00454",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx455x00455",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx456x00456",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx457x00457",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx458x00458",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx459x00459",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx460x00460",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx461x00461",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx462x00462",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx463x00463",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx464x00464",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx465x00465",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx466x00466",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx467x00467",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx468x00468",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx469x00469",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx470x00470",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx471x00471",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx472x00472",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx473x00473",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx474x00474",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx475x00475",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx476x00476",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx477x00477",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx478x00478",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx479x00479",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx480x00480",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx481x00481",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx482x00482",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx483x00483",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx484x00484",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx485x00485",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx486x00486",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx487x00487",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx488x00488",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx489x00489",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx490x00490",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx491x00491",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx492x00492",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx493x00493",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx494x00494",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx495x00495",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx496x00496",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx497x00497",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx498x00498",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx499x00499",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx500x00500",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx501x00501",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx502x00502",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx503x00503",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx504x00504",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx505x00505",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx506x00506",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx507x00507",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx508x00508",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx509x00509",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx510x00510",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx511x00511",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx512x00512",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx513x00513",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx514x00514",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx515x00515",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx516x00516",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx517x00517",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx518x00518",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx519x00519",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx520x00520",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx521x00521",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx522x00522",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx523x00523",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx524x00524",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx525x00525",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx526x00526",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx527x00527",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx528x00528",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx529x00529",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx530x00530",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx531x00531",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx532x00532",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx533x00533",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx534x00534",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx535x00535",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx536x00536",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx537x00537",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx538x00538",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx539x00539",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx540x00540",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx541x00541",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx542x00542",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx543x00543",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx544x00544",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx545x00545",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx546x00546",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx547x00547",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx548x00548",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx549x00549",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx550x00550",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx551x00551",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx552x00552",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx553x00553",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx554x00554",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx555x00555",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx556x00556",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx557x00557",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx558x00558",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx559x00559",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx560x00560",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx561x00561",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx562x00562",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx563x00563",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx564x00564",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx565x00565",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx566x00566",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx567x00567",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx568x00568",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx569x00569",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx570x00570",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx571x00571",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx572x00572",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx573x00573",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx574x00574",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx575x00575",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx576x00576",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx577x00577",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx578x00578",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx579x00579",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx580x00580",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx581x00581",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx582x00582",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx583x00583",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx584x00584",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx585x00585",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx586x00586",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx587x00587",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx588x00588",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx589x00589",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx590x00590",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx591x00591",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx592x00592",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx593x00593",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx594x00594",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx595x00595",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx596x00596",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx597x00597",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx598x00598",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx599x00599",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx600x00600",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx601x00601",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx602x00602",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx603x00603",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx604x00604",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx605x00605",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx606x00606",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx607x00607",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx608x00608",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx609x00609",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx610x00610",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx611x00611",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx612x00612",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx613x00613",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx614x00614",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx615x00615",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx616x00616",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx617x00617",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx618x00618",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx619x00619",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx620x00620",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx621x00621",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx622x00622",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx623x00623",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx624x00624",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx625x00625",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx626x00626",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx627x00627",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx628x00628",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx629x00629",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx630x00630",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx631x00631",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx632x00632",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx633x00633",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx634x00634",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx635x00635",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx636x00636",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx637x00637",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx638x00638",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx639x00639",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx640x00640",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx641x00641",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx642x00642",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx643x00643",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx644x00644",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx645x00645",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx646x00646",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx647x00647",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx648x00648",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx649x00649",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx650x00650",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx651x00651",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx652x00652",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx653x00653",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx654x00654",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx655x00655",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx656x00656",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx657x00657",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx658x00658",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx659x00659",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx660x00660",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx661x00661",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx662x00662",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx663x00663",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx664x00664",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx665x00665",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx666x00666",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx667x00667",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx668x00668",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx669x00669",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx670x00670",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx671x00671",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx672x00672",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx673x00673",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx674x00674",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx675x00675",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx676x00676",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx677x00677",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx678x00678",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx679x00679",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx680x00680",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx681x00681",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx682x00682",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx683x00683",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx684x00684",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx685x00685",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx686x00686",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx687x00687",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx688x00688",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx689x00689",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx690x00690",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx691x00691",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx692x00692",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx693x00693",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx694x00694",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx695x00695",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx696x00696",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx697x00697",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx698x00698",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx699x00699",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx700x00700",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx701x00701",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx702x00702",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx703x00703",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx704x00704",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx705x00705",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx706x00706",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx707x00707",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx708x00708",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx709x00709",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx710x00710",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx711x00711",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx712x00712",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx713x00713",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx714x00714",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx715x00715",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx716x00716",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx717x00717",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx718x00718",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx719x00719",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx720x00720",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx721x00721",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx722x00722",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx723x00723",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx724x00724",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx725x00725",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx726x00726",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx727x00727",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx728x00728",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx729x00729",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx730x00730",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx731x00731",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx732x00732",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx733x00733",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx734x00734",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx735x00735",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx736x00736",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx737x00737",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx738x00738",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx739x00739",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx740x00740",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx741x00741",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx742x00742",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx743x00743",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx744x00744",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx745x00745",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx746x00746",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx747x00747",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx748x00748",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx749x00749",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx750x00750",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx751x00751",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx752x00752",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx753x00753",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx754x00754",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx755x00755",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx756x00756",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx757x00757",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx758x00758",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx759x00759",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx760x00760",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx761x00761",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx762x00762",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx763x00763",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx764x00764",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx765x00765",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx766x00766",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx767x00767",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx768x00768",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx769x00769",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx770x00770",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx771x00771",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx772x00772",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx773x00773",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx774x00774",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx775x00775",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx776x00776",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx777x00777",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx778x00778",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx779x00779",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx780x00780",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx781x00781",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx782x00782",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx783x00783",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx784x00784",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx785x00785",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx786x00786",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx787x00787",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx788x00788",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx789x00789",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx790x00790",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx791x00791",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx792x00792",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx793x00793",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx794x00794",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx795x00795",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx796x00796",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx797x00797",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx798x00798",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx799x00799",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx800x00800",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx801x00801",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx802x00802",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx803x00803",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx804x00804",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx805x00805",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx806x00806",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx807x00807",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx808x00808",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx809x00809",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx810x00810",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx811x00811",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx812x00812",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx813x00813",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx814x00814",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx815x00815",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx816x00816",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx817x00817",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx818x00818",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx819x00819",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx820x00820",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx821x00821",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx822x00822",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx823x00823",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx824x00824",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx825x00825",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx826x00826",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx827x00827",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx828x00828",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx829x00829",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx830x00830",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx831x00831",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx832x00832",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx833x00833",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx834x00834",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx835x00835",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx836x00836",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx837x00837",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx838x00838",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx839x00839",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx840x00840",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx841x00841",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx842x00842",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx843x00843",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx844x00844",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx845x00845",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx846x00846",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx847x00847",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx848x00848",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx849x00849",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx850x00850",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx851x00851",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx852x00852",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx853x00853",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx854x00854",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx855x00855",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx856x00856",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx857x00857",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx858x00858",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx859x00859",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx860x00860",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx861x00861",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx862x00862",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx863x00863",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx864x00864",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx865x00865",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx866x00866",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx867x00867",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx868x00868",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx869x00869",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx870x00870",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx871x00871",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx872x00872",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx873x00873",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx874x00874",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx875x00875",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx876x00876",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx877x00877",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx878x00878",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx879x00879",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx880x00880",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx881x00881",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx882x00882",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx883x00883",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx884x00884",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx885x00885",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx886x00886",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx887x00887",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx888x00888",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx889x00889",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx890x00890",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx891x00891",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx892x00892",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx893x00893",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx894x00894",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx895x00895",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx896x00896",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx897x00897",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx898x00898",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx899x00899",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx900x00900",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx901x00901",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx902x00902",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx903x00903",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx904x00904",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx905x00905",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx906x00906",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx907x00907",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx908x00908",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx909x00909",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx910x00910",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx911x00911",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx912x00912",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx913x00913",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx914x00914",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx915x00915",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx916x00916",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx917x00917",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx918x00918",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx919x00919",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx920x00920",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx921x00921",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx922x00922",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx923x00923",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx924x00924",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx925x00925",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx926x00926",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx927x00927",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx928x00928",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx929x00929",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx930x00930",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx931x00931",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx932x00932",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx933x00933",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx934x00934",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx935x00935",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx936x00936",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx937x00937",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx938x00938",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx939x00939",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx940x00940",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx941x00941",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx942x00942",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx943x00943",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx944x00944",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx945x00945",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx946x00946",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx947x00947",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx948x00948",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx949x00949",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx950x00950",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx951x00951",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx952x00952",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx953x00953",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx954x00954",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx955x00955",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx956x00956",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx957x00957",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx958x00958",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx959x00959",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx960x00960",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx961x00961",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx962x00962",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx963x00963",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx964x00964",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx965x00965",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx966x00966",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx967x00967",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx968x00968",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx969x00969",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx970x00970",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx971x00971",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx972x00972",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx973x00973",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx974x00974",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx975x00975",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx976x00976",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx977x00977",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx978x00978",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx979x00979",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx980x00980",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx981x00981",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx982x00982",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx983x00983",
+ 1.0
+ ],
+ [
+ "S_102_2023.csvx984x00984",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx985x00985",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx986x00986",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx987x00987",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx988x00988",
+ 1.0
+ ],
+ [
+ "S_101_2022.csvx989x00989",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx990x00990",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx991x00991",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx992x00992",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx993x00993",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx994x00994",
+ 1.0
+ ],
+ [
+ "S_097_2018.csvx995x00995",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx996x00996",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx997x00997",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx998x00998",
+ 1.0
+ ],
+ [
+ "S_173_25.csvx999x00999",
+ 1.0
+ ]
+ ],
+ "type": "intensityMeasure"
+ }
+ ]
+}
\ No newline at end of file
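The Events array in 1-AIM.json pairs each per-realization record key (for example "S_173_25.csvx0x00000") with a unit weight, and the DL application requests 1000 realizations to match. A minimal consistency check for such a fixture might look like the following sketch (assumed to run from the directory holding 1-AIM.json; not part of this changeset):

    import json
    from pathlib import Path

    # Parse the AIM fixture and pull out the nested event list.
    aim = json.loads(Path('1-AIM.json').read_text())
    events = aim['Events'][0]['Events']

    # One (record name, weight) pair per realization, all with unit weight.
    assert len(events) == 1000
    assert all(weight == 1.0 for _record, weight in events)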
diff --git a/pelicun/tests/dl_calculation/e5/__init__.py b/pelicun/tests/dl_calculation/e5/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e5/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/tests/dl_calculation/e5/response.csv b/pelicun/tests/dl_calculation/e5/response.csv
new file mode 100644
index 000000000..77abab658
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e5/response.csv
@@ -0,0 +1,1001 @@
+,1-PGA-1-1, 1-PGD-1-1, 1-PGD-1-3
+0.000000000000000000e+00,5.671708714147081309e+01,1.058778320653509936e-01,3.594925578033780073e-02
+1.000000000000000000e+00,9.239360015033409468e+01,8.261993378754910466e+00,2.448204327959560000e+00
+2.000000000000000000e+00,6.770253407546232438e+01,2.729926520741309925e+00,9.676598366108390969e-01
+3.000000000000000000e+00,7.558703547849975735e+01,3.948430151776629593e+00,1.325201953419389955e+00
+4.000000000000000000e+00,6.361478489586501439e+01,2.161578080856080053e+00,8.037002574446210357e-01
+5.000000000000000000e+00,1.676231582228625712e+02,1.097923486238300050e+01,3.114476251050680489e+00
+6.000000000000000000e+00,5.403869615078225053e+01,1.505904944206239771e+00,5.584982497366889342e-01
+7.000000000000000000e+00,8.002090707291368687e+01,4.689679718063409553e+00,1.538539798635609968e+00
+8.000000000000000000e+00,9.171395039576098895e+01,6.585218521973519756e+00,2.072504102282710203e+00
+9.000000000000000000e+00,2.360190552108088013e+02,1.309038596958689915e+01,3.571129490209400181e+00
+1.000000000000000000e+01,1.128892248318074536e+02,9.322425463065130202e+00,2.662795616050090075e+00
+1.100000000000000000e+01,6.957539192693192831e+01,3.005076062024279793e+00,1.051488569666140016e+00
+1.200000000000000000e+01,1.482110838619613844e+02,1.954347085996299915e+00,6.752159811682709734e-01
+1.300000000000000000e+01,1.533982736084648764e+02,1.365901853999599957e+01,3.693619489509240239e+00
+1.400000000000000000e+01,2.269816906217044163e+02,1.259450938744359938e+01,3.476792513548879526e+00
+1.500000000000000000e+01,6.792178499066716313e+01,2.762785804149879620e+00,9.766412559228959678e-01
+1.600000000000000000e+01,6.188836529397437403e+01,1.937217467235889945e+00,7.453696232867691451e-01
+1.700000000000000000e+01,9.458932364100456880e+01,7.045277058723449670e+00,2.180981241692570016e+00
+1.800000000000000000e+01,8.039260807690922661e+01,4.757827094864549800e+00,1.558102834183070051e+00
+1.900000000000000000e+01,9.717835523168156442e+01,7.441943537224839922e+00,2.274398651174410091e+00
+2.000000000000000000e+01,9.869267266705490727e+01,7.661376772480219977e+00,2.325032251561700125e+00
+2.100000000000000000e+01,5.932435136947889021e+01,1.654822388349560303e+00,6.344670172003841202e-01
+2.200000000000000000e+01,2.133949586802820022e+02,1.050045259184610025e+01,2.978061325445399810e+00
+2.300000000000000000e+01,1.919490315907718241e+02,1.617344120508100147e+01,4.342450317826100026e+00
+2.400000000000000000e+01,1.444513781881032628e+02,1.239570812243269948e+01,3.439203655896859946e+00
+2.500000000000000000e+01,1.035246889028743595e+02,8.243137153383731430e+00,2.444572378149080016e+00
+2.600000000000000000e+01,9.216981238270665244e+01,6.656560178666859784e+00,2.088786570176729640e+00
+2.700000000000000000e+01,1.684478929475988593e+02,1.449190230155750037e+01,3.914129585840509407e+00
+2.800000000000000000e+01,1.880695921143545775e+02,8.460779019066759332e+00,2.357725869682709696e+00
+2.900000000000000000e+01,5.699425003719914429e+01,1.442293920176019872e+00,5.263920649945320607e-01
+3.000000000000000000e+01,9.989077690457764902e+01,7.814232062723289296e+00,2.359005617020789813e+00
+3.100000000000000000e+01,1.019858642359828451e+02,8.070939463940559833e+00,2.411883833072040240e+00
+3.200000000000000000e+01,5.165144762505130416e+01,1.053305652095289924e+00,3.745254125193590022e-01
+3.300000000000000000e+01,1.154012134861182375e+02,9.620734844437571098e+00,2.731572384278560417e+00
+3.400000000000000000e+01,9.072791927974829207e+01,6.430245752923940294e+00,2.035453040101209865e+00
+3.500000000000000000e+01,2.043712175022202189e+02,1.693316131572260019e+01,4.522095473805699761e+00
+3.600000000000000000e+01,7.837510847648883328e+01,4.408523538289360388e+00,1.456516684925960092e+00
+3.700000000000000000e+01,3.751553233901879736e+01,1.529755472411319928e-01,5.657674959297349926e-02
+3.800000000000000000e+01,1.880233414339168974e+02,1.108424407976690063e+01,3.138712066695470426e+00
+3.900000000000000000e+01,6.341206460448974980e+01,4.730990702928489888e+00,1.550480410039319912e+00
+4.000000000000000000e+01,7.982409654408465371e+01,4.652967542804829826e+00,1.528400960171639955e+00
+4.100000000000000000e+01,5.914848930025463147e+01,1.637810548306720015e+00,6.243995743775320184e-01
+4.200000000000000000e+01,3.796047922924574181e+01,1.672698241412789999e-01,6.177747551638609891e-02
+4.300000000000000000e+01,1.004438063461916641e+02,7.886781587944040162e+00,2.373960764764549758e+00
+4.400000000000000000e+01,1.204602967108971114e+02,1.099337004108289939e+01,3.118424470704839635e+00
+4.500000000000000000e+01,8.116267987481464274e+01,3.360434190331299753e+00,1.038492074673569965e+00
+4.600000000000000000e+01,1.117596619940532747e+02,2.036548159845040118e+00,7.714536074863199699e-01
+4.700000000000000000e+01,1.192956331589734020e+02,1.003073861904680086e+01,2.841740026370750183e+00
+4.800000000000000000e+01,1.331018880556576391e+02,1.133049606210030014e+01,3.196772964865250088e+00
+4.900000000000000000e+01,1.341515577424953563e+02,1.143764433123159918e+01,3.220554700636470535e+00
+5.000000000000000000e+01,4.460158175129372893e+01,5.582375054218220445e-01,2.181083707744910227e-01
+5.100000000000000000e+01,1.096641521114853219e+02,8.961323731089390066e+00,2.582561837035389996e+00
+5.200000000000000000e+01,1.028860487022796946e+02,8.172901646347691340e+00,2.430860520383020251e+00
+5.300000000000000000e+01,4.741952117922136267e+01,7.605368106290141261e-01,2.954971331140739954e-01
+5.400000000000000000e+01,1.014165622454425346e+02,8.003413455706899882e+00,2.398706246941509690e+00
+5.500000000000000000e+01,8.583619251457919574e+01,5.651636285465450271e+00,1.844887499706899803e+00
+5.600000000000000000e+01,1.374189567944845862e+02,1.173465549421759846e+01,3.294335962883379931e+00
+5.700000000000000000e+01,1.642107947764255869e+02,1.417590245198080012e+01,3.824535726537670044e+00
+5.800000000000000000e+01,1.318211605456285156e+02,1.121047059854419992e+01,3.167064474385620443e+00
+5.900000000000000000e+01,7.132964541360063038e+01,3.268041503151970506e+00,1.128442613510879822e+00
+6.000000000000000000e+01,2.012119728559336522e+02,1.674219069495280010e+01,4.481722129914400554e+00
+6.100000000000000000e+01,6.805815407941415174e+01,2.781829251325609764e+00,9.823162111600720614e-01
+6.200000000000000000e+01,5.768763900622879248e+01,1.503468636487520049e+00,5.539442225217250382e-01
+6.300000000000000000e+01,5.191279347202218730e+01,1.073483174165350107e+00,3.803257853908640351e-01
+6.400000000000000000e+01,1.798349834793633306e+02,1.534542493050919987e+01,4.126903206963770110e+00
+6.500000000000000000e+01,1.320012051639350545e+02,1.123162353130230251e+01,3.171222044392139949e+00
+6.600000000000000000e+01,7.616446917164269337e+01,4.043081029529090209e+00,1.351091917199129799e+00
+6.700000000000000000e+01,2.279196874424842463e+02,1.935617041873319977e+01,4.813619511662809991e+00
+6.800000000000000000e+01,8.081220637295488984e+01,4.831047347160610350e+00,1.580830437289409973e+00
+6.900000000000000000e+01,1.980247812835284265e+02,1.654995438113569861e+01,4.437078828319790169e+00
+7.000000000000000000e+01,7.333310971735093631e+01,4.083814763475850373e-01,1.642757886510240184e-01
+7.100000000000000000e+01,6.446380281380221788e+01,2.272704233629660120e+00,8.360957125035550774e-01
+7.200000000000000000e+01,8.983444809976633394e+01,6.286598435213970326e+00,2.003406136497030143e+00
+7.300000000000000000e+01,7.543033030998732613e+01,3.923342517288839648e+00,1.317741691675389903e+00
+7.400000000000000000e+01,5.525740162142073331e+01,7.840041058131920204e-01,3.001304092932439715e-01
+7.500000000000000000e+01,6.676376093794063138e+01,2.600506110323880016e+00,9.311170788641679463e-01
+7.600000000000000000e+01,1.450188597636844179e+02,1.245404292745599939e+01,3.449784323526279728e+00
+7.700000000000000000e+01,8.852285335903341945e+01,1.641717751143009885e+00,6.266811070457509736e-01
+7.800000000000000000e+01,7.373814931295996189e+01,3.651937637489709676e+00,1.236972645264559967e+00
+7.900000000000000000e+01,1.381130505805765836e+02,1.179610784573610083e+01,3.310642222583140004e+00
+8.000000000000000000e+01,8.566114707557188979e+01,3.989829130573679383e-01,1.612207576273380349e-01
+8.100000000000000000e+01,1.352386129413348499e+02,1.153715398290739991e+01,3.244897546903179997e+00
+8.200000000000000000e+01,6.201014602550355193e+01,1.951162529684129954e+00,7.494203397300339686e-01
+8.300000000000000000e+01,9.325355578568223791e+01,8.175747962817029091e+00,2.431417932397899939e+00
+8.400000000000000000e+01,9.546020025439619872e+01,7.186288174782429650e+00,2.212014192735299645e+00
+8.500000000000000000e+01,9.202930993687871819e+01,7.670123914734769954e+00,2.326957311384219906e+00
+8.600000000000000000e+01,4.706041432903978006e+01,7.331770081204390666e-01,2.899465828639279930e-01
+8.700000000000000000e+01,8.533531184197039465e+01,5.574354834445969686e+00,1.820909592371920072e+00
+8.800000000000000000e+01,7.162910633976950692e+01,3.315811341136370327e+00,1.141770240603360076e+00
+8.900000000000000000e+01,6.234817461349315693e+01,1.344842850964419911e+00,4.850445386042760432e-01
+9.000000000000000000e+01,1.120495599931329735e+02,9.233473657601448892e+00,2.641128999232950303e+00
+9.100000000000000000e+01,1.104432588661442196e+02,9.052108742846169065e+00,2.601199287283309847e+00
+9.200000000000000000e+01,6.711068408306115884e+01,1.219985397981600217e+00,4.362891479381400539e-01
+9.300000000000000000e+01,6.354373617289603970e+01,2.152472442294819821e+00,8.011125044448019761e-01
+9.400000000000000000e+01,6.205054356531948656e+01,1.955806063937570061e+00,7.506906874137849739e-01
+9.500000000000000000e+01,1.231078213648871724e+02,1.041027317086540016e+01,2.953727277469140322e+00
+9.600000000000000000e+01,1.848173107549770293e+02,9.874275365278400329e+00,2.798958180006100083e+00
+9.700000000000000000e+01,5.134728557559241580e+01,1.037125107706210025e+00,3.679582762767709680e-01
+9.800000000000000000e+01,9.582662695990502755e+01,7.242193894630750250e+00,2.225429761831060116e+00
+9.900000000000000000e+01,1.038610459902061365e+02,8.280549095956651229e+00,2.451899054213940055e+00
+1.000000000000000000e+02,6.630419597258367048e+01,4.347911488357420140e+00,1.186161290672820057e+00
+1.010000000000000000e+02,6.778999675641112788e+01,2.741850203630980420e+00,9.712220411143119847e-01
+1.020000000000000000e+02,9.273843212233151689e+01,6.750923797604769128e+00,2.109656846302600197e+00
+1.030000000000000000e+02,5.214097088507736544e+01,1.086066720484439863e+00,3.855083139730849928e-01
+1.040000000000000000e+02,5.901930900431769800e+01,1.528439635033769894e+00,5.663783786183249624e-01
+1.050000000000000000e+02,7.325094445459387771e+01,3.572626915773069900e+00,1.214665016948800025e+00
+1.060000000000000000e+02,8.197862692215016978e+01,1.214109586928189932e+00,4.699648635307320244e-01
+1.070000000000000000e+02,1.222288982527579151e+02,7.883700971074779851e+00,2.373276289004569772e+00
+1.080000000000000000e+02,8.889915493512567934e+01,6.139506035472450662e+00,1.971110933896419892e+00
+1.090000000000000000e+02,5.367465344911528291e+01,1.185584297563929912e+00,4.229987273828770267e-01
+1.100000000000000000e+02,8.089075404846660433e+01,4.847592558810210228e+00,1.584828142239689930e+00
+1.110000000000000000e+02,1.054505769986808872e+02,8.457328233377459981e+00,2.487553737975129575e+00
+1.120000000000000000e+02,2.550393151330924013e+02,2.111401287423279882e+01,5.144145239636650935e+00
+1.130000000000000000e+02,7.139216254222013447e+01,3.277330234150410249e+00,1.131199822456909976e+00
+1.140000000000000000e+02,1.098062235487087150e+02,8.981161878816749322e+00,2.585923924966029741e+00
+1.150000000000000000e+02,6.864613535276319567e+01,2.876046510267890266e+00,1.007751974251730021e+00
+1.160000000000000000e+02,9.415614341226358874e+01,6.974351001240030712e+00,2.164440534149509876e+00
+1.170000000000000000e+02,1.259698985196955903e+02,1.580336135254689900e+01,4.250075211718360002e+00
+1.180000000000000000e+02,6.617051410868613459e+01,2.518882371871950188e+00,9.093557321608309874e-01
+1.190000000000000000e+02,6.418498412216366944e+01,2.235737885691059912e+00,8.251377906525120265e-01
+1.200000000000000000e+02,6.933429547940430382e+01,2.971211538419779696e+00,1.039693592142669987e+00
+1.210000000000000000e+02,7.906033349707392688e+01,4.523325868175129827e+00,1.490274627941210017e+00
+1.220000000000000000e+02,6.923111943063102558e+01,2.147932349283979914e+00,8.002392748206008655e-01
+1.230000000000000000e+02,1.576140269200004127e+02,2.129046124918060112e+00,7.374205629706619414e-01
+1.240000000000000000e+02,7.116774241155785319e+01,3.241217296592259878e+00,1.121365452450540090e+00
+1.250000000000000000e+02,4.570173299926995014e+01,6.429326978175470275e-01,2.441754314540399984e-01
+1.260000000000000000e+02,9.843513587497689343e+01,7.625500399046930333e+00,2.316881881266729781e+00
+1.270000000000000000e+02,6.343526235930431056e+01,9.497733261153759043e-01,3.352152065692330374e-01
+1.280000000000000000e+02,2.836800728543716588e+01,1.527149955843289976e-02,6.290379737185700093e-03
+1.290000000000000000e+02,7.606795302172535855e+01,1.215500360568160021e+00,4.343891638084250162e-01
+1.300000000000000000e+02,7.378908881337754622e+01,3.662868356445450058e+00,1.239351228781260028e+00
+1.310000000000000000e+02,9.609844657094420484e+01,6.333443937507269084e+00,1.563669940566949856e+00
+1.320000000000000000e+02,1.149562604498296423e+02,5.770085830657789572e+00,1.879007475157389973e+00
+1.330000000000000000e+02,6.889016486952685625e+01,2.910822112079630042e+00,1.018796426065760041e+00
+1.340000000000000000e+02,1.526861115700156972e+02,1.321749035722690024e+01,3.595302034332000130e+00
+1.350000000000000000e+02,2.654369235570796945e+02,2.171819862426370307e+01,5.269709351103339934e+00
+1.360000000000000000e+02,9.737644037341939907e+01,7.471085682679539985e+00,2.281119157125159891e+00
+1.370000000000000000e+02,1.340217552026134911e+02,1.142649683305149999e+01,3.217570964100919806e+00
+1.380000000000000000e+02,4.478922658540391666e+01,5.795954841369620469e-01,2.221248683922160128e-01
+1.390000000000000000e+02,1.448778113098600215e+02,1.243454232295000139e+01,3.447140849291860043e+00
+1.400000000000000000e+02,5.519216665805923583e+01,1.293746316959889731e+00,4.657347431687639538e-01
+1.410000000000000000e+02,9.072785982985213593e+01,6.430236173046809611e+00,2.035450860484969926e+00
+1.420000000000000000e+02,8.775090246998314569e+01,5.957181096935559417e+00,1.927232053316910054e+00
+1.430000000000000000e+02,1.365649180856611622e+02,1.165875056917280261e+01,3.274649085753960076e+00
+1.440000000000000000e+02,8.136551176238758387e+01,4.930421888657829932e+00,1.609478750795990010e+00
+1.450000000000000000e+02,8.775923030050007867e+01,2.485348042697749893e+00,8.080749059521670219e-01
+1.460000000000000000e+02,8.402947194178651102e+01,5.376522764385019748e+00,1.752884355762510005e+00
+1.470000000000000000e+02,5.826085711878308615e+01,1.553375369428739772e+00,5.792465771970460864e-01
+1.480000000000000000e+02,1.289408453191276749e+02,1.092123143813370056e+01,3.101900140049110188e+00
+1.490000000000000000e+02,6.394291016558845087e+01,2.204018311028610011e+00,8.158855064400820645e-01
+1.500000000000000000e+02,7.444952397386026632e+01,3.764299016026890321e+00,1.270998737356280106e+00
+1.510000000000000000e+02,8.075279152066826782e+01,5.193513423162130493e+00,1.691862193284629834e+00
+1.520000000000000000e+02,1.120253253410939180e+02,9.230807205281680083e+00,2.640511924270399824e+00
+1.530000000000000000e+02,4.795210571996022964e+01,8.022793504917720142e-01,3.040928705636020135e-01
+1.540000000000000000e+02,1.347205568582265869e+02,1.489569434055480146e+00,5.542561610643380599e-01
+1.550000000000000000e+02,1.145578110904797597e+02,9.517849888517901036e+00,2.707794223689079960e+00
+1.560000000000000000e+02,1.022012304413944008e+02,8.096013239492968694e+00,2.416438198707870466e+00
+1.570000000000000000e+02,1.374453027240733434e+02,1.173696235155179934e+01,3.294949818049620394e+00
+1.580000000000000000e+02,1.248174864972454401e+02,1.057072326335359946e+01,2.998683305590270187e+00
+1.590000000000000000e+02,1.270410352872257107e+02,1.075622578942659935e+01,3.056910584308529355e+00
+1.600000000000000000e+02,4.749175661181482155e+01,7.681215066042229589e-01,2.966403517377769750e-01
+1.610000000000000000e+02,3.081330609155719102e+01,3.313845065236160209e-02,1.176869397638339969e-02
+1.620000000000000000e+02,1.418003197088790444e+02,1.213613125995970066e+01,3.388286606403250190e+00
+1.630000000000000000e+02,2.721757469733402672e+02,1.030275116560380155e+01,2.790678481918879683e+00
+1.640000000000000000e+02,1.021675309113132784e+02,1.401762633766639965e+01,3.782938794410380279e+00
+1.650000000000000000e+02,1.535113360977756543e+02,1.328806866945219944e+01,3.611303886331060031e+00
+1.660000000000000000e+02,9.387041601947235847e+01,6.929229495337150269e+00,2.153068958777109909e+00
+1.670000000000000000e+02,2.087318476984956703e+02,1.719307255281909974e+01,4.574417840929089429e+00
+1.680000000000000000e+02,5.779238752837630955e+01,1.512484540052939863e+00,5.583849331793820925e-01
+1.690000000000000000e+02,9.057457143995884508e+01,6.404669688806480465e+00,2.029852244003039985e+00
+1.700000000000000000e+02,9.867540871875903008e+01,7.658968091888789687e+00,2.324502842987920204e+00
+1.710000000000000000e+02,5.855514403091285658e+01,1.579539724672150092e+00,5.933013482076070488e-01
+1.720000000000000000e+02,1.458997596283748237e+02,1.254550783344899934e+01,3.466490954212380071e+00
+1.730000000000000000e+02,8.262161039467339663e+01,1.123766841939019834e+01,3.172887674445080020e+00
+1.740000000000000000e+02,7.623839501415925213e+01,4.055196564293660266e+00,1.354373358355080104e+00
+1.750000000000000000e+02,9.866444229371850838e+01,7.657438323616649889e+00,2.324166766677379758e+00
+1.760000000000000000e+02,9.285428394050660472e+01,6.769324321249049703e+00,2.113984619419800204e+00
+1.770000000000000000e+02,9.421282834904809533e+01,1.641736163647309965e+01,4.403520759756440306e+00
+1.780000000000000000e+02,5.128672136484318145e+01,2.867790413804370164e+00,1.005085239206439995e+00
+1.790000000000000000e+02,1.862237542469479763e+02,1.579417861608200013e+01,4.247659844837100351e+00
+1.800000000000000000e+02,1.437183758707360255e+02,1.319843779737109912e+01,3.591207232839829899e+00
+1.810000000000000000e+02,9.158510590669362728e+01,6.565147679273599657e+00,2.067571375494820174e+00
+1.820000000000000000e+02,5.577269730202414877e+01,1.339306075002120178e+00,4.838418788207700105e-01
+1.830000000000000000e+02,8.295672042189006845e+01,5.207682502272240832e+00,1.696132929422659918e+00
+1.840000000000000000e+02,5.001672283946305697e+01,9.643964682483109341e-01,3.409812180010809990e-01
+1.850000000000000000e+02,1.047204635486780830e+02,8.375320496215991284e+00,2.470960208085710264e+00
+1.860000000000000000e+02,2.358676479120768477e+02,8.597153656371030905e+00,2.377426847582659875e+00
+1.870000000000000000e+02,1.131258892773763733e+02,9.347572263925698621e+00,2.669005265970990415e+00
+1.880000000000000000e+02,2.308497096321313506e+02,1.656549649129900104e+01,4.440723653818190897e+00
+1.890000000000000000e+02,5.575241869538307782e+01,1.337743623063980092e+00,4.831901984924679150e-01
+1.900000000000000000e+02,9.830571195738743029e+01,7.606302113054380420e+00,2.312635157039089950e+00
+1.910000000000000000e+02,1.104178261897135656e+02,9.049356336415199209e+00,2.600583968924169653e+00
+1.920000000000000000e+02,1.046801017813858152e+02,8.370850356959468996e+00,2.470053795624669757e+00
+1.930000000000000000e+02,5.507953744126424311e+01,1.285608107614290008e+00,4.623458759378799576e-01
+1.940000000000000000e+02,2.986439846986016278e+02,9.270339384211689904e+00,2.650040415528629900e+00
+1.950000000000000000e+02,1.286127451752038269e+02,1.089356416364069879e+01,3.094559339680830412e+00
+1.960000000000000000e+02,1.453692037661806182e+02,8.852151026700600056e+00,2.419945516756410342e+00
+1.970000000000000000e+02,1.836235753328935516e+02,1.561685957215400045e+01,4.200815341961900096e+00
+1.980000000000000000e+02,7.167093019763017026e+01,3.322095826491529902e+00,1.143655836822240035e+00
+1.990000000000000000e+02,7.155962389928369305e+01,3.305390171115389908e+00,1.138649388261399942e+00
+2.000000000000000000e+02,1.034599691814613465e+02,8.235448866364329490e+00,2.443171024965830274e+00
+2.010000000000000000e+02,8.985560973909655047e+01,6.289953931832809708e+00,2.004149166783600222e+00
+2.020000000000000000e+02,5.802064308083289035e+01,1.532291748161250133e+00,5.683353712820840009e-01
+2.030000000000000000e+02,1.348941163518919950e+02,1.150156605599659976e+01,3.237345698572009933e+00
+2.040000000000000000e+02,1.269528185814201180e+02,1.074888448532180085e+01,3.054744894420170276e+00
+2.050000000000000000e+02,8.450506724093359878e+01,5.447959653592879370e+00,1.779153337923830236e+00
+2.060000000000000000e+02,1.078376295108638203e+02,8.739250051666509478e+00,2.540721435449429855e+00
+2.070000000000000000e+02,6.212357016242447116e+01,1.964222484426610071e+00,7.529970503367309220e-01
+2.080000000000000000e+02,7.727437572893117590e+01,4.231147215642820036e+00,1.402071227055429858e+00
+2.090000000000000000e+02,1.013765652372183297e+02,7.998686005182941372e+00,2.397786223104250158e+00
+2.100000000000000000e+02,7.091670896583633521e+01,3.204375353725009479e+00,1.110553158974879828e+00
+2.110000000000000000e+02,2.192146279070714456e+02,1.001710081235380123e+01,2.837857992100509819e+00
+2.120000000000000000e+02,5.727915203686829670e+01,1.466015208221209898e+00,5.373679219623620495e-01
+2.130000000000000000e+02,1.557460299144027545e+02,1.348013388128390133e+01,3.653210595834180285e+00
+2.140000000000000000e+02,9.525169912977435160e+01,5.278090315872669969e+00,1.718585246329750182e+00
+2.150000000000000000e+02,5.721063832228323776e+01,1.460279726856910099e+00,5.346872328922449569e-01
+2.160000000000000000e+02,3.886110213991691609e+01,1.985916212288850180e-01,7.361758987712410662e-02
+2.170000000000000000e+02,5.268377225901367211e+01,1.119201782990870253e+00,3.982091580003689346e-01
+2.180000000000000000e+02,5.122462924067320245e+01,2.000861251832949606e+00,7.616739022550171878e-01
+2.190000000000000000e+02,1.017319844829535498e+02,8.040771667478971096e+00,2.405975897500220118e+00
+2.200000000000000000e+02,9.227837268894084843e+01,6.673625065568809411e+00,2.092718091750170206e+00
+2.210000000000000000e+02,8.047845925668691791e+01,4.771880473011660406e+00,1.562705249119580087e+00
+2.220000000000000000e+02,6.577760102828801791e+01,1.425049177537510037e+00,5.384803327584960497e-01
+2.230000000000000000e+02,1.559413879971881158e+02,1.759325284784589627e+01,4.606781951566879307e+00
+2.240000000000000000e+02,6.733457155699201735e+01,2.675579139965069686e+00,9.530022222628800055e-01
+2.250000000000000000e+02,3.860731516597626722e+01,1.918318525329469892e-01,7.007267278983940473e-02
+2.260000000000000000e+02,1.212502221377149425e+02,1.023702396078869903e+01,2.899699680890920117e+00
+2.270000000000000000e+02,8.747598651968354488e+01,5.914541145001059874e+00,1.916485745871179924e+00
+2.280000000000000000e+02,4.012116695404272093e+01,2.438464660377099857e-01,9.360024063713280607e-02
+2.290000000000000000e+02,5.384257007858843735e+01,1.196218286559139976e+00,4.274323134584800399e-01
+2.300000000000000000e+02,1.387647382002709264e+02,1.185343827293219832e+01,3.325398998983399856e+00
+2.310000000000000000e+02,2.812701060680694454e+01,1.443248611381110075e-02,5.891923631100339821e-03
+2.320000000000000000e+02,5.618419889907966791e+01,1.374139283435780179e+00,4.974542183258400252e-01
+2.330000000000000000e+02,1.223196342513025314e+02,1.687687157496909762e+01,4.510466251775389956e+00
+2.340000000000000000e+02,6.815538460457119641e+01,2.795465895440610193e+00,9.864055741702718594e-01
+2.350000000000000000e+02,6.218439075599224708e+01,1.971254084954090047e+00,7.549278210515719678e-01
+2.360000000000000000e+02,1.061920277365587282e+02,8.546186010186040960e+00,2.504816696789459840e+00
+2.370000000000000000e+02,6.566358173523792630e+01,2.442251276980170083e+00,8.878170445955209100e-01
+2.380000000000000000e+02,7.055656557168819631e+01,3.146094290512009728e+00,1.095385240566730145e+00
+2.390000000000000000e+02,1.574320573736639517e+02,1.362638088688600035e+01,3.685756342206680358e+00
+2.400000000000000000e+02,3.374925611338350251e+01,6.769922622251670075e-02,2.416533373527110160e-02
+2.410000000000000000e+02,6.690094290520140419e+01,2.618402480052169690e+00,9.362877848825849059e-01
+2.420000000000000000e+02,4.457214277614469893e+02,2.886032083750930255e+01,7.182111894654419793e+00
+2.430000000000000000e+02,1.183440312974121440e+02,9.930417852596882256e+00,2.814992592447799868e+00
+2.440000000000000000e+02,6.906363351641594761e+01,2.935099636835790449e+00,1.026837516874130163e+00
+2.450000000000000000e+02,9.189481665645362796e+01,6.613462453048160050e+00,2.078920989894270210e+00
+2.460000000000000000e+02,1.085422318495352982e+02,8.830552559558869419e+00,2.556529785234370244e+00
+2.470000000000000000e+02,1.175229249385475043e+02,9.849094447846500344e+00,2.791582817835940311e+00
+2.480000000000000000e+02,8.937725088090725478e+01,9.049391998798439474e+00,2.600591939422919996e+00
+2.490000000000000000e+02,1.267619802931474027e+02,1.073301707943620009e+01,3.050078525122319917e+00
+2.500000000000000000e+02,6.810161757094097368e+01,2.787918989427879968e+00,9.841397097465429677e-01
+2.510000000000000000e+02,6.214669846227029382e+01,1.966894046930860007e+00,7.537301985109341462e-01
+2.520000000000000000e+02,1.017599909851852402e+02,2.305431706606780040e-01,8.737924052890469473e-02
+2.530000000000000000e+02,6.525673408776707163e+01,2.384896417406340063e+00,8.693081636531740930e-01
+2.540000000000000000e+02,6.780441778172986744e+01,2.743819880966839708e+00,9.718119889919690779e-01
+2.550000000000000000e+02,9.224852826978874987e+01,6.977666980457009949e+00,2.165298251726100354e+00
+2.560000000000000000e+02,1.010609276417168587e+02,7.960651590436530256e+00,2.389783627890569750e+00
+2.570000000000000000e+02,4.775002447891619539e+01,3.183076154507380462e-01,1.712509828400330114e-01
+2.580000000000000000e+02,1.421251234190085881e+02,1.217150541663809982e+01,3.394813749473379882e+00
+2.590000000000000000e+02,8.446176679619429706e+01,5.441426420543339582e+00,1.776725428113389960e+00
+2.600000000000000000e+02,1.109287362838933291e+02,9.110018155977410714e+00,2.613058237356080227e+00
+2.610000000000000000e+02,1.030852278654485872e+02,8.194563667382860572e+00,2.435109379772069538e+00
+2.620000000000000000e+02,5.359462605283734860e+01,4.460886815463120492e+00,1.472709008040169687e+00
+2.630000000000000000e+02,6.032288502458978741e+01,1.757448318487230043e+00,6.830521979949130129e-01
+2.640000000000000000e+02,1.429820400652455419e+02,1.136612736220340025e+01,3.204622508603540254e+00
+2.650000000000000000e+02,6.549237787798057298e+01,2.417991827260270288e+00,8.798945031093550195e-01
+2.660000000000000000e+02,9.355625679078787016e+01,8.940973055493699917e+00,2.578304936825990268e+00
+2.670000000000000000e+02,8.750800815216418016e+01,5.919674054297759724e+00,1.917818012704049835e+00
+2.680000000000000000e+02,9.506691258992671578e+01,3.536819556602970316e+00,1.203751240066629924e+00
+2.690000000000000000e+02,9.277220880024050587e+01,6.756285113747129145e+00,2.110915994242740013e+00
+2.700000000000000000e+02,9.944115176217853502e+01,8.990641832369519193e+00,2.587920992553990107e+00
+2.710000000000000000e+02,6.123960012247957252e+01,1.859772117154769866e+00,7.177029918185171553e-01
+2.720000000000000000e+02,2.409488125517206214e+01,3.234057197026950128e-03,1.942270481732369988e-03
+2.730000000000000000e+02,8.201767668061930294e+01,5.040912310784740669e+00,1.644666568807260143e+00
+2.740000000000000000e+02,1.735534981889799724e+02,1.141563962646389996e+01,3.214672068835999763e+00
+2.750000000000000000e+02,1.286033798482768589e+02,1.089277522056959846e+01,3.094336219039699909e+00
+2.760000000000000000e+02,1.168674439370924887e+02,9.780806737434399878e+00,2.772893023108930155e+00
+2.770000000000000000e+02,1.020130918511459157e+02,7.156426849345740138e+00,2.204919714191549751e+00
+2.780000000000000000e+02,1.522685824432815309e+02,3.663748595691220356e+00,1.239626293672219992e+00
+2.790000000000000000e+02,1.578143131082063064e+02,1.365762631661669957e+01,3.693283133321330070e+00
+2.800000000000000000e+02,8.462639415898183870e+01,5.466296779007650208e+00,1.785820140994110128e+00
+2.810000000000000000e+02,5.358334462035624313e+01,1.177239209765610140e+00,4.206173995390020126e-01
+2.820000000000000000e+02,1.119654078953874858e+02,9.224217029881311092e+00,2.638988233961859997e+00
+2.830000000000000000e+02,1.872995050274663527e+02,1.586752912428200091e+01,4.266363730250169795e+00
+2.840000000000000000e+02,1.170961540630782736e+02,9.805167451295211833e+00,2.779368266351509575e+00
+2.850000000000000000e+02,4.698190534737586432e+01,7.255222015137550162e-01,2.875196985699339791e-01
+2.860000000000000000e+02,1.165764865173370595e+02,9.749875402254598811e+00,2.764725304658859795e+00
+2.870000000000000000e+02,3.462389465248670462e+01,8.832578877745639323e-02,2.988306426396159995e-02
+2.880000000000000000e+02,1.484459359187716814e+02,1.280035343116779956e+01,3.515309041448299698e+00
+2.890000000000000000e+02,1.235111648061043468e+02,1.044582924363080068e+01,2.964632335713059952e+00
+2.900000000000000000e+02,9.843667051127701484e+01,7.625713830596439813e+00,2.316932393053009687e+00
+2.910000000000000000e+02,8.367009598259370762e+01,5.322859390832910265e+00,1.733596545854589888e+00
+2.920000000000000000e+02,2.057696594144383084e+02,1.701744799826539989e+01,4.539474990000579524e+00
+2.930000000000000000e+02,1.663380862786180501e+02,1.433484160419359910e+01,3.870169525601700133e+00
+2.940000000000000000e+02,8.824405074298186946e+01,6.037904968890849489e+00,1.946573107578710049e+00
+2.950000000000000000e+02,9.346404920268011551e+01,6.866753506693179965e+00,2.137190270290170169e+00
+2.960000000000000000e+02,8.607271745365864035e+01,3.208662591684749543e+00,1.111807035946860145e+00
+2.970000000000000000e+02,9.543335937619461617e+01,7.182204383331929343e+00,2.211041070257420138e+00
+2.980000000000000000e+02,1.121799755477006926e+02,9.968651030271050928e+00,2.825680655216649573e+00
+2.990000000000000000e+02,4.171997183310845259e+01,3.204327313938739596e-01,1.259674064045399888e-01
+3.000000000000000000e+02,1.626382992832886600e+02,1.405646426484520006e+01,3.791659514493660232e+00
+3.010000000000000000e+02,1.017057840676157809e+02,8.037663342448100323e+00,2.405369095254790146e+00
+3.020000000000000000e+02,1.111109495282131121e+02,6.750893891293450189e+00,2.109649826758840163e+00
+3.030000000000000000e+02,1.051997762689733662e+02,8.429347053565649617e+00,2.481811109173149621e+00
+3.040000000000000000e+02,7.504655946259909172e+01,3.856872926784290279e+00,1.299792797103620279e+00
+3.050000000000000000e+02,6.506313204559053531e+01,3.251385830546060340e+00,1.124364773972490017e+00
+3.060000000000000000e+02,6.088468802743718555e+01,1.821496132761559972e+00,7.037612181736299100e-01
+3.070000000000000000e+02,1.093719716557413619e+02,8.928483957751650379e+00,2.575700027305309892e+00
+3.080000000000000000e+02,1.090358753503634830e+02,8.890816483125679071e+00,2.567877772887390275e+00
+3.090000000000000000e+02,1.365548921569238132e+02,1.165787672577709877e+01,3.274420382031810295e+00
+3.100000000000000000e+02,1.283420782413443817e+02,1.087078083262460027e+01,3.088133993178890258e+00
+3.110000000000000000e+02,1.125235572607866033e+02,9.283667776958910878e+00,2.653290832284190159e+00
+3.120000000000000000e+02,7.991550704659179871e+01,5.723436404047779646e+00,1.865929196760409692e+00
+3.130000000000000000e+02,1.484346510577545644e+02,9.783819300572901057e+00,2.773691736636959781e+00
+3.140000000000000000e+02,5.905834517095640024e+01,1.629148202677889845e+00,6.194035624996179212e-01
+3.150000000000000000e+02,4.428266054085116110e+01,5.206678249196370345e+00,1.695799686393269834e+00
+3.160000000000000000e+02,1.043549265279285834e+02,8.334905668063699480e+00,2.462792019629290063e+00
+3.170000000000000000e+02,1.366629500946317819e+02,1.463969862759479934e+00,5.478891749134029432e-01
+3.180000000000000000e+02,1.351392973855157607e+02,1.152855576873460031e+01,3.242710205854590022e+00
+3.190000000000000000e+02,6.836904013644826250e+01,2.830799274472080018e+00,9.955480537706791200e-01
+3.200000000000000000e+02,8.557886495165604401e+01,5.611836842002070114e+00,1.832472765239729906e+00
+3.210000000000000000e+02,1.502116525450829556e+02,1.298288899638670024e+01,3.548887005400509942e+00
+3.220000000000000000e+02,1.483495588507209959e+02,1.279138902134110012e+01,3.513486769290879774e+00
+3.230000000000000000e+02,1.031311337763349343e+02,8.199563032476490321e+00,2.436092157921040169e+00
+3.240000000000000000e+02,1.226218381883492583e+02,1.035674632866799882e+01,2.940824810873710060e+00
+3.250000000000000000e+02,1.319211264304462929e+02,1.122461033366259997e+01,3.169368703936370046e+00
+3.260000000000000000e+02,1.128608118561084979e+02,9.319409724788368266e+00,2.662053196939509814e+00
+3.270000000000000000e+02,1.327874011944787753e+02,1.130383401552949962e+01,3.189662837690830255e+00
+3.280000000000000000e+02,6.935718692911012795e+01,2.974281437506790304e+00,1.040799125560849969e+00
+3.290000000000000000e+02,1.858902488983752335e+02,1.577140383295390080e+01,4.241690182259929998e+00
+3.300000000000000000e+02,4.312031634877759956e+01,4.137606478278149646e-01,1.666639328329100056e-01
+3.310000000000000000e+02,1.209836794262910900e+02,3.191379140791000157e+00,1.107894540077159817e+00
+3.320000000000000000e+02,9.440658212294511031e+01,7.016754587443139179e+00,2.174073624069280175e+00
+3.330000000000000000e+02,6.978591943855285251e+01,3.033820946758030157e+00,1.062076226188160000e+00
+3.340000000000000000e+02,6.993031277124818246e+01,3.058047578890759954e+00,1.069506400244550015e+00
+3.350000000000000000e+02,1.493388112902868841e+02,1.289591191147019877e+01,3.532392598692960295e+00
+3.360000000000000000e+02,6.251695813493032716e+01,2.015533135089859762e+00,7.656826545515600380e-01
+3.370000000000000000e+02,2.388959344875980548e+02,2.009880337173639830e+01,4.946218531694820619e+00
+3.380000000000000000e+02,1.024884606819364592e+02,7.411208376520620256e+00,2.267023560821490147e+00
+3.390000000000000000e+02,9.288357459154335061e+01,6.773981662540429660e+00,2.115082832250759992e+00
+3.400000000000000000e+02,8.102370002801701787e+01,4.869697253847980356e+00,1.591648337594550044e+00
+3.410000000000000000e+02,1.189613928186561793e+02,9.997280333051730139e+00,2.832240403909459747e+00
+3.420000000000000000e+02,1.067773367279997530e+02,2.102382186221030125e+00,7.338492788905499609e-01
+3.430000000000000000e+02,7.416096309479982551e+01,3.720145413595270067e+00,1.256997382509699834e+00
+3.440000000000000000e+02,5.967771528242967349e+01,1.119761417448919882e+00,4.482259066822600335e-01
+3.450000000000000000e+02,1.008029340144844923e+02,7.930354541792049794e+00,2.383312695673859505e+00
+3.460000000000000000e+02,8.025685422297401317e+01,4.732260517497099528e+00,1.550892802813679916e+00
+3.470000000000000000e+02,5.752760648203096849e+01,1.486978842782989974e+00,5.473226268052819865e-01
+3.480000000000000000e+02,1.005195321172554088e+02,2.366520857632230079e+00,8.643961201763928903e-01
+3.490000000000000000e+02,6.623258272802362967e+01,2.529367468895890259e+00,9.115888056343929291e-01
+3.500000000000000000e+02,4.584856924093086405e+01,4.708422975962569978e+00,1.544233846907850261e+00
+3.510000000000000000e+02,2.818420677360825266e+02,2.260373987166429899e+01,5.467421122402289946e+00
+3.520000000000000000e+02,1.708095598922090801e+02,1.468941165509030178e+01,3.959634933714290206e+00
+3.530000000000000000e+02,5.403847534769108307e+01,1.208729262279909955e+00,4.326843309805279425e-01
+3.540000000000000000e+02,8.562454831706723724e+01,5.618887638953179220e+00,1.834661756669349941e+00
+3.550000000000000000e+02,5.968384123410332620e+01,1.690061425609749923e+00,6.542886185432170709e-01
+3.560000000000000000e+02,2.307420572720313316e+02,1.598553530777230058e+01,4.295884554591049564e+00
+3.570000000000000000e+02,5.464786987055580170e+01,1.254851710729389946e+00,4.496966835700660359e-01
+3.580000000000000000e+02,8.576364394066528973e+01,1.484371306770730037e+00,5.529245645369039730e-01
+3.590000000000000000e+02,9.606086543336940053e+01,8.593754666457209979e+00,2.514081417948220043e+00
+3.600000000000000000e+02,1.138140467380927419e+02,9.432396439437491509e+00,2.687405476060730436e+00
+3.610000000000000000e+02,1.544044658287017739e+02,1.618835182050889943e+00,5.907342806585100803e-01
+3.620000000000000000e+02,5.457945212070951158e+01,1.250040007215740046e+00,4.477388027550710525e-01
+3.630000000000000000e+02,5.729339561709225137e+01,1.467210043489030058e+00,5.379286160996550326e-01
+3.640000000000000000e+02,7.638137302604712886e+01,4.078701487816050353e+00,1.360761248779690069e+00
+3.650000000000000000e+02,1.796994881199761949e+02,1.799207163912699770e+01,4.642421200713830132e+00
+3.660000000000000000e+02,1.016254057094127319e+02,1.876546920757949932e+00,7.226632823869200140e-01
+3.670000000000000000e+02,7.367240693386764860e+01,1.303304379576730021e+01,3.559218659639040272e+00
+3.680000000000000000e+02,8.619163674034000167e+01,5.706943220669000283e+00,1.861068080957300142e+00
+3.690000000000000000e+02,1.104111114126970961e+02,2.628777828931149863e+00,8.272509808830920131e-01
+3.700000000000000000e+02,1.069276925843923749e+02,8.629431124294560718e+00,2.520831155090749842e+00
+3.710000000000000000e+02,3.455823055941762050e+02,1.295152822183510111e+01,3.542691659476190136e+00
+3.720000000000000000e+02,7.071616115768470934e+01,3.168834877492870294e+00,1.102063234781169854e+00
+3.730000000000000000e+02,4.724174178649205658e+01,7.456972947864429768e-01,2.927302181659500180e-01
+3.740000000000000000e+02,1.201020794449514568e+02,1.119178133823760035e+01,3.161956137419609902e+00
+3.750000000000000000e+02,7.006688506049160026e+01,3.077087197087700510e+00,1.075309634276199855e+00
+3.760000000000000000e+02,7.546059395575105100e+01,1.478275540739889982e+00,5.431590964694279489e-01
+3.770000000000000000e+02,1.142110218070620533e+02,9.475116718557080375e+00,2.698215848482109713e+00
+3.780000000000000000e+02,8.057730091218220991e+01,8.177332065483030377e+00,2.431728271222270088e+00
+3.790000000000000000e+02,7.524278853415167134e+01,3.893469133518300040e+00,1.308913991677830069e+00
+3.800000000000000000e+02,1.110052764984742879e+02,9.118356975251630914e+00,2.614945288128220025e+00
+3.810000000000000000e+02,2.425433234241761795e+02,1.751864557618069895e+01,4.603849120734170519e+00
+3.820000000000000000e+02,1.772682742183763196e+02,1.516323892855540123e+01,4.077608862365950770e+00
+3.830000000000000000e+02,2.504275399619749010e+02,2.083416886826010384e+01,5.088313130549730445e+00
+3.840000000000000000e+02,6.964017936131703834e+01,3.013899630386869699e+00,1.054717092285719859e+00
+3.850000000000000000e+02,6.509922357518624381e+01,2.360252248419689725e+00,8.624402749331689932e-01
+3.860000000000000000e+02,1.200727488665596354e+02,6.394617218105410394e+00,2.027575996461659980e+00
+3.870000000000000000e+02,1.439709359053112223e+02,1.235176409919480101e+01,3.430338231309089547e+00
+3.880000000000000000e+02,1.405547805533580572e+02,1.202471217634930056e+01,3.362844084627910402e+00
+3.890000000000000000e+02,3.560300634131940711e+01,1.112731997099619952e-01,3.774898890257440132e-02
+3.900000000000000000e+02,1.053367077593629944e+02,8.444615224381349705e+00,2.484940857974770267e+00
+3.910000000000000000e+02,1.071044592912162159e+02,8.649522711379749040e+00,2.524648927465549697e+00
+3.920000000000000000e+02,9.019057118027453157e+01,3.164392677099390205e+00,1.100756228547620230e+00
+3.930000000000000000e+02,4.502061478860390764e+01,6.025384226623780037e-01,2.272824416943329817e-01
+3.940000000000000000e+02,6.445563786432548170e+01,2.271615122917069574e+00,8.357699808577691014e-01
+3.950000000000000000e+02,1.504261085987471631e+02,1.300205678633500028e+01,3.552826100596430337e+00
+3.960000000000000000e+02,8.600300195412710025e+01,5.677543849794740360e+00,1.852457839132889861e+00
+3.970000000000000000e+02,9.936143795214493935e+01,7.749744876391799764e+00,2.344268555875599791e+00
+3.980000000000000000e+02,5.219587921821607068e+01,1.089114121403719881e+00,3.867677747623750051e-01
+3.990000000000000000e+02,1.822018995385599851e+02,1.551097991090060013e+01,4.174352281912049989e+00
+4.000000000000000000e+02,8.943320962087354076e+01,6.223204961396909596e+00,1.989458919095489886e+00
+4.010000000000000000e+02,6.057090008392968628e+01,5.039472231575789607e+00,1.644201401632620074e+00
+4.020000000000000000e+02,1.543686042700919359e+02,1.336220622897580235e+01,3.627332108769189656e+00
+4.030000000000000000e+02,1.174612756209231321e+02,8.900276004990320899e+00,2.569837396002450092e+00
+4.040000000000000000e+02,7.061094509987772483e+01,3.153828974353069636e+00,1.097653035009599876e+00
+4.050000000000000000e+02,1.237139729538191801e+02,1.046926496497280112e+01,2.969948896823200180e+00
+4.060000000000000000e+02,6.932505124065741597e+01,2.969972517277279511e+00,1.039247974889900172e+00
+4.070000000000000000e+02,8.322878335615313006e+01,1.376915383008859850e-01,4.994519807648380161e-02
+4.080000000000000000e+02,8.337821428141546676e+01,5.277145700229769254e+00,1.718270589788100100e+00
+4.090000000000000000e+02,8.720795051709588108e+01,5.871696160829230848e+00,1.905429907742330231e+00
+4.100000000000000000e+02,9.180430924980744578e+01,6.599318777252099366e+00,2.075702872682720024e+00
+4.110000000000000000e+02,1.291339813447779648e+02,1.094319826337610024e+01,3.106222222413209888e+00
+4.120000000000000000e+02,5.486265967004183608e+01,1.270069528344319965e+00,4.559252681955929809e-01
+4.130000000000000000e+02,4.189618938994632202e+01,1.192463908849269866e-01,4.173411024806329694e-02
+4.140000000000000000e+02,7.055581679552810215e+01,3.145987887692940355e+00,1.095354068902050004e+00
+4.150000000000000000e+02,6.739269126021238776e+01,7.706423774011209815e+00,2.334727729775240235e+00
+4.160000000000000000e+02,1.057481809500874164e+02,2.379512501424350113e+00,8.684749575008720024e-01
+4.170000000000000000e+02,7.221533818767494495e+01,3.408463070062559996e+00,1.168716187863509948e+00
+4.180000000000000000e+02,1.277934868882169752e+02,1.081900835709249975e+01,3.075268871466540155e+00
+4.190000000000000000e+02,1.233906448621657290e+02,1.043519489326859961e+01,2.961359315922080349e+00
+4.200000000000000000e+02,6.621731880329760145e+01,2.527120609312870414e+00,9.110387322532258469e-01
+4.210000000000000000e+02,1.036326188642825343e+02,8.255505747871129785e+00,2.446915350462770089e+00
+4.220000000000000000e+02,1.420643400851178626e+02,1.216604249074280020e+01,3.393588518447010038e+00
+4.230000000000000000e+02,6.107355407455667518e+01,1.841784339115229763e+00,7.110909374087540646e-01
+4.240000000000000000e+02,1.208351011547560319e+02,1.019875351597220003e+01,2.887026126711289997e+00
+4.250000000000000000e+02,1.079089740550527949e+02,8.747448834069579959e+00,2.542304857888979974e+00
+4.260000000000000000e+02,1.365278994953261247e+02,1.165552430939460038e+01,3.273804920306879751e+00
+4.270000000000000000e+02,7.540824952910516288e+01,5.612477672456269495e+00,1.832671534830540017e+00
+4.280000000000000000e+02,9.313179709083026125e+01,1.898741033022669944e+00,7.305043341967970560e-01
+4.290000000000000000e+02,1.304990897617052212e+02,1.107882928110389997e+01,3.137268367936819846e+00
+4.300000000000000000e+02,1.300453070415517232e+02,1.102377469662549991e+01,3.126963663285890149e+00
+4.310000000000000000e+02,5.218482836730554197e+01,1.088500199781740063e+00,3.865139048476189387e-01
+4.320000000000000000e+02,9.155051976005665892e+01,6.559767012509379214e+00,2.066247969152620101e+00
+4.330000000000000000e+02,1.082946659655929409e+02,8.797680894143390162e+00,2.550929198675759846e+00
+4.340000000000000000e+02,6.908665385663303482e+01,2.938157697628999809e+00,1.027915955583689867e+00
+4.350000000000000000e+02,8.870554316240905735e+01,6.109355323838299157e+00,1.963942292079299934e+00
+4.360000000000000000e+02,1.599519007294644268e+02,1.383699703870359876e+01,3.736403351510589665e+00
+4.370000000000000000e+02,1.254775237189376753e+02,6.750024236083389084e+00,2.083026833703389702e+00
+4.380000000000000000e+02,8.940927859360961349e+01,1.275207883020679889e+00,4.871853048168130473e-01
+4.390000000000000000e+02,9.019101180645670013e+01,1.273881725295449963e+00,4.574946206625829515e-01
+4.400000000000000000e+02,2.034605206768295318e+02,1.687805846703859913e+01,4.510720751963700259e+00
+4.410000000000000000e+02,6.843299934321561295e+01,2.845710865006080059e+00,9.983305777911249956e-01
+4.420000000000000000e+02,1.137421186167236442e+02,4.225445129485779994e+00,1.400344691228430083e+00
+4.430000000000000000e+02,6.137605230145929625e+01,1.878164778033930116e+00,7.232718153282299278e-01
+4.440000000000000000e+02,9.225046966348558897e+01,7.900940390762820620e+00,2.377113124655470422e+00
+4.450000000000000000e+02,7.978349861756832695e+01,4.646057152625910014e+00,1.526326645380519986e+00
+4.460000000000000000e+02,1.666630768731541536e+02,1.435898338106949979e+01,3.877070814673700294e+00
+4.470000000000000000e+02,1.743929083043551600e+02,1.495339745863030245e+01,4.024914244599240298e+00
+4.480000000000000000e+02,6.373498377175171470e+01,2.177050790125250046e+00,8.081184647776189500e-01
+4.490000000000000000e+02,1.478237321274555711e+02,1.273626874719239943e+01,3.503602902987140499e+00
+4.500000000000000000e+02,7.153245152510166349e+01,3.301321343444310052e+00,1.137432874370700064e+00
+4.510000000000000000e+02,2.119164488876253216e+01,5.609736542303019697e-04,2.507670788840430081e-04
+4.520000000000000000e+02,9.013575356163275387e+01,6.334488461199139664e+00,2.014057409858760028e+00
+4.530000000000000000e+02,1.368732302079618819e+02,1.168693877827680083e+01,3.281709120026740312e+00
+4.540000000000000000e+02,7.293011208143393276e+01,3.525772270978030143e+00,1.200401488168399977e+00
+4.550000000000000000e+02,8.456177481367846838e+01,5.456524619835369627e+00,1.782344242808399759e+00
+4.560000000000000000e+02,1.321555759114795876e+02,9.427863155304530451e+00,2.686264354558760115e+00
+4.570000000000000000e+02,2.542285670159478883e+01,4.815213221072809843e-03,2.906697704874249867e-03
+4.580000000000000000e+02,1.115058216098584012e+02,9.173025930599319366e+00,2.627392814235019802e+00
+4.590000000000000000e+02,5.716182409927162666e+01,1.456205311157990012e+00,5.327935472895569680e-01
+4.600000000000000000e+02,6.394405498823589795e+01,2.204167501125370077e+00,8.159287206957690497e-01
+4.610000000000000000e+02,1.304752836280847532e+02,1.107615035022719852e+01,3.136744590174640468e+00
+4.620000000000000000e+02,9.787684359170486914e+01,7.542807702617730037e+00,2.298387670208790201e+00
+4.630000000000000000e+02,1.068095150989642121e+02,8.616018083059559629e+00,2.518289108055450143e+00
+4.640000000000000000e+02,9.863292696614854549e+01,7.653043270032750200e+00,2.323201879113790280e+00
+4.650000000000000000e+02,7.120620321485725412e+01,3.246889584700399567e+00,1.123037738832260102e+00
+4.660000000000000000e+02,1.511104358224957025e+02,4.743083771616360522e+00,1.299217118350709965e+00
+4.670000000000000000e+02,1.057636322730114102e+02,1.574858423301239929e+00,5.973394205735560458e-01
+4.680000000000000000e+02,1.893158887906025711e+02,1.600311812576720172e+01,4.300273395754530270e+00
+4.690000000000000000e+02,9.723810192762051940e+01,7.450446684957320542e+00,2.276418951888190101e+00
+4.700000000000000000e+02,7.206510718969641971e+01,3.385481402661240313e+00,1.161694251407479950e+00
+4.710000000000000000e+02,1.590539525522378028e+02,1.376551124451839847e+01,3.718072609592059763e+00
+4.720000000000000000e+02,1.019708976842443775e+02,8.069158603348720149e+00,2.411534131637759870e+00
+4.730000000000000000e+02,1.110085557143618473e+02,9.118714359545391446e+00,2.615026231119029987e+00
+4.740000000000000000e+02,6.210751121573014899e+01,1.962369202894919784e+00,7.524887614916769829e-01
+4.750000000000000000e+02,8.252172113620291327e+01,5.127459137543639578e+00,1.672777263804889891e+00
+4.760000000000000000e+02,6.373902028913566653e+01,2.177571878182850362e+00,8.082677321379840851e-01
+4.770000000000000000e+02,7.009879262036933767e+01,3.081548435207869918e+00,1.076599201375020076e+00
+4.780000000000000000e+02,1.011906437249856054e+02,7.976739908633849296e+00,2.393060864046029579e+00
+4.790000000000000000e+02,7.746531924944807201e+01,4.261309416124330340e+00,1.411241581881280016e+00
+4.800000000000000000e+02,1.447801743883584322e+02,1.242564508218819874e+01,3.445317799621089527e+00
+4.810000000000000000e+02,1.003995023953280992e+02,7.881321549524990644e+00,2.372747951680460154e+00
+4.820000000000000000e+02,1.082359086035920370e+02,8.790887751453279719e+00,2.549606295264379874e+00
+4.830000000000000000e+02,1.436152041918479654e+02,4.990674894527558969e+00,1.628542855457810079e+00
+4.840000000000000000e+02,9.114973756560026175e+01,6.497633535502599145e+00,2.051083253522079985e+00
+4.850000000000000000e+02,8.293895156351761955e+01,5.204762448014090381e+00,1.695164222786479824e+00
+4.860000000000000000e+02,1.502395779944374112e+02,1.298538411870659992e+01,3.549399115504810354e+00
+4.870000000000000000e+02,1.222098533922464014e+02,1.032041626325640138e+01,2.929322346108030217e+00
+4.880000000000000000e+02,1.437493759418629509e+02,1.232559648442959954e+01,3.426284659104490427e+00
+4.890000000000000000e+02,1.513225284293854997e+02,1.308233190645669985e+01,3.569450097265899569e+00
+4.900000000000000000e+02,9.830630370960301434e+01,7.606387839492299463e+00,2.312654514788389903e+00
+4.910000000000000000e+02,7.738556556796592645e+01,1.507227472764880183e+01,4.055491780628179832e+00
+4.920000000000000000e+02,1.275673815630194383e+02,1.219993285878839906e+01,3.399830750960660009e+00
+4.930000000000000000e+02,9.477028593823435187e+01,9.728887524336789383e+00,2.606235851756720123e+00
+4.940000000000000000e+02,9.780560724745093637e+01,7.532568745514589992e+00,2.295903383441529666e+00
+4.950000000000000000e+02,9.243669146363926359e+01,6.703152399092579650e+00,2.098502692892560084e+00
+4.960000000000000000e+02,5.266562213274219317e+01,1.118166306513409980e+00,3.977743718447120380e-01
+4.970000000000000000e+02,3.856285970163733623e+01,2.666619149323730120e-01,1.381855225176260027e-01
+4.980000000000000000e+02,2.927865451955624394e+02,2.313944997094680289e+01,5.603675188818509234e+00
+4.990000000000000000e+02,8.235439851431256386e+01,5.098618252154420105e+00,1.663454835836360113e+00
+5.000000000000000000e+02,8.447732136776481582e+01,8.672073189453008979e+00,2.528795729839850015e+00
+5.010000000000000000e+02,1.758589621199145654e+02,1.505672518065550136e+01,4.051444330817560058e+00
+5.020000000000000000e+02,6.279644137236108037e+01,2.052588877914280019e+00,7.749607813140501067e-01
+5.030000000000000000e+02,7.664349425513606207e+01,4.120387010716919818e+00,1.372616264849709777e+00
+5.040000000000000000e+02,1.059950977190426471e+02,8.523433168825459205e+00,2.500181286787910295e+00
+5.050000000000000000e+02,7.560008061755304709e+01,3.950523781898379827e+00,1.325826497242480029e+00
+5.060000000000000000e+02,8.239652506892304018e+01,5.105869122826660167e+00,1.665836441449179972e+00
+5.070000000000000000e+02,5.323066470955888718e+01,1.153873255771620032e+00,4.116228071033620761e-01
+5.080000000000000000e+02,6.982607543576978060e+01,3.040736594909040136e+00,1.064128150579930043e+00
+5.090000000000000000e+02,8.709295054162529937e+01,5.853379466705800560e+00,1.900738132434520056e+00
+5.100000000000000000e+02,1.226489168437296087e+02,1.000390238000730037e+01,2.834114055457030101e+00
+5.110000000000000000e+02,1.435682703775150344e+02,1.126315545009340013e+01,3.178335730889799482e+00
+5.120000000000000000e+02,1.214345171357127953e+02,1.025299528819090078e+01,2.905385192163319807e+00
+5.130000000000000000e+02,1.209288188191734150e+02,1.020755304574690037e+01,2.889871390245689842e+00
+5.140000000000000000e+02,1.493080322314211799e+02,2.822634270111150201e+00,8.826080834440169776e-01
+5.150000000000000000e+02,7.306333020683534585e+01,3.545170301444240391e+00,1.206288730554320177e+00
+5.160000000000000000e+02,1.413260647054136712e+02,1.209363571645069868e+01,3.378859960124440232e+00
+5.170000000000000000e+02,4.520666523005906612e+01,6.125845290413549638e-01,2.316130509450089470e-01
+5.180000000000000000e+02,1.710975569952911997e+02,8.539111520269388578e+00,2.503336089662470343e+00
+5.190000000000000000e+02,9.392511197376438759e+01,6.937667300676860549e+00,2.155232363312419963e+00
+5.200000000000000000e+02,8.611734950169835656e+01,5.695352392799900798e+00,1.857665138003200100e+00
+5.210000000000000000e+02,7.417971652324715137e+01,2.196461352870590300e+00,8.137001656819971007e-01
+5.220000000000000000e+02,1.131521137850989476e+02,9.614497520440890455e+00,2.729937448465189931e+00
+5.230000000000000000e+02,1.186165937333945948e+02,9.957520775036490690e+00,2.822558465596320065e+00
+5.240000000000000000e+02,8.200367249739993269e+01,6.642029372393720266e+00,2.085450188627469892e+00
+5.250000000000000000e+02,1.249636329136015860e+02,1.058272165856400093e+01,3.002570840576840094e+00
+5.260000000000000000e+02,1.563908003968478511e+02,1.288083735931859941e+01,3.529282332833120428e+00
+5.270000000000000000e+02,1.398948915242894486e+02,1.195998203555930139e+01,3.349148064273299585e+00
+5.280000000000000000e+02,8.004936312401089538e+01,4.694557374414589646e+00,1.540019096676770172e+00
+5.290000000000000000e+02,1.588507048014892575e+02,1.368835457332449934e+01,3.700726598004270063e+00
+5.300000000000000000e+02,8.603157637133644187e+01,5.681990315299420224e+00,1.853755627287110075e+00
+5.310000000000000000e+02,7.995797376589221983e+01,4.675804983162969997e+00,1.535280628348389964e+00
+5.320000000000000000e+02,9.152855546045687163e+01,6.556351506383220062e+00,2.065408757099819859e+00
+5.330000000000000000e+02,8.543048672242707653e+01,5.588980231984759683e+00,1.825406915825499965e+00
+5.340000000000000000e+02,1.656513268096720424e+02,6.070541083759530387e+00,1.954782345483180084e+00
+5.350000000000000000e+02,6.286748027058813904e+01,2.064172946184959834e+00,7.773660197525429361e-01
+5.360000000000000000e+02,4.776195793354875008e+01,7.879591019489379233e-01,3.009926707869149953e-01
+5.370000000000000000e+02,5.996000725031154843e+01,1.720649685963729913e+00,6.679138173426600877e-01
+5.380000000000000000e+02,1.188735440566104415e+02,9.988499644296631175e+00,2.829760891601790007e+00
+5.390000000000000000e+02,7.680979850902352268e+01,6.209581999642299621e+00,1.958580545913279902e+00
+5.400000000000000000e+02,9.008701936129249077e+01,4.779803701499210078e+00,1.308677948563209847e+00
+5.410000000000000000e+02,8.520823414278105190e+01,5.554870455806219631e+00,1.814946717248300034e+00
+5.420000000000000000e+02,7.737673067643558511e+01,4.247296380349180112e+00,1.406973232161929932e+00
+5.430000000000000000e+02,6.755211099890176740e+01,2.704610191357009974e+00,9.616113073180930781e-01
+5.440000000000000000e+02,7.956579459109185848e+01,2.376211910907159996e+00,8.674354095881869897e-01
+5.450000000000000000e+02,1.256496767631172844e+02,1.063961516909969873e+01,3.021056832152379634e+00
+5.460000000000000000e+02,1.373120786111165330e+02,1.172530026443369877e+01,3.291849785399350026e+00
+5.470000000000000000e+02,1.085457770704661442e+02,1.115709513622280102e+00,4.470701964387800431e-01
+5.480000000000000000e+02,6.124596354746324778e+01,1.860464281252429686e+00,7.179596774053050856e-01
+5.490000000000000000e+02,9.446197812218379397e+01,7.025392881791620248e+00,2.176160714748160085e+00
+5.500000000000000000e+02,8.952436555090862669e+01,3.140436402553279827e+00,9.871142032040449710e-01
+5.510000000000000000e+02,1.613014593629561659e+02,1.394834485652530276e+01,3.764493921656149489e+00
+5.520000000000000000e+02,9.186250088158024596e+01,6.608410114436610527e+00,2.077770331644949842e+00
+5.530000000000000000e+02,6.239570845139054711e+01,2.001046265444959893e+00,7.617243528230319694e-01
+5.540000000000000000e+02,1.490198134959795766e+02,2.911592848494700014e+00,1.019066741527629993e+00
+5.550000000000000000e+02,8.737865288551610377e+01,5.898957790226550202e+00,1.912451261244739964e+00
+5.560000000000000000e+02,9.852523308012244740e+01,1.041338180752480014e+01,2.954676358150640336e+00
+5.570000000000000000e+02,6.386751391317048387e+01,2.194209997101929766e+00,8.130504643662528341e-01
+5.580000000000000000e+02,1.113018377657219844e+02,1.351542093767890140e+00,5.106204961961939848e-01
+5.590000000000000000e+02,1.267048268591128561e+02,1.072826873728469899e+01,3.048685943578299806e+00
+5.600000000000000000e+02,7.367419880840706981e+01,3.642385210654020433e+00,1.233999102331149844e+00
+5.610000000000000000e+02,8.785279260272920965e+01,1.157450536327930024e+01,3.254388634274870018e+00
+5.620000000000000000e+02,1.353491021765701703e+02,2.349708255980700322e+00,8.591678765316450583e-01
+5.630000000000000000e+02,1.552270827302825182e+02,6.562675748347049698e+00,2.066963185588550189e+00
+5.640000000000000000e+02,1.481957150238085887e+02,1.362909395117250000e+01,3.686408237400979893e+00
+5.650000000000000000e+02,9.520972286830547660e+01,8.915627367007949999e+00,2.573024398294230242e+00
+5.660000000000000000e+02,1.166041200350340148e+02,9.752810209397061669e+00,2.765497700250080015e+00
+5.670000000000000000e+02,6.022198786496273470e+01,1.747151888615649851e+00,6.794932096754109363e-01
+5.680000000000000000e+02,9.049312521317116875e+01,6.391604996955240203e+00,2.026894832565070192e+00
+5.690000000000000000e+02,6.182928875055134910e+01,1.930481358922040203e+00,7.427193617913659951e-01
+5.700000000000000000e+02,1.421859407144623049e+02,1.217697280090579959e+01,3.396041248840400151e+00
+5.710000000000000000e+02,1.336864469738208641e+02,1.139773703574640074e+01,3.209907306606529787e+00
+5.720000000000000000e+02,1.353433362744499959e+02,8.566128306071080445e+00,2.508880199194420069e+00
+5.730000000000000000e+02,6.124337061762953738e+01,1.860182217329379828e+00,7.178550546724828951e-01
+5.740000000000000000e+02,1.012673703457233643e+02,9.539853147344910411e+00,2.713454180335040267e+00
+5.750000000000000000e+02,8.176575645899190192e+01,4.998031912522139741e+00,1.630890882446039969e+00
+5.760000000000000000e+02,6.653395678425698634e+01,2.570733271838120171e+00,9.225746183006170442e-01
+5.770000000000000000e+02,1.128941720459154396e+02,9.322950629895821351e+00,2.662924952004789958e+00
+5.780000000000000000e+02,1.570566324649247747e+02,1.393959005251199912e+01,3.762112714872740682e+00
+5.790000000000000000e+02,1.953310792977470101e+01,5.609736542303019697e-04,5.138502092087750476e-06
+5.800000000000000000e+02,1.414667212452168599e+02,1.210623015810739922e+01,3.381645958777280025e+00
+5.810000000000000000e+02,7.714060345702750965e+01,4.210108217380129858e+00,1.395711860105939728e+00
+5.820000000000000000e+02,7.881709777491454361e+01,2.278359130646029573e+00,8.377899240254940150e-01
+5.830000000000000000e+02,5.564842838910669087e+01,1.117871351391790169e+00,4.476872651363429378e-01
+5.840000000000000000e+02,9.985249759015199800e+01,7.809554471635649975e+00,2.357984056032919717e+00
+5.850000000000000000e+02,1.535804463255784640e+02,1.658478578643340029e+01,4.445263807677410028e+00
+5.860000000000000000e+02,8.088065173969170019e+01,9.700375644164299649e-01,3.432099340765030204e-01
+5.870000000000000000e+02,5.449866385664149249e+01,1.244380463227219957e+00,4.454427154119099952e-01
+5.880000000000000000e+02,7.831737821632725627e+01,5.665227863506289374e+00,1.848871376382669851e+00
+5.890000000000000000e+02,1.166045206024608518e+02,1.119774733432489899e+00,4.482296989864099634e-01
+5.900000000000000000e+02,7.226094380627583291e+01,3.415461912832589597e+00,1.170862430495569928e+00
+5.910000000000000000e+02,8.022203354061018388e+01,4.723520028170660190e+00,1.549055678301879846e+00
+5.920000000000000000e+02,2.277121424942819203e+02,1.934172049939510174e+01,4.811057523325120044e+00
+5.930000000000000000e+02,1.574951351267054349e+02,1.363153434430550170e+01,3.686994884054049848e+00
+5.940000000000000000e+02,7.869317665555084318e+01,2.265127084712009520e+00,8.338332779823240948e-01
+5.950000000000000000e+02,6.912693957689015178e+01,2.943515284641140095e+00,1.029809785058199978e+00
+5.960000000000000000e+02,7.363544534925901530e+01,3.636605650838399928e+00,1.232203235437090205e+00
+5.970000000000000000e+02,8.666375699719534964e+01,5.780997410064419917e+00,1.882008414889520198e+00
+5.980000000000000000e+02,1.046444389598591300e+02,8.366902206048258961e+00,2.469253843320740049e+00
+5.990000000000000000e+02,8.282825545013662349e+01,5.184807163586190093e+00,1.689155643269100171e+00
+6.000000000000000000e+02,1.446061330279169397e+02,1.240979379455409948e+01,3.442076611122829810e+00
+6.010000000000000000e+02,5.952013024918552730e+01,1.673936199984570106e+00,6.462195969744770618e-01
+6.020000000000000000e+02,1.162433992371343265e+02,9.713193726207370204e+00,2.755469323561100303e+00
+6.030000000000000000e+02,1.536294413241174084e+02,1.329814191278640045e+01,3.613613489887040231e+00
+6.040000000000000000e+02,9.052525176873763257e+01,7.852922551710110888e+00,2.367497936220160248e+00
+6.050000000000000000e+02,9.851977563385634085e+01,7.637278098433059270e+00,2.319673271128870340e+00
+6.060000000000000000e+02,7.543921358365115282e+01,3.924761610312079707e+00,1.318162534168939803e+00
+6.070000000000000000e+02,1.161349453579902473e+02,9.701254205467339631e+00,2.752476985821830091e+00
+6.080000000000000000e+02,5.294975284087490763e+01,3.502684152802220119e+00,1.252082824403830008e+00
+6.090000000000000000e+02,5.287841105458858237e+01,1.130357673310270039e+00,4.029087319226210551e-01
+6.100000000000000000e+02,1.367460005555132909e+02,1.167454100285390162e+01,3.278789319810119895e+00
+6.110000000000000000e+02,1.048514542101328431e+02,8.389840938612319832e+00,2.473909665116259760e+00
+6.120000000000000000e+02,4.969059768401015731e+01,9.485473967753578206e-01,3.347324808998169976e-01
+6.130000000000000000e+02,1.211330444353243934e+02,1.022658008847509947e+01,2.896103740147530026e+00
+6.140000000000000000e+02,6.034469139645906921e+01,8.039054712162990057e+00,2.405640672239529820e+00
+6.150000000000000000e+02,9.123473772562009287e+01,9.556807701492670004e-01,4.148960884968270202e-01
+6.160000000000000000e+02,1.168830609447040274e+02,9.782468852332311116e+00,2.773333624498290018e+00
+6.170000000000000000e+02,6.688397038081022572e+01,2.616183291161749747e+00,9.356450953484291011e-01
+6.180000000000000000e+02,8.356731500071919072e+01,1.247403032596509975e+00,4.466680773602729193e-01
+6.190000000000000000e+02,1.417028765656763483e+02,8.843285104423779686e+00,2.418658579445899903e+00
+6.200000000000000000e+02,1.407503567982580819e+02,1.204216710060849849e+01,3.366945132378649674e+00
+6.210000000000000000e+02,1.408394872159831266e+02,1.205012689440569851e+01,3.368820099991240546e+00
+6.220000000000000000e+02,7.424963587322616831e+01,3.733672548471230090e+00,1.261270617926129933e+00
+6.230000000000000000e+02,7.248252016864984171e+01,3.450923303567860145e+00,1.180987171869100072e+00
+6.240000000000000000e+02,1.700576832629833746e+02,1.463269257359350028e+01,3.944901651093049466e+00
+6.250000000000000000e+02,1.132430556037153337e+02,9.360039801290561456e+00,2.672096613795750031e+00
+6.260000000000000000e+02,8.649779800983210976e+01,5.754889227730150658e+00,1.874842192564110110e+00
+6.270000000000000000e+02,8.642791822493241227e+01,5.743920810595679605e+00,1.871845956294780011e+00
+6.280000000000000000e+02,5.597396344565625270e+01,1.357611108207100070e+00,4.904173946656570360e-01
+6.290000000000000000e+02,1.364025138774265429e+02,3.986543145822280199e+00,1.112397096204550184e+00
+6.300000000000000000e+02,5.412429692342387000e+01,1.216984603263260079e+00,4.350173341663570725e-01
+6.310000000000000000e+02,9.976088843354462199e+01,7.798369119032280139e+00,2.355545668644559942e+00
+6.320000000000000000e+02,2.945388316822591079e+01,2.108090686327110039e-02,8.226209539323329004e-03
+6.330000000000000000e+02,8.058388000216605462e+01,4.789177590564990261e+00,1.568393497910360201e+00
+6.340000000000000000e+02,9.341441925549234782e+01,1.976878410578789991e+00,7.564747839229220006e-01
+6.350000000000000000e+02,1.651185608960789750e+02,7.253784798893259556e+00,2.228233439089009948e+00
+6.360000000000000000e+02,6.763234166812274850e+01,2.715375647786539837e+00,9.648262676415940708e-01
+6.370000000000000000e+02,7.727371716773181731e+01,4.231043454403409676e+00,1.402039789184020169e+00
+6.380000000000000000e+02,1.018261197671332212e+02,8.051947265882349214e+00,2.408160528391890320e+00
+6.390000000000000000e+02,6.927949126610316455e+01,2.963871870698779887e+00,1.037058673027589917e+00
+6.400000000000000000e+02,7.764222991206619895e+01,4.289392824346050404e+00,1.419837704205149986e+00
+6.410000000000000000e+02,5.522516196317778991e+01,1.296139356858419855e+00,4.667348049723920034e-01
+6.420000000000000000e+02,3.435490556064425505e+01,8.268766206215920345e-02,2.798666742618330025e-02
+6.430000000000000000e+02,5.059801083568605407e+01,9.958814618747598990e-01,3.524367713213679698e-01
+6.440000000000000000e+02,8.800866496932887628e+01,6.000904903425749559e+00,1.937273474485659897e+00
+6.450000000000000000e+02,6.088034759659500139e+01,1.821032023107840070e+00,7.035950433657520708e-01
+6.460000000000000000e+02,8.570647627412569136e+01,5.631548460812050294e+00,1.838603595796749923e+00
+6.470000000000000000e+02,7.539964972227605244e+01,3.918444167133569689e+00,1.316290099850589979e+00
+6.480000000000000000e+02,5.186062096641867925e+01,1.070624148338890080e+00,3.791522044154310156e-01
+6.490000000000000000e+02,5.542651421420564617e+01,1.312860991812710099e+00,4.729112605072120501e-01
+6.500000000000000000e+02,1.020460162994187385e+02,7.953888284793190877e+00,2.388335392323490236e+00
+6.510000000000000000e+02,5.796918358340400346e+01,1.527807002179479845e+00,5.660580151254970271e-01
+6.520000000000000000e+02,4.964193302899879967e+01,9.462045384465949116e-01,3.338102080775449676e-01
+6.530000000000000000e+02,1.031887159876105642e+02,9.250344047446690254e+00,2.490890526450590059e+00
+6.540000000000000000e+02,8.284573033428141287e+01,5.187850962035929214e+00,1.690101185836619946e+00
+6.550000000000000000e+02,3.033591411911338653e+01,4.197342970859420375e-03,2.549228649377730145e-03
+6.560000000000000000e+02,8.933521981046767735e+01,6.207789405620280476e+00,1.986092853194260099e+00
+6.570000000000000000e+02,1.796783767028231296e+02,1.533428535324840070e+01,4.123832923744459755e+00
+6.580000000000000000e+02,4.648609142882850165e+01,6.905824509540049450e-01,2.681607382068800227e-01
+6.590000000000000000e+02,3.095278655404133517e+01,3.407667559702109994e-02,1.221840445315599925e-02
+6.600000000000000000e+02,1.383897549965853955e+02,1.182042855579940088e+01,3.317221673514459734e+00
+6.610000000000000000e+02,4.675730893824893997e+01,7.105615783273820929e-01,2.781934136741560093e-01
+6.620000000000000000e+02,5.991300793302790595e+01,1.715930655573449926e+00,6.655288609994929327e-01
+6.630000000000000000e+02,8.563093154150446651e+01,5.619873338766558923e+00,1.834968130252209839e+00
+6.640000000000000000e+02,9.983962609261615739e+01,7.807982116933150252e+00,2.357640908322959916e+00
+6.650000000000000000e+02,8.053692978536381020e+01,4.781468656589379584e+00,1.565855147522090096e+00
+6.660000000000000000e+02,1.115697069681015563e+02,9.374358921926269517e+00,2.673067110202779872e+00
+6.670000000000000000e+02,5.035506572727157248e+01,9.811120658331249356e-01,3.475935171109389432e-01
+6.680000000000000000e+02,9.621055986012656547e+01,7.301077789197550238e+00,2.239754081417559828e+00
+6.690000000000000000e+02,7.173026808749955308e+01,3.331026971945319826e+00,1.146340197108480119e+00
+6.700000000000000000e+02,1.184884438055933202e+02,9.944771222940859801e+00,2.818992907028410322e+00
+6.710000000000000000e+02,3.872075700305910573e+01,1.948352179865119882e-01,7.164485853871149779e-02
+6.720000000000000000e+02,5.788139721029588003e+01,1.520182238768610139e+00,5.622193322819749595e-01
+6.730000000000000000e+02,5.117532822360554690e+01,1.025560205074039999e+00,3.643208024455329985e-01
+6.740000000000000000e+02,1.354071492576597393e+02,1.339705394965850171e+00,5.066364396913110024e-01
+6.750000000000000000e+02,9.343419968453159186e+01,6.861321320854339767e+00,2.136037201988550294e+00
+6.760000000000000000e+02,1.099510934288124560e+02,8.997523761314800694e+00,2.589372858511580056e+00
+6.770000000000000000e+02,1.630990470761500717e+02,1.409140823688769828e+01,3.801194387055950141e+00
+6.780000000000000000e+02,6.740985156458565086e+01,2.685599163590480121e+00,9.559638526461389230e-01
+6.790000000000000000e+02,5.892787851916797592e+01,1.615132257297289931e+00,6.123562341919859264e-01
+6.800000000000000000e+02,9.269046041670094382e+01,6.743314099803709993e+00,2.107872210468780150e+00
+6.810000000000000000e+02,4.596870109793517400e+01,6.586774086784850102e-01,2.516735949388739790e-01
+6.820000000000000000e+02,9.715023436002448420e+01,7.437943715280200152e+00,2.273449762588569900e+00
+6.830000000000000000e+02,1.396465320057110091e+02,8.309901624603689640e+00,2.457768197899819818e+00
+6.840000000000000000e+02,6.495755293766396221e+01,2.339305227188410274e+00,8.563859022370279295e-01
+6.850000000000000000e+02,1.379728135939553795e+02,1.178379409305799896e+01,3.307325002811299708e+00
+6.860000000000000000e+02,9.438316890936992820e+01,7.013105684345160462e+00,2.173193297593630113e+00
+6.870000000000000000e+02,1.314238329395757887e+02,6.533779993956939869e+00,2.059879301087520265e+00
+6.880000000000000000e+02,1.104383726728670894e+02,9.051579895262179321e+00,2.601081034965730066e+00
+6.890000000000000000e+02,8.223438695918062535e+01,5.077999976183610542e+00,1.656708239129560001e+00
+6.900000000000000000e+02,9.476624653274866716e+01,7.073953331993729776e+00,2.187297376161870144e+00
+6.910000000000000000e+02,8.714708653821088546e+01,9.728140109329210361e+00,2.759226604267799754e+00
+6.920000000000000000e+02,1.032673188875624106e+02,8.214409161843740037e+00,2.439015497100330165e+00
+6.930000000000000000e+02,2.609435003472862036e+02,2.146101144747019873e+01,5.215965850476410814e+00
+6.940000000000000000e+02,1.487865847390355896e+02,3.486997784395440281e+00,1.188989159230040116e+00
+6.950000000000000000e+02,5.673984950144859596e+01,1.421397958322879962e+00,5.169521385172979322e-01
+6.960000000000000000e+02,4.316530810480240632e+01,4.174505154297940557e-01,1.683204074365280134e-01
+6.970000000000000000e+02,6.025322408591798506e+01,1.750334175492489930e+00,6.805904527194440723e-01
+6.980000000000000000e+02,1.047075291125411098e+02,8.373887778327569364e+00,2.470669614314480178e+00
+6.990000000000000000e+02,8.053830032572263065e+01,6.567011207720820920e+00,2.068030104729920104e+00
+7.000000000000000000e+02,1.255337291150098196e+02,1.062963815138999912e+01,3.017904691150859797e+00
+7.010000000000000000e+02,9.744719678887797443e+01,8.209636459223659699e+00,2.438074909393549650e+00
+7.020000000000000000e+02,9.098504233621534354e+01,6.810219483178388877e+00,2.123667121299379623e+00
+7.030000000000000000e+02,1.228143267709867246e+02,1.037357112642099999e+01,2.945908550177540164e+00
+7.040000000000000000e+02,1.185328115553946446e+02,9.949184008610430396e+00,2.820225685711180041e+00
+7.050000000000000000e+02,8.920446665345457404e+01,6.187260280849950611e+00,1.981625387168799879e+00
+7.060000000000000000e+02,1.073707272952391207e+02,1.223113410612120200e+00,4.726673493429170425e-01
+7.070000000000000000e+02,1.040342610313967100e+02,8.299580497244170019e+00,2.455700985214539944e+00
+7.080000000000000000e+02,6.172875973024148522e+01,1.919061782795229876e+00,7.382742591553029810e-01
+7.090000000000000000e+02,7.961013452094610443e+01,4.615558016032610844e+00,1.517530352464040000e+00
+7.100000000000000000e+02,9.132042310741492486e+01,6.524045994289330075e+00,2.057503496757940198e+00
+7.110000000000000000e+02,7.984915728097330145e+01,4.657236752139889191e+00,1.529684174618080039e+00
+7.120000000000000000e+02,8.251878050011382015e+01,5.126951320707250126e+00,1.672621783563499998e+00
+7.130000000000000000e+02,8.335495811659291121e+01,5.273426282232570372e+00,1.717062192333909776e+00
+7.140000000000000000e+02,1.806102042601316953e+02,1.540029736452800080e+01,4.142225763531249605e+00
+7.150000000000000000e+02,8.890531764507531420e+01,6.140467430365860402e+00,1.971340293918659947e+00
+7.160000000000000000e+02,7.948754452793853886e+01,4.594882019075429369e+00,1.511369716146359954e+00
+7.170000000000000000e+02,5.408532284283736402e+01,1.211737725154309997e+00,4.339533129159089953e-01
+7.180000000000000000e+02,1.411320971409312222e+02,1.207628040611559861e+01,3.375002096326389811e+00
+7.190000000000000000e+02,2.058015370948161546e+02,1.701933595557429868e+01,4.539874761991120167e+00
+7.200000000000000000e+02,5.309971013169313636e+01,1.146200282360649858e+00,4.083460561141960543e-01
+7.210000000000000000e+02,1.201870662318425360e+02,1.013121753161309968e+01,2.867629785871289805e+00
+7.220000000000000000e+02,7.633119630589108340e+01,4.070441773226120219e+00,1.358513235777949912e+00
+7.230000000000000000e+02,5.471454690474742932e+01,1.509232543658340031e+00,5.567770422257640739e-01
+7.240000000000000000e+02,8.303835887103684854e+01,5.221113276526299529e+00,1.700598735214990054e+00
+7.250000000000000000e+02,6.636426368894409222e+01,2.547601182789390073e+00,9.163593474302429431e-01
+7.260000000000000000e+02,7.369878926364785343e+01,3.646056121064349842e+00,1.235141017467610070e+00
+7.270000000000000000e+02,2.260131802128578826e+01,1.030822864176569986e+00,4.177101303755070272e-01
+7.280000000000000000e+02,8.523122179569413959e+01,5.558391384264609769e+00,1.816021841676449977e+00
+7.290000000000000000e+02,8.005530996572927904e+01,4.695577156912679406e+00,1.540328597887979933e+00
+7.300000000000000000e+02,1.417358336302930013e+02,1.213034790373009919e+01,3.386999295762519679e+00
+7.310000000000000000e+02,7.306798292461043332e+01,3.545849258773389856e+00,1.206495246373830099e+00
+7.320000000000000000e+02,9.707984036037029796e+01,7.427937674049589489e+00,2.271080036122680390e+00
+7.330000000000000000e+02,7.977756720676691771e+01,4.645048120550280046e+00,1.526024046579100180e+00
+7.340000000000000000e+02,7.612891203533655471e+01,4.037262771282390084e+00,1.349518738144720009e+00
+7.350000000000000000e+02,7.574216239209044943e+01,3.973378125379739778e+00,1.332619314663860077e+00
+7.360000000000000000e+02,1.066808807822187219e+02,1.142720877758070053e+01,3.217761300361629662e+00
+7.370000000000000000e+02,7.133172999182609431e+01,3.268350913396300239e+00,1.128534366148060020e+00
+7.380000000000000000e+02,7.264920729241119091e+01,3.483949954491929901e+00,1.188148176454159932e+00
+7.390000000000000000e+02,1.562243022654403433e+02,1.352118721113090061e+01,3.662339482649750178e+00
+7.400000000000000000e+02,1.100048185734216304e+02,9.003596999100938802e+00,2.590655592706189836e+00
+7.410000000000000000e+02,7.996968401245030122e+01,4.677806183980339760e+00,1.535885285610460116e+00
+7.420000000000000000e+02,1.152288302251236587e+02,3.776661805982089781e+00,1.274946930015280033e+00
+7.430000000000000000e+02,7.583101850884001749e+01,3.987718824485620317e+00,1.336468063479719914e+00
+7.440000000000000000e+02,9.838304756662357420e+01,7.618258659373950081e+00,2.315169601065389937e+00
+7.450000000000000000e+02,9.473914820659607017e+01,7.069700677055499760e+00,2.186349872800219973e+00
+7.460000000000000000e+02,7.860001223072501375e+01,6.352032719820400075e+00,2.017984895548920132e+00
+7.470000000000000000e+02,8.220419666296437811e+01,5.072822131713119909e+00,1.655019884177550082e+00
+7.480000000000000000e+02,1.051415287771703362e+02,8.422858844559039326e+00,2.480483840297390330e+00
+7.490000000000000000e+02,8.035142131784967034e+01,3.831353621048759983e+00,1.292296905771949778e+00
+7.500000000000000000e+02,5.977267038041154024e+01,1.698865243743860054e+00,6.585736492504878914e-01
+7.510000000000000000e+02,1.359887038746358314e+02,1.307377249344490089e+01,3.567667653043339993e+00
+7.520000000000000000e+02,4.483462238657912025e+01,5.848891377985949713e-01,2.231181388018559919e-01
+7.530000000000000000e+02,9.472374522043458001e+01,7.067284160521659864e+00,2.185811850786530286e+00
+7.540000000000000000e+02,7.605891202103084936e+01,4.025825898052250551e+00,1.346431372750690025e+00
+7.550000000000000000e+02,7.906208444257512724e+01,4.523617564405619795e+00,1.490359926318580275e+00
+7.560000000000000000e+02,6.698603258935834504e+01,2.629549401445189893e+00,9.395226022956240097e-01
+7.570000000000000000e+02,1.379863891992832237e+02,1.178498576069819848e+01,3.307645619366189838e+00
+7.580000000000000000e+02,5.468033442555491064e+01,1.257140912961929979e+00,4.506300605761279776e-01
+7.590000000000000000e+02,5.165889825099333876e+01,1.053704847327189942e+00,3.746879622680349686e-01
+7.600000000000000000e+02,9.217979604792529358e+01,6.658128331044159687e+00,2.089147250194959948e+00
+7.610000000000000000e+02,9.831223396828781347e+01,7.607246985075110324e+00,2.312848538809529675e+00
+7.620000000000000000e+02,9.909838122260501336e+01,7.715851214866960461e+00,2.336737444937130093e+00
+7.630000000000000000e+02,1.389189985906547804e+02,2.108662229591270076e+01,5.138621252928370353e+00
+7.640000000000000000e+02,1.147089148437264328e+02,9.819113796335829036e+00,2.783187459028810373e+00
+7.650000000000000000e+02,1.049489365730013475e+02,8.400659812415151251e+00,2.476112386321339898e+00
+7.660000000000000000e+02,1.401516977375536328e+02,1.198878562128810010e+01,3.354448173571910274e+00
+7.670000000000000000e+02,8.706889357185180245e+01,5.849552787110269492e+00,1.899760522081310077e+00
+7.680000000000000000e+02,1.226831047501818546e+02,1.036209904395789927e+01,2.942439592423290229e+00
+7.690000000000000000e+02,9.273608782983372123e+01,6.750551794938280281e+00,2.109569533958270071e+00
+7.700000000000000000e+02,1.100543698341513874e+02,9.009201002058508934e+00,2.591840451227890085e+00
+7.710000000000000000e+02,6.449623449995081614e+01,9.174003291291288775e+00,2.627616564821730005e+00
+7.720000000000000000e+02,4.433121728770363745e+01,5.938295083704240085e+00,1.922459335211260001e+00
+7.730000000000000000e+02,5.679647381719835408e+01,1.426025699479900100e+00,5.190254293312129841e-01
+7.740000000000000000e+02,1.101291612827004229e+02,9.017664249510310981e+00,2.593632091303029519e+00
+7.750000000000000000e+02,5.682547554645720567e+01,9.974780432867540458e-01,4.329009017172659735e-01
+7.760000000000000000e+02,7.201158482695088026e+01,3.373608038099550122e+00,1.159215280919569935e+00
+7.770000000000000000e+02,1.339792748289630140e+02,3.329162442042330028e+00,1.035359554953880101e+00
+7.780000000000000000e+02,1.757693445124036487e+02,1.505041057032439866e+01,4.049804359065029224e+00
+7.790000000000000000e+02,8.226040850191002107e+01,5.082465729628339979e+00,1.658166298364300184e+00
+7.800000000000000000e+02,1.130966522180246585e+02,9.344463038251641152e+00,2.668235641607060060e+00
+7.810000000000000000e+02,6.241325957575754302e+01,2.003138132479440170e+00,7.622949506503279293e-01
+7.820000000000000000e+02,7.204782875008530141e+01,3.379122746244500153e+00,1.160892231589879753e+00
+7.830000000000000000e+02,8.253064867104870928e+01,5.181857860031350071e+00,1.688240166185280167e+00
+7.840000000000000000e+02,7.907824360359546745e+01,4.526310191312449938e+00,1.491147573544560068e+00
+7.850000000000000000e+02,1.159215366927454767e+02,9.677788699911788584e+00,2.746619187753200020e+00
+7.860000000000000000e+02,1.538884810399809169e+02,1.332065559513619846e+01,3.618452207178620039e+00
+7.870000000000000000e+02,5.425362247944527638e+01,1.225375775839740111e+00,4.385798761930279621e-01
+7.880000000000000000e+02,8.967878794581889679e+01,6.261953293894269024e+00,1.997963664408860307e+00
+7.890000000000000000e+02,6.361566076116133672e+01,2.161690518340560363e+00,8.037322676236370311e-01
+7.900000000000000000e+02,1.208586490420015451e+02,1.020096398204119836e+01,2.887740173672340038e+00
+7.910000000000000000e+02,7.924073736034787885e+01,4.553448419312509543e+00,1.499112852796679940e+00
+7.920000000000000000e+02,1.122558999613762154e+02,9.382793416153750954e+00,2.674981123667740324e+00
+7.930000000000000000e+02,8.188340640525959202e+01,5.018026368062490583e+00,1.637294975540229958e+00
+7.940000000000000000e+02,1.119463214620739251e+02,9.222118455773060219e+00,2.638503453675610011e+00
+7.950000000000000000e+02,7.763561011232151543e+01,1.104347912536129961e+00,3.930911436317769958e-01
+7.960000000000000000e+02,6.999617467253254688e+01,4.828807193582500545e+00,1.580119358237020100e+00
+7.970000000000000000e+02,6.873796957876399460e+01,2.887969161221810133e+00,1.011873398300510152e+00
+7.980000000000000000e+02,1.334824684444733691e+02,1.558946511876039942e+01,4.193674530821040136e+00
+7.990000000000000000e+02,7.086243540606129443e+01,3.192577789567540236e+00,1.108244035255510074e+00
+8.000000000000000000e+02,1.650031538298985367e+02,1.423574964030239975e+01,3.841468222502950614e+00
+8.010000000000000000e+02,6.796237774275159893e+01,1.340380637821219967e+00,4.842905128952929572e-01
+8.020000000000000000e+02,6.758905227988398678e+01,2.709563104114479870e+00,9.630888792678620192e-01
+8.030000000000000000e+02,8.767142321853313547e+01,5.945074474972590117e+00,1.924170120549090202e+00
+8.040000000000000000e+02,1.112702939678909217e+02,5.926390971133379537e+00,1.459922116825720151e+00
+8.050000000000000000e+02,1.271243459549381072e+02,1.076316255231319907e+01,3.058960869537439731e+00
+8.060000000000000000e+02,9.109209716659321998e+01,1.619858182858070039e+00,5.910582478128489914e-01
+8.070000000000000000e+02,3.307809875709577341e+01,4.550327125102440057e-02,1.615609653693559841e-02
+8.080000000000000000e+02,1.290873702657921740e+02,1.093925104445459873e+01,3.105176389980309981e+00
+8.090000000000000000e+02,9.502945843978822893e+01,7.116275372919909969e+00,2.196565401679969831e+00
+8.100000000000000000e+02,1.714707600632086155e+02,1.473936612956100056e+01,3.972086169965959979e+00
+8.110000000000000000e+02,4.882756535325223268e+01,8.771659259358141059e-01,3.188896913099720232e-01
+8.120000000000000000e+02,1.040159634782240232e+02,1.048738334189090082e+01,2.975666952728480030e+00
+8.130000000000000000e+02,6.178209080714201917e+01,3.259991266188579928e+00,1.126057577409429911e+00
+8.140000000000000000e+02,4.496523489942747887e+01,5.995893030812420310e-01,2.260260536581369939e-01
+8.150000000000000000e+02,1.959936945989244634e+02,1.642669323058190045e+01,4.405940349048960591e+00
+8.160000000000000000e+02,1.244917263201507325e+02,1.053815206690109996e+01,2.990081035708430424e+00
+8.170000000000000000e+02,8.144601185140169264e+01,4.580421825590820784e-01,1.877491281707950321e-01
+8.180000000000000000e+02,1.415284902271281169e+02,1.211176339893949994e+01,3.382872023747559798e+00
+8.190000000000000000e+02,1.008248625582462807e+02,7.932926094147440210e+00,2.383860282794720398e+00
+8.200000000000000000e+02,1.768975061407054170e+02,1.573987625065269924e+01,4.233474822754150324e+00
+8.210000000000000000e+02,1.560654586759686708e+02,1.375043195271819840e+01,3.714292743356490067e+00
+8.220000000000000000e+02,4.828130538817335804e+01,8.277931665259300464e-01,3.095353923040070065e-01
+8.230000000000000000e+02,4.058553412304652852e+01,2.586504379285040067e-01,1.020717401656460049e-01
+8.240000000000000000e+02,7.034241242055045973e+01,3.115772854772119960e+00,1.086529820456950013e+00
+8.250000000000000000e+02,6.323281680923464165e+01,6.911710103192719501e+00,2.148591580724810157e+00
+8.260000000000000000e+02,9.399203728151891823e+01,1.272231155044360129e+00,4.568146791404140150e-01
+8.270000000000000000e+02,7.775963869599900136e+01,3.524325431512709716e+00,1.056088974373660117e+00
+8.280000000000000000e+02,9.091186104760403452e+01,6.459931432354330205e+00,2.042228066053410007e+00
+8.290000000000000000e+02,7.405735792584448518e+01,1.088603980135299842e+01,3.092433204591630069e+00
+8.300000000000000000e+02,1.300025263823953594e+02,1.042092984541760003e+01,2.956984247280039835e+00
+8.310000000000000000e+02,9.561518009841927324e+01,9.771878539958420706e-01,3.460388977454639803e-01
+8.320000000000000000e+02,6.664585716989085995e+01,2.585198452382140299e+00,9.267159007209950783e-01
+8.330000000000000000e+02,1.462090964788214080e+02,1.257393865009830058e+01,3.472457333067049845e+00
+8.340000000000000000e+02,1.461700938097202993e+02,1.257035214014630142e+01,3.471703072121959721e+00
+8.350000000000000000e+02,1.610317021936784840e+02,1.392795562118019781e+01,3.758868331652009509e+00
+8.360000000000000000e+02,6.162540590106529947e+01,1.907377845600949806e+00,7.337856293841250599e-01
+8.370000000000000000e+02,1.767605390406695278e+02,1.512730084764260141e+01,4.068108513863940345e+00
+8.380000000000000000e+02,4.127418420866951010e+01,2.944898063409210343e-01,1.159083400869059949e-01
+8.390000000000000000e+02,6.391125092252493545e+01,2.199895647364220164e+00,8.146924323092520348e-01
+8.400000000000000000e+02,2.596963779743518899e+02,1.469324491922919940e+01,3.960636771510570142e+00
+8.410000000000000000e+02,9.948157486987545894e+01,7.764343474001840661e+00,2.347736500212259880e+00
+8.420000000000000000e+02,1.790093229702285385e+02,1.528672869854749905e+01,4.110809480291179874e+00
+8.430000000000000000e+02,1.312183508533507847e+02,1.115425999599969842e+01,3.153314481633910038e+00
+8.440000000000000000e+02,1.240860976931446089e+02,1.050502815470160023e+01,2.979513255275359818e+00
+8.450000000000000000e+02,8.678080946836706744e+01,5.799461645164370793e+00,1.887106064037289865e+00
+8.460000000000000000e+02,1.001116425159616341e+02,7.841263954872759179e+00,2.364930895727360038e+00
+8.470000000000000000e+02,9.502247891339634123e+01,9.082361493806269337e+00,2.459108237490700066e+00
+8.480000000000000000e+02,6.085047243633621150e+01,1.817840170441390013e+00,7.024539514601341184e-01
+8.490000000000000000e+02,1.509567517439150492e+02,1.304954645631969967e+01,3.562635623396080309e+00
+8.500000000000000000e+02,6.059266160927779765e+01,1.790484982515289980e+00,6.927945666644890199e-01
+8.510000000000000000e+02,8.344519043767014921e+01,5.287610101077679481e+00,1.721761086471149893e+00
+8.520000000000000000e+02,1.273808986093910107e+02,1.078454679963689955e+01,3.065305639686269679e+00
+8.530000000000000000e+02,1.011817060681584053e+02,7.975686112570070208e+00,2.392834542530589914e+00
+8.540000000000000000e+02,4.884864115956558805e+01,8.789464523519641137e-01,3.192598622977000367e-01
+8.550000000000000000e+02,9.628615939008101066e+01,7.312709340509889522e+00,2.242607819626460053e+00
+8.560000000000000000e+02,5.499067039233562326e+01,1.279220039095070094e+00,4.596985628299029014e-01
+8.570000000000000000e+02,1.319858073092321433e+02,1.123032802593269786e+01,3.170865383599990039e+00
+8.580000000000000000e+02,6.910359807899342854e+01,2.940410180306209931e+00,1.028711481439650077e+00
+8.590000000000000000e+02,8.296282802295077374e+01,5.208686461819380220e+00,1.696466168849739997e+00
+8.600000000000000000e+02,4.199173374364280420e+01,3.359923836265599761e-01,1.326394520391429854e-01
+8.610000000000000000e+02,2.902838940170643767e+01,1.849305835542969928e-02,7.759762195418620431e-03
+8.620000000000000000e+02,1.083476563833086317e+02,8.803810494701810541e+00,2.552124140851069534e+00
+8.630000000000000000e+02,1.124517507043580906e+02,9.276070711320711482e+00,2.651436993370079875e+00
+8.640000000000000000e+02,1.344718852258602055e+02,1.146518781340600057e+01,3.227959170249569798e+00
+8.650000000000000000e+02,5.497273651665794603e+01,1.138272365820210075e+00,4.062605733580589296e-01
+8.660000000000000000e+02,9.599980208602855214e+01,7.268715036892919201e+00,2.231856309416970330e+00
+8.670000000000000000e+02,5.451126153870944080e+01,1.159894109392139994e+00,4.584093076952069912e-01
+8.680000000000000000e+02,8.381771824667710291e+01,5.344944700243170033e+00,1.741462630470119954e+00
+8.690000000000000000e+02,9.489140552824822805e+01,7.093616555440249272e+00,2.191689680413650088e+00
+8.700000000000000000e+02,8.416637750859777611e+01,5.397014048068650816e+00,1.760357507678450117e+00
+8.710000000000000000e+02,7.799704384676735458e+01,4.347313958210790474e+00,1.437375772073129943e+00
+8.720000000000000000e+02,4.808712059009850037e+01,8.126315559446071202e-01,3.063133389267700402e-01
+8.730000000000000000e+02,5.541808932834058510e+01,1.312223439408789938e+00,4.726502414722279499e-01
+8.740000000000000000e+02,1.641031983625541386e+02,1.416771421128950159e+01,3.822255694218240407e+00
+8.750000000000000000e+02,4.751393218266084517e+01,4.660338463277620136e+00,1.530617292009879948e+00
+8.760000000000000000e+02,1.474853124091061431e+02,8.471475093632500730e+00,2.490468755904220455e+00
+8.770000000000000000e+02,1.119036978227671142e+02,9.217433156825759255e+00,2.637421864120129911e+00
+8.780000000000000000e+02,1.170266263970866873e+02,9.797757475601631683e+00,2.777394636758310220e+00
+8.790000000000000000e+02,7.665940230444807924e+01,4.122823331888869625e+00,1.373341840144340020e+00
+8.800000000000000000e+02,6.304966045999308477e+01,2.086558774205669931e+00,7.836165066016659297e-01
+8.810000000000000000e+02,1.359979993190573566e+02,1.160913847032190027e+01,3.261802821763569504e+00
+8.820000000000000000e+02,7.153909264844779159e+01,3.302315453485319630e+00,1.137729994723350124e+00
+8.830000000000000000e+02,7.435251271150522712e+01,3.749412082976759653e+00,1.266260779672709891e+00
+8.840000000000000000e+02,7.601922714706824991e+01,7.613617332479719835e+00,2.314093660940879982e+00
+8.850000000000000000e+02,7.670353382665048514e+01,4.129587617294040669e+00,1.375358402372360178e+00
+8.860000000000000000e+02,5.526076168857841964e+01,1.298725818844360003e+00,4.678175602152639390e-01
+8.870000000000000000e+02,6.290481289862380976e+01,2.068744850211490238e+00,7.786389560477009475e-01
+8.880000000000000000e+02,1.091417842724177945e+02,1.185407157265180089e+01,3.325551535930649560e+00
+8.890000000000000000e+02,1.029528997894312425e+02,8.180166779225979568e+00,2.432283819014259940e+00
+8.900000000000000000e+02,1.624521706327475954e+02,1.404236035639589986e+01,3.787830271383810121e+00
+8.910000000000000000e+02,1.641225001869754863e+02,7.558200597751410399e+00,2.302134645202869923e+00
+8.920000000000000000e+02,1.179344534413394285e+02,9.106528060660409096e+00,2.612269339447220062e+00
+8.930000000000000000e+02,1.499172148524734780e+02,1.295559667681160043e+01,3.543502328930970613e+00
+8.940000000000000000e+02,1.020962780922653792e+02,1.157315166016659846e+00,4.576648154506539612e-01
+8.950000000000000000e+02,1.785833234092671375e+02,1.525647739305780171e+01,4.102594979035099954e+00
+8.960000000000000000e+02,5.017464470140705401e+01,9.721639465953978432e-01,3.440507414543350073e-01
+8.970000000000000000e+02,9.130369448555013889e+01,6.521454131381610786e+00,2.056871778910749793e+00
+8.980000000000000000e+02,1.106354352812567896e+02,9.078115428787551622e+00,2.605866597941839835e+00
+8.990000000000000000e+02,1.172665224448877126e+02,9.822828923964641135e+00,2.784223603898130062e+00
+9.000000000000000000e+02,9.915176675018082619e+01,7.722975223597299710e+00,2.338258964335119927e+00
+9.010000000000000000e+02,4.013692981007793037e+01,2.443401407198670106e-01,9.387822186767889876e-02
+9.020000000000000000e+02,7.876709831995766820e+01,4.472625091599370606e+00,1.476120580239830060e+00
+9.030000000000000000e+02,4.648833718465725440e+01,6.907250456302240771e-01,2.682392389300150004e-01
+9.040000000000000000e+02,1.522753874052719141e+02,1.317430295429399933e+01,3.587425962982410343e+00
+9.050000000000000000e+02,4.009692300611450122e+01,2.430883830962060344e-01,9.317392300202789746e-02
+9.060000000000000000e+02,8.551977566262608832e+01,5.602726459449460350e+00,1.829650886914240049e+00
+9.070000000000000000e+02,1.582199179546477694e+02,1.369081955914090187e+01,3.701325494596359622e+00
+9.080000000000000000e+02,4.036026067883332757e+01,2.514013094563730122e-01,9.788681991272139837e-02
+9.090000000000000000e+02,9.102630118711049079e+01,6.478445742385440020e+00,2.046474908171819873e+00
+9.100000000000000000e+02,4.150736826670528501e+01,3.088061337014650354e-01,1.210322577636799984e-01
+9.110000000000000000e+02,7.588563243153338078e+01,2.554880918456789995e+00,9.180552797146909194e-01
+9.120000000000000000e+02,1.045433377689055874e+02,8.355717546549849217e+00,2.466990794920520091e+00
+9.130000000000000000e+02,8.826180351138275171e+01,9.968802180341971297e-01,4.325759740189439873e-01
+9.140000000000000000e+02,5.474858095295426352e+01,1.261965897502759892e+00,4.526014509573471067e-01
+9.150000000000000000e+02,6.742410095220344601e+01,2.687498930851460077e+00,9.565265173999780712e-01
+9.160000000000000000e+02,8.328464619197168872e+01,5.261777143826549263e+00,1.713420005310760219e+00
+9.170000000000000000e+02,3.233399896756553460e+01,4.800367982044510123e-02,1.732203136409839889e-02
+9.180000000000000000e+02,1.535888394339050649e+02,1.329467851977819848e+01,3.612818945956890015e+00
+9.190000000000000000e+02,2.220241052818526271e+02,1.449866647990049939e+00,5.444358705337410953e-01
+9.200000000000000000e+02,5.699156566432522908e+01,1.442072025740019914e+00,5.262907394562950092e-01
+9.210000000000000000e+02,1.147945124922441380e+02,3.804740810845660182e+00,1.283960888033429892e+00
+9.220000000000000000e+02,1.215250180318411566e+02,1.026084595390840093e+01,2.908190640633279589e+00
+9.230000000000000000e+02,9.048092237348369338e+01,6.389649073824429593e+00,2.026452757761560086e+00
+9.240000000000000000e+02,4.133076534205974184e+01,2.973905255538100145e-01,1.171265151865620024e-01
+9.250000000000000000e+02,1.115364479494598129e+02,9.176378528556099923e+00,2.628160516198399854e+00
+9.260000000000000000e+02,1.871775562830781610e+02,1.390867337314760022e+01,3.753508431350849772e+00
+9.270000000000000000e+02,9.011479110344542676e+01,6.331148761338889486e+00,2.013311339945050182e+00
+9.280000000000000000e+02,1.157523286108297782e+02,4.247796642184289873e+00,1.407125374045800070e+00
+9.290000000000000000e+02,1.345931695567962265e+02,8.078789504732510451e+00,2.413328906397069762e+00
+9.300000000000000000e+02,5.380717792936016508e+01,1.193970060935469979e+00,4.264926947309499727e-01
+9.310000000000000000e+02,6.971177355531388287e+01,3.023673334168289983e+00,1.058315243830660002e+00
+9.320000000000000000e+02,8.382133744179841983e+01,5.345483231264849699e+00,1.741656466747439902e+00
+9.330000000000000000e+02,7.679130812688110552e+01,4.147200388482950473e+00,1.379394853999580173e+00
+9.340000000000000000e+02,6.947546051964104663e+01,2.991505107207459879e+00,1.046558553207829867e+00
+9.350000000000000000e+02,7.493476378862190757e+01,2.494146474278770009e-01,9.675262334420599297e-02
+9.360000000000000000e+02,2.015984542296542088e+02,1.676552413404600017e+01,4.486857393901879654e+00
+9.370000000000000000e+02,1.169380819823520596e+02,9.788326258775448707e+00,2.774887730059739877e+00
+9.380000000000000000e+02,1.149488802793696607e+02,9.560262966943199103e+00,2.718729795587429887e+00
+9.390000000000000000e+02,1.328249498856408195e+02,2.939201570948790199e+00,1.028284503285010087e+00
+9.400000000000000000e+02,1.431722677695654795e+02,1.227333697516139921e+01,3.415529209915000308e+00
+9.410000000000000000e+02,6.713344775658936214e+01,2.648945098818950239e+00,9.451780997625059966e-01
+9.420000000000000000e+02,1.463053795869320197e+02,1.258279464644339996e+01,3.474321796614070035e+00
+9.430000000000000000e+02,1.361652801368277323e+02,1.162370130752779929e+01,3.265575303369640192e+00
+9.440000000000000000e+02,1.426337700842448157e+02,1.222412759239469970e+01,3.405128092845430388e+00
+9.450000000000000000e+02,8.040908692674501879e+01,7.321334236103659521e+00,2.244729135583380053e+00
+9.460000000000000000e+02,9.422338875835177419e+01,6.984786649862339658e+00,2.167142279560679619e+00
+9.470000000000000000e+02,1.218494989181309620e+02,1.028903511217069777e+01,2.918323285825100033e+00
+9.480000000000000000e+02,9.649007163313882529e+01,7.343508906805600311e+00,2.250098578950059824e+00
+9.490000000000000000e+02,5.629035136967998199e+01,1.382553472019490082e+00,5.010741349164949954e-01
+9.500000000000000000e+02,7.120662882652507619e+01,3.246952396108140348e+00,1.123056268227009991e+00
+9.510000000000000000e+02,1.926355004261864394e+02,1.470571761879969763e+01,3.963901978596859443e+00
+9.520000000000000000e+02,4.880925284654738050e+01,8.756221622298600016e-01,3.185684069785480466e-01
+9.530000000000000000e+02,1.058248976192212751e+02,8.499223469756758575e+00,2.496209464640669839e+00
+9.540000000000000000e+02,1.256491133287837982e+02,1.132356340205520118e+00,4.518022946105489934e-01
+9.550000000000000000e+02,1.300113104114877558e+02,1.102099526448260036e+01,3.126180388496559992e+00
+9.560000000000000000e+02,8.077447479196064251e+01,4.824807196558519529e+00,1.578788641653649849e+00
+9.570000000000000000e+02,1.004207032482125470e+02,1.555642226851959897e+01,4.184989657038769906e+00
+9.580000000000000000e+02,1.075384552497260273e+02,1.006415799151910040e+00,3.566425198632320881e-01
+9.590000000000000000e+02,1.332055689668032414e+02,1.133929629980019804e+01,3.199084021356419782e+00
+9.600000000000000000e+02,7.774378252961925284e+01,4.305573457600339715e+00,1.424816314605090062e+00
+9.610000000000000000e+02,1.145022240272213168e+02,9.271645713196170036e+00,2.650358584806350226e+00
+9.620000000000000000e+02,1.662618574634203128e+02,1.148432714008749933e+01,3.232993609270550017e+00
+9.630000000000000000e+02,7.152090776936201166e+01,3.299593882643259590e+00,1.136916726951529899e+00
+9.640000000000000000e+02,6.958328615270873740e+01,3.006150134113419803e+00,1.051880581621249977e+00
+9.650000000000000000e+02,1.221065984697110878e+02,1.834534295181569963e+00,6.449416029559089658e-01
+9.660000000000000000e+02,4.760280577705086813e+01,7.762026318013809378e-01,2.984216530806469425e-01
+9.670000000000000000e+02,3.051362667168076541e+02,2.369942376755789581e+01,5.763425768621130096e+00
+9.680000000000000000e+02,2.098742022639878542e+02,1.726089466805489891e+01,4.587646345403240566e+00
+9.690000000000000000e+02,9.675792538722379277e+01,7.381393402460659559e+00,2.259655752579400101e+00
+9.700000000000000000e+02,7.032223599011570059e+01,3.112927528195879923e+00,1.085701636187089925e+00
+9.710000000000000000e+02,8.542200085068284920e+01,5.587675088099889997e+00,1.825004828418540059e+00
+9.720000000000000000e+02,7.849748994124161072e+01,4.428466861500670149e+00,1.462816562996770209e+00
+9.730000000000000000e+02,7.483719107854722097e+01,3.824222194065329816e+00,1.290209712824170074e+00
+9.740000000000000000e+02,7.649961031417767288e+01,8.507971027101429939e+00,2.498025598125120261e+00
+9.750000000000000000e+02,7.927565651068738362e+01,4.559294921146009649e+00,1.500835289127020022e+00
+9.760000000000000000e+02,4.452531467325740522e+01,9.845770817693599852e-01,4.281581891573440490e-01
+9.770000000000000000e+02,1.582054800856320185e+02,1.368963732752730067e+01,3.701038224511520092e+00
+9.780000000000000000e+02,9.741525146727542506e+01,8.545150277246440496e+00,2.504599801611969934e+00
+9.790000000000000000e+02,1.201529685530441043e+02,1.012789807142159937e+01,2.866621719565109583e+00
+9.800000000000000000e+02,5.499865054492226335e+01,1.279792485403790092e+00,4.599353456176270005e-01
+9.810000000000000000e+02,1.172201877997026997e+02,9.818083912865009921e+00,2.782900397177560325e+00
+9.820000000000000000e+02,9.680010688620599524e+01,7.387352987976019136e+00,2.261172475831950468e+00
+9.830000000000000000e+02,1.093650099190521416e+02,8.927702554247868960e+00,2.575537235454109730e+00
+9.840000000000000000e+02,1.116125052306356764e+02,2.673617837747209958e+00,8.536882086844541639e-01
+9.850000000000000000e+02,2.293792660130197021e+02,1.945690546695310275e+01,4.831163988276549581e+00
+9.860000000000000000e+02,4.989807360566414474e+01,9.586004495558159144e-01,3.386939512068130043e-01
+9.870000000000000000e+02,7.904178418223948199e+01,8.927682114815789660e+00,2.429815158725669999e+00
+9.880000000000000000e+02,3.335073685647994779e+01,6.318074183964640655e-02,2.202262046939219842e-02
+9.890000000000000000e+02,6.761552874957547488e+01,8.312017945071201175e-02,2.819922635610460152e-02
+9.900000000000000000e+02,1.359751692899206148e+02,1.435606559574890184e+01,3.876234756067509668e+00
+9.910000000000000000e+02,9.600320533104114418e+01,7.269236870901550240e+00,2.231983168756670022e+00
+9.920000000000000000e+02,1.490020491091123063e+02,1.286438818108360138e+01,3.525897128937519831e+00
+9.930000000000000000e+02,1.239798009887912400e+02,6.856900065730969551e+00,2.134922652190479830e+00
+9.940000000000000000e+02,1.721515823747157583e+02,1.479085258129130054e+01,3.984522868303590037e+00
+9.950000000000000000e+02,4.548160810455529202e+01,4.369467336596960516e-01,1.773505546267199751e-01
+9.960000000000000000e+02,7.664355332406847765e+01,4.120396055181210215e+00,1.372618957714120125e+00
+9.970000000000000000e+02,1.139763529615720756e+02,9.449846581681070390e+00,2.691808776681460014e+00
+9.980000000000000000e+02,6.317352598940736641e+01,2.101887345800979912e+00,7.879230267641100793e-01
+9.990000000000000000e+02,3.610058328932037597e+01,1.202611863383300089e-01,4.223899235442629685e-02
diff --git a/pelicun/tests/dl_calculation/e5/test_e5.py b/pelicun/tests/dl_calculation/e5/test_e5.py
new file mode 100644
index 000000000..451afb654
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e5/test_e5.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""DL Calculation Example 5."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial working directory; otherwise tests that
+    # run afterwards could fail because of the changed directory.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_5(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+ os.chdir(this_dir)
+ # copy input files
+ for file_name in ('1-AIM.json', 'response.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+ # change directory to there
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='1-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '1-AIM.json',
+ '1-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
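
The e5 test above and the e6 test below share an identical scaffold: a fixture that yields the source and scratch directories, a run_pelicun call on the copied 1-AIM.json/response.csv pair, and an inventory check on the 19 output files. A hedged sketch of how that scaffold could be folded into a single parametrized test follows; the test name, file location, and parametrize ids are illustrative, not part of this change.

    import os
    import shutil
    import tempfile
    from pathlib import Path

    import pytest

    from pelicun.tools.DL_calculation import run_pelicun


    @pytest.mark.parametrize('example', ['e5', 'e6'])  # illustrative ids
    def test_dl_calculation_im(example: str) -> None:
        # Resolve the example directory relative to this (hypothetical) file.
        this_dir = Path(__file__).parent / example
        temp_dir = Path(tempfile.mkdtemp())
        # Copy the inputs so all outputs land in the scratch directory.
        for file_name in ('1-AIM.json', 'response.csv'):
            shutil.copy(this_dir / file_name, temp_dir / file_name)
        initial_dir = Path.cwd()
        os.chdir(temp_dir)
        try:
            run_pelicun(
                demand_file='response.csv',
                config_path='1-AIM.json',
                output_path=None,
                coupled_edp=True,
                realizations=100,
                auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py',
                detailed_results=False,
                output_format=None,
                custom_model_dir=None,
            )
        finally:
            # Restore the working directory for subsequent tests.
            os.chdir(initial_dir)
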
diff --git a/pelicun/tests/dl_calculation/e6/1-AIM.json b/pelicun/tests/dl_calculation/e6/1-AIM.json
new file mode 100644
index 000000000..51ea594e2
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e6/1-AIM.json
@@ -0,0 +1,513 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "1",
+ "location": {
+ "latitude": 37.557583,
+ "longitude": -122.307781
+ },
+ "Latitude": 37.557583,
+ "Longitude": -122.307781,
+ "PlanArea": 1945.0,
+ "NumberOfStories": 1,
+ "YearBuilt": 1948,
+ "OccupancyClass": "RES1",
+ "ReplacementCost": 632577.7,
+ "StructureType": "W1",
+ "Footprint": "{\"type\":\"Feature\",\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-122.307821,37.557470],[-122.307701,37.557549],[-122.307847,37.557688],[-122.307968,37.557609],[-122.307821,37.557470]]]},\"properties\":{}}",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "ShakeMapIMs/EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "units": {
+ "PGA": "g"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "IMasEDP",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH EQ IM",
+ "Realizations": 100,
+ "auto_script": "PelicunDefault/Hazus_Earthquake_IM.py",
+ "coupled_EDP": true,
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {},
+ "Simulation": {
+ "type": "IMasEDP"
+ },
+ "UQ": {},
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data/ShakeMapIMs",
+ "Events": [
+ [
+ "Site_74397.csvx0x00000",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00001",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00002",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00003",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00004",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00005",
+ 1.0
+ ],
+ [
+ "Site_73976.csvx0x00006",
+ 1.0
+ ],
+ [
+ "Site_74818.csvx0x00007",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00008",
+ 1.0
+ ],
+ [
+ "Site_74818.csvx0x00009",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00010",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00011",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00012",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00013",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00014",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00015",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00016",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00017",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00018",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00019",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00020",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00021",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00022",
+ 1.0
+ ],
+ [
+ "Site_73976.csvx0x00023",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00024",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00025",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00026",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00027",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00028",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00029",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00030",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00031",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00032",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00033",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00034",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00035",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00036",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00037",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00038",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00039",
+ 1.0
+ ],
+ [
+ "Site_74818.csvx0x00040",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00041",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00042",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00043",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00044",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00045",
+ 1.0
+ ],
+ [
+ "Site_73976.csvx0x00046",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00047",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00048",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00049",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00050",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00051",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00052",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00053",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00054",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00055",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00056",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00057",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00058",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00059",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00060",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00061",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00062",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00063",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00064",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00065",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00066",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00067",
+ 1.0
+ ],
+ [
+ "Site_73976.csvx0x00068",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00069",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00070",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00071",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00072",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00073",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00074",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00075",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00076",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00077",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00078",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00079",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00080",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00081",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00082",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00083",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00084",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00085",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00086",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00087",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00088",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00089",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00090",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00091",
+ 1.0
+ ],
+ [
+ "Site_74398.csvx0x00092",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00093",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00094",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00095",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00096",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00097",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00098",
+ 1.0
+ ],
+ [
+ "Site_74397.csvx0x00099",
+ 1.0
+ ]
+ ],
+ "type": "intensityMeasure"
+ }
+ ]
+}
\ No newline at end of file
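
The DL.ApplicationData block in this AIM file mirrors the keyword arguments the test passes to run_pelicun: Realizations maps to realizations, coupled_EDP to coupled_edp, and auto_script to auto_script_path. A minimal sketch of pulling those fields out of the config — the key names come from the file above; the loader itself is illustrative:

    import json
    from pathlib import Path

    # Parse the AIM configuration and extract the DL application settings.
    config = json.loads(Path('1-AIM.json').read_text())
    dl_data = config['Applications']['DL']['ApplicationData']

    print(dl_data['DL_Method'])     # 'HAZUS MH EQ IM'
    print(dl_data['Realizations'])  # 100
    print(dl_data['auto_script'])   # 'PelicunDefault/Hazus_Earthquake_IM.py'
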
diff --git a/pelicun/tests/dl_calculation/e6/__init__.py b/pelicun/tests/dl_calculation/e6/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e6/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/dl_calculation/e6/response.csv b/pelicun/tests/dl_calculation/e6/response.csv
new file mode 100644
index 000000000..9edbfabce
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e6/response.csv
@@ -0,0 +1,101 @@
+,1-PGA-1-1
+0.000000000000000000e+00,1.259614000984252158e+01
+1.000000000000000000e+00,1.259614000984252158e+01
+2.000000000000000000e+00,1.259614000984252158e+01
+3.000000000000000000e+00,1.259614000984252158e+01
+4.000000000000000000e+00,1.259614000984252158e+01
+5.000000000000000000e+00,1.259614000984252158e+01
+6.000000000000000000e+00,1.190761537073490928e+01
+7.000000000000000000e+00,1.375762316272965968e+01
+8.000000000000000000e+00,1.259614000984252158e+01
+9.000000000000000000e+00,1.375762316272965968e+01
+1.000000000000000000e+01,1.259614000984252158e+01
+1.100000000000000000e+01,1.259614000984252158e+01
+1.200000000000000000e+01,1.259614000984252158e+01
+1.300000000000000000e+01,1.259614000984252158e+01
+1.400000000000000000e+01,1.259614000984252158e+01
+1.500000000000000000e+01,1.259614000984252158e+01
+1.600000000000000000e+01,1.259614000984252158e+01
+1.700000000000000000e+01,1.188509353674540847e+01
+1.800000000000000000e+01,1.259614000984252158e+01
+1.900000000000000000e+01,1.259614000984252158e+01
+2.000000000000000000e+01,1.259614000984252158e+01
+2.100000000000000000e+01,1.259614000984252158e+01
+2.200000000000000000e+01,1.188509353674540847e+01
+2.300000000000000000e+01,1.190761537073490928e+01
+2.400000000000000000e+01,1.259614000984252158e+01
+2.500000000000000000e+01,1.188509353674540847e+01
+2.600000000000000000e+01,1.259614000984252158e+01
+2.700000000000000000e+01,1.188509353674540847e+01
+2.800000000000000000e+01,1.188509353674540847e+01
+2.900000000000000000e+01,1.259614000984252158e+01
+3.000000000000000000e+01,1.259614000984252158e+01
+3.100000000000000000e+01,1.259614000984252158e+01
+3.200000000000000000e+01,1.259614000984252158e+01
+3.300000000000000000e+01,1.188509353674540847e+01
+3.400000000000000000e+01,1.188509353674540847e+01
+3.500000000000000000e+01,1.259614000984252158e+01
+3.600000000000000000e+01,1.259614000984252158e+01
+3.700000000000000000e+01,1.259614000984252158e+01
+3.800000000000000000e+01,1.188509353674540847e+01
+3.900000000000000000e+01,1.259614000984252158e+01
+4.000000000000000000e+01,1.375762316272965968e+01
+4.100000000000000000e+01,1.188509353674540847e+01
+4.200000000000000000e+01,1.259614000984252158e+01
+4.300000000000000000e+01,1.259614000984252158e+01
+4.400000000000000000e+01,1.259614000984252158e+01
+4.500000000000000000e+01,1.259614000984252158e+01
+4.600000000000000000e+01,1.190761537073490928e+01
+4.700000000000000000e+01,1.259614000984252158e+01
+4.800000000000000000e+01,1.259614000984252158e+01
+4.900000000000000000e+01,1.259614000984252158e+01
+5.000000000000000000e+01,1.259614000984252158e+01
+5.100000000000000000e+01,1.259614000984252158e+01
+5.200000000000000000e+01,1.259614000984252158e+01
+5.300000000000000000e+01,1.259614000984252158e+01
+5.400000000000000000e+01,1.259614000984252158e+01
+5.500000000000000000e+01,1.259614000984252158e+01
+5.600000000000000000e+01,1.259614000984252158e+01
+5.700000000000000000e+01,1.188509353674540847e+01
+5.800000000000000000e+01,1.188509353674540847e+01
+5.900000000000000000e+01,1.188509353674540847e+01
+6.000000000000000000e+01,1.259614000984252158e+01
+6.100000000000000000e+01,1.259614000984252158e+01
+6.200000000000000000e+01,1.188509353674540847e+01
+6.300000000000000000e+01,1.188509353674540847e+01
+6.400000000000000000e+01,1.259614000984252158e+01
+6.500000000000000000e+01,1.259614000984252158e+01
+6.600000000000000000e+01,1.188509353674540847e+01
+6.700000000000000000e+01,1.259614000984252158e+01
+6.800000000000000000e+01,1.190761537073490928e+01
+6.900000000000000000e+01,1.259614000984252158e+01
+7.000000000000000000e+01,1.188509353674540847e+01
+7.100000000000000000e+01,1.188509353674540847e+01
+7.200000000000000000e+01,1.259614000984252158e+01
+7.300000000000000000e+01,1.259614000984252158e+01
+7.400000000000000000e+01,1.259614000984252158e+01
+7.500000000000000000e+01,1.259614000984252158e+01
+7.600000000000000000e+01,1.259614000984252158e+01
+7.700000000000000000e+01,1.259614000984252158e+01
+7.800000000000000000e+01,1.188509353674540847e+01
+7.900000000000000000e+01,1.259614000984252158e+01
+8.000000000000000000e+01,1.259614000984252158e+01
+8.100000000000000000e+01,1.259614000984252158e+01
+8.200000000000000000e+01,1.259614000984252158e+01
+8.300000000000000000e+01,1.259614000984252158e+01
+8.400000000000000000e+01,1.259614000984252158e+01
+8.500000000000000000e+01,1.259614000984252158e+01
+8.600000000000000000e+01,1.259614000984252158e+01
+8.700000000000000000e+01,1.188509353674540847e+01
+8.800000000000000000e+01,1.259614000984252158e+01
+8.900000000000000000e+01,1.259614000984252158e+01
+9.000000000000000000e+01,1.259614000984252158e+01
+9.100000000000000000e+01,1.188509353674540847e+01
+9.200000000000000000e+01,1.188509353674540847e+01
+9.300000000000000000e+01,1.259614000984252158e+01
+9.400000000000000000e+01,1.259614000984252158e+01
+9.500000000000000000e+01,1.259614000984252158e+01
+9.600000000000000000e+01,1.259614000984252158e+01
+9.700000000000000000e+01,1.259614000984252158e+01
+9.800000000000000000e+01,1.259614000984252158e+01
+9.900000000000000000e+01,1.259614000984252158e+01
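
This demand file carries a single intensity-measure column, 1-PGA-1-1, with one row per realization (indices 0 through 99); the RegionalEvent block above declares the PGA unit. Because the test runs with coupled_edp=True, these rows are consumed as-is rather than fit to a parametric demand model. A quick way to inspect such a file — the pandas usage is illustrative:

    import pandas as pd

    # Load the demand sample; the first column is the realization index.
    demand = pd.read_csv('response.csv', index_col=0)
    print(demand.shape)                    # (100, 1)
    print(demand['1-PGA-1-1'].describe())  # summary of the PGA sample
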
diff --git a/pelicun/tests/dl_calculation/e6/test_e6.py b/pelicun/tests/dl_calculation/e6/test_e6.py
new file mode 100644
index 000000000..5f6c9cf71
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e6/test_e6.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""DL Calculation Example 6."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.pelicun_warnings import PelicunWarning
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+    # Return to the initial working directory; otherwise tests that
+    # run afterwards could fail because of the changed directory.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_6(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+ os.chdir(this_dir)
+ # copy input files
+ for file_name in ('1-AIM.json', 'response.csv'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+ # change directory to there
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='1-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='PelicunDefault/Hazus_Earthquake_IM.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '1-AIM.json',
+ '1-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values in the output files: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e7/1-AIM.json b/pelicun/tests/dl_calculation/e7/1-AIM.json
new file mode 100644
index 000000000..442b3a025
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e7/1-AIM.json
@@ -0,0 +1,194 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "1",
+ "location": {
+ "latitude": 39.41770296,
+ "longitude": -74.50360821
+ },
+ "Latitude": 39.41770296,
+ "Longitude": -74.50360821,
+ "BldgID": "NJBF000081138",
+ "Address": "14 W LEE AVE",
+ "City": "Absecon",
+ "State": "NJ",
+ "OccupancyClass": "RES1",
+ "BuildingType": 3001,
+ "UseCode": NaN,
+ "BldgClass": 17.0,
+ "DesignLevel": "NE",
+ "YearBuiltNJDEP": 1956,
+ "YearBuiltMODIV": 1956,
+ "NumberofStories0": 3101,
+ "NumberOfStories": 2,
+ "NoUnits": 1,
+ "PlanArea0": 1310.670302,
+ "PlanArea": 1310.670302,
+ "FoundationType": 3505,
+ "SplitLevel": "NO",
+ "ElevationR0": 19.64,
+ "ElevationR1": 33.89,
+ "FirstFloorHt0": 3,
+ "FirstFloorHt1": -2.97,
+ "FloodZone": 6112,
+ "DSWI": 112.44902,
+ "DSWII": 122.279935,
+ "DSWIII": 131.916872,
+ "DSWIV": 138.112949,
+ "WindZone": "I",
+ "AvgJanTemp": "Above",
+ "RoofShape": "Gable",
+ "RoofSlope": 0,
+ "RoofCover": 5701.0,
+ "RoofSystem": "Wood",
+ "MeanRoofHt": 26.76835396,
+ "WindowArea": 0,
+ "Garage": 0.0,
+ "HazusClassW": NaN,
+ "HazusClassIN": NaN,
+ "HazusClassWA": NaN,
+ "AnalysisDefault": 1,
+ "AnalysisAdopted": 1,
+ "Modifications": NaN,
+ "z0": 0.35,
+ "structureType": "nav",
+ "replacementCost": 1,
+ "Footprint": "{\"type\":\"Feature\",\"geometry\":{\"type\":\"Polygon\",\"coordinates\":[[[-74.503572,39.417656],[-74.503550,39.417744],[-74.503703,39.417767],[-74.503724,39.417679],[-74.503572,39.417656]]]},\"properties\":{}}",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Examples/R2D/00_Built_ins/E7HurricaneWindWater/input_data/IMs",
+ "units": {
+ "PIH": "ft",
+ "PWS": "mph"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "IMasEDP",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH HU",
+ "Realizations": 500,
+ "auto_script": "auto_HU_NJ.py",
+ "coupled_EDP": true,
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "path_to_auto_script": "/Users/adamzs/Examples/R2D/00_Built_ins/E7HurricaneWindWater/input_data/auto_pop",
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {},
+ "Simulation": {
+ "type": "IMasEDP"
+ },
+ "UQ": {},
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "10250.csvx0x00000",
+ 1.0
+ ],
+ [
+ "10300.csvx0x00001",
+ 1.0
+ ],
+ [
+ "10350.csvx0x00002",
+ 1.0
+ ],
+ [
+ "10350.csvx0x00003",
+ 1.0
+ ],
+ [
+ "10350.csvx0x00004",
+ 1.0
+ ],
+ [
+ "10350.csvx0x00005",
+ 1.0
+ ],
+ [
+ "10250.csvx0x00006",
+ 1.0
+ ],
+ [
+ "10250.csvx0x00007",
+ 1.0
+ ],
+ [
+ "10350.csvx0x00008",
+ 1.0
+ ],
+ [
+ "10350.csvx0x00009",
+ 1.0
+ ]
+ ],
+ "type": "intensityMeasure"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e7/__init__.py b/pelicun/tests/dl_calculation/e7/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e7/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py b/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py
new file mode 100644
index 000000000..72004d445
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e7/auto_HU_NJ.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+# Frank McKenna
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import pandas as pd
+
+from pelicun.tests.dl_calculation.rulesets.WindMetaVarRulesets import parse_BIM
+from pelicun.tests.dl_calculation.rulesets.BuildingClassRulesets import (
+ building_class,
+)
+from pelicun.tests.dl_calculation.rulesets.FloodAssmRulesets import Assm_config
+from pelicun.tests.dl_calculation.rulesets.FloodClassRulesets import FL_config
+from pelicun.tests.dl_calculation.rulesets.WindCECBRulesets import CECB_config
+from pelicun.tests.dl_calculation.rulesets.WindCERBRulesets import CERB_config
+from pelicun.tests.dl_calculation.rulesets.WindMECBRulesets import MECB_config
+from pelicun.tests.dl_calculation.rulesets.WindMERBRulesets import MERB_config
+from pelicun.tests.dl_calculation.rulesets.WindMHRulesets import MH_config
+from pelicun.tests.dl_calculation.rulesets.WindMLRIRulesets import MLRI_config
+from pelicun.tests.dl_calculation.rulesets.WindMLRMRulesets import MLRM_config
+from pelicun.tests.dl_calculation.rulesets.WindMMUHRulesets import MMUH_config
+from pelicun.tests.dl_calculation.rulesets.WindMSFRulesets import MSF_config
+from pelicun.tests.dl_calculation.rulesets.WindSECBRulesets import SECB_config
+from pelicun.tests.dl_calculation.rulesets.WindSERBRulesets import SERB_config
+from pelicun.tests.dl_calculation.rulesets.WindSPMBRulesets import SPMB_config
+from pelicun.tests.dl_calculation.rulesets.WindWMUHRulesets import WMUH_config
+from pelicun.tests.dl_calculation.rulesets.WindWSFRulesets import WSF_config
+
+
+def auto_populate(aim):
+ """
+ Populates the DL model for hurricane assessments in Atlantic County, NJ
+
+ Assumptions:
+ - Everything relevant to auto-population is provided in the Buiding
+ Information Model (AIM).
+ - The information expected in the AIM file is described in the parse_AIM
+ method.
+
+ Parameters
+ ----------
+ aim: dictionary
+ Contains the information that is available about the asset and will be
+ used to auto-popualate the damage and loss model.
+
+ Returns
+ -------
+ GI_ap: dictionary
+ Containes the extended AIM data.
+ DL_ap: dictionary
+ Contains the auto-populated loss model.
+ """
+
+ # extract the General Information
+ GI = aim.get('GeneralInformation', None)
+
+ # parse the GI data
+ GI_ap = parse_BIM(GI, location='NJ', hazards=['wind', 'inundation'])
+
+ # identify the building class
+ bldg_class = building_class(GI_ap, hazard='wind')
+
+    # prepare the building configuration string
+    # (dispatch on the building class; each class has its own ruleset)
+    config_functions = {
+        'WSF': WSF_config,
+        'WMUH': WMUH_config,
+        'MSF': MSF_config,
+        'MMUH': MMUH_config,
+        'MLRM': MLRM_config,
+        'MLRI': MLRI_config,
+        'MERB': MERB_config,
+        'MECB': MECB_config,
+        'CECB': CECB_config,
+        'CERB': CERB_config,
+        'SPMB': SPMB_config,
+        'SECB': SECB_config,
+        'SERB': SERB_config,
+        'MH': MH_config,
+    }
+    if bldg_class not in config_functions:
+        raise ValueError(
+            f'Building class {bldg_class} not recognized by the '
+            f'auto-population routine.'
+        )
+    bldg_config = config_functions[bldg_class](GI_ap)
+
+ # prepare the flood rulesets
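+    # (this example considers both wind and inundation, so a flood archetype
+    # is assigned alongside the wind archetype)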
+ fld_config = FL_config(GI_ap)
+
+ # prepare the assembly loss compositions
+ hu_assm, fl_assm = Assm_config(GI_ap)
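+    # note: these return values are currently not used further in this script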
+
+ # prepare the component assignment
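+    # Each dictionary key becomes a row after the transpose: one row per
+    # fragility group (the wind and the flood archetype), with the columns
+    # pelicun expects in the component assignment (CMP_QNT.csv).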
+ CMP = pd.DataFrame(
+ {
+ f'{bldg_config}': ['ea', 1, 1, 1, 'N/A'],
+ f'{fld_config}': ['ea', 1, 1, 1, 'N/A'],
+ },
+ index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
+ ).T
+
+ DL_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Hurricane',
+ 'NumberOfStories': f"{GI_ap['NumberOfStories']}",
+ 'OccupancyType': f"{GI_ap['OccupancyClass']}",
+ 'PlanArea': f"{GI_ap['PlanArea']}",
+ },
+ 'Damage': {'DamageProcess': 'Hazus Hurricane'},
+ 'Demands': {},
+ 'Losses': {
+ 'BldgRepair': {
+ 'ConsequenceDatabase': 'Hazus Hurricane',
+ 'MapApproach': 'Automatic',
+ 'DecisionVariables': {
+ 'Cost': True,
+ 'Carbon': False,
+ 'Energy': False,
+ 'Time': False,
+ },
+ }
+ },
+ }
+
+ return GI_ap, DL_ap, CMP
diff --git a/pelicun/tests/dl_calculation/e7/response.csv b/pelicun/tests/dl_calculation/e7/response.csv
new file mode 100644
index 000000000..cdeb2123a
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e7/response.csv
@@ -0,0 +1,11 @@
+,1-PWS-1-1, 1-PIH-1-1
+0.000000000000000000e+00,2.690600000000000023e+02,1.005799999999999983e+01
+1.000000000000000000e+00,2.682386666666666883e+02,1.005799999999999983e+01
+2.000000000000000000e+00,2.674320000000000164e+02,1.005799999999999983e+01
+3.000000000000000000e+00,2.674320000000000164e+02,1.005799999999999983e+01
+4.000000000000000000e+00,2.674320000000000164e+02,1.005799999999999983e+01
+5.000000000000000000e+00,2.674320000000000164e+02,1.005799999999999983e+01
+6.000000000000000000e+00,2.690600000000000023e+02,1.005799999999999983e+01
+7.000000000000000000e+00,2.690600000000000023e+02,1.005799999999999983e+01
+8.000000000000000000e+00,2.674320000000000164e+02,1.005799999999999983e+01
+9.000000000000000000e+00,2.674320000000000164e+02,1.005799999999999983e+01
diff --git a/pelicun/tests/dl_calculation/e7/test_e7.py b/pelicun/tests/dl_calculation/e7/test_e7.py
new file mode 100644
index 000000000..9f16c2697
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e7/test_e7.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""DL Calculation Example 7."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+ # go back to the right directory, otherwise any tests that follow
+ # could have issues.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_7(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+ ruleset_files = [
+ path.resolve()
+ for path in Path('pelicun/tests/dl_calculation/rulesets').glob(
+ '*Rulesets.py'
+ )
+ ]
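+    # These shared ruleset modules are copied next to the auto script below
+    # and removed again after the calculation.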
+
+ os.chdir(this_dir)
+ # copy input files
+ for file_name in ('1-AIM.json', 'response.csv', 'auto_HU_NJ.py'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+ # copy ruleset files
+ for file_path in ruleset_files:
+ shutil.copy(str(file_path), f'{temp_dir}/{file_path.name}')
+
+ # change directory to there
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='1-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='auto_HU_NJ.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ # now remove the ruleset files and auto script
+ for file_path in ruleset_files:
+ Path(f'{temp_dir}/{file_path.name}').unlink()
+ Path('auto_HU_NJ.py').unlink()
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '1-AIM.json',
+ '1-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values in the output files: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e8/1-AIM.json b/pelicun/tests/dl_calculation/e8/1-AIM.json
new file mode 100644
index 000000000..203e14395
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e8/1-AIM.json
@@ -0,0 +1,130 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "1",
+ "location": {
+ "latitude": 30.2696026,
+ "longitude": -93.18640624
+ },
+ "RoofShape": "Hip",
+ "PlanArea": 744.6,
+ "Longitude": -93.18640624,
+ "Latitude": 30.2696026,
+ "LULC": 43,
+ "DWSII": 128.7,
+ "BuildingType": "Wood",
+ "OccupancyClass": "RES3",
+ "AvgJanTemp": "Above",
+ "Garage": 0,
+ "NumberOfStories": 1,
+ "MeanRoofHt": 15.0,
+ "RoofSlope": 0.25,
+ "YearBuilt": 1962,
+ "Footprint": "{\"geometry\": {\"coordinates\": [[[-93.186341, 30.269573], [-93.18626, 30.269613], [-93.186367, 30.26977], [-93.186542, 30.269681], [-93.186471, 30.269576], [-93.186496, 30.269444], [-93.186369, 30.269425], [-93.186341, 30.269573]]], \"type\": \"Polygon\"}, \"properties\": {}, \"type\": \"Feature\"}",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "kips",
+ "length": "ft",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "intensityLabels": [
+ "PWS"
+ ],
+ "intensityMeasures": [
+ "PWS"
+ ],
+ "units": {
+ "PWS": "mph"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "IMasEDP",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "HAZUS MH HU",
+ "Realizations": 500,
+ "auto_script": "auto_HU_LA.py",
+ "coupled_EDP": true,
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "path_to_auto_script": "/Users/adamzs/Examples/R2D/00_Built_ins/E8HurricaneWind/input_data/ruleset",
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {},
+ "Simulation": {
+ "type": "IMasEDP"
+ },
+ "UQ": {},
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "Site_0.csvx0x00000",
+ 1.0
+ ]
+ ],
+ "type": "intensityMeasure"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e8/__init__.py b/pelicun/tests/dl_calculation/e8/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e8/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/dl_calculation/e8/auto_HU_LA.py b/pelicun/tests/dl_calculation/e8/auto_HU_LA.py
new file mode 100644
index 000000000..7402d4c7e
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e8/auto_HU_LA.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+# Frank McKenna
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import pandas as pd
+
+from pelicun.tests.dl_calculation.rulesets.MetaVarRulesets import parse_BIM
+from pelicun.tests.dl_calculation.rulesets.BldgClassRulesets import building_class
+from pelicun.tests.dl_calculation.rulesets.WindWSFRulesets import WSF_config
+from pelicun.tests.dl_calculation.rulesets.WindWMUHRulesets import WMUH_config
+
+
+def auto_populate(aim):
+ """
+ Populates the DL model for hurricane assessments in Atlantic County, NJ
+
+ Assumptions:
+ - Everything relevant to auto-population is provided in the Buiding
+ Information Model (AIM).
+ - The information expected in the AIM file is described in the parse_GI
+ method.
+
+ Parameters
+ ----------
+ aim: dictionary
+ Contains the information that is available about the asset and will be
+ used to auto-popualate the damage and loss model.
+
+ Returns
+ -------
+ GI_ap: dictionary
+ Containes the extended BIM data.
+ DL_ap: dictionary
+ Contains the auto-populated loss model.
+ """
+
+ # extract the General Information
+ GI = aim.get('GeneralInformation', None)
+
+ # parse the GI data
+ GI_ap = parse_BIM(
+ GI,
+ location='LA',
+ hazards=[
+ 'wind',
+ ],
+ )
+
+ # identify the building class
+ bldg_class = building_class(GI_ap, hazard='wind')
+ GI_ap.update({'HazusClassW': bldg_class})
+
+ # prepare the building configuration string
+ if bldg_class == 'WSF':
+ bldg_config = WSF_config(GI_ap)
+ elif bldg_class == 'WMUH':
+ bldg_config = WMUH_config(GI_ap)
+ else:
+ raise ValueError(
+ f'Building class {bldg_class} not recognized by the '
+ f'auto-population routine.'
+ )
+
+ # drop keys of internal variables from GI_ap dict
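+    # (V_ult and V_asd are intermediate values that should not appear in the
+    # extended AIM output)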
+ internal_vars = ['V_ult', 'V_asd']
+ for var in internal_vars:
+ try:
+ GI_ap.pop(var)
+ except KeyError:
+ pass
+
+ # prepare the component assignment
+ CMP = pd.DataFrame(
+ {f'{bldg_config}': ['ea', 1, 1, 1, 'N/A']},
+ index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
+ ).T
+
+ DL_ap = {
+ 'Asset': {
+ 'ComponentAssignmentFile': 'CMP_QNT.csv',
+ 'ComponentDatabase': 'Hazus Hurricane',
+ 'NumberOfStories': f"{GI_ap['NumberOfStories']}",
+ 'OccupancyType': f"{GI_ap['OccupancyClass']}",
+ 'PlanArea': f"{GI_ap['PlanArea']}",
+ },
+ 'Damage': {'DamageProcess': 'Hazus Hurricane'},
+ 'Demands': {},
+ 'Losses': {
+ 'BldgRepair': {
+ 'ConsequenceDatabase': 'Hazus Hurricane',
+ 'MapApproach': 'Automatic',
+ 'DecisionVariables': {
+ 'Cost': True,
+ 'Carbon': False,
+ 'Energy': False,
+ 'Time': False,
+ },
+ }
+ },
+ }
+
+ return GI_ap, DL_ap, CMP
diff --git a/pelicun/tests/dl_calculation/e8/response.csv b/pelicun/tests/dl_calculation/e8/response.csv
new file mode 100644
index 000000000..d7ea6d3e5
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e8/response.csv
@@ -0,0 +1,2 @@
+,1-PWS-1-1
+0.000000000000000000e+00,1.941852000000000089e+02
diff --git a/pelicun/tests/dl_calculation/e8/test_e8.py b/pelicun/tests/dl_calculation/e8/test_e8.py
new file mode 100644
index 000000000..040f13c4d
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e8/test_e8.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""DL Calculation Example 8."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+ # go back to the right directory, otherwise any tests that follow
+ # could have issues.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_8(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+
+ ruleset_files = [
+ path.resolve()
+ for path in Path('pelicun/tests/dl_calculation/rulesets').glob(
+ '*Rulesets.py'
+ )
+ ]
+
+ os.chdir(this_dir)
+ # copy input files
+ for file_name in ('1-AIM.json', 'response.csv', 'auto_HU_LA.py'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+
+ # copy ruleset files
+ for file_path in ruleset_files:
+ shutil.copy(str(file_path), f'{temp_dir}/{file_path.name}')
+
+ # change directory to there
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='1-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='auto_HU_LA.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir=None,
+ )
+
+ # now remove the ruleset files and auto script
+ for file_path in ruleset_files:
+ Path(f'{temp_dir}/{file_path.name}').unlink()
+ Path('auto_HU_LA.py').unlink()
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '1-AIM.json',
+ '1-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+    # Check the values in the output files: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/e9/3500-AIM.json b/pelicun/tests/dl_calculation/e9/3500-AIM.json
new file mode 100644
index 000000000..cb9f9bbc0
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/3500-AIM.json
@@ -0,0 +1,149 @@
+{
+ "RandomVariables": [],
+ "GeneralInformation": {
+ "AIM_id": "3500",
+ "location": {
+ "latitude": 45.99862463501656,
+ "longitude": -123.92472997373451
+ },
+ "Longitude": -123.92472997373451,
+ "Latitude": 45.99862463501656,
+ "AREA": 5000.03963,
+ "PERIMETER": 300.00066,
+ "TAX_": 22449,
+ "TAX_ID": 22449,
+ "X_COORD": 7331926.547,
+ "Y_COORD": 868660.96,
+ "GIS_ACRES": 0.1,
+ "AV": 135257.0,
+ "AV_IMPROVE": 72644.0,
+ "AV_LAND": 62613.0,
+ "PROPERTY_C": 101,
+ "RMV": 234509.0,
+ "RMV_IMPROV": 79865.0,
+ "RMV_LAND": 154644.0,
+ "STAT_CLASS": 120,
+ "MA": 3,
+ "NH": "G",
+ "TAXMAPNUM": "6_10_16DD",
+ "X_UTM": 428394.34799099993,
+ "Y_UTM": 5094310.35306,
+ "Type": 0,
+ "Zone": 1,
+ "bldg_typ": 1.0,
+ "num_floors": 1,
+ "data_sourc": 1,
+ "NumberOfStories": 1,
+ "year_built": 1959,
+ "struct_typ": "W1",
+ "dgn_lvl": "Pre - Code",
+ "guid": "ef751380-811b-428b-b8c0-21353ad518c6",
+ "units": {
+ "force": "N",
+ "length": "m",
+ "time": "sec"
+ }
+ },
+ "DefaultValues": {
+ "driverFile": "driver",
+ "edpFiles": [
+ "EDP.json"
+ ],
+ "filenameDL": "BIM.json",
+ "filenameEDP": "EDP.json",
+ "filenameEVENT": "EVENT.json",
+ "filenameSAM": "SAM.json",
+ "filenameSIM": "SIM.json",
+ "rvFiles": [
+ "SAM.json",
+ "EVENT.json",
+ "SIM.json"
+ ],
+ "workflowInput": "scInput.json",
+ "workflowOutput": "EDP.json"
+ },
+ "commonFileDir": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "remoteAppDir": "/Users/adamzs/SimCenter",
+ "localAppDir": "/Users/adamzs/SimCenter",
+ "units": {
+ "force": "N",
+ "length": "m",
+ "time": "sec"
+ },
+ "outputs": {
+ "AIM": false,
+ "DM": true,
+ "DV": true,
+ "EDP": true,
+ "IM": false
+ },
+ "RegionalEvent": {
+ "eventFile": "EventGrid.csv",
+ "eventFilePath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "intensityLabels": [
+ "PIH"
+ ],
+ "intensityMeasures": [
+ "PIH"
+ ],
+ "units": {
+ "PIH": "m"
+ }
+ },
+ "assetType": "Buildings",
+ "Applications": {
+ "Events": [
+ {
+ "Application": "SimCenterEvent",
+ "ApplicationData": {},
+ "EventClassification": "Earthquake"
+ }
+ ],
+ "Modeling": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "Simulation": {
+ "Application": "IMasEDP",
+ "ApplicationData": {}
+ },
+ "UQ": {
+ "Application": "None",
+ "ApplicationData": {}
+ },
+ "DL": {
+ "Application": "Pelicun3",
+ "ApplicationData": {
+ "DL_Method": "User-provided Models",
+ "Realizations": 500,
+ "auto_script": "custom_pop.py",
+ "coupled_EDP": true,
+ "custom_model_dir": "CustomDLModels",
+ "detailed_results": false,
+ "ground_failure": false,
+ "log_file": true,
+ "path_to_auto_script": "/Users/adamzs/Examples/R2D/00_Built_ins/E9Tsunami/input_data/ruleset",
+ "path_to_custom_model_dir": "/Users/adamzs/Examples/R2D/00_Built_ins/E9Tsunami/input_data/DL_Models",
+ "regional": "true"
+ }
+ }
+ },
+ "Modeling": {},
+ "Simulation": {
+ "type": "IMasEDP"
+ },
+ "UQ": {},
+ "DL": {},
+ "Events": [
+ {
+ "EventFolderPath": "/Users/adamzs/Documents/R2D/LocalWorkDir/tmp.SimCenter/input_data",
+ "Events": [
+ [
+ "Site_0.csvx0x00000",
+ 1.0
+ ]
+ ],
+ "type": "intensityMeasure"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e9/CustomDLModels/damage_Tsunami.csv b/pelicun/tests/dl_calculation/e9/CustomDLModels/damage_Tsunami.csv
new file mode 100644
index 000000000..22689736c
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/CustomDLModels/damage_Tsunami.csv
@@ -0,0 +1,4 @@
+ID,Incomplete,Demand-Type,Demand-Unit,Demand-Offset,Demand-Directional,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS3-Family,LS3-Theta_0,LS3-Theta_1
+building.1,0,Peak Inundation Height,m,0,0,lognormal,0.159,0.8196,lognormal,0.8881,0.8391,lognormal,1.6578,0.8948
+building.2,0,Peak Inundation Height,m,0,0,lognormal,0.1979,0.745,lognormal,0.925,0.692,lognormal,1.7814,0.7196
+building.3andAbove,0,Peak Inundation Height,m,0,0,lognormal,0.1489,0.66,lognormal,1.1408,0.7981,lognormal,2.3491,0.7898
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e9/CustomDLModels/damage_Tsunami.json b/pelicun/tests/dl_calculation/e9/CustomDLModels/damage_Tsunami.json
new file mode 100644
index 000000000..58cca7e12
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/CustomDLModels/damage_Tsunami.json
@@ -0,0 +1,76 @@
+{
+ "_GeneralInformation": {
+ "ShortName": "Tsunami Damage Models from Suppasri et al. 2018",
+ "Description": "The models in this dataset are based on Suppasri et al. 2018",
+ "Version": "1.0"
+ },
+ "building.1": {
+ "Description": "1-story building",
+ "Comments": "INCORE reference id: 5bbbafcdec2309046c2745ca",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Major Damage"
+ }
+ },
+ "LS2": {
+ "DS2": {
+ "Description": "Complete Damage"
+ }
+ },
+ "LS3": {
+ "DS3": {
+ "Description": "Collapse"
+ }
+ }
+ }
+ },
+ "building.2": {
+ "Description": "2-story building",
+ "Comments": "INCORE reference id: 5bbbb077ec2309046c2745cc",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Major Damage"
+ }
+ },
+ "LS2": {
+ "DS2": {
+ "Description": "Complete Damage"
+ }
+ },
+ "LS3": {
+ "DS3": {
+ "Description": "Collapse"
+ }
+ }
+ }
+ },
+ "building.3andAbove": {
+ "Description": "Building with 3 or more stories",
+ "Comments": "INCORE reference id: 5bbbb021ec2309046c2745cb",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "LimitStates": {
+ "LS1": {
+ "DS1": {
+ "Description": "Major Damage"
+ }
+ },
+ "LS2": {
+ "DS2": {
+ "Description": "Complete Damage"
+ }
+ },
+ "LS3": {
+ "DS3": {
+ "Description": "Collapse"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_map.csv b/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_map.csv
new file mode 100644
index 000000000..4a585fe4f
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_map.csv
@@ -0,0 +1,4 @@
+,Repair
+building.1,generic
+building.2,generic
+building.3andAbove,generic
diff --git a/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_repair_Tsunami.csv b/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_repair_Tsunami.csv
new file mode 100644
index 000000000..3a2be592d
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_repair_Tsunami.csv
@@ -0,0 +1,2 @@
+ID,Incomplete,Quantity-Unit,DV-Unit,DS1-Theta_0,DS2-Theta_0,DS3-Theta_0
+generic-Cost,0,1 EA,loss_ratio,0.5,0.8,1
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_repair_Tsunami.json b/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_repair_Tsunami.json
new file mode 100644
index 000000000..5e634ff02
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/CustomDLModels/loss_repair_Tsunami.json
@@ -0,0 +1,61 @@
+{
+ "_GeneralInformation": {
+ "ShortName": "Tsunami Consequence Models from Suppasri et al. 2018",
+ "Description": "The models in this dataset are based on Suppasri et al. 2018",
+ "Version": "1.0",
+ "DecisionVariables": {
+ "Cost": "Repair costs are measured by loss ratios as percentage of replacement cost.",
+ }
+ },
+ "1": {
+ "Description": "1-story building",
+ "Comments": "INCORE reference id: 5bbbafcdec2309046c2745ca",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "DamageStates": {
+ "DS1": {
+ "Description": "Major Damage"
+ },
+ "DS2": {
+ "Description": "Complete Damage"
+ },
+ "DS3": {
+ "Description": "Collapse"
+ }
+ }
+ },
+ "2": {
+ "Description": "2-story building",
+ "Comments": "INCORE reference id: 5bbbb077ec2309046c2745cc",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "DamageStates": {
+ "DS1": {
+ "Description": "Major Damage"
+ },
+ "DS2": {
+ "Description": "Complete Damage"
+ },
+ "DS3": {
+ "Description": "Collapse"
+ }
+ }
+ },
+ "3andAbove": {
+ "Description": "Building with 3 or more stories",
+ "Comments": "INCORE reference id: 5bbbb021ec2309046c2745cb",
+ "SuggestedComponentBlockSize": "1 EA",
+ "RoundUpToIntegerQuantity": "True",
+ "DamageStates": {
+ "DS1": {
+ "Description": "Major Damage"
+ },
+ "DS2": {
+ "Description": "Complete Damage"
+ },
+ "DS3": {
+ "Description": "Collapse"
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e9/__init__.py b/pelicun/tests/dl_calculation/e9/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/dl_calculation/e9/custom_pop.py b/pelicun/tests/dl_calculation/e9/custom_pop.py
new file mode 100644
index 000000000..519433a2f
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/custom_pop.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+
+# Contributors:
+# Stevan Gavrilovic
+# Adam Zsarnoczay
+# Example 9 Tsunami, Seaside
+
+import pandas as pd
+
+
+def auto_populate(aim):
+ """
+ Populates the DL model for tsunami example using custom fragility functions
+
+ Assumptions
+ -----------
+ * Everything relevant to auto-population is provided in the
+ Buiding Information Model (AIM).
+ * The information expected in the AIM file is described in the
+ parse_AIM method.
+
+ Parameters
+ ----------
+ aim: dictionary
+ Contains the information that is available about the asset and will be
+ used to auto-populate the damage and loss model.
+
+ Returns
+ -------
+ GI_ap: dictionary
+ Contains the extended AIM data.
+ DL_ap: dictionary
+ Contains the auto-populated loss model.
+ """
+
+ # parse the AIM data
+ # print(aim) # Look in the AIM.json file to see what you can access here
+
+ # extract the General Information
+ GI = aim.get('GeneralInformation', None)
+
+    # GI_ap is the 'extended' AIM data; in this example there is no extension,
+    # so we simply copy the original GI data
+    GI_ap = GI.copy()
+
+    # Get the number of stories - note the attribute name needs to be exactly
+    # 'NumberOfStories'.
+ nstories = GI_ap.get('NumberOfStories', None)
+ if nstories is None:
+ print("NumberOfStories attribute missing from AIM file.")
+ return None, None, None
+
+    # Get the fragility tag according to some building attribute; the
+    # NumberOfStories in this case. The fragility tag needs to be unique, i.e.,
+    # one tag for each fragility group. The tag has to match the ID of a
+    # damage model in the custom model directory (see damage_Tsunami.csv and
+    # damage_Tsunami.json).
+
+ if nstories == 1:
+ fragility_function_tag = 'building.1'
+ elif nstories == 2:
+ fragility_function_tag = 'building.2'
+ elif nstories >= 3:
+ fragility_function_tag = 'building.3andAbove'
+    else:
+        print(f"Invalid number of stories provided: {nstories}")
+        return None, None, None
+
+ # prepare the component assignment
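+    # A single component row is created here; its name is the fragility tag,
+    # which keys into the custom damage and loss models.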
+ CMP = pd.DataFrame(
+ {f'{fragility_function_tag}': ['ea', 1, 1, 1, 'N/A']},
+ index=['Units', 'Location', 'Direction', 'Theta_0', 'Family'],
+ ).T
+
+ # Populate the DL_ap
+ DL_ap = {
+ "Asset": {
+ "ComponentAssignmentFile": "CMP_QNT.csv",
+ "ComponentDatabase": "None",
+ "ComponentDatabasePath": "CustomDLDataFolder/damage_Tsunami.csv",
+ },
+ "Damage": {"DamageProcess": "None"},
+ "Demands": {},
+ "Losses": {
+ "Repair": {
+ "ConsequenceDatabase": "None",
+ "ConsequenceDatabasePath": (
+ "CustomDLDataFolder/loss_repair_Tsunami.csv"
+ ),
+ "MapApproach": "User Defined",
+ "MapFilePath": "CustomDLDataFolder/loss_map.csv",
+ "DecisionVariables": {
+ "Cost": True,
+ "Carbon": False,
+ "Energy": False,
+ "Time": False,
+ },
+ }
+ },
+ }
+
+ return GI_ap, DL_ap, CMP
diff --git a/pelicun/tests/dl_calculation/e9/pelicun_command.txt b/pelicun/tests/dl_calculation/e9/pelicun_command.txt
new file mode 100644
index 000000000..779a00e3d
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/pelicun_command.txt
@@ -0,0 +1 @@
+python3.9 /DL_calculation.py --filenameDL 3500-AIM.json --demandFile response.csv --Realizations 500 --auto_script custom_pop.py --coupled_EDP True --custom_model_dir ./CustomDLModels --detailed_results False --ground_failure False --log_file True --regional true --resource_dir ./ --dirnameOutput ./
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/e9/response.csv b/pelicun/tests/dl_calculation/e9/response.csv
new file mode 100644
index 000000000..58b99f756
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/response.csv
@@ -0,0 +1,2 @@
+,1-PIH-1-1
+0.000000000000000000e+00,1.699999999999999956e+00
diff --git a/pelicun/tests/dl_calculation/e9/test_e9.py b/pelicun/tests/dl_calculation/e9/test_e9.py
new file mode 100644
index 000000000..3cf5a5186
--- /dev/null
+++ b/pelicun/tests/dl_calculation/e9/test_e9.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""DL Calculation Example 9."""
+
+from __future__ import annotations
+
+import os
+import shutil
+import tempfile
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+from pelicun.tools.DL_calculation import run_pelicun
+
+
+@pytest.fixture
+def obtain_temp_dir() -> Generator:
+ # get the path of this file
+ this_file = __file__
+
+ initial_dir = Path.cwd()
+ this_dir = str(Path(this_file).parent)
+
+ temp_dir = tempfile.mkdtemp()
+
+ yield this_dir, temp_dir
+
+ # go back to the right directory, otherwise any tests that follow
+ # could have issues.
+ os.chdir(initial_dir)
+
+
+def test_dl_calculation_9(obtain_temp_dir: tuple[str, str]) -> None:
+ this_dir, temp_dir = obtain_temp_dir
+
+ # Copy all input files to a temporary directory.
+ # All outputs will also go there.
+ # This approach is more robust to changes in the output files over
+ # time.
+ ruleset_files = [
+ path.resolve()
+ for path in Path('pelicun/tests/dl_calculation/rulesets').glob(
+ '*Rulesets.py'
+ )
+ ]
+
+ dl_models_dir = Path(f'{this_dir}/CustomDLModels').resolve()
+ os.chdir(this_dir)
+ # copy input files
+ for file_name in ('3500-AIM.json', 'response.csv', 'custom_pop.py'):
+ shutil.copy(f'{this_dir}/{file_name}', f'{temp_dir}/{file_name}')
+ # copy ruleset files
+ for file_path in ruleset_files:
+ shutil.copy(str(file_path), f'{temp_dir}/{file_path.name}')
+ # copy the custom models
+ shutil.copytree(str(dl_models_dir), f'{temp_dir}/{dl_models_dir.name}')
+ # change directory to there
+ os.chdir(temp_dir)
+
+ # run
+ run_pelicun(
+ demand_file='response.csv',
+ config_path='3500-AIM.json',
+ output_path=None,
+ coupled_edp=True,
+ realizations=100,
+ auto_script_path='custom_pop.py',
+ detailed_results=False,
+ output_format=None,
+ custom_model_dir='./CustomDLModels',
+ )
+
+ # now remove the ruleset files and auto script
+ for file_path in ruleset_files:
+ Path(f'{temp_dir}/{file_path.name}').unlink()
+ Path('custom_pop.py').unlink()
+
+ #
+ # Test files
+ #
+
+ # Ensure the number of files is as expected
+ num_files = sum(1 for entry in Path(temp_dir).iterdir() if entry.is_file())
+ assert num_files == 19
+
+ # Verify their names
+ files = {
+ '3500-AIM.json',
+ '3500-AIM_ap.json',
+ 'CMP_QNT.csv',
+ 'CMP_sample.json',
+ 'DEM_sample.json',
+ 'DL_summary.csv',
+ 'DL_summary.json',
+ 'DL_summary_stats.csv',
+ 'DL_summary_stats.json',
+ 'DMG_grp.json',
+ 'DMG_grp_stats.json',
+ 'DV_repair_agg.json',
+ 'DV_repair_agg_stats.json',
+ 'DV_repair_grp.json',
+ 'DV_repair_sample.json',
+ 'DV_repair_stats.json',
+ 'pelicun_log.txt',
+ 'pelicun_log_warnings.txt',
+ 'response.csv',
+ }
+
+ for file in files:
+ assert Path(f'{temp_dir}/{file}').is_file()
+
+ #
+ # Check the values: TODO
+ #
diff --git a/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py
new file mode 100644
index 000000000..60432ff41
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/BldgClassRulesets.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+
+
+def building_class(BIM, hazard):
+ """
+ Short description
+
+ Long description
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+ hazard: str
+ Supported hazard types: "wind", "inundation"
+
+ Returns
+ -------
+ bldg_class: str
+ One of the standard building class labels from HAZUS
+ """
+
+ # check hazard
+ if hazard not in ['wind', 'inundation']:
+ print(f'WARNING: The provided hazard is not recognized: {hazard}')
+
+ if hazard == 'wind':
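+        # The class is selected primarily by building type; occupancy class,
+        # design level, roof shape, and story count refine the choice below.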
+ if BIM['BuildingType'] == "Wood":
+ if ((BIM['OccupancyClass'] == 'RES1') or
+ ((BIM['RoofShape'] != 'flt') and (BIM['OccupancyClass'] == ''))):
+                # OccupancyClass = RES1
+                # Wood Single-Family Homes (WSF1 or WSF2)
+                # OR a default ('') occupancy with a non-flat roof
+                if BIM['RoofShape'] == 'flt':  # check for a misclassification
+                    # ensure the WSF has a gable roof (the default; note that
+                    # gable is more vulnerable than hip)
+                    BIM['RoofShape'] = 'gab'
+ bldg_class = 'WSF'
+ else:
+ # OccupancyClass = RES3, RES5, RES6, or COM8
+ # Wood Multi-Unit Hotel (WMUH1, WMUH2, or WMUH3)
+ bldg_class = 'WMUH'
+
+ elif BIM['BuildingType'] == "Steel":
+ if ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F'])):
+ # Steel Engineered Residential Building (SERBL, SERBM, SERBH)
+ bldg_class = 'SERB'
+ elif ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5',
+ 'COM6', 'COM7', 'COM8', 'COM9','COM10'])):
+ # Steel Engineered Commercial Building (SECBL, SECBM, SECBH)
+ bldg_class = 'SECB'
+ elif ((BIM['DesignLevel'] == 'PE') and
+ (BIM['OccupancyClass'] not in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F'])):
+ # Steel Pre-Engineered Metal Building (SPMBS, SPMBM, SPMBL)
+ bldg_class = 'SPMB'
+ else:
+ bldg_class = 'SECB'
+
+ elif BIM['BuildingType'] == "Concrete":
+ if ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F', 'RES5', 'RES6'])):
+ # Concrete Engineered Residential Building (CERBL, CERBM, CERBH)
+ bldg_class = 'CERB'
+ elif ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5',
+ 'COM6', 'COM7', 'COM8', 'COM9','COM10'])):
+ # Concrete Engineered Commercial Building (CECBL, CECBM, CECBH)
+ bldg_class = 'CECB'
+ else:
+ bldg_class = 'CECB'
+
+ elif BIM['BuildingType'] == "Masonry":
+ if BIM['OccupancyClass'] == 'RES1':
+ # OccupancyClass = RES1
+ # Masonry Single-Family Homes (MSF1 or MSF2)
+ bldg_class = 'MSF'
+ elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F']) and (BIM['DesignLevel'] == 'E')):
+ # Masonry Engineered Residential Building (MERBL, MERBM, MERBH)
+ bldg_class = 'MERB'
+ elif ((BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4',
+ 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
+ 'COM10']) and (BIM['DesignLevel'] == 'E')):
+ # Masonry Engineered Commercial Building (MECBL, MECBM, MECBH)
+ bldg_class = 'MECB'
+ elif BIM['OccupancyClass'] in ['IND1', 'IND2', 'IND3', 'IND4', 'IND5', 'IND6']:
+ # Masonry Low-Rise Masonry Warehouse/Factory (MLRI)
+ bldg_class = 'MLRI'
+ elif BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F', 'RES5', 'RES6', 'COM8']:
+ # OccupancyClass = RES3X or COM8
+ # Masonry Multi-Unit Hotel/Motel (MMUH1, MMUH2, or MMUH3)
+ bldg_class = 'MMUH'
+ elif ((BIM['NumberOfStories'] == 1) and
+ (BIM['OccupancyClass'] in ['COM1', 'COM2'])):
+ # Low-Rise Masonry Strip Mall (MLRM1 or MLRM2)
+ bldg_class = 'MLRM'
+ else:
+ bldg_class = 'MECB' # for others not covered by the above
+ #elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ # 'RES3E', 'RES3F', 'RES5', 'RES6',
+ # 'COM8']) and (BIM['DesignLevel'] in ['NE', 'ME'])):
+ # # Masonry Multi-Unit Hotel/Motel Non-Engineered
+ # # (MMUH1NE, MMUH2NE, or MMUH3NE)
+ # bldg_class = 'MMUHNE'
+
+ elif BIM['BuildingType'] == "Manufactured":
+ bldg_class = 'MH'
+
+    else:
+        # if an unknown/NaN building type is provided, use the dominant class
+        bldg_class = 'WMUH'
+
+ return bldg_class
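+
+
+# Minimal usage sketch (illustrative only; the BIM keys below are assumptions
+# inferred from the checks above, not a complete schema):
+if __name__ == '__main__':
+    example_BIM = {
+        'BuildingType': 'Wood',
+        'OccupancyClass': 'RES1',
+        'RoofShape': 'gab',
+    }
+    print(building_class(example_BIM, 'wind'))  # prints: WSF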
diff --git a/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py
new file mode 100644
index 000000000..b646946f0
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/BuildingClassRulesets.py
@@ -0,0 +1,174 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+
+def building_class(BIM, hazard):
+ """
+    Identify the HAZUS building class for a building.
+
+    Maps the building characteristics in the BIM dict to one of the
+    standard HAZUS building class labels (e.g., WSF, WMUH, SECB, MLRI).
+
+    Parameters
+    ----------
+    BIM: dictionary
+        Information about the building characteristics.
+    hazard: str
+        Supported hazard types: "wind", "inundation"
+
+ Returns
+ -------
+ bldg_class: str
+ One of the standard building class labels from HAZUS
+ """
+
+ # check hazard
+ if hazard not in ['wind', 'inundation']:
+ print(f'WARNING: The provided hazard is not recognized: {hazard}')
+
+ if hazard == 'wind':
+
+ if BIM['BuildingType'] == 'Wood':
+ if ((BIM['OccupancyClass'] == 'RES1') or
+ ((BIM['RoofShape'] != 'flt') and (BIM['OccupancyClass'] == ''))):
+ # BuildingType = 3001
+ # OccupancyClass = RES1
+ # Wood Single-Family Homes (WSF1 or WSF2)
+ # OR roof type = flat (HAZUS can only map flat to WSF1)
+ # OR default (by '')
+            if BIM['RoofShape'] == 'flt': # checking if there is a misclassification
+                BIM['RoofShape'] = 'gab' # ensure the WSF has a gable roof by default (note: gable is more vulnerable than hip)
+ bldg_class = 'WSF'
+ else:
+ # BuildingType = 3001
+ # OccupancyClass = RES3, RES5, RES6, or COM8
+ # Wood Multi-Unit Hotel (WMUH1, WMUH2, or WMUH3)
+ bldg_class = 'WMUH'
+ elif BIM['BuildingType'] == 'Steel':
+ if ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F'])):
+ # BuildingType = 3002
+ # Steel Engineered Residential Building (SERBL, SERBM, SERBH)
+ bldg_class = 'SERB'
+ elif ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5',
+ 'COM6', 'COM7', 'COM8', 'COM9','COM10'])):
+ # BuildingType = 3002
+ # Steel Engineered Commercial Building (SECBL, SECBM, SECBH)
+ bldg_class = 'SECB'
+ elif ((BIM['DesignLevel'] == 'PE') and
+ (BIM['OccupancyClass'] not in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F'])):
+ # BuildingType = 3002
+ # Steel Pre-Engineered Metal Building (SPMBS, SPMBM, SPMBL)
+ bldg_class = 'SPMB'
+ else:
+ bldg_class = 'SECB'
+ elif BIM['BuildingType'] == 'Concrete':
+ if ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F', 'RES5', 'RES6'])):
+ # BuildingType = 3003
+ # Concrete Engineered Residential Building (CERBL, CERBM, CERBH)
+ bldg_class = 'CERB'
+ elif ((BIM['DesignLevel'] == 'E') and
+ (BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4', 'COM5',
+ 'COM6', 'COM7', 'COM8', 'COM9','COM10'])):
+ # BuildingType = 3003
+ # Concrete Engineered Commercial Building (CECBL, CECBM, CECBH)
+ bldg_class = 'CECB'
+ else:
+ bldg_class = 'CECB'
+ elif BIM['BuildingType'] == 'Masonry':
+ if BIM['OccupancyClass'] == 'RES1':
+ # BuildingType = 3004
+ # OccupancyClass = RES1
+ # Masonry Single-Family Homes (MSF1 or MSF2)
+ bldg_class = 'MSF'
+ elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F']) and (BIM['DesignLevel'] == 'E')):
+ # BuildingType = 3004
+ # Masonry Engineered Residential Building (MERBL, MERBM, MERBH)
+ bldg_class = 'MERB'
+ elif ((BIM['OccupancyClass'] in ['COM1', 'COM2', 'COM3', 'COM4',
+ 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
+ 'COM10']) and (BIM['DesignLevel'] == 'E')):
+ # BuildingType = 3004
+ # Masonry Engineered Commercial Building (MECBL, MECBM, MECBH)
+ bldg_class = 'MECB'
+ elif BIM['OccupancyClass'] in ['IND1', 'IND2', 'IND3', 'IND4', 'IND5', 'IND6']:
+ # BuildingType = 3004
+            # Low-Rise Masonry Warehouse/Factory (MLRI)
+ bldg_class = 'MLRI'
+ elif BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ 'RES3E', 'RES3F', 'RES5', 'RES6', 'COM8']:
+ # BuildingType = 3004
+ # OccupancyClass = RES3X or COM8
+ # Masonry Multi-Unit Hotel/Motel (MMUH1, MMUH2, or MMUH3)
+ bldg_class = 'MMUH'
+ elif ((BIM['NumberOfStories'] == 1) and
+ (BIM['OccupancyClass'] in ['COM1', 'COM2'])):
+ # BuildingType = 3004
+ # Low-Rise Masonry Strip Mall (MLRM1 or MLRM2)
+ bldg_class = 'MLRM'
+ else:
+ bldg_class = 'MECB' # for others not covered by the above
+ #elif ((BIM['OccupancyClass'] in ['RES3A', 'RES3B', 'RES3C', 'RES3D',
+ # 'RES3E', 'RES3F', 'RES5', 'RES6',
+ # 'COM8']) and (BIM['DesignLevel'] in ['NE', 'ME'])):
+ # # BuildingType = 3004
+ # # Masonry Multi-Unit Hotel/Motel Non-Engineered
+ # # (MMUH1NE, MMUH2NE, or MMUH3NE)
+ # return 'MMUHNE'
+ elif BIM['BuildingType'] == 'Manufactured':
+ bldg_class = 'MH'
+
+    else:
+        # if an unknown/NaN building type is provided, use the dominant class
+        bldg_class = 'WMUH'
+
+ return bldg_class
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py
new file mode 100644
index 000000000..658d2e4a3
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/FloodAssmRulesets.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+# Frank McKenna
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+
+def Assm_config(BIM):
+ """
+    Rules to identify the flood vulnerability category
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Flood Type
+    if BIM['FloodZone'] in ['AO']:
+        flood_type = 'raz' # Riverine/A-Zone
+    elif BIM['FloodZone'] in ['AE', 'AH', 'A']:
+        flood_type = 'caz' # Coastal/A-Zone
+    elif BIM['FloodZone'] in ['VE']:
+        flood_type = 'cvz' # Coastal/V-Zone
+    else:
+        flood_type = 'caz' # Default
+
+ # PostFIRM
+ PostFIRM = False # Default
+ city_list = ['Absecon', 'Atlantic', 'Brigantine', 'Buena', 'Buena Vista',
+ 'Corbin City', 'Egg Harbor City', 'Egg Harbor', 'Estell Manor',
+ 'Folsom', 'Galloway', 'Hamilton', 'Hammonton', 'Linwood',
+ 'Longport', 'Margate City', 'Mullica', 'Northfield',
+ 'Pleasantville', 'Port Republic', 'Somers Point',
+ 'Ventnor City', 'Weymouth']
+ year_list = [1976, 1971, 1971, 1983, 1979, 1981, 1982, 1983, 1978, 1982,
+ 1983, 1977, 1982, 1983, 1974, 1974, 1982, 1979, 1983, 1983,
+ 1982, 1971, 1979]
+    for city, firm_year in zip(city_list, year_list):
+        PostFIRM = ((BIM['City'] == city) and (year > firm_year)) or PostFIRM
+
+    # fl_assm
+    fl_assm = f"fl_surge_assm_{BIM['OccupancyClass']}_{int(PostFIRM)}_{flood_type}"
+
+    # hu_assm
+    hu_assm = f"hu_surge_assm_{BIM['OccupancyClass']}_{int(PostFIRM)}"
+
+ return hu_assm, fl_assm
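+
+
+# Minimal usage sketch (illustrative only; the BIM keys below are assumptions
+# inferred from the rules above, not a complete schema):
+if __name__ == '__main__':
+    example_BIM = {'YearBuilt': 1990, 'FloodZone': 'AE',
+                   'City': 'Atlantic', 'OccupancyClass': 'RES1'}
+    print(Assm_config(example_BIM))
+    # prints: ('hu_surge_assm_RES1_1', 'fl_surge_assm_RES1_1_caz')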
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py
new file mode 100644
index 000000000..702c829ec
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/FloodClassRulesets.py
@@ -0,0 +1,199 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import numpy as np
+
+def FL_config(BIM):
+ """
+    Rules to identify the flood vulnerability category
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Flood Type
+    if BIM['FloodZone'] == 'AO':
+        flood_type = 'raz' # Riverine/A-Zone
+    elif BIM['FloodZone'] in ['A', 'AE']:
+        flood_type = 'cvz' # Coastal-Zone
+    elif BIM['FloodZone'].startswith('V'):
+        flood_type = 'cvz' # Coastal-Zone
+    else:
+        flood_type = 'cvz' # Default
+
+ # First Floor Elevation (FFE)
+ if flood_type in ['raz', 'caz']:
+ FFE = BIM['FirstFloorElevation']
+ else:
+ FFE = BIM['FirstFloorElevation'] - 1.0
+
+ # PostFIRM
+ PostFIRM = False # Default
+ city_list = ['Absecon', 'Atlantic', 'Brigantine', 'Buena', 'Buena Vista',
+ 'Corbin City', 'Egg Harbor City', 'Egg Harbor', 'Estell Manor',
+ 'Folsom', 'Galloway', 'Hamilton', 'Hammonton', 'Linwood',
+ 'Longport', 'Margate City', 'Mullica', 'Northfield',
+ 'Pleasantville', 'Port Republic', 'Somers Point',
+ 'Ventnor City', 'Weymouth']
+ year_list = [1976, 1971, 1971, 1983, 1979, 1981, 1982, 1983, 1978, 1982,
+ 1983, 1977, 1982, 1983, 1974, 1974, 1982, 1979, 1983, 1983,
+ 1982, 1971, 1979]
+    for city, firm_year in zip(city_list, year_list):
+        PostFIRM = ((BIM['City'] == city) and (year > firm_year)) or PostFIRM
+
+ # Basement Type
+ if BIM['SplitLevel'] and (BIM['FoundationType'] == 3504):
+ bmt_type = 'spt' # Split-Level Basement
+ elif BIM['FoundationType'] in [3501, 3502, 3503, 3505, 3506, 3507]:
+ bmt_type = 'bn' # No Basement
+ elif (not BIM['SplitLevel']) and (BIM['FoundationType'] == 3504):
+ bmt_type = 'bw' # Basement
+ else:
+ bmt_type = 'bw' # Default
+
+ # Duration
+ dur = 'short'
+
+ # Occupancy Type
+ if BIM['OccupancyClass'] == 'RES1':
+ if BIM['NumberOfStories'] == 1:
+ if flood_type == 'raz':
+ OT = 'SF1XA'
+ elif flood_type == 'cvz':
+ OT = 'SF1XV'
+ else:
+ if bmt_type == 'nav':
+ if flood_type == 'raz':
+ OT = 'SF2XA'
+ elif flood_type == 'cvz':
+ OT = 'SF2XV'
+ elif bmt_type == 'bmt':
+ if flood_type == 'raz':
+ OT = 'SF2BA'
+ elif flood_type == 'cvz':
+ OT = 'SF2BV'
+ elif bmt_type == 'spt':
+ if flood_type == 'raz':
+ OT = 'SF2SA'
+ elif flood_type == 'cvz':
+ OT = 'SF2SV'
+ elif 'RES3' in BIM['OccupancyClass']:
+ OT = 'APT'
+ else:
+ ap_OT = {
+ 'RES2': 'MH',
+ 'RES4': 'HOT',
+ 'RES5': 'NURSE',
+ 'RES6': 'NURSE',
+ 'COM1': 'RETAL',
+ 'COM2': 'WHOLE',
+ 'COM3': 'SERVICE',
+ 'COM4': 'OFFICE',
+ 'COM5': 'BANK',
+ 'COM6': 'HOSP',
+ 'COM7': 'MED',
+ 'COM8': 'REC',
+ 'COM9': 'THEAT',
+ 'COM10': 'GARAGE',
+ 'IND1': 'INDH',
+ 'IND2': 'INDL',
+ 'IND3': 'CHEM',
+ 'IND4': 'PROC',
+ 'IND5': 'CHEM',
+ 'IND6': 'CONST',
+ 'AGR1': 'AGRI',
+ 'REL1': 'RELIG',
+ 'GOV1': 'CITY',
+ 'GOV2': 'EMERG',
+ 'EDU1': 'SCHOOL',
+ 'EDU2': 'SCHOOL'
+ }
+        OT = ap_OT[BIM['OccupancyClass']]
+
+
+    if BIM['OccupancyClass'] not in ['RES1', 'RES2']:
+        if 'RES3' in BIM['OccupancyClass']:
+            fl_config = 'fl_RES3'
+        else:
+            fl_config = f"fl_{BIM['OccupancyClass']}"
+    elif BIM['OccupancyClass'] == 'RES2':
+        fl_config = f"fl_{BIM['OccupancyClass']}_{flood_type}"
+    else:
+        if bmt_type == 'spt':
+            fl_config = f"fl_{BIM['OccupancyClass']}_sl_bw_{flood_type}"
+        else:
+            st = 's' + str(np.min([BIM['NumberOfStories'], 3]))
+            fl_config = f"fl_{BIM['OccupancyClass']}_{st}_{bmt_type}_{flood_type}"
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ FloodType = flood_type,
+ BasementType=bmt_type,
+ PostFIRM=PostFIRM,
+ ))
+
+ return fl_config
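+
+
+# Minimal usage sketch (illustrative only; the BIM keys below are assumptions
+# inferred from the rules above, not a complete schema):
+if __name__ == '__main__':
+    example_BIM = {'YearBuilt': 1990, 'FloodZone': 'AE', 'City': 'Atlantic',
+                   'SplitLevel': False, 'FoundationType': 3504,
+                   'FirstFloorElevation': 10.0,
+                   'OccupancyClass': 'RES1', 'NumberOfStories': 2}
+    print(FL_config(example_BIM))  # prints: fl_RES1_s2_bw_cvz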
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py b/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py
new file mode 100644
index 000000000..882d8d933
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/FloodRulesets.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import numpy as np
+
+def FL_config(BIM):
+ """
+    Rules to identify the flood vulnerability category
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Flood Type
+    if BIM['FloodZone'] == 'AO':
+        flood_type = 'raz' # Riverine/A-Zone
+    elif BIM['FloodZone'] in ['A', 'AE']:
+        flood_type = 'cvz' # Coastal-Zone
+    elif BIM['FloodZone'].startswith('V'):
+        flood_type = 'cvz' # Coastal-Zone
+    else:
+        flood_type = 'cvz' # Default
+
+ # First Floor Elevation (FFE)
+ if flood_type in ['raz', 'caz']:
+ FFE = BIM['FirstFloorElevation']
+ else:
+ FFE = BIM['FirstFloorElevation'] - 1.0
+
+ # PostFIRM
+ PostFIRM = False # Default
+ city_list = ['Absecon', 'Atlantic', 'Brigantine', 'Buena', 'Buena Vista',
+ 'Corbin City', 'Egg Harbor City', 'Egg Harbor', 'Estell Manor',
+ 'Folsom', 'Galloway', 'Hamilton', 'Hammonton', 'Linwood',
+ 'Longport', 'Margate City', 'Mullica', 'Northfield',
+ 'Pleasantville', 'Port Republic', 'Somers Point',
+ 'Ventnor City', 'Weymouth']
+ year_list = [1976, 1971, 1971, 1983, 1979, 1981, 1982, 1983, 1978, 1982,
+ 1983, 1977, 1982, 1983, 1974, 1974, 1982, 1979, 1983, 1983,
+ 1982, 1971, 1979]
+    for city, firm_year in zip(city_list, year_list):
+        PostFIRM = ((BIM['City'] == city) and (year > firm_year)) or PostFIRM
+
+ # Basement Type
+ if BIM['SplitLevel'] and (BIM['FoundationType'] == 3504):
+ bmt_type = 'spt' # Split-Level Basement
+ elif BIM['FoundationType'] in [3501, 3502, 3503, 3505, 3506, 3507]:
+ bmt_type = 'bn' # No Basement
+ elif (not BIM['SplitLevel']) and (BIM['FoundationType'] == 3504):
+ bmt_type = 'bw' # Basement
+ else:
+ bmt_type = 'bw' # Default
+
+ # Duration
+ dur = 'short'
+
+ # Occupancy Type
+ if BIM['OccupancyClass'] == 'RES1':
+ if BIM['NumberOfStories'] == 1:
+ if flood_type == 'raz':
+ OT = 'SF1XA'
+ elif flood_type == 'cvz':
+ OT = 'SF1XV'
+ else:
+ if bmt_type == 'nav':
+ if flood_type == 'raz':
+ OT = 'SF2XA'
+ elif flood_type == 'cvz':
+ OT = 'SF2XV'
+ elif bmt_type == 'bmt':
+ if flood_type == 'raz':
+ OT = 'SF2BA'
+ elif flood_type == 'cvz':
+ OT = 'SF2BV'
+ elif bmt_type == 'spt':
+ if flood_type == 'raz':
+ OT = 'SF2SA'
+ elif flood_type == 'cvz':
+ OT = 'SF2SV'
+ elif 'RES3' in BIM['OccupancyClass']:
+ OT = 'APT'
+ else:
+ ap_OT = {
+ 'RES2': 'MH',
+ 'RES4': 'HOT',
+ 'RES5': 'NURSE',
+ 'RES6': 'NURSE',
+ 'COM1': 'RETAL',
+ 'COM2': 'WHOLE',
+ 'COM3': 'SERVICE',
+ 'COM4': 'OFFICE',
+ 'COM5': 'BANK',
+ 'COM6': 'HOSP',
+ 'COM7': 'MED',
+ 'COM8': 'REC',
+ 'COM9': 'THEAT',
+ 'COM10': 'GARAGE',
+ 'IND1': 'INDH',
+ 'IND2': 'INDL',
+ 'IND3': 'CHEM',
+ 'IND4': 'PROC',
+ 'IND5': 'CHEM',
+ 'IND6': 'CONST',
+ 'AGR1': 'AGRI',
+ 'REL1': 'RELIG',
+ 'GOV1': 'CITY',
+ 'GOV2': 'EMERG',
+ 'EDU1': 'SCHOOL',
+ 'EDU2': 'SCHOOL'
+ }
+        OT = ap_OT[BIM['OccupancyClass']]
+
+
+    if BIM['OccupancyClass'] not in ['RES1', 'RES2']:
+        if 'RES3' in BIM['OccupancyClass']:
+            fl_config = 'fl_RES3'
+        else:
+            fl_config = f"fl_{BIM['OccupancyClass']}"
+    elif BIM['OccupancyClass'] == 'RES2':
+        fl_config = f"fl_{BIM['OccupancyClass']}_{flood_type}"
+    else:
+        if bmt_type == 'spt':
+            fl_config = f"fl_{BIM['OccupancyClass']}_sl_bw_{flood_type}"
+        else:
+            st = 's' + str(np.min([BIM['NumberOfStories'], 3]))
+            fl_config = f"fl_{BIM['OccupancyClass']}_{st}_{bmt_type}_{flood_type}"
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ FloodType = flood_type,
+ BasementType=bmt_type,
+ PostFIRM=PostFIRM,
+ ))
+
+ return fl_config
+
diff --git a/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py b/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py
new file mode 100644
index 000000000..cfd50c7f2
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/MetaVarRulesets.py
@@ -0,0 +1,426 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import numpy as np
+
+def parse_BIM(BIM_in, location, hazards):
+ """
+ Parses the information provided in the BIM model.
+
+    The attributes below list the expected metadata in the BIM file.
+
+ Parameters
+ ----------
+ location: str
+ Supported locations:
+ NJ - New Jersey
+ LA - Louisiana
+    hazards: list of str
+ Supported hazard types: "wind", "inundation"
+
+ BIM attributes
+ --------------
+ NumberOfStories: str
+ Number of stories
+ YearBuilt: str
+ Year of construction.
+ RoofShape: {'hip', 'hipped', 'gabled', 'gable', 'flat'}
+ One of the listed roof shapes that best describes the building.
+ OccupancyType: str
+ Occupancy type.
+ BuildingType: str
+ Core construction material type
+ DWSII: float
+ Design wind speed II as per ASCE 7 in mph
+ Area: float
+ Plan area in ft2.
+ LULC: integer
+ Land Use/Land Cover category (typically location-specific)
+
+ Returns
+ -------
+ BIM: dictionary
+ Parsed building characteristics.
+ """
+
+ # check location
+ if location not in ['LA', 'NJ']:
+ print(f'WARNING: The provided location is not recognized: {location}')
+
+ # check hazard
+ for hazard in hazards:
+ if hazard not in ['wind', 'inundation']:
+ print(f'WARNING: The provided hazard is not recognized: {hazard}')
+
+ # initialize the BIM dict
+ BIM = {}
+
+ if 'wind' in hazards:
+ # maps roof type to the internal representation
+ ap_RoofType = {
+ 'hip' : 'hip',
+ 'hipped': 'hip',
+ 'Hip' : 'hip',
+ 'gabled': 'gab',
+ 'gable' : 'gab',
+ 'Gable' : 'gab',
+ 'flat' : 'flt',
+ 'Flat' : 'flt'
+ }
+
+ # maps roof system to the internal representation
+ ap_RoofSystem = {
+ 'Wood': 'trs',
+ 'OWSJ': 'ows',
+ 'N/A': 'trs'
+ }
+ roof_system = BIM_in.get('RoofSystem', 'Wood')
+
+ # maps number of units to the internal representation
+ ap_NoUnits = {
+ 'Single': 'sgl',
+ 'Multiple': 'mlt',
+ 'Multi': 'mlt',
+ 'nav': 'nav'
+ }
+
+ # Average January Temp.
+ ap_ajt = {
+ 'Above': 'above',
+ 'Below': 'below'
+ }
+
+ # Year built
+ alname_yearbuilt = ['yearBuilt', 'YearBuiltMODIV', 'YearBuiltNJDEP']
+
+ yearbuilt = BIM_in.get('YearBuilt', None)
+
+        # try the alternative attribute names; fall back to a default if none works
+ if yearbuilt is None:
+ for alname in alname_yearbuilt:
+ if alname in BIM_in.keys():
+ yearbuilt = BIM_in[alname]
+ break
+
+ if yearbuilt is None:
+ yearbuilt = 1985
+
+ # Number of Stories
+ alname_nstories = ['stories', 'NumberofStories0', 'NumberofStories', 'NumberofStories1']
+
+ nstories = BIM_in.get('NumberOfStories', None)
+
+ if nstories is None:
+ for alname in alname_nstories:
+ if alname in BIM_in.keys():
+ nstories = BIM_in[alname]
+ break
+
+ if nstories is None:
+ raise KeyError("NumberOfStories attribute missing, cannot autopopulate")
+
+ # Plan Area
+ alname_area = ['area', 'PlanArea1', 'Area', 'PlanArea0']
+
+ area = BIM_in.get('PlanArea', None)
+
+ if area is None:
+ for alname in alname_area:
+ if alname in BIM_in.keys():
+ area = BIM_in[alname]
+ break
+
+ if area is None:
+ raise KeyError("PlanArea attribute missing, cannot autopopulate")
+
+ # Design Wind Speed
+ alname_dws = ['DWSII', 'DesignWindSpeed']
+
+ dws = BIM_in.get('DesignWindSpeed', None)
+
+ if dws is None:
+ for alname in alname_dws:
+ if alname in BIM_in.keys():
+ dws = BIM_in[alname]
+ break
+
+ if dws is None:
+ raise KeyError("DesignWindSpeed attribute missing, cannot autopopulate")
+
+ # occupancy type
+ alname_occupancy = ['occupancy', 'OccupancyClass']
+
+ oc = BIM_in.get('OccupancyClass', None)
+
+ if oc is None:
+ for alname in alname_occupancy:
+ if alname in BIM_in.keys():
+ oc = BIM_in[alname]
+ break
+
+ if oc is None:
+ raise KeyError("OccupancyClass attribute missing, cannot autopopulate")
+
+        # a generic RES3 input is converted to the default subtype RES3A
+ if oc == 'RES3':
+ oc = 'RES3A'
+
+ # maps for BuildingType
+ ap_BuildingType_NJ = {
+            # NJDEP building type codes mapped to core construction materials
+ 3001: 'Wood',
+ 3002: 'Steel',
+ 3003: 'Concrete',
+ 3004: 'Masonry',
+ 3005: 'Manufactured',
+ }
+ if location == 'NJ':
+            # NJDEP code for building type needs to be converted
+ buildingtype = ap_BuildingType_NJ[BIM_in['BuildingType']]
+
+ elif location == 'LA':
+ # standard input should provide the building type as a string
+ buildingtype = BIM_in['BuildingType']
+
+ # maps for design level (Marginal Engineered is mapped to Engineered as default)
+ ap_DesignLevel = {
+ 'E': 'E',
+ 'NE': 'NE',
+ 'PE': 'PE',
+ 'ME': 'E'
+ }
+ design_level = BIM_in.get('DesignLevel','E')
+
+ # flood zone
+ flood_zone = BIM_in.get('FloodZone', 'X')
+
+ # add the parsed data to the BIM dict
+ BIM.update(dict(
+ OccupancyClass=str(oc),
+ BuildingType=buildingtype,
+ YearBuilt=int(yearbuilt),
+ NumberOfStories=int(nstories),
+ PlanArea=float(area),
+ V_ult=float(dws),
+ AvgJanTemp=ap_ajt[BIM_in.get('AvgJanTemp','Below')],
+ RoofShape=ap_RoofType[BIM_in['RoofShape']],
+ RoofSlope=float(BIM_in.get('RoofSlope',0.25)), # default 0.25
+ SheathingThickness=float(BIM_in.get('SheathingThick',1.0)), # default 1.0
+ RoofSystem=str(ap_RoofSystem[roof_system]), # only valid for masonry structures
+ Garage=float(BIM_in.get('Garage',-1.0)),
+ LULC=BIM_in.get('LULC',-1),
+ MeanRoofHt=float(BIM_in.get('MeanRoofHt',15.0)), # default 15
+ WindowArea=float(BIM_in.get('WindowArea',0.20)),
+ WindZone=str(BIM_in.get('WindZone', 'I')),
+ FloodZone =str(flood_zone)
+ ))
+
+ if 'inundation' in hazards:
+
+ # maps for split level
+ ap_SplitLevel = {
+ 'NO': 0,
+ 'YES': 1
+ }
+
+ # foundation type
+ foundation = BIM_in.get('FoundationType',3501)
+
+ # number of units
+ nunits = BIM_in.get('NoUnits',1)
+
+ # maps for flood zone
+ ap_FloodZone = {
+ # Coastal areas with a 1% or greater chance of flooding and an
+ # additional hazard associated with storm waves.
+ 6101: 'VE',
+ 6102: 'VE',
+ 6103: 'AE',
+ 6104: 'AE',
+ 6105: 'AO',
+ 6106: 'AE',
+ 6107: 'AH',
+ 6108: 'AO',
+ 6109: 'A',
+ 6110: 'X',
+ 6111: 'X',
+ 6112: 'X',
+ 6113: 'OW',
+ 6114: 'D',
+ 6115: 'NA',
+ 6119: 'NA'
+ }
+        if isinstance(BIM_in['FloodZone'], int):
+ # NJDEP code for flood zone (conversion to the FEMA designations)
+ floodzone_fema = ap_FloodZone[BIM_in['FloodZone']]
+ else:
+ # standard input should follow the FEMA flood zone designations
+ floodzone_fema = BIM_in['FloodZone']
+
+ # add the parsed data to the BIM dict
+ BIM.update(dict(
+ DesignLevel=str(ap_DesignLevel[design_level]), # default engineered
+ NumberOfUnits=int(nunits),
+ FirstFloorElevation=float(BIM_in.get('FirstFloorHt1',10.0)),
+            SplitLevel=bool(ap_SplitLevel[BIM_in.get('SplitLevel','NO')]), # default: no
+ FoundationType=int(foundation), # default: pile
+ City=BIM_in.get('City','NA')
+ ))
+
+ # add inferred, generic meta-variables
+
+ if 'wind' in hazards:
+
+ # Hurricane-Prone Region (HRP)
+ # Areas vulnerable to hurricane, defined as the U.S. Atlantic Ocean and
+ # Gulf of Mexico coasts where the ultimate design wind speed, V_ult is
+ # greater than a pre-defined limit.
+ if BIM['YearBuilt'] >= 2016:
+ # The limit is 115 mph in IRC 2015
+ HPR = BIM['V_ult'] > 115.0
+ else:
+ # The limit is 90 mph in IRC 2009 and earlier versions
+ HPR = BIM['V_ult'] > 90.0
+
+ # Wind Borne Debris
+ # Areas within hurricane-prone regions are affected by debris if one of
+ # the following two conditions holds:
+ # (1) Within 1 mile (1.61 km) of the coastal mean high water line where
+ # the ultimate design wind speed is greater than flood_lim.
+ # (2) In areas where the ultimate design wind speed is greater than
+ # general_lim
+ # The flood_lim and general_lim limits depend on the year of construction
+ if BIM['YearBuilt'] >= 2016:
+ # In IRC 2015:
+ flood_lim = 130.0 # mph
+ general_lim = 140.0 # mph
+ else:
+ # In IRC 2009 and earlier versions
+ flood_lim = 110.0 # mph
+ general_lim = 120.0 # mph
+ # Areas within hurricane-prone regions located in accordance with
+ # one of the following:
+ # (1) Within 1 mile (1.61 km) of the coastal mean high water line
+ # where the ultimate design wind speed is 130 mph (58m/s) or greater.
+ # (2) In areas where the ultimate design wind speed is 140 mph (63.5m/s)
+ # or greater. (Definitions: Chapter 2, 2015 NJ Residential Code)
+ if not HPR:
+ WBD = False
+ else:
+ WBD = (((BIM['FloodZone'].startswith('A') or BIM['FloodZone'].startswith('V')) and
+ BIM['V_ult'] >= flood_lim) or (BIM['V_ult'] >= general_lim))
+
+ # Terrain
+ # open (0.03) = 3
+ # light suburban (0.15) = 15
+ # suburban (0.35) = 35
+ # light trees (0.70) = 70
+ # trees (1.00) = 100
+ # Mapped to Land Use Categories in NJ (see https://www.state.nj.us/dep/gis/
+ # digidownload/metadata/lulc02/anderson2002.html) by T. Wu group
+ # (see internal report on roughness calculations, Table 4).
+        # These are mapped to HAZUS definitions as follows:
+ # Open Water (5400s) with zo=0.01 and barren land (7600) with zo=0.04 assume Open
+ # Open Space Developed, Low Intensity Developed, Medium Intensity Developed
+ # (1110-1140) assumed zo=0.35-0.4 assume Suburban
+ # High Intensity Developed (1600) with zo=0.6 assume Lt. Tree
+ # Forests of all classes (4100-4300) assumed zo=0.6 assume Lt. Tree
+ # Shrub (4400) with zo=0.06 assume Open
+ # Grasslands, pastures and agricultural areas (2000 series) with
+ # zo=0.1-0.15 assume Lt. Suburban
+ # Woody Wetlands (6250) with zo=0.3 assume suburban
+ # Emergent Herbaceous Wetlands (6240) with zo=0.03 assume Open
+ # Note: HAZUS category of trees (1.00) does not apply to any LU/LC in NJ
+ terrain = 15 # Default in Reorganized Rulesets - WIND
+ if location == "NJ":
+ if (BIM['FloodZone'].startswith('V') or BIM['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']):
+ terrain = 3
+ elif ((BIM['LULC'] >= 5000) and (BIM['LULC'] <= 5999)):
+ terrain = 3 # Open
+ elif ((BIM['LULC'] == 4400) or (BIM['LULC'] == 6240)) or (BIM['LULC'] == 7600):
+ terrain = 3 # Open
+ elif ((BIM['LULC'] >= 2000) and (BIM['LULC'] <= 2999)):
+ terrain = 15 # Light suburban
+ elif ((BIM['LULC'] >= 1110) and (BIM['LULC'] <= 1140)) or ((BIM['LULC'] >= 6250) and (BIM['LULC'] <= 6252)):
+ terrain = 35 # Suburban
+ elif ((BIM['LULC'] >= 4100) and (BIM['LULC'] <= 4300)) or (BIM['LULC'] == 1600):
+ terrain = 70 # light trees
+ elif location == "LA":
+ if (BIM['FloodZone'].startswith('V') or BIM['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']):
+ terrain = 3
+ elif ((BIM['LULC'] >= 50) and (BIM['LULC'] <= 59)):
+ terrain = 3 # Open
+ elif ((BIM['LULC'] == 44) or (BIM['LULC'] == 62)) or (BIM['LULC'] == 76):
+ terrain = 3 # Open
+ elif ((BIM['LULC'] >= 20) and (BIM['LULC'] <= 29)):
+ terrain = 15 # Light suburban
+ elif (BIM['LULC'] == 11) or (BIM['LULC'] == 61):
+ terrain = 35 # Suburban
+ elif ((BIM['LULC'] >= 41) and (BIM['LULC'] <= 43)) or (BIM['LULC'] in [16, 17]):
+ terrain = 70 # light trees
+
+ BIM.update(dict(
+ # Nominal Design Wind Speed
+            # Former term was "Basic Wind Speed"; it is now the "Nominal Design
+            # Wind Speed" (V_asd). Unit: mph.
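+            # Per ASCE 7, V_asd = V_ult * sqrt(0.6); e.g., V_ult = 140 mph
+            # gives a V_asd of roughly 108 mph.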
+            V_asd = np.sqrt(0.6) * BIM['V_ult'],
+
+ HazardProneRegion=HPR,
+ WindBorneDebris=WBD,
+ TerrainRoughness=terrain,
+ ))
+
+ if 'inundation' in hazards:
+
+ BIM.update(dict(
+ # Flood Risk
+ # Properties in the High Water Zone (within 1 mile of the coast) are at
+ # risk of flooding and other wind-borne debris action.
+ FloodRisk=True, # TODO: need high water zone for this and move it to inputs!
+ ))
+
+ return BIM
+
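+
+# Minimal usage sketch (illustrative only; the raw BIM_in keys below are
+# assumptions inferred from the parsing rules above, not a complete schema):
+if __name__ == '__main__':
+    example_BIM_in = {
+        'BuildingType': 3001,  # NJDEP code for Wood
+        'OccupancyClass': 'RES1',
+        'YearBuilt': 1998,
+        'NumberOfStories': 2,
+        'PlanArea': 1500.0,
+        'DesignWindSpeed': 120.0,
+        'RoofShape': 'gable',
+        'LULC': 2100,
+    }
+    BIM = parse_BIM(example_BIM_in, location='NJ', hazards=['wind'])
+    print(BIM['RoofShape'], BIM['TerrainRoughness'])  # prints: gab 15
+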
diff --git a/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py
new file mode 100644
index 000000000..c034a6b4f
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindCECBRulesets.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def CECB_config(BIM):
+ """
+ Rules to identify a HAZUS CECB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'bur'
+ # Warning: HAZUS does not have N/A option for CECB, so here we use bur
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+    else:
+        # BOCA 1996 and earlier:
+        # Shutters were not required by code until the 2000 IBC. Before 2000,
+        # the percentage of commercial buildings with shutters is assumed to
+        # be 46%. This value is based on a study of small-business hurricane
+        # preparedness, which found that in Sarasota County 46% of business
+        # owners had taken action to wind-proof or flood-proof their
+        # facilities, and 46% reported boarding up their businesses before
+        # Hurricane Katrina. Compliance rates from the Homeowners Survey data
+        # also hover between 43 and 50 percent.
+        if BIM['WindBorneDebris']:
+            shutters = random.random() < 0.46
+        else:
+            shutters = False
+
+    # Wind Debris (WIDD in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ # Window area ratio
+ if BIM['WindowArea'] < 0.33:
+ WWR = 'low'
+ elif BIM['WindowArea'] < 0.5:
+ WWR = 'med'
+ else:
+ WWR = 'hig'
+
+ if BIM['NumberOfStories'] <= 2:
+ bldg_tag = 'C.ECB.L'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'C.ECB.M'
+ else:
+ bldg_tag = 'C.ECB.H'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ Shutters = shutters,
+ WindowAreaRatio = WWR,
+ WindDebrisClass = WIDD
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{WWR}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
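+# Minimal usage sketch (illustrative only; the BIM keys below are assumptions
+# inferred from the rules above, not a complete schema):
+if __name__ == '__main__':
+    example_BIM = {'YearBuilt': 2005, 'RoofShape': 'flt',
+                   'WindBorneDebris': True, 'OccupancyClass': 'COM4',
+                   'WindowArea': 0.2, 'NumberOfStories': 4,
+                   'TerrainRoughness': 15}
+    print(CECB_config(example_BIM))  # prints: C.ECB.M.spm.1.A.low.15
+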
diff --git a/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py
new file mode 100644
index 000000000..41f8faab0
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindCERBRulesets.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def CERB_config(BIM):
+ """
+ Rules to identify a HAZUS CERB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'bur'
+    # Warning: HAZUS does not have an N/A option for CERB, so here we use bur
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+    else:
+        # BOCA 1996 and earlier:
+        # Shutters were not required by code until the 2000 IBC. Before 2000,
+        # the percentage of commercial buildings with shutters is assumed to
+        # be 46%. This value is based on a study of small-business hurricane
+        # preparedness, which found that in Sarasota County 46% of business
+        # owners had taken action to wind-proof or flood-proof their
+        # facilities, and 46% reported boarding up their businesses before
+        # Hurricane Katrina. Compliance rates from the Homeowners Survey data
+        # also hover between 43 and 50 percent.
+        if BIM['WindBorneDebris']:
+            shutters = random.random() < 0.46
+        else:
+            shutters = False
+
+ # Wind Debris (widd in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ # Window area ratio
+ if BIM['WindowArea'] < 0.33:
+ WWR = 'low'
+ elif BIM['WindowArea'] < 0.5:
+ WWR = 'med'
+ else:
+ WWR = 'hig'
+
+ if BIM['NumberOfStories'] <= 2:
+ bldg_tag = 'C.ERB.L'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'C.ERB.M'
+ else:
+ bldg_tag = 'C.ERB.H'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ Shutters = shutters,
+ WindowAreaRatio = WWR,
+ WindDebrisClass = WIDD
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{WWR}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
diff --git a/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py
new file mode 100644
index 000000000..1762eb5ce
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindEFRulesets.py
@@ -0,0 +1,316 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import datetime
+
+
+def HUEFFS_config(BIM):
+ """
+ Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # Wind debris
+ WIDD = 'A'
+
+ # Roof deck age
+    if year >= (datetime.datetime.now().year - 50):
+        DQ = 'god' # good (new or average)
+    else:
+        DQ = 'por' # poor (old)
+
+ # Metal-RDA
+ if year > 2000:
+ if BIM['V_ult'] <= 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+ else:
+ MRDA = 'std' # standard
+
+ # Shutters
+    shutters = int(BIM['WindBorneDebris'])
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentM = MRDA,
+ RoofDeckAge=DQ,
+ WindDebrisClass = WIDD,
+ Shutters = shutters
+ ))
+
+ bldg_tag = 'HUEF.FS'
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{shutters}." \
+ f"{WIDD}." \
+ f"{DQ}." \
+ f"{MRDA}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
+def HUEFSS_config(BIM):
+ """
+ Rules to identify a HAZUS HUEFFS/HUEFSS configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # Wind debris
+ WIDD = 'A'
+
+ # Roof deck age
+    if year >= (datetime.datetime.now().year - 50):
+        DQ = 'god' # good (new or average)
+    else:
+        DQ = 'por' # poor (old)
+
+ # Metal-RDA
+ if year > 2000:
+ if BIM['V_ult'] <= 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+ else:
+ MRDA = 'std' # standard
+
+ # Shutters
+ shutters = BIM['WindBorneDebris']
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentM = MRDA,
+ RoofDeckAge=DQ,
+ WindDebrisClass = WIDD,
+ Shutters=shutters
+ ))
+
+ bldg_tag = 'HUEF.S.S'
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{DQ}." \
+ f"{MRDA}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
+
+def HUEFH_config(BIM):
+ """
+ Rules to identify a HAZUS HUEFH configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # Wind debris
+ WIDD = 'A'
+
+ # Shutters
+ shutters = BIM['WindBorneDebris']
+
+ # Metal-RDA
+ if year > 2000:
+ if BIM['V_ult'] <= 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+ else:
+ MRDA = 'std' # standard
+
+ if BIM['NumberOfStories'] <=2:
+ bldg_tag = 'HUEF.H.S'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'HUEF.H.M'
+ else:
+ bldg_tag = 'HUEF.H.L'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentM = MRDA,
+ WindDebrisClass = WIDD,
+ Shutters=shutters
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{WIDD}." \
+ f"{MRDA}." \
+ f"{int(shutters)}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
+def HUEFS_config(BIM):
+ """
+ Rules to identify a HAZUS HUEFS configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # Wind debris
+ WIDD = 'C'
+
+ # Shutters
+ if year > 2000:
+ shutters = BIM['WindBorneDebris']
+ else:
+ # year <= 2000
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ # Metal-RDA
+ if year > 2000:
+ if BIM['V_ult'] <= 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+ else:
+ MRDA = 'std' # standard
+
+ if BIM['NumberOfStories'] <=2:
+ bldg_tag = 'HUEF.S.M'
+ else:
+ bldg_tag = 'HUEF.S.L'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentM = MRDA,
+ WindDebrisClass = WIDD,
+ Shutters=shutters
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"null." \
+ f"{MRDA}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
\ No newline at end of file
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py
new file mode 100644
index 000000000..137844f7b
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMECBRulesets.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see <http://www.opensource.org/licenses/>.
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def MECB_config(BIM):
+ """
+ Rules to identify a HAZUS MECB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'bur'
+        # no info; using the default supported by HAZUS
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+    # Wind Debris (WIDD in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ # Window area ratio
+ if BIM['WindowArea'] < 0.33:
+ WWR = 'low'
+ elif BIM['WindowArea'] < 0.5:
+ WWR = 'med'
+ else:
+ WWR = 'hig'
+
+ if BIM['NumberOfStories'] <= 2:
+ bldg_tag = 'M.ECB.L'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'M.ECB.M'
+ else:
+ bldg_tag = 'M.ECB.H'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentM = MRDA,
+ Shutters = shutters,
+ WindowAreaRatio = WWR,
+ WindDebrisClass = WIDD
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{MRDA}." \
+ f"{WWR}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
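+
+# Illustrative usage (hypothetical BIM values, not from any real inventory;
+# with YearBuilt >= 2000 the shutter rule is deterministic, so no random
+# sampling is involved):
+#
+#     BIM = {'YearBuilt': 2005, 'RoofShape': 'flt', 'WindBorneDebris': True,
+#            'OccupancyClass': 'COM1', 'V_ult': 150.0, 'WindowArea': 0.40,
+#            'NumberOfStories': 3, 'TerrainRoughness': 35}
+#     MECB_config(BIM)  # -> 'M.ECB.M.spm.1.A.std.med.35'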
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py
new file mode 100644
index 000000000..2299b8dbb
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMERBRulesets.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def MERB_config(BIM):
+ """
+ Rules to identify a HAZUS MERB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'bur'
+        # no info, using the default supported by HAZUS
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.45
+ else:
+ shutters = False
+
+    # Wind Debris (WIDD in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ # Window area ratio
+ if BIM['WindowArea'] < 0.33:
+ WWR = 'low'
+ elif BIM['WindowArea'] < 0.5:
+ WWR = 'med'
+ else:
+ WWR = 'hig'
+
+ if BIM['NumberOfStories'] <= 2:
+ bldg_tag = 'M.ERB.L'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'M.ERB.M'
+ else:
+ bldg_tag = 'M.ERB.H'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentM = MRDA,
+ Shutters = shutters,
+ WindowAreaRatio = WWR,
+ WindDebrisClass = WIDD
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{MRDA}." \
+ f"{WWR}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
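+
+# Illustrative usage (hypothetical BIM values; with YearBuilt >= 2000 the
+# shutter rule is deterministic, so no random sampling is involved):
+#
+#     BIM = {'YearBuilt': 2005, 'RoofShape': 'flt', 'WindBorneDebris': True,
+#            'OccupancyClass': 'COM1', 'V_ult': 150.0, 'WindowArea': 0.40,
+#            'NumberOfStories': 6, 'TerrainRoughness': 35}
+#     MERB_config(BIM)  # -> 'M.ERB.H.spm.1.A.std.med.35'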
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py
new file mode 100644
index 000000000..db6ebe8a3
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMHRulesets.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def MH_config(BIM):
+ """
+    Rules to identify a HAZUS MH configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+ if year <= 1976:
+ # MHPHUD
+ bldg_tag = 'MH.PHUD'
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.45
+ else:
+ shutters = False
+ # TieDowns
+ TD = random.random() < 0.45
+
+ elif year <= 1994:
+ # MH76HUD
+ bldg_tag = 'MH.76HUD'
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.45
+ else:
+ shutters = False
+ # TieDowns
+ TD = random.random() < 0.45
+
+ else:
+ # MH94HUD I, II, III
+ if BIM['V_ult'] >= 100.0:
+ shutters = True
+ else:
+ shutters = False
+ # TieDowns
+ if BIM['V_ult'] >= 70.0:
+ TD = True
+ else:
+ TD = False
+
+ bldg_tag = 'MH.94HUD' + BIM['WindZone']
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ TieDowns = TD,
+ Shutters = shutters,
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{int(shutters)}." \
+ f"{int(TD)}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
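+# Illustrative usage (hypothetical BIM values; manufactured homes built after
+# 1994 follow the deterministic HUD wind-zone rules, so no random sampling is
+# involved):
+#
+#     BIM = {'YearBuilt': 2000, 'V_ult': 120.0, 'WindZone': 'II',
+#            'TerrainRoughness': 15}
+#     MH_config(BIM)  # -> 'MH.94HUDII.1.1.15'
+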
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py
new file mode 100644
index 000000000..09b833976
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMLRIRulesets.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import datetime
+
+def MLRI_config(BIM):
+ """
+ Rules to identify a HAZUS MLRI configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # MR
+ MR = True
+
+ # Shutters
+ shutters = False
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'null'
+ roof_quality = 'god' # default supported by HAZUS
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofQuality = roof_quality,
+ RoofDeckAttachmentM = MRDA,
+ Shutters = shutters,
+ MasonryReinforcing = MR,
+ ))
+
+ bldg_config = f"M.LRI." \
+ f"{int(shutters)}." \
+ f"{int(MR)}." \
+ f"{roof_quality}." \
+ f"{MRDA}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
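+# Illustrative usage (hypothetical BIM values; gable and hip roofs take the
+# deterministic branch, so no date arithmetic or sampling is involved):
+#
+#     BIM = {'YearBuilt': 2010, 'RoofShape': 'gab', 'V_ult': 120.0,
+#            'TerrainRoughness': 35}
+#     MLRI_config(BIM)  # -> 'M.LRI.0.1.god.sup.35'
+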
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py
new file mode 100644
index 000000000..c63f39313
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMLRMRulesets.py
@@ -0,0 +1,246 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import datetime
+
+def MLRM_config(BIM):
+ """
+ Rules to identify a HAZUS MLRM configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Note the only roof option for commercial masonry in NJ appraisers manual
+ # is OSWJ, so this suggests they do not even see alternate roof system
+ # ref: Custom Inventory google spreadsheet H-37 10/01/20
+ # This could be commented for other regions if detailed data are available
+ BIM['RoofSystem'] = 'ows'
+
+ # Roof cover
+ # Roof cover does not apply to gable and hip roofs
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # Shutters
+ # IRC 2000-2015:
+ # R301.2.1.2 in NJ IRC 2015 says protection of openings required for
+ # buildings located in WindBorneDebris regions, mentions impact-rated protection for
+ # glazing, impact-resistance for garage door glazed openings, and finally
+ # states that wood structural panels with a thickness > 7/16" and a
+ # span <8' can be used, as long as they are precut, attached to the framing
+ # surrounding the opening, and the attachments are resistant to corrosion
+ # and are able to resist component and cladding loads;
+ # Earlier IRC editions provide similar rules.
+ shutters = BIM['WindBorneDebris']
+
+ # Masonry Reinforcing (MR)
+ # R606.6.4.1.2 Metal Reinforcement states that walls other than interior
+ # non-load-bearing walls shall be anchored at vertical intervals of not
+ # more than 8 inches with joint reinforcement of not less than 9 gage.
+ # Therefore this ruleset assumes that all exterior or load-bearing masonry
+ # walls will have reinforcement. Since our considerations deal with wind
+ # speed, I made the assumption that only exterior walls are being taken
+ # into consideration.
+ MR = True
+
+    # Wind Debris (WIDD in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ if BIM['RoofSystem'] == 'ows':
+ # RDA
+ RDA = 'null' # Doesn't apply to OWSJ
+
+ # Roof deck age (DQ)
+ # Average lifespan of a steel joist roof is roughly 50 years according
+ # to the source below. Therefore, if constructed 50 years before the
+ # current year, the roof deck should be considered old.
+ # https://www.metalroofing.systems/metal-roofing-pros-cons/
+ if year >= (datetime.datetime.now().year - 50):
+ DQ = 'god' # new or average
+ else:
+ DQ = 'por' # old
+
+ # RWC
+ RWC = 'null' # Doesn't apply to OWSJ
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ elif BIM['RoofSystem'] == 'trs':
+ # This clause should not be activated for NJ
+ # RDA
+ if BIM['TerrainRoughness'] >= 35: # suburban or light trees
+ if BIM['V_ult'] > 130.0:
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ else: # light suburban or open
+ if BIM['V_ult'] > 110.0:
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+
+ # Metal RDA
+ MRDA = 'null' # Doesn't apply to Wood Truss
+
+        # Roof deck age (DQ)
+ DQ = 'null' # Doesn't apply to Wood Truss
+
+ # RWC
+ if BIM['V_ult'] > 110:
+ RWC = 'strap' # Strap
+ else:
+ RWC = 'tnail' # Toe-nail
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ if BIM['MeanRoofHt'] < 15.0:
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentW = RDA,
+ RoofDeckAttachmentM = MRDA,
+ RoofDeckAge = DQ,
+ RoofToWallConnection = RWC,
+ Shutters = shutters,
+ MasonryReinforcing = MR,
+            WindDebrisClass = WIDD
+ ))
+
+ # if it's MLRM1, configure outputs
+ bldg_config = f"M.LRM.1." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{int(MR)}." \
+ f"{WIDD}." \
+ f"{BIM['RoofSystem']}." \
+ f"{RDA}." \
+ f"{RWC}." \
+ f"{DQ}." \
+ f"{MRDA}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ else:
+ unit_tag = 'null'
+ # MLRM2 needs more rulesets
+
+ if BIM['RoofSystem'] == 'trs':
+ joist_spacing = 'null'
+ elif BIM['RoofSystem'] == 'ows':
+ if BIM['NumberOfUnits'] == 1:
+ joist_spacing = 'null'
+ unit_tag = 'sgl'
+ else:
+ joist_spacing = 4
+ unit_tag = 'mlt'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ RoofDeckAttachmentW = RDA,
+ RoofDeckAttachmentM = MRDA,
+ RoofDeckAge = DQ,
+ RoofToWallConnection = RWC,
+ Shutters = shutters,
+ MasonryReinforcing = MR,
+ WindDebrisClass = WIDD,
+            UnitType = unit_tag
+ ))
+
+ bldg_config = f"M.LRM.2." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{int(MR)}." \
+ f"{WIDD}." \
+ f"{BIM['RoofSystem']}." \
+ f"{RDA}." \
+ f"{RWC}." \
+ f"{DQ}." \
+ f"{MRDA}." \
+ f"{unit_tag}." \
+ f"{joist_spacing}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
\ No newline at end of file
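+
+# Illustrative usage (hypothetical BIM values; assumes the roof is younger
+# than 50 years so the deck quality evaluates to 'god'):
+#
+#     BIM = {'YearBuilt': 2010, 'WindBorneDebris': True,
+#            'OccupancyClass': 'COM1', 'V_ult': 150.0, 'MeanRoofHt': 12.0,
+#            'TerrainRoughness': 35}
+#     MLRM_config(BIM)  # -> 'M.LRM.1.spm.1.1.A.ows.null.null.god.std.35'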
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py
new file mode 100644
index 000000000..3d27cbe09
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMMUHRulesets.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import datetime
+
+def MMUH_config(BIM):
+ """
+ Rules to identify a HAZUS MMUH configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Secondary Water Resistance (SWR)
+ # Minimum drainage recommendations are in place in NJ (See below).
+ # However, SWR indicates a code-plus practice.
+
+ SWR = "null" # Default
+ if BIM['RoofShape'] == 'flt':
+ SWR = 'null'
+ elif BIM['RoofShape'] in ['hip', 'gab']:
+ SWR = int(random.random() < 0.6)
+
+ # Roof cover & Roof quality
+ # Roof cover and quality do not apply to gable and hip roofs
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'null'
+ roof_quality = 'null'
+
+ # NJ Building Code Section 1507 (in particular 1507.10 and 1507.12) address
+ # Built Up Roofs and Single Ply Membranes. However, the NJ Building Code
+ # only addresses installation and material standards of different roof
+ # covers, but not in what circumstance each must be used.
+ # SPMs started being used in the 1960s, but different types continued to be
+ # developed through the 1980s. Today, single ply membrane roofing is the
+ # most popular flat roof option. BURs have been used for over 100 years,
+ # and although they are still used today, they are used less than SPMs.
+ # Since there is no available ruleset to be taken from the NJ Building
+ # Code, the ruleset is based off this information.
+ # We assume that all flat roofs built before 1975 are BURs and all roofs
+ # built after 1975 are SPMs.
+ # Nothing in NJ Building Code or in the Hazus manual specifies what
+    # constitutes “good” and “poor” roof conditions, so the ruleset is dependent
+ # on the age of the roof and average lifespan of BUR and SPM roofs.
+ # We assume that the average lifespan of a BUR roof is 30 years and the
+ # average lifespan of a SPM is 35 years. Therefore, BURs installed before
+ # 1990 are in poor condition, and SPMs installed before 1985 are in poor
+ # condition.
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+
+ # Roof Deck Attachment (RDA)
+ # IRC 2009-2015:
+ # Requires 8d nails (with spacing 6”/12”) for sheathing thicknesses between
+ # ⅜”-1”, see Table 2304.10, Line 31. Fastener selection is contingent on
+ # thickness of sheathing in building codes.
+ # Wind Speed Considerations taken from Table 2304.6.1, Maximum Nominal
+ # Design Wind Speed, Vasd, Permitted For Wood Structural Panel Wall
+ # Sheathing Used to Resist Wind Pressures. Typical wall stud spacing is 16
+ # inches, according to table 2304.6.3(4). NJ code defines this with respect
+ # to exposures B and C only. These are mapped to HAZUS categories based on
+ # roughness length in the ruleset herein.
+ # The base rule was then extended to the exposures closest to suburban and
+ # light suburban, even though these are not considered by the code.
+ if BIM['TerrainRoughness'] >= 35: # suburban or light trees
+ if BIM['V_ult'] > 130.0:
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ else: # light suburban or open
+ if BIM['V_ult'] > 110.0:
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+
+ # Roof-Wall Connection (RWC)
+ if BIM['V_ult'] > 110.0:
+ RWC = 'strap' # Strap
+ else:
+ RWC = 'tnail' # Toe-nail
+
+ # Shutters
+ # IRC 2000-2015:
+ # R301.2.1.2 in NJ IRC 2015 says protection of openings required for
+ # buildings located in WindBorneDebris regions, mentions impact-rated protection for
+ # glazing, impact-resistance for garage door glazed openings, and finally
+ # states that wood structural panels with a thickness > 7/16" and a
+ # span <8' can be used, as long as they are precut, attached to the framing
+ # surrounding the opening, and the attachments are resistant to corrosion
+ # and are able to resist component and cladding loads;
+ # Earlier IRC editions provide similar rules.
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ # BOCA 1996 and earlier:
+ # Shutters were not required by code until the 2000 IBC. Before 2000, the
+ # percentage of commercial buildings that have shutters is assumed to be
+ # 46%. This value is based on a study on preparedness of small businesses
+ # for hurricane disasters, which says that in Sarasota County, 46% of
+ # business owners had taken action to wind-proof or flood-proof their
+ # facilities. In addition to that, 46% of business owners reported boarding
+ # up their businesses before Hurricane Katrina. In addition, compliance
+ # rates based on the Homeowners Survey data hover between 43 and 50 percent.
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ # Masonry Reinforcing (MR)
+ # R606.6.4.1.2 Metal Reinforcement states that walls other than interior
+ # non-load-bearing walls shall be anchored at vertical intervals of not
+ # more than 8 inches with joint reinforcement of not less than 9 gage.
+ # Therefore this ruleset assumes that all exterior or load-bearing masonry
+ # walls will have reinforcement. Since our considerations deal with wind
+ # speed, I made the assumption that only exterior walls are being taken
+ # into consideration.
+ MR = True
+
+ stories = min(BIM['NumberOfStories'], 3)
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ SecondaryWaterResistance = SWR,
+ RoofCover = roof_cover,
+ RoofQuality = roof_quality,
+ RoofDeckAttachmentW = RDA,
+ RoofToWallConnection = RWC,
+ Shutters = shutters,
+ MasonryReinforcing = MR,
+ ))
+
+ bldg_config = f"M.MUH." \
+ f"{int(stories)}." \
+ f"{BIM['RoofShape']}." \
+                  f"{SWR}." \
+ f"{roof_cover}." \
+ f"{roof_quality}." \
+ f"{RDA}." \
+ f"{RWC}." \
+ f"{int(shutters)}." \
+ f"{int(MR)}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
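+
+# Illustrative usage (hypothetical BIM values; the SWR digit is sampled at
+# random for hip/gable roofs, so that field varies between runs):
+#
+#     BIM = {'YearBuilt': 2005, 'RoofShape': 'gab', 'WindBorneDebris': True,
+#            'V_ult': 120.0, 'NumberOfStories': 3, 'TerrainRoughness': 35}
+#     MMUH_config(BIM)  # e.g. 'M.MUH.3.gab.1.null.null.8d.strap.1.1.35'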
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py
new file mode 100644
index 000000000..a9878d9de
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMSFRulesets.py
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import datetime
+
+def MSF_config(BIM):
+ """
+ Rules to identify a HAZUS MSF configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof-Wall Connection (RWC)
+ if BIM['HazardProneRegion']:
+ RWC = 'strap' # Strap
+ else:
+ RWC = 'tnail' # Toe-nail
+
+ # Roof Frame Type
+ RFT = BIM['RoofSystem']
+
+ # Story Flag
+ stories = min(BIM['NumberOfStories'], 2)
+
+ # Shutters
+ # IRC 2000-2015:
+ # R301.2.1.2 in NJ IRC 2015 says protection of openings required for
+ # buildings located in WindBorneDebris regions, mentions impact-rated protection for
+ # glazing, impact-resistance for garage door glazed openings, and finally
+ # states that wood structural panels with a thickness > 7/16" and a
+ # span <8' can be used, as long as they are precut, attached to the framing
+ # surrounding the opening, and the attachments are resistant to corrosion
+ # and are able to resist component and cladding loads;
+ # Earlier IRC editions provide similar rules.
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ # BOCA 1996 and earlier:
+ # Shutters were not required by code until the 2000 IBC. Before 2000, the
+ # percentage of commercial buildings that have shutters is assumed to be
+ # 46%. This value is based on a study on preparedness of small businesses
+ # for hurricane disasters, which says that in Sarasota County, 46% of
+ # business owners had taken action to wind-proof or flood-proof their
+ # facilities. In addition to that, 46% of business owners reported boarding
+ # up their businesses before Hurricane Katrina. In addition, compliance
+ # rates based on the Homeowners Survey data hover between 43 and 50 percent.
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.45
+ else:
+ shutters = False
+
+
+ if BIM['RoofSystem'] == 'trs':
+
+ # Roof Deck Attachment (RDA)
+ # IRC codes:
+ # NJ code requires 8d nails (with spacing 6”/12”) for sheathing thicknesses
+ # between ⅜”-1” - see Table R602.3(1)
+ # Fastener selection is contingent on thickness of sheathing in building
+ # codes. Commentary for Table R602.3(1) indicates 8d nails with 6”/6”
+ # spacing (enhanced roof spacing) for ultimate wind speeds greater than
+ # a speed_lim. speed_lim depends on the year of construction
+ RDA = '6d' # Default (aka A) in Reorganized Rulesets - WIND
+ if year >= 2016:
+ # IRC 2015
+ speed_lim = 130.0 # mph
+ else:
+ # IRC 2000 - 2009
+ speed_lim = 100.0 # mph
+ if BIM['V_ult'] > speed_lim:
+ RDA = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND)
+ else:
+ RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND)
+
+ # Secondary Water Resistance (SWR)
+ # Minimum drainage recommendations are in place in NJ (See below).
+ # However, SWR indicates a code-plus practice.
+ SWR = random.random() < 0.6
+
+ # Garage
+ # As per IRC 2015:
+ # Garage door glazed opening protection for windborne debris shall meet the
+ # requirements of an approved impact-resisting standard or ANSI/DASMA 115.
+ # Exception: Wood structural panels with a thickness of not less than 7/16
+ # inch and a span of not more than 8 feet shall be permitted for opening
+ # protection. Panels shall be predrilled as required for the anchorage
+ # method and shall be secured with the attachment hardware provided.
+ # Permitted for buildings where the ultimate design wind speed is 180 mph
+ # or less.
+ #
+ # Average lifespan of a garage is 30 years, so garages that are not in WBD
+ # (and therefore do not have any strength requirements) that are older than
+ # 30 years are considered to be weak, whereas those from the last 30 years
+ # are considered to be standard.
+ if BIM['Garage'] == -1:
+ # no garage data, using the default "none"
+ garage = 'no'
+ else:
+ if year > (datetime.datetime.now().year - 30):
+ if BIM['Garage'] < 1:
+ garage = 'no' # None
+ else:
+ if shutters:
+ garage = 'sup' # SFBC 1994
+ else:
+ garage = 'std' # Standard
+ else:
+ # year <= current year - 30
+ if BIM['Garage'] < 1:
+ garage = 'no' # None
+ else:
+ if shutters:
+ garage = 'sup'
+ else:
+ garage = 'wkd' # Weak
+
+ # Masonry Reinforcing (MR)
+ # R606.6.4.1.2 Metal Reinforcement states that walls other than interior
+ # non-load-bearing walls shall be anchored at vertical intervals of not
+ # more than 8 inches with joint reinforcement of not less than 9 gage.
+ # Therefore this ruleset assumes that all exterior or load-bearing masonry
+ # walls will have reinforcement. Since our considerations deal with wind
+ # speed, I made the assumption that only exterior walls are being taken
+ # into consideration.
+ MR = True
+
+ stories = min(BIM['NumberOfStories'], 2)
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ SecondaryWaterResistance = SWR,
+ RoofDeckAttachmentW = RDA,
+ RoofToWallConnection = RWC,
+ Shutters = shutters,
+ AugmentGarage = garage,
+ MasonryReinforcing = MR,
+ ))
+
+ bldg_config = f"M.SF." \
+ f"{int(stories)}." \
+ f"{BIM['RoofShape']}." \
+ f"{RWC}." \
+ f"{RFT}." \
+ f"{RDA}." \
+ f"{int(shutters)}." \
+ f"{int(SWR)}." \
+ f"{garage}." \
+ f"{int(MR)}." \
+ f"null." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ else:
+ # Roof system = OSJW
+        # A 2015 study found that there were 750,000 metal roofs installed in
+        # 2015, out of 5 million new roofs in the US annually. If these numbers
+        # stay relatively stable, that implies that roughly 15% of roofs are
+        # sheet metal ('smtl').
+ # ref. link: https://www.bdcnetwork.com/blog/metal-roofs-are-soaring-
+ # popularity-residential-marmet
+ roof_cover_options = ['smtl', 'cshl']
+ roof_cover = roof_cover_options[int(random.random() < 0.85)]
+
+ # Roof Deck Attachment (RDA)
+ # NJ IBC 1507.2.8.1 (for cshl)
+ # high wind attachments are required for DSWII > 142 mph
+ # NJ IBC 1507.4.5 (for smtl)
+ # high wind attachment are required for DSWII > 142 mph
+ if BIM['V_ult'] > 142.0:
+ RDA = 'sup' # superior
+ else:
+ RDA = 'std' # standard
+
+ # Secondary Water Resistance (SWR)
+ # Minimum drainage recommendations are in place in NJ (See below).
+ # However, SWR indicates a code-plus practice.
+ SWR = 'null' # Default
+ if BIM['RoofShape'] == 'flt':
+ SWR = int(True)
+ elif ((BIM['RoofShape'] in ['hip', 'gab']) and
+ (roof_cover=='cshl') and (RDA=='sup')):
+ SWR = int(random.random() < 0.6)
+
+ stories = min(BIM['NumberOfStories'], 2)
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ SecondaryWaterResistance = SWR,
+ RoofDeckAttachmentW = RDA,
+ RoofToWallConnection = RWC,
+ Shutters = shutters,
+            # garage and masonry reinforcement are only evaluated in the
+            # wood-truss branch above; mark them as not applicable here so
+            # this branch does not reference variables it never assigned
+            AugmentGarage = 'null',
+            MasonryReinforcing = 'null',
+ ))
+
+ bldg_config = f"M.SF." \
+ f"{int(stories)}." \
+ f"{BIM['RoofShape']}." \
+ f"{RWC}." \
+ f"{RFT}." \
+ f"{RDA}." \
+ f"{int(shutters)}." \
+ f"{SWR}." \
+ f"null." \
+ f"null." \
+ f"{roof_cover}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
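+
+# Illustrative usage (hypothetical BIM values; the SWR digit is sampled at
+# random in the wood-truss branch, so that field varies between runs):
+#
+#     BIM = {'YearBuilt': 2005, 'RoofShape': 'gab', 'RoofSystem': 'trs',
+#            'HazardProneRegion': True, 'WindBorneDebris': True,
+#            'V_ult': 120.0, 'NumberOfStories': 2, 'Garage': -1,
+#            'TerrainRoughness': 35}
+#     MSF_config(BIM)  # e.g. 'M.SF.2.gab.strap.trs.8s.1.1.no.1.null.35'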
diff --git a/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py
new file mode 100644
index 000000000..baf5108d8
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindMetaVarRulesets.py
@@ -0,0 +1,465 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import numpy as np
+import pandas as pd
+
+def parse_BIM(BIM_in, location, hazards):
+ """
+ Parses the information provided in the AIM model.
+
+ The parameters below list the expected inputs
+
+ Parameters
+ ----------
+ stories: str
+ Number of stories
+ yearBuilt: str
+ Year of construction.
+ roofType: {'hip', 'hipped', 'gabled', 'gable', 'flat'}
+ One of the listed roof shapes that best describes the building.
+ occupancy: str
+ Occupancy type.
+ buildingDescription: str
+ MODIV code that provides additional details about the building
+ structType: {'Stucco', 'Frame', 'Stone', 'Brick'}
+ One of the listed structure types that best describes the building.
+ V_design: string
+ Ultimate Design Wind Speed was introduced in the 2012 IBC. Officially
+        called “Ultimate Design Wind Speed (Vult)”; equivalent to the design
+ wind speeds taken from hazard maps in ASCE 7 or ATC's API. Unit is
+ assumed to be mph.
+ area: float
+ Plan area in ft2.
+ z0: string
+ Roughness length that characterizes the surroundings.
+
+ Returns
+ -------
+ BIM_ap: dictionary
+ Parsed building characteristics.
+ """
+
+ # check location
+ if location not in ['LA', 'NJ']:
+ print(f'WARNING: The provided location is not recognized: {location}')
+
+ # check hazard
+ for hazard in hazards:
+ if hazard not in ['wind', 'inundation']:
+ print(f'WARNING: The provided hazard is not recognized: {hazard}')
+
+ # initialize the BIM dict
+ BIM_ap = BIM_in.copy()
+
+ if 'wind' in hazards:
+
+ # maps roof type to the internal representation
+ ap_RoofType = {
+ 'hip' : 'hip',
+ 'hipped': 'hip',
+ 'Hip' : 'hip',
+ 'gabled': 'gab',
+ 'gable' : 'gab',
+ 'Gable' : 'gab',
+ 'flat' : 'flt',
+ 'Flat' : 'flt'
+ }
+ # maps roof system to the internal representation
+        ap_RoofSystem = {
+ 'Wood': 'trs',
+ 'OWSJ': 'ows',
+ 'N/A': 'trs'
+ }
+ roof_system = BIM_in.get('RoofSystem','Wood')
+ if pd.isna(roof_system):
+ roof_system = 'Wood'
+
+ # maps number of units to the internal representation
+ ap_NoUnits = {
+ 'Single': 'sgl',
+ 'Multiple': 'mlt',
+ 'Multi': 'mlt',
+ 'nav': 'nav'
+ }
+
+ # maps for design level (Marginal Engineered is mapped to Engineered as default)
+ ap_DesignLevel = {
+ 'E': 'E',
+ 'NE': 'NE',
+ 'PE': 'PE',
+ 'ME': 'E'
+ }
+ design_level = BIM_in.get('DesignLevel','E')
+ if pd.isna(design_level):
+ design_level = 'E'
+
+ # Average January Temp.
+ ap_ajt = {
+ 'Above': 'above',
+ 'Below': 'below'
+ }
+
+ # Year built
+ alname_yearbuilt = ['YearBuiltNJDEP', 'yearBuilt', 'YearBuiltMODIV']
+        yearbuilt = BIM_in.get('YearBuilt')
+        if yearbuilt is None:
+            for i in alname_yearbuilt:
+                if i in BIM_in.keys():
+                    yearbuilt = BIM_in[i]
+                    break
+
+        # if none of the above works, set a default
+        if yearbuilt is None:
+            yearbuilt = 1985
+
+ # Number of Stories
+ alname_nstories = ['stories', 'NumberofStories0', 'NumberofStories', 'NumberofStories1']
+        nstories = BIM_in.get('NumberOfStories')
+        if nstories is None:
+            for i in alname_nstories:
+                if i in BIM_in.keys():
+                    nstories = BIM_in[i]
+                    break
+
+        # if none of the above works, we need to raise an exception
+        if nstories is None:
+            raise KeyError('No number-of-stories entry found in the AIM data')
+
+ # Plan Area
+ alname_area = ['area', 'PlanArea1', 'Area', 'PlanArea0']
+        area = BIM_in.get('PlanArea')
+        if area is None:
+            for i in alname_area:
+                if i in BIM_in.keys():
+                    area = BIM_in[i]
+                    break
+
+        # if none of the above works, we need to raise an exception
+        if area is None:
+            raise KeyError('No plan area entry found in the AIM data')
+
+ # Design Wind Speed
+ alname_dws = ['DSWII', 'DWSII', 'DesignWindSpeed']
+
+ dws = BIM_in.get('DesignWindSpeed', None)
+ if dws is None:
+ for alname in alname_dws:
+ if alname in BIM_in.keys():
+ dws = BIM_in[alname]
+ break
+
+
+ alname_occupancy = ['occupancy']
+        oc = BIM_in.get('OccupancyClass')
+        if oc is None:
+            for i in alname_occupancy:
+                if i in BIM_in.keys():
+                    oc = BIM_in[i]
+                    break
+
+        # if none of the above works, we need to raise an exception
+        if oc is None:
+            raise KeyError('No occupancy class entry found in the AIM data')
+
+ # if getting RES3 then converting it to default RES3A
+ if oc == 'RES3':
+ oc = 'RES3A'
+
+ # maps for flood zone
+ ap_FloodZone = {
+ # Coastal areas with a 1% or greater chance of flooding and an
+ # additional hazard associated with storm waves.
+ 6101: 'VE',
+ 6102: 'VE',
+ 6103: 'AE',
+ 6104: 'AE',
+ 6105: 'AO',
+ 6106: 'AE',
+ 6107: 'AH',
+ 6108: 'AO',
+ 6109: 'A',
+ 6110: 'X',
+ 6111: 'X',
+ 6112: 'X',
+ 6113: 'OW',
+ 6114: 'D',
+ 6115: 'NA',
+ 6119: 'NA'
+ }
+        if isinstance(BIM_in['FloodZone'], int):
+ # NJDEP code for flood zone (conversion to the FEMA designations)
+ floodzone_fema = ap_FloodZone[BIM_in['FloodZone']]
+ else:
+ # standard input should follow the FEMA flood zone designations
+ floodzone_fema = BIM_in['FloodZone']
+
+ # maps for BuildingType
+ ap_BuildingType_NJ = {
+ # Coastal areas with a 1% or greater chance of flooding and an
+ # additional hazard associated with storm waves.
+ 3001: 'Wood',
+ 3002: 'Steel',
+ 3003: 'Concrete',
+ 3004: 'Masonry',
+ 3005: 'Manufactured',
+ }
+ if location == 'NJ':
+ # NJDEP code for flood zone needs to be converted
+ buildingtype = ap_BuildingType_NJ[BIM_in['BuildingType']]
+ elif location == 'LA':
+ # standard input should provide the building type as a string
+ buildingtype = BIM_in['BuildingType']
+
+ # first, pull in the provided data
+ BIM_ap.update(dict(
+ OccupancyClass=str(oc),
+ BuildingType=buildingtype,
+ YearBuilt=int(yearbuilt),
+ # double check with Tracy for format - (NumberStories0 is 4-digit code)
+ # (NumberStories1 is image-processed story number)
+ NumberOfStories=int(nstories),
+ PlanArea=float(area),
+ FloodZone=floodzone_fema,
+ V_ult=float(dws),
+ AvgJanTemp=ap_ajt[BIM_in.get('AvgJanTemp','Below')],
+ RoofShape=ap_RoofType[BIM_in['RoofShape']],
+ RoofSlope=float(BIM_in.get('RoofSlope',0.25)), # default 0.25
+ SheathingThickness=float(BIM_in.get('SheathingThick',1.0)), # default 1.0
+            RoofSystem=str(ap_RoofSystem[roof_system]), # only valid for masonry structures
+ Garage=float(BIM_in.get('Garage',-1.0)),
+ LULC=BIM_in.get('LULC',-1),
+ z0 = float(BIM_in.get('z0',-1)), # if the z0 is already in the input file
+ Terrain = BIM_in.get('Terrain',-1),
+ MeanRoofHt=float(BIM_in.get('MeanRoofHt',15.0)), # default 15
+ DesignLevel=str(ap_DesignLevel[design_level]), # default engineered
+ WindowArea=float(BIM_in.get('WindowArea',0.20)),
+            WindZone=str(BIM_in.get('WindZone', 'I'))
+ ))
+
+ if 'inundation' in hazards:
+
+ # maps for split level
+ ap_SplitLevel = {
+ 'NO': 0,
+ 'YES': 1
+ }
+
+ foundation = BIM_in.get('FoundationType',3501)
+ if pd.isna(foundation):
+ foundation = 3501
+
+ nunits = BIM_in.get('NoUnits',1)
+ if pd.isna(nunits):
+ nunits = 1
+
+ # maps for flood zone
+ ap_FloodZone = {
+ # Coastal areas with a 1% or greater chance of flooding and an
+ # additional hazard associated with storm waves.
+ 6101: 'VE',
+ 6102: 'VE',
+ 6103: 'AE',
+ 6104: 'AE',
+ 6105: 'AO',
+ 6106: 'AE',
+ 6107: 'AH',
+ 6108: 'AO',
+ 6109: 'A',
+ 6110: 'X',
+ 6111: 'X',
+ 6112: 'X',
+ 6113: 'OW',
+ 6114: 'D',
+ 6115: 'NA',
+ 6119: 'NA'
+ }
+        if isinstance(BIM_in['FloodZone'], int):
+ # NJDEP code for flood zone (conversion to the FEMA designations)
+ floodzone_fema = ap_FloodZone[BIM_in['FloodZone']]
+ else:
+ # standard input should follow the FEMA flood zone designations
+ floodzone_fema = BIM_in['FloodZone']
+
+ # add the parsed data to the BIM dict
+ BIM_ap.update(dict(
+ DesignLevel=str(ap_DesignLevel[design_level]), # default engineered
+ NumberOfUnits=int(nunits),
+ FirstFloorElevation=float(BIM_in.get('FirstFloorHt1',10.0)),
+            SplitLevel=bool(ap_SplitLevel[BIM_in.get('SplitLevel','NO')]), # default: no
+ FoundationType=int(foundation), # default: pile
+ City=BIM_in.get('City','NA'),
+ FloodZone =str(floodzone_fema)
+ ))
+
+ # add inferred, generic meta-variables
+
+ if 'wind' in hazards:
+
+ # Hurricane-Prone Region (HRP)
+ # Areas vulnerable to hurricane, defined as the U.S. Atlantic Ocean and
+ # Gulf of Mexico coasts where the ultimate design wind speed, V_ult is
+ # greater than a pre-defined limit.
+ if BIM_ap['YearBuilt'] >= 2016:
+ # The limit is 115 mph in IRC 2015
+ HPR = BIM_ap['V_ult'] > 115.0
+ else:
+ # The limit is 90 mph in IRC 2009 and earlier versions
+ HPR = BIM_ap['V_ult'] > 90.0
+
+ # Wind Borne Debris
+ # Areas within hurricane-prone regions are affected by debris if one of
+ # the following two conditions holds:
+ # (1) Within 1 mile (1.61 km) of the coastal mean high water line where
+ # the ultimate design wind speed is greater than flood_lim.
+ # (2) In areas where the ultimate design wind speed is greater than
+ # general_lim
+ # The flood_lim and general_lim limits depend on the year of construction
+ if BIM_ap['YearBuilt'] >= 2016:
+ # In IRC 2015:
+ flood_lim = 130.0 # mph
+ general_lim = 140.0 # mph
+ else:
+ # In IRC 2009 and earlier versions
+ flood_lim = 110.0 # mph
+ general_lim = 120.0 # mph
+ # Areas within hurricane-prone regions located in accordance with
+ # one of the following:
+ # (1) Within 1 mile (1.61 km) of the coastal mean high water line
+ # where the ultimate design wind speed is 130 mph (58m/s) or greater.
+ # (2) In areas where the ultimate design wind speed is 140 mph (63.5m/s)
+ # or greater. (Definitions: Chapter 2, 2015 NJ Residential Code)
+ if not HPR:
+ WBD = False
+ else:
+ WBD = (((BIM_ap['FloodZone'].startswith('A') or BIM_ap['FloodZone'].startswith('V')) and
+ BIM_ap['V_ult'] >= flood_lim) or (BIM_ap['V_ult'] >= general_lim))
+
+ # Terrain
+ # open (0.03) = 3
+ # light suburban (0.15) = 15
+ # suburban (0.35) = 35
+ # light trees (0.70) = 70
+ # trees (1.00) = 100
+ # Mapped to Land Use Categories in NJ (see https://www.state.nj.us/dep/gis/
+ # digidownload/metadata/lulc02/anderson2002.html) by T. Wu group
+ # (see internal report on roughness calculations, Table 4).
+        # These are mapped to Hazus definitions as follows:
+ # Open Water (5400s) with zo=0.01 and barren land (7600) with zo=0.04 assume Open
+ # Open Space Developed, Low Intensity Developed, Medium Intensity Developed
+ # (1110-1140) assumed zo=0.35-0.4 assume Suburban
+ # High Intensity Developed (1600) with zo=0.6 assume Lt. Tree
+ # Forests of all classes (4100-4300) assumed zo=0.6 assume Lt. Tree
+ # Shrub (4400) with zo=0.06 assume Open
+ # Grasslands, pastures and agricultural areas (2000 series) with
+ # zo=0.1-0.15 assume Lt. Suburban
+ # Woody Wetlands (6250) with zo=0.3 assume suburban
+ # Emergent Herbaceous Wetlands (6240) with zo=0.03 assume Open
+ # Note: HAZUS category of trees (1.00) does not apply to any LU/LC in NJ
+ terrain = 15 # Default in Reorganized Rulesets - WIND
+ LULC = BIM_ap['LULC']
+ TER = BIM_ap['Terrain']
+ if (BIM_ap['z0'] > 0):
+ terrain = int(100 * BIM_ap['z0'])
+ elif (LULC > 0):
+ if (BIM_ap['FloodZone'].startswith('V') or BIM_ap['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']):
+ terrain = 3
+ elif ((LULC >= 5000) and (LULC <= 5999)):
+ terrain = 3 # Open
+ elif ((LULC == 4400) or (LULC == 6240)) or (LULC == 7600):
+ terrain = 3 # Open
+ elif ((LULC >= 2000) and (LULC <= 2999)):
+ terrain = 15 # Light suburban
+ elif ((LULC >= 1110) and (LULC <= 1140)) or ((LULC >= 6250) and (LULC <= 6252)):
+ terrain = 35 # Suburban
+ elif ((LULC >= 4100) and (LULC <= 4300)) or (LULC == 1600):
+ terrain = 70 # light trees
+ elif (TER > 0):
+ if (BIM_ap['FloodZone'].startswith('V') or BIM_ap['FloodZone'] in ['A', 'AE', 'A1-30', 'AR', 'A99']):
+ terrain = 3
+ elif ((TER >= 50) and (TER <= 59)):
+ terrain = 3 # Open
+ elif ((TER == 44) or (TER == 62)) or (TER == 76):
+ terrain = 3 # Open
+ elif ((TER >= 20) and (TER <= 29)):
+ terrain = 15 # Light suburban
+ elif (TER == 11) or (TER == 61):
+ terrain = 35 # Suburban
+ elif ((TER >= 41) and (TER <= 43)) or (TER in [16, 17]):
+ terrain = 70 # light trees
+
+        BIM_ap.update(dict(
+            # Nominal Design Wind Speed
+            # Former term was “Basic Wind Speed”; it is now the “Nominal
+            # Design Wind Speed” (V_asd). Unit: mph.
+            # ASCE 7 conversion from the ultimate design wind speed:
+            # V_asd = sqrt(0.6) * V_ult
+            V_asd = np.sqrt(0.6) * BIM_ap['V_ult'],
+
+ HazardProneRegion=HPR,
+ WindBorneDebris=WBD,
+ TerrainRoughness=terrain,
+ ))
+
+ if 'inundation' in hazards:
+
+ BIM_ap.update(dict(
+ # Flood Risk
+ # Properties in the High Water Zone (within 1 mile of the coast) are at
+ # risk of flooding and other wind-borne debris action.
+ FloodRisk=True, # TODO: need high water zone for this and move it to inputs!
+ ))
+
+ return BIM_ap
+
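+# Illustrative usage (hypothetical NJDEP-style input; codes 6110 and 3004 are
+# the NJ flood zone and building type codes mapped above):
+#
+#     BIM_in = {'YearBuilt': 1985, 'NumberOfStories': 2, 'PlanArea': 1500.0,
+#               'DesignWindSpeed': 120.0, 'OccupancyClass': 'RES3',
+#               'FloodZone': 6110, 'BuildingType': 3004, 'RoofShape': 'gable'}
+#     BIM = parse_BIM(BIM_in, location='NJ', hazards=['wind'])
+#     # BIM['RoofShape'] -> 'gab', BIM['FloodZone'] -> 'X',
+#     # BIM['BuildingType'] -> 'Masonry', BIM['OccupancyClass'] -> 'RES3A',
+#     # BIM['WindBorneDebris'] -> True (V_ult meets the pre-2016 120 mph limit)
+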
diff --git a/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py
new file mode 100644
index 000000000..d07f63fdf
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindSECBRulesets.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def SECB_config(BIM):
+ """
+ Rules to identify a HAZUS SECB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+        A string that identifies a specific configuration within this building
+        class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'bur'
+        # Warning: HAZUS does not have N/A option for SECB, so here we use bur
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ # BOCA 1996 and earlier:
+ # Shutters were not required by code until the 2000 IBC. Before 2000, the
+ # percentage of commercial buildings that have shutters is assumed to be
+ # 46%. This value is based on a study on preparedness of small businesses
+ # for hurricane disasters, which says that in Sarasota County, 46% of
+ # business owners had taken action to wind-proof or flood-proof their
+ # facilities. Similarly, 46% of business owners reported boarding up their
+ # businesses before Hurricane Katrina, and compliance rates based on the
+ # Homeowners Survey data hover between 43 and 50 percent.
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ # Wind Debris (WIDD in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ # Window area ratio
+ if BIM['WindowArea'] < 0.33:
+ WWR = 'low'
+ elif BIM['WindowArea'] < 0.5:
+ WWR = 'med'
+ else:
+ WWR = 'hig'
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ if BIM['NumberOfStories'] <= 2:
+ bldg_tag = 'S.ECB.L'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'S.ECB.M'
+ else:
+ bldg_tag = 'S.ECB.H'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ WindowAreaRatio = WWR,
+ RoofDeckAttachmentM = MRDA,
+ Shutters = shutters,
+ WindDebrisClass=WIDD
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{MRDA}." \
+ f"{WWR}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
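Reviewer note, not part of the patch: a minimal usage sketch of the new `SECB_config` ruleset. Only the BIM key names and thresholds come from the code above; the field values, the seed, and the import path (valid once the `rulesets` package lands with the `__init__.py` added later in this PR) are illustrative.

```python
import random

# Assumed import path; the rulesets package is added in this PR.
from pelicun.tests.dl_calculation.rulesets.WindSECBRulesets import SECB_config

random.seed(42)  # pre-2000 shutters are a Bernoulli(0.46) draw

bim = {
    'YearBuilt': 1985,         # pre-2000: shutters sampled when WBD applies
    'RoofShape': 'flt',        # not gab/hip, built >= 1975 -> 'spm' cover
    'WindBorneDebris': True,
    'OccupancyClass': 'COM1',  # neither RES* nor AGR1 -> debris class 'A'
    'WindowArea': 0.40,        # 0.33 <= ratio < 0.5 -> 'med'
    'V_ult': 150.0,            # > 142 mph -> 'std' metal roof deck attachment
    'NumberOfStories': 3,      # 3-5 stories -> 'S.ECB.M'
    'TerrainRoughness': 15,
}

print(SECB_config(bim))  # 'S.ECB.M.spm.0.A.std.med.15' with this seed
```

Note that the function also mutates the input dictionary via `BIM.update`, so downstream code sees the derived attributes as well as the returned tag.
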
diff --git a/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py
new file mode 100644
index 000000000..d6711b347
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindSERBRulesets.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+
+def SERB_config(BIM):
+ """
+ Rules to identify a HAZUS SERB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+ A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof cover
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'bur'
+ # Warning: HAZUS does not have an N/A option for SERB, so here we use bur
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ # BOCA 1996 and earlier:
+ # Shutters were not required by code until the 2000 IBC. Before 2000, the
+ # percentage of commercial buildings that have shutters is assumed to be
+ # 46%. This value is based on a study on preparedness of small businesses
+ # for hurricane disasters, which says that in Sarasota County, 46% of
+ # business owners had taken action to wind-proof or flood-proof their
+ # facilities. Similarly, 46% of business owners reported boarding up their
+ # businesses before Hurricane Katrina, and compliance rates based on the
+ # Homeowners Survey data hover between 43 and 50 percent.
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ # Wind Debris (WIDD in HAZUS)
+ # HAZUS A: Res/Comm, B: Varies by direction, C: Residential, D: None
+ WIDD = 'C' # residential (default)
+ if BIM['OccupancyClass'] in ['RES1', 'RES2', 'RES3A', 'RES3B', 'RES3C',
+ 'RES3D']:
+ WIDD = 'C' # residential
+ elif BIM['OccupancyClass'] == 'AGR1':
+ WIDD = 'D' # None
+ else:
+ WIDD = 'A' # Res/Comm
+
+ # Window area ratio
+ if BIM['WindowArea'] < 0.33:
+ WWR = 'low'
+ elif BIM['WindowArea'] < 0.5:
+ WWR = 'med'
+ else:
+ WWR = 'hig'
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ if BIM['NumberOfStories'] <= 2:
+ bldg_tag = 'S.ERB.L'
+ elif BIM['NumberOfStories'] <= 5:
+ bldg_tag = 'S.ERB.M'
+ else:
+ bldg_tag = 'S.ERB.H'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofCover = roof_cover,
+ WindowAreaRatio = WWR,
+ RoofDeckAttachmentM = MRDA,
+ Shutters = shutters,
+ WindDebrisClass=WIDD
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{roof_cover}." \
+ f"{int(shutters)}." \
+ f"{WIDD}." \
+ f"{MRDA}." \
+ f"{WWR}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
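
Reviewer note: `SERB_config` duplicates the SECB logic apart from the `S.ERB.*` tags. Worth flagging for both: the pre-2000 shutter assignment is stochastic, so the documented 46% compliance rate holds only in aggregate, not per building. A self-contained sanity check of that draw (only the 0.46 threshold is taken from the code):

```python
import random

random.seed(0)

# Isolate the pre-2000 WindBorneDebris shutters rule from the ruleset.
draws = [random.random() < 0.46 for _ in range(100_000)]
print(f'{sum(draws) / len(draws):.3f}')  # ~0.460; single buildings vary per run
```
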
diff --git a/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py
new file mode 100644
index 000000000..42f8a6407
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindSPMBRulesets.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import numpy as np
+import datetime
+
+
+def SPMB_config(BIM):
+ """
+ Rules to identify a HAZUS SPMB configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+ A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Roof Deck Age (~ Roof Quality)
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 50):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+
+ # shutters
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ # BOCA 1996 and earlier:
+ # Shutters were not required by code until the 2000 IBC. Before 2000, the
+ # percentage of commercial buildings that have shutters is assumed to be
+ # 46%. This value is based on a study on preparedness of small businesses
+ # for hurricane disasters, which says that in Sarasota County, 46% of
+ # business owners had taken action to wind-proof or flood-proof their
+ # facilities. Similarly, 46% of business owners reported boarding up their
+ # businesses before Hurricane Katrina, and compliance rates based on the
+ # Homeowners Survey data hover between 43 and 50 percent.
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ # Metal RDA
+ # 1507.2.8.1 High Wind Attachment.
+ # Underlayment applied in areas subject to high winds (Vasd greater
+ # than 110 mph as determined in accordance with Section 1609.3.1) shall
+ # be applied with corrosion-resistant fasteners in accordance with
+ # the manufacturer’s instructions. Fasteners are to be applied along
+ # the overlap not more than 36 inches on center.
+ if BIM['V_ult'] > 142:
+ MRDA = 'std' # standard
+ else:
+ MRDA = 'sup' # superior
+
+ if BIM['PlanArea'] <= 4000:
+ bldg_tag = 'S.PMB.S'
+ elif BIM['PlanArea'] <= 50000:
+ bldg_tag = 'S.PMB.M'
+ else:
+ bldg_tag = 'S.PMB.L'
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ RoofQuality = roof_quality,
+ RoofDeckAttachmentM = MRDA,
+ Shutters = shutters
+ ))
+
+ bldg_config = f"{bldg_tag}." \
+ f"{int(shutters)}." \
+ f"{roof_quality}." \
+ f"{MRDA}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
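Reviewer note: `SPMB_config` keys roof quality to the current date rather than a fixed year, so a building's 'god'/'por' label drifts as the ruleset ages. A condensed sketch of that cutoff together with the plan-area bins; the helper name is invented, the thresholds are from the code:

```python
import datetime


def spmb_sketch(year_built: int, plan_area: float) -> str:
    # Hypothetical condensed view of the SPMB_config age and area rules.
    cutoff = datetime.datetime.now().year - 50
    roof_quality = 'god' if year_built >= cutoff else 'por'
    if plan_area <= 4000:
        tag = 'S.PMB.S'
    elif plan_area <= 50000:
        tag = 'S.PMB.M'
    else:
        tag = 'S.PMB.L'
    return f'{tag} / roof {roof_quality}'


print(spmb_sketch(1990, 12_000))  # 'S.PMB.M / roof god' while 1990 is < 50 yrs old
```

Incidentally, `numpy` is imported by this new module but never used; the `ruff` check added in this PR may flag it.
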
diff --git a/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py
new file mode 100644
index 000000000..6d5fe338d
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindWMUHRulesets.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import datetime
+
+def WMUH_config(BIM):
+ """
+ Rules to identify a HAZUS WMUH configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+ A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Secondary Water Resistance (SWR)
+ SWR = 0 # Default
+ if year > 2000:
+ if BIM['RoofShape'] == 'flt':
+ SWR = 'null' # because SWR is not a question for flat roofs
+ elif BIM['RoofShape'] in ['gab','hip']:
+ SWR = int(random.random() < 0.6)
+ elif year > 1987:
+ if BIM['RoofShape'] == 'flt':
+ SWR = 'null' # because SWR is not a question for flat roofs
+ elif (BIM['RoofShape'] == 'gab') or (BIM['RoofShape'] == 'hip'):
+ if BIM['RoofSlope'] < 0.33:
+ SWR = int(True)
+ else:
+ SWR = int(BIM['AvgJanTemp'] == 'below')
+ else:
+ # year <= 1987
+ if BIM['RoofShape'] == 'flt':
+ SWR = 'null' # because SWR is not a question for flat roofs
+ else:
+ SWR = int(random.random() < 0.3)
+
+ # Roof cover & Roof quality
+ # Roof cover and quality do not apply to gable and hip roofs
+ if BIM['RoofShape'] in ['gab', 'hip']:
+ roof_cover = 'null'
+ roof_quality = 'null'
+ # NJ Building Code Section 1507 (in particular 1507.10 and 1507.12) address
+ # Built Up Roofs and Single Ply Membranes. However, the NJ Building Code
+ # only addresses installation and material standards of different roof
+ # covers, but not in what circumstance each must be used.
+ # SPMs started being used in the 1960s, but different types continued to be
+ # developed through the 1980s. Today, single ply membrane roofing is the
+ # most popular flat roof option. BURs have been used for over 100 years,
+ # and although they are still used today, they are used less than SPMs.
+ # Since there is no available ruleset to be taken from the NJ Building
+ # Code, the ruleset is based off this information.
+ # We assume that all flat roofs built before 1975 are BURs and all roofs
+ # built after 1975 are SPMs.
+ # Nothing in NJ Building Code or in the Hazus manual specifies what
+ # constitutes “good” and “poor” roof conditions, so the ruleset depends
+ # on the age of the roof and average lifespan of BUR and SPM roofs.
+ # We assume that the average lifespan of a BUR roof is 30 years and the
+ # average lifespan of a SPM is 35 years. Therefore, BURs more than 30
+ # years old are in poor condition, and SPMs more than 35 years old are
+ # in poor condition.
+ else:
+ if year >= 1975:
+ roof_cover = 'spm'
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 35):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+ else:
+ # year < 1975
+ roof_cover = 'bur'
+ if BIM['YearBuilt'] >= (datetime.datetime.now().year - 30):
+ roof_quality = 'god'
+ else:
+ roof_quality = 'por'
+
+ # Roof Deck Attachment (RDA)
+ # IRC 2009-2015:
+ # Requires 8d nails (with spacing 6”/12”) for sheathing thicknesses between
+ # ⅜”-1”, see Table 2304.10, Line 31. Fastener selection is contingent on
+ # thickness of sheathing in building codes.
+ # Wind Speed Considerations taken from Table 2304.6.1, Maximum Nominal
+ # Design Wind Speed, Vasd, Permitted For Wood Structural Panel Wall
+ # Sheathing Used to Resist Wind Pressures. Typical wall stud spacing is 16
+ # inches, according to table 2304.6.3(4). NJ code defines this with respect
+ # to exposures B and C only. These are mapped to HAZUS categories based on
+ # roughness length in the ruleset herein.
+ # The base rule was then extended to the exposures closest to suburban and
+ # light suburban, even though these are not considered by the code.
+ if year > 2009:
+ if BIM['TerrainRoughness'] >= 35: # suburban or light trees
+ if BIM['V_ult'] > 168.0:
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ else: # light suburban or open
+ if BIM['V_ult'] > 142.0:
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ # IRC 2000-2006:
+ # Table 2304.9.1, Line 31 of the 2006
+ # NJ IBC requires 8d nails (with spacing 6”/12”) for sheathing thicknesses
+ # of ⅞”-1”. Fastener selection is contingent on thickness of sheathing in
+ # building codes. Table 2308.10.1 outlines the required rating of approved
+ # uplift connectors, but does not specify requirements that require a
+ # change of connector at a certain wind speed.
+ # Thus, all RDAs are assumed to be 8d @ 6”/12”.
+ elif year > 2000:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ # BOCA 1996:
+ # The BOCA 1996 Building Code Requires 8d nails (with spacing 6”/12”) for
+ # roof sheathing thickness up to 1". See Table 2305.2, Section 4.
+ # Attachment requirements are given based on sheathing thickness, basic
+ # wind speed, and the mean roof height of the building.
+ elif year > 1996:
+ if (BIM['V_ult'] >= 103) and (BIM['MeanRoofHt'] >= 25.0):
+ RDA = '8s' # 8d @ 6"/6" 'D'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ # BOCA 1993:
+ # The BOCA 1993 Building Code Requires 8d nails (with spacing 6”/12”) for
+ # sheathing thicknesses of 19/32 inches or greater, and 6d nails (with
+ # spacing 6”/12”) for sheathing thicknesses of ½ inches or less.
+ # See Table 2305.2, Section 4.
+ elif year > 1993:
+ if BIM['SheathingThickness'] <= 0.5:
+ RDA = '6d' # 6d @ 6"/12" 'A'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+ else:
+ # year <= 1993
+ if BIM['SheathingThickness'] <= 0.5:
+ RDA = '6d' # 6d @ 6"/12" 'A'
+ else:
+ RDA = '8d' # 8d @ 6"/12" 'B'
+
+ # Roof-Wall Connection (RWC)
+ # IRC 2000-2015:
+ # 1507.2.8.1 High Wind Attachment. Underlayment applied in areas subject
+ # to high winds (Vasd greater than 110 mph as determined in accordance
+ # with Section 1609.3.1) shall be applied with corrosion-resistant
+ # fasteners in accordance with the manufacturer’s instructions. Fasteners
+ # are to be applied along the overlap not more than 36 inches on center.
+ # Underlayment installed where Vasd, in accordance with section 1609.3.1
+ # equals or exceeds 120 mph shall be attached in a grid pattern of 12
+ # inches between side laps with a 6-inch spacing at the side laps.
+ if year > 2000:
+ if BIM['V_ult'] > 142.0:
+ RWC = 'strap' # Strap
+ else:
+ RWC = 'tnail' # Toe-nail
+ # BOCA 1996 and earlier:
+ # There is no mention of straps or enhanced tie-downs of any kind in the
+ # BOCA codes, and there is no description of these adoptions in IBHS
+ # reports or the New Jersey Construction Code Communicator.
+ # Although there is no explicit information, it seems that hurricane straps
+ # really only came into effect in Florida after Hurricane Andrew (1992),
+ # and likely it took several years for these changes to happen. Because
+ # Florida is the leader in adopting hurricane protection measures into
+ # codes and because there is no mention of shutters or straps in the BOCA
+ # codes, it is assumed that New Jersey did not adopt these standards until
+ # the 2000 IBC.
+ else:
+ RWC = 'tnail' # Toe-nail
+
+ # Shutters
+ # IRC 2000-2015:
+ # 1609.1.2 Protection of Openings. In wind-borne debris regions, glazing in
+ # buildings shall be impact resistant or protected with an impact-resistant
+ # covering meeting the requirements of an approved impact-resistant
+ # standard.
+ # Exceptions: Wood structural panels with a minimum thickness of 7/16 of an
+ # inch and a maximum panel span of 8 feet shall be permitted for opening
+ # protection in buildings with a mean roof height of 33 feet or less that
+ # are classified as a Group R-3 or R-4 occupancy.
+ # Earlier IRC editions provide similar rules.
+ if year >= 2000:
+ shutters = BIM['WindBorneDebris']
+ # BOCA 1996 and earlier:
+ # Shutters were not required by code until the 2000 IBC. Before 2000, the
+ # percentage of commercial buildings that have shutters is assumed to be
+ # 46%. This value is based on a study on preparedness of small businesses
+ # for hurricane disasters, which says that in Sarasota County, 46% of
+ # business owners had taken action to wind-proof or flood-proof their
+ # facilities. Similarly, 46% of business owners reported boarding up their
+ # businesses before Hurricane Katrina, and compliance rates based on the
+ # Homeowners Survey data hover between 43 and 50 percent.
+ else:
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.46
+ else:
+ shutters = False
+
+ # Stories
+ # Buildings with more than 3 stories are mapped to the 3-story configuration
+ stories = min(BIM['NumberOfStories'], 3)
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ SecondaryWaterResistance = SWR,
+ RoofCover = roof_cover,
+ RoofQuality = roof_quality,
+ RoofDeckAttachmentW = RDA,
+ RoofToWallConnection = RWC,
+ Shutters = shutters
+ ))
+
+ bldg_config = f"W.MUH." \
+ f"{int(stories)}." \
+ f"{BIM['RoofShape']}." \
+ f"{roof_cover}." \
+ f"{roof_quality}." \
+ f"{SWR}." \
+ f"{RDA}." \
+ f"{RWC}." \
+ f"{int(shutters)}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
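The roof deck attachment logic above is the densest branch in `WMUH_config`. A condensed, self-contained restatement for reviewers (invented helper; years, wind speeds, and fastener labels are from the code; `MeanRoofHt` is fixed at 20 ft for brevity, and the identical 1993 and pre-1993 sheathing branches are merged):

```python
def wmuh_rda_sketch(year, terrain_roughness, v_ult, sheathing_thickness):
    # Condensed restatement of the RDA branch of WMUH_config.
    mean_roof_ht = 20.0
    if year > 2009:
        threshold = 168.0 if terrain_roughness >= 35 else 142.0
        return '8s' if v_ult > threshold else '8d'
    if year > 2000:
        return '8d'
    if year > 1996:
        return '8s' if v_ult >= 103 and mean_roof_ht >= 25.0 else '8d'
    # 1996 and earlier: fastener size follows sheathing thickness
    return '6d' if sheathing_thickness <= 0.5 else '8d'


for case in [(2015, 35, 170.0, 0.5), (2005, 15, 170.0, 0.5), (1990, 15, 120.0, 0.5)]:
    print(case, '->', wmuh_rda_sketch(*case))  # 8s, 8d, 6d respectively
```
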
diff --git a/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py b/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py
new file mode 100644
index 000000000..957ecbf9c
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/WindWSFRulesets.py
@@ -0,0 +1,284 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of the SimCenter Backend Applications
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# this file. If not, see .
+#
+# Contributors:
+# Adam Zsarnóczay
+# Kuanshi Zhong
+#
+# Based on rulesets developed by:
+# Karen Angeles
+# Meredith Lockhead
+# Tracy Kijewski-Correa
+
+import random
+import datetime
+
+def WSF_config(BIM):
+ """
+ Rules to identify a HAZUS WSF configuration based on BIM data
+
+ Parameters
+ ----------
+ BIM: dictionary
+ Information about the building characteristics.
+
+ Returns
+ -------
+ config: str
+ A string that identifies a specific configuration within this building
+ class.
+ """
+
+ year = BIM['YearBuilt'] # just for the sake of brevity
+
+ # Secondary Water Resistance (SWR)
+ # Minimum drainage recommendations are in place in NJ (See below).
+ # However, SWR indicates a code-plus practice.
+ SWR = False # Default in Reorganized Rulesets - WIND
+ if year > 2000:
+ # For buildings built after 2000, SWR is based on homeowner compliance
+ # data from NC Coastal Homeowner Survey (2017) to capture potential
+ # human behavior (% of sealed roofs in NC dataset).
+ SWR = random.random() < 0.6
+ elif year > 1983:
+ # CABO 1995:
+ # According to 903.2 in the 1995 CABO, for roofs with slopes between
+ # 2:12 and 4:12, an underlayment consisting of two layers of No. 15
+ # felt must be applied. In severe climates (less than or equal to 25
+ # degrees Fahrenheit average in January), these two layers must be
+ # cemented together.
+ # According to 903.3 in the 1995 CABO, roofs with slopes greater than
+ # or equal to 4:12 shall have an underlayment of not less than one ply
+ # of No. 15 felt.
+ #
+ # Similar rules are prescribed in CABO 1992, 1989, 1986, 1983
+ #
+ # Since low-slope roofs require two layers of felt, this is taken to
+ # be secondary water resistance. This ruleset is for asphalt shingles.
+ # Almost all other roof types require underlayment of some sort, but
+ # the ruleset is based on asphalt shingles because it is most
+ # conservative.
+ if BIM['RoofShape'] == 'flt': # note there is actually no 'flt'
+ SWR = True
+ elif BIM['RoofShape'] in ['gab','hip']:
+ if BIM['RoofSlope'] <= 0.17:
+ SWR = True
+ elif BIM['RoofSlope'] < 0.33:
+ SWR = (BIM['AvgJanTemp'] == 'below')
+
+ # Roof Deck Attachment (RDA)
+ # IRC codes:
+ # NJ code requires 8d nails (with spacing 6”/12”) for sheathing thicknesses
+ # between ⅜”-1” - see Table R602.3(1)
+ # Fastener selection is contingent on thickness of sheathing in building
+ # codes. Commentary for Table R602.3(1) indicates 8d nails with 6”/6”
+ # spacing (enhanced roof spacing) for ultimate wind speeds greater than
+ # a speed_lim. speed_lim depends on the year of construction
+ RDA = '6d' # Default (aka A) in Reorganized Rulesets - WIND
+ if year > 2000:
+ if year >= 2016:
+ # IRC 2015
+ speed_lim = 130.0 # mph
+ else:
+ # IRC 2000 - 2009
+ speed_lim = 100.0 # mph
+ if BIM['V_ult'] > speed_lim:
+ RDA = '8s' # 8d @ 6"/6" ('D' in the Reorganized Rulesets - WIND)
+ else:
+ RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND)
+ elif year > 1995:
+ if ((BIM['SheathingThickness'] >= 0.3125) and (BIM['SheathingThickness'] <= 0.5)):
+ RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND)
+ elif ((BIM['SheathingThickness'] >= 0.59375) and (BIM['SheathingThickness'] <= 1.125)):
+ RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND)
+ elif year > 1986:
+ if ((BIM['SheathingThickness'] >= 0.3125) and (BIM['SheathingThickness'] <= 0.5)):
+ RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND)
+ elif ((BIM['SheathingThickness'] >= 0.59375) and (BIM['SheathingThickness'] <= 1.0)):
+ RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND)
+ else:
+ # year <= 1986
+ if ((BIM['SheathingThickness'] >= 0.3125) and (BIM['SheathingThickness'] <= 0.5)):
+ RDA = '6d' # 6d @ 6"/12" ('A' in the Reorganized Rulesets - WIND)
+ elif ((BIM['SheathingThickness'] >= 0.625) and (BIM['SheathingThickness'] <= 1.0)):
+ RDA = '8d' # 8d @ 6"/12" ('B' in the Reorganized Rulesets - WIND)
+
+ # Roof-Wall Connection (RWC)
+ # IRC 2015
+ # "Assume all homes not having wind speed consideration are Toe Nail
+ # (regardless of year)
+ # For homes with wind speed consideration, 2015 IRC Section R802.11: no
+ # specific connection type, must resist uplift forces using various
+ # guidance documents, e.g., straps would be required (based on WFCM 2015);
+ # will assume that if classified as HazardProneRegion, then enhanced
+ # connection would be used.
+ if year > 2015:
+ if BIM['HazardProneRegion']:
+ RWC = 'strap' # Strap
+ else:
+ RWC = 'tnail' # Toe-nail
+ # IRC 2000-2009
+ # In Section R802.11.1 Uplift Resistance of the NJ 2009 IRC, roof
+ # assemblies which are subject to wind uplift pressures of 20 pounds per
+ # square foot or greater are required to have attachments that are capable
+ # of providing resistance, in this case assumed to be straps.
+ # Otherwise, the connection is assumed to be toe nail.
+ # CABO 1992-1995:
+ # 802.11 Roof Tie-Down: Roof assemblies subject to wind uplift pressures of
+ # 20 lbs per sq ft or greater shall have rafter or truss ties. The
+ # resulting uplift forces from the rafter or truss ties shall be
+ # transmitted to the foundation.
+ # Roof uplift pressure varies by wind speed, exposure category, building
+ # aspect ratio and roof height. For a reference building (9 ft tall in
+ # exposure B -- WSF1) analysis suggests that wind speeds in excess of
+ # 110 mph begin to generate pressures of 20 psf in high pressure zones of
+ # the roof. Thus 110 mph is used as the critical velocity.
+ elif year > 1992:
+ if BIM['V_ult'] > 110:
+ RWC = 'strap' # Strap
+ else:
+ RWC = 'tnail' # Toe-nail
+ # CABO 1989 and earlier
+ # There is no mention of straps or enhanced tie-downs in the CABO codes
+ # older than 1992, and there is no description of these adoptions in IBHS
+ # reports or the New Jersey Construction Code Communicator.
+ # Although there is no explicit information, it seems that hurricane straps
+ # really only came into effect in Florida after Hurricane Andrew (1992).
+ # Because Florida is the leader in adopting hurricane protection measures
+ # into codes and because there is no mention of shutters or straps in the
+ # CABO codes, it is assumed that all roof-wall connections for residential
+ # buildings are toe nails before 1992.
+ else:
+ # year <= 1992
+ RWC = 'tnail' # Toe-nail
+
+ # Shutters
+ # IRC 2000-2015:
+ # R301.2.1.2 in NJ IRC 2015 says protection of openings required for
+ # buildings located in WindBorneDebris regions, mentions impact-rated protection
+ # for glazing, impact-resistance for garage door glazed openings, and finally
+ # states that wood structural panels with a thickness > 7/16" and a
+ # span <8' can be used, as long as they are precut, attached to the framing
+ # surrounding the opening, and the attachments are resistant to corrosion
+ # and are able to resist component and cladding loads;
+ # Earlier IRC editions provide similar rules.
+ if year > 2000:
+ shutters = BIM['WindBorneDebris']
+ # CABO:
+ # Based on Human Subjects Data, roughly 45% of houses built in the 1980s
+ # and 1990s had entries that implied they had shutters on some or all of
+ # their windows. Therefore, 45% of houses from this period should be randomly
+ # assigned to have shutters.
+ # Data ranges checked:
+ # 1992 to 1995, 33/74 entries (44.59%) with shutters
+ # 1986 to 1992, 36/79 entries (45.57%) with shutters
+ # 1983 to 1986, 19/44 entries (43.18%) with shutters
+ else:
+ # year <= 2000
+ if BIM['WindBorneDebris']:
+ shutters = random.random() < 0.45
+ else:
+ shutters = False
+
+ # Garage
+ # As per IRC 2015:
+ # Garage door glazed opening protection for windborne debris shall meet the
+ # requirements of an approved impact-resisting standard or ANSI/DASMA 115.
+ # Exception: Wood structural panels with a thickness of not less than 7/16
+ # inch and a span of not more than 8 feet shall be permitted for opening
+ # protection. Panels shall be predrilled as required for the anchorage
+ # method and shall be secured with the attachment hardware provided.
+ # Permitted for buildings where the ultimate design wind speed is 180 mph
+ # or less.
+ #
+ # Average lifespan of a garage is 30 years, so garages that are not in
+ # WindBorneDebris (and therefore do not have any strength requirements) that
+ # are older than 30 years are considered to be weak, whereas those from the
+ # last 30 years are considered to be standard.
+ if BIM['Garage'] == -1:
+ # no garage data, using the default "standard"
+ garage = 'std'
+ shutters = 0 # HAZUS ties standard garage to w/o shutters
+ else:
+ if year > 2000:
+ if shutters:
+ if BIM['Garage'] < 1:
+ garage = 'no'
+ else:
+ garage = 'sup' # SFBC 1994
+ shutters = 1 # HAZUS ties SFBC 1994 to with shutters
+ else:
+ if BIM['Garage'] < 1:
+ garage = 'no' # None
+ else:
+ garage = 'std' # Standard
+ shutters = 0 # HAZUS ties standard garage to w/o shutters
+ elif year > (datetime.datetime.now().year - 30):
+ if BIM['Garage'] < 1:
+ garage = 'no' # None
+ else:
+ garage = 'std' # Standard
+ shutters = 0 # HAZUS ties standard garage to w/o shutters
+ else:
+ # year <= current year - 30
+ if BIM['Garage'] < 1:
+ garage = 'no' # None
+ else:
+ garage = 'wkd' # Weak
+ shutters = 0 # HAZUS ties weak garage to w/o shutters
+
+ # extend the BIM dictionary
+ BIM.update(dict(
+ SecondaryWaterResistance = SWR,
+ RoofDeckAttachmentW = RDA,
+ RoofToWallConnection = RWC,
+ Shutters = shutters,
+ Garage = garage
+ ))
+
+ # building configuration tag
+ bldg_config = f"W.SF." \
+ f"{int(min(BIM['NumberOfStories'],2))}." \
+ f"{BIM['RoofShape']}." \
+ f"{int(SWR)}." \
+ f"{RDA}." \
+ f"{RWC}." \
+ f"{garage}." \
+ f"{int(shutters)}." \
+ f"{int(BIM['TerrainRoughness'])}"
+
+ return bldg_config
+
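One subtlety in `WSF_config` worth flagging: the garage block can overwrite the `shutters` value chosen earlier (HAZUS ties standard and weak garages to no shutters, and the SFBC 1994 'sup' garage to shutters on). A condensed restatement of just that branch, with invented inputs; note the 30-year boundary is evaluated against the current date:

```python
import datetime


def wsf_garage_sketch(year, garage_field, shutters):
    # Condensed restatement of the Garage branch of WSF_config.
    if garage_field == -1:
        return 'std', 0  # no data: default standard, shutters forced off
    if year > 2000:
        if shutters:
            return ('no', shutters) if garage_field < 1 else ('sup', 1)
        return ('no', shutters) if garage_field < 1 else ('std', 0)
    if year > datetime.datetime.now().year - 30:
        return ('no', shutters) if garage_field < 1 else ('std', 0)
    return ('no', shutters) if garage_field < 1 else ('wkd', 0)


print(wsf_garage_sketch(2010, 1, True))  # ('sup', 1)
print(wsf_garage_sketch(1980, 1, True))  # ('wkd', 0): shutters turned back off
```
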
diff --git a/pelicun/tests/dl_calculation/rulesets/__init__.py b/pelicun/tests/dl_calculation/rulesets/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/dl_calculation/rulesets/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/maintenance/__init__.py b/pelicun/tests/maintenance/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/maintenance/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/maintenance/search_in_functions.py b/pelicun/tests/maintenance/search_in_functions.py
new file mode 100644
index 000000000..f989ebd7c
--- /dev/null
+++ b/pelicun/tests/maintenance/search_in_functions.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""Code inspection methods/functions."""
+
+from __future__ import annotations # noqa: I001
+from pathlib import Path
+
+import ast
+
+
+def visit_FunctionDef(
+ node: ast.FunctionDef,
+ filename: str,
+ search_string: str,
+ functions_with_string: list[str],
+) -> None:
+ """
+ Visit a function definition node and check if it contains the
+ search string.
+
+ Parameters
+ ----------
+ node: ast.FunctionDef
+ The AST node representing the function definition.
+ filename: str
+ The path to the Python file to be searched.
+ search_string: str
+ The string to search for within the function bodies.
+ functions_with_string: list[str]
+ The list to append function names that contain the search
+ string.
+
+ """
+ with Path(filename).open(encoding='utf-8') as f:
+ contents = f.read()
+
+ function_code = ast.get_source_segment(contents, node)
+ assert function_code is not None
+
+ if search_string in function_code:
+ functions_with_string.append(node.name)
+
+
+def find_functions_with_string(filename: str, search_string: str) -> list[str]:
+ """
+ Find functions in a Python file that contain a specific string in
+ their body.
+
+ Parameters
+ ----------
+ filename: str
+ The path to the Python file to be searched.
+ search_string: str
+ The string to search for within the function bodies.
+
+ Returns
+ -------
+ list[str]
+ A list of function names that contain the search string in
+ their bodies.
+
+ """
+ with Path(filename).open(encoding='utf-8') as file:
+ contents = file.read()
+ tree = ast.parse(contents, filename=filename)
+
+ functions_with_string: list[str] = []
+
+ for node in ast.walk(tree):
+ if isinstance(node, ast.FunctionDef):
+ visit_FunctionDef(node, filename, search_string, functions_with_string)
+
+ return functions_with_string
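
A usage sketch for the new maintenance helper; the sample source and search string are hypothetical, and the import assumes the `maintenance` package added in this PR is on the path:

```python
import tempfile
from pathlib import Path

from pelicun.tests.maintenance.search_in_functions import find_functions_with_string

source = '''
def mutates_bim(bim):
    bim.update({'RoofCover': 'bur'})

def reads_bim(bim):
    return bim['RoofCover']
'''

with tempfile.TemporaryDirectory() as tmp:
    path = Path(tmp) / 'sample.py'
    path.write_text(source, encoding='utf-8')
    print(find_functions_with_string(str(path), '.update('))  # ['mutates_bim']
```

Note the helper re-reads the file once per function inside `visit_FunctionDef`; fine for a maintenance script, but worth knowing before pointing it at a large tree.
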
diff --git a/pelicun/tests/test_base.py b/pelicun/tests/test_base.py
deleted file mode 100644
index 57f3f6d5b..000000000
--- a/pelicun/tests/test_base.py
+++ /dev/null
@@ -1,773 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018 Leland Stanford Junior University
-# Copyright (c) 2018 The Regents of the University of California
-#
-# This file is part of pelicun.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# 3. Neither the name of the copyright holder nor the names of its contributors
-# may be used to endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# You should have received a copy of the BSD 3-Clause License along with
-# pelicun. If not, see .
-#
-# Contributors:
-# Adam Zsarnóczay
-# John Vouvakis Manousakis
-
-"""
-These are unit and integration tests on the base module of pelicun.
-"""
-
-import os
-import io
-from contextlib import redirect_stdout
-import re
-import argparse
-import pytest
-import pandas as pd
-import numpy as np
-from pelicun import base
-
-# pylint: disable=missing-function-docstring
-
-# The tests maintain the order of definitions of the `base.py` file.
-
-
-def test_options_init():
- # Create a sample user_config_options dictionary
- user_config_options = {
- "Verbose": False,
- "Seed": None,
- "LogShowMS": False,
- "LogFile": 'test_log_file',
- "PrintLog": False,
- "DemandOffset": {"PFA": -1, "PFV": -1},
- "Sampling": {
- "SamplingMethod": "MonteCarlo",
- "SampleSize": 1000,
- "PreserveRawOrder": False,
- },
- "SamplingMethod": "MonteCarlo",
- "NonDirectionalMultipliers": {"ALL": 1.2},
- "EconomiesOfScale": {"AcrossFloors": True, "AcrossDamageStates": True},
- "RepairCostAndTimeCorrelation": 0.7,
- }
-
- # Create an Options object using the user_config_options
- # dictionary
- options = base.Options(user_config_options)
-
- # Check that the Options object was created successfully
- assert options is not None
-
- # Check that the values of the Options object attributes match the
- # values in the user_config_options dictionary
- assert options.sampling_method == 'MonteCarlo'
- assert options.units_file is None
- assert options.demand_offset == {'PFA': -1, 'PFV': -1}
- assert options.nondir_multi_dict == {'ALL': 1.2}
- assert options.rho_cost_time == 0.7
- assert options.eco_scale == {"AcrossFloors": True, "AcrossDamageStates": True}
-
- # Check that the Logger object attribute of the Options object is
- # initialized with the correct parameters
- assert options.log.verbose is False
- assert options.log.show_warnings is False
- assert options.log.log_show_ms is False
- assert os.path.basename(options.log.log_file) == 'test_log_file'
- assert options.log.print_log is False
-
- # remove the log file that was created
- os.remove('test_log_file')
-
- # test seed property and setter
- options.seed = 42
- assert options.seed == 42
-
-
-def test_nondir_multi():
- # Tests that the nondir_multi method of the Options class returns
- # the correct value for the specified EDP type. Tests that the
- # method uses the value associated with the 'ALL' key if the EDP
- # type is not present in the nondir_multi_dict attribute. Tests
- # that a ValueError is raised if the 'ALL' key is not present in the
- # nondir_multi_dict attribute.
-
- # Create an instance of the Options class with default values for all options,
- # except for the nondir_multi_dict attribute
- options = base.Options({'NonDirectionalMultipliers': {'PFA': 1.5, 'PFV': 1.00}})
-
- # Call the nondir_multi method with the specific EDP type as the argument
- assert options.nondir_multi('PFA') == 1.5
- assert options.nondir_multi('PFV') == 1.00
-
- # the 'ALL' key is automatically assigned to 1.2, even if the user
- # does not specify it
- assert 'ALL' in options.nondir_multi_dict
- assert options.nondir_multi('ALL') == 1.2
-
- # When an EDP type is not present in the nondir_multi_dict, the
- # value associated with 'ALL' is used.
- assert options.nondir_multi('spread love') == 1.2
-
- # We get an error if the 'ALL' key is not present, but this would
- # be unexpected.
- options.nondir_multi_dict.pop('ALL') # 'ALL' is gone now
- # the following will cause a ValueError
- with pytest.raises(ValueError):
- options.nondir_multi('Sa(T*)')
-
-
-def test_logger_init():
- # Test that the Logger object is initialized with the correct
- # attributes based on the input configuration
- log_config = {
- 'verbose': True,
- 'show_warnings': True,
- 'log_show_ms': False,
- 'log_file': 'log.txt',
- 'print_log': True,
- }
- log = base.Logger(**log_config)
- assert log.verbose is True
- assert log.show_warnings is True
- assert log.log_show_ms is False
- assert os.path.basename(log.log_file) == 'log.txt'
- assert log.print_log is True
- os.remove('log.txt')
-
- # test exceptions
- log_config = {
- 'verbose': True,
- 'show_warnings': True,
- 'log_show_ms': False,
- 'log_file': '/',
- 'print_log': True,
- }
- with pytest.raises((IsADirectoryError, FileExistsError, FileNotFoundError)):
- log = base.Logger(**log_config)
-
-
-def test_logger_msg():
- # Test that the msg method prints the correct message to the
- # console and log file
- log_config = {
- 'verbose': True,
- 'show_warnings': True,
- 'log_show_ms': True,
- 'log_file': 'log.txt',
- 'print_log': True,
- }
- log = base.Logger(**log_config)
- # Check that the message is printed to the console
- with io.StringIO() as buf, redirect_stdout(buf):
- log.msg('This is a message')
- output = buf.getvalue()
- assert 'This is a message' in output
- # Check that the message is written to the log file
- with open('log.txt', 'r', encoding='utf-8') as f:
- assert 'This is a message' in f.read()
- os.remove('log.txt')
-
-
-def test_logger_div():
- # We test the divider with and without the timestamp
- prepend_timestamp_args = (True, False)
- patterns = (
- r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9]:[0-9][0-9][0-9][0-9][0-9][0-9]\s-+',
- r'\s+-+',
- )
- for case, pattern_str in zip(prepend_timestamp_args, patterns):
- pattern = re.compile(pattern_str)
- # Test that the div method adds a divider as intended
- log_config = {
- 'verbose': True,
- 'show_warnings': True,
- 'log_show_ms': True,
- 'log_file': 'log.txt',
- 'print_log': True,
- }
- log = base.Logger(**log_config)
-
- # check console output
- with io.StringIO() as buf, redirect_stdout(buf):
- log.div(prepend_timestamp=case)
- output = buf.getvalue()
- assert pattern.match(output)
- # check log file
- with open('log.txt', 'r', encoding='utf-8') as f:
- # simply check that it is not empty
- assert f.read()
-
- # remove the created log file
- os.remove('log.txt')
-
-
-def test_print_system_info():
- # create a logger object
- log_config = {
- 'verbose': True,
- 'show_warnings': True,
- 'log_show_ms': True,
- 'log_file': 'log.txt',
- 'print_log': True,
- }
- log = base.Logger(**log_config)
-
- # run print_system_info and get the console output
- with io.StringIO() as buf, redirect_stdout(buf):
- log.print_system_info()
- output = buf.getvalue()
-
- # verify the contents of the output
- assert 'System Information:\n' in output
-
- # remove the created log file
- os.remove('log.txt')
-
-
-def test_update_vals():
- primary = {'b': {'c': 4, 'd': 5}, 'g': 7}
- update = {'a': 1, 'b': {'c': 3, 'd': 5}, 'f': 6}
- base.update_vals(update, primary, 'update', 'primary')
- assert primary == {'b': {'c': 4, 'd': 5}, 'g': 7} # unchanged
- assert update == {'a': 1, 'b': {'c': 3, 'd': 5}, 'f': 6, 'g': 7} # updated
- # note: key 'g' created, 'f' left there, 'c', 'd' updated, as intended
-
- primary = {'a': {'b': {'c': 4}}}
- update = {'a': {'b': {'c': 3}}}
- base.update_vals(update, primary, 'update', 'primary')
- assert primary == {'a': {'b': {'c': 4}}} # unchanged
- assert update == {'a': {'b': {'c': 3}}} # updated
-
- primary = {'a': {'b': 4}}
- update = {'a': {'b': {'c': 3}}}
- with pytest.raises(ValueError):
- base.update_vals(update, primary, 'update', 'primary')
-
- primary = {'a': {'b': 3}}
- update = {'a': 1, 'b': 2}
- with pytest.raises(ValueError):
- base.update_vals(update, primary, 'update', 'primary')
-
-
-def test_merge_default_config():
- # Test merging an empty user config with the default config
- user_config = {}
- merged_config = base.merge_default_config(user_config)
- assert merged_config == base.load_default_options()
-
- # Test merging a user config with a single option set
- user_config = {'Verbose': True}
- merged_config = base.merge_default_config(user_config)
- assert merged_config == {**base.load_default_options(), **user_config}
-
- # Test merging a user config with multiple options set
- user_config = {'Verbose': True, 'Seed': 12345}
- merged_config = base.merge_default_config(user_config)
- assert merged_config == {**base.load_default_options(), **user_config}
-
- # Test merging a user config with a nested option set
- user_config = {'NonDirectionalMultipliers': {'PFA': 1.5}}
- merged_config = base.merge_default_config(user_config)
- assert merged_config == {**base.load_default_options(), **user_config}
-
- # Test merging a user config with a nested option set and a top-level option set
- user_config = {'Verbose': True, 'NonDirectionalMultipliers': {'PFA': 1.5}}
- merged_config = base.merge_default_config(user_config)
- assert merged_config == {**base.load_default_options(), **user_config}
-
-
-def test_convert_dtypes():
- # All columns able to be converted
-
- # Input DataFrame
- df_input = pd.DataFrame({'a': ['1', '2', '3'], 'b': ['4.0', '5.5', '6.75']})
-
- # Expected DataFrame
- df_expected = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.5, 6.75]}).astype(
- {'a': 'int64', 'b': 'float64'}
- )
-
- # Convert data types
- df_result = base.convert_dtypes(df_input)
-
- pd.testing.assert_frame_equal(
- df_result, df_expected, check_index_type=False, check_column_type=False
- )
-
- # No columns that can be converted
-
- df_input = pd.DataFrame(
- {'a': ['foo', 'bar', 'baz'], 'b': ['2021-01-01', '2021-01-02', '2021-01-03']}
- )
- df_expected = df_input.copy()
- df_result = base.convert_dtypes(df_input)
- pd.testing.assert_frame_equal(
- df_result, df_expected, check_index_type=False, check_column_type=False
- )
-
- # Columns with mixed types
-
- df_input = pd.DataFrame(
- {
- 'a': ['1', '2', 'three'],
- 'b': ['4.0', '5.5', 'six'],
- 'c': ['7', 'eight', '9'],
- }
- )
- df_result = base.convert_dtypes(df_input)
- pd.testing.assert_frame_equal(
- df_result, df_input, check_index_type=False, check_column_type=False
- )
-
- # None values present
-
- df_input = pd.DataFrame({'a': [None, '2', '3'], 'b': ['4.0', None, '6.75']})
- df_expected = pd.DataFrame({'a': [np.nan, 2, 3], 'b': [4.0, np.nan, 6.75]})
- df_result = base.convert_dtypes(df_input)
- pd.testing.assert_frame_equal(
- df_result,
- df_expected,
- check_dtype=False,
- check_index_type=False,
- check_column_type=False,
- )
-
- # Empty dataframe
-
- df_input = pd.DataFrame({})
- df_expected = pd.DataFrame({})
- df_result = base.convert_dtypes(df_input)
- pd.testing.assert_frame_equal(
- df_result, df_expected, check_index_type=False, check_column_type=False
- )
-
-
-def test_convert_to_SimpleIndex():
- # Test conversion of a multiindex to a simple index following the
- # SimCenter dash convention
- index = pd.MultiIndex.from_tuples((('a', 'b'), ('c', 'd')))
- df = pd.DataFrame([[1, 2], [3, 4]], index=index)
- df.index.names = ['name_1', 'name_2']
- df_simple = base.convert_to_SimpleIndex(df, axis=0)
- assert df_simple.index.tolist() == ['a-b', 'c-d']
- assert df_simple.index.name == '-'.join(df.index.names)
-
- # Test inplace modification
- df_inplace = df.copy()
- base.convert_to_SimpleIndex(df_inplace, axis=0, inplace=True)
- assert df_inplace.index.tolist() == ['a-b', 'c-d']
- assert df_inplace.index.name == '-'.join(df.index.names)
-
- # Test conversion of columns
- index = pd.MultiIndex.from_tuples((('a', 'b'), ('c', 'd')))
- df = pd.DataFrame([[1, 2], [3, 4]], columns=index)
- df.columns.names = ['name_1', 'name_2']
- df_simple = base.convert_to_SimpleIndex(df, axis=1)
- assert df_simple.columns.tolist() == ['a-b', 'c-d']
- assert df_simple.columns.name == '-'.join(df.columns.names)
-
- # Test inplace modification
- df_inplace = df.copy()
- base.convert_to_SimpleIndex(df_inplace, axis=1, inplace=True)
- assert df_inplace.columns.tolist() == ['a-b', 'c-d']
- assert df_inplace.columns.name == '-'.join(df.columns.names)
-
- # Test invalid axis parameter
- with pytest.raises(ValueError):
- base.convert_to_SimpleIndex(df, axis=2)
-
-
-def test_convert_to_MultiIndex():
- # Test a case where the index needs to be converted to a MultiIndex
- data = pd.DataFrame({'A': (1, 2, 3), 'B': (4, 5, 6)})
- data.index = ('A-1', 'B-1', 'C-1')
- data_converted = base.convert_to_MultiIndex(data, axis=0, inplace=False)
- expected_index = pd.MultiIndex.from_arrays((('A', 'B', 'C'), ('1', '1', '1')))
- assert data_converted.index.equals(expected_index)
- # original data should not have changed
- assert data.index.equals(pd.Index(('A-1', 'B-1', 'C-1')))
-
- # Test a case where the index is already a MultiIndex
- data_converted = base.convert_to_MultiIndex(data_converted, axis=0, inplace=False)
- assert data_converted.index.equals(expected_index)
-
- # Test a case where the columns need to be converted to a MultiIndex
- data = pd.DataFrame({'A-1': (1, 2, 3), 'B-1': (4, 5, 6)})
- data_converted = base.convert_to_MultiIndex(data, axis=1, inplace=False)
- expected_columns = pd.MultiIndex.from_arrays((('A', 'B'), ('1', '1')))
- assert data_converted.columns.equals(expected_columns)
- # original data should not have changed
- assert data.columns.equals(pd.Index(('A-1', 'B-1')))
-
- # Test a case where the columns are already a MultiIndex
- data_converted = base.convert_to_MultiIndex(data_converted, axis=1, inplace=False)
- assert data_converted.columns.equals(expected_columns)
-
- # Test an invalid axis parameter
- with pytest.raises(ValueError):
- base.convert_to_MultiIndex(data_converted, axis=2, inplace=False)
-
- # inplace=True
- data = pd.DataFrame({'A': (1, 2, 3), 'B': (4, 5, 6)})
- data.index = ('A-1', 'B-1', 'C-1')
- base.convert_to_MultiIndex(data, axis=0, inplace=True)
- expected_index = pd.MultiIndex.from_arrays((('A', 'B', 'C'), ('1', '1', '1')))
- assert data.index.equals(expected_index)
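-
-    # The inverse of the dash convention, written out for clarity (an
-    # illustrative assumption, not the pelicun implementation): each
-    # label is split on '-' into the levels of the MultiIndex.
-    assert [tuple(s.split('-')) for s in ('A-1', 'B-1', 'C-1')] == [
-        ('A', '1'),
-        ('B', '1'),
-        ('C', '1'),
-    ]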
-
-
-def test_show_matrix():
- # Test with a simple 2D array
- arr = ((1, 2, 3), (4, 5, 6))
- base.show_matrix(arr)
-    assert True  # if the call above raised no exception, the test passes
-
- # Test with a DataFrame
- df = pd.DataFrame(((1, 2, 3), (4, 5, 6)), columns=('a', 'b', 'c'))
- base.show_matrix(df)
-    assert True  # if the call above raised no exception, the test passes
-
- # Test with use_describe=True
- base.show_matrix(arr, use_describe=True)
-    assert True  # if the call above raised no exception, the test passes
-
-
-def test__warning(capsys):
- msg = 'This is a test.'
- category = 'undefined'
- base._warning(msg, category, '{path to a file}', '{line number}')
- captured = capsys.readouterr()
- assert (
- captured.out
- == 'WARNING in {path to a file} at line {line number}\nThis is a test.\n\n'
- )
- base._warning(msg, category, 'some\\file', '{line number}')
- captured = capsys.readouterr()
- assert (
- captured.out
- == 'WARNING in some/file at line {line number}\nThis is a test.\n\n'
- )
- base._warning(msg, category, 'some/file', '{line number}')
- captured = capsys.readouterr()
- assert (
- captured.out
- == 'WARNING in some/file at line {line number}\nThis is a test.\n\n'
- )
-
-
-def test_describe():
- expected_idx = pd.Index(
- (
- 'count',
- 'mean',
- 'std',
- 'log_std',
- 'min',
- '0.1%',
- '2.3%',
- '10%',
- '15.9%',
- '50%',
- '84.1%',
- '90%',
- '97.7%',
- '99.9%',
- 'max',
- ),
- dtype='object',
- )
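-
-    # the non-standard percentiles correspond to one, two, and three
-    # standard deviations of a normal distribution: 15.9%/84.1% for
-    # +/- 1 std, 2.3%/97.7% for +/- 2 std, 0.1%/99.9% for +/- 3 std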
-
- # case 1:
- # passing a dataframe
-
- df = pd.DataFrame(((1.00, 2.00, 3.00), (4.00, 5.00, 6.00)), columns=['A', 'B', 'C'])
- desc = base.describe(df)
- assert np.all(desc.index == expected_idx)
- assert np.all(desc.columns == pd.Index(('A', 'B', 'C'), dtype='object'))
-
- # case 2:
- # passing a series
-
- sr = pd.Series((1.00, 2.00, 3.00), name='A')
- desc = base.describe(sr)
- assert np.all(desc.index == expected_idx)
- assert np.all(desc.columns == pd.Index(('A',), dtype='object'))
-
- # case 3:
- # passing a 2D numpy array
-
- desc = base.describe(np.array(((1.00, 2.00, 3.00), (4.00, 5.00, 6.00))))
- assert np.all(desc.index == expected_idx)
- assert np.all(desc.columns == pd.Index((0, 1, 2), dtype='object'))
-
- # case 4:
- # passing a 1D numpy array
-
- desc = base.describe(np.array((1.00, 2.00, 3.00)))
- assert np.all(desc.index == expected_idx)
- assert np.all(desc.columns == pd.Index((0,), dtype='object'))
-
-
-def test_str2bool():
- assert base.str2bool('True') is True
- assert base.str2bool('False') is False
- assert base.str2bool('yes') is True
- assert base.str2bool('no') is False
- assert base.str2bool('t') is True
- assert base.str2bool('f') is False
- assert base.str2bool('1') is True
- assert base.str2bool('0') is False
- assert base.str2bool(True) is True
- assert base.str2bool(False) is False
- with pytest.raises(argparse.ArgumentTypeError):
- base.str2bool('In most cases, it depends..')
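-
-    # A minimal sketch consistent with the assertions above (an
-    # assumption for illustration; the actual base.str2bool may
-    # differ): booleans pass through, recognized strings map to
-    # True/False, and anything else raises.
-    def str2bool_sketch(v):
-        if isinstance(v, bool):
-            return v
-        if v.lower() in ('yes', 'true', 't', 'y', '1'):
-            return True
-        if v.lower() in ('no', 'false', 'f', 'n', '0'):
-            return False
-        raise argparse.ArgumentTypeError('Boolean value expected.')
-
-    assert str2bool_sketch('t') is True
-    assert str2bool_sketch('0') is False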
-
-
-def test_float_or_None():
- # Test with a string that can be converted to a float
- assert base.float_or_None('3.14') == 3.14
-
- # Test with a string that represents an integer
- assert base.float_or_None('42') == 42.0
-
- # Test with a string that represents a negative number
- assert base.float_or_None('-3.14') == -3.14
-
- # Test with a string that can't be converted to a float
- assert base.float_or_None('hello') is None
-
- # Test with an empty string
- assert base.float_or_None('') is None
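-
-    # For reference, the try/except pattern these assertions imply
-    # (illustrative only, not necessarily the pelicun implementation):
-    def float_or_None_sketch(string):
-        try:
-            return float(string)
-        except ValueError:
-            return None
-
-    assert float_or_None_sketch('3.14') == 3.14
-    assert float_or_None_sketch('hello') is None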
-
-
-def test_int_or_None():
- # Test the case when the string can be converted to int
- assert base.int_or_None('123') == 123
- assert base.int_or_None('-456') == -456
- assert base.int_or_None('0') == 0
- assert base.int_or_None('+789') == 789
-
- # Test the case when the string cannot be converted to int
- assert base.int_or_None('abc') is None
- assert base.int_or_None('123a') is None
- assert base.int_or_None(' ') is None
- assert base.int_or_None('') is None
-
-
-def test_process_loc():
- # Test when string can be converted to an int
- assert base.process_loc('5', 10) == [
- 5,
- ]
-
- # Test when string is in the form 'low-high'
- assert base.process_loc('2-5', 10) == [2, 3, 4, 5]
-
- # Test when string is 'all'
- assert base.process_loc('all', 10) == list(range(1, 11))
-
- # Test when string is 'top'
- assert base.process_loc('top', 10) == [
- 10,
- ]
-
- # Test when string is 'roof'
- assert base.process_loc('roof', 10) == [
- 10,
- ]
-
- # Test when string cannot be converted to an int or recognized
- with pytest.raises(ValueError):
- base.process_loc('abc', 10)
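-
-    # A sketch of the parsing the cases above imply (an illustrative
-    # assumption, not the actual base.process_loc): 'low-high' ranges
-    # are inclusive, 'all' spans stories 1 through `num_stories`, and
-    # 'top'/'roof' map to the top story.
-    def process_loc_sketch(loc, num_stories):
-        try:
-            return [int(loc)]
-        except ValueError:
-            if '-' in loc:
-                low, high = loc.split('-')
-                return list(range(int(low), int(high) + 1))
-            if loc == 'all':
-                return list(range(1, num_stories + 1))
-            if loc in ('top', 'roof'):
-                return [num_stories]
-            raise
-
-    assert process_loc_sketch('2-5', 10) == [2, 3, 4, 5]
-    assert process_loc_sketch('roof', 10) == [10]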
-
-
-def test_run_input_specs():
- assert os.path.basename(base.pelicun_path) == 'pelicun'
-
-
-def test_dict_raise_on_duplicates():
- res = base.dict_raise_on_duplicates([('A', '1'), ('B', '2')])
- assert res == {'A': '1', 'B': '2'}
- with pytest.raises(ValueError):
- base.dict_raise_on_duplicates([('A', '1'), ('A', '2')])
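-
-    # a minimal sketch of such a hook (illustrative; this pattern is
-    # typically passed to json.load as object_pairs_hook):
-    def dict_raise_on_duplicates_sketch(pairs):
-        result = {}
-        for key, value in pairs:
-            if key in result:
-                raise ValueError(f'duplicate key: {key}')
-            result[key] = value
-        return result
-
-    assert dict_raise_on_duplicates_sketch([('A', '1'), ('B', '2')]) == {
-        'A': '1',
-        'B': '2',
-    }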
-
-
-def test_parse_units():
- # Test the default units are parsed correctly
- units = base.parse_units()
- assert isinstance(units, dict)
- expect = {
- "sec": 1.0,
- "minute": 60.0,
- "hour": 3600.0,
- "day": 86400.0,
- "m": 1.0,
- "mm": 0.001,
- "cm": 0.01,
- "km": 1000.0,
- "in": 0.0254,
- "inch": 0.0254,
- "ft": 0.3048,
- "mile": 1609.344,
- "m2": 1.0,
- "mm2": 1e-06,
- "cm2": 0.0001,
- "km2": 1000000.0,
- "in2": 0.00064516,
- "inch2": 0.00064516,
- "ft2": 0.09290304,
- "mile2": 2589988.110336,
- "m3": 1.0,
- "in3": 1.6387064e-05,
- "inch3": 1.6387064e-05,
- "ft3": 0.028316846592,
- "cmps": 0.01,
- "mps": 1.0,
- "mph": 0.44704,
- "inps": 0.0254,
- "inchps": 0.0254,
- "ftps": 0.3048,
- "mps2": 1.0,
- "inps2": 0.0254,
- "inchps2": 0.0254,
- "ftps2": 0.3048,
- "g": 9.80665,
- "kg": 1.0,
- "ton": 1000.0,
- "lb": 0.453592,
- "N": 1.0,
- "kN": 1000.0,
- "lbf": 4.4482179868,
- "kip": 4448.2179868,
- "kips": 4448.2179868,
- "Pa": 1.0,
- "kPa": 1000.0,
- "MPa": 1000000.0,
- "GPa": 1000000000.0,
- "psi": 6894.751669043338,
- "ksi": 6894751.669043338,
- "Mpsi": 6894751669.043338,
- "A": 1.0,
- "V": 1.0,
- "kV": 1000.0,
- "ea": 1.0,
- "unitless": 1.0,
- "rad": 1.0,
- "C": 1.0,
- "USD_2011": 1.0,
- "USD": 1.0,
- "loss_ratio": 1.0,
- "worker_day": 1.0,
- "EA": 1.0,
- "SF": 0.09290304,
- "LF": 0.3048,
- "TN": 1000.0,
- "AP": 1.0,
- "CF": 0.0004719474432,
- "KV": 1000.0,
- "J": 1.0,
- "MJ": 1000000.0,
- "test_two": 2.00,
- "test_three": 3.00,
- }
- for thing, value in units.items():
- assert thing in expect
- assert value == expect[thing]
-
- # Test that additional units are parsed correctly
- additional_units_file = (
- 'pelicun/tests/data/base/test_parse_units/additional_units_a.json'
- )
- units = base.parse_units(additional_units_file)
- assert isinstance(units, dict)
- assert 'year' in units
- assert units['year'] == 1.00
-
- # Test that an exception is raised if the additional units file is not found
- with pytest.raises(FileNotFoundError):
- units = base.parse_units('invalid/file/path.json')
-
- # Test that an exception is raised if the additional units file is
- # not a valid JSON file
- invalid_json_file = 'pelicun/tests/data/base/test_parse_units/invalid.json'
- with pytest.raises(Exception):
- units = base.parse_units(invalid_json_file)
-
- # Test that an exception is raised if a unit is defined twice in
- # the additional units file
- duplicate_units_file = 'pelicun/tests/data/base/test_parse_units/duplicate2.json'
- with pytest.raises(ValueError):
- units = base.parse_units(duplicate_units_file)
-
- # Test that an exception is raised if a unit conversion factor is not a float
- invalid_units_file = 'pelicun/tests/data/base/test_parse_units/not_float.json'
- with pytest.raises(TypeError):
- units = base.parse_units(invalid_units_file)
-
- # Test that we get an error if some first-level key does not point
- # to a dictionary
- invalid_units_file = 'pelicun/tests/data/base/test_parse_units/not_dict.json'
- with pytest.raises(ValueError):
- units = base.parse_units(invalid_units_file)
-
-
-def test_unit_conversion():
- # Test scalar conversion from feet to meters
- assert base.convert_units(1.00, 'ft', 'm') == 0.3048
-
- # Test list conversion from feet to meters
- feet_values = [1.0, 2.0, 3.0]
- meter_values = [0.3048, 0.6096, 0.9144]
- np.testing.assert_array_almost_equal(
- base.convert_units(feet_values, 'ft', 'm'), meter_values
- )
-
- # Test numpy array conversion from feet to meters
- feet_values = np.array([1.0, 2.0, 3.0])
- meter_values = np.array([0.3048, 0.6096, 0.9144])
- np.testing.assert_array_almost_equal(
- base.convert_units(feet_values, 'ft', 'm'), meter_values
- )
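-
-    # The conversions above reduce to the factor table exercised in
-    # test_parse_units (illustrative arithmetic, not pelicun code):
-    # value * factor[from_unit] / factor[to_unit]. For instance,
-    # 12 inches should equal 1 foot:
-    assert abs(12.0 * 0.0254 / 0.3048 - 1.0) < 1e-12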
-
- # Test conversion with explicit category
- assert base.convert_units(1.00, 'ft', 'm', category='length') == 0.3048
-
- # Test error handling for invalid input type
- with pytest.raises(TypeError) as excinfo:
- base.convert_units("one", 'ft', 'm')
- assert str(excinfo.value) == 'Invalid input type for `values`'
-
- # Test error handling for unknown unit
- with pytest.raises(ValueError) as excinfo:
- base.convert_units(1.00, 'xyz', 'm')
- assert str(excinfo.value) == 'Unknown unit `xyz`'
-
- # Test error handling for mismatched category
- with pytest.raises(ValueError) as excinfo:
- base.convert_units(1.00, 'ft', 'm', category='volume')
- assert str(excinfo.value) == 'Unknown unit: `ft`'
diff --git a/pelicun/tests/test_model.py b/pelicun/tests/test_model.py
deleted file mode 100644
index a0a6fea12..000000000
--- a/pelicun/tests/test_model.py
+++ /dev/null
@@ -1,2166 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2018 Leland Stanford Junior University
-# Copyright (c) 2018 The Regents of the University of California
-#
-# This file is part of pelicun.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# 3. Neither the name of the copyright holder nor the names of its contributors
-# may be used to endorse or promote products derived from this software without
-# specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-#
-# You should have received a copy of the BSD 3-Clause License along with
-# pelicun. If not, see <http://www.opensource.org/licenses/>.
-#
-# Contributors:
-# Adam Zsarnóczay
-# John Vouvakis Manousakis
-
-"""
-These are unit and integration tests on the model module of pelicun.
-"""
-
-import os
-import tempfile
-from copy import deepcopy
-import pytest
-import numpy as np
-import pandas as pd
-from pelicun import model
-from pelicun import assessment
-
-# pylint: disable=missing-function-docstring
-# pylint: disable=missing-class-docstring
-# pylint: disable=arguments-renamed
-
-# __ __ _ _ _
-# | \/ | ___| |_| |__ ___ __| |___
-# | |\/| |/ _ \ __| '_ \ / _ \ / _` / __|
-# | | | | __/ |_| | | | (_) | (_| \__ \
-# |_| |_|\___|\__|_| |_|\___/ \__,_|___/
-#
-# The following tests verify the methods of the objects of the module.
-
-
-class TestModelModule:
- @pytest.fixture
- def assessment_factory(self):
- def create_instance(verbose):
- x = assessment.Assessment()
- x.log.verbose = verbose
- return x
-
- return create_instance
-
- @pytest.fixture(params=[True, False])
- def assessment_instance(self, request, assessment_factory):
- return deepcopy(assessment_factory(request.param))
-
-
-class TestDemandModel(TestModelModule):
- @pytest.fixture
- def demand_model(self, assessment_instance):
- return deepcopy(assessment_instance.demand)
-
- @pytest.fixture
- def demand_model_with_sample(self, assessment_instance):
- mdl = assessment_instance.demand
- mdl.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DemandModel_load_sample/demand_sample_A.csv'
- )
- return deepcopy(mdl)
-
- @pytest.fixture
- def calibrated_demand_model(self, demand_model_with_sample):
- config = {
- "ALL": {
- "DistributionFamily": "normal",
- "AddUncertainty": 0.00,
- },
- "PID": {
- "DistributionFamily": "lognormal",
- "TruncateUpper": "0.06",
- },
- "SA": {
- "DistributionFamily": "empirical",
- },
- }
- demand_model_with_sample.calibrate_model(config)
- return deepcopy(demand_model_with_sample)
-
- @pytest.fixture
- def demand_model_with_sample_B(self, assessment_instance):
- mdl = assessment_instance.demand
- mdl.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DemandModel_load_sample/demand_sample_B.csv'
- )
- return deepcopy(mdl)
-
- @pytest.fixture
- def demand_model_with_sample_C(self, assessment_instance):
- mdl = assessment_instance.demand
- mdl.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DemandModel_load_sample/demand_sample_C.csv'
- )
- return deepcopy(mdl)
-
- @pytest.fixture
- def demand_model_with_sample_D(self, assessment_instance):
- mdl = assessment_instance.demand
- mdl.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DemandModel_load_sample/demand_sample_D.csv'
- )
- return deepcopy(mdl)
-
- def test_init(self, demand_model):
- assert demand_model.log_msg
- assert demand_model.log_div
-
- assert demand_model.marginal_params is None
- assert demand_model.correlation is None
- assert demand_model.empirical_data is None
- assert demand_model.units is None
- assert demand_model._RVs is None
- assert demand_model.sample is None
-
- def test_save_sample(self, demand_model_with_sample):
- # instantiate a temporary directory in memory
- temp_dir = tempfile.mkdtemp()
- # save the sample there
- demand_model_with_sample.save_sample(f'{temp_dir}/temp.csv')
- with open(f'{temp_dir}/temp.csv', 'r', encoding='utf-8') as f:
- contents = f.read()
- assert contents == (
- ',PFA-0-1,PFA-1-1,PID-1-1,SA_0.23-0-1\n'
- 'Units,inps2,inps2,rad,inps2\n'
- '0,158.62478,397.04389,0.02672,342.149\n'
- )
- res = demand_model_with_sample.save_sample(save_units=False)
- assert res.to_dict() == {
- ('PFA', '0', '1'): {0: 158.62478},
- ('PFA', '1', '1'): {0: 397.04389},
- ('PID', '1', '1'): {0: 0.02672},
- ('SA_0.23', '0', '1'): {0: 342.149},
- }
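-
-        # note the round trip with test_load_sample below: the saved
-        # values are expressed in the units of the Units row, e.g.
-        # 158.62478 inps2 * 0.0254 = 4.029069 mps2, which is the
-        # in-memory (SI) value seen in the loaded sample there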
-
- def test_load_sample(self, demand_model_with_sample, demand_model_with_sample_B):
- # retrieve the loaded sample and units
- obtained_sample = demand_model_with_sample.sample
- obtained_units = demand_model_with_sample.units
-
- obtained_sample_2 = demand_model_with_sample_B.sample
- obtained_units_2 = demand_model_with_sample_B.units
-
-        # demand_sample_A.csv and demand_sample_B.csv only differ in the
-        # headers, where the first includes a tag for the hazard
-        # level. Therefore, the two files are expected to result in the
-        # same `obtained_sample`.
-
- pd.testing.assert_frame_equal(
- obtained_sample,
- obtained_sample_2,
- check_index_type=False,
- check_column_type=False,
- )
- pd.testing.assert_series_equal(
- obtained_units,
- obtained_units_2,
- check_index_type=False,
- )
-
- # compare against the expected values for the sample
- expected_sample = pd.DataFrame(
- [
- [4.029069, 10.084915, 0.02672, 8.690585],
- ],
- columns=pd.MultiIndex.from_tuples(
- (
- ('PFA', '0', '1'),
- ('PFA', '1', '1'),
- ('PID', '1', '1'),
- ('SA_0.23', '0', '1'),
- ),
- names=('type', 'loc', 'dir'),
- ),
- index=[0],
- )
- pd.testing.assert_frame_equal(
- expected_sample,
- obtained_sample,
- check_index_type=False,
- check_column_type=False,
- )
-
- # compare against the expected values for the units
- expected_units = pd.Series(
- ('inps2', 'inps2', 'rad', 'inps2'),
- index=pd.MultiIndex.from_tuples(
- (
- ('PFA', '0', '1'),
- ('PFA', '1', '1'),
- ('PID', '1', '1'),
- ('SA_0.23', '0', '1'),
- ),
- names=['type', 'loc', 'dir'],
- ),
- name='Units',
- )
- pd.testing.assert_series_equal(
- expected_units,
- obtained_units,
- check_index_type=False,
- )
-
- def test_estimate_RID(self, demand_model_with_sample):
- demands = demand_model_with_sample.sample['PID']
- params = {'yield_drift': 0.01}
- res = demand_model_with_sample.estimate_RID(demands, params)
- assert list(res.columns) == [('RID', '1', '1')]
- assert (
- demand_model_with_sample.estimate_RID(demands, params, method='xyz') is None
- )
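-
-        # for reference, the default method follows the FEMA P-58
-        # piecewise relationship between peak (PID) and residual (RID)
-        # drift (stated here as an assumption about the implementation):
-        # RID = 0 if PID <= yield_drift,
-        # RID = 0.3 * (PID - yield_drift) up to 4 * yield_drift,
-        # RID = PID - 3 * yield_drift beyond that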
-
- def test_calibrate_model(self, calibrated_demand_model, demand_model_with_sample_C):
- assert calibrated_demand_model.marginal_params['Family'].to_list() == [
- 'normal',
- 'normal',
- 'lognormal',
- 'empirical',
- ]
- assert (
- calibrated_demand_model.marginal_params.at[
- ('PID', '1', '1'), 'TruncateUpper'
- ]
- == 0.06
- )
-
- def test_calibrate_model_censoring(
- self, calibrated_demand_model, demand_model_with_sample_C
- ):
- # with a config featuring censoring the RIDs
- config = {
- "ALL": {
- "DistributionFamily": "normal",
- "AddUncertainty": 0.00,
- },
- "PID": {
- "DistributionFamily": "lognormal",
- "CensorUpper": "0.05",
- },
- }
- demand_model_with_sample_C.calibrate_model(config)
-
- def test_calibrate_model_truncation(
- self, calibrated_demand_model, demand_model_with_sample_C
- ):
- # with a config that specifies a truncation limit smaller than
- # the samples
- config = {
- "ALL": {
- "DistributionFamily": "normal",
- "AddUncertainty": 0.00,
- },
- "PID": {
- "DistributionFamily": "lognormal",
- "TruncateUpper": "0.04",
- },
- }
- demand_model_with_sample_C.calibrate_model(config)
-
- def test_save_load_model_with_empirical(
- self, calibrated_demand_model, assessment_instance
- ):
- # a model that has empirical marginal parameters
- temp_dir = tempfile.mkdtemp()
- calibrated_demand_model.save_model(f'{temp_dir}/temp')
- assert os.path.exists(f'{temp_dir}/temp_marginals.csv')
- assert os.path.exists(f'{temp_dir}/temp_empirical.csv')
- assert os.path.exists(f'{temp_dir}/temp_correlation.csv')
-
- # Load model to a different DemandModel instance to verify
- new_demand_model = assessment_instance.demand
- new_demand_model.load_model(f'{temp_dir}/temp')
- pd.testing.assert_frame_equal(
- calibrated_demand_model.marginal_params,
- new_demand_model.marginal_params,
- atol=1e-4,
- check_index_type=False,
- check_column_type=False,
- )
- pd.testing.assert_frame_equal(
- calibrated_demand_model.correlation,
- new_demand_model.correlation,
- atol=1e-4,
- check_index_type=False,
- check_column_type=False,
- )
- pd.testing.assert_frame_equal(
- calibrated_demand_model.empirical_data,
- new_demand_model.empirical_data,
- atol=1e-4,
- check_index_type=False,
- check_column_type=False,
- )
-
- # # todo: this currently fails
- # def test_save_load_model_without_empirical(
- # self, demand_model_with_sample_C, assessment_instance
- # ):
- # # a model that does not have empirical marginal parameters
- # temp_dir = tempfile.mkdtemp()
- # config = {
- # "ALL": {
- # "DistributionFamily": "normal",
- # "AddUncertainty": 0.00,
- # },
- # "PID": {
- # "DistributionFamily": "lognormal",
- # "TruncateUpper": "0.04",
- # },
- # }
- # demand_model_with_sample_C.calibrate_model(config)
- # demand_model_with_sample_C.save_model(f'{temp_dir}/temp')
- # assert os.path.exists(f'{temp_dir}/temp_marginals.csv')
- # assert os.path.exists(f'{temp_dir}/temp_empirical.csv')
- # assert os.path.exists(f'{temp_dir}/temp_correlation.csv')
-
- # # Load model to a different DemandModel instance to verify
- # new_demand_model = assessment_instance.demand
- # new_demand_model.load_model(f'{temp_dir}/temp')
- # pd.testing.assert_frame_equal(
- # demand_model_with_sample_C.marginal_params,
- # new_demand_model.marginal_params,
- # )
- # pd.testing.assert_frame_equal(
- # demand_model_with_sample_C.correlation,
- # new_demand_model.correlation
- # )
- # pd.testing.assert_frame_equal(
- # demand_model_with_sample_C.empirical_data,
- # new_demand_model.empirical_data,
- # )
-
- def test_generate_sample_exceptions(self, demand_model):
- # generating a sample from a non calibrated model should fail
- with pytest.raises(ValueError):
- demand_model.generate_sample({"SampleSize": 3, 'PreserveRawOrder': False})
-
- def test_generate_sample(self, calibrated_demand_model):
- calibrated_demand_model.generate_sample(
- {"SampleSize": 3, 'PreserveRawOrder': False}
- )
-
- # get the generated demand sample
- res = calibrated_demand_model.save_sample(save_units=True)
- assert isinstance(res, tuple)
-
- obtained_sample, obtained_units = res
-
- # compare against the expected values for the sample
- expected_sample = pd.DataFrame(
- (
- (158.624160, 397.042985, 0.02672, 342.148783),
- (158.624160, 397.042985, 0.02672, 342.148783),
- (158.624160, 397.042985, 0.02672, 342.148783),
- ),
- columns=pd.MultiIndex.from_tuples(
- (
- ('PFA', '0', '1'),
- ('PFA', '1', '1'),
- ('PID', '1', '1'),
- ('SA_0.23', '0', '1'),
- ),
- names=('type', 'loc', 'dir'),
- ),
- index=pd.Index((0, 1, 2), dtype='object'),
- )
- pd.testing.assert_frame_equal(
- expected_sample,
- obtained_sample,
- check_exact=False,
- atol=1e-4,
- check_index_type=False,
- check_column_type=False,
- )
-
- # compare against the expected values for the units
- expected_units = pd.Series(
- ('inps2', 'inps2', 'rad', 'inps2'),
- index=pd.MultiIndex.from_tuples(
- (
- ('PFA', '0', '1'),
- ('PFA', '1', '1'),
- ('PID', '1', '1'),
- ('SA_0.23', '0', '1'),
- ),
- names=('type', 'loc', 'dir'),
- ),
- name='Units',
- )
- pd.testing.assert_series_equal(
- expected_units,
- obtained_units,
- check_index_type=False,
- )
-
- def test_generate_sample_with_demand_cloning(self, assessment_instance):
- # # used for debugging
- # assessment_instance = assessment.Assessment()
-
- demand_model = assessment_instance.demand
-
- mdl = assessment_instance.demand
- # contains PGV-0-1, PGV-1-1, PGV-2-1, and PGA-0-1
- # PGA-0-1 is not cloned.
- mdl.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DemandModel_generate_sample_with_demand_cloning/sample.csv'
- )
- demand_model.calibrate_model(
- {
- "ALL": {
- "DistributionFamily": "lognormal",
- },
- }
- )
- demand_model.generate_sample(
- {
- 'SampleSize': 1000,
- 'DemandCloning': {
- 'PGV-0-1': ['PGV-0-1', 'PGV-0-2', 'PGV-0-3'],
- 'PGV-1-1': ['PGV-1-1', 'PGV-1-2', 'PGV-1-3'],
- 'PGV-2-1': ['PGV-2-1', 'PGV-2-2', 'PGV-2-3'],
- 'not_present': ['X-0-0', 'Y-0-0', 'Z-0-0'],
- },
- }
- )
- # we'll just get a warning for the `not_present` entry
- assert demand_model.sample.columns.to_list() == [
- ('PGA', '0', '1'),
- ('PGV', '0', '1'),
- ('PGV', '0', '2'),
- ('PGV', '0', '3'),
- ('PGV', '1', '1'),
- ('PGV', '1', '2'),
- ('PGV', '1', '3'),
- ('PGV', '2', '1'),
- ('PGV', '2', '2'),
- ('PGV', '2', '3'),
- ]
- assert np.array_equal(
- demand_model.sample[('PGV', '0', '1')].values,
- demand_model.sample[('PGV', '0', '3')].values,
- )
- # exceptions
- # Duplicate entries in demand cloning configuration
- with pytest.raises(ValueError):
- demand_model.generate_sample(
- {
- 'SampleSize': 1000,
- 'DemandCloning': {
- 'PGV-0-1': ['PGV-0-1', 'PGV-0-2', 'PGV-0-3'],
- 'PGV-1-1': ['PGV-0-1', 'PGV-1-2', 'PGV-1-3'],
- 'PGV-2-1': ['PGV-0-1', 'PGV-2-2', 'PGV-2-3'],
- },
- }
- )
-
-
-class TestPelicunModel(TestModelModule):
- @pytest.fixture
- def pelicun_model(self, assessment_instance):
- return deepcopy(model.PelicunModel(assessment_instance))
-
- def test_init(self, pelicun_model):
- assert pelicun_model.log_msg
- assert pelicun_model.log_div
-
- def test_convert_marginal_params(self, pelicun_model):
- # one row, only Theta_0, no conversion
- marginal_params = pd.DataFrame(
- [['1.0']],
- columns=['Theta_0'],
- index=pd.MultiIndex.from_tuples(
- (('A', '0', '1'),), names=('cmp', 'loc', 'dir')
- ),
- )
- units = pd.Series(['ea'], index=marginal_params.index)
- arg_units = None
- res = pelicun_model.convert_marginal_params(marginal_params, units, arg_units)
-
- # >>> res
- # Theta_0
- # cmp loc dir
- # A 0 1 1.0
-
- assert 'Theta_0' in res.columns
- assert res.to_dict() == {'Theta_0': {('A', '0', '1'): 1.0}}
-
- # many rows, with conversions
- marginal_params = pd.DataFrame(
- [
- [np.nan, 1.0, np.nan, np.nan, np.nan, np.nan],
- ['normal', np.nan, 1.0, np.nan, -0.50, 0.50],
- ['lognormal', 1.0, 0.5, np.nan, 0.50, 1.50],
- ['uniform', 0.0, 10.0, np.nan, np.nan, np.nan],
- ],
- columns=[
- 'Family',
- 'Theta_0',
- 'Theta_1',
- 'Theta_2',
- 'TruncateLower',
- 'TruncateUpper',
- ],
- index=pd.MultiIndex.from_tuples(
- (
- ('A', '0', '1'),
- ('B', '0', '1'),
- ('C', '0', '1'),
- ('D', '0', '1'),
- ),
- names=('cmp', 'loc', 'dir'),
- ),
- )
- units = pd.Series(['ea', 'ft', 'in', 'in2'], index=marginal_params.index)
- arg_units = None
- res = pelicun_model.convert_marginal_params(marginal_params, units, arg_units)
-
- expected_df = pd.DataFrame(
- {
- 'Family': [np.nan, 'normal', 'lognormal', 'uniform'],
- 'Theta_0': [1.0000, np.nan, 0.0254, 0.0000],
- 'Theta_1': [np.nan, 1.000000, 0.500000, 0.0064516],
- 'Theta_2': [np.nan, np.nan, np.nan, np.nan],
- 'TruncateLower': [np.nan, -0.1524, 0.0127, np.nan],
- 'TruncateUpper': [np.nan, 0.1524, 0.0381, np.nan],
- },
- index=pd.MultiIndex.from_tuples(
- (
- ('A', '0', '1'),
- ('B', '0', '1'),
- ('C', '0', '1'),
- ('D', '0', '1'),
- ),
- names=('cmp', 'loc', 'dir'),
- ),
- )
-
- pd.testing.assert_frame_equal(
- expected_df, res, check_index_type=False, check_column_type=False
- )
-
- # a case with arg_units
- marginal_params = pd.DataFrame(
- [['500.0,400.00|20,10']],
- columns=['Theta_0'],
- index=pd.MultiIndex.from_tuples(
- (('A', '0', '1'),), names=('cmp', 'loc', 'dir')
- ),
- )
- units = pd.Series(['test_three'], index=marginal_params.index)
- arg_units = pd.Series(['test_two'], index=marginal_params.index)
- res = pelicun_model.convert_marginal_params(marginal_params, units, arg_units)
-
- # >>> res
- # Theta_0
- # cmp loc dir
- # A 0 1 750,600|40,20
-
- # note: '40,20' = '20,10' * 2.00 (test_two)
- # note: '750,600' = '500,400' * 3.00 / 2.00 (test_three/test_two)
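-
-        # spelled out as arithmetic (illustrative only):
-        assert 20 * 2.00 == 40 and 10 * 2.00 == 20
-        assert 500 * 3.00 / 2.00 == 750 and 400 * 3.00 / 2.00 == 600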
-
- expected_df = pd.DataFrame(
- {
- 'Theta_0': ['750,600|40,20'],
- },
- index=pd.MultiIndex.from_tuples(
- (('A', '0', '1'),),
- names=('cmp', 'loc', 'dir'),
- ),
- )
- pd.testing.assert_frame_equal(
- expected_df, res, check_index_type=False, check_column_type=False
- )
-
-
-class TestAssetModel(TestPelicunModel):
- @pytest.fixture
- def asset_model(self, assessment_instance):
- return deepcopy(assessment_instance.asset)
-
- def test_init(self, asset_model):
- assert asset_model.log_msg
- assert asset_model.log_div
- assert asset_model.cmp_marginal_params is None
- assert asset_model.cmp_units is None
- assert asset_model._cmp_RVs is None
- assert asset_model._cmp_sample is None
-
- def test_save_cmp_sample(self, asset_model):
- asset_model._cmp_sample = pd.DataFrame(
- {
- ('component_a', f'{i}', f'{j}', '0'): 8.0
- for i in range(1, 3)
- for j in range(1, 3)
- },
- index=range(10),
- columns=pd.MultiIndex.from_tuples(
- (
- ('component_a', f'{i}', f'{j}', '0')
- for i in range(1, 3)
- for j in range(1, 3)
- ),
- names=('cmp', 'loc', 'dir', 'uid'),
- ),
- )
-
- asset_model.cmp_units = pd.Series(
- data=['ea'], index=['component_a'], name='Units'
- )
-
- res = asset_model.save_cmp_sample()
- assert isinstance(res, pd.DataFrame)
-
- temp_dir = tempfile.mkdtemp()
- # save the sample there
- asset_model.save_cmp_sample(f'{temp_dir}/temp.csv')
-
- # load the component sample to a different AssetModel
- asmt = assessment.Assessment()
- asset_model = asmt.asset
- asset_model.load_cmp_sample(f'{temp_dir}/temp.csv')
-
- # also test loading sample to variables
- # (but we don't inspect them)
- _ = asset_model.save_cmp_sample(save_units=False)
- _, _ = asset_model.save_cmp_sample(save_units=True)
-
- def test_load_cmp_model_1(self, asset_model):
- cmp_marginals = pd.read_csv(
- 'pelicun/tests/data/model/test_AssetModel/CMP_marginals.csv',
- index_col=0,
- )
- asset_model.load_cmp_model({'marginals': cmp_marginals})
-
- expected_cmp_marginal_params = pd.DataFrame(
- {
- 'Theta_0': (8.0, 8.0, 8.0, 8.0, 8.0, 8.0),
- 'Blocks': (1, 1, 1, 1, 1, 1),
- },
- index=pd.MultiIndex.from_tuples(
- (
- ('component_a', '0', '1', '0'),
- ('component_a', '0', '2', '0'),
- ('component_a', '1', '1', '0'),
- ('component_a', '1', '2', '0'),
- ('component_a', '2', '1', '0'),
- ('component_a', '2', '2', '0'),
- ),
- names=('cmp', 'loc', 'dir', 'uid'),
- ),
- ).astype({'Theta_0': 'float64', 'Blocks': 'int64'})
-
- pd.testing.assert_frame_equal(
- expected_cmp_marginal_params,
- asset_model.cmp_marginal_params,
- check_index_type=False,
- check_column_type=False,
- check_dtype=False,
- )
-
- expected_cmp_units = pd.Series(data=['ea'], index=['component_a'], name='Units')
-
- pd.testing.assert_series_equal(
- expected_cmp_units,
- asset_model.cmp_units,
- check_index_type=False,
- )
-
- def test_load_cmp_model_2(self, asset_model):
- # component marginals utilizing the keywords '--', 'all', 'top', 'roof'
- cmp_marginals = pd.read_csv(
- 'pelicun/tests/data/model/test_AssetModel/CMP_marginals_2.csv',
- index_col=0,
- )
- asset_model._asmnt.stories = 4
- asset_model.load_cmp_model({'marginals': cmp_marginals})
-
- assert asset_model.cmp_marginal_params.to_dict() == {
- 'Theta_0': {
- ('component_a', '0', '1', '0'): 1.0,
- ('component_a', '0', '2', '0'): 1.0,
- ('component_a', '1', '1', '0'): 1.0,
- ('component_a', '1', '2', '0'): 1.0,
- ('component_a', '2', '1', '0'): 1.0,
- ('component_a', '2', '2', '0'): 1.0,
- ('component_a', '3', '1', '0'): 1.0,
- ('component_a', '3', '2', '0'): 1.0,
- ('component_b', '1', '1', '0'): 1.0,
- ('component_b', '2', '1', '0'): 1.0,
- ('component_b', '3', '1', '0'): 1.0,
- ('component_b', '4', '1', '0'): 1.0,
- ('component_c', '0', '1', '0'): 1.0,
- ('component_c', '1', '1', '0'): 1.0,
- ('component_c', '2', '1', '0'): 1.0,
- ('component_d', '4', '1', '0'): 1.0,
- ('component_e', '5', '1', '0'): 1.0,
- },
- 'Blocks': {
- ('component_a', '0', '1', '0'): 1,
- ('component_a', '0', '2', '0'): 1,
- ('component_a', '1', '1', '0'): 1,
- ('component_a', '1', '2', '0'): 1,
- ('component_a', '2', '1', '0'): 1,
- ('component_a', '2', '2', '0'): 1,
- ('component_a', '3', '1', '0'): 1,
- ('component_a', '3', '2', '0'): 1,
- ('component_b', '1', '1', '0'): 1,
- ('component_b', '2', '1', '0'): 1,
- ('component_b', '3', '1', '0'): 1,
- ('component_b', '4', '1', '0'): 1,
- ('component_c', '0', '1', '0'): 1,
- ('component_c', '1', '1', '0'): 1,
- ('component_c', '2', '1', '0'): 1,
- ('component_d', '4', '1', '0'): 1,
- ('component_e', '5', '1', '0'): 1,
- },
- }
-
- expected_cmp_units = pd.Series(
- data=['ea'] * 5,
- index=[f'component_{x}' for x in ('a', 'b', 'c', 'd', 'e')],
- name='Units',
- )
-
- pd.testing.assert_series_equal(
- expected_cmp_units,
- asset_model.cmp_units,
- check_index_type=False,
- )
-
- def test_load_cmp_model_csv(self, asset_model):
- # load by directly specifying the csv file
- cmp_marginals = 'pelicun/tests/data/model/test_AssetModel/CMP'
- asset_model.load_cmp_model(cmp_marginals)
-
- def test_load_cmp_model_exceptions(self, asset_model):
- cmp_marginals = pd.read_csv(
- 'pelicun/tests/data/model/test_AssetModel/CMP_marginals_invalid_loc.csv',
- index_col=0,
- )
- asset_model._asmnt.stories = 4
- with pytest.raises(ValueError):
- asset_model.load_cmp_model({'marginals': cmp_marginals})
-
- cmp_marginals = pd.read_csv(
- 'pelicun/tests/data/model/test_AssetModel/CMP_marginals_invalid_dir.csv',
- index_col=0,
- )
- asset_model._asmnt.stories = 4
- with pytest.raises(ValueError):
- asset_model.load_cmp_model({'marginals': cmp_marginals})
-
- def test_generate_cmp_sample(self, asset_model):
- asset_model.cmp_marginal_params = pd.DataFrame(
- {'Theta_0': (8.0, 8.0, 8.0, 8.0), 'Blocks': (1.0, 1.0, 1.0, 1.0)},
- index=pd.MultiIndex.from_tuples(
- (
- ('component_a', '1', '1', '0'),
- ('component_a', '1', '2', '0'),
- ('component_a', '2', '1', '0'),
- ('component_a', '2', '2', '0'),
- ),
- names=('cmp', 'loc', 'dir', 'uid'),
- ),
- )
-
- asset_model.cmp_units = pd.Series(
- data=['ea'], index=['component_a'], name='Units'
- )
-
- asset_model.generate_cmp_sample(sample_size=10)
-
- assert asset_model._cmp_RVs is not None
-
- expected_cmp_sample = pd.DataFrame(
- {
-                ('component_a', f'{i}', f'{j}', '0'): 8.0
- for i in range(1, 3)
- for j in range(1, 3)
- },
- index=range(10),
- columns=pd.MultiIndex.from_tuples(
- (
- ('component_a', f'{i}', f'{j}', '0')
- for i in range(1, 3)
- for j in range(1, 3)
- ),
- names=('cmp', 'loc', 'dir', 'uid'),
- ),
- )
-
- pd.testing.assert_frame_equal(
- expected_cmp_sample,
- asset_model.cmp_sample,
- check_index_type=False,
- check_column_type=False,
- )
-
- # currently this is not working
- # def test_load_cmp_model_block_weights(self, asset_model):
- # cmp_marginals = pd.read_csv(
- # 'pelicun/tests/data/model/test_AssetModel/CMP_marginals_block_weights.csv',
- # index_col=0,
- # )
- # asset_model.load_cmp_model({'marginals': cmp_marginals})
-
- def test_generate_cmp_sample_exceptions_1(self, asset_model):
- # without marginal parameters
- with pytest.raises(ValueError):
- asset_model.generate_cmp_sample(sample_size=10)
-
- def test_generate_cmp_sample_exceptions_2(self, asset_model):
- # without specifying sample size
- cmp_marginals = pd.read_csv(
- 'pelicun/tests/data/model/test_AssetModel/CMP_marginals.csv',
- index_col=0,
- )
- asset_model.load_cmp_model({'marginals': cmp_marginals})
- with pytest.raises(ValueError):
- asset_model.generate_cmp_sample()
- # but it should work if a demand sample is available
- asset_model._asmnt.demand.sample = np.empty(shape=(10, 2))
- asset_model.generate_cmp_sample()
-
-
-class TestDamageModel(TestPelicunModel):
- @pytest.fixture
- def cmp_sample_A(self):
- # This sample contains 8 units of B.10.31.001 assigned to
- # locations 1, 2 and directions 1, 2
- return pd.DataFrame(
- {
- ('B.10.31.001', f'{i}', f'{j}', '0'): 8.0
- for i in range(1, 3)
- for j in range(1, 3)
- },
- index=range(10),
- columns=pd.MultiIndex.from_tuples(
- (
- ('B.10.31.001', f'{i}', f'{j}', '0')
- for i in range(1, 3)
- for j in range(1, 3)
- ),
- names=('cmp', 'loc', 'dir', 'uid'),
- ),
- )
-
- @pytest.fixture
- def calibration_config_A(self):
- return {
- "ALL": {"DistributionFamily": "lognormal"},
- "PID": {
- "DistributionFamily": "lognormal",
- "TruncateLower": "",
- "TruncateUpper": "0.06",
- },
- }
-
- @pytest.fixture
- def damage_model(self, assessment_instance):
- return deepcopy(assessment_instance.damage)
-
- @pytest.fixture
- def damage_model_model_loaded(self, damage_model, cmp_sample_A):
- asmt = damage_model._asmnt
- asmt.get_default_data('damage_DB_FEMA_P58_2nd')
- asmt.asset._cmp_sample = cmp_sample_A
- damage_model.load_damage_model(['PelicunDefault/damage_DB_FEMA_P58_2nd.csv'])
- return deepcopy(damage_model)
-
- @pytest.fixture
- def damage_model_with_sample(self, assessment_instance):
- dmg_process = None
- assessment_instance.demand.sample = pd.DataFrame(
- np.column_stack(
- (
- np.array((4.94, 2.73, 4.26, 2.79)),
- np.array((4.74, 2.23, 4.14, 2.28)),
- np.array((0.02, 0.022, 0.021, 0.02)),
- np.array((0.02, 0.022, 0.021, 0.02)),
- )
- ),
- columns=pd.MultiIndex.from_tuples(
- (
- ('PFA', '1', '1'),
- ('PFA', '1', '2'),
- ('PID', '1', '1'),
- ('PID', '1', '2'),
- ),
- names=['type', 'loc', 'dir'],
- ),
- index=range(4),
- )
- assessment_instance.asset.cmp_marginal_params = pd.DataFrame(
- np.full((4, 2), 2.00),
- index=pd.MultiIndex.from_tuples(
- (
- ('cmp_1', '1', '1', '0'),
- ('cmp_1', '1', '2', '0'),
- ('cmp_2', '1', '1', '0'),
- ('cmp_2', '1', '2', '0'),
- ),
- names=['cmp', 'loc', 'dir', 'uid'],
- ),
- columns=('Theta_0', 'Blocks'),
- )
- assessment_instance.asset.generate_cmp_sample(sample_size=4)
- assessment_instance.damage.damage_params = pd.DataFrame(
- np.array(
- (
- (
- 1.0,
- 0.0,
- 'Peak Interstory Drift Ratio',
- 'ea',
- 0.0,
- None,
- 'lognormal',
- 1e-2,
- 0.40,
- None,
- 'lognormal',
- 2e-2,
- 0.40,
- None,
- 'lognormal',
- 3e-2,
- 0.40,
- None,
- 'lognormal',
- 4e-2,
- 0.40,
- ),
- (
- 1.0,
- 0.0,
- 'Peak Interstory Drift Ratio',
- 'ea',
- 0.0,
- None,
- 'lognormal',
- 1e-2,
- 0.40,
- None,
- 'lognormal',
- 2e-2,
- 0.40,
- None,
- 'lognormal',
- 3e-2,
- 0.40,
- None,
- 'lognormal',
- 4e-2,
- 0.40,
- ),
- )
- ),
- index=['cmp_1', 'cmp_2'],
- columns=pd.MultiIndex.from_tuples(
- (
- ('Demand', 'Directional'),
- ('Demand', 'Offset'),
- ('Demand', 'Type'),
- ('Demand', 'Unit'),
- ('Incomplete', ''),
- ('LS1', 'DamageStateWeights'),
- ('LS1', 'Family'),
- ('LS1', 'Theta_0'),
- ('LS1', 'Theta_1'),
- ('LS2', 'DamageStateWeights'),
- ('LS2', 'Family'),
- ('LS2', 'Theta_0'),
- ('LS2', 'Theta_1'),
- ('LS3', 'DamageStateWeights'),
- ('LS3', 'Family'),
- ('LS3', 'Theta_0'),
- ('LS3', 'Theta_1'),
- ('LS4', 'DamageStateWeights'),
- ('LS4', 'Family'),
- ('LS4', 'Theta_0'),
- ('LS4', 'Theta_1'),
- )
- ),
- )
- assessment_instance.damage.calculate(sample_size=4, dmg_process=dmg_process)
- assessment_instance.asset.cmp_units = pd.Series(
- ['ea'] * len(assessment_instance.damage.sample.columns),
- index=assessment_instance.damage.sample.columns,
- name='Units',
- dtype='object',
- )
- return deepcopy(assessment_instance.damage)
-
- def test_init(self, damage_model):
- assert damage_model.log_msg
- assert damage_model.log_div
-
- assert damage_model.damage_params is None
- assert damage_model.sample is None
-
- def test_save_load_sample(self, damage_model_with_sample, assessment_instance):
- # saving to a file
- temp_dir = tempfile.mkdtemp()
- # convert the sample's index from RangeIndex to int64 (to
- # match the datatype when it is loaded back; the contents are
- # the same)
- damage_model_with_sample.sample.index = (
- damage_model_with_sample.sample.index.astype('int64')
- )
- damage_model_with_sample.save_sample(f'{temp_dir}/damage_model_sample.csv')
- # loading from the file
- assessment_instance.damage.load_sample(f'{temp_dir}/damage_model_sample.csv')
- sample_from_file = assessment_instance.damage.sample
-
- # saving to a variable
- sample_from_variable = damage_model_with_sample.save_sample(save_units=False)
- pd.testing.assert_frame_equal(
- sample_from_file,
- sample_from_variable,
- check_index_type=False,
- check_column_type=False,
- )
- _, units_from_variable = damage_model_with_sample.save_sample(save_units=True)
- assert np.all(units_from_variable.to_numpy() == 'ea')
-
- def test_load_damage_model(self, damage_model_model_loaded):
- # should no longer be None
- assert damage_model_model_loaded.damage_params is not None
-
- assert list(damage_model_model_loaded.damage_params.columns) == [
- ("Demand", "Directional"),
- ("Demand", "Offset"),
- ("Demand", "Type"),
- ("Demand", "Unit"),
- ("Incomplete", ""),
- ("LS1", "DamageStateWeights"),
- ("LS1", "Family"),
- ("LS1", "Theta_0"),
- ("LS1", "Theta_1"),
- ("LS2", "DamageStateWeights"),
- ("LS2", "Family"),
- ("LS2", "Theta_0"),
- ("LS2", "Theta_1"),
- ("LS3", "DamageStateWeights"),
- ("LS3", "Family"),
- ("LS3", "Theta_0"),
- ("LS3", "Theta_1"),
- ("LS4", "DamageStateWeights"),
- ("LS4", "Family"),
- ("LS4", "Theta_0"),
- ("LS4", "Theta_1"),
- ]
-
- assert list(damage_model_model_loaded.damage_params.index) == ['B.10.31.001']
-
- contents = damage_model_model_loaded.damage_params.to_numpy().reshape(-1)
-
- expected_contents = np.array(
- [
- 1.0,
- 0.0,
- 'Peak Interstory Drift Ratio',
- 'unitless',
- 0.0,
- '0.950000 | 0.050000',
- 'lognormal',
- 0.04,
- 0.4,
- None,
- 'lognormal',
- 0.08,
- 0.4,
- None,
- 'lognormal',
- 0.11,
- 0.4,
- np.nan,
- None,
- np.nan,
- np.nan,
- ],
- dtype=object,
- )
-
-        # a direct array comparison is not practical here: the contents
-        # mix strings, None, and NaN, so the string entries are compared
-        # one by one and the null-like entries are skipped
- for x, y in zip(contents, expected_contents):
- if isinstance(x, str):
- assert x == y
- elif x is None:
- continue
- elif np.isnan(x):
- continue
-
- def test__create_dmg_RVs(self, damage_model_model_loaded):
- pg_batch = damage_model_model_loaded._get_pg_batches(block_batch_size=1)
-
- batches = pg_batch.index.get_level_values(0).unique()
- for PGB_i in batches:
- PGB = pg_batch.loc[PGB_i]
- # ensure the following works in each case
- damage_model_model_loaded._create_dmg_RVs(PGB)
-
- # check the output for a single case
- PGB_i = batches[-1]
- PGB = pg_batch.loc[PGB_i]
-
- capacity_RV_reg, lsds_RV_reg = damage_model_model_loaded._create_dmg_RVs(PGB)
-
- assert capacity_RV_reg is not None
- assert lsds_RV_reg is not None
-
- assert list(capacity_RV_reg._variables.keys()) == [
- 'FRG-B.10.31.001-2-2-0-1-1',
- 'FRG-B.10.31.001-2-2-0-1-2',
- 'FRG-B.10.31.001-2-2-0-1-3',
- ]
-
- assert not capacity_RV_reg._sets
-
- assert list(lsds_RV_reg._variables.keys()) == [
- 'LSDS-B.10.31.001-2-2-0-1-1',
- 'LSDS-B.10.31.001-2-2-0-1-2',
- 'LSDS-B.10.31.001-2-2-0-1-3',
- ]
-
- assert not lsds_RV_reg._sets
-
- # test capacity adjustment: *1.20
- scaling_specification = {'B.10.31.001-2-2': '*1.20'}
- (
- adjusted_capacity_RV_reg,
- lsds_RV_reg,
- ) = damage_model_model_loaded._create_dmg_RVs(PGB, scaling_specification)
- for limit_state in ('1', '2', '3'):
- val_initial = capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- val_scaling = adjusted_capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- assert val_scaling[0] == val_initial[0] * 1.20
- assert val_scaling[1] == val_initial[1]
-            assert pd.isna(val_initial[2]) and pd.isna(val_scaling[2])
-
- # test capacity adjustment: /1.20
- scaling_specification = {'B.10.31.001-2-2': '/1.20'}
- (
- adjusted_capacity_RV_reg,
- lsds_RV_reg,
- ) = damage_model_model_loaded._create_dmg_RVs(PGB, scaling_specification)
- for limit_state in ('1', '2', '3'):
- val_initial = capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- val_scaling = adjusted_capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- assert val_scaling[0] == val_initial[0] / 1.20
- assert val_scaling[1] == val_initial[1]
-            assert pd.isna(val_initial[2]) and pd.isna(val_scaling[2])
-
- # test capacity adjustment: +0.50
- scaling_specification = {'B.10.31.001-2-2': '+0.50'}
- (
- adjusted_capacity_RV_reg,
- lsds_RV_reg,
- ) = damage_model_model_loaded._create_dmg_RVs(PGB, scaling_specification)
- for limit_state in ('1', '2', '3'):
- val_initial = capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- val_scaling = adjusted_capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- assert val_scaling[0] == val_initial[0] + 0.50
- assert val_scaling[1] == val_initial[1]
-            assert pd.isna(val_initial[2]) and pd.isna(val_scaling[2])
-
- # test capacity adjustment: -0.05
- scaling_specification = {'B.10.31.001-2-2': '-0.05'}
- (
- adjusted_capacity_RV_reg,
- lsds_RV_reg,
- ) = damage_model_model_loaded._create_dmg_RVs(PGB, scaling_specification)
- for limit_state in ('1', '2', '3'):
- val_initial = capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- val_scaling = adjusted_capacity_RV_reg.RV[
- f'FRG-B.10.31.001-2-2-0-1-{limit_state}'
- ].theta
- assert val_scaling[0] == val_initial[0] - 0.05
- assert val_scaling[1] == val_initial[1]
-            assert pd.isna(val_initial[2]) and pd.isna(val_scaling[2])
-
- # edge cases: invalid capacity adjustment
- scaling_specification = {'B.10.31.001-2-2': 'import os; do_malicious_things'}
- with pytest.raises(ValueError):
- damage_model_model_loaded._create_dmg_RVs(PGB, scaling_specification)
-
- def test__generate_dmg_sample(self, damage_model_model_loaded):
- pg_batch = damage_model_model_loaded._get_pg_batches(block_batch_size=1)
- batches = pg_batch.index.get_level_values(0).unique()
- PGB_i = batches[-1]
- PGB = pg_batch.loc[PGB_i]
- sample_size = 10
-
- # test the _generate_dmg_sample method
- (
- capacity_sample,
- lsds_sample,
- ) = damage_model_model_loaded._generate_dmg_sample(sample_size, PGB)
-
- # run a few checks on the results of the method
-
- # note: the method generates random results. We avoid checking
- # those for equality, because subsequent changes in the code might
- # break the tests. The functionality of the uq module, which is
- # used to generate the random samples, is tested with a dedicated
- # test suite.
-
- for res in (capacity_sample, lsds_sample):
- assert res.shape == (10, 3)
-
- assert list(res.columns) == [
- ('B.10.31.001', '2', '2', '0', '1', '1'),
- ('B.10.31.001', '2', '2', '0', '1', '2'),
- ('B.10.31.001', '2', '2', '0', '1', '3'),
- ]
-
- assert list(res.index) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-
- def test__get_required_demand_type(self, damage_model_model_loaded):
- pg_batch = damage_model_model_loaded._get_pg_batches(block_batch_size=1)
- batches = pg_batch.index.get_level_values(0).unique()
- PGB_i = batches[-1]
- PGB = pg_batch.loc[PGB_i]
-
- EDP_req = damage_model_model_loaded._get_required_demand_type(PGB)
-
- assert EDP_req == {'PID-2-2': [('B.10.31.001', '2', '2', '0')]}
-
- def test__assemble_required_demand_data(
- self, damage_model_model_loaded, calibration_config_A
- ):
- demand_model = damage_model_model_loaded._asmnt.demand
- demand_model.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DamageModel_assemble_required_demand_data/'
- 'demand_sample.csv'
- )
- demand_model.calibrate_model(calibration_config_A)
-
- pg_batch = damage_model_model_loaded._get_pg_batches(block_batch_size=1)
- batches = pg_batch.index.get_level_values(0).unique()
-
- expected_demand_dicts = [
- {'PID-1-1': np.array([0.001])},
- {'PID-1-2': np.array([0.002])},
- {'PID-2-1': np.array([0.003])},
- {'PID-2-2': np.array([0.004])},
- ]
-
- for i, PGB_i in enumerate(batches):
- PGB = pg_batch.loc[PGB_i]
- EDP_req = damage_model_model_loaded._get_required_demand_type(PGB)
- demand_dict = damage_model_model_loaded._assemble_required_demand_data(
- EDP_req
- )
- assert demand_dict == expected_demand_dicts[i]
-
- def test__evaluate_damage_state_and_prepare_dmg_quantities(
- self,
- damage_model_model_loaded,
- calibration_config_A,
- ):
- damage_model = damage_model_model_loaded
- demand_model = damage_model_model_loaded._asmnt.demand
-
- demand_model.load_sample(
- 'pelicun/tests/data/model/'
- 'test_DamageModel__evaluate_damage_state_and_prepare_dmg_quantities/'
- 'demand_sample.csv'
- )
- # calibrate the model
- demand_model.calibrate_model(calibration_config_A)
-
- pg_batch = damage_model._get_pg_batches(block_batch_size=1)
- batches = pg_batch.index.get_level_values(0).unique()
-
- PGB_i = batches[-1]
- PGB = pg_batch.loc[PGB_i]
- EDP_req = damage_model._get_required_demand_type(PGB)
- demand_dict = damage_model._assemble_required_demand_data(EDP_req)
-
- sample_size = 10
- capacity_sample, lsds_sample = damage_model._generate_dmg_sample(
- sample_size, PGB
- )
-
- ds_sample = damage_model._evaluate_damage_state(
- demand_dict, EDP_req, capacity_sample, lsds_sample
- )
-
- qnt_sample = damage_model._prepare_dmg_quantities(ds_sample, dropzero=False)
-
- # note: the realized number of damage states is random, limiting
- # our assertions
- assert ds_sample.shape[0] == 10
- assert qnt_sample.shape[0] == 10
- assert list(qnt_sample.index) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
- assert list(ds_sample.index) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
-
- assert list(ds_sample.columns)[0] == ('B.10.31.001', '2', '2', '0', '1')
- assert list(qnt_sample.columns)[0] == ('B.10.31.001', '2', '2', '0', '0')
-
- def test__perform_dmg_task(self, assessment_instance):
- damage_model = assessment_instance.damage
-
- #
- # when CMP.B reaches DS1, CMP.A should be DS4
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [0, 0, 0],
- ('CMP.A', '1', '1', '1'): [0, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 1],
- ('CMP.B', '1', '1', '1'): [1, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.B": {"DS1": "CMP.A_DS4"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: 4, 1: 0, 2: 4},
- ('CMP.A', '1', '1', '1'): {0: 4, 1: 0, 2: 4},
- ('CMP.B', '1', '1', '0'): {0: 0, 1: 0, 2: 1},
- ('CMP.B', '1', '1', '1'): {0: 1, 1: 0, 2: 0},
- }
-
- #
- # when CMP.B reaches DS1, CMP.A should be NA (-1)
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [0, 0, 0],
- ('CMP.A', '1', '1', '1'): [0, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 1],
- ('CMP.B', '1', '1', '1'): [1, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.B": {"DS1": "CMP.A_NA"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: -1, 1: 0, 2: -1},
- ('CMP.A', '1', '1', '1'): {0: -1, 1: 0, 2: -1},
- ('CMP.B', '1', '1', '0'): {0: 0, 1: 0, 2: 1},
- ('CMP.B', '1', '1', '1'): {0: 1, 1: 0, 2: 0},
- }
-
- #
- # `-LOC` keyword
- # when CMP.B reaches DS1, CMP.A should be DS4
- # matching locations
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [0, 0, 0],
- ('CMP.A', '2', '1', '0'): [0, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 1],
- ('CMP.B', '2', '1', '0'): [1, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.B-LOC": {"DS1": "CMP.A_DS4"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: 0, 1: 0, 2: 4},
- ('CMP.A', '2', '1', '0'): {0: 4, 1: 0, 2: 0},
- ('CMP.B', '1', '1', '0'): {0: 0, 1: 0, 2: 1},
- ('CMP.B', '2', '1', '0'): {0: 1, 1: 0, 2: 0},
- }
-
- #
- # ALL keyword
- #
- # Whenever CMP.A reaches DS1, all other components should be
- # set to DS2.
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [1, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 0],
- ('CMP.C', '1', '1', '0'): [0, 0, 0],
- ('CMP.D', '1', '1', '0'): [0, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.A": {"DS1": "ALL_DS2"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: 1, 1: 0, 2: 0},
- ('CMP.B', '1', '1', '0'): {0: 2, 1: 0, 2: 0},
- ('CMP.C', '1', '1', '0'): {0: 2, 1: 0, 2: 0},
- ('CMP.D', '1', '1', '0'): {0: 2, 1: 0, 2: 0},
- }
-
- #
- # NA keyword
- #
- # NA translates to -1 representing nan
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [0, 0, 0],
- ('CMP.A', '1', '1', '1'): [0, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 1],
- ('CMP.B', '1', '1', '1'): [1, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.B": {"DS1": "CMP.A_NA"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: -1, 1: 0, 2: -1},
- ('CMP.A', '1', '1', '1'): {0: -1, 1: 0, 2: -1},
- ('CMP.B', '1', '1', '0'): {0: 0, 1: 0, 2: 1},
- ('CMP.B', '1', '1', '1'): {0: 1, 1: 0, 2: 0},
- }
-
- #
- # NA keyword combined with `-LOC`
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [0, 0, 0],
- ('CMP.A', '2', '1', '0'): [0, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 1],
- ('CMP.B', '2', '1', '0'): [1, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.B-LOC": {"DS1": "CMP.A_NA"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: 0, 1: 0, 2: -1},
- ('CMP.A', '2', '1', '0'): {0: -1, 1: 0, 2: 0},
- ('CMP.B', '1', '1', '0'): {0: 0, 1: 0, 2: 1},
- ('CMP.B', '2', '1', '0'): {0: 1, 1: 0, 2: 0},
- }
-
- #
- # NA keyword combined with `-LOC` and `ALL`
- #
-
- ds_sample = pd.DataFrame(
- {
- ('CMP.A', '1', '1', '0'): [0, 0, 1],
- ('CMP.A', '2', '1', '0'): [1, 0, 0],
- ('CMP.B', '1', '1', '0'): [0, 0, 0],
- ('CMP.B', '2', '1', '0'): [0, 0, 0],
- ('CMP.C', '1', '1', '0'): [0, 0, 0],
- ('CMP.C', '2', '1', '0'): [0, 0, 0],
- },
- dtype='int32',
- )
- ds_sample.columns.names = ['cmp', 'loc', 'dir', 'uid']
-
- dmg_process = {"1_CMP.A-LOC": {"DS1": "ALL_NA"}}
- for task in dmg_process.items():
- damage_model._perform_dmg_task(task, ds_sample)
- after = ds_sample
-
- assert after.to_dict() == {
- ('CMP.A', '1', '1', '0'): {0: 0, 1: 0, 2: 1},
- ('CMP.A', '2', '1', '0'): {0: 1, 1: 0, 2: 0},
- ('CMP.B', '1', '1', '0'): {0: 0, 1: 0, 2: -1},
- ('CMP.B', '2', '1', '0'): {0: -1, 1: 0, 2: 0},
- ('CMP.C', '1', '1', '0'): {0: 0, 1: 0, 2: -1},
- ('CMP.C', '2', '1', '0'): {0: -1, 1: 0, 2: 0},
- }
-
- def test__get_pg_batches_1(self, assessment_instance):
- damage_model = assessment_instance.damage
- asset_model = assessment_instance.asset
-
- asset_model.cmp_marginal_params = pd.DataFrame(
- np.full((4, 2), 2.00),
- index=pd.MultiIndex.from_tuples(
- (
- ('cmp_1', '1', '1', '0'),
- ('cmp_1', '1', '2', '0'),
- ('cmp_2', '1', '1', '0'),
- ('cmp_2', '1', '2', '0'),
- ),
- names=['cmp', 'loc', 'dir', 'uid'],
- ),
- columns=('Theta_0', 'Blocks'),
- )
-
- damage_model.damage_params = pd.DataFrame(
- np.empty(2), index=('cmp_1', 'cmp_2'), columns=['ID']
- )
-
- df_1 = damage_model._get_pg_batches(1)
- assert [i[0] for i in df_1.index] == [1, 2, 3, 4]
-
- df_4 = damage_model._get_pg_batches(4)
- assert [i[0] for i in df_4.index] == [1, 1, 2, 2]
-
- df_8 = damage_model._get_pg_batches(8)
- assert [i[0] for i in df_8.index] == [1, 1, 1, 1]
-
- def test__get_pg_batches_2(self, damage_model_model_loaded):
- # make sure that the method works for different batch sizes
- for i in (1, 4, 8, 10, 100):
- damage_model_model_loaded._get_pg_batches(block_batch_size=i)
-
- # verify the result is correct for certain cases
- res = damage_model_model_loaded._get_pg_batches(block_batch_size=1)
- expected_res = pd.DataFrame(
- np.array((1, 1, 1, 1)),
- index=pd.MultiIndex.from_tuples(
- (
- (1, 'B.10.31.001', '1', '1', '0'),
- (2, 'B.10.31.001', '1', '2', '0'),
- (3, 'B.10.31.001', '2', '1', '0'),
- (4, 'B.10.31.001', '2', '2', '0'),
- ),
- names=('Batch', 'cmp', 'loc', 'dir', 'uid'),
- ),
- columns=('Blocks',),
- ).astype('Int64')
-
- pd.testing.assert_frame_equal(
- expected_res, res, check_index_type=False, check_column_type=False
- )
-
- res = damage_model_model_loaded._get_pg_batches(block_batch_size=1000)
- expected_res = pd.DataFrame(
- np.array((1, 1, 1, 1)),
- index=pd.MultiIndex.from_tuples(
- (
- (1, 'B.10.31.001', '1', '1', '0'),
- (1, 'B.10.31.001', '1', '2', '0'),
- (1, 'B.10.31.001', '2', '1', '0'),
- (1, 'B.10.31.001', '2', '2', '0'),
- ),
- names=('Batch', 'cmp', 'loc', 'dir', 'uid'),
- ),
- columns=('Blocks',),
- ).astype('Int64')
-
- pd.testing.assert_frame_equal(
- expected_res, res, check_index_type=False, check_column_type=False
- )
-
- def test_calculate(self, damage_model_with_sample):
- # note: Due to inherent randomness, we can't assert the actual
- # values of this result
-        assert np.nanmin(damage_model_with_sample.sample.values) >= 0.00
-        assert np.nanmax(damage_model_with_sample.sample.values) <= 2.00
-
- def test_calculate_multilinear_CDF(self, damage_model):
- # # used for debugging
- # assessment_instance = assessment.Assessment()
- # damage_model = assessment_instance.damage
-
- demand_model = damage_model._asmnt.demand
- assessment_instance = damage_model._asmnt
- asset_model = assessment_instance.asset
-
-        # A damage calculation test utilizing a multilinear CDF RV for
-        # the capacity.
-
- sample_size = 1000
-
- # define the demand
- conversion_factor = assessment_instance.unit_conversion_factors['inps2']
- demand_model.sample = pd.DataFrame(
- np.full(sample_size, 0.50 * conversion_factor),
- columns=(('PGV', '0', '1'),),
- )
-
- # Define the component in the asset model
- asset_model.cmp_marginal_params = pd.DataFrame(
- {
- 'Theta_0': (1.0,),
- 'Blocks': (1,),
- },
- index=pd.MultiIndex.from_tuples(
- (('test_component', '0', '1', '0'),),
- names=('cmp', 'loc', 'dir', 'uid'),
- ),
- )
- # generate component samples
- asset_model.generate_cmp_sample()
-
- # define fragility curve with multilinear_CDF
- damage_model.load_damage_model(
- [
- 'pelicun/tests/data/model/'
- 'test_DamageModel_calculate_multilinear_CDF/'
- 'damage_model.csv'
- ]
- )
-
- # calculate damage
- damage_model.calculate(sample_size)
-
- res = damage_model.sample.value_counts()
- assert res.to_dict() == {(1.0, 0.0): 750, (0.0, 1.0): 250}
-
-
-class TestLossModel(TestPelicunModel):
- @pytest.fixture
- def loss_model(self, assessment_instance):
- return deepcopy(model.LossModel(assessment_instance))
-
- def test_init(self, loss_model):
- assert loss_model.log_msg
- assert loss_model.log_div
-
- assert loss_model.sample is None
- assert loss_model.loss_type == 'Generic'
-
- def test_load_sample_save_sample(self, loss_model):
- loss_model.loss_params = pd.DataFrame(
- (
- (
- "normal",
- None,
- "25704,17136|5,20",
- 0.390923,
- "USD_2011",
- 0.0,
- "1 EA",
- ),
- (
- "normal",
- 0.0,
- "22.68,15.12|5,20",
- 0.464027,
- "worker_day",
- 0.0,
- "1 EA",
- ),
- ),
- index=pd.MultiIndex.from_tuples(
- (("B.10.41.001a", "Cost"), ("B.10.41.001a", "Time"))
- ),
- columns=pd.MultiIndex.from_tuples(
- (
- ("DS1", "Family"),
- ("DS1", "LongLeadTime"),
- ("DS1", "Theta_0"),
- ("DS1", "Theta_1"),
- ("DV", "Unit"),
- ("Incomplete", ""),
- ("Quantity", "Unit"),
- )
- ),
- )
-
- sample = pd.DataFrame(
- (
- (100.00, 1.00),
- (100.00, 1.00),
- ),
- index=(0, 1),
- columns=pd.MultiIndex.from_tuples(
- (
- ("Cost", "B.10.41.001a", "B.10.41.001a", "1", "1", "1"),
- ("Time", "B.10.41.001a", "B.10.41.001a", "1", "1", "1"),
- ),
- names=("dv", "loss", "dmg", "ds", "loc", "dir"),
- ),
- )
-
- loss_model.load_sample(sample)
-
- pd.testing.assert_frame_equal(
- sample,
- loss_model.sample,
- check_index_type=False,
- check_column_type=False,
- )
-
- output = loss_model.save_sample(None)
- output.index = output.index.astype('int64')
-
- pd.testing.assert_frame_equal(
- sample, output, check_index_type=False, check_column_type=False
- )
-
- def test_load_model(self, loss_model):
- data_path_1 = pd.DataFrame(
- ((0, "1 EA", "USD_2011", 10000000.00), (0, "1 EA", "worker_day", 12500)),
- columns=pd.MultiIndex.from_tuples(
- (
- ("Incomplete", None),
- ("Quantity", "Unit"),
- ("DV", "Unit"),
- ("DS1", "Theta_0"),
- )
- ),
- index=pd.MultiIndex.from_tuples(
- (
- ("replacement", "Cost"),
- ("replacement", "Time"),
- )
- ),
- )
- data_path_2 = 'PelicunDefault/loss_repair_DB_FEMA_P58_2nd.csv'
-
- mapping_path = pd.DataFrame(
- (("B.10.31.001"), ("D.50.92.033k")),
- columns=["Generic"],
- index=["DMG-cmp_1", "DMG-cmp_2"],
- )
-
- assert loss_model.loss_map is None
- assert loss_model.loss_params is None
-
- loss_model.load_model([data_path_1, data_path_2], mapping_path)
-
- assert loss_model.loss_map.to_dict() == {
- 'Driver': {0: ('DMG', 'cmp_1'), 1: ('DMG', 'cmp_2')},
- 'Consequence': {0: 'B.10.31.001', 1: 'D.50.92.033k'},
- }
- cmp_ids = loss_model.loss_params.index.get_level_values(0).unique()
- assert "B.10.31.001" in cmp_ids
- assert "D.50.92.033k" in cmp_ids
-
- def test_aggregate_losses(self, loss_model):
- with pytest.raises(NotImplementedError):
- loss_model.aggregate_losses()
-
- def test__generate_DV_sample(self, loss_model):
- with pytest.raises(NotImplementedError):
- loss_model._generate_DV_sample(None, None)
-
-
-class TestRepairModel(TestPelicunModel):
- @pytest.fixture
- def repair_model(self, assessment_instance):
- return deepcopy(assessment_instance.repair)
-
- @pytest.fixture
- def loss_params_A(self):
- return pd.DataFrame(
- (
- (
- "normal",
- None,
- "25704,17136|5,20",
- 0.390923,
- "USD_2011",
- 0.0,
- "1 EA",
- ),
- (
- "normal",
- 0.0,
- "22.68,15.12|5,20",
- 0.464027,
- "worker_day",
- 0.0,
- "1 EA",
- ),
- ),
- index=pd.MultiIndex.from_tuples(
- (("some.test.component", "Cost"), ("some.test.component", "Time"))
- ),
- columns=pd.MultiIndex.from_tuples(
- (
- ("DS1", "Family"),
- ("DS1", "LongLeadTime"),
- ("DS1", "Theta_0"),
- ("DS1", "Theta_1"),
- ("DV", "Unit"),
- ("Incomplete", ""),
- ("Quantity", "Unit"),
- )
- ),
- )
-
- def test_init(self, repair_model):
- assert repair_model.log_msg
- assert repair_model.log_div
-
- assert repair_model.sample is None
- assert repair_model.loss_type == 'Repair'
-
- def test__create_DV_RVs(self, repair_model, loss_params_A):
- repair_model.loss_params = loss_params_A
-
- repair_model.loss_map = pd.DataFrame(
- ((("DMG", "some.test.component"), "some.test.component"),),
- columns=("Driver", "Consequence"),
- )
-
- case_list = pd.MultiIndex.from_tuples(
- (
- ("some.test.component", "1", "1", "0", "0"),
- ("some.test.component", "2", "2", "0", "1"),
- ("some.test.component", "3", "1", "0", "1"),
- ),
- names=("cmp", "loc", "dir", "uid", "ds"),
- )
-
- rv_reg = repair_model._create_DV_RVs(case_list)
- assert list(rv_reg.RV.keys()) == [
- 'Cost-0-1-2-2-0',
- 'Time-0-1-2-2-0',
- 'Cost-0-1-3-1-0',
- 'Time-0-1-3-1-0',
- ]
- rvs = list(rv_reg.RV.values())
- for rv in rvs:
- print(rv.theta)
- assert rv.distribution == 'normal'
- np.testing.assert_array_equal(rvs[0].theta, np.array((1.00, 0.390923, np.nan)))
- np.testing.assert_array_equal(rvs[1].theta, np.array((1.00, 0.464027, np.nan)))
- np.testing.assert_array_equal(rvs[2].theta, np.array((1.00, 0.390923, np.nan)))
- np.testing.assert_array_equal(rvs[3].theta, np.array((1.00, 0.464027, np.nan)))
-
- def test__calc_median_consequence(self, repair_model, loss_params_A):
- repair_model.loss_params = loss_params_A
-
- repair_model.loss_map = pd.DataFrame(
- ((("DMG", "some.test.component"), "some.test.component"),),
- columns=("Driver", "Consequence"),
- )
-
- eco_qnt = pd.DataFrame(
- (
- (10.00, 0.00),
- (0.00, 10.00),
- ),
- columns=pd.MultiIndex.from_tuples(
- (("some.test.component", "0"), ("some.test.component", "1")),
- names=["cmp", "ds"],
- ),
- )
-
- medians = repair_model._calc_median_consequence(eco_qnt)
- assert medians['Cost'].to_dict() == {(0, '1'): {0: 25704.0, 1: 22848.0}}
- assert medians['Time'].to_dict() == {(0, '1'): {0: 22.68, 1: 20.16}}
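For context on the expected medians above: the consequence parameter `"25704,17136|5,20"` defines a multilinear median with anchors (5 EA, 25704) and (20 EA, 17136), clamped outside that range. The damage state `'1'` quantity is 0 in the first realization, which clamps to 25704, and 10 in the second, which interpolates to 25704 + (17136 - 25704) * (10 - 5) / 15 = 22848. The `Time` medians follow the same pattern: 22.68 when clamped, and 22.68 + (15.12 - 22.68) * 5 / 15 = 20.16 at a quantity of 10.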
-
- def test__generate_DV_sample(self, repair_model):
- expected_sample = {
- (True, True): {
- (
- 'Cost',
- 'some.test.component',
- 'some.test.component',
- '1',
- '2',
- '2',
- '0',
- ): {0: 25704, 1: 0, 2: 25704, 3: 0},
- (
- 'Cost',
- 'some.test.component',
- 'some.test.component',
- '1',
- '3',
- '1',
- '0',
- ): {0: 0, 1: 0, 2: 0, 3: 25704},
- (
- 'Time',
- 'some.test.component',
- 'some.test.component',
- '1',
- '2',
- '2',
- '0',
- ): {0: 22.68, 1: 0.0, 2: 22.68, 3: 0.0},
- (
- 'Time',
- 'some.test.component',
- 'some.test.component',
- '1',
- '3',
- '1',
- '0',
- ): {0: 0.0, 1: 0.0, 2: 0.0, 3: 22.68},
- },
- (True, False): {
- (
- 'Cost',
- 'some.test.component',
- 'some.test.component',
- '1',
- '2',
- '2',
- '0',
- ): {0: 25704, 1: 0, 2: 25704, 3: 0},
- (
- 'Cost',
- 'some.test.component',
- 'some.test.component',
- '1',
- '3',
- '1',
- '0',
- ): {0: 0, 1: 0, 2: 0, 3: 25704},
- (
- 'Time',
- 'some.test.component',
- 'some.test.component',
- '1',
- '2',
- '2',
- '0',
- ): {0: 22.68, 1: 0.0, 2: 22.68, 3: 0.0},
- (
- 'Time',
- 'some.test.component',
- 'some.test.component',
- '1',
- '3',
- '1',
- '0',
- ): {0: 0.0, 1: 0.0, 2: 0.0, 3: 22.68},
- },
- }
-
- for ecods, ecofl in (
- (True, True),
- (True, False),
- ): # todo: (False, True), (False, False) fails
- assessment_instance = repair_model._asmnt
-
- assessment_instance.options.eco_scale["AcrossFloors"] = ecofl
- assessment_instance.options.eco_scale["AcrossDamageStates"] = ecods
-
- dmg_quantities = pd.DataFrame(
- (
- (0.00, 1.00, 0.00),
- (1.00, 0.00, 0.00),
- (0.00, 1.00, 0.00),
- (0.00, 0.00, 1.00),
- ),
- columns=pd.MultiIndex.from_tuples(
- (
- ("some.test.component", "1", "1", "0", "0"),
- ("some.test.component", "2", "2", "0", "1"),
- ("some.test.component", "3", "1", "0", "1"),
- ),
- names=("cmp", "loc", "dir", "uid", "ds"),
- ),
- )
-
- repair_model.loss_map = pd.DataFrame(
- ((("DMG", "some.test.component"), "some.test.component"),),
- columns=("Driver", "Consequence"),
- )
-
- repair_model.loss_params = pd.DataFrame(
- (
- (
- None,
- None,
- "25704,17136|5,20",
- 0.390923,
- "USD_2011",
- 0.0,
- "1 EA",
- ),
- (
- None,
- 0.0,
- "22.68,15.12|5,20",
- 0.464027,
- "worker_day",
- 0.0,
- "1 EA",
- ),
- ),
- index=pd.MultiIndex.from_tuples(
- (
- ("some.test.component", "Cost"),
- ("some.test.component", "Time"),
- )
- ),
- columns=pd.MultiIndex.from_tuples(
- (
- ("DS1", "Family"),
- ("DS1", "LongLeadTime"),
- ("DS1", "Theta_0"),
- ("DS1", "Theta_1"),
- ("DV", "Unit"),
- ("Incomplete", ""),
- ("Quantity", "Unit"),
- )
- ),
- )
-
- repair_model._generate_DV_sample(dmg_quantities, 4)
-
- assert repair_model.sample.to_dict() == expected_sample[(ecods, ecofl)]
-
- def test_aggregate_losses(self, repair_model, loss_params_A):
- repair_model.sample = pd.DataFrame(
- ((100.00, 1.00),),
- columns=pd.MultiIndex.from_tuples(
- (
- (
- "Cost",
- "some.test.component",
- "some.test.component",
- "1",
- "1",
- "1",
- ),
- (
- "Time",
- "some.test.component",
- "some.test.component",
- "1",
- "1",
- "1",
- ),
- ),
- names=("dv", "loss", "dmg", "ds", "loc", "dir"),
- ),
- )
-
- repair_model.loss_params = loss_params_A
-
- df_agg = repair_model.aggregate_losses()
-
- assert df_agg.to_dict() == {
- ('repair_cost', ''): {0: 100.0},
- ('repair_time', 'parallel'): {0: 1.0},
- ('repair_time', 'sequential'): {0: 1.0},
- }
-
-
-# _____ _ _
-# | ___| _ _ __ ___| |_(_) ___ _ __ ___
-# | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|
-# | _|| |_| | | | | (__| |_| | (_) | | | \__ \
-# |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
-#
-# The following tests verify the functions of the module.
-
-
-class TestModelFunctions:
- def test_prep_constant_median_DV(self):
- median = 10.00
- constant_median_DV = model.loss_model.prep_constant_median_DV(median)
- assert constant_median_DV() == median
- values = (1.0, 2.0, 3.0, 4.0, 5.0)
- for value in values:
- assert constant_median_DV(value) == 10.00
-
- def test_prep_bounded_multilinear_median_DV(self):
- medians = np.array((1.00, 2.00, 3.00, 4.00, 5.00))
- quantities = np.array((0.00, 1.00, 2.00, 3.00, 4.00))
- f = model.loss_model.prep_bounded_multilinear_median_DV(medians, quantities)
-
- result = f(2.5)
- expected = 3.5
- assert result == expected
-
- result = f(0.00)
- expected = 1.00
- assert result == expected
-
- result = f(4.00)
- expected = 5.0
- assert result == expected
-
- result = f(-1.00)
- expected = 1.00
- assert result == expected
-
- result = f(5.00)
- expected = 5.00
- assert result == expected
-
- result_list = f([2.5, 3.5])
- expected_list = [3.5, 4.5]
- assert np.allclose(result_list, expected_list)
-
- with pytest.raises(ValueError):
- f(None)
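The clamping behavior exercised above, where inputs outside the quantity range map to the first or last median, is essentially what `numpy.interp` provides out of the box. A minimal sketch of such a bounded interpolator (not the pelicun implementation, and omitting the input validation that raises `ValueError`):

```python
import numpy as np


def bounded_multilinear(medians, quantities):
    """Interpolate medians over quantities, clamping at both ends."""

    def f(q):
        # np.interp returns medians[0] below the domain and
        # medians[-1] above it.
        return np.interp(q, quantities, medians)

    return f


f = bounded_multilinear(
    np.array((1.0, 2.0, 3.0, 4.0, 5.0)),
    np.array((0.0, 1.0, 2.0, 3.0, 4.0)),
)
assert f(2.5) == 3.5  # interior point: linear interpolation
assert f(-1.0) == 1.0  # below the domain: clamped
assert f(5.0) == 5.0  # above the domain: clamped
```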
diff --git a/pelicun/tests/util.py b/pelicun/tests/util.py
index 874c13ae8..3505c5321 100644
--- a/pelicun/tests/util.py
+++ b/pelicun/tests/util.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -33,26 +32,19 @@
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see .
-#
-# Contributors:
-# Adam Zsarnóczay
-# John Vouvakis Manousakis
-"""
-These are utility functions for the unit and integration tests.
-"""
+"""These are utility functions for the unit and integration tests."""
-import pickle
-import os
+from __future__ import annotations
-# pylint: disable=useless-suppression
-# pylint: disable=unused-variable
-# pylint: disable=pointless-statement
+import pickle # noqa: S403
+from pathlib import Path
-def export_pickle(filepath, obj, makedirs=True):
+def export_pickle(filepath, obj, makedirs=True) -> None: # noqa: ANN001, FBT002
"""
Auxiliary function to export a pickle object.
+
Parameters
----------
filepath: str
@@ -63,24 +55,26 @@ def export_pickle(filepath, obj, makedirs=True):
makedirs: bool
If True, then the directories preceding the filename
will be created if they do not exist.
+
"""
# extract the directory name
- dirname = os.path.dirname(filepath)
+ dirname = Path(filepath).parent
# if making directories is requested,
if makedirs:
# and the path does not exist
- if not os.path.exists(dirname):
+ if not Path(dirname).exists():
# create the directory
- os.makedirs(dirname)
+ Path(dirname).mkdir(parents=True)
# open the file with the given filepath
- with open(filepath, 'wb') as f:
+ with Path(filepath).open('wb') as f:
# and store the object in the file
pickle.dump(obj, f)
-def import_pickle(filepath):
+def import_pickle(filepath): # noqa: ANN001, ANN201
"""
Auxiliary function to import a pickle object.
+
Parameters
----------
filepath: str
@@ -92,6 +86,6 @@ def import_pickle(filepath):
"""
# open the file with the given filepath
- with open(filepath, 'rb') as f:
+ with Path(filepath).open('rb') as f:
# and retrieve the pickled object
- return pickle.load(f)
+ return pickle.load(f) # noqa: S301
diff --git a/pelicun/tests/validation/__init__.py b/pelicun/tests/validation/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/validation/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tools/export_DB.py b/pelicun/tests/validation/inactive/3d_interpolation.py
similarity index 56%
rename from pelicun/tools/export_DB.py
rename to pelicun/tests/validation/inactive/3d_interpolation.py
index 24d4563cf..09a627ac6 100644
--- a/pelicun/tools/export_DB.py
+++ b/pelicun/tests/validation/inactive/3d_interpolation.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-#
+# noqa: N999
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -33,61 +32,53 @@
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see .
-#
-# Contributors:
-# Adam Zsarnóczay
-
-import sys
-import json
-import argparse
-from pathlib import Path
-import pandas as pd
-
-from pelicun.db import convert_Series_to_dict
-
-def export_DB(data_path, target_dir):
- data_path = Path(data_path).resolve()
- target_dir = Path(target_dir).resolve()
- target_dir.mkdir(exist_ok=True)
+"""
+With this code we verify that scipy's `RegularGridInterpolator` does
+what we expect.
- # start with the data
+"""
- target_dir_data = target_dir / 'data'
- target_dir_data.mkdir(exist_ok=True)
-
- DB_df = pd.read_hdf(data_path, 'data')
+import numpy as np
+import pandas as pd
+from scipy.interpolate import RegularGridInterpolator
- for row_id, row in DB_df.iterrows():
- row_dict = convert_Series_to_dict(row)
- with open(target_dir_data / f'{row_id}.json', 'w', encoding='utf-8') as f:
- json.dump(row_dict, f, indent=2)
+def main():
+ # Define domains
+ num_samples = 100
+ dom1 = np.linspace(0, 1, num_samples)
+ dom2 = np.linspace(0, 1, num_samples)
+ dom3 = np.linspace(0, 1, num_samples)
- # add population if it exists
+ # Define 3D array
+    # 'ij' indexing keeps values[i, j, k] == f(dom1[i], dom2[j], dom3[k]),
+    # which is the layout RegularGridInterpolator expects.
+    vg1, vg2, vg3 = np.meshgrid(dom1, dom2, dom3, indexing='ij')
+ values = vg1 + np.sqrt(vg2) + np.sin(vg3)
- try:
- DB_df = pd.read_hdf(data_path, 'pop')
+ # Define test inputs for interpolation.
+ x1 = np.random.rand(10)
+ x2 = np.random.rand(10)
+ x3 = np.random.rand(10)
+ test_values = np.column_stack((x1, x2, x3))
- pop_dict = {}
+ # Create the interpolation function
+ interp_func = RegularGridInterpolator((dom1, dom2, dom3), values)
- for row_id, row in DB_df.iterrows():
- pop_dict.update({row_id: convert_Series_to_dict(row)})
+ # Perform the interpolation
+ interpolated_value = interp_func(test_values)
- with open(target_dir / 'population.json', 'w', encoding='utf-8') as f:
- json.dump(pop_dict, f, indent=2)
+ # Compare output with the exact value.
+ df = pd.DataFrame(
+ {
+ 'exact': x1 + np.sqrt(x2) + np.sin(x3),
+ 'interpolated': interpolated_value,
+ }
+ )
+ print(df)
- except (ValueError, NotImplementedError, FileNotFoundError):
- pass
+    # Note: this also works in the 2D case and could scale to more than
+    # 3 dimensions.
if __name__ == '__main__':
- args = sys.argv[1:]
-
- parser = argparse.ArgumentParser()
- parser.add_argument('--DL_DB_path')
- parser.add_argument('--target_dir')
-
- args_namespace = parser.parse_args(args)
-
- export_DB(args_namespace.DL_DB_path, args_namespace.target_dir)
+ main()
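The closing note mentions the 2D case; under the same assumptions, that variant of the check might look as follows:

```python
import numpy as np
from scipy.interpolate import RegularGridInterpolator

dom = np.linspace(0, 1, 100)
# 'ij' indexing keeps values[i, j] == f(dom[i], dom[j]).
v1, v2 = np.meshgrid(dom, dom, indexing='ij')
values = v1 + np.sqrt(v2)

interp_func = RegularGridInterpolator((dom, dom), values)

test_points = np.random.rand(10, 2)
exact = test_points[:, 0] + np.sqrt(test_points[:, 1])
print(np.abs(interp_func(test_points) - exact).max())  # small error
```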
diff --git a/pelicun/tests/validation/inactive/__init__.py b/pelicun/tests/validation/inactive/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/validation/inactive/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/validation/inactive/pandas_convert_speed.py b/pelicun/tests/validation/inactive/pandas_convert_speed.py
new file mode 100644
index 000000000..82e25414b
--- /dev/null
+++ b/pelicun/tests/validation/inactive/pandas_convert_speed.py
@@ -0,0 +1,89 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+import time
+
+import numpy as np
+import pandas as pd
+
+
+def benchmark():
+ # Create a large DataFrame
+ df = pd.DataFrame(np.random.rand(1000000, 10), columns=list('ABCDEFGHIJ'))
+
+ # Measure time for df.to_dict(orient='list')
+ start_time = time.time()
+ df.to_dict(orient='list')
+ end_time = time.time()
+ print(f'Time taken with to_dict(orient="list"): {end_time - start_time} seconds')
+
+ # Measure time for dictionary comprehension
+ start_time = time.time()
+ {col: df[col].tolist() for col in df.columns}
+ end_time = time.time()
+ print(
+ f'Time taken with dictionary comprehension: {end_time - start_time} seconds'
+ )
+
+    # Measure time for dictionary comprehension without tolist()
+ start_time = time.time()
+ {col: df[col] for col in df.columns}
+ end_time = time.time()
+ print(
+ f'Time taken with dictionary comprehension '
+ f'without to_list: {end_time - start_time} seconds'
+ )
+
+ # Measure time for .values
+ start_time = time.time()
+ df.values
+ end_time = time.time()
+ print(f'Time taken with .values: {end_time - start_time} seconds')
+
+ # Measure time for using df.to_numpy()
+ start_time = time.time()
+ data_array = df.to_numpy()
+ {col: data_array[:, i].tolist() for i, col in enumerate(df.columns)}
+ end_time = time.time()
+ print(f'Time taken with df.to_numpy(): {end_time - start_time} seconds')
+
+ # Measure time for using df.to_dict()
+ start_time = time.time()
+ df.to_dict()
+ end_time = time.time()
+ print(f'Time taken with df.to_dict(): {end_time - start_time} seconds')
+
+
+if __name__ == '__main__':
+ benchmark()
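If this benchmark is ever revisited, `timeit` would give more stable numbers than single `time.time()` deltas because it repeats each measurement. A sketch of the first measurement rewritten that way:

```python
import timeit

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(1_000_000, 10), columns=list('ABCDEFGHIJ'))

# Average over several repetitions to smooth out timing noise.
n = 5
total = timeit.timeit(lambda: df.to_dict(orient='list'), number=n)
print(f'to_dict(orient="list"): {total / n:.3f} s per call')
```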
diff --git a/pelicun/tests/validation/inactive/readme.md b/pelicun/tests/validation/inactive/readme.md
new file mode 100644
index 000000000..ceb464bca
--- /dev/null
+++ b/pelicun/tests/validation/inactive/readme.md
@@ -0,0 +1,3 @@
+This directory contains code that is not meant to be tested or
+updated. It was used to verify that the outputs of various external
+libraries we rely on are in line with our expectations.
diff --git a/pelicun/tests/validation/v0/__init__.py b/pelicun/tests/validation/v0/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/validation/v0/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/validation/v0/data/CMP_marginals.csv b/pelicun/tests/validation/v0/data/CMP_marginals.csv
new file mode 100755
index 000000000..c752f72e0
--- /dev/null
+++ b/pelicun/tests/validation/v0/data/CMP_marginals.csv
@@ -0,0 +1,2 @@
+,Units,Location,Direction,Theta_0,Blocks,Comment
+cmp.A,ea,1,1,1,,Testing component A
diff --git a/pelicun/tests/validation/v0/data/loss_functions.csv b/pelicun/tests/validation/v0/data/loss_functions.csv
new file mode 100644
index 000000000..23c7b1939
--- /dev/null
+++ b/pelicun/tests/validation/v0/data/loss_functions.csv
@@ -0,0 +1,2 @@
+-,DV-Unit,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,LossFunction-Theta_0,LossFunction-Theta_1,LossFunction-Family
+cmp.A-Cost,loss_ratio,1,0,Peak Floor Acceleration,g,"0.00,1000.00|0.00,1000.00",,
diff --git a/pelicun/tests/validation/v0/readme.md b/pelicun/tests/validation/v0/readme.md
new file mode 100644
index 000000000..ab3283ee0
--- /dev/null
+++ b/pelicun/tests/validation/v0/readme.md
@@ -0,0 +1,4 @@
+# Loss function validation test
+
+In this example, a single loss function is defined as a 1:1 mapping of the input EDP.
+The resulting loss distribution should therefore match the EDP distribution, which is what this test confirms.
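The expectation can also be sketched independently of pelicun: pushing lognormal demand samples through an identity loss function leaves the distribution unchanged. A minimal illustration (the median and dispersion below are assumed for demonstration, not taken from the test data):

```python
import numpy as np

rng = np.random.default_rng(42)

# Assumed demand parameters, for illustration only.
median, beta = 0.05, 0.90
demand = rng.lognormal(mean=np.log(median), sigma=beta, size=100_000)

# An identity (1:1) loss function.
loss = demand

# The loss sample reproduces the demand distribution.
assert np.isclose(np.median(loss), median, atol=1e-2)
assert np.isclose(np.log(loss).std(), beta, atol=1e-2)
```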
diff --git a/pelicun/tests/validation/v0/test_validation_0.py b/pelicun/tests/validation/v0/test_validation_0.py
new file mode 100644
index 000000000..c43ab719e
--- /dev/null
+++ b/pelicun/tests/validation/v0/test_validation_0.py
@@ -0,0 +1,132 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""
+Validation test on loss functions.
+
+In this example, a single loss function is defined as a 1:1 mapping of
+the input EDP. The resulting loss distribution should therefore match
+the EDP distribution, which is what this test confirms.
+
+"""
+
+from __future__ import annotations
+
+import numpy as np
+import pandas as pd
+
+from pelicun import assessment, file_io
+
+
+def test_validation_loss_function() -> None:
+ sample_size = 100000
+
+ # initialize a pelicun assessment
+ asmnt = assessment.Assessment({'PrintLog': False, 'Seed': 42})
+
+ #
+ # Demands
+ #
+
+ demands = pd.DataFrame(
+ {
+ 'Theta_0': [0.50],
+ 'Theta_1': [0.90],
+ 'Family': ['lognormal'],
+ 'Units': ['mps2'],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('PFA', '0', '1'),
+ ],
+ ),
+ )
+
+ asmnt.demand.load_model({'marginals': demands})
+
+ asmnt.demand.generate_sample({'SampleSize': sample_size})
+
+ #
+ # Asset
+ #
+
+ asmnt.stories = 1
+
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/validation/v0/data/CMP_marginals.csv', index_col=0
+ )
+ cmp_marginals['Blocks'] = cmp_marginals['Blocks']
+ asmnt.asset.load_cmp_model({'marginals': cmp_marginals})
+
+ asmnt.asset.generate_cmp_sample(sample_size)
+
+ #
+ # Damage
+ #
+
+ # nothing to do here.
+
+ #
+ # Losses
+ #
+
+ asmnt.loss.decision_variables = ('Cost',)
+
+ loss_map = pd.DataFrame(['cmp.A'], columns=['Repair'], index=['cmp.A'])
+ asmnt.loss.add_loss_map(loss_map)
+
+ loss_functions = file_io.load_data(
+ 'pelicun/tests/validation/v0/data/loss_functions.csv',
+ reindex=False,
+ unit_conversion_factors=asmnt.unit_conversion_factors,
+ )
+ assert isinstance(loss_functions, pd.DataFrame)
+ asmnt.loss.load_model_parameters([loss_functions])
+ asmnt.loss.calculate()
+
+ loss, _ = asmnt.loss.aggregate_losses(future=True)
+ assert isinstance(loss, pd.DataFrame)
+
+ loss_vals = loss['repair_cost'].to_numpy()
+
+ # sample median should be close to 0.05
+ assert np.allclose(np.median(loss_vals), 0.05, atol=1e-2)
+ # dispersion should be close to 0.9
+ assert np.allclose(np.log(loss_vals).std(), 0.90, atol=1e-2)
+
+ # TODO(JVM): also test save/load sample
+ # asmnt.loss.save_sample('/tmp/sample.csv')
+ # asmnt.loss.load_sample('/tmp/sample.csv')
diff --git a/pelicun/tests/validation/v1/__init__.py b/pelicun/tests/validation/v1/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/validation/v1/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/validation/v1/data/CMP_marginals.csv b/pelicun/tests/validation/v1/data/CMP_marginals.csv
new file mode 100755
index 000000000..c752f72e0
--- /dev/null
+++ b/pelicun/tests/validation/v1/data/CMP_marginals.csv
@@ -0,0 +1,2 @@
+,Units,Location,Direction,Theta_0,Blocks,Comment
+cmp.A,ea,1,1,1,,Testing component A
diff --git a/pelicun/tests/validation/v1/data/damage_db.csv b/pelicun/tests/validation/v1/data/damage_db.csv
new file mode 100644
index 000000000..50319d1d5
--- /dev/null
+++ b/pelicun/tests/validation/v1/data/damage_db.csv
@@ -0,0 +1,2 @@
+ID,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS2-Family,LS2-Theta_0,LS2-Theta_1
+cmp.A,1,0,Story Drift Ratio,rad,lognormal,0.015,0.5,lognormal,0.02,0.5
diff --git a/pelicun/tests/validation/v1/readme.md b/pelicun/tests/validation/v1/readme.md
new file mode 100644
index 000000000..396db6f71
--- /dev/null
+++ b/pelicun/tests/validation/v1/readme.md
@@ -0,0 +1,20 @@
+# Damage state probability validation test
+
+Here we test whether we get the correct damage state probabilities for a single component with two damage states.
+For such a component, assuming the EDP demand and the fragility curve capacities are all lognormal, there is a closed-form solution for the probability of each damage state.
+We utilize those equations to ensure that the probabilities obtained from our Monte Carlo sample are in line with our expectations.
+
+## Equations
+
+If $\mathrm{Y} \sim \textrm{LogNormal}(\delta, \beta)$, then $\mathrm{X} = \log(\mathrm{Y}) \sim \textrm{Normal}(\mu, \sigma)$ with $\mu = \log(\delta)$ and $\sigma = \beta$.
+
+```math
+\begin{align*}
+\mathrm{P}(\mathrm{DS}=0) &= 1 - \Phi\left(\frac{\log(\delta_D) - \log(\delta_{C1})}{\sqrt{\beta_{D}^2 + \beta_{C1}^2}}\right), \\
+\mathrm{P}(\mathrm{DS}=1) &= \Phi\left(\frac{\log(\delta_D) - \log(\delta_{C1})}{\sqrt{\beta_D^2 + \beta_{C1}^2}}\right) - \Phi\left(\frac{\log(\delta_{D}) - \log(\delta_{C2})}{\sqrt{\beta_D^2 + \beta_{C2}^2}}\right), \\
+\mathrm{P}(\mathrm{DS}=2) &= \Phi\left(\frac{\log(\delta_D) - \log(\delta_{C2})}{\sqrt{\beta_D^2 + \beta_{C2}^2}}\right), \\
+\end{align*}
+```
+where $\Phi$ is the cumulative distribution function of the standard normal distribution, $\delta_{C1}$, $\delta_{C2}$, $\beta_{C1}$, $\beta_{C2}$ are the medians and dispersions of the fragility curve capacities, and $\delta_{D}$, $\beta_{D}$ are the median and dispersion of the EDP demand.
+
+The equations inherently assume that the capacity RVs for the damage states are perfectly correlated, which is the case for sequential damage states.
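For reference, the equations are straightforward to evaluate with `scipy.stats.norm`, which provides $\Phi$; the parameter values below match the demand and fragility definitions used by the test in this directory:

```python
import numpy as np
from scipy.stats import norm

delta_d, beta_d = 0.015, 0.60  # EDP demand median and dispersion
delta_c1, beta_c1 = 0.015, 0.50  # DS1 capacity median and dispersion
delta_c2, beta_c2 = 0.020, 0.50  # DS2 capacity median and dispersion

z1 = (np.log(delta_d) - np.log(delta_c1)) / np.sqrt(beta_d**2 + beta_c1**2)
z2 = (np.log(delta_d) - np.log(delta_c2)) / np.sqrt(beta_d**2 + beta_c2**2)

p0 = 1.0 - norm.cdf(z1)
p1 = norm.cdf(z1) - norm.cdf(z2)
p2 = norm.cdf(z2)

# The three damage state probabilities sum to one.
print(p0, p1, p2, p0 + p1 + p2)
```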
diff --git a/pelicun/tests/validation/v1/test_validation_1.py b/pelicun/tests/validation/v1/test_validation_1.py
new file mode 100644
index 000000000..0b94b8598
--- /dev/null
+++ b/pelicun/tests/validation/v1/test_validation_1.py
@@ -0,0 +1,165 @@
+#
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""
+Validation test for the probability of each damage state of a
+component.
+
+"""
+
+from __future__ import annotations
+
+import tempfile
+
+import numpy as np
+import pandas as pd
+from scipy.stats import norm # type: ignore
+
+from pelicun import assessment, file_io
+
+
+def test_validation_ds_probabilities() -> None:
+ sample_size = 1000000
+
+ asmnt = assessment.Assessment({'PrintLog': False, 'Seed': 42})
+
+ #
+ # Demands
+ #
+
+ demands = pd.DataFrame(
+ {
+ 'Theta_0': [0.015],
+ 'Theta_1': [0.60],
+ 'Family': ['lognormal'],
+ 'Units': ['rad'],
+ },
+ index=pd.MultiIndex.from_tuples(
+ [
+ ('PID', '1', '1'),
+ ],
+ ),
+ )
+
+ # load the demand model
+ asmnt.demand.load_model({'marginals': demands})
+
+ # generate samples
+ asmnt.demand.generate_sample({'SampleSize': sample_size})
+
+ #
+ # Asset
+ #
+
+ # specify number of stories
+ asmnt.stories = 1
+
+ # load component definitions
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/validation/v1/data/CMP_marginals.csv', index_col=0
+ )
+ cmp_marginals['Blocks'] = cmp_marginals['Blocks']
+ asmnt.asset.load_cmp_model({'marginals': cmp_marginals})
+
+ # generate sample
+ asmnt.asset.generate_cmp_sample(sample_size)
+
+ #
+ # Damage
+ #
+
+ damage_db = file_io.load_data(
+ 'pelicun/tests/validation/v1/data/damage_db.csv',
+ reindex=False,
+ unit_conversion_factors=asmnt.unit_conversion_factors,
+ )
+ assert isinstance(damage_db, pd.DataFrame)
+
+ cmp_set = set(asmnt.asset.list_unique_component_ids())
+
+ # load the models into pelicun
+ asmnt.damage.load_model_parameters([damage_db], cmp_set)
+
+ # calculate damages
+ asmnt.damage.calculate()
+
+ probs = asmnt.damage.ds_model.probabilities()
+
+ #
+ # Analytical calculation of the probability of each damage state
+ #
+
+ demand_median = 0.015
+ demand_beta = 0.60
+ capacity_1_median = 0.015
+ capacity_2_median = 0.02
+ capacity_beta = 0.50
+
+ # If Y is LogNormal(delta, beta), then X = Log(Y) is Normal(mu, sigma)
+ # with mu = log(delta) and sigma = beta
+ demand_mean = np.log(demand_median)
+ capacity_1_mean = np.log(capacity_1_median)
+ capacity_2_mean = np.log(capacity_2_median)
+ demand_std = demand_beta
+ capacity_std = capacity_beta
+
+ p0 = 1.00 - norm.cdf(
+ (demand_mean - capacity_1_mean) / np.sqrt(demand_std**2 + capacity_std**2)
+ )
+ p1 = norm.cdf(
+ (demand_mean - capacity_1_mean) / np.sqrt(demand_std**2 + capacity_std**2)
+ ) - norm.cdf(
+ (demand_mean - capacity_2_mean) / np.sqrt(demand_std**2 + capacity_std**2)
+ )
+ p2 = norm.cdf(
+ (demand_mean - capacity_2_mean) / np.sqrt(demand_std**2 + capacity_std**2)
+ )
+
+ assert np.allclose(probs.iloc[0, 0], p0, atol=1e-2) # type: ignore
+ assert np.allclose(probs.iloc[0, 1], p1, atol=1e-2) # type: ignore
+ assert np.allclose(probs.iloc[0, 2], p2, atol=1e-2) # type: ignore
+
+ #
+ # Also test load/save sample
+ #
+
+ assert asmnt.damage.ds_model.sample is not None
+ asmnt.damage.ds_model.sample = asmnt.damage.ds_model.sample.iloc[0:100, :]
+ # (we reduce the number of realizations to conserve resources)
+ before = asmnt.damage.ds_model.sample.copy()
+ temp_dir = tempfile.mkdtemp()
+ asmnt.damage.save_sample(f'{temp_dir}/mdl.csv')
+ asmnt.damage.load_sample(f'{temp_dir}/mdl.csv')
+ pd.testing.assert_frame_equal(before, asmnt.damage.ds_model.sample)
diff --git a/pelicun/tests/validation/v2/__init__.py b/pelicun/tests/validation/v2/__init__.py
new file mode 100644
index 000000000..72c332008
--- /dev/null
+++ b/pelicun/tests/validation/v2/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
diff --git a/pelicun/tests/validation/v2/data/CMP_marginals.csv b/pelicun/tests/validation/v2/data/CMP_marginals.csv
new file mode 100755
index 000000000..e085a1f00
--- /dev/null
+++ b/pelicun/tests/validation/v2/data/CMP_marginals.csv
@@ -0,0 +1,8 @@
+,Units,Location,Direction,Theta_0,Blocks,Comment
+D.30.31.013i,ea,roof,0,1,,Chiller (parameters have been modified for testing)
+missing.component,ea,0,1,1,,Testing
+testing.component,ea,1,1,1,,Testing
+testing.component.2,ea,1,1,2,4,Testing
+collapse,ea,0,1,1,,Collapsed building
+excessiveRID,ea,all,"1,2",1,,Excessive residual drift
+irreparable,ea,0,1,1,,Irreparable building
diff --git a/pelicun/tests/validation/v2/data/additional_consequences.csv b/pelicun/tests/validation/v2/data/additional_consequences.csv
new file mode 100644
index 000000000..889ca6ae9
--- /dev/null
+++ b/pelicun/tests/validation/v2/data/additional_consequences.csv
@@ -0,0 +1,3 @@
+-,Incomplete-,Quantity-Unit,DV-Unit,DS1-Theta_0
+replacement-Cost,0,1 EA,USD_2011,21600000
+replacement-Time,0,1 EA,worker_day,12500
diff --git a/pelicun/tests/validation/v2/data/additional_damage_db.csv b/pelicun/tests/validation/v2/data/additional_damage_db.csv
new file mode 100644
index 000000000..5d7c885ac
--- /dev/null
+++ b/pelicun/tests/validation/v2/data/additional_damage_db.csv
@@ -0,0 +1,5 @@
+ID,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,Incomplete-,LS1-DamageStateWeights,LS1-Family,LS1-Theta_0,LS1-Theta_1,LS2-DamageStateWeights,LS2-Family,LS2-Theta_0,LS2-Theta_1,LS3-DamageStateWeights,LS3-Family,LS3-Theta_0,LS3-Theta_1,LS4-DamageStateWeights,LS4-Family,LS4-Theta_0,LS4-Theta_1
+D.30.31.013i,0.0,0.0,Peak Floor Acceleration,g,0,0.340000 | 0.330000 | 0.330000,lognormal,0.40,0.5,,,,,,,,,,,,
+excessiveRID,1.0,0.0,Residual Interstory Drift Ratio,rad,0,,lognormal,0.01,0.3,,,,,,,,,,,,
+irreparable,1.0,0.0,Peak Spectral Acceleration|1.13,g,0,,,10000000000.0,,,,,,,,,,,,,
+collapse,1.0,0.0,Peak Spectral Acceleration|1.13,g,0,,lognormal,1.50,0.5,,,,,,,,,,,,
diff --git a/pelicun/tests/validation/v2/data/additional_loss_functions.csv b/pelicun/tests/validation/v2/data/additional_loss_functions.csv
new file mode 100644
index 000000000..7d9bf4b93
--- /dev/null
+++ b/pelicun/tests/validation/v2/data/additional_loss_functions.csv
@@ -0,0 +1,5 @@
+-,DV-Unit,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,LossFunction-Theta_0,LossFunction-Theta_1,LossFunction-Family
+testing.component-Cost,USD_2011,0,0,Peak Floor Acceleration,g,"0.00,100000.00|0.00,5.00",0.3,lognormal
+testing.component-Time,USD_2011,0,0,Peak Floor Acceleration,g,"0.00,50.00|0.00,5.00",0.3,lognormal
+testing.component.2-Cost,USD_2011,0,0,Peak Floor Acceleration,g,"0.00,10.00|0.00,5.00",,
+testing.component.2-Time,USD_2011,0,0,Peak Floor Acceleration,g,"0.00,5.00|0.00,5.00",,
diff --git a/pelicun/tests/validation/v2/data/demand_data.csv b/pelicun/tests/validation/v2/data/demand_data.csv
new file mode 100644
index 000000000..7f2efa3f0
--- /dev/null
+++ b/pelicun/tests/validation/v2/data/demand_data.csv
@@ -0,0 +1,19 @@
+--,Units,Family,Theta_0,Theta_1
+PFA-0-1,g,lognormal,0.45,0.40
+PFA-0-2,g,lognormal,0.45,0.40
+PFA-1-1,g,lognormal,0.45,0.40
+PFA-1-2,g,lognormal,0.45,0.40
+PFA-2-1,g,lognormal,0.45,0.40
+PFA-2-2,g,lognormal,0.45,0.40
+PFA-3-1,g,lognormal,0.45,0.40
+PFA-3-2,g,lognormal,0.45,0.40
+PFA-4-1,g,lognormal,0.45,0.40
+PFA-4-2,g,lognormal,0.45,0.40
+PID-1-1,rad,lognormal,0.03,0.35
+PID-1-2,rad,lognormal,0.03,0.35
+PID-2-1,rad,lognormal,0.03,0.35
+PID-2-2,rad,lognormal,0.03,0.35
+PID-3-1,rad,lognormal,0.03,0.35
+PID-3-2,rad,lognormal,0.03,0.35
+PID-4-1,rad,lognormal,0.03,0.35
+PID-4-2,rad,lognormal,0.03,0.35
diff --git a/pelicun/tests/validation/v2/data/loss_functions.csv b/pelicun/tests/validation/v2/data/loss_functions.csv
new file mode 100644
index 000000000..23c7b1939
--- /dev/null
+++ b/pelicun/tests/validation/v2/data/loss_functions.csv
@@ -0,0 +1,2 @@
+-,DV-Unit,Demand-Directional,Demand-Offset,Demand-Type,Demand-Unit,LossFunction-Theta_0,LossFunction-Theta_1,LossFunction-Family
+cmp.A-Cost,loss_ratio,1,0,Peak Floor Acceleration,g,"0.00,1000.00|0.00,1000.00",,
diff --git a/pelicun/tests/validation/v2/readme.md b/pelicun/tests/validation/v2/readme.md
new file mode 100644
index 000000000..af0895204
--- /dev/null
+++ b/pelicun/tests/validation/v2/readme.md
@@ -0,0 +1 @@
+# Testing the save/load sample methods of LossModel
diff --git a/pelicun/tests/validation/v2/test_validation_2.py b/pelicun/tests/validation/v2/test_validation_2.py
new file mode 100644
index 000000000..111113f7d
--- /dev/null
+++ b/pelicun/tests/validation/v2/test_validation_2.py
@@ -0,0 +1,242 @@
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see .
+
+"""
+Tests a complete loss estimation workflow combining damage-state-driven
+and loss-function-driven components.
+The code is based on PRJ-3411v5 hosted on DesignSafe.
+
+"""
+
+import tempfile
+
+import numpy as np
+import pandas as pd
+import pytest
+
+import pelicun
+from pelicun import assessment, file_io
+from pelicun.pelicun_warnings import PelicunWarning
+
+
+def test_combined_workflow() -> None:
+ temp_dir = tempfile.mkdtemp()
+
+ sample_size = 10000
+
+ # Initialize a pelicun assessment
+ asmnt = assessment.Assessment(
+ {'PrintLog': True, 'Seed': 415, 'LogFile': f'{temp_dir}/log_file.txt'}
+ )
+
+ asmnt.options.list_all_ds = True
+ asmnt.options.eco_scale['AcrossFloors'] = True
+ asmnt.options.eco_scale['AcrossDamageStates'] = True
+
+ demand_data = file_io.load_data(
+ 'pelicun/tests/validation/v2/data/demand_data.csv',
+ unit_conversion_factors=None,
+ reindex=False,
+ )
+ ndims = len(demand_data)
+ perfect_correlation = pd.DataFrame(
+ np.ones((ndims, ndims)),
+ columns=demand_data.index, # type: ignore
+ index=demand_data.index, # type: ignore
+ )
+
+ #
+ # Additional damage state-driven components
+ #
+
+ damage_db = pelicun.file_io.load_data(
+ 'pelicun/tests/validation/v2/data/additional_damage_db.csv',
+ reindex=False,
+ unit_conversion_factors=asmnt.unit_conversion_factors,
+ )
+ consequences = pelicun.file_io.load_data(
+ 'pelicun/tests/validation/v2/data/additional_consequences.csv',
+ reindex=False,
+ unit_conversion_factors=asmnt.unit_conversion_factors,
+ )
+
+ #
+ # Additional loss function-driven components
+ #
+
+ loss_functions = pelicun.file_io.load_data(
+ 'pelicun/tests/validation/v2/data/additional_loss_functions.csv',
+ reindex=False,
+ unit_conversion_factors=asmnt.unit_conversion_factors,
+ )
+
+ #
+ # Demands
+ #
+
+ # Load the demand model
+ asmnt.demand.load_model(
+ {'marginals': demand_data, 'correlation': perfect_correlation}
+ )
+
+ # Generate samples
+ asmnt.demand.generate_sample({'SampleSize': sample_size})
+
+ def add_more_edps() -> None:
+ """Adds SA_1.13 and residual drift to the demand sample."""
+ # Add residual drift and Sa
+ demand_sample = asmnt.demand.save_sample()
+
+ # RIDs are all fixed for testing.
+ rid = pd.concat(
+ [
+ pd.DataFrame(
+ np.full(demand_sample['PID'].shape, 0.0050), # type: ignore
+ index=demand_sample['PID'].index, # type: ignore
+ columns=demand_sample['PID'].columns, # type: ignore
+ )
+ ],
+ axis=1,
+ keys=['RID'],
+ )
+ demand_sample_ext = pd.concat([demand_sample, rid], axis=1) # type: ignore
+
+ demand_sample_ext['SA_1.13', 0, 1] = 1.50
+
+ # Add units to the data
+ demand_sample_ext.T.insert(0, 'Units', '')
+
+ # PFA and SA are in "g" in this example, while PID and RID are "rad"
+ demand_sample_ext.loc['Units', ['PFA', 'SA_1.13']] = 'g'
+ demand_sample_ext.loc['Units', ['PID', 'RID']] = 'rad'
+
+ asmnt.demand.load_sample(demand_sample_ext)
+
+ add_more_edps()
+
+ #
+ # Asset
+ #
+
+ # Specify number of stories
+ asmnt.stories = 1
+
+ # Load component definitions
+ cmp_marginals = pd.read_csv(
+ 'pelicun/tests/validation/v2/data/CMP_marginals.csv', index_col=0
+ )
+ cmp_marginals['Blocks'] = cmp_marginals['Blocks']
+ asmnt.asset.load_cmp_model({'marginals': cmp_marginals})
+
+ # Generate sample
+ asmnt.asset.generate_cmp_sample(sample_size)
+
+ # # Used to test that the example works when an existing sample is
+ # # loaded.
+ # asmnt.asset.save_cmp_sample(filepath='/tmp/cmp_sample.csv', save_units=True)
+ # asmnt.asset.cmp_sample
+ # asmnt.asset.load_cmp_sample(filepath='/tmp/cmp_sample.csv')
+ # asmnt.asset.cmp_sample
+
+ #
+ # Damage
+ #
+
+ cmp_set = set(asmnt.asset.list_unique_component_ids())
+
+ # Load the models into pelicun
+ asmnt.damage.load_model_parameters(
+ [
+ damage_db, # type: ignore
+ 'PelicunDefault/damage_DB_FEMA_P58_2nd.csv',
+ ],
+ cmp_set,
+ )
+
+ # Prescribe the damage process
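+    # (Each task maps a triggering damage state to an effect: when
+    # 'collapse' reaches DS1, damage for all other components is set
+    # to NA; when 'excessiveRID' reaches DS1, 'irreparable' is set to
+    # its DS1.)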
+ dmg_process = {
+ '1_collapse': {'DS1': 'ALL_NA'},
+ '2_excessiveRID': {'DS1': 'irreparable_DS1'},
+ }
+
+ # Calculate damages
+
+ asmnt.damage.calculate(dmg_process=dmg_process)
+
+ # Test load sample, save sample
+ asmnt.damage.save_sample(f'{temp_dir}/out.csv')
+ asmnt.damage.load_sample(f'{temp_dir}/out.csv')
+
+ assert asmnt.damage.ds_model.sample is not None
+ asmnt.damage.ds_model.sample.mean()
+
+ #
+ # Losses
+ #
+
+ # Create the loss map
+ loss_map = pd.DataFrame(
+ ['replacement', 'replacement'],
+ columns=['Repair'],
+ index=['collapse', 'irreparable'],
+ )
+
+ # Load the loss model
+ asmnt.loss.decision_variables = ('Cost', 'Time')
+ asmnt.loss.add_loss_map(loss_map, loss_map_policy='fill')
+ with pytest.warns(PelicunWarning):
+ asmnt.loss.load_model_parameters(
+ [
+ consequences, # type: ignore
+ loss_functions, # type: ignore
+ 'PelicunDefault/loss_repair_DB_FEMA_P58_2nd.csv',
+ ]
+ )
+
+ # Perform the calculation
+ asmnt.loss.calculate()
+
+ # Test load sample, save sample
+ with pytest.warns(PelicunWarning):
+ asmnt.loss.save_sample(f'{temp_dir}/sample.csv')
+ asmnt.loss.load_sample(f'{temp_dir}/sample.csv')
+
+ #
+ # Loss sample aggregation
+ #
+
+ # Get the aggregated losses
+ with pytest.warns(PelicunWarning):
+ agg_df = asmnt.loss.aggregate_losses()
+ assert agg_df is not None
diff --git a/pelicun/tools/DL_calculation.py b/pelicun/tools/DL_calculation.py
index 4711e3798..af1edaafe 100644
--- a/pelicun/tools/DL_calculation.py
+++ b/pelicun/tools/DL_calculation.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-#
+# # noqa: N999
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
@@ -36,107 +35,89 @@
#
# Contributors:
# Adam Zsarnóczay
+# John Vouvakis Manousakis
+
+"""Main functionality to run a pelicun calculation from the command line."""
+
+from __future__ import annotations
-from time import gmtime
-from time import strftime
-import sys
-import os
-import json
import argparse
+import json
+import os
+import sys
from pathlib import Path
+from time import gmtime, strftime
+from typing import Hashable
+import colorama
+import jsonschema
import numpy as np
import pandas as pd
+from colorama import Fore, Style
+from jsonschema import validate
-import pelicun
+from pelicun import base
+from pelicun.assessment import DLCalculationAssessment
from pelicun.auto import auto_populate
-from pelicun.base import str2bool
-from pelicun.base import convert_to_MultiIndex
-from pelicun.base import convert_to_SimpleIndex
-from pelicun.base import describe
-from pelicun.base import EDP_to_demand_type
-from pelicun.file_io import load_data
-from pelicun.assessment import Assessment
-from pelicun.base import update_vals
-
-
-# this is exceptional code
-# (so let's run pylint everywhere /except/ here.)
-# pylint: disable=consider-using-namedtuple-or-dataclass
-# pylint: disable=too-many-locals
-# pylint: disable=too-many-statements
-# pylint: disable=too-many-nested-blocks
-# pylint: disable=too-many-branches
-
-# pd.set_option('display.max_rows', None)
-
-
-def log_msg(msg):
- formatted_msg = f'{strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())} {msg}'
-
- print(formatted_msg)
-
+from pelicun.base import (
+ convert_to_MultiIndex,
+ convert_to_SimpleIndex,
+ describe,
+ get,
+ is_specified,
+ is_unspecified,
+ str2bool,
+ update,
+ update_vals,
+)
+from pelicun.pelicun_warnings import PelicunInvalidConfigError
+
+colorama.init()
+sys.path.insert(0, Path(__file__).resolve().parent.absolute().as_posix())
+
+
+def log_msg(msg: str, color_codes: tuple[str, str] | None = None) -> None:
+ """
+ Print a formatted log message with a timestamp.
-sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+ Parameters
+ ----------
+ msg : str
+ The message to print.
+ color_codes : tuple, optional
+ Color codes for formatting the message. Default is None.
-idx = pd.IndexSlice
+ """
+ if color_codes:
+ cpref, csuff = color_codes
+ print( # noqa: T201
+ f'{strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())} '
+ f'{cpref}'
+ f'{msg}'
+ f'{csuff}'
+ )
+ else:
+ print(f'{strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())} {msg}') # noqa: T201
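+
+
+# Example usage (illustrative): a plain message and one wrapped in
+# colorama codes.
+#
+# log_msg('Assessment complete.')
+# log_msg('Check the inputs!', color_codes=(Fore.RED, Style.RESET_ALL))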
-# TODO: separate Damage Processes for
-# Hazus Earthquake - Buildings and - Transportation
-# TODO: Loss map for Hazus EQ Transportation
-damage_processes = {
- 'FEMA P-58': {
- "1_excessive.coll.DEM": {"DS1": "collapse_DS1"},
- "2_collapse": {"DS1": "ALL_NA"},
- "3_excessiveRID": {"DS1": "irreparable_DS1"},
- },
- # TODO: expand with ground failure logic
- 'Hazus Earthquake': {
- "1_STR": {"DS5": "collapse_DS1"},
- "2_LF": {"DS5": "collapse_DS1"},
- "3_excessive.coll.DEM": {"DS1": "collapse_DS1"},
- "4_collapse": {"DS1": "ALL_NA"},
- "5_excessiveRID": {"DS1": "irreparable_DS1"},
- },
- 'Hazus Hurricane': {},
-}
-
-default_DBs = {
- 'fragility': {
- 'FEMA P-58': 'damage_DB_FEMA_P58_2nd.csv',
- 'Hazus Earthquake - Buildings': 'damage_DB_Hazus_EQ_bldg.csv',
- 'Hazus Earthquake - Stories': 'damage_DB_Hazus_EQ_story.csv',
- 'Hazus Earthquake - Transportation': 'damage_DB_Hazus_EQ_trnsp.csv',
- 'Hazus Earthquake - Water': 'damage_DB_Hazus_EQ_water.csv',
- 'Hazus Hurricane': 'damage_DB_SimCenter_Hazus_HU_bldg.csv',
- },
- 'repair': {
- 'FEMA P-58': 'loss_repair_DB_FEMA_P58_2nd.csv',
- 'Hazus Earthquake - Buildings': 'loss_repair_DB_Hazus_EQ_bldg.csv',
- 'Hazus Earthquake - Stories': 'loss_repair_DB_Hazus_EQ_story.csv',
- 'Hazus Earthquake - Transportation': 'loss_repair_DB_Hazus_EQ_trnsp.csv',
- 'Hazus Hurricane': 'loss_repair_DB_SimCenter_Hazus_HU_bldg.csv',
- },
-}
# list of known output files; used to safely initialize the output dir
-output_files = [
- "DEM_sample.zip",
- "DEM_stats.csv",
- "CMP_sample.zip",
- "CMP_stats.csv",
- "DMG_sample.zip",
- "DMG_stats.csv",
- "DMG_grp.zip",
- "DMG_grp_stats.csv",
- "DV_repair_sample.zip",
- "DV_repair_stats.csv",
- "DV_repair_grp.zip",
- "DV_repair_grp_stats.csv",
- "DV_repair_agg.zip",
- "DV_repair_agg_stats.csv",
- "DL_summary.csv",
- "DL_summary_stats.csv",
+known_output_files = [
+ 'DEM_sample.zip',
+ 'DEM_stats.csv',
+ 'CMP_sample.zip',
+ 'CMP_stats.csv',
+ 'DMG_sample.zip',
+ 'DMG_stats.csv',
+ 'DMG_grp.zip',
+ 'DMG_grp_stats.csv',
+ 'DV_repair_sample.zip',
+ 'DV_repair_stats.csv',
+ 'DV_repair_grp.zip',
+ 'DV_repair_grp_stats.csv',
+ 'DV_repair_agg.zip',
+ 'DV_repair_agg_stats.csv',
+ 'DL_summary.csv',
+ 'DL_summary_stats.csv',
]
full_out_config = {
@@ -195,32 +176,67 @@ def log_msg(msg):
}
-def convert_df_to_dict(df, axis=1):
- out_dict = {}
+def convert_df_to_dict(data: pd.DataFrame | pd.Series, axis: int = 1) -> dict:
+ """
+ Convert a pandas DataFrame (or Series) to a dictionary.
+
+ Parameters
+ ----------
+ data : pd.DataFrame | pd.Series
+ The DataFrame (or Series) to be converted.
+ axis : int, optional
+ The axis to consider for the conversion.
+ * If 1 (default), the DataFrame is used as-is.
+ * If 0, the DataFrame is transposed before conversion.
+
+ Returns
+ -------
+ dict
+ A dictionary representation of the DataFrame. The structure of
+ the dictionary depends on the levels in the DataFrame's
+ MultiIndex columns.
+
+ Raises
+ ------
+ ValueError
+ If the axis is not 0 or 1.
+
+ Notes
+ -----
+ * If the columns have multiple levels, the function will
+ recursively convert sub-DataFrames.
+ * If the column labels at any level are numeric, they will be
+ converted to a list of floats.
+ * If the column labels are non-numeric, a dictionary will be
+ created with the index labels as keys and the corresponding data
+ as values.
+
+ """
+ out_dict: dict[Hashable, object] = {}
if axis == 1:
- df_in = df
+ df_in = data
elif axis == 0:
- df_in = df.T
+ df_in = data.T
else:
- pass
- # TODO: raise error
+ msg = '`axis` must be `0` or `1`'
+ raise ValueError(msg)
- MI = df_in.columns
+ multiindex = df_in.columns
- for label in MI.unique(level=0):
+ for label in multiindex.unique(level=0):
out_dict.update({label: np.nan})
sub_df = df_in[label]
skip_sub = True
- if MI.nlevels > 1:
+ if multiindex.nlevels > 1:
skip_sub = False
- if isinstance(sub_df, pd.Series):
- skip_sub = True
- elif (len(sub_df.columns) == 1) and (sub_df.columns[0] == ""):
+ if isinstance(sub_df, pd.Series) or (
+ (len(sub_df.columns) == 1) and (sub_df.columns[0] == '') # noqa: PLC1901
+ ):
skip_sub = True
if not skip_sub:
@@ -236,83 +252,18 @@ def convert_df_to_dict(df, axis=1):
return out_dict
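+
+
+# A minimal sketch of the conversion (hypothetical data; the exact
+# nesting depends on the MultiIndex levels and label types):
+#
+# df = pd.DataFrame(
+#     [[1.0, 2.0]],
+#     columns=pd.MultiIndex.from_tuples([('A', 'x'), ('A', 'y')]),
+# )
+# convert_df_to_dict(df)  # -> {'A': {'x': ..., 'y': ...}}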
-def add_units(raw_demands, length_unit):
- demands = raw_demands.T
-
- demands.insert(0, "Units", np.nan)
-
- if length_unit == 'in':
- length_unit = 'inch'
-
- demands = convert_to_MultiIndex(demands, axis=0).sort_index(axis=0).T
-
- if demands.columns.nlevels == 4:
- DEM_level = 1
- else:
- DEM_level = 0
-
- # drop demands with no EDP type identified
- demands.drop(
- demands.columns[demands.columns.get_level_values(DEM_level) == ''],
- axis=1,
- inplace=True,
- )
-
- # assign units
- demand_cols = demands.columns.get_level_values(DEM_level)
-
- # remove additional info from demand names
- demand_cols = [d.split('_')[0] for d in demand_cols]
-
- # acceleration
- acc_EDPs = ['PFA', 'PGA', 'SA']
- EDP_mask = np.isin(demand_cols, acc_EDPs)
-
- if np.any(EDP_mask):
- demands.iloc[0, EDP_mask] = length_unit + 'ps2'
-
- # speed
- speed_EDPs = ['PFV', 'PWS', 'PGV', 'SV']
- EDP_mask = np.isin(demand_cols, speed_EDPs)
-
- if np.any(EDP_mask):
- demands.iloc[0, EDP_mask] = length_unit + 'ps'
-
- # displacement
- disp_EDPs = ['PFD', 'PIH', 'SD', 'PGD']
- EDP_mask = np.isin(demand_cols, disp_EDPs)
-
- if np.any(EDP_mask):
- demands.iloc[0, EDP_mask] = length_unit
-
- # drift ratio
- rot_EDPs = ['PID', 'PRD', 'DWD', 'RDR', 'PMD', 'RID']
- EDP_mask = np.isin(demand_cols, rot_EDPs)
-
- if np.any(EDP_mask):
- demands.iloc[0, EDP_mask] = 'unitless'
-
- # convert back to simple header and return the DF
- return convert_to_SimpleIndex(demands, axis=1)
-
-
-def regional_output_demand():
- pass
-
-
def run_pelicun(
- config_path,
- demand_file,
- output_path,
- coupled_EDP,
- realizations,
- auto_script_path,
- detailed_results,
- regional,
- output_format,
- custom_model_dir,
- **kwargs,
-):
+ config_path: str,
+ demand_file: str,
+ output_path: str | None,
+ realizations: int,
+ auto_script_path: str | None,
+ custom_model_dir: str | None,
+ output_format: list | None,
+ *,
+ detailed_results: bool,
+ coupled_edp: bool,
+) -> None:
"""
Use settings in the config JSON to prepare and run a Pelicun calculation.
@@ -324,8 +275,6 @@ def run_pelicun(
Path pointing to the location of a CSV file with the demand data.
output_path: string, optional
Path pointing to the location where results shall be saved.
- coupled_EDP: bool, optional
- If True, EDPs are not resampled and processed in order.
realizations: int, optional
Number of realizations to generate.
auto_script_path: string, optional
@@ -335,1600 +284,1279 @@ def run_pelicun(
custom_model_dir: string, optional
Path pointing to a directory with files that define user-provided model
parameters for a customized damage and loss assessment.
+ output_format: list, optional
+ Type of output format: JSON and/or CSV.
+ Valid options: ['csv', 'json'], ['csv'], ['json'], [], None
detailed_results: bool, optional
If False, only the main statistics are saved.
+ coupled_edp: bool, optional
+ If True, EDPs are not resampled and processed in order.
"""
-
- log_msg('First line of DL_calculation')
-
# Initial setup -----------------------------------------------------------
# get the absolute path to the config file
- config_path = Path(config_path).resolve()
+ config_path_p = Path(config_path).resolve()
- # If the output path was not specified, results are saved in the directory
- # of the input file.
+ # If the output path was not specified, results are saved in the
+ # directory of the input file.
if output_path is None:
- output_path = config_path.parents[0]
+ output_path_p = config_path_p.parents[0]
else:
- output_path = Path(output_path)
-
- # Initialize the array that we'll use to collect the output file names
- output_files = []
-
- # Initialize the output folder - i.e., remove existing output files from
- # there
- files = os.listdir(output_path)
- for filename in files:
- if filename in output_files:
- os.remove(output_path / filename)
- # TODO: show some kind of a warning here if os.remove fails
-
- # open the config file and parse it
- with open(config_path, 'r', encoding='utf-8') as f:
- config = json.load(f)
-
- # f"{config['commonFileDir']}/CustomDLModels/"
- custom_dl_file_path = custom_model_dir
-
- if ('DL' not in config) or (not config['DL']):
- log_msg("Damage and Loss configuration missing from config file. ")
-
- if auto_script_path is not None:
- log_msg("Trying to auto-populate")
-
- config_ap, CMP = auto_populate(config, auto_script_path)
-
- if not config_ap['DL']:
-
- log_msg(
- "The prescribed auto-population script failed to identify "
- "a valid damage and loss configuration for this asset. "
- "Terminating analysis."
- )
-
- return 0
-
- # look for possibly specified assessment options
- try:
- assessment_options = config['Applications']['DL']['ApplicationData'][
- 'Options'
- ]
- except KeyError:
- assessment_options = None
-
- if assessment_options:
- # extend options defined via the auto-population script to
- # include those in the original `config`
- config_ap['Applications']['DL']['ApplicationData'].pop('Options')
- update_vals(
- config_ap['DL']['Options'],
- assessment_options,
- "config_ap['DL']['Options']",
- 'assessment_options',
- )
-
- # add the demand information
- config_ap['DL']['Demands'].update(
- {'DemandFilePath': f'{demand_file}', 'SampleSize': f'{realizations}'}
- )
-
- if coupled_EDP is True:
- config_ap['DL']['Demands'].update({"CoupledDemands": True})
-
- else:
- config_ap['DL']['Demands'].update(
- {"Calibration": {"ALL": {"DistributionFamily": "lognormal"}}}
- )
-
- # save the component data
- CMP.to_csv(output_path / 'CMP_QNT.csv')
-
- # update the config file with the location
- config_ap['DL']['Asset'].update(
- {"ComponentAssignmentFile": str(output_path / 'CMP_QNT.csv')}
- )
-
- # if detailed results are not requested, add a lean output config
- if detailed_results is False:
- config_ap['DL'].update({'Outputs': regional_out_config})
- else:
- config_ap['DL'].update({'Outputs': full_out_config})
- # add output settings from regional output config
- if 'Settings' not in config_ap['DL']['Outputs']:
- config_ap['DL']['Outputs'].update({'Settings': {}})
-
- config_ap['DL']['Outputs']['Settings'].update(
- regional_out_config['Settings']
- )
-
- # save the extended config to a file
- config_ap_path = Path(config_path.stem + '_ap.json').resolve()
-
- with open(config_ap_path, 'w') as f:
- json.dump(config_ap, f, indent=2)
-
- config['DL'] = config_ap.get('DL', None)
-
- else:
- log_msg("Terminating analysis.")
-
- return -1
-
- #
- # sample size: backwards compatibility
- #
- sample_size_str = (
- # expected location
- config.get('Options', {})
- .get('Sampling', {})
- .get('SampleSize', None)
+ output_path_p = Path(output_path).resolve()
+ # create the directory if it does not exist
+ if not output_path_p.exists():
+ output_path_p.mkdir(parents=True)
+
+ # parse the config file
+ config = _parse_config_file(
+ config_path_p,
+ output_path_p,
+ Path(auto_script_path).resolve() if auto_script_path is not None else None,
+ demand_file,
+ realizations,
+ output_format,
+ coupled_edp=coupled_edp,
+ detailed_results=detailed_results,
)
- if not sample_size_str:
- # try previous location
- sample_size_str = (
- config.get('DL', {}).get('Demands', {}).get('SampleSize', None)
- )
- if not sample_size_str:
- # give up
- print('Sample size not provided in config file.')
- return -1
- sample_size = int(sample_size_str)
- # provide all outputs if the files are not specified
- if ('Outputs' not in config['DL']) or (not config['DL']['Outputs']):
- config['DL']['Outputs'] = full_out_config
-
- # provide outputs in CSV by default
- if ('Format' in config['DL']['Outputs'].keys()) is False:
- config['DL']['Outputs'].update({'Format': {'CSV': True, 'JSON': False}})
+ # List to keep track of the generated output files.
+ out_files: list[str] = []
+
+ _remove_existing_files(output_path_p, known_output_files)
+
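+ # Note: `get`, `update`, and `is_specified` (from pelicun.base)
+ # address nested config entries via `/`-separated paths; e.g.
+ # get(config, 'DL/Options/Sampling/SampleSize') reads
+ # config['DL']['Options']['Sampling']['SampleSize'].
+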
+ # Run the assessment
+ assessment = DLCalculationAssessment(config_options=get(config, 'DL/Options'))
+
+ assessment.calculate_demand(
+ demand_path=Path(get(config, 'DL/Demands/DemandFilePath')).resolve(),
+ collapse_limits=get(config, 'DL/Demands/CollapseLimits', default=None),
+ length_unit=get(config, 'GeneralInformation/units/length', default=None),
+ demand_calibration=get(config, 'DL/Demands/Calibration', default=None),
+ sample_size=get(config, 'DL/Options/Sampling/SampleSize'),
+ demand_cloning=get(config, 'DL/Demands/DemandCloning', default=None),
+ residual_drift_inference=get(
+ config, 'DL/Demands/InferResidualDrift', default=None
+ ),
+ coupled_demands=get(config, 'DL/Demands/CoupledDemands', default=False),
+ )
- # override file format specification if the output_format is provided
- if output_format is not None:
- config['DL']['Outputs'].update(
- {
- 'Format': {
- 'CSV': 'csv' in output_format,
- 'JSON': 'json' in output_format,
- }
- }
+ if is_specified(config, 'DL/Asset'):
+ assessment.calculate_asset(
+ num_stories=get(config, 'DL/Asset/NumberOfStories', default=None),
+ component_assignment_file=get(
+ config, 'DL/Asset/ComponentAssignmentFile', default=None
+ ),
+ collapse_fragility_demand_type=get(
+ config, 'DL/Damage/CollapseFragility/DemandType', default=None
+ ),
+ component_sample_file=get(
+ config, 'DL/Asset/ComponentSampleFile', default=None
+ ),
+ add_irreparable_damage_columns=get(
+ config, 'DL/Damage/IrreparableDamage', default=False
+ ),
)
- # add empty Settings to output config to simplify code below
- if ('Settings' in config['DL']['Outputs'].keys()) is False:
- config['DL']['Outputs'].update({'Settings': pbe_settings})
-
- if ('Asset' not in config['DL']) or (not config['DL']['Asset']):
- log_msg("Asset configuration missing. Terminating analysis.")
- return -1
-
- if ('Demands' not in config['DL']) or (not config['DL']['Demands']):
- log_msg("Demand configuration missing. Terminating analysis.")
- return -1
-
- # get the length unit from the config file
- try:
- length_unit = config['GeneralInformation']['units']['length']
- except KeyError:
- log_msg(
- "No default length unit provided in the input file. "
- "Terminating analysis. "
+ if is_specified(config, 'DL/Damage'):
+ assessment.calculate_damage(
+ length_unit=get(config, 'GeneralInformation/units/length'),
+ component_database=get(config, 'DL/Asset/ComponentDatabase'),
+ component_database_path=get(
+ config, 'DL/Asset/ComponentDatabasePath', default=None
+ ),
+ collapse_fragility=get(
+ config, 'DL/Damage/CollapseFragility', default=None
+ ),
+ irreparable_damage=get(
+ config, 'DL/Damage/IrreparableDamage', default=None
+ ),
+ damage_process_approach=get(
+ config, 'DL/Damage/DamageProcess', default=None
+ ),
+ damage_process_file_path=get(
+ config, 'DL/Damage/DamageProcessFilePath', default=None
+ ),
+ custom_model_dir=custom_model_dir,
+ scaling_specification=get(config, 'DL/Damage/ScalingSpecification'),
+ is_for_water_network_assessment=is_specified(
+ config, 'DL/Asset/ComponentDatabase/Water'
+ ),
)
- return -1
-
- # initialize the Pelicun Assessement
- options = config['DL'].get("Options", {})
- options.update({"LogFile": "pelicun_log.txt", "Verbose": True})
-
- # If the user did not prescribe anything for ListAllDamageStates,
- # then use True as default for DL_calculations regardless of what
- # the Pelicun default is.
- if "ListAllDamageStates" not in options:
- options.update({"ListAllDamageStates": True})
-
- PAL = Assessment(options)
-
- # Demand Assessment -----------------------------------------------------------
-
- # check if there is a demand file location specified in the config file
- if config['DL']['Demands'].get('DemandFilePath', False):
- demand_path = Path(config['DL']['Demands']['DemandFilePath']).resolve()
-
+ if is_unspecified(config, 'DL/Losses/Repair'):
+ agg_repair = None
else:
- # otherwise assume that there is a response.csv file next to the config file
- demand_path = config_path.parent / 'response.csv'
-
- # try to load the demands
- raw_demands = pd.read_csv(demand_path, index_col=0)
-
- # remove excessive demands that are considered collapses, if needed
- if config['DL']['Demands'].get('CollapseLimits', False):
- raw_demands = convert_to_MultiIndex(raw_demands, axis=1)
-
- if 'Units' in raw_demands.index:
- raw_units = raw_demands.loc['Units', :]
- raw_demands.drop('Units', axis=0, inplace=True)
-
- else:
- raw_units = None
-
- DEM_to_drop = np.full(raw_demands.shape[0], False)
-
- for DEM_type, limit in config['DL']['Demands']['CollapseLimits'].items():
- if raw_demands.columns.nlevels == 4:
- DEM_to_drop += raw_demands.loc[:, idx[:, DEM_type, :, :]].max(
- axis=1
- ) > float(limit)
-
- else:
- DEM_to_drop += raw_demands.loc[:, idx[DEM_type, :, :]].max(
- axis=1
- ) > float(limit)
-
- raw_demands = raw_demands.loc[~DEM_to_drop, :]
-
- if isinstance(raw_units, pd.Series):
- raw_demands = pd.concat([raw_demands, raw_units.to_frame().T], axis=0)
-
- log_msg(
- f"{np.sum(DEM_to_drop)} realizations removed from the demand "
- f"input because they exceed the collapse limit. The remaining "
- f"sample size: {raw_demands.shape[0]}"
+ # Currently we only support `Repair` consequences.
+ # We will need to make changes here when we start to include
+ # more consequences.
+
+ agg_repair, _ = assessment.calculate_loss(
+ loss_map_approach=get(config, 'DL/Losses/Repair/MapApproach'),
+ occupancy_type=get(config, 'DL/Asset/OccupancyType'),
+ consequence_database=get(config, 'DL/Losses/Repair/ConsequenceDatabase'),
+ consequence_database_path=get(
+ config, 'DL/Losses/Repair/ConsequenceDatabasePath'
+ ),
+ custom_model_dir=custom_model_dir,
+ damage_process_approach=get(
+ config, 'DL/Damage/DamageProcess', default='User Defined'
+ ),
+ replacement_cost_parameters=get(
+ config, 'DL/Losses/Repair/ReplacementCost'
+ ),
+ replacement_time_parameters=get(
+ config, 'DL/Losses/Repair/ReplacementTime'
+ ),
+ replacement_carbon_parameters=get(
+ config, 'DL/Losses/Repair/ReplacementCarbon'
+ ),
+ replacement_energy_parameters=get(
+ config, 'DL/Losses/Repair/ReplacementEnergy'
+ ),
+ loss_map_path=get(config, 'DL/Losses/Repair/MapFilePath'),
+ decision_variables=_parse_decision_variables(config),
)
- # add units to the demand data if needed
- if "Units" not in raw_demands.index:
- demands = add_units(raw_demands, length_unit)
-
- else:
- demands = raw_demands
-
- # load the available demand sample
- PAL.demand.load_sample(demands)
+ summary, summary_stats = _result_summary(assessment, agg_repair)
+
+ # Save the results into files
+
+ if is_specified(config, 'DL/Outputs/Demand'):
+ output_config = get(config, 'DL/Outputs/Demand')
+ _demand_save(output_config, assessment, output_path_p, out_files)
+
+ if is_specified(config, 'DL/Outputs/Asset'):
+ output_config = get(config, 'DL/Outputs/Asset')
+ _asset_save(
+ output_config,
+ assessment,
+ output_path_p,
+ out_files,
+ aggregate_colocated=get(
+ config,
+ 'DL/Outputs/Settings/AggregateColocatedComponentResults',
+ default=False,
+ ),
+ )
- # get the calibration information
- if config['DL']['Demands'].get('Calibration', False):
- # then use it to calibrate the demand model
- PAL.demand.calibrate_model(config['DL']['Demands']['Calibration'])
+ if is_specified(config, 'DL/Outputs/Damage'):
+ output_config = get(config, 'DL/Outputs/Damage')
+ _damage_save(
+ output_config,
+ assessment,
+ output_path_p,
+ out_files,
+ aggregate_colocated=get(
+ config,
+ 'DL/Outputs/Settings/AggregateColocatedComponentResults',
+ default=False,
+ ),
+ condense_ds=get(
+ config,
+ 'DL/Outputs/Settings/CondenseDS',
+ default=False,
+ ),
+ )
- else:
- # if no calibration is requested,
- # set all demands to use empirical distribution
- PAL.demand.calibrate_model({"ALL": {"DistributionFamily": "empirical"}})
-
- # and generate a new demand sample
- PAL.demand.generate_sample(
- {
- "SampleSize": sample_size,
- 'PreserveRawOrder': config['DL']['Demands'].get('CoupledDemands', False),
- 'DemandCloning': config['DL']['Demands'].get('DemandCloning', False),
- }
- )
+ if is_specified(config, 'DL/Outputs/Loss/Repair'):
+ output_config = get(config, 'DL/Outputs/Loss/Repair')
+ assert agg_repair is not None
+ _loss_save(
+ output_config,
+ assessment,
+ output_path_p,
+ out_files,
+ agg_repair,
+ aggregate_colocated=get(
+ config,
+ 'DL/Outputs/Settings/AggregateColocatedComponentResults',
+ default=False,
+ ),
+ )
+ _summary_save(summary, summary_stats, output_path_p, out_files)
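+ # Post-process the collected outputs: optionally mirror them to JSON
+ # and drop CSV files that were not requested.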
+ _create_json_files_if_requested(config, out_files, output_path_p)
+ _remove_csv_files_if_not_requested(config, out_files, output_path_p)
- # get the generated demand sample
- demand_sample, demand_units = PAL.demand.save_sample(save_units=True)
- demand_sample = pd.concat([demand_sample, demand_units.to_frame().T])
+def _parse_decision_variables(config: dict) -> tuple[str, ...]:
+ """
+ Parse decision variables from the config file.
- # get residual drift estimates, if needed
- if config['DL']['Demands'].get('InferResidualDrift', False):
- RID_config = config['DL']['Demands']['InferResidualDrift']
+ Parameters
+ ----------
+ config : dict
+ The configuration dictionary.
- if RID_config['method'] == 'FEMA P-58':
- RID_list = []
- PID = demand_sample['PID'].copy()
- PID.drop('Units', inplace=True)
- PID = PID.astype(float)
+ Returns
+ -------
+ tuple
+ Tuple of decision variables.
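+
+ For example, a config entry like
+ ``{'Cost': True, 'Time': True, 'Carbon': False}`` under
+ ``DL/Losses/Repair/DecisionVariables`` yields ``('Cost', 'Time')``.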
- for direction, delta_yield in RID_config.items():
- if direction == 'method':
- continue
+ """
+ decision_variables: list[str] = []
+ if get(config, 'DL/Losses/Repair/DecisionVariables', default=False) is not False:
+ for dv_i, dv_status in get(
+ config, 'DL/Losses/Repair/DecisionVariables'
+ ).items():
+ if dv_status is True:
+ decision_variables.append(dv_i)
+ return tuple(decision_variables)
+
+
+def _remove_csv_files_if_not_requested(
+ config: dict, out_files: list[str], output_path: Path
+) -> None:
+ """
+ Remove CSV files if not requested in config.
- RID = PAL.demand.estimate_RID(
- PID.loc[:, idx[:, direction]],
- {'yield_drift': float(delta_yield)},
- )
+ Parameters
+ ----------
+ config : dict
+ Configuration dictionary.
+ out_files : list
+ List of output file names.
+ output_path : Path
+ Path to the output directory.
+ """
+ # Don't proceed if CSV files were requested.
+ if get(config, 'DL/Outputs/Format/CSV', default=False) is True:
+ return
- RID_list.append(RID)
+ for filename in out_files:
+ # keep the DL_summary and DL_summary_stats files
+ if 'DL_summary' in filename:
+ continue
+ Path(output_path / filename).unlink()
- RID = pd.concat(RID_list, axis=1)
- RID_units = pd.Series(
- [
- 'unitless',
- ]
- * RID.shape[1],
- index=RID.columns,
- name='Units',
- )
- RID_sample = pd.concat([RID, RID_units.to_frame().T])
- demand_sample = pd.concat([demand_sample, RID_sample], axis=1)
+def _summary_save(
+ summary: pd.DataFrame,
+ summary_stats: pd.DataFrame,
+ output_path: Path,
+ out_files: list[str],
+) -> None:
+ """
+ Save summary results to CSV files.
- # add a constant one demand
- demand_sample[('ONE', '0', '1')] = np.ones(demand_sample.shape[0])
- demand_sample.loc['Units', ('ONE', '0', '1')] = 'unitless'
+ Parameters
+ ----------
+ summary : pd.DataFrame
+ Summary DataFrame.
+ summary_stats : pd.DataFrame
+ Summary statistics DataFrame.
+ output_path : Path
+ Path to the output directory.
+ out_files : list
+ List of output file names.
- PAL.demand.load_sample(convert_to_SimpleIndex(demand_sample, axis=1))
+ """
+ # save summary sample
+ if summary is not None:
+ summary.to_csv(output_path / 'DL_summary.csv', index_label='#')
+ out_files.append('DL_summary.csv')
- # save results
- if 'Demand' in config['DL']['Outputs']:
- out_reqs = [
- out if val else ""
- for out, val in config['DL']['Outputs']['Demand'].items()
- ]
+ # save summary statistics
+ if summary_stats is not None:
+ summary_stats.to_csv(output_path / 'DL_summary_stats.csv')
+ out_files.append('DL_summary_stats.csv')
+
+
+def _parse_config_file( # noqa: C901
+ config_path: Path,
+ output_path: Path,
+ auto_script_path: Path | None,
+ demand_file: str,
+ realizations: int,
+ output_format: list | None,
+ *,
+ coupled_edp: bool,
+ detailed_results: bool,
+) -> dict[str, object]:
+ """
+ Parse and validate the config file for Pelicun.
- if np.any(np.isin(['Sample', 'Statistics'], out_reqs)):
- demand_sample, demand_units = PAL.demand.save_sample(save_units=True)
+ Parameters
+ ----------
+ config_path : Path
+ Path to the configuration file.
+ output_path : Path
+ Directory for output files.
+ auto_script_path : Path, optional
+ Path to the auto-population script.
+ demand_file : str
+ Path to the demand data file.
+ realizations : int
+ Number of realizations.
+ output_format : list, optional
+ Output format (CSV, JSON).
+ coupled_edp : bool
+ Whether to consider coupled EDPs.
+ detailed_results : bool
+ Whether to generate detailed results.
+
+ Returns
+ -------
+ dict
+ Parsed and validated configuration.
+
+ Raises
+ ------
+ PelicunInvalidConfigError
+ If the provided config file does not conform to the schema or
+ there are issues with the specified values.
- demand_units = demand_units.to_frame().T
+ """
+ # open the config file and parse it
+ with Path(config_path).open(encoding='utf-8') as f:
+ config = json.load(f)
- if 'Sample' in out_reqs:
- demand_sample_s = pd.concat([demand_sample, demand_units])
- demand_sample_s = convert_to_SimpleIndex(demand_sample_s, axis=1)
- demand_sample_s.to_csv(
- output_path / "DEM_sample.zip",
- index_label=demand_sample_s.columns.name,
- compression=dict(method='zip', archive_name='DEM_sample.csv'),
- )
- output_files.append('DEM_sample.zip')
-
- if 'Statistics' in out_reqs:
- demand_stats = describe(demand_sample)
- demand_stats = pd.concat([demand_stats, demand_units])
- demand_stats = convert_to_SimpleIndex(demand_stats, axis=1)
- demand_stats.to_csv(
- output_path / "DEM_stats.csv",
- index_label=demand_stats.columns.name,
- )
- output_files.append('DEM_stats.csv')
+ # load the schema
+ with Path(f'{base.pelicun_path}/settings/input_schema.json').open(
+ encoding='utf-8'
+ ) as f:
+ schema = json.load(f)
- # - - - - -
- # This is almost surely not needed any more
- """
- if regional == True:
+ # Validate the configuration against the schema
+ try:
+ validate(instance=config, schema=schema)
+ except jsonschema.exceptions.ValidationError as exc:
+ msg = 'The provided config file does not conform to the schema.'
+ raise PelicunInvalidConfigError(msg) from exc
- demand_sample = PAL.demand.save_sample()
+ if is_unspecified(config, 'DL'):
+ log_msg('Damage and Loss configuration missing from config file. ')
- mean = demand_sample.mean()
- median = demand_sample.median()
- std = demand_sample.std()
- beta = np.log(demand_sample).std()
+ if auto_script_path is None:
+ msg = 'No `DL` entry in config file.'
+ raise PelicunInvalidConfigError(msg)
- res = pd.concat([mean,std,median,beta],
- keys=['mean','std','median','beta']).to_frame().T
+ log_msg('Trying to auto-populate')
- res = res.reorder_levels([1,2,3,0], axis=1)
+ config_ap, comp = auto_populate(config, auto_script_path)
- res.sort_index(axis=1, inplace=True)
+ if is_unspecified(config_ap, 'DL'):
+ msg = (
+ 'No `DL` entry in config file, and '
+ 'the prescribed auto-population script failed to identify '
+ 'a valid damage and loss configuration for this asset. '
+ )
+ raise PelicunInvalidConfigError(msg)
- res.dropna(axis=1, how='all', inplace=True)
+ # look for possibly specified assessment options
+ try:
+ assessment_options = config['Applications']['DL']['ApplicationData'][
+ 'Options'
+ ]
+ except KeyError:
+ assessment_options = None
+
+ if assessment_options:
+ # extend options defined via the auto-population script to
+ # include those in the original `config`
+ config_ap['Applications']['DL']['ApplicationData'].pop('Options')
+ update_vals(
+ config_ap['DL']['Options'],
+ assessment_options,
+ "config_ap['DL']['Options']",
+ 'assessment_options',
+ )
- res.columns.rename(['type', 'loc', 'dir', 'stat'], inplace=True)
+ # add the demand information
+ update(config_ap, '/DL/Demands/DemandFilePath', demand_file)
+ update(config_ap, '/DL/Demands/SampleSize', str(realizations))
- res.to_csv(output_path/"EDP.csv", index_label=res.columns.name)
- output_files.append('EDP.csv')
- """
- # - - - - -
+ if coupled_edp is True:
+ update(config_ap, 'DL/Demands/CoupledDemands', value=True)
- # Asset Definition ------------------------------------------------------------
+ else:
+ update(
+ config_ap,
+ 'DL/Demands/Calibration',
+ {'ALL': {'DistributionFamily': 'lognormal'}},
+ )
- # set the number of stories
- if config['DL']['Asset'].get('NumberOfStories', False):
- PAL.stories = int(config['DL']['Asset']['NumberOfStories'])
+ # save the component data
+ comp.to_csv(output_path / 'CMP_QNT.csv')
- # load a component model and generate a sample
- if config['DL']['Asset'].get('ComponentAssignmentFile', False):
- cmp_marginals = pd.read_csv(
- config['DL']['Asset']['ComponentAssignmentFile'],
- index_col=0,
- encoding_errors='replace',
+ # update the config file with the location
+ update(
+ config_ap,
+ 'DL/Asset/ComponentAssignmentFile',
+ str(output_path / 'CMP_QNT.csv'),
)
- DEM_types = demand_sample.columns.unique(level=0)
-
- # add component(s) to support collapse calculation
- if 'CollapseFragility' in config['DL']['Damage'].keys():
- coll_DEM = config['DL']['Damage']['CollapseFragility']["DemandType"]
- if coll_DEM.startswith('SA'):
- # we have a global demand and evaluate collapse directly
- pass
+ # if detailed results are not requested, add a lean output config
+ if detailed_results is False:
+ update(config_ap, 'DL/Outputs', regional_out_config)
+ else:
+ update(config_ap, 'DL/Outputs', full_out_config)
+ # add output settings from regional output config
+ if is_unspecified(config_ap, 'DL/Outputs/Settings'):
+ update(config_ap, 'DL/Outputs/Settings', {})
- else:
- # we need story-specific collapse assessment
+ config_ap['DL']['Outputs']['Settings'].update(
+ regional_out_config['Settings']
+ )
- if coll_DEM in DEM_types:
- # excessive coll_DEM is added on every floor to detect large RIDs
- cmp_marginals.loc['excessive.coll.DEM', 'Units'] = 'ea'
+ # save the extended config to a file
+ config_ap_path = Path(config_path.stem + '_ap.json').resolve()
- locs = demand_sample[coll_DEM].columns.unique(level=0)
- cmp_marginals.loc['excessive.coll.DEM', 'Location'] = ','.join(
- locs
- )
+ with Path(config_ap_path).open('w', encoding='utf-8') as f:
+ json.dump(config_ap, f, indent=2)
- dirs = demand_sample[coll_DEM].columns.unique(level=1)
- cmp_marginals.loc['excessive.coll.DEM', 'Direction'] = ','.join(
- dirs
- )
+ update(config, 'DL', get(config_ap, 'DL'))
- cmp_marginals.loc['excessive.coll.DEM', 'Theta_0'] = 1.0
+ # sample size
+ sample_size_str = get(config, 'DL/Options/Sampling/SampleSize')
+ if not sample_size_str:
+ sample_size_str = get(config, 'DL/Demands/SampleSize')
+ if not sample_size_str:
+ msg = 'Sample size not provided in config file.'
+ raise PelicunInvalidConfigError(msg)
+ update(config, 'DL/Options/Sampling/SampleSize', int(sample_size_str))
- else:
- log_msg(
- f'WARNING: No {coll_DEM} among available demands. Collapse '
- 'cannot be evaluated.'
- )
+ # provide all outputs if the files are not specified
+ if is_unspecified(config, 'DL/Outputs'):
+ update(config, 'DL/Outputs', full_out_config)
- # always add a component to support basic collapse calculation
- cmp_marginals.loc['collapse', 'Units'] = 'ea'
- cmp_marginals.loc['collapse', 'Location'] = 0
- cmp_marginals.loc['collapse', 'Direction'] = 1
- cmp_marginals.loc['collapse', 'Theta_0'] = 1.0
+ # provide outputs in CSV by default
+ if is_unspecified(config, 'DL/Outputs/Format'):
+ update(config, 'DL/Outputs/Format', {'CSV': True, 'JSON': False})
- # add components to support irreparable damage calculation
- if 'IrreparableDamage' in config['DL']['Damage'].keys():
- if 'RID' in DEM_types:
- # excessive RID is added on every floor to detect large RIDs
- cmp_marginals.loc['excessiveRID', 'Units'] = 'ea'
+ # override file format specification if the output_format is
+ # provided
+ if output_format is not None:
+ update(
+ config,
+ 'DL/Outputs/Format',
+ {
+ 'CSV': 'csv' in output_format,
+ 'JSON': 'json' in output_format,
+ },
+ )
- locs = demand_sample['RID'].columns.unique(level=0)
- cmp_marginals.loc['excessiveRID', 'Location'] = ','.join(locs)
+ # add empty Settings to output config to simplify code below
+ if is_unspecified(config, 'DL/Outputs/Settings'):
+ update(config, 'DL/Outputs/Settings', pbe_settings)
+
+ if is_unspecified(config, 'DL/Demands'):
+ msg = 'Demand configuration missing.'
+ raise PelicunInvalidConfigError(msg)
+
+ if is_unspecified(config, 'DL/Asset'):
+ msg = 'Asset configuration missing.'
+ raise PelicunInvalidConfigError(msg)
+
+ update(
+ config,
+ 'DL/Options/LogFile',
+ 'pelicun_log.txt',
+ only_if_empty_or_none=True,
+ )
+ update(
+ config,
+ 'DL/Options/Verbose',
+ value=True,
+ only_if_empty_or_none=True,
+ )
- dirs = demand_sample['RID'].columns.unique(level=1)
- cmp_marginals.loc['excessiveRID', 'Direction'] = ','.join(dirs)
+ # if the user did not prescribe anything for ListAllDamageStates,
+ # then use True as default for DL_calculations regardless of what
+ # the Pelicun default is.
+ update(
+ config,
+ 'DL/Options/ListAllDamageStates',
+ value=True,
+ only_if_empty_or_none=True,
+ )
- cmp_marginals.loc['excessiveRID', 'Theta_0'] = 1.0
+ # if the demand file location is not specified in the config file
+ # assume there is a `response.csv` file next to the config file.
+ update(
+ config,
+ 'DL/Demands/DemandFilePath',
+ config_path.parent / 'response.csv',
+ only_if_empty_or_none=True,
+ )
- # irreparable is a global component to recognize is any of the
- # excessive RIDs were triggered
- cmp_marginals.loc['irreparable', 'Units'] = 'ea'
- cmp_marginals.loc['irreparable', 'Location'] = 0
- cmp_marginals.loc['irreparable', 'Direction'] = 1
- cmp_marginals.loc['irreparable', 'Theta_0'] = 1.0
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ # backwards-compatibility for v3.2 and earlier | remove after v4.0
+ if get(config, 'DL/Losses/BldgRepair', default=False):
+ update(config, 'DL/Losses/Repair', get(config, 'DL/Losses/BldgRepair'))
+ if get(config, 'DL/Outputs/Loss/BldgRepair', default=False):
+ update(
+ config,
+ 'DL/Outputs/Loss/Repair',
+ get(config, 'DL/Outputs/Loss/BldgRepair'),
+ )
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+ # Cast NumberOfStories to int
+ if is_specified(config, 'DL/Asset/NumberOfStories'):
+ update(
+ config,
+ 'DL/Asset/NumberOfStories',
+ int(get(config, 'DL/Asset/NumberOfStories')),
+ )
- else:
- log_msg(
- 'WARNING: No residual interstory drift ratio among'
- 'available demands. Irreparable damage cannot be '
- 'evaluated.'
+ # Ensure `DL/Demands/InferResidualDrift` contains a `method`
+ if is_specified(config, 'DL/Demands/InferResidualDrift') and is_unspecified(
+ config, 'DL/Demands/InferResidualDrift/method'
+ ):
+ msg = 'No method is specified in residual drift inference configuration.'
+ raise PelicunInvalidConfigError(msg)
+
+ # Ensure `DL/Damage/CollapseFragility` contains all required keys.
+ if is_specified(config, 'DL/Damage/CollapseFragility'):
+ for thing in ('CapacityDistribution', 'CapacityMedian', 'Theta_1'):
+ if is_unspecified(config, f'DL/Damage/CollapseFragility/{thing}'):
+ msg = (
+ f'`{thing}` is missing from DL/Damage/CollapseFragility'
+ f' in the configuration file.'
)
+ raise PelicunInvalidConfigError(msg)
+
+ # Ensure `DL/Damage/IrreparableDamage` contains all required keys.
+ if is_specified(config, 'DL/Damage/IrreparableDamage'):
+ for thing in ('DriftCapacityMedian', 'DriftCapacityLogStd'):
+ if is_unspecified(config, f'DL/Damage/IrreparableDamage/{thing}'):
+ msg = (
+ f'`{thing}` is missing from DL/Damage/IrreparableDamage'
+ f' in the configuration file.'
+ )
+ raise PelicunInvalidConfigError(msg)
+
+ # If the damage process approach is `User Defined` there needs to
+ # be a damage process file path.
+ if get(config, 'DL/Damage/DamageProcess') == 'User Defined' and is_unspecified(
+ config, 'DL/Damage/DamageProcessFilePath'
+ ):
+ msg = (
+ 'When `DL/Damage/DamageProcess` is set to `User Defined`, '
+ 'a path needs to be specified under '
+ '`DL/Damage/DamageProcessFilePath`.'
+ )
+ raise PelicunInvalidConfigError(msg)
+
+ # Getting results requires running the calculations.
+ if is_specified(config, 'DL/Outputs/Asset') and is_unspecified(
+ config, 'DL/Asset'
+ ):
+ msg = (
+ 'No asset data specified in config file. '
+ 'Cannot generate asset model outputs.'
+ )
+ raise PelicunInvalidConfigError(msg)
+
+ if is_specified(config, 'DL/Outputs/Damage') and is_unspecified(
+ config, 'DL/Damage'
+ ):
+ msg = (
+ 'No damage data specified in config file. '
+ 'Cannot generate damage model outputs.'
+ )
+ raise PelicunInvalidConfigError(msg)
+
+ if is_specified(config, 'DL/Outputs/Loss') and is_unspecified(
+ config, 'DL/Losses'
+ ):
+ msg = (
+ 'No loss data specified in config file. '
+ 'Cannot generate loss model outputs.'
+ )
+ raise PelicunInvalidConfigError(msg)
- # load component model
- PAL.asset.load_cmp_model({'marginals': cmp_marginals})
-
- # generate component quantity sample
- PAL.asset.generate_cmp_sample()
-
- # if requested, load the quantity sample from a file
- elif config['DL']['Asset'].get('ComponentSampleFile', False):
- PAL.asset.load_cmp_sample(config['DL']['Asset']['ComponentSampleFile'])
-
- # if requested, save results
- if 'Asset' in config['DL']['Outputs']:
- cmp_sample, cmp_units = PAL.asset.save_cmp_sample(save_units=True)
- cmp_units = cmp_units.to_frame().T
-
+ # Ensure only one of `component_assignment_file` or
+ # `component_sample_file` is provided.
+ if is_specified(config, 'DL/Asset'):
if (
- config['DL']['Outputs']['Settings'].get(
- 'AggregateColocatedComponentResults', False
- )
- is True
+ (get(config, 'DL/Asset/ComponentAssignmentFile') is None)
+ and (get(config, 'DL/Asset/ComponentSampleFile') is None)
+ ) or (
+ (get(config, 'DL/Asset/ComponentAssignmentFile') is not None)
+ and (get(config, 'DL/Asset/ComponentSampleFile') is not None)
):
- cmp_units = cmp_units.groupby(level=[0, 1, 2], axis=1).first()
+ msg = (
+ 'In the asset model configuration, it is '
+ 'required to specify one of `component_assignment_file` '
+ 'or `component_sample_file`, but not both.'
+ )
+ raise PelicunInvalidConfigError(msg)
- cmp_groupby_uid = cmp_sample.groupby(level=[0, 1, 2], axis=1)
+ return config
- cmp_sample = cmp_groupby_uid.sum().mask(
- cmp_groupby_uid.count() == 0, np.nan
- )
- out_reqs = [
- out if val else ""
- for out, val in config['DL']['Outputs']['Asset'].items()
- ]
+def _create_json_files_if_requested(
+ config: dict, out_files: list[str], output_path: Path
+) -> None:
+ """
+ Create JSON files if requested in the config.
- if np.any(np.isin(['Sample', 'Statistics'], out_reqs)):
- if 'Sample' in out_reqs:
- cmp_sample_s = pd.concat([cmp_sample, cmp_units])
+ Parameters
+ ----------
+ config : dict
+ Configuration dictionary.
+ out_files : list
+ List of output file names.
+ output_path : Path
+ Path to the output directory.
- cmp_sample_s = convert_to_SimpleIndex(cmp_sample_s, axis=1)
- cmp_sample_s.to_csv(
- output_path / "CMP_sample.zip",
- index_label=cmp_sample_s.columns.name,
- compression=dict(method='zip', archive_name='CMP_sample.csv'),
- )
- output_files.append('CMP_sample.zip')
+ """
+ # If not requested, simply return
+ if get(config, 'DL/Outputs/Format/JSON', default=False) is False:
+ return
- if 'Statistics' in out_reqs:
- cmp_stats = describe(cmp_sample)
- cmp_stats = pd.concat([cmp_stats, cmp_units])
+ for filename in out_files:
+ filename_json = filename[:-3] + 'json'
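+ # e.g. 'DMG_sample.zip' -> 'DMG_sample.json',
+ #      'DMG_stats.csv'  -> 'DMG_stats.json'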
- cmp_stats = convert_to_SimpleIndex(cmp_stats, axis=1)
- cmp_stats.to_csv(
- output_path / "CMP_stats.csv", index_label=cmp_stats.columns.name
- )
- output_files.append('CMP_stats.csv')
-
- # - - - - -
- # This is almost surely not needed any more
- """
- if regional == True:
-
- #flatten the dictionary
- AIM_flat_dict = {}
- for key, item in GI_config.items():
- if isinstance(item, dict):
- if key not in ['units', 'location']:
- for sub_key, sub_item in item.items():
- AIM_flat_dict.update({f'{key}_{sub_key}': sub_item})
- else:
- AIM_flat_dict.update({key: [item,]})
-
-
- # do not save polygons
- for header_to_remove in ['geometry', 'Footprint']:
- try:
- AIM_flat_dict.pop(header_to_remove)
- except:
- pass
-
- # create the output DF
- df_res = pd.DataFrame.from_dict(AIM_flat_dict)
-
- df_res.dropna(axis=1, how='all', inplace=True)
-
- df_res.to_csv(output_path/'AIM.csv')
- output_files.append('AIM.csv')
- """
- # - - - - -
-
- # Damage Assessment -----------------------------------------------------------
-
- # if a damage assessment is requested
- if 'Damage' in config['DL']:
- # load the fragility information
if (
- config['DL']['Asset']['ComponentDatabase']
- in default_DBs['fragility'].keys()
+ get(config, 'DL/Outputs/Settings/SimpleIndexInJSON', default=False)
+ is True
):
- component_db = [
- 'PelicunDefault/'
- + default_DBs['fragility'][
- config['DL']['Asset']['ComponentDatabase']
- ],
- ]
+ data = pd.read_csv(output_path / filename, index_col=0)
else:
- component_db = []
-
- if config['DL']['Asset'].get('ComponentDatabasePath', False) is not False:
- extra_comps = config['DL']['Asset']['ComponentDatabasePath']
-
- if 'CustomDLDataFolder' in extra_comps:
- extra_comps = extra_comps.replace(
- 'CustomDLDataFolder', custom_dl_file_path
- )
-
- component_db += [
- extra_comps,
- ]
- component_db = component_db[::-1]
-
- # prepare additional fragility data
-
- # get the database header from the default P58 db
- P58_data = PAL.get_default_data('damage_DB_FEMA_P58_2nd')
-
- adf = pd.DataFrame(columns=P58_data.columns)
-
- if 'CollapseFragility' in config['DL']['Damage'].keys():
- coll_config = config['DL']['Damage']['CollapseFragility']
-
- if 'excessive.coll.DEM' in cmp_marginals.index:
- # if there is story-specific evaluation
- coll_CMP_name = 'excessive.coll.DEM'
- else:
- # otherwise, for global collapse evaluation
- coll_CMP_name = 'collapse'
-
- adf.loc[coll_CMP_name, ('Demand', 'Directional')] = 1
- adf.loc[coll_CMP_name, ('Demand', 'Offset')] = 0
-
- coll_DEM = coll_config["DemandType"]
-
- if '_' in coll_DEM:
- coll_DEM, coll_DEM_spec = coll_DEM.split('_')
- else:
- coll_DEM_spec = None
-
- coll_DEM_name = None
- for demand_name, demand_short in EDP_to_demand_type.items():
- if demand_short == coll_DEM:
- coll_DEM_name = demand_name
- break
-
- if coll_DEM_name is None:
- return -1
-
- if coll_DEM_spec is None:
- adf.loc[coll_CMP_name, ('Demand', 'Type')] = coll_DEM_name
-
- else:
- adf.loc[coll_CMP_name, ('Demand', 'Type')] = (
- f'{coll_DEM_name}|{coll_DEM_spec}'
- )
-
- coll_DEM_unit = add_units(
- pd.DataFrame(
- columns=[
- f'{coll_DEM}-1-1',
- ]
- ),
- length_unit,
- ).iloc[0, 0]
-
- adf.loc[coll_CMP_name, ('Demand', 'Unit')] = coll_DEM_unit
-
- adf.loc[coll_CMP_name, ('LS1', 'Family')] = coll_config.get(
- 'CapacityDistribution', np.nan
+ data = convert_to_MultiIndex(
+ pd.read_csv(output_path / filename, index_col=0), axis=1
)
- adf.loc[coll_CMP_name, ('LS1', 'Theta_0')] = coll_config.get(
- 'CapacityMedian', np.nan
+ if 'Units' in data.index:
+ df_units = convert_to_SimpleIndex(
+ data.loc['Units', :].to_frame().T, # type: ignore
+ axis=1,
)
- adf.loc[coll_CMP_name, ('LS1', 'Theta_1')] = coll_config.get(
- 'Theta_1', np.nan
- )
+ data = data.drop('Units', axis=0)
- adf.loc[coll_CMP_name, 'Incomplete'] = 0
-
- if coll_CMP_name != 'collapse':
- # for story-specific evaluation, we need to add a placeholder
- # fragility that will never trigger, but helps us aggregate
- # results in the end
- adf.loc['collapse', ('Demand', 'Directional')] = 1
- adf.loc['collapse', ('Demand', 'Offset')] = 0
- adf.loc['collapse', ('Demand', 'Type')] = 'One'
- adf.loc['collapse', ('Demand', 'Unit')] = 'unitless'
- adf.loc['collapse', ('LS1', 'Theta_0')] = 1e10
- adf.loc['collapse', 'Incomplete'] = 0
-
- elif "Water" not in config['DL']['Asset']['ComponentDatabase']:
- # add a placeholder collapse fragility that will never trigger
- # collapse, but allow damage processes to work with collapse
-
- adf.loc['collapse', ('Demand', 'Directional')] = 1
- adf.loc['collapse', ('Demand', 'Offset')] = 0
- adf.loc['collapse', ('Demand', 'Type')] = 'One'
- adf.loc['collapse', ('Demand', 'Unit')] = 'unitless'
- adf.loc['collapse', ('LS1', 'Theta_0')] = 1e10
- adf.loc['collapse', 'Incomplete'] = 0
-
- if 'IrreparableDamage' in config['DL']['Damage'].keys():
- irrep_config = config['DL']['Damage']['IrreparableDamage']
-
- # add excessive RID fragility according to settings provided in the
- # input file
- adf.loc['excessiveRID', ('Demand', 'Directional')] = 1
- adf.loc['excessiveRID', ('Demand', 'Offset')] = 0
- adf.loc['excessiveRID', ('Demand', 'Type')] = (
- 'Residual Interstory Drift Ratio'
- )
+ out_dict = convert_df_to_dict(data)
- adf.loc['excessiveRID', ('Demand', 'Unit')] = 'unitless'
- adf.loc['excessiveRID', ('LS1', 'Theta_0')] = irrep_config[
- 'DriftCapacityMedian'
- ]
-
- adf.loc['excessiveRID', ('LS1', 'Family')] = "lognormal"
+ out_dict.update(
+ {
+ 'Units': {
+ col: df_units.loc['Units', col] for col in df_units.columns
+ }
+ }
+ )
- adf.loc['excessiveRID', ('LS1', 'Theta_1')] = irrep_config[
- 'DriftCapacityLogStd'
- ]
+ else:
+ out_dict = convert_df_to_dict(data)
- adf.loc['excessiveRID', 'Incomplete'] = 0
-
- # add a placeholder irreparable fragility that will never trigger
- # damage, but allow damage processes to aggregate excessiveRID here
- adf.loc['irreparable', ('Demand', 'Directional')] = 1
- adf.loc['irreparable', ('Demand', 'Offset')] = 0
- adf.loc['irreparable', ('Demand', 'Type')] = 'One'
- adf.loc['irreparable', ('Demand', 'Unit')] = 'unitless'
- adf.loc['irreparable', ('LS1', 'Theta_0')] = 1e10
- adf.loc['irreparable', 'Incomplete'] = 0
-
- # TODO: we can improve this by creating a water
- # network-specific assessment class
- if "Water" in config['DL']['Asset']['ComponentDatabase']:
- # add a placeholder aggregate fragility that will never trigger
- # damage, but allow damage processes to aggregate the
- # various pipeline damages
- adf.loc['aggregate', ('Demand', 'Directional')] = 1
- adf.loc['aggregate', ('Demand', 'Offset')] = 0
- adf.loc['aggregate', ('Demand', 'Type')] = 'Peak Ground Velocity'
- adf.loc['aggregate', ('Demand', 'Unit')] = 'mps'
- adf.loc['aggregate', ('LS1', 'Theta_0')] = 1e10
- adf.loc['aggregate', ('LS2', 'Theta_0')] = 1e10
- adf.loc['aggregate', 'Incomplete'] = 0
-
- PAL.damage.load_damage_model(component_db + [adf])
-
- # load the damage process if needed
- dmg_process = None
- if config['DL']['Damage'].get('DamageProcess', False) is not False:
- dp_approach = config['DL']['Damage']['DamageProcess']
-
- if dp_approach in damage_processes:
- dmg_process = damage_processes[dp_approach]
-
- # For Hazus Earthquake, we need to specify the component ids
- if dp_approach == 'Hazus Earthquake':
- cmp_sample = PAL.asset.save_cmp_sample()
-
- cmp_list = cmp_sample.columns.unique(level=0)
-
- cmp_map = {'STR': '', 'LF': '', 'NSA': ''}
-
- for cmp in cmp_list:
- for cmp_type in cmp_map:
- if cmp_type + '.' in cmp:
- cmp_map[cmp_type] = cmp
-
- new_dmg_process = dmg_process.copy()
- for source_cmp, action in dmg_process.items():
- # first, look at the source component id
- new_source = None
- for cmp_type, cmp_id in cmp_map.items():
- if (cmp_type in source_cmp) and (cmp_id != ''):
- new_source = source_cmp.replace(cmp_type, cmp_id)
- break
-
- if new_source is not None:
- new_dmg_process[new_source] = action
- del new_dmg_process[source_cmp]
- else:
- new_source = source_cmp
-
- # then, look at the target component ids
- for ds_i, target_vals in action.items():
- if isinstance(target_vals, str):
- for cmp_type, cmp_id in cmp_map.items():
- if (cmp_type in target_vals) and (cmp_id != ''):
- target_vals = target_vals.replace(
- cmp_type, cmp_id
- )
-
- new_target_vals = target_vals
-
- else:
- # we assume that target_vals is a list of str
- new_target_vals = []
-
- for target_val in target_vals:
- for cmp_type, cmp_id in cmp_map.items():
- if (cmp_type in target_val) and (
- cmp_id != ''
- ):
- target_val = target_val.replace(
- cmp_type, cmp_id
- )
-
- new_target_vals.append(target_val)
-
- new_dmg_process[new_source][ds_i] = new_target_vals
-
- dmg_process = new_dmg_process
-
- elif dp_approach == "User Defined":
- # load the damage process from a file
- with open(
- config['DL']['Damage']['DamageProcessFilePath'],
- 'r',
- encoding='utf-8',
- ) as f:
- dmg_process = json.load(f)
-
- elif dp_approach == "None":
- # no damage process applied for the calculation
- dmg_process = None
+ with Path(output_path / filename_json).open('w', encoding='utf-8') as f:
+ json.dump(out_dict, f, indent=2)
- else:
- log_msg(
- f"Prescribed Damage Process not recognized: " f"{dp_approach}"
- )
- # calculate damages
- PAL.damage.calculate(sample_size, dmg_process=dmg_process)
+def _result_summary(
+ assessment: DLCalculationAssessment, agg_repair: pd.DataFrame | None
+) -> tuple[pd.DataFrame, pd.DataFrame]:
+ """
+ Generate a summary of the results.
- # if requested, save results
- if 'Damage' in config['DL']['Outputs']:
- damage_sample, damage_units = PAL.damage.save_sample(save_units=True)
- damage_units = damage_units.to_frame().T
+ Parameters
+ ----------
+ assessment : DLCalculationAssessment
+ The assessment object.
+ agg_repair : pd.DataFrame, optional
+ Aggregate repair data.
- if (
- config['DL']['Outputs']['Settings'].get(
- 'AggregateColocatedComponentResults', False
- )
- is True
- ):
- damage_units = damage_units.groupby(
- level=[0, 1, 2, 4], axis=1
- ).first()
+ Returns
+ -------
+ tuple
+ Summary DataFrame and summary statistics DataFrame.
- damage_groupby_uid = damage_sample.groupby(
- level=[0, 1, 2, 4], axis=1
- )
+ """
+ damage_sample = assessment.damage.save_sample()
+ if damage_sample is None or agg_repair is None:
+ return pd.DataFrame(), pd.DataFrame()
- damage_sample = damage_groupby_uid.sum().mask(
- damage_groupby_uid.count() == 0, np.nan
- )
+ assert isinstance(damage_sample, pd.DataFrame)
+ damage_sample = damage_sample.groupby(level=['cmp', 'ds'], axis=1).sum() # type: ignore
+ assert isinstance(damage_sample, pd.DataFrame)
+ damage_sample_s = convert_to_SimpleIndex(damage_sample, axis=1)
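+ # After flattening, column ('collapse', '1') becomes 'collapse-1';
+ # a missing column below means zero probability for that state.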
- out_reqs = [
- out if val else ""
- for out, val in config['DL']['Outputs']['Damage'].items()
- ]
+ if 'collapse-1' in damage_sample_s.columns:
+ damage_sample_s['collapse'] = damage_sample_s['collapse-1']
+ else:
+ damage_sample_s['collapse'] = np.zeros(damage_sample_s.shape[0])
- if np.any(
- np.isin(
- ['Sample', 'Statistics', 'GroupedSample', 'GroupedStatistics'],
- out_reqs,
- )
- ):
- if 'Sample' in out_reqs:
- damage_sample_s = pd.concat([damage_sample, damage_units])
-
- damage_sample_s = convert_to_SimpleIndex(damage_sample_s, axis=1)
- damage_sample_s.to_csv(
- output_path / "DMG_sample.zip",
- index_label=damage_sample_s.columns.name,
- compression=dict(
- method='zip', archive_name='DMG_sample.csv'
- ),
- )
- output_files.append('DMG_sample.zip')
-
- if 'Statistics' in out_reqs:
- damage_stats = describe(damage_sample)
- damage_stats = pd.concat([damage_stats, damage_units])
-
- damage_stats = convert_to_SimpleIndex(damage_stats, axis=1)
- damage_stats.to_csv(
- output_path / "DMG_stats.csv",
- index_label=damage_stats.columns.name,
- )
- output_files.append('DMG_stats.csv')
-
- if np.any(np.isin(['GroupedSample', 'GroupedStatistics'], out_reqs)):
- if (
- config['DL']['Outputs']['Settings'].get(
- 'AggregateColocatedComponentResults', False
- )
- is True
- ):
- damage_groupby = damage_sample.groupby(
- level=[0, 1, 3], axis=1
- )
-
- damage_units = damage_units.groupby(
- level=[0, 1, 3], axis=1
- ).first()
-
- else:
- damage_groupby = damage_sample.groupby(
- level=[0, 1, 4], axis=1
- )
-
- damage_units = damage_units.groupby(
- level=[0, 1, 4], axis=1
- ).first()
-
- grp_damage = damage_groupby.sum().mask(
- damage_groupby.count() == 0, np.nan
- )
-
- # if requested, condense DS output
- if (
- config['DL']['Outputs']['Settings'].get('CondenseDS', False)
- is True
- ):
- # replace non-zero values with 1
- grp_damage = grp_damage.mask(
- grp_damage.astype(np.float64).values > 0, 1
- )
-
- # get the corresponding DS for each column
- ds_list = grp_damage.columns.get_level_values(2).astype(int)
-
- # replace ones with the corresponding DS in each cell
- grp_damage = grp_damage.mul(ds_list, axis=1)
-
- # aggregate across damage state indices
- damage_groupby_2 = grp_damage.groupby(level=[0, 1], axis=1)
-
- # choose the max value
- # i.e., the governing DS for each comp-loc pair
- grp_damage = damage_groupby_2.max().mask(
- damage_groupby_2.count() == 0, np.nan
- )
-
- # aggregate units to the same format
- # assume identical units across locations for each comp
- damage_units = damage_units.groupby(
- level=[0, 1], axis=1
- ).first()
-
- else:
- # otherwise, aggregate damage quantities for each comp
- damage_groupby_2 = grp_damage.groupby(level=0, axis=1)
-
- # preserve NaNs
- grp_damage = damage_groupby_2.sum().mask(
- damage_groupby_2.count() == 0, np.nan
- )
-
- # and aggregate units to the same format
- damage_units = damage_units.groupby(level=0, axis=1).first()
-
- if 'GroupedSample' in out_reqs:
- grp_damage_s = pd.concat([grp_damage, damage_units])
-
- grp_damage_s = convert_to_SimpleIndex(grp_damage_s, axis=1)
- grp_damage_s.to_csv(
- output_path / "DMG_grp.zip",
- index_label=grp_damage_s.columns.name,
- compression=dict(
- method='zip', archive_name='DMG_grp.csv'
- ),
- )
- output_files.append('DMG_grp.zip')
-
- if 'GroupedStatistics' in out_reqs:
- grp_stats = describe(grp_damage)
- grp_stats = pd.concat([grp_stats, damage_units])
-
- grp_stats = convert_to_SimpleIndex(grp_stats, axis=1)
- grp_stats.to_csv(
- output_path / "DMG_grp_stats.csv",
- index_label=grp_stats.columns.name,
- )
- output_files.append('DMG_grp_stats.csv')
-
- # - - - - -
- # This is almost surely not needed any more
- """
- if regional == True:
-
- damage_sample = PAL.damage.save_sample()
-
- # first, get the collapse probability
- df_res_c = pd.DataFrame([0,],
- columns=pd.MultiIndex.from_tuples([('probability',' '),]),
- index=[0, ])
-
- if ("collapse", 0, 1, 1) in damage_sample.columns:
- df_res_c['probability'] = (
- damage_sample[("collapse", 0, 1, 1)].mean())
-
- else:
- df_res_c['probability'] = 0.0
-
- df_res = pd.concat([df_res_c,], axis=1, keys=['collapse',])
-
- df_res.to_csv(output_path/'DM.csv')
- output_files.append('DM.csv')
- """
- # - - - - -
-
- # Loss Assessment -----------------------------------------------------------
-
- # if a loss assessment is requested
- if 'Losses' in config['DL']:
- out_config_loss = config['DL']['Outputs'].get('Loss', {})
-
- # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- # backwards-compatibility for v3.2 and earlier | remove after v4.0
- if config['DL']['Losses'].get('BldgRepair', False):
- config['DL']['Losses']['Repair'] = config['DL']['Losses']['BldgRepair']
-
- if out_config_loss.get('BldgRepair', False):
- out_config_loss['Repair'] = out_config_loss['BldgRepair']
- # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-
- # if requested, calculate repair consequences
- if config['DL']['Losses'].get('Repair', False):
- repair_config = config['DL']['Losses']['Repair']
-
- # load the fragility information
- if repair_config['ConsequenceDatabase'] in default_DBs['repair'].keys():
- consequence_db = [
- 'PelicunDefault/'
- + default_DBs['repair'][repair_config['ConsequenceDatabase']],
- ]
-
- conseq_df = PAL.get_default_data(
- default_DBs['repair'][repair_config['ConsequenceDatabase']][:-4]
- )
- else:
- consequence_db = []
+ if 'irreparable-1' in damage_sample_s.columns:
+ damage_sample_s['irreparable'] = damage_sample_s['irreparable-1']
+ else:
+ damage_sample_s['irreparable'] = np.zeros(damage_sample_s.shape[0])
- conseq_df = pd.DataFrame()
+ if agg_repair is not None:
+ agg_repair_s = convert_to_SimpleIndex(agg_repair, axis=1)
- if repair_config.get('ConsequenceDatabasePath', False) is not False:
- extra_comps = repair_config['ConsequenceDatabasePath']
+ else:
+ agg_repair_s = pd.DataFrame()
- if 'CustomDLDataFolder' in extra_comps:
- extra_comps = extra_comps.replace(
- 'CustomDLDataFolder', custom_dl_file_path
- )
+ summary = pd.concat(
+ [agg_repair_s, damage_sample_s[['collapse', 'irreparable']]], axis=1
+ )
- consequence_db += [
- extra_comps,
- ]
+ summary_stats = describe(summary)
- extra_conseq_df = load_data(
- extra_comps,
- unit_conversion_factors=None,
- orientation=1,
- reindex=False,
- )
+ return summary, summary_stats
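
The summary assembly above always yields 'collapse' and 'irreparable' columns, falling back to zeros when an outcome never occurs in the sample. A standalone sketch of that fallback pattern, with illustrative data:

```python
import numpy as np
import pandas as pd

# toy damage sample: 'collapse-1' exists, 'irreparable-1' does not
df = pd.DataFrame({'collapse-1': [0.0, 1.0, 0.0]})
for outcome in ('collapse', 'irreparable'):
    src = f'{outcome}-1'
    df[outcome] = df[src] if src in df.columns else np.zeros(df.shape[0])
# df now carries both outcome columns; 'irreparable' is all zeros
```
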
- if isinstance(conseq_df, pd.DataFrame):
- conseq_df = pd.concat([conseq_df, extra_conseq_df])
- else:
- conseq_df = extra_conseq_df
-
- consequence_db = consequence_db[::-1]
-
- # remove duplicates from conseq_df
- conseq_df = conseq_df.loc[conseq_df.index.unique(), :]
-
- # add the replacement consequence to the data
- adf = pd.DataFrame(
- columns=conseq_df.columns,
- index=pd.MultiIndex.from_tuples(
- [
- ('replacement', 'Cost'),
- ('replacement', 'Time'),
- ('replacement', 'Carbon'),
- ('replacement', 'Energy'),
- ]
- ),
- )
- # DL_method = repair_config['ConsequenceDatabase']
- DL_method = config['DL']['Damage'].get('DamageProcess', 'User Defined')
+def _parse_requested_output_file_names(output_config: dict) -> set[str]:
+ """
+ Parse the output file names from the output configuration.
- rc = ('replacement', 'Cost')
- if 'ReplacementCost' in repair_config.keys():
- rCost_config = repair_config['ReplacementCost']
+ Parameters
+ ----------
+ output_config : dict
+ Configuration for output files.
- adf.loc[rc, ('Quantity', 'Unit')] = "1 EA"
+ Returns
+ -------
+ set
+ Set of requested output file names.
- adf.loc[rc, ('DV', 'Unit')] = rCost_config["Unit"]
+ """
+ out_reqs = []
+ for out, val in output_config.items():
+ if val is True:
+ out_reqs.append(out)
+ return set(out_reqs)
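
A quick illustration of the helper, assuming a hypothetical output configuration where only some outputs are enabled:

```python
# only keys explicitly set to True are treated as requested outputs
output_config = {'Sample': True, 'Statistics': False, 'GroupedSample': True}
assert _parse_requested_output_file_names(output_config) == {
    'Sample',
    'GroupedSample',
}
```
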
+
+
+def _demand_save(
+ output_config: dict,
+ assessment: DLCalculationAssessment,
+ output_path: Path,
+ out_files: list[str],
+) -> None:
+ """
+ Save demand results to files based on the output config.
- adf.loc[rc, ('DS1', 'Theta_0')] = rCost_config["Median"]
+ Parameters
+ ----------
+ output_config : dict
+ Configuration for output files.
+ assessment : DLCalculationAssessment
+ The assessment object.
+ output_path : Path
+ Path to the output directory.
+ out_files : list
+ List of output file names.
- if pd.isna(rCost_config.get('Distribution', np.nan)) is False:
- adf.loc[rc, ('DS1', 'Family')] = rCost_config["Distribution"]
- adf.loc[rc, ('DS1', 'Theta_1')] = rCost_config["Theta_1"]
+ """
+ out_reqs = _parse_requested_output_file_names(output_config)
- else:
- # add a default replacement cost value as a placeholder
- # the default value depends on the consequence database
+ demand_sample, demand_units_series = assessment.demand.save_sample(
+ save_units=True
+ )
+ assert isinstance(demand_sample, pd.DataFrame)
+ assert isinstance(demand_units_series, pd.Series)
+ demand_units = demand_units_series.to_frame().T
+
+ if 'Sample' in out_reqs:
+ demand_sample_s = pd.concat([demand_sample, demand_units])
+ demand_sample_s = convert_to_SimpleIndex(demand_sample_s, axis=1)
+ demand_sample_s.to_csv(
+ output_path / 'DEM_sample.zip',
+ index_label=demand_sample_s.columns.name,
+ compression={'method': 'zip', 'archive_name': 'DEM_sample.csv'},
+ )
+ out_files.append('DEM_sample.zip')
+
+ if 'Statistics' in out_reqs:
+ demand_stats = describe(demand_sample)
+ demand_stats = pd.concat([demand_stats, demand_units])
+ demand_stats = convert_to_SimpleIndex(demand_stats, axis=1)
+ demand_stats.to_csv(
+ output_path / 'DEM_stats.csv',
+ index_label=demand_stats.columns.name,
+ )
+ out_files.append('DEM_stats.csv')
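
Each save helper follows the same pandas pattern: append a row of units under the sample, flatten the column MultiIndex, and write a CSV into a zip archive. A minimal standalone sketch (column names and units are illustrative; the dash join mirrors what convert_to_SimpleIndex produces):

```python
import pandas as pd

sample = pd.DataFrame(
    [[0.12, 0.008], [0.30, 0.012]],
    columns=pd.MultiIndex.from_tuples([('PFA', '1'), ('PID', '1')]),
)
units = pd.DataFrame([['g', 'rad']], index=['Units'], columns=sample.columns)

out = pd.concat([sample, units])
out.columns = ['-'.join(col) for col in out.columns]
out.to_csv(
    'DEM_sample.zip',
    compression={'method': 'zip', 'archive_name': 'DEM_sample.csv'},
)
```
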
- # for FEMA P-58, use 0 USD
- if DL_method == 'FEMA P-58':
- adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rc, ('DV', 'Unit')] = 'USD_2011'
- adf.loc[rc, ('DS1', 'Theta_0')] = 0
- # for Hazus EQ and HU, use 1.0 as a loss_ratio
- elif DL_method in ['Hazus Earthquake', 'Hazus Hurricane']:
- adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio'
+def _asset_save(
+ output_config: dict,
+ assessment: DLCalculationAssessment,
+ output_path: Path,
+ out_files: list[str],
+ *,
+ aggregate_colocated: bool = False,
+) -> None:
+ """
+ Save asset results to files based on the output config.
- # store the replacement cost that corresponds to total loss
- adf.loc[rc, ('DS1', 'Theta_0')] = 100.0
+ Parameters
+ ----------
+ output_config : dict
+ Configuration for output files.
+ assessment : DLCalculationAssessment
+ The assessment object.
+ output_path : Path
+ Path to the output directory.
+ out_files : list
+ List of output file names.
+ aggregate_colocated : bool, optional
+ Whether to aggregate colocated components. Default is False.
- # otherwise, use 1 (and expect to have it defined by the user)
- else:
- adf.loc[rc, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rc, ('DV', 'Unit')] = 'loss_ratio'
- adf.loc[rc, ('DS1', 'Theta_0')] = 1
+ """
+ output = assessment.asset.save_cmp_sample(save_units=True)
+ assert isinstance(output, tuple)
+ cmp_sample, cmp_units_series = output
+ cmp_units = cmp_units_series.to_frame().T
+
+ if aggregate_colocated:
+ cmp_units = cmp_units.groupby(level=['cmp', 'loc', 'dir'], axis=1).first() # type: ignore
+ cmp_groupby_uid = cmp_sample.groupby(level=['cmp', 'loc', 'dir'], axis=1) # type: ignore
+ cmp_sample = cmp_groupby_uid.sum().mask(cmp_groupby_uid.count() == 0, np.nan)
+
+ out_reqs = _parse_requested_output_file_names(output_config)
+
+ if 'Sample' in out_reqs:
+ cmp_sample_s = pd.concat([cmp_sample, cmp_units])
+
+ cmp_sample_s = convert_to_SimpleIndex(cmp_sample_s, axis=1)
+ cmp_sample_s.to_csv(
+ output_path / 'CMP_sample.zip',
+ index_label=cmp_sample_s.columns.name,
+ compression={'method': 'zip', 'archive_name': 'CMP_sample.csv'},
+ )
+ out_files.append('CMP_sample.zip')
- rt = ('replacement', 'Time')
- if 'ReplacementTime' in repair_config.keys():
- rTime_config = repair_config['ReplacementTime']
- rt = ('replacement', 'Time')
+ if 'Statistics' in out_reqs:
+ cmp_stats = describe(cmp_sample)
+ cmp_stats = pd.concat([cmp_stats, cmp_units])
- adf.loc[rt, ('Quantity', 'Unit')] = "1 EA"
+ cmp_stats = convert_to_SimpleIndex(cmp_stats, axis=1)
+ cmp_stats.to_csv(
+ output_path / 'CMP_stats.csv', index_label=cmp_stats.columns.name
+ )
+ out_files.append('CMP_stats.csv')
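
The aggregate_colocated branch above uses a sum-then-mask idiom so that groups consisting only of NaNs stay NaN instead of collapsing to zero. On a toy frame:

```python
import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [('C1', '1', '1', 'a'), ('C1', '1', '1', 'b'), ('C2', '1', '1', 'a')],
    names=['cmp', 'loc', 'dir', 'uid'],
)
df = pd.DataFrame([[1.0, 2.0, np.nan]], columns=cols)

grp = df.groupby(level=['cmp', 'loc', 'dir'], axis=1)
# a plain sum() would turn the all-NaN C2 group into 0.0
agg = grp.sum().mask(grp.count() == 0, np.nan)
# agg: C1 -> 3.0, C2 -> NaN
```
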
+
+
+def _damage_save(
+ output_config: dict,
+ assessment: DLCalculationAssessment,
+ output_path: Path,
+ out_files: list[str],
+ *,
+ aggregate_colocated: bool = False,
+ condense_ds: bool = False,
+) -> None:
+ """
+ Save damage results to files based on the output config.
- adf.loc[rt, ('DV', 'Unit')] = rTime_config["Unit"]
+ Parameters
+ ----------
+ output_config : dict
+ Configuration for output files.
+ assessment : DLCalculationAssessment
+ The assessment object.
+ output_path : Path
+ Path to the output directory.
+ out_files : list
+ List of output file names.
+ aggregate_colocated : bool, optional
+ Whether to aggregate colocated components. Default is False.
+ condense_ds : bool, optional
+ Whether to condense damage states. Default is False.
- adf.loc[rt, ('DS1', 'Theta_0')] = rTime_config["Median"]
+ """
+ output = assessment.damage.save_sample(save_units=True)
+ assert isinstance(output, tuple)
+ damage_sample, damage_units_series = output
+ damage_units = damage_units_series.to_frame().T
+
+ if aggregate_colocated:
+ damage_units = damage_units.groupby( # type: ignore
+ level=['cmp', 'loc', 'dir', 'ds'], axis=1
+ ).first()
+ damage_groupby_uid = damage_sample.groupby( # type: ignore
+ level=['cmp', 'loc', 'dir', 'ds'], axis=1
+ )
+ damage_sample = damage_groupby_uid.sum().mask(
+ damage_groupby_uid.count() == 0, np.nan
+ )
- if pd.isna(rTime_config.get('Distribution', np.nan)) is False:
- adf.loc[rt, ('DS1', 'Family')] = rTime_config["Distribution"]
- adf.loc[rt, ('DS1', 'Theta_1')] = rTime_config["Theta_1"]
- else:
- # add a default replacement time value as a placeholder
- # the default value depends on the consequence database
-
- # for FEMA P-58, use 0 worker_days
- if DL_method == 'FEMA P-58':
- adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rt, ('DV', 'Unit')] = 'worker_day'
- adf.loc[rt, ('DS1', 'Theta_0')] = 0
-
- # for Hazus EQ, use 1.0 as a loss_ratio
- elif DL_method == 'Hazus Earthquake - Buildings':
- adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rt, ('DV', 'Unit')] = 'day'
-
- # load the replacement time that corresponds to total loss
- occ_type = config['DL']['Asset']['OccupancyType']
- adf.loc[rt, ('DS1', 'Theta_0')] = conseq_df.loc[
- (f"STR.{occ_type}", 'Time'), ('DS5', 'Theta_0')
- ]
-
- # otherwise, use 1 (and expect to have it defined by the user)
- else:
- adf.loc[rt, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rt, ('DV', 'Unit')] = 'loss_ratio'
- adf.loc[rt, ('DS1', 'Theta_0')] = 1
-
- rcarb = ('replacement', 'Carbon')
- if 'ReplacementCarbon' in repair_config.keys():
- rCarbon_config = repair_config['ReplacementCarbon']
- rcarb = ('replacement', 'Carbon')
-
- adf.loc[rcarb, ('Quantity', 'Unit')] = "1 EA"
-
- adf.loc[rcarb, ('DV', 'Unit')] = rCarbon_config["Unit"]
-
- adf.loc[rcarb, ('DS1', 'Theta_0')] = rCarbon_config["Median"]
-
- if pd.isna(rCarbon_config.get('Distribution', np.nan)) is False:
- adf.loc[rcarb, ('DS1', 'Family')] = rCarbon_config[
- "Distribution"
- ]
- adf.loc[rcarb, ('DS1', 'Theta_1')] = rCarbon_config["Theta_1"]
- else:
- # add a default replacement carbon value as a placeholder
- # the default value depends on the consequence database
+ out_reqs = _parse_requested_output_file_names(output_config)
- # for FEMA P-58, use 0 kg
- if DL_method == 'FEMA P-58':
- adf.loc[rcarb, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[rcarb, ('DV', 'Unit')] = 'kg'
- adf.loc[rcarb, ('DS1', 'Theta_0')] = 0
+ if 'Sample' in out_reqs:
+ damage_sample_s = pd.concat([damage_sample, damage_units])
- else:
- # for everything else, remove this consequence
- adf.drop(rcarb, inplace=True)
+ damage_sample_s = convert_to_SimpleIndex(damage_sample_s, axis=1)
+ damage_sample_s.to_csv(
+ output_path / 'DMG_sample.zip',
+ index_label=damage_sample_s.columns.name,
+ compression={
+ 'method': 'zip',
+ 'archive_name': 'DMG_sample.csv',
+ },
+ )
+ out_files.append('DMG_sample.zip')
- ren = ('replacement', 'Energy')
- if 'ReplacementEnergy' in repair_config.keys():
- rEnergy_config = repair_config['ReplacementEnergy']
- ren = ('replacement', 'Energy')
+ if 'Statistics' in out_reqs:
+ damage_stats = describe(damage_sample)
+ damage_stats = pd.concat([damage_stats, damage_units])
- adf.loc[ren, ('Quantity', 'Unit')] = "1 EA"
+ damage_stats = convert_to_SimpleIndex(damage_stats, axis=1)
+ damage_stats.to_csv(
+ output_path / 'DMG_stats.csv',
+ index_label=damage_stats.columns.name,
+ )
+ out_files.append('DMG_stats.csv')
- adf.loc[ren, ('DV', 'Unit')] = rEnergy_config["Unit"]
+ if out_reqs.intersection({'GroupedSample', 'GroupedStatistics'}):
+ damage_groupby = damage_sample.groupby(level=['cmp', 'loc', 'ds'], axis=1) # type: ignore
+ damage_units = damage_units.groupby(
+ level=['cmp', 'loc', 'ds'], axis=1
+ ).first() # type: ignore
- adf.loc[ren, ('DS1', 'Theta_0')] = rEnergy_config["Median"]
+ grp_damage = damage_groupby.sum().mask(damage_groupby.count() == 0, np.nan)
- if pd.isna(rEnergy_config.get('Distribution', np.nan)) is False:
- adf.loc[ren, ('DS1', 'Family')] = rEnergy_config["Distribution"]
- adf.loc[ren, ('DS1', 'Theta_1')] = rEnergy_config["Theta_1"]
- else:
- # add a default replacement energy value as a placeholder
- # the default value depends on the consequence database
-
- # for FEMA P-58, use 0 kg
- if DL_method == 'FEMA P-58':
- adf.loc[ren, ('Quantity', 'Unit')] = '1 EA'
- adf.loc[ren, ('DV', 'Unit')] = 'MJ'
- adf.loc[ren, ('DS1', 'Theta_0')] = 0
-
- else:
- # for everything else, remove this consequence
- adf.drop(ren, inplace=True)
-
- # prepare the loss map
- loss_map = None
- if repair_config['MapApproach'] == "Automatic":
- # get the damage sample
- dmg_sample = PAL.damage.save_sample()
-
- # create a mapping for all components that are also in
- # the prescribed consequence database
- dmg_cmps = dmg_sample.columns.unique(level='cmp')
- loss_cmps = conseq_df.index.unique(level=0)
-
- drivers = []
- loss_models = []
-
- if DL_method in ['FEMA P-58', 'Hazus Hurricane']:
- # with these methods, we assume fragility and consequence data
- # have the same IDs
-
- for dmg_cmp in dmg_cmps:
- if dmg_cmp == 'collapse':
- continue
-
- if dmg_cmp in loss_cmps:
- drivers.append(f'DMG-{dmg_cmp}')
- loss_models.append(dmg_cmp)
-
- elif DL_method in [
- 'Hazus Earthquake',
- 'Hazus Earthquake Transportation',
- ]:
- # with Hazus Earthquake we assume that consequence
- # archetypes are only differentiated by occupancy type
- occ_type = config['DL']['Asset'].get('OccupancyType', None)
-
- for dmg_cmp in dmg_cmps:
- if dmg_cmp == 'collapse':
- continue
-
- cmp_class = dmg_cmp.split('.')[0]
- if occ_type is not None:
- loss_cmp = f'{cmp_class}.{occ_type}'
- else:
- loss_cmp = cmp_class
-
- if loss_cmp in loss_cmps:
- drivers.append(f'DMG-{dmg_cmp}')
- loss_models.append(loss_cmp)
-
- loss_map = pd.DataFrame(
- loss_models, columns=['Repair'], index=drivers
- )
+ # if requested, condense DS output
+ if condense_ds:
+ # replace non-zero values with 1
+ grp_damage = grp_damage.mask(
+ grp_damage.astype(np.float64).to_numpy() > 0, 1
+ )
- elif repair_config['MapApproach'] == "User Defined":
- if repair_config.get('MapFilePath', False) is not False:
- loss_map_path = repair_config['MapFilePath']
+ # get the corresponding DS for each column
+ ds_list = grp_damage.columns.get_level_values('ds').astype(int)
- loss_map_path = loss_map_path.replace(
- 'CustomDLDataFolder', custom_dl_file_path
- )
+ # replace ones with the corresponding DS in each cell
+ grp_damage = grp_damage.mul(ds_list, axis=1)
- else:
- print("User defined loss map path missing. Terminating analysis")
- return -1
+ # aggregate across damage state indices
+ damage_groupby_2 = grp_damage.groupby(level=['cmp', 'loc'], axis=1)
- loss_map = pd.read_csv(loss_map_path, index_col=0)
+ # choose the max value
+ # i.e., the governing DS for each comp-loc pair
+ grp_damage = damage_groupby_2.max().mask(
+ damage_groupby_2.count() == 0, np.nan
+ )
- # prepare additional loss map entries, if needed
- if 'DMG-collapse' not in loss_map.index:
- loss_map.loc['DMG-collapse', 'Repair'] = 'replacement'
- loss_map.loc['DMG-irreparable', 'Repair'] = 'replacement'
+ # aggregate units to the same format
+ # assume identical units across locations for each comp
+ damage_units = damage_units.groupby(level=['cmp', 'loc'], axis=1).first() # type: ignore
- # assemble the list of requested decision variables
- DV_list = []
- if repair_config.get('DecisionVariables', False) is not False:
- for DV_i, DV_status in repair_config['DecisionVariables'].items():
- if DV_status is True:
- DV_list.append(DV_i)
+ else:
+ # otherwise, aggregate damage quantities for each comp
+ damage_groupby_2 = grp_damage.groupby(level='cmp', axis=1)
- else:
- DV_list = None
-
- PAL.repair.load_model(
- consequence_db
- + [
- adf,
- ],
- loss_map,
- decision_variables=DV_list,
+ # preserve NaNs
+ grp_damage = damage_groupby_2.sum().mask(
+ damage_groupby_2.count() == 0, np.nan
)
- PAL.repair.calculate(sample_size)
-
- agg_repair = PAL.repair.aggregate_losses()
-
- # if requested, save results
- if out_config_loss.get('Repair', False):
- repair_sample, repair_units = PAL.repair.save_sample(save_units=True)
- repair_units = repair_units.to_frame().T
-
- if (
- config['DL']['Outputs']['Settings'].get(
- 'AggregateColocatedComponentResults', False
- )
- is True
- ):
- repair_units = repair_units.groupby(
- level=[0, 1, 2, 3, 4, 5], axis=1
- ).first()
-
- repair_groupby_uid = repair_sample.groupby(
- level=[0, 1, 2, 3, 4, 5], axis=1
- )
-
- repair_sample = repair_groupby_uid.sum().mask(
- repair_groupby_uid.count() == 0, np.nan
- )
-
- out_reqs = [
- out if val else ""
- for out, val in out_config_loss['Repair'].items()
- ]
-
- if np.any(
- np.isin(
- [
- 'Sample',
- 'Statistics',
- 'GroupedSample',
- 'GroupedStatistics',
- 'AggregateSample',
- 'AggregateStatistics',
- ],
- out_reqs,
- )
- ):
- if 'Sample' in out_reqs:
- repair_sample_s = repair_sample.copy()
- repair_sample_s = pd.concat([repair_sample_s, repair_units])
-
- repair_sample_s = convert_to_SimpleIndex(
- repair_sample_s, axis=1
- )
- repair_sample_s.to_csv(
- output_path / "DV_repair_sample.zip",
- index_label=repair_sample_s.columns.name,
- compression=dict(
- method='zip',
- archive_name='DV_repair_sample.csv',
- ),
- )
- output_files.append('DV_repair_sample.zip')
-
- if 'Statistics' in out_reqs:
- repair_stats = describe(repair_sample)
- repair_stats = pd.concat([repair_stats, repair_units])
-
- repair_stats = convert_to_SimpleIndex(repair_stats, axis=1)
- repair_stats.to_csv(
- output_path / "DV_repair_stats.csv",
- index_label=repair_stats.columns.name,
- )
- output_files.append('DV_repair_stats.csv')
-
- if np.any(
- np.isin(['GroupedSample', 'GroupedStatistics'], out_reqs)
- ):
- repair_groupby = repair_sample.groupby(
- level=[0, 1, 2], axis=1
- )
-
- repair_units = repair_units.groupby(
- level=[0, 1, 2], axis=1
- ).first()
-
- grp_repair = repair_groupby.sum().mask(
- repair_groupby.count() == 0, np.nan
- )
-
- if 'GroupedSample' in out_reqs:
- grp_repair_s = pd.concat([grp_repair, repair_units])
-
- grp_repair_s = convert_to_SimpleIndex(
- grp_repair_s, axis=1
- )
- grp_repair_s.to_csv(
- output_path / "DV_repair_grp.zip",
- index_label=grp_repair_s.columns.name,
- compression=dict(
- method='zip',
- archive_name='DV_repair_grp.csv',
- ),
- )
- output_files.append('DV_repair_grp.zip')
-
- if 'GroupedStatistics' in out_reqs:
- grp_stats = describe(grp_repair)
- grp_stats = pd.concat([grp_stats, repair_units])
-
- grp_stats = convert_to_SimpleIndex(grp_stats, axis=1)
- grp_stats.to_csv(
- output_path / "DV_repair_grp_stats.csv",
- index_label=grp_stats.columns.name,
- )
- output_files.append('DV_repair_grp_stats.csv')
-
- if np.any(
- np.isin(['AggregateSample', 'AggregateStatistics'], out_reqs)
- ):
- if 'AggregateSample' in out_reqs:
- agg_repair_s = convert_to_SimpleIndex(agg_repair, axis=1)
- agg_repair_s.to_csv(
- output_path / "DV_repair_agg.zip",
- index_label=agg_repair_s.columns.name,
- compression=dict(
- method='zip',
- archive_name='DV_repair_agg.csv',
- ),
- )
- output_files.append('DV_repair_agg.zip')
-
- if 'AggregateStatistics' in out_reqs:
- agg_stats = convert_to_SimpleIndex(
- describe(agg_repair), axis=1
- )
- agg_stats.to_csv(
- output_path / "DV_repair_agg_stats.csv",
- index_label=agg_stats.columns.name,
- )
- output_files.append('DV_repair_agg_stats.csv')
-
- # Result Summary -----------------------------------------------------------
-
- if 'damage_sample' not in locals():
- damage_sample = PAL.damage.save_sample()
-
- damage_sample = damage_sample.groupby(level=[0, 3], axis=1).sum()
- damage_sample_s = convert_to_SimpleIndex(damage_sample, axis=1)
-
- if 'collapse-1' in damage_sample_s.columns:
- damage_sample_s['collapse'] = damage_sample_s['collapse-1']
- else:
- damage_sample_s['collapse'] = np.zeros(damage_sample_s.shape[0])
+ # and aggregate units to the same format
+ damage_units = damage_units.groupby(level='cmp', axis=1).first() # type: ignore
- if 'irreparable-1' in damage_sample_s.columns:
- damage_sample_s['irreparable'] = damage_sample_s['irreparable-1']
- else:
- damage_sample_s['irreparable'] = np.zeros(damage_sample_s.shape[0])
+ if 'GroupedSample' in out_reqs:
+ grp_damage_s = pd.concat([grp_damage, damage_units])
- if 'Losses' in config['DL']:
- if 'agg_repair' not in locals():
- agg_repair = PAL.repair.aggregate_losses()
+ grp_damage_s = convert_to_SimpleIndex(grp_damage_s, axis=1)
+ grp_damage_s.to_csv(
+ output_path / 'DMG_grp.zip',
+ index_label=grp_damage_s.columns.name,
+ compression={
+ 'method': 'zip',
+ 'archive_name': 'DMG_grp.csv',
+ },
+ )
+ out_files.append('DMG_grp.zip')
- agg_repair_s = convert_to_SimpleIndex(agg_repair, axis=1)
+ if 'GroupedStatistics' in out_reqs:
+ grp_stats = describe(grp_damage)
+ grp_stats = pd.concat([grp_stats, damage_units])
- else:
- agg_repair_s = pd.DataFrame()
+ grp_stats = convert_to_SimpleIndex(grp_stats, axis=1)
+ grp_stats.to_csv(
+ output_path / 'DMG_grp_stats.csv',
+ index_label=grp_stats.columns.name,
+ )
+ out_files.append('DMG_grp_stats.csv')
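
When condense_ds is set, the grouped sample is reduced to one governing damage state per cmp-loc pair: nonzero quantities become 1, every column is scaled by its DS index, and the column-wise maximum is kept. A worked toy example of the reduction in the branch above:

```python
import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [('C1', '1', '1'), ('C1', '1', '2')], names=['cmp', 'loc', 'ds']
)
dmg = pd.DataFrame([[0.0, 4.0], [2.5, 0.0]], columns=cols)

dmg = dmg.mask(dmg.astype(np.float64).to_numpy() > 0, 1)
dmg = dmg.mul(dmg.columns.get_level_values('ds').astype(int), axis=1)
grp = dmg.groupby(level=['cmp', 'loc'], axis=1)
governing = grp.max().mask(grp.count() == 0, np.nan)
# realization 0 -> DS2 governs, realization 1 -> DS1 governs
```
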
+
+
+def _loss_save(
+ output_config: dict,
+ assessment: DLCalculationAssessment,
+ output_path: Path,
+ out_files: list[str],
+ agg_repair: pd.DataFrame,
+ *,
+ aggregate_colocated: bool = False,
+) -> None:
+ """
+ Save loss results to files based on the output config.
- summary = pd.concat(
- [agg_repair_s, damage_sample_s[['collapse', 'irreparable']]], axis=1
- )
+ Parameters
+ ----------
+ output_config : dict
+ Configuration for output files.
+ assessment : DLCalculationAssessment
+ The assessment object.
+ output_path : Path
+ Path to the output directory.
+ out_files : list
+ List of output file names.
+ agg_repair : pd.DataFrame
+ Aggregate repair data.
+ aggregate_colocated : bool, optional
+ Whether to aggregate colocated components. Default is False.
- summary_stats = describe(summary)
+ """
+ out = assessment.loss.ds_model.save_sample(save_units=True)
+ assert isinstance(out, tuple)
+ repair_sample, repair_units_series = out
+ repair_units = repair_units_series.to_frame().T
+
+ if aggregate_colocated:
+ repair_units = repair_units.groupby( # type: ignore
+ level=['dv', 'loss', 'dmg', 'ds', 'loc', 'dir'], axis=1
+ ).first()
+ repair_groupby_uid = repair_sample.groupby( # type: ignore
+ level=['dv', 'loss', 'dmg', 'ds', 'loc', 'dir'], axis=1
+ )
+ repair_sample = repair_groupby_uid.sum().mask(
+ repair_groupby_uid.count() == 0, np.nan
+ )
- # save summary sample
- summary.to_csv(output_path / "DL_summary.csv", index_label='#')
- output_files.append('DL_summary.csv')
+ out_reqs = _parse_requested_output_file_names(output_config)
- # save summary statistics
- summary_stats.to_csv(output_path / "DL_summary_stats.csv")
- output_files.append('DL_summary_stats.csv')
+ if 'Sample' in out_reqs:
+ repair_sample_s = repair_sample.copy()
+ repair_sample_s = pd.concat([repair_sample_s, repair_units])
- # create json outputs if needed
- if config['DL']['Outputs']['Format']['JSON'] is True:
- for filename in output_files:
- filename_json = filename[:-3] + 'json'
+ repair_sample_s = convert_to_SimpleIndex(repair_sample_s, axis=1)
+ repair_sample_s.to_csv(
+ output_path / 'DV_repair_sample.zip',
+ index_label=repair_sample_s.columns.name,
+ compression={
+ 'method': 'zip',
+ 'archive_name': 'DV_repair_sample.csv',
+ },
+ )
+ out_files.append('DV_repair_sample.zip')
- if (
- config['DL']['Outputs']['Settings'].get('SimpleIndexInJSON', False)
- is True
- ):
- df = pd.read_csv(output_path / filename, index_col=0)
- else:
- df = convert_to_MultiIndex(
- pd.read_csv(output_path / filename, index_col=0), axis=1
- )
+ if 'Statistics' in out_reqs:
+ repair_stats = describe(repair_sample)
+ repair_stats = pd.concat([repair_stats, repair_units])
- if "Units" in df.index:
- df_units = convert_to_SimpleIndex(
- df.loc['Units', :].to_frame().T, axis=1
- )
+ repair_stats = convert_to_SimpleIndex(repair_stats, axis=1)
+ repair_stats.to_csv(
+ output_path / 'DV_repair_stats.csv',
+ index_label=repair_stats.columns.name,
+ )
+ out_files.append('DV_repair_stats.csv')
+
+ if out_reqs.intersection({'GroupedSample', 'GroupedStatistics'}):
+ repair_groupby = repair_sample.groupby(level=['dv', 'loss', 'dmg'], axis=1) # type: ignore
+ repair_units = repair_units.groupby( # type: ignore
+ level=['dv', 'loss', 'dmg'], axis=1
+ ).first()
+ grp_repair = repair_groupby.sum().mask(repair_groupby.count() == 0, np.nan)
+
+ if 'GroupedSample' in out_reqs:
+ grp_repair_s = pd.concat([grp_repair, repair_units])
+
+ grp_repair_s = convert_to_SimpleIndex(grp_repair_s, axis=1)
+ grp_repair_s.to_csv(
+ output_path / 'DV_repair_grp.zip',
+ index_label=grp_repair_s.columns.name,
+ compression={
+ 'method': 'zip',
+ 'archive_name': 'DV_repair_grp.csv',
+ },
+ )
+ out_files.append('DV_repair_grp.zip')
- df.drop("Units", axis=0, inplace=True)
+ if 'GroupedStatistics' in out_reqs:
+ grp_stats = describe(grp_repair)
+ grp_stats = pd.concat([grp_stats, repair_units])
- out_dict = convert_df_to_dict(df)
+ grp_stats = convert_to_SimpleIndex(grp_stats, axis=1)
+ grp_stats.to_csv(
+ output_path / 'DV_repair_grp_stats.csv',
+ index_label=grp_stats.columns.name,
+ )
+ out_files.append('DV_repair_grp_stats.csv')
+
+ if out_reqs.intersection({'AggregateSample', 'AggregateStatistics'}):
+ if 'AggregateSample' in out_reqs:
+ agg_repair_s = convert_to_SimpleIndex(agg_repair, axis=1)
+ agg_repair_s.to_csv(
+ output_path / 'DV_repair_agg.zip',
+ index_label=agg_repair_s.columns.name,
+ compression={
+ 'method': 'zip',
+ 'archive_name': 'DV_repair_agg.csv',
+ },
+ )
+ out_files.append('DV_repair_agg.zip')
- out_dict.update(
- {
- "Units": {
- col: df_units.loc["Units", col]
- for col in df_units.columns
- }
- }
- )
+ if 'AggregateStatistics' in out_reqs:
+ agg_stats = convert_to_SimpleIndex(describe(agg_repair), axis=1)
+ agg_stats.to_csv(
+ output_path / 'DV_repair_agg_stats.csv',
+ index_label=agg_stats.columns.name,
+ )
+ out_files.append('DV_repair_agg_stats.csv')
- else:
- out_dict = convert_df_to_dict(df)
- with open(output_path / filename_json, 'w') as f:
- json.dump(out_dict, f, indent=2)
+def _remove_existing_files(output_path: Path, known_output_files: list[str]) -> None:
+ """
+ Remove known existing files from the specified output path.
- # remove csv outputs if they were not requested
- if config['DL']['Outputs']['Format']['CSV'] is False:
- for filename in output_files:
- # keep the DL_summary and DL_summary_stats files
- if 'DL_summary' in filename:
- continue
+ This function initializes the output folder by removing any
+ existing files whose names appear in the `known_output_files` list.
- os.remove(output_path / filename)
+ Parameters
+ ----------
+ output_path : Path
+ The path to the output folder where files are located.
+ known_output_files : list of str
+ A list of filenames that should be removed from the output
+ folder if they are present.
+
+ Raises
+ ------
+ OSError
+ If an error occurs while attempting to remove a file, an
+ OSError will be raised with the specific details of the
+ failure.
- return 0
+ """
+ # Initialize the output folder, i.e., remove known output files
+ # left over from previous runs
+ files = os.listdir(output_path)
+ for filename in files:
+ if filename in known_output_files:
+ try:
+ (output_path / filename).unlink()
+ except OSError as exc:
+ msg = (
+ 'Error occurred while removing '
+ f'`{output_path / filename}`: {exc}'
+ )
+ raise OSError(msg) from exc
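
Hypothetical usage of the helper, clearing known pelicun outputs before a re-run (the directory and file names are illustrative):

```python
from pathlib import Path

_remove_existing_files(
    Path('./results'), ['DL_summary.csv', 'DMG_sample.zip', 'DEM_stats.csv']
)
```
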
-def main():
- args = sys.argv[1:]
+def main() -> None:
+ """Parse arguments and run the pelicun calculation."""
+ args_list = sys.argv[1:]
parser = argparse.ArgumentParser()
- parser.add_argument('-c', '--filenameDL')
- parser.add_argument('-d', '--demandFile', default=None)
- parser.add_argument('-s', '--Realizations', default=None)
- parser.add_argument('--dirnameOutput', default=None)
- parser.add_argument('--event_time', default=None)
parser.add_argument(
- '--detailed_results', default=True, type=str2bool, nargs='?', const=True
+ '-c',
+ '--filenameDL',
+ help='Path to the damage and loss (DL) configuration file.',
)
parser.add_argument(
- '--coupled_EDP', default=False, type=str2bool, nargs='?', const=False
+ '-d',
+ '--demandFile',
+ default=None,
+ help='Path to the file containing demand data.',
)
parser.add_argument(
- '--log_file', default=True, type=str2bool, nargs='?', const=True
+ '-s',
+ '--Realizations',
+ default=None,
+ help='Number of realizations to run in the probabilistic model.',
)
parser.add_argument(
- '--ground_failure', default=False, type=str2bool, nargs='?', const=False
+ '--dirnameOutput',
+ default=None,
+ help='Directory where output files will be stored.',
)
- parser.add_argument('--auto_script', default=None)
- parser.add_argument('--resource_dir', default=None)
- parser.add_argument('--custom_model_dir', default=None)
parser.add_argument(
- '--regional', default=False, type=str2bool, nargs='?', const=False
+ '--detailed_results',
+ default=True,
+ type=str2bool,
+ nargs='?',
+ const=True,
+ help='Generate detailed results (True/False). Defaults to True.',
)
- parser.add_argument('--output_format', default=None)
- # parser.add_argument('-d', '--demandFile', default=None)
- # parser.add_argument('--DL_Method', default = None)
- # parser.add_argument('--outputBIM', default='BIM.csv')
- # parser.add_argument('--outputEDP', default='EDP.csv')
- # parser.add_argument('--outputDM', default='DM.csv')
- # parser.add_argument('--outputDV', default='DV.csv')
-
- if not args:
- print(f'Welcome. This is pelicun version {pelicun.__version__}')
- print(
- 'To access the documentation visit '
- 'https://nheri-simcenter.github.io/pelicun/index.html'
- )
- print()
+ parser.add_argument(
+ '--coupled_EDP',
+ default=False,
+ type=str2bool,
+ nargs='?',
+ const=False,
+ help=(
+ 'Consider coupled Engineering Demand Parameters (EDPs) '
+ 'in calculations (True/False). Defaults to False.'
+ ),
+ )
+ parser.add_argument(
+ '--log_file',
+ default=True,
+ type=str2bool,
+ nargs='?',
+ const=True,
+ help='Generate a log file (True/False). Defaults to True.',
+ )
+ parser.add_argument(
+ '--auto_script',
+ default=None,
+ help='Optional path to a config auto-generation script.',
+ )
+ parser.add_argument(
+ '--custom_model_dir',
+ default=None,
+ help='Directory containing custom model data.',
+ )
+ parser.add_argument(
+ '--output_format',
+ default=None,
+ help='Desired output format for the results.',
+ )
+ # TODO(JVM): fix color warnings
+ # parser.add_argument(
+ # '--color_warnings',
+ # default=False,
+ # type=str2bool,
+ # nargs='?',
+ # const=False,
+ # help=(
+ # 'Enable colored warnings in the console '
+ # 'output (True/False). Defaults to False.'
+ # ),
+ # )
+ parser.add_argument(
+ '--ground_failure',
+ default=False,
+ type=str2bool,
+ nargs='?',
+ const=False,
+ help='Currently not used. Soon to be deprecated.',
+ )
+ parser.add_argument(
+ '--regional',
+ default=False,
+ type=str2bool,
+ nargs='?',
+ const=False,
+ help='Currently not used. Soon to be deprecated.',
+ )
+ parser.add_argument('--resource_dir', default=None)
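
The boolean flags above rely on a str2bool converter that is defined elsewhere in the module and not shown in this diff. A minimal sketch of such a converter, assuming the conventional argparse idiom:

```python
import argparse

def str2bool(v: str) -> bool:
    # accept the usual textual spellings of booleans
    if isinstance(v, bool):
        return v
    if v.lower() in {'yes', 'true', 't', 'y', '1'}:
        return True
    if v.lower() in {'no', 'false', 'f', 'n', '0'}:
        return False
    msg = 'Boolean value expected.'
    raise argparse.ArgumentTypeError(msg)
```
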
+
+ if not args_list:
parser.print_help()
return
- args = parser.parse_args(args)
+ args = parser.parse_args(args_list)
- log_msg('Initializing pelicun calculation...')
+ log_msg('Initializing pelicun calculation.')
- # print(args)
- out = run_pelicun(
- args.filenameDL,
+ run_pelicun(
+ config_path=args.filenameDL,
demand_file=args.demandFile,
output_path=args.dirnameOutput,
realizations=args.Realizations,
- detailed_results=args.detailed_results,
- coupled_EDP=args.coupled_EDP,
- log_file=args.log_file,
- event_time=args.event_time,
- ground_failure=args.ground_failure,
auto_script_path=args.auto_script,
- resource_dir=args.resource_dir,
custom_model_dir=args.custom_model_dir,
- regional=args.regional,
output_format=args.output_format,
+ detailed_results=args.detailed_results,
+ coupled_edp=args.coupled_EDP,
)
- if out == -1:
- log_msg("pelicun calculation failed.")
- else:
- log_msg('pelicun calculation completed.')
+ log_msg('pelicun calculation completed.')
if __name__ == '__main__':
diff --git a/pelicun/tools/HDF_to_CSV.py b/pelicun/tools/HDF_to_CSV.py
index cb9e04acc..3cd27bddc 100644
--- a/pelicun/tools/HDF_to_CSV.py
+++ b/pelicun/tools/HDF_to_CSV.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -33,26 +32,26 @@
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
-#
-# Contributors:
-# Adam Zsarnóczay
-import pandas as pd
-import sys
+from __future__ import annotations
+
import argparse
+import sys
from pathlib import Path
+import pandas as pd
+
-def convert_HDF(HDF_path):
- HDF_ext = HDF_path.split('.')[-1]
- CSV_base = HDF_path[: -len(HDF_ext) - 1]
+def convert_HDF(hdf_path: str) -> None: # noqa: N802
+ hdf_ext = hdf_path.split('.')[-1]
+ csv_base = hdf_path[: -len(hdf_ext) - 1]
- HDF_path = Path(HDF_path).resolve()
+ hdf_path = Path(hdf_path).resolve()
- store = pd.HDFStore(HDF_path)
+ store = pd.HDFStore(hdf_path)
- for key in store.keys():
- store[key].to_csv(f'{CSV_base}_{key[1:].replace("/","_")}.csv')
+ for key in store:
+ store[key].to_csv(f'{csv_base}_{key[1:].replace("/", "_")}.csv')
store.close()
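
Hypothetical usage: for an HDF5 store 'results.h5' holding keys '/demand' and '/damage', the call below writes results_demand.csv and results_damage.csv:

```python
convert_HDF('results.h5')
```
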
@@ -63,6 +62,6 @@ def convert_HDF(HDF_path):
parser = argparse.ArgumentParser()
parser.add_argument('HDF_path')
- args = parser.parse_args(args)
+ parser_args = parser.parse_args(args)
- convert_HDF(args.HDF_path)
+ convert_HDF(parser_args.HDF_path)
diff --git a/pelicun/tools/__init__.py b/pelicun/tools/__init__.py
new file mode 100644
index 000000000..cf08aa216
--- /dev/null
+++ b/pelicun/tools/__init__.py
@@ -0,0 +1,34 @@
+# noqa: D104
+# Copyright (c) 2018 Leland Stanford Junior University
+# Copyright (c) 2018 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
diff --git a/pelicun/uq.py b/pelicun/uq.py
index 8e74952d5..56933dac2 100644
--- a/pelicun/uq.py
+++ b/pelicun/uq.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
@@ -33,42 +32,39 @@
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
-#
-# Contributors:
-# Adam Zsarnóczay
-# John Vouvakis Manousakis
-
-"""
-This module defines constants, classes and methods for uncertainty
-quantification in pelicun.
-.. rubric:: Contents
+"""Constants, classes and methods for uncertainty quantification."""
-.. autosummary::
+from __future__ import annotations
- scale_distribution
- mvn_orthotope_density
- fit_distribution_to_sample
- fit_distribution_to_percentiles
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING
- RandomVariable
- RandomVariableSet
- RandomVariableRegistry
+import colorama
+import numpy as np
+import pandas as pd
+from scipy.linalg import cholesky, svd # type: ignore
+from scipy.optimize import minimize # type: ignore
+from scipy.stats import multivariate_normal as mvn # type: ignore
+from scipy.stats import norm, uniform, weibull_min # type: ignore
+from scipy.stats._mvn import (
+ mvndst, # type: ignore # noqa: PLC2701
+)
+if TYPE_CHECKING:
+ from collections.abc import Callable
-"""
+ from pelicun.base import Logger
-from abc import ABC, abstractmethod
-from scipy.stats import uniform, norm
-from scipy.stats import multivariate_normal as mvn
-from scipy.stats._mvn import mvndst # pylint: disable=no-name-in-module
-from scipy.linalg import cholesky, svd
-from scipy.optimize import minimize
-import numpy as np
-import pandas as pd
+colorama.init()
-def scale_distribution(scale_factor, family, theta, truncation_limits=None):
+def scale_distribution(
+ scale_factor: float,
+ family: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+) -> tuple[np.ndarray, np.ndarray | None]:
"""
Scale parameters of a random distribution.
@@ -76,27 +72,31 @@ def scale_distribution(scale_factor, family, theta, truncation_limits=None):
----------
scale_factor: float
Value by which to scale the parameters.
- family: {'normal', 'lognormal', 'uniform'}
- Defines the type of probability distribution for the random variable.
+ family: {'normal' (or 'normal_cov'), 'normal_std', 'lognormal',
+ 'uniform'}
+ Defines the type of probability distribution for the random
+ variable.
theta: float ndarray of length 2
- Set of parameters that define the cumulative distribution function of
- the variable given its distribution type. See the expected parameters
- explained in the RandomVariable class. Each parameter can be defined by
- one or more values. If a set of values are provided for one parameter,
- they define ordinates of a multilinear function that is used to get
- the parameter values given an independent variable.
+ Set of parameters that define the cumulative distribution
+ function of the variable given its distribution type. See the
+ expected parameters explained in the RandomVariable
+ class. Each parameter can be defined by one or more values. If
+ a set of values are provided for one parameter, they define
+ ordinates of a multilinear function that is used to get the
+ parameter values given an independent variable.
truncation_limits: float ndarray of length 2, default: None
- Defines the [a,b] truncation limits for the distribution. Use None to
- assign no limit in one direction.
+ Defines the [a,b] truncation limits for the distribution. Use
+ None to assign no limit in one direction.
Returns
-------
tuple
A tuple containing the scaled parameters and truncation
limits:
- - theta_new (float ndarray of length 2): Scaled parameters of
+
+ * theta_new (float ndarray of length 2): Scaled parameters of
the distribution.
- - truncation_limits (float ndarray of length 2 or None):
+ * truncation_limits (float ndarray of length 2 or None):
Scaled truncation limits for the distribution, or None if no
truncation is applied.
@@ -106,18 +106,22 @@ def scale_distribution(scale_factor, family, theta, truncation_limits=None):
If the specified distribution family is unsupported.
"""
-
if truncation_limits is not None:
- truncation_limits = truncation_limits * scale_factor
+ truncation_limits = truncation_limits.copy()
+ truncation_limits *= scale_factor
# undefined family is considered deterministic
if pd.isna(family):
family = 'deterministic'
theta_new = np.full_like(theta, np.nan)
- if family == 'normal':
+ if family == 'normal_std':
+ theta_new[0] = theta[0] * scale_factor # mean
+ theta_new[1] = theta[1] * scale_factor # STD
+
+ elif family in {'normal', 'normal_cov'}:
theta_new[0] = theta[0] * scale_factor
- theta_new[1] = theta[1] # because we use cov instead of std
+ theta_new[1] = theta[1] # because it is CoV
elif family == 'lognormal':
theta_new[0] = theta[0] * scale_factor
@@ -127,19 +131,22 @@ def scale_distribution(scale_factor, family, theta, truncation_limits=None):
theta_new[0] = theta[0] * scale_factor
theta_new[1] = theta[1] * scale_factor
- elif family == 'deterministic':
- theta_new[0] = theta[0] * scale_factor
-
- elif family == 'multilinear_CDF':
+ elif family in {'deterministic', 'multilinear_CDF'}:
theta_new[0] = theta[0] * scale_factor
else:
- raise ValueError(f'Unsupported distribution: {family}')
+ msg = f'Unsupported distribution: {family}'
+ raise ValueError(msg)
return theta_new, truncation_limits
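
An illustrative call: scaling a lognormal by a factor of 2 scales the median and the truncation limits but leaves the log standard deviation untouched:

```python
import numpy as np

from pelicun.uq import scale_distribution

theta, trunc = scale_distribution(
    scale_factor=2.0,
    family='lognormal',
    theta=np.array([1.5, 0.3]),  # median, log-std
    truncation_limits=np.array([0.0, 4.0]),
)
# theta -> [3.0, 0.3], trunc -> [0.0, 8.0]
```
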
-def mvn_orthotope_density(mu, COV, lower=np.nan, upper=np.nan):
+def mvn_orthotope_density(
+ mu: float | np.ndarray,
+ cov: np.ndarray,
+ lower: float | np.ndarray = np.nan,
+ upper: float | np.ndarray = np.nan,
+) -> tuple[float, float]:
"""
Estimate the probability density within a hyperrectangle for an MVN distr.
@@ -152,7 +159,7 @@ def mvn_orthotope_density(mu, COV, lower=np.nan, upper=np.nan):
----------
mu: float scalar or ndarray
Mean(s) of the non-truncated distribution.
- COV: float ndarray
+ cov: float ndarray
Covariance matrix of the non-truncated distribution
lower: float vector, optional, default: np.nan
Lower bound(s) for the truncated distributions. A scalar value can be
@@ -176,13 +183,12 @@ def mvn_orthotope_density(mu, COV, lower=np.nan, upper=np.nan):
Estimate of the error in the calculated probability density.
"""
-
# process the inputs and get the number of dimensions
mu = np.atleast_1d(mu)
- COV = np.atleast_2d(COV)
+ cov = np.atleast_2d(cov)
- sig = np.sqrt(np.diag(COV))
- corr = COV / np.outer(sig, sig)
+ sig = np.sqrt(np.diag(cov))
+ corr = cov / np.outer(sig, sig)
ndim = mu.size
@@ -217,10 +223,7 @@ def mvn_orthotope_density(mu, COV, lower=np.nan, upper=np.nan):
np.putmask(infin, lowinf * uppinf, -1)
# prepare the correlation coefficients
- if ndim == 1:
- correl = 0
- else:
- correl = corr[np.tril_indices(ndim, -1)]
+ correl = np.array([0.0]) if ndim == 1 else corr[np.tril_indices(ndim, -1)]
# estimate the density
eps_alpha, alpha, _ = mvndst(lower, upper, infin, correl)
@@ -228,26 +231,28 @@ def mvn_orthotope_density(mu, COV, lower=np.nan, upper=np.nan):
return alpha, eps_alpha
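
An illustrative call: for an independent standard bivariate normal, the mass inside the unit square factorizes, so the result can be checked by hand:

```python
import numpy as np

from pelicun.uq import mvn_orthotope_density

alpha, eps = mvn_orthotope_density(
    mu=np.zeros(2),
    cov=np.eye(2),
    lower=np.zeros(2),
    upper=np.ones(2),
)
# alpha is approximately (norm.cdf(1) - norm.cdf(0)) ** 2, about 0.116
```
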
-def _get_theta(params, inits, dist_list):
+def _get_theta(
+ params: np.ndarray, inits: np.ndarray, dist_list: np.ndarray
+) -> np.ndarray:
"""
- Returns the parameters of the target distributions.
+ Return the parameters of the target distributions.
- Uses the parameter values from the optimization algorithm (that are relative
- to the initial values) and the initial values to transform them to the
- parameters of the target distributions.
+ Uses the parameter values from the optimization algorithm (that
+ are relative to the initial values) and the initial values to
+ transform them to the parameters of the target distributions.
Parameters
----------
params: float ndarray, Nx2
- Numpy array containing the parameter values
+ Numpy array containing the parameter values.
inits: float ndarray, Nx2
- Numpy array containing the initial values
- dist_list: list of str
- List of strings containing the names of the distributions.
+ Numpy array containing the initial values.
+ dist_list: str ndarray
+ Array of strings containing the names of the distributions.
Returns
-------
- Theta
+ theta: float ndarray
The estimated parameters.
Raises
@@ -256,13 +261,26 @@ def _get_theta(params, inits, dist_list):
If any of the distributions is unsupported.
"""
-
theta = np.zeros(inits.shape)
for i, (params_i, inits_i, dist_i) in enumerate(zip(params, inits, dist_list)):
- if dist_i in {'normal', 'lognormal'}:
- # Note that the standard deviation is fit in log space, hence the
- # unusual-looking transformation here
+ if dist_i in {'normal', 'normal_std', 'lognormal'}:
+ # For 'normal' and 'normal_std' the standard deviation is
+ # updated directly; for 'lognormal' it is fit in log space
+ sig = (
+ np.exp(np.log(inits_i[1]) + params_i[1])
+ if dist_i == 'lognormal'
+ else inits_i[1] + params_i[1]
+ )
+
+ # The mean uses the standard transformation
+ mu = inits_i[0] + params_i[0]
+
+ theta[i, 0] = mu
+ theta[i, 1] = sig
+
+ elif dist_i == 'normal_cov':
+ # Note that the CoV is used for 'normal_cov'
sig = np.exp(np.log(inits_i[1]) + params_i[1])
# The mean uses the standard transformation
@@ -272,12 +290,15 @@ def _get_theta(params, inits, dist_list):
theta[i, 1] = sig
else:
- raise ValueError(f'Unsupported distribution: {dist_i}')
+ msg = f'Unsupported distribution: {dist_i}'
+ raise ValueError(msg)
return theta
-def _get_limit_probs(limits, distribution, theta):
+def _get_limit_probs(
+ limits: np.ndarray, distribution: str, theta: np.ndarray
+) -> tuple[float, float]:
"""
Get the CDF value at the specified limits.
@@ -301,29 +322,28 @@ def _get_limit_probs(limits, distribution, theta):
If any of the distributions is unsupported.
"""
-
- if distribution in {'normal', 'normal-stdev', 'lognormal'}:
+ if distribution in {'normal', 'normal_std', 'normal_cov', 'lognormal'}:
a, b = limits
mu = theta[0]
- sig = theta[1]
+ # theta[1] holds the standard deviation here for all normal variants
+ sig = theta[1]
- if np.isnan(a):
- p_a = 0.0
- else:
- p_a = norm.cdf((a - mu) / sig)
+ p_a = 0.0 if np.isnan(a) else norm.cdf((a - mu) / sig)
- if np.isnan(b):
- p_b = 1.0
- else:
- p_b = norm.cdf((b - mu) / sig)
+ p_b = 1.0 if np.isnan(b) else norm.cdf((b - mu) / sig)
else:
- raise ValueError(f'Unsupported distribution: {distribution}')
+ msg = f'Unsupported distribution: {distribution}'
+ raise ValueError(msg)
return p_a, p_b
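
A quick check of the helper within this module: for a standard normal with truncation limits [-1, 1], the CDF values at the limits are the familiar one-sigma probabilities:

```python
import numpy as np

p_a, p_b = _get_limit_probs(
    np.array([-1.0, 1.0]), 'normal_std', np.array([0.0, 1.0])
)
# p_a is about 0.159 and p_b is about 0.841
```
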
-def _get_std_samples(samples, theta, tr_limits, dist_list):
+def _get_std_samples(
+ samples: np.ndarray,
+ theta: np.ndarray,
+ tr_limits: np.ndarray,
+ dist_list: np.ndarray,
+) -> np.ndarray:
"""
Transform samples to standard normal space.
@@ -333,12 +353,12 @@ def _get_std_samples(samples, theta, tr_limits, dist_list):
2D array of samples. Each row represents a sample.
theta: float ndarray Dx2
2D array of theta values that represent each dimension of the
- samples
+ samples.
tr_limits: float ndarray Dx2
2D array with rows that represent [a, b] pairs of truncation
- limits
+ limits.
dist_list: str ndarray of length D
- 1D array containing the names of the distributions
+ 1D array containing the names of the distributions.
Returns
-------
@@ -353,13 +373,12 @@ def _get_std_samples(samples, theta, tr_limits, dist_list):
If any of the distributions is unsupported.
"""
-
std_samples = np.zeros(samples.shape)
for i, (samples_i, theta_i, tr_lim_i, dist_i) in enumerate(
zip(samples, theta, tr_limits, dist_list)
):
- if dist_i in {'normal', 'normal-stdev', 'lognormal'}:
+ if dist_i in {'normal', 'normal_std', 'normal_cov', 'lognormal'}:
lim_low = tr_lim_i[0]
lim_high = tr_lim_i[1]
@@ -367,33 +386,37 @@ def _get_std_samples(samples, theta, tr_limits, dist_list):
True in (samples_i > lim_high).tolist()
or True in (samples_i < lim_low).tolist()
):
- raise ValueError(
+ msg = (
'One or more sample values lie outside '
'of the specified truncation limits.'
)
+ raise ValueError(msg)
# first transform from normal to uniform
- uni_samples = norm.cdf(samples_i, loc=theta_i[0], scale=theta_i[1])
+ uni_sample = norm.cdf(samples_i, loc=theta_i[0], scale=theta_i[1])
# replace 0 and 1 values with the nearest float
- uni_samples[uni_samples == 0] = np.nextafter(0, 1)
- uni_samples[uni_samples == 1] = np.nextafter(1, -1)
+ uni_sample[uni_sample == 0] = np.nextafter(0, 1)
+ uni_sample[uni_sample == 1] = np.nextafter(1, -1)
# consider truncation if needed
p_a, p_b = _get_limit_probs(tr_lim_i, dist_i, theta_i)
- uni_samples = (uni_samples - p_a) / (p_b - p_a)
+ uni_sample = (uni_sample - p_a) / (p_b - p_a)
# then transform from uniform to standard normal
- std_samples[i] = norm.ppf(uni_samples, loc=0.0, scale=1.0)
+ std_samples[i] = norm.ppf(uni_sample, loc=0.0, scale=1.0)
else:
- raise ValueError(f'Unsupported distribution: {dist_i}')
+ msg = f'Unsupported distribution: {dist_i}'
+ raise ValueError(msg)
return std_samples
-def _get_std_corr_matrix(std_samples):
+def _get_std_corr_matrix(std_samples: np.ndarray) -> np.ndarray | None:
"""
+ Estimate the correlation matrix.
+
Estimate the correlation matrix of the given standard normal
samples. Ensure that the correlation matrix is positive
semidefinite.
@@ -415,9 +438,9 @@ def _get_std_corr_matrix(std_samples):
If any of the elements of std_samples is np.inf or np.nan
"""
-
if True in np.isinf(std_samples) or True in np.isnan(std_samples):
- raise ValueError('std_samples array must not contain inf or NaN values')
+ msg = 'std_samples array must not contain inf or NaN values'
+ raise ValueError(msg)
n_dims, n_samples = std_samples.shape
@@ -440,7 +463,7 @@ def _get_std_corr_matrix(std_samples):
# otherwise, we can try to fix the matrix using SVD
except np.linalg.LinAlgError:
try:
- U, s, _ = svd(
+ u_matrix, s_vector, _ = svd(
rho_hat,
)
@@ -448,13 +471,15 @@ def _get_std_corr_matrix(std_samples):
# if this also fails, we give up
return None
- S = np.diagflat(s)
+ s_diag = np.diagflat(s_vector)
- rho_hat = U @ S @ U.T
+ rho_hat = u_matrix @ s_diag @ u_matrix.T
np.fill_diagonal(rho_hat, 1.0)
# check if we introduced any unreasonable values
- if (np.max(rho_hat) > 1.01) or (np.min(rho_hat) < -1.01):
+ vmax = 1.01
+ vmin = -1.01
+ if (np.max(rho_hat) > vmax) or (np.min(rho_hat) < vmin):
return None
# round values to 1.0 and -1.0, if needed
@@ -467,9 +492,9 @@ def _get_std_corr_matrix(std_samples):
return rho_hat
-def _mvn_scale(x, rho):
+def _mvn_scale(x: np.ndarray, rho: np.ndarray) -> np.ndarray:
"""
- Scaling utility function
+ Scaling utility function.
Parameters
----------
@@ -491,26 +516,29 @@ def _mvn_scale(x, rho):
rho_0 = np.eye(n_dims, n_dims)
a = mvn.pdf(x, mean=np.zeros(n_dims), cov=rho_0)
- a[a < 1.0e-10] = 1.0e-10
+ small_num = 1.0e-10
+ a[a < small_num] = small_num
b = mvn.pdf(x, mean=np.zeros(n_dims), cov=rho)
return b / a
-def _neg_log_likelihood(
- params,
- inits,
- bnd_lower,
- bnd_upper,
- samples,
- dist_list,
- tr_limits,
- det_limits,
- censored_count,
- enforce_bounds=False,
-):
+def _neg_log_likelihood( # noqa: C901
+ params: np.ndarray,
+ inits: np.ndarray,
+ bnd_lower: np.ndarray,
+ bnd_upper: np.ndarray,
+ samples: np.ndarray,
+ dist_list: np.ndarray,
+ tr_limits: np.ndarray,
+ det_limits: list[np.ndarray],
+ censored_count: int,
+ enforce_bounds: bool = False, # noqa: FBT001, FBT002
+) -> float:
"""
+ Calculate negative log likelihood.
+
Calculate the negative log likelihood of the given data samples
given the parameter values and distribution information.
@@ -520,29 +548,30 @@ def _neg_log_likelihood(
Parameters
----------
- params : ndarray
+ params: ndarray
1D array with the parameter values to be assessed.
- inits : ndarray
+ inits: ndarray
1D array with the initial estimates for the distribution
parameters.
- bnd_lower : ndarray
+ bnd_lower: ndarray
1D array with the lower bounds for the distribution
parameters.
- bnd_upper : ndarray
+ bnd_upper: ndarray
1D array with the upper bounds for the distribution
parameters.
- samples : ndarray
+ samples: ndarray
2D array with the data samples. Each column corresponds to a
different random variable.
- dist_list : list
- List with the distribution types for each random variable.
- tr_limits : list
- List with the truncation limits for each random variable.
- det_limits : list
+ dist_list: str ndarray of length D
+ 1D array containing the names of the distributions
+ tr_limits: float ndarray Dx2
+ 2D array with rows that represent [a, b] pairs of truncation
+ limits.
+ det_limits: list
List with the detection limits for each random variable.
- censored_count : int
+ censored_count: int
Number of censored samples in the data.
- enforce_bounds : bool, optional
+ enforce_bounds: bool, optional
If True, the parameters are only considered valid if they are
within the bounds defined by bnd_lower and bnd_upper. The
default value is False.
@@ -551,10 +580,11 @@ def _neg_log_likelihood(
-------
float
The negative log likelihood of the data given the distribution parameters.
- """
+ """
# First, check if the parameters are within the pre-defined bounds
- # TODO: check if it is more efficient to use a bounded minimization algo
+ # TODO(AZ): check if it is more efficient to use a bounded
+ # minimization algo
if enforce_bounds:
if not ((params > bnd_lower) & (params < bnd_upper)).all(0):
# if they are not, then return a large value to discourage the
@@ -585,7 +615,7 @@ def _neg_log_likelihood(
# Calculate the likelihood for each available sample
# Note that we are performing this without any transformation to be able
# to respect truncation limits
- if dist_i in {'normal', 'lognormal'}:
+ if dist_i in {'normal', 'normal_std', 'normal_cov', 'lognormal'}:
likelihoods[i] = (
norm.pdf(samples_i, loc=theta_i[0], scale=theta_i[1]) / tr_alpha
)
@@ -614,8 +644,8 @@ def _neg_log_likelihood(
p_l, p_u = _get_limit_probs(det_lim_i, dist_i, theta_i)
# rescale detection limits to consider truncation
- p_l, p_u = [np.min([np.max([lim, p_a]), p_b]) for lim in (p_l, p_u)]
- p_l, p_u = [(lim - p_a) / (p_b - p_a) for lim in (p_l, p_u)]
+ p_l, p_u = (np.min([np.max([lim, p_a]), p_b]) for lim in (p_l, p_u))
+ p_l, p_u = ((lim - p_a) / (p_b - p_a) for lim in (p_l, p_u))
# transform limits to standard normal space
det_lower[i], det_upper[i] = norm.ppf([p_l, p_u], loc=0.0, scale=1.0)
@@ -643,8 +673,8 @@ def _neg_log_likelihood(
# take the product of likelihoods calculated in each dimension
scale = _mvn_scale(std_samples.T, rho_hat)
- # TODO: We can almost surely replace the product of likelihoods with a call
- # to mvn()
+ # TODO(AZ): We can almost surely replace the product of likelihoods
+ # with a call to mvn()
likelihoods = np.prod(likelihoods, axis=0) * scale
# Zeros are a result of limited floating point precision. Replace them
@@ -653,28 +683,27 @@ def _neg_log_likelihood(
likelihoods = np.clip(likelihoods, a_min=np.nextafter(0, 1), a_max=None)
# calculate the total negative log likelihood
- NLL = -(
+ negative_log_likelihood = -(
np.sum(np.log(likelihoods)) # from samples
+ censored_count * np.log(cen_likelihood)
) # censoring influence
- # normalize the NLL with the sample count
- NLL = NLL / samples.size
-
# print(theta[0], params, NLL)
- return NLL
-
-
-def fit_distribution_to_sample(
- raw_samples,
- distribution,
- truncation_limits=(np.nan, np.nan),
- censored_count=0,
- detection_limits=(np.nan, np.nan),
- multi_fit=False,
- logger_object=None,
-):
+ # normalize the NLL with the sample count
+ return negative_log_likelihood / samples.size
+
+
+def fit_distribution_to_sample( # noqa: C901
+ raw_sample: np.ndarray,
+ distribution: str | list[str],
+ truncation_limits: tuple[float, float] = (np.nan, np.nan),
+ censored_count: int = 0,
+ detection_limits: tuple[float, float] = (np.nan, np.nan),
+ *,
+ multi_fit: bool = False,
+ logger_object: Logger | None = None,
+) -> tuple[np.ndarray, np.ndarray]:
"""
Fit a distribution to sample using maximum likelihood estimation.
@@ -686,7 +715,7 @@ def fit_distribution_to_sample(
Parameters
----------
- raw_samples: float ndarray
+ raw_sample: float ndarray
Raw data that serves as the basis of estimation. The number of samples
equals the number of columns and each row introduces a new feature. In
other words: a list of sample lists is expected where each sample list
@@ -695,7 +724,7 @@ def fit_distribution_to_sample(
Defines the target probability distribution type. Different types of
distributions can be mixed by providing a list rather than a single
value. Each element of the list corresponds to one of the features in
- the raw_samples.
+ the raw_sample.
truncation_limits: float ndarray, optional, default: [None, None]
Lower and/or upper truncation limits for the specified distributions.
A two-element vector can be used for a univariate case, while two lists
@@ -732,9 +761,10 @@ def fit_distribution_to_sample(
theta: float ndarray
Estimates of the parameters of the fitted probability
distribution in each dimension. The following parameters
- are returned for the supported distributions: normal -
- mean, coefficient of variation; lognormal - median, log
- standard deviation;
+ are returned for the supported distributions: normal,
+ normal_cov - mean, coefficient of variation; normal_std -
+ mean, standard deviation; lognormal - median, log standard
+ deviation;
Rho: float 2D ndarray, optional
In the multivariate case, returns the estimate of the
correlation matrix.
@@ -745,8 +775,7 @@ def fit_distribution_to_sample(
If NaN values are produced during standard normal space transformation
"""
-
- samples = np.atleast_2d(raw_samples)
+ samples = np.atleast_2d(raw_sample)
tr_limits = np.atleast_2d(truncation_limits)
det_limits = np.atleast_2d(detection_limits)
dist_list = np.atleast_1d(distribution)
@@ -780,7 +809,7 @@ def fit_distribution_to_sample(
sig_init = np.ones_like(mu_init) * np.nan
for d_i, distr in enumerate(dist_list):
- if distr in {'normal', 'normal-stdev', 'lognormal'}:
+ if distr in {'normal', 'normal_cov', 'normal_std', 'lognormal'}:
# use the first two moments
mu_init[d_i] = np.mean(samples[d_i])
@@ -812,10 +841,10 @@ def fit_distribution_to_sample(
# There is nothing to gain from a time-consuming optimization if..
# the number of samples is too small
- if (n_samples < 3) or (
+ min_sample_size_for_optimization = 3
+ if (n_samples < min_sample_size_for_optimization) or (
# there are no truncation or detection limits involved
- np.all(np.isnan(tr_limits))
- and np.all(np.isnan(det_limits))
+ np.all(np.isnan(tr_limits)) and np.all(np.isnan(det_limits))
):
# In this case, it is typically hard to improve on the method of
# moments estimates for the parameters of the marginal distributions
@@ -875,9 +904,7 @@ def fit_distribution_to_sample(
theta = _get_theta(
out,
inits_i,
- [
- dist_list[dim],
- ],
+ np.array([dist_list[dim]]),
)
inits[dim] = theta[0]
@@ -916,13 +943,14 @@ def fit_distribution_to_sample(
# samples using that type of correlation (i.e., Gaussian copula)
std_samples = _get_std_samples(samples, theta, tr_limits, dist_list)
if True in np.isnan(std_samples) or True in np.isinf(std_samples):
- raise ValueError(
+ msg = (
'Something went wrong.'
'\n'
'Conversion to standard normal space was unsuccessful. \n'
'The given samples might deviate '
'substantially from the specified distribution.'
)
+ raise ValueError(msg)
rho_hat = _get_std_corr_matrix(std_samples)
if rho_hat is None:
# If there is not enough data to produce a valid correlation matrix
@@ -931,16 +959,16 @@ def fit_distribution_to_sample(
np.fill_diagonal(rho_hat, 1.0)
if logger_object:
- logger_object.msg(
- "\nWARNING: Demand sample size too small to reliably estimate "
- "the correlation matrix. Assuming uncorrelated demands.",
- prepend_timestamp=False,
- prepend_blank_space=False,
+ logger_object.warning(
+ 'Demand sample size too small to reliably estimate '
+ 'the correlation matrix. Assuming uncorrelated demands.'
)
else:
- print(
- "\nWARNING: Demand sample size too small to reliably estimate "
- "the correlation matrix. Assuming uncorrelated demands."
+ print( # noqa: T201
+ '\nWARNING: Demand sample size '
+ 'too small to reliably estimate '
+ 'the correlation matrix. Assuming '
+ 'uncorrelated demands.'
)
for d_i, distr in enumerate(dist_list):
@@ -950,31 +978,34 @@ def fit_distribution_to_sample(
# theta_mod = theta.T.copy()
# theta_mod[0] = np.exp(theta_mod[0])
# theta = theta_mod.T
- # Convert the std to cov if the distribution is normal
- elif distr == 'normal':
+        # Convert the std to cov if the distribution is normal or normal_cov
+ elif distr in {'normal', 'normal_cov'}:
# replace standard deviation with coefficient of variation
# note: this results in cov=inf if the mean is zero.
- if np.abs(theta[d_i][0]) < 1.0e-40:
+ almost_zero = 1.0e-40
+ if np.abs(theta[d_i][0]) < almost_zero:
theta[d_i][1] = np.inf
else:
- theta[d_i][1] = theta[d_i][1] / np.abs(theta[d_i][0])
+ theta[d_i][1] /= np.abs(theta[d_i][0])
return theta, rho_hat
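+
+# Illustrative usage sketch (not part of this changeset): fitting a
+# lognormal marginal to a univariate sample with the function above.
+# The seed and parameter values are hypothetical.
+#
+# >>> rng = np.random.default_rng(0)
+# >>> sample = np.exp(rng.normal(loc=0.5, scale=0.2, size=(1, 200)))
+# >>> theta, rho = fit_distribution_to_sample(sample, 'lognormal')
+# >>> theta[0]  # roughly [np.exp(0.5), 0.2], i.e. median and log-std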
-def _OLS_percentiles(params, values, perc, family):
+def _OLS_percentiles( # noqa: N802
+ params: tuple[float, float], values: np.ndarray, perc: np.ndarray, family: str
+) -> float:
"""
Estimate percentiles using ordinary least squares (OLS).
Parameters
----------
- params : tuple of floats
+ params: tuple of floats
The parameters of the selected distribution family.
- values : float ndarray
+ values: float ndarray
The sample values for which the percentiles are requested.
- perc : float ndarray
+ perc: float ndarray
The requested percentile(s).
- family : str
+ family: str
The distribution family to use for the percentile estimation.
Can be either 'normal' or 'lognormal'.
@@ -989,7 +1020,6 @@ def _OLS_percentiles(params, values, perc, family):
If `family` is not 'normal' or 'lognormal'.
"""
-
if family == 'normal':
theta_0 = params[0]
theta_1 = params[1]
@@ -1012,24 +1042,27 @@ def _OLS_percentiles(params, values, perc, family):
val_hat = np.exp(norm.ppf(perc, loc=np.log(theta_0), scale=theta_1))
else:
- raise ValueError(f"Distribution family not recognized: {family}")
+ msg = f'Distribution family not recognized: {family}'
+ raise ValueError(msg)
return np.sum((val_hat - values) ** 2.0)
-def fit_distribution_to_percentiles(values, percentiles, families):
+def fit_distribution_to_percentiles(
+ values: list[float], percentiles: list[float], families: list[str]
+) -> tuple[str, list[float]]:
"""
Fit distribution to pre-defined values at a finite number of percentiles.
Parameters
----------
- values: array of float
+ values: list of float
Pre-defined values at the given percentiles. At least two values are
expected.
- percentiles: array of float
+ percentiles: list of float
Percentiles where values are defined. At least two percentiles are
expected.
- families: array of strings {'normal', 'lognormal'}
+ families: list of strings {'normal', 'lognormal'}
Defines the distribution family candidates.
Returns
@@ -1042,13 +1075,12 @@ def fit_distribution_to_percentiles(values, percentiles, families):
Parameters of the fitted distribution.
"""
-
out_list = []
- percentiles = np.array(percentiles)
+ percentiles_np = np.array(percentiles)
- median_id = np.argmin(np.abs(percentiles - 0.5))
- extreme_id = np.argmax(percentiles - 0.5)
+ median_id = np.argmin(np.abs(percentiles_np - 0.5))
+ extreme_id = np.argmax(percentiles_np - 0.5)
for family in families:
inits = [
@@ -1057,25 +1089,21 @@ def fit_distribution_to_percentiles(values, percentiles, families):
if family == 'normal':
inits.append(
- (
- np.abs(values[extreme_id] - inits[0])
- / np.abs(norm.ppf(percentiles[extreme_id], loc=0, scale=1))
- )
+ np.abs(values[extreme_id] - inits[0])
+ / np.abs(norm.ppf(percentiles_np[extreme_id], loc=0, scale=1))
)
elif family == 'lognormal':
inits.append(
- (
- np.abs(np.log(values[extreme_id] / inits[0]))
- / np.abs(norm.ppf(percentiles[extreme_id], loc=0, scale=1))
- )
+ np.abs(np.log(values[extreme_id] / inits[0]))
+ / np.abs(norm.ppf(percentiles_np[extreme_id], loc=0, scale=1))
)
out_list.append(
minimize(
_OLS_percentiles,
inits,
- args=(values, percentiles, family),
+ args=(values, percentiles_np, family),
method='BFGS',
)
)
@@ -1085,20 +1113,28 @@ def fit_distribution_to_percentiles(values, percentiles, families):
return families[best_out_id], out_list[best_out_id].x
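+
+# Hedged sketch of calling the percentile-based fit above; the inputs
+# are made up for illustration.
+#
+# >>> family, params = fit_distribution_to_percentiles(
+# ...     values=[1.0, 1.6],
+# ...     percentiles=[0.5, 0.84],
+# ...     families=['normal', 'lognormal'],
+# ... )
+# >>> family  # the family with the smaller OLS residual is returned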
-class BaseRandomVariable(ABC):
- """
- Base abstract class for different types of random variables.
+class BaseRandomVariable(ABC): # noqa: B024
+ """Base abstract class for different types of random variables."""
- """
+ __slots__: list[str] = [
+ 'RV_set',
+ '_sample',
+ '_sample_DF',
+ '_uni_sample',
+ 'anchor',
+ 'distribution',
+ 'f_map',
+ 'name',
+ ]
def __init__(
self,
- name,
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
"""
- Initializes a RandomVariable object.
+ Instantiate a RandomVariable object.
Parameters
----------
@@ -1113,28 +1149,21 @@ def __init__(
the attributes of this variable and its anchor do not have to be
identical.
- Raises
- ------
- ValueError
- If there are issues with the specified distribution theta
- parameters.
-
"""
-
self.name = name
- self.distribution = None
+ self.distribution: str | None = None
self.f_map = f_map
- self._uni_samples = None
- self.RV_set = None
- self._sample_DF = None
- self._sample = None
+ self._uni_sample: np.ndarray | None = None
+ self.RV_set: RandomVariableSet | None = None
+ self._sample_DF: pd.Series | None = None
+ self._sample: np.ndarray | None = None
if anchor is None:
self.anchor = self
else:
self.anchor = anchor
@property
- def sample(self):
+ def sample(self) -> np.ndarray | None:
"""
Return the empirical or generated sample.
@@ -1149,7 +1178,7 @@ def sample(self):
return self._sample
@sample.setter
- def sample(self, value):
+ def sample(self, value: np.ndarray) -> None:
"""
Assign a sample to the random variable.
@@ -1163,7 +1192,7 @@ def sample(self, value):
self._sample_DF = pd.Series(value)
@property
- def sample_DF(self):
+ def sample_DF(self) -> pd.Series | None: # noqa: N802
"""
Return the empirical or generated sample in a pandas Series.
@@ -1174,12 +1203,13 @@ def sample_DF(self):
"""
if self.f_map is not None:
+ assert self._sample_DF is not None
return self._sample_DF.apply(self.f_map)
return self._sample_DF
@property
- def uni_sample(self):
+ def uni_sample(self) -> np.ndarray | None:
"""
Return the sample from the controlling uniform distribution.
@@ -1189,12 +1219,14 @@ def uni_sample(self):
The sample from the controlling uniform distribution.
"""
- return self.anchor._uni_samples
+ if self.anchor is self:
+ return self._uni_sample
+ return self.anchor.uni_sample
@uni_sample.setter
- def uni_sample(self, value):
+ def uni_sample(self, value: np.ndarray) -> None:
"""
- Assign the controlling sample to the random variable
+ Assign the controlling sample to the random variable.
Parameters
----------
@@ -1202,25 +1234,25 @@ def uni_sample(self, value):
An array of floating point values in the [0, 1] domain.
"""
- self._uni_samples = value
+ self._uni_sample = value
class RandomVariable(BaseRandomVariable):
- """
- Random variable that needs `values` in `inverse_transform`
- """
+ """Random variable that needs `values` in `inverse_transform`."""
+
+ __slots__: list[str] = []
@abstractmethod
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
"""
- Instantiates a normal random variable.
+        Instantiate a random variable.
Parameters
----------
@@ -1244,6 +1276,8 @@ def __init__(
identical.
"""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
f_map=f_map,
@@ -1251,42 +1285,45 @@ def __init__(
)
@abstractmethod
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
+ Evaluate the inverse CDF.
+
Uses inverse probability integral transformation on the
provided values.
"""
- def inverse_transform_sampling(self):
+ def inverse_transform_sampling(self) -> None:
"""
- Creates a sample using inverse probability integral
- transformation.
+ Create a sample with inverse transform sampling.
Raises
------
ValueError
If there is no available uniform sample.
+
"""
if self.uni_sample is None:
- raise ValueError('No available uniform sample.')
+ msg = 'No available uniform sample.'
+ raise ValueError(msg)
self.sample = self.inverse_transform(self.uni_sample)
class UtilityRandomVariable(BaseRandomVariable):
- """
- Random variable that needs `sample_size` in `inverse_transform`
- """
+ """Random variable that needs `sample_size` in `inverse_transform`."""
+
+ __slots__: list[str] = []
@abstractmethod
def __init__(
self,
- name,
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
"""
- Instantiates a normal random variable.
+        Instantiate a utility random variable.
Parameters
----------
@@ -1309,35 +1346,36 @@ def __init__(
)
@abstractmethod
- def inverse_transform(self, sample_size):
+ def inverse_transform(self, sample_size: int) -> np.ndarray:
"""
+ Evaluate the inverse CDF.
+
Uses inverse probability integral transformation on the
provided values.
"""
- def inverse_transform_sampling(self, sample_size):
- """
- Creates a sample using inverse probability integral
- transformation.
- """
+ def inverse_transform_sampling(self, sample_size: int) -> None:
+ """Create a sample with inverse transform sampling."""
self.sample = self.inverse_transform(sample_size)
class NormalRandomVariable(RandomVariable):
- """
- Normal random variable.
+ """Normal random variable."""
- """
+ __slots__: list[str] = ['theta', 'truncation_limits']
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """Instantiate a Normal random variable."""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
theta=theta,
@@ -1349,10 +1387,9 @@ def __init__(
self.theta = np.atleast_1d(theta)
self.truncation_limits = truncation_limits
- def cdf(self, values):
+ def cdf(self, values: np.ndarray) -> np.ndarray:
"""
- Returns the Cumulative Density Function (CDF) at the specified
- values.
+ Return the CDF at the given values.
Parameters
----------
@@ -1365,8 +1402,7 @@ def cdf(self, values):
1D float ndarray containing CDF values
"""
- mu, cov = self.theta[:2]
- sig = np.abs(mu) * cov
+ mu, sig = self.theta[:2]
if np.any(~np.isnan(self.truncation_limits)):
a, b = self.truncation_limits
@@ -1376,7 +1412,7 @@ def cdf(self, values):
if np.isnan(b):
b = np.inf
- p_a, p_b = [norm.cdf((lim - mu) / sig) for lim in (a, b)]
+ p_a, p_b = (norm.cdf((lim - mu) / sig) for lim in (a, b))
# cap the values at the truncation limits
values = np.minimum(np.maximum(values, a), b)
@@ -1392,8 +1428,10 @@ def cdf(self, values):
return result
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
+ Evaluate the inverse CDF.
+
Evaluates the inverse of the Cumulative Density Function (CDF)
for the given values. Used to generate random variable
realizations.
@@ -1411,13 +1449,11 @@ def inverse_transform(self, values):
Raises
------
ValueError
- If the probability massss within the truncation limits is
+ If the probability mass within the truncation limits is
too small
"""
-
- mu, cov = self.theta[:2]
- sig = np.abs(mu) * cov
+ mu, sig = self.theta[:2]
if np.any(~np.isnan(self.truncation_limits)):
a, b = self.truncation_limits
@@ -1427,16 +1463,17 @@ def inverse_transform(self, values):
if np.isnan(b):
b = np.inf
- p_a, p_b = [norm.cdf((lim - mu) / sig) for lim in (a, b)]
+ p_a, p_b = (norm.cdf((lim - mu) / sig) for lim in (a, b))
if p_b - p_a == 0:
- raise ValueError(
- "The probability mass within the truncation limits is "
- "too small and the truncated distribution cannot be "
- "sampled with sufficiently high accuracy. This is most "
- "probably due to incorrect truncation limits set for "
- "the distribution."
+ msg = (
+ 'The probability mass within the truncation limits is '
+ 'too small and the truncated distribution cannot be '
+ 'sampled with sufficiently high accuracy. This is most '
+ 'probably due to incorrect truncation limits set for '
+ 'the distribution.'
)
+ raise ValueError(msg)
result = norm.ppf(values * (p_b - p_a) + p_a, loc=mu, scale=sig)
@@ -1446,20 +1483,87 @@ def inverse_transform(self, values):
return result
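+
+# Minimal sketch of the truncated inverse transform implemented above,
+# reusing the module-level scipy.stats.norm import; the numbers are
+# illustrative.
+#
+# >>> mu, sig, a, b = 0.0, 1.0, -1.0, 2.0
+# >>> p_a, p_b = norm.cdf((a - mu) / sig), norm.cdf((b - mu) / sig)
+# >>> u = np.array([0.05, 0.5, 0.95])
+# >>> norm.ppf(u * (p_b - p_a) + p_a, loc=mu, scale=sig)  # all within [a, b]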
-class LogNormalRandomVariable(RandomVariable):
+class Normal_STD(NormalRandomVariable):
"""
- Lognormal random variable.
+ Normal random variable with standard deviation.
+
+ This class represents a normal random variable defined by mean and
+ standard deviation.
"""
+ __slots__: list[str] = []
+
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """Instantiate a Normal_STD random variable."""
+ mean, std = theta[:2]
+ theta = np.array([mean, std])
+ super().__init__(name, theta, truncation_limits, f_map, anchor)
+
+
+class Normal_COV(NormalRandomVariable):
+ """
+ Normal random variable with coefficient of variation.
+
+ This class represents a normal random variable defined by mean and
+ coefficient of variation.
+
+ """
+
+ __slots__: list[str] = []
+
+ def __init__(
+ self,
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """
+ Instantiate a Normal_COV random variable.
+
+ Raises
+ ------
+ ValueError
+ If the specified mean is zero.
+
+ """
+ mean, cov = theta[:2]
+
+ almost_zero = 1e-40
+ if np.abs(mean) < almost_zero:
+ msg = 'The mean of Normal_COV RVs cannot be zero.'
+ raise ValueError(msg)
+
+        std = np.abs(mean) * cov  # abs() keeps std positive for negative means
+ theta = np.array([mean, std])
+ super().__init__(name, theta, truncation_limits, f_map, anchor)
+
+
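+# Hedged sketch contrasting the two parameterizations defined above;
+# the numbers are illustrative only.
+#
+# >>> rv_std = Normal_STD('x', np.array([10.0, 2.0]))  # mean 10, std 2
+# >>> rv_cov = Normal_COV('y', np.array([10.0, 0.2]))  # mean 10, cov 0.2
+# >>> rv_cov.theta  # both store [mean, std] internally -> array([10., 2.])
+
+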
+class LogNormalRandomVariable(RandomVariable):
+ """Lognormal random variable."""
+
+ __slots__: list[str] = ['theta', 'truncation_limits']
+
+ def __init__(
+ self,
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """Instantiate a LogNormal random variable."""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
theta=theta,
@@ -1471,10 +1575,9 @@ def __init__(
self.theta = np.atleast_1d(theta)
self.truncation_limits = truncation_limits
- def cdf(self, values):
+ def cdf(self, values: np.ndarray) -> np.ndarray:
"""
- Returns the Cumulative Density Function (CDF) at the specified
- values.
+ Return the CDF at the given values.
Parameters
----------
@@ -1484,7 +1587,7 @@ def cdf(self, values):
Returns
-------
ndarray
- CDF values
+ 1D float ndarray containing CDF values
"""
theta, beta = self.theta[:2]
@@ -1497,9 +1600,9 @@ def cdf(self, values):
if np.isnan(b):
b = np.inf
- p_a, p_b = [
+ p_a, p_b = (
norm.cdf((np.log(lim) - np.log(theta)) / beta) for lim in (a, b)
- ]
+ )
# cap the values at the truncation limits
values = np.minimum(np.maximum(values, a), b)
@@ -1517,11 +1620,12 @@ def cdf(self, values):
return result
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
- Evaluates the inverse of the Cumulative Density Function (CDF)
- for the given values. Used to generate random variable
- realizations.
+ Evaluate the inverse CDF.
+
+ Uses inverse probability integral transformation on the
+ provided values.
Parameters
----------
@@ -1534,7 +1638,6 @@ def inverse_transform(self, values):
Inverse CDF values
"""
-
theta, beta = self.theta[:2]
if np.any(~np.isnan(self.truncation_limits)):
@@ -1548,9 +1651,9 @@ def inverse_transform(self, values):
if np.isnan(b):
b = np.inf
- p_a, p_b = [
+ p_a, p_b = (
norm.cdf((np.log(lim) - np.log(theta)) / beta) for lim in (a, b)
- ]
+ )
result = np.exp(
norm.ppf(values * (p_b - p_a) + p_a, loc=np.log(theta), scale=beta)
@@ -1563,19 +1666,21 @@ def inverse_transform(self, values):
class UniformRandomVariable(RandomVariable):
- """
- Uniform random variable.
+ """Uniform random variable."""
- """
+ __slots__: list[str] = ['theta', 'truncation_limits']
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """Instantiate a Uniform random variable."""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
theta=theta,
@@ -1587,10 +1692,9 @@ def __init__(
self.theta = np.atleast_1d(theta)
self.truncation_limits = truncation_limits
- def cdf(self, values):
+ def cdf(self, values: np.ndarray) -> np.ndarray:
"""
- Returns the Cumulative Density Function (CDF) at the specified
- values.
+ Return the CDF at the given values.
Parameters
----------
@@ -1600,7 +1704,7 @@ def cdf(self, values):
Returns
-------
ndarray
- CDF values
+ 1D float ndarray containing CDF values
"""
a, b = self.theta[:2]
@@ -1613,15 +1717,14 @@ def cdf(self, values):
if np.any(~np.isnan(self.truncation_limits)):
a, b = self.truncation_limits
- result = uniform.cdf(values, loc=a, scale=(b - a))
+ return uniform.cdf(values, loc=a, scale=(b - a))
- return result
-
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
- Evaluates the inverse of the Cumulative Density Function (CDF)
- for the given values. Used to generate random variable
- realizations.
+ Evaluate the inverse CDF.
+
+ Uses inverse probability integral transformation on the
+ provided values.
Parameters
----------
@@ -1644,27 +1747,158 @@ def inverse_transform(self, values):
if np.any(~np.isnan(self.truncation_limits)):
a, b = self.truncation_limits
- result = uniform.ppf(values, loc=a, scale=(b - a))
+ return uniform.ppf(values, loc=a, scale=(b - a))
+
+
+class WeibullRandomVariable(RandomVariable):
+ """Weibull random variable."""
+
+ __slots__: list[str] = ['theta', 'truncation_limits']
+
+ def __init__(
+ self,
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """Instantiate a Weibull random variable."""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
+ super().__init__(
+ name=name,
+ theta=theta,
+ truncation_limits=truncation_limits,
+ f_map=f_map,
+ anchor=anchor,
+ )
+ self.distribution = 'weibull'
+ self.theta = np.atleast_1d(theta)
+ self.truncation_limits = truncation_limits
+
+ def cdf(self, values: np.ndarray) -> np.ndarray:
+ """
+ Return the CDF at the given values.
+
+ Parameters
+ ----------
+ values: 1D float ndarray
+ Values for which to evaluate the CDF
+
+ Returns
+ -------
+ ndarray
+ 1D float ndarray containing CDF values
+
+ """
+ lambda_, kappa = self.theta[:2]
+
+ if np.any(~np.isnan(self.truncation_limits)):
+ a, b = self.truncation_limits
+
+ if np.isnan(a):
+ # Weibull is not defined for negative values
+ a = 0.0
+ if np.isnan(b):
+ b = np.inf
+
+ p_a, p_b = (weibull_min.cdf(lim, kappa, scale=lambda_) for lim in (a, b))
+
+ # cap the values at the truncation limits
+ values = np.minimum(np.maximum(values, a), b)
+
+ # get the cdf from a non-truncated weibull
+ p_vals = weibull_min.cdf(values, kappa, scale=lambda_)
+
+ # adjust for truncation
+ result = (p_vals - p_a) / (p_b - p_a)
+
+ else:
+ values = np.maximum(
+ values, 0.0
+ ) # Weibull is not defined for negative values
+
+ result = weibull_min.cdf(values, kappa, scale=lambda_)
+
+ return result
+
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
+ """
+ Evaluate the inverse CDF.
+
+ Uses inverse probability integral transformation on the
+ provided values.
+
+ Parameters
+ ----------
+ values: 1D float ndarray
+ Values for which to evaluate the inverse CDF
+
+ Returns
+ -------
+ ndarray
+ Inverse CDF values
+
+ """
+ lambda_, kappa = self.theta[:2]
+
+ if np.any(~np.isnan(self.truncation_limits)):
+ a, b = self.truncation_limits
+
+ if np.isnan(a):
+ a = 0.0 # Weibull is not defined for negative values
+ else:
+ a = np.maximum(0.0, a)
+
+ if np.isnan(b):
+ b = np.inf
+
+ p_a, p_b = (weibull_min.cdf(lim, kappa, scale=lambda_) for lim in (a, b))
+
+ result = weibull_min.ppf(
+ values * (p_b - p_a) + p_a, kappa, scale=lambda_
+ )
+
+ else:
+ result = weibull_min.ppf(values, kappa, scale=lambda_)
return result
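+
+# Short sketch of the untruncated branch above, reusing the module's
+# scipy.stats.weibull_min import; lambda_ is the scale and kappa the
+# shape parameter (illustrative values).
+#
+# >>> lambda_, kappa = 2.0, 1.5
+# >>> u = np.array([0.25, 0.5, 0.75])
+# >>> weibull_min.ppf(u, kappa, scale=lambda_)  # what inverse_transform returns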
class MultilinearCDFRandomVariable(RandomVariable):
"""
- Multilinear CDF random variable. This RV is defined by specifying
- the points that define its Cumulative Density Function (CDF), and
- linear interpolation between them.
+ Multilinear CDF random variable.
+
+ This RV is defined by specifying the points that define its
+    Cumulative Distribution Function (CDF), and linear interpolation
+ between them.
"""
+ __slots__: list[str] = ['theta']
+
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """
+ Instantiate a MultilinearCDF random variable.
+
+ Raises
+ ------
+ ValueError
+ In case of incompatible input parameters.
+ NotImplementedError
+ If truncation limits are specified.
+
+ """
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
theta=theta,
@@ -1675,52 +1909,52 @@ def __init__(
self.distribution = 'multilinear_CDF'
if not np.all(np.isnan(truncation_limits)):
- raise NotImplementedError(
- f'{self.distribution} RVs do not support truncation'
- )
+ msg = f'{self.distribution} RVs do not support truncation'
+ raise NotImplementedError(msg)
y_1 = theta[0, 1]
if y_1 != 0.00:
- raise ValueError(
- "For multilinear CDF random variables, y_1 should be set to 0.00"
- )
+ msg = 'For multilinear CDF random variables, y_1 should be set to 0.00'
+ raise ValueError(msg)
y_n = theta[-1, 1]
if y_n != 1.00:
- raise ValueError(
- "For multilinear CDF random variables, y_n should be set to 1.00"
- )
+ msg = 'For multilinear CDF random variables, y_n should be set to 1.00'
+ raise ValueError(msg)
x_s = theta[:, 0]
if not np.array_equal(np.sort(x_s), x_s):
- raise ValueError(
- "For multilinear CDF random variables, "
- "Xs should be specified in ascending order"
+ msg = (
+ 'For multilinear CDF random variables, '
+ 'Xs should be specified in ascending order'
)
+ raise ValueError(msg)
if np.any(np.isclose(np.diff(x_s), 0.00)):
- raise ValueError(
- "For multilinear CDF random variables, "
- "Xs should be specified in strictly ascending order"
+ msg = (
+ 'For multilinear CDF random variables, '
+ 'Xs should be specified in strictly ascending order'
)
+ raise ValueError(msg)
y_s = theta[:, 1]
if not np.array_equal(np.sort(y_s), y_s):
- raise ValueError(
- "For multilinear CDF random variables, "
- "Ys should be specified in ascending order"
+ msg = (
+ 'For multilinear CDF random variables, '
+ 'Ys should be specified in ascending order'
)
+ raise ValueError(msg)
if np.any(np.isclose(np.diff(y_s), 0.00)):
- raise ValueError(
- "For multilinear CDF random variables, "
- "Ys should be specified in strictly ascending order"
+ msg = (
+ 'For multilinear CDF random variables, '
+ 'Ys should be specified in strictly ascending order'
)
+ raise ValueError(msg)
self.theta = np.atleast_1d(theta)
- def cdf(self, values):
+ def cdf(self, values: np.ndarray) -> np.ndarray:
"""
- Returns the Cumulative Density Function (CDF) at the specified
- values.
+ Return the CDF at the given values.
Parameters
----------
@@ -1730,22 +1964,21 @@ def cdf(self, values):
Returns
-------
ndarray
- CDF values
+ 1D float ndarray containing CDF values
"""
x_i = [-np.inf] + [x[0] for x in self.theta] + [np.inf]
y_i = [0.00] + [x[1] for x in self.theta] + [1.00]
# Using Numpy's interp for linear interpolation
- result = np.interp(values, x_i, y_i, left=0.00, right=1.00)
+ return np.interp(values, x_i, y_i, left=0.00, right=1.00)
- return result
-
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
- Evaluates the inverse of the Cumulative Density Function (CDF)
- for the given values. Used to generate random variable
- realizations.
+ Evaluate the inverse CDF.
+
+ Uses inverse probability integral transformation on the
+ provided values.
Parameters
----------
@@ -1758,7 +1991,6 @@ def inverse_transform(self, values):
Inverse CDF values
"""
-
x_i = [x[0] for x in self.theta]
y_i = [x[1] for x in self.theta]
@@ -1769,42 +2001,43 @@ def inverse_transform(self, values):
# extrapolate).
# note: swapping the roles of x_i and y_i for inverse
# interpolation
- result = np.interp(values, y_i, x_i)
-
- return result
+ return np.interp(values, y_i, x_i)
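+
+# Worked sketch of the inverse interpolation above with a hypothetical
+# three-point CDF.
+#
+# >>> theta = np.array([[0.0, 0.0], [1.0, 0.5], [3.0, 1.0]])
+# >>> rv = MultilinearCDFRandomVariable('ml', theta)
+# >>> rv.inverse_transform(np.array([0.25, 0.75]))  # -> array([0.5, 2. ])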
class EmpiricalRandomVariable(RandomVariable):
- """
- Empirical random variable.
+ """Empirical random variable."""
- """
+ __slots__: list[str] = ['_raw_sample']
def __init__(
self,
- name,
- raw_samples,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """Instantiate an Empirical random variable."""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
- theta=raw_samples,
+ theta=theta,
truncation_limits=truncation_limits,
f_map=f_map,
anchor=anchor,
)
self.distribution = 'empirical'
if not np.all(np.isnan(truncation_limits)):
- raise NotImplementedError(
- f'{self.distribution} RVs do not support truncation'
- )
+ msg = f'{self.distribution} RVs do not support truncation'
+ raise NotImplementedError(msg)
- self._raw_samples = np.atleast_1d(raw_samples)
+ self._raw_sample = np.atleast_1d(theta)
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
+ Evaluate the inverse CDF.
+
Maps given values to their corresponding positions within the
empirical data array, simulating an inverse transformation
based on the empirical distribution. This can be seen as a
@@ -1824,33 +2057,31 @@ def inverse_transform(self, values):
normalized positions.
"""
- s_ids = (values * len(self._raw_samples)).astype(int)
- result = self._raw_samples[s_ids]
- return result
+ s_ids = (values * len(self._raw_sample)).astype(int)
+ return self._raw_sample[s_ids]
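+
+# Sketch of the index lookup above; the raw sample values are made up.
+#
+# >>> rv = EmpiricalRandomVariable('emp', np.array([1.0, 3.0, 5.0, 7.0]))
+# >>> rv.inverse_transform(np.array([0.0, 0.49, 0.99]))  # -> [1.0, 3.0, 7.0]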
class CoupledEmpiricalRandomVariable(UtilityRandomVariable):
- """
- Coupled empirical random variable.
+ """Coupled empirical random variable."""
- """
+ __slots__: list[str] = ['_raw_sample']
def __init__(
self,
- name,
- raw_samples,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
"""
- Instantiates a coupled empirical random variable.
+ Instantiate a coupled empirical random variable.
Parameters
----------
name: string
A unique string that identifies the random variable.
- raw_samples: 1D float ndarray
+ theta: 1D float ndarray
Samples from which to draw empirical realizations.
truncation_limits: 2D float ndarray
Not supported for CoupledEmpirical RVs.
@@ -1870,6 +2101,8 @@ def __init__(
When truncation limits are provided
"""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
f_map=f_map,
@@ -1877,14 +2110,15 @@ def __init__(
)
self.distribution = 'coupled_empirical'
if not np.all(np.isnan(truncation_limits)):
- raise NotImplementedError(
- f'{self.distribution} RVs do not support truncation'
- )
+ msg = f'{self.distribution} RVs do not support truncation'
+ raise NotImplementedError(msg)
- self._raw_samples = np.atleast_1d(raw_samples)
+ self._raw_sample = np.atleast_1d(theta)
- def inverse_transform(self, sample_size):
+ def inverse_transform(self, sample_size: int) -> np.ndarray:
"""
+ Evaluate the inverse CDF.
+
Generates a new sample array from the existing empirical data
by repeating the dataset until it matches the requested sample
size.
@@ -1904,31 +2138,31 @@ def inverse_transform(self, sample_size):
dataset.
"""
-
- raw_sample_count = len(self._raw_samples)
- new_sample = np.tile(self._raw_samples, int(sample_size / raw_sample_count) + 1)
- result = new_sample[:sample_size]
- return result
+ raw_sample_count = len(self._raw_sample)
+ new_sample = np.tile(
+ self._raw_sample, int(sample_size / raw_sample_count) + 1
+ )
+ return new_sample[:sample_size]
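+
+# The tiling above simply repeats the raw data until the requested size
+# is reached; e.g. with a hypothetical raw sample of three values and
+# sample_size=5:
+#
+# >>> np.tile(np.array([1.0, 2.0, 3.0]), 5 // 3 + 1)[:5]  # -> [1, 2, 3, 1, 2]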
class DeterministicRandomVariable(UtilityRandomVariable):
- """
- Deterministic random variable.
+ """Deterministic random variable."""
- """
+ __slots__: list[str] = ['theta']
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
"""
- Instantiates a deterministic random variable. This behaves
- like a RandomVariable object but represents a specific,
- deterministic value.
+ Instantiate a deterministic random variable.
+
+ This behaves like a RandomVariable object but represents a
+ specific, deterministic value.
Parameters
----------
@@ -1954,6 +2188,8 @@ def __init__(
When truncation limits are provided
"""
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
f_map=f_map,
@@ -1961,15 +2197,14 @@ def __init__(
)
self.distribution = 'deterministic'
if not np.all(np.isnan(truncation_limits)):
- raise NotImplementedError(
- f'{self.distribution} RVs do not support truncation'
- )
+ msg = f'{self.distribution} RVs do not support truncation'
+ raise NotImplementedError(msg)
self.theta = np.atleast_1d(theta)
- def inverse_transform(self, sample_size):
+ def inverse_transform(self, sample_size: int) -> np.ndarray:
"""
- Generates samples that correspond to the value.
+ Evaluate the inverse CDF.
Parameters
----------
@@ -1982,25 +2217,35 @@ def inverse_transform(self, sample_size):
Sample array containing the deterministic value.
"""
-
- result = np.full(sample_size, self.theta[0])
- return result
+ return np.full(sample_size, self.theta[0])
class MultinomialRandomVariable(RandomVariable):
- """
- Multinomial random variable.
+ """Multinomial random variable."""
- """
+ __slots__: list[str] = ['theta']
def __init__(
self,
- name,
- theta,
- truncation_limits=np.array((np.nan, np.nan)),
- f_map=None,
- anchor=None,
- ):
+ name: str,
+ theta: np.ndarray,
+ truncation_limits: np.ndarray | None = None,
+ f_map: Callable | None = None,
+ anchor: BaseRandomVariable | None = None,
+ ) -> None:
+ """
+ Instantiate a Multinomial random variable.
+
+ Raises
+ ------
+ ValueError
+ In case of incompatible input parameters.
+ NotImplementedError
+ If truncation limits are specified.
+
+ """
+ if truncation_limits is None:
+ truncation_limits = np.array((np.nan, np.nan))
super().__init__(
name=name,
theta=theta,
@@ -2009,22 +2254,24 @@ def __init__(
anchor=anchor,
)
if not np.all(np.isnan(truncation_limits)):
- raise NotImplementedError(
- f'{self.distribution} RVs do not support truncation'
- )
+ msg = f'{self.distribution} RVs do not support truncation'
+ raise NotImplementedError(msg)
self.distribution = 'multinomial'
if np.sum(theta) > 1.00:
- raise ValueError(
- f"The set of p values provided for a multinomial "
- f"distribution shall sum up to less than or equal to 1.0. "
- f"The provided values sum up to {np.sum(theta)}. p = "
- f"{theta} ."
+ msg = (
+ f'The set of p values provided for a multinomial '
+ f'distribution shall sum up to less than or equal to 1.0. '
+ f'The provided values sum up to {np.sum(theta)}. p = '
+                f'{theta}.'
)
+ raise ValueError(msg)
self.theta = np.atleast_1d(theta)
- def inverse_transform(self, values):
+ def inverse_transform(self, values: np.ndarray) -> np.ndarray:
"""
+ Evaluate the inverse CDF.
+
Transforms continuous values into discrete events based
on the cumulative probabilities of the multinomial
distribution derived by `theta`.
@@ -2048,13 +2295,13 @@ def inverse_transform(self, values):
values[values < p_i] = 10 + i
values[values <= 1.0] = 10 + len(p_cum)
- result = values - 10
-
- return result
+ return values - 10
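+
+# Worked sketch of the offset trick above, assuming theta = [0.2, 0.3]
+# (hypothetical): p_cum = [0.2, 0.5], so uniforms below 0.2 map to
+# event 0, those below 0.5 to event 1, and the rest to event 2. The
+# temporary +10 shift pushes assigned entries above 1.0 so later
+# comparisons cannot overwrite them before the final -10 recovers the
+# event indices.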
class RandomVariableSet:
"""
+ Random variable set.
+
Represents a set of random variables, each of which is described
by its own probability distribution. The set allows the user to
define correlations between the random variables, and provides
@@ -2072,30 +2319,36 @@ class RandomVariableSet:
Defines the correlation matrix that describes the correlation between
the random variables in the set. Currently, only the Gaussian copula
is supported.
+
"""
- def __init__(self, name, RV_list, Rho):
+ __slots__: list[str] = ['_Rho', '_variables', 'name']
+
+ def __init__(
+ self, name: str, rv_list: list[BaseRandomVariable], rho: np.ndarray
+ ) -> None:
+ """Instantiate a random variable set."""
self.name = name
- if len(RV_list) > 1:
+ if len(rv_list) > 1:
# put the RVs in a dictionary for more efficient access
- reorder = np.argsort([RV.name for RV in RV_list])
- self._variables = {RV_list[i].name: RV_list[i] for i in reorder}
+ reorder = np.argsort([RV.name for RV in rv_list])
+ self._variables = {rv_list[i].name: rv_list[i] for i in reorder}
# reorder the entries in the correlation matrix to correspond to the
# sorted list of RVs
- self._Rho = np.asarray(Rho[(reorder)].T[(reorder)].T)
+ self._Rho = np.asarray(rho[(reorder)].T[(reorder)].T)
else: # if there is only one variable (for testing, probably)
- self._variables = {rv.name: rv for rv in RV_list}
- self._Rho = np.asarray(Rho)
+ self._variables = {rv.name: rv for rv in rv_list}
+ self._Rho = np.asarray(rho)
# assign this RV_set to the variables
- for _, var in self._variables.items():
+ for var in self._variables.values():
var.RV_set = self
@property
- def RV(self):
+ def RV(self) -> dict[str, RandomVariable]: # noqa: N802
"""
Returns the random variable(s) assigned to the set.
@@ -2108,7 +2361,7 @@ def RV(self):
return self._variables
@property
- def size(self):
+ def size(self) -> int:
"""
Returns the size (i.e., number of variables in the) RV set.
@@ -2121,7 +2374,7 @@ def size(self):
return len(self._variables)
@property
- def sample(self):
+ def sample(self) -> dict[str, np.ndarray | None]:
"""
Returns the sample of the variables in the set.
@@ -2133,9 +2386,9 @@ def sample(self):
"""
return {name: rv.sample for name, rv in self._variables.items()}
- def Rho(self, var_subset=None):
+ def Rho(self, var_subset: list[str] | None = None) -> np.ndarray: # noqa: N802
"""
- Returns the (subset of the) correlation matrix.
+ Return the (subset of the) correlation matrix.
Returns
-------
@@ -2148,7 +2401,7 @@ def Rho(self, var_subset=None):
var_ids = [list(self._variables.keys()).index(var_i) for var_i in var_subset]
return (self._Rho[var_ids]).T[var_ids]
- def apply_correlation(self):
+ def apply_correlation(self) -> None:
"""
Apply correlation to n dimensional uniform samples.
@@ -2158,37 +2411,41 @@ def apply_correlation(self):
correlations while preserving as much as possible from the correlation
matrix.
"""
-
- U_RV = np.array([RV.uni_sample for RV_name, RV in self.RV.items()])
+        u_rv = np.array([rv.uni_sample for rv in self.RV.values()])
# First try doing the Cholesky transformation
try:
- N_RV = norm.ppf(U_RV)
+ n_rv = norm.ppf(u_rv)
- L = cholesky(self._Rho, lower=True)
+ l_mat = cholesky(self._Rho, lower=True)
- NC_RV = L @ N_RV
+ nc_rv = l_mat @ n_rv
- UC_RV = norm.cdf(NC_RV)
+ uc_rv = norm.cdf(nc_rv)
except np.linalg.LinAlgError:
# if the Cholesky doesn't work, we need to use the more
# time-consuming but more robust approach based on SVD
- N_RV = norm.ppf(U_RV)
+ n_rv = norm.ppf(u_rv)
- U, s, _ = svd(
+ u_mat, s_mat, _ = svd(
self._Rho,
)
- S = np.diagflat(np.sqrt(s))
+ s_diag = np.diagflat(np.sqrt(s_mat))
- NC_RV = (N_RV.T @ S @ U.T).T
+ nc_rv = (n_rv.T @ s_diag @ u_mat.T).T
- UC_RV = norm.cdf(NC_RV)
+ uc_rv = norm.cdf(nc_rv)
- for (RV_name, RV), uc_RV in zip(self.RV.items(), UC_RV):
- RV.uni_sample = uc_RV
+ for rv, ucrv in zip(self.RV.values(), uc_rv):
+ rv.uni_sample = ucrv
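+
+# Condensed sketch of the Gaussian-copula step above for two RVs with
+# correlation 0.8, reusing the module's norm and cholesky imports
+# (illustrative numbers).
+#
+# >>> rho = np.array([[1.0, 0.8], [0.8, 1.0]])
+# >>> u = np.random.default_rng(0).random((2, 1000))  # independent uniforms
+# >>> z = norm.ppf(u)                                 # standard normal space
+# >>> zc = cholesky(rho, lower=True) @ z              # impose the correlation
+# >>> uc = norm.cdf(zc)                               # back to correlated uniforms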
- def orthotope_density(self, lower=np.nan, upper=np.nan, var_subset=None):
+ def orthotope_density(
+ self,
+ lower: np.ndarray | float = np.nan,
+ upper: np.ndarray | float = np.nan,
+ var_subset: list[str] | None = None,
+ ) -> np.ndarray:
"""
Estimate the probability density within an orthotope for the RV set.
@@ -2201,16 +2458,16 @@ def orthotope_density(self, lower=np.nan, upper=np.nan, var_subset=None):
Parameters
----------
- lower: float ndarray, optional, default: None
+ lower: float ndarray, optional, default: np.nan
Lower bound(s) of the orthotope. A scalar value can be used for a
univariate RV; a list of bounds is expected in multivariate cases.
If the orthotope is not bounded from below in a dimension, use
- 'None' to that dimension.
- upper: float ndarray, optional, default: None
+            'np.nan' for that dimension.
+ upper: float ndarray, optional, default: np.nan
Upper bound(s) of the orthotope. A scalar value can be used for a
univariate RV; a list of bounds is expected in multivariate cases.
If the orthotope is not bounded from above in a dimension, use
- 'None' to that dimension.
+            'np.nan' for that dimension.
var_subset: list of strings, optional, default: None
If provided, allows for selecting only a subset of the variables in
the RV_set for the density calculation.
@@ -2224,13 +2481,17 @@ def orthotope_density(self, lower=np.nan, upper=np.nan, var_subset=None):
Estimate of the error in alpha.
"""
+ if isinstance(lower, float):
+ lower = np.array([lower])
+ if isinstance(upper, float):
+ upper = np.array([upper])
if np.any(~np.isnan(lower)):
target_shape = lower.shape
elif np.any(~np.isnan(upper)):
target_shape = upper.shape
else:
- return 1.0
+ return np.array([1.0])
lower_std = np.full(target_shape, np.nan)
upper_std = np.full(target_shape, np.nan)
@@ -2255,40 +2516,41 @@ def orthotope_density(self, lower=np.nan, upper=np.nan, var_subset=None):
lower_std = lower_std.T
upper_std = upper_std.T
- OD = [
+ od = [
mvn_orthotope_density(
mu=np.zeros(len(variables)),
- COV=self.Rho(var_subset),
+ cov=self.Rho(var_subset),
lower=l_i,
upper=u_i,
)[0]
for l_i, u_i in zip(lower_std, upper_std)
]
- return np.asarray(OD)
+ return np.asarray(od)
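+
+# Hedged usage sketch for orthotope_density, where rv_set is a
+# previously constructed two-variable RandomVariableSet (hypothetical)
+# and np.nan leaves a side unbounded.
+#
+# >>> alpha = rv_set.orthotope_density(
+# ...     lower=np.array([0.0, np.nan]), upper=np.array([2.0, 1.0])
+# ... )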
class RandomVariableRegistry:
- """
- Description
+ """Random variable registry."""
- Parameters
- ----------
-
- """
+ __slots__: list[str] = ['_rng', '_sets', '_variables']
- def __init__(self, rng):
+ def __init__(self, rng: np.random.Generator) -> None:
"""
+ Instantiate a random variable registry.
+
+ Parameters
+ ----------
rng: numpy.random._generator.Generator
Random variable generator object.
- e.g.: np.random.default_rng(seed)
+ e.g.: np.random.default_rng(seed).
+
"""
self._rng = rng
- self._variables = {}
- self._sets = {}
+ self._variables: dict[str, BaseRandomVariable] = {}
+ self._sets: dict[str, RandomVariableSet] = {}
@property
- def RV(self):
+ def RV(self) -> dict[str, BaseRandomVariable]: # noqa: N802
"""
Returns all random variable(s) in the registry.
@@ -2300,9 +2562,9 @@ def RV(self):
"""
return self._variables
- def RVs(self, keys):
+ def RVs(self, keys: list[str]) -> dict[str, BaseRandomVariable]: # noqa: N802
"""
- Returns a subset of the random variables in the registry
+ Return a subset of the random variables in the registry.
Parameters
----------
@@ -2317,7 +2579,7 @@ def RVs(self, keys):
"""
return {name: self._variables[name] for name in keys}
- def add_RV(self, RV):
+ def add_RV(self, rv: BaseRandomVariable) -> None: # noqa: N802
"""
Add a new random variable to the registry.
@@ -2327,12 +2589,13 @@ def add_RV(self, RV):
When the RV already exists in the registry
"""
- if RV.name in self._variables:
- raise ValueError(f'RV {RV.name} already exists in the registry.')
- self._variables.update({RV.name: RV})
+ if rv.name in self._variables:
+ msg = f'RV {rv.name} already exists in the registry.'
+ raise ValueError(msg)
+ self._variables.update({rv.name: rv})
@property
- def RV_set(self):
+ def RV_set(self) -> dict[str, RandomVariableSet]: # noqa: N802
"""
Return the random variable set(s) in the registry.
@@ -2344,14 +2607,12 @@ def RV_set(self):
"""
return self._sets
- def add_RV_set(self, RV_set):
- """
- Add a new set of random variables to the registry
- """
- self._sets.update({RV_set.name: RV_set})
+ def add_RV_set(self, rv_set: RandomVariableSet) -> None: # noqa: N802
+ """Add a new set of random variables to the registry."""
+ self._sets.update({rv_set.name: rv_set})
@property
- def RV_sample(self):
+ def RV_sample(self) -> dict[str, np.ndarray | None]: # noqa: N802
"""
Return the sample for every random variable in the registry.
@@ -2363,13 +2624,12 @@ def RV_sample(self):
"""
return {name: rv.sample for name, rv in self.RV.items()}
- def generate_sample(self, sample_size, method):
+ def generate_sample(self, sample_size: int, method: str) -> None:
"""
- Generates samples for all variables in the registry.
+ Generate samples for all variables in the registry.
Parameters
----------
-
sample_size: int
The number of samples requested per variable.
method: str
@@ -2386,10 +2646,9 @@ def generate_sample(self, sample_size, method):
When the RV parent class is Unknown
"""
-
# Generate a dictionary with IDs of the free (non-anchored and
# non-deterministic) variables
- RV_list = [
+ rv_list = [
RV_name
for RV_name, RV in self.RV.items()
if (
@@ -2397,51 +2656,49 @@ def generate_sample(self, sample_size, method):
or (RV.distribution in {'deterministic', 'coupled_empirical'})
)
]
- RV_ID = {RV_name: ID for ID, RV_name in enumerate(RV_list)}
- RV_count = len(RV_ID)
+ rv_id = {RV_name: ID for ID, RV_name in enumerate(rv_list)}
+ rv_count = len(rv_id)
# Generate controlling samples from a uniform distribution for free RVs
if 'LHS' in method:
bin_low = np.array(
- [self._rng.permutation(sample_size) for i in range(RV_count)]
+ [self._rng.permutation(sample_size) for i in range(rv_count)]
)
if method == 'LHS_midpoint':
- U_RV = np.ones([RV_count, sample_size]) * 0.5
- U_RV = (bin_low + U_RV) / sample_size
+ u_rv = np.ones([rv_count, sample_size]) * 0.5
+ u_rv = (bin_low + u_rv) / sample_size
elif method == 'LHS':
- U_RV = self._rng.random(size=[RV_count, sample_size])
- U_RV = (bin_low + U_RV) / sample_size
+ u_rv = self._rng.random(size=[rv_count, sample_size])
+ u_rv = (bin_low + u_rv) / sample_size
elif method == 'MonteCarlo':
- U_RV = self._rng.random(size=[RV_count, sample_size])
+ u_rv = self._rng.random(size=[rv_count, sample_size])
# Assign the controlling samples to the RVs
- for RV_name, RV_id in RV_ID.items():
- self.RV[RV_name].uni_sample = U_RV[RV_id]
+ for rv_name, rvid in rv_id.items():
+ self.RV[rv_name].uni_sample = u_rv[rvid]
# Apply correlations for the pre-defined RV sets
- for RV_set in self.RV_set.values():
+ for rv_set in self.RV_set.values():
# prepare the correlated uniform distribution for the set
- RV_set.apply_correlation()
+ rv_set.apply_correlation()
# Convert from uniform to the target distribution for every RV
- for RV in self.RV.values():
- if RV.__class__.__mro__[1] is RandomVariable:
- # no sample size needed, since that information is
- # available in the uniform sample
- RV.inverse_transform_sampling()
- elif RV.__class__.__mro__[1] is UtilityRandomVariable:
- RV.inverse_transform_sampling(sample_size)
+ for rv in self.RV.values():
+ if isinstance(rv, UtilityRandomVariable):
+ rv.inverse_transform_sampling(sample_size)
+ elif isinstance(rv, RandomVariable):
+ rv.inverse_transform_sampling()
else:
- raise NotImplementedError('Unknown RV parent class.')
+ msg = 'Unknown RV parent class.'
+ raise NotImplementedError(msg)
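+
+# Sketch of the LHS construction above: with rv_count=1 and
+# sample_size=4, 'LHS_midpoint' places one sample at the center of each
+# probability bin, i.e. a permutation of (b + 0.5) / 4 for b in
+# {0, 1, 2, 3} -> {0.125, 0.375, 0.625, 0.875}; 'LHS' instead draws a
+# uniform offset within each bin.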
-def rv_class_map(distribution_name):
+def rv_class_map(distribution_name: str) -> type[BaseRandomVariable]:
"""
- Maps convenient distribution names to their corresponding random
- variable class.
+    Map convenient distribution names to their corresponding class.
Parameters
----------
@@ -2459,14 +2716,16 @@ def rv_class_map(distribution_name):
If the given distribution name does not correspond to a
distribution class.
-
"""
if pd.isna(distribution_name):
distribution_name = 'deterministic'
distribution_map = {
- 'normal': NormalRandomVariable,
+ 'normal': Normal_COV,
+ 'normal_std': Normal_STD,
+ 'normal_cov': Normal_COV,
'lognormal': LogNormalRandomVariable,
'uniform': UniformRandomVariable,
+ 'weibull': WeibullRandomVariable,
'multilinear_CDF': MultilinearCDFRandomVariable,
'empirical': EmpiricalRandomVariable,
'coupled_empirical': CoupledEmpiricalRandomVariable,
@@ -2474,5 +2733,6 @@ def rv_class_map(distribution_name):
'multinomial': MultinomialRandomVariable,
}
if distribution_name not in distribution_map:
- raise ValueError(f'Unsupported distribution: {distribution_name}')
+ msg = f'Unsupported distribution: {distribution_name}'
+ raise ValueError(msg)
return distribution_map[distribution_name]
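+
+# Usage sketch (hypothetical names):
+#
+# >>> rv_class = rv_class_map('weibull')
+# >>> rv = rv_class('demand', np.array([2.0, 1.5]))  # WeibullRandomVariable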
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..cf54647e5
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,45 @@
+[tool.ruff]
+line-length = 85
+exclude = [
+ "rulesets",
+ "pelicun/tests/dl_calculation/e7/auto_HU_NJ.py",
+ "pelicun/tests/dl_calculation/e8/auto_HU_LA.py",
+ "pelicun/tests/dl_calculation/e9/custom_pop.py"
+]
+
+[tool.ruff.lint]
+# Enable all known categories
+select = ["ALL"]
+ignore = ["ANN101", "D211", "D212", "Q000", "Q003", "COM812", "D203", "ISC001", "E501", "ERA001", "PGH003", "FIX002", "TD003", "S101", "N801", "S311", "G004", "SIM102", "SIM108", "NPY002", "F401"]
+preview = true
+
+[tool.ruff.lint.pydocstyle]
+convention = "numpy"
+
+[tool.ruff.lint.pylint]
+max-args=15
+max-locals=50
+max-returns=11
+max-branches=50
+max-statements=150
+max-bool-expr=5
+
+[tool.ruff.lint.per-file-ignores]
+"pelicun/tests/*" = ["D", "N802", "SLF001", "PLR2004", "PLR6301"]
+"pelicun/resources/auto/*" = ['PLR', 'T', 'N', 'ANN', 'D', 'PTH', 'INP', 'DOC', 'RET', 'TD']
+"pelicun/tools/HDF_to_CSV.py" = ["ALL"]
+"pelicun/tests/validation/inactive/*" = ["T201", "B018", "ANN", "PD"]
+"pelicun/tests/dl_calculation/rulesets/*" = ["N999"]
+"doc/source/examples/notebooks/*" = ["INP001", "CPY001", "D400", "B018", "F821", "T201", "T203"]
+
+[tool.ruff.format]
+quote-style = "single"
+
+[tool.codespell]
+ignore-words = ["ignore_words.txt"]
+skip = ["*.html", "./htmlcov/*", "./doc_src/build/*", "./pelicun.egg-info/*", "./doc_src/*", "./doc/build/*", "*/rulesets/*", "custom_pop.py", "*/SimCenterDBDL/*", "auto_HU_NJ.py", "auto_HU_LA.py", "custom_pop.py"]
+
+[tool.mypy]
+ignore_missing_imports = true
+exclude = "flycheck"
+namespace_packages = false
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 000000000..2762a03a5
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,7 @@
+[pytest]
+filterwarnings =
+ ignore:.*errors='ignore' is deprecated and will raise.*:FutureWarning
+ ignore:.*Downcasting object dtype arrays on.*:FutureWarning
+ ignore:.*invalid value encountered in multiply.*:RuntimeWarning
+ ignore:.*invalid value encountered in add.*:RuntimeWarning
+ ignore:.*DataFrameGroupBy\.apply operated on the grouping columns.*:DeprecationWarning
diff --git a/run_checks.sh b/run_checks.sh
new file mode 100755
index 000000000..7b5487bec
--- /dev/null
+++ b/run_checks.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Spell-check
+echo "Spell-checking."
+echo
+codespell .
+if [ $? -ne 0 ]; then
+ echo "Spell-checking failed."
+ exit 1
+fi
+
+# Check formatting with ruff
+echo "Checking formatting with 'ruff format --diff'."
+echo
+ruff format --diff
+if [ $? -ne 0 ]; then
+ echo "ruff format failed."
+ exit 1
+fi
+
+# Run ruff for linting
+echo "Linting with 'ruff check --fix'."
+echo
+ruff check --fix --output-format concise
+if [ $? -ne 0 ]; then
+ echo "ruff check failed."
+ exit 1
+fi
+
+# Run mypy for type checking
+echo "Type checking with mypy."
+echo
+mypy pelicun
+if [ $? -ne 0 ]; then
+ echo "mypy failed. Exiting."
+ exit 1
+fi
+
+# Run pytest for testing and generate coverage report
+echo "Running unit-tests."
+echo
+python -m pytest pelicun/tests --cov=pelicun --cov-report html -n auto
+if [ $? -ne 0 ]; then
+ echo "pytest failed. Exiting."
+ exit 1
+fi
+
+echo "All checks passed successfully."
+echo
diff --git a/setup.py b/setup.py
index 38f62bf53..e63c272f7 100644
--- a/setup.py
+++ b/setup.py
@@ -1,28 +1,63 @@
-"""
-setup.py file of the `pelicun` package.
+#
+# Copyright (c) 2023 Leland Stanford Junior University
+# Copyright (c) 2023 The Regents of the University of California
+#
+# This file is part of pelicun.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the BSD 3-Clause License along with
+# pelicun. If not, see <http://www.opensource.org/licenses/>.
-"""
+"""setup.py file of the `pelicun` package."""
+
+from pathlib import Path
+
+from setuptools import find_packages, setup
-import io
-from setuptools import setup, find_packages
import pelicun
-def read(*filenames, **kwargs):
- """
- Utility function to read multiple files into a string
- """
+def read(*filenames, **kwargs) -> str:  # noqa: ANN002, ANN003
+ """Read multiple files into a string."""
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
- with io.open(filename, encoding=encoding) as f:
+ with Path(filename).open(encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.md')
+# TODO(JVM): update documentation requirements, remove those no longer
+# used.
+
setup(
name='pelicun',
version=pelicun.__version__,
@@ -45,23 +80,41 @@ def read(*filenames, **kwargs):
'numpy>=1.22.0, <2.0',
'scipy>=1.7.0, <2.0',
'pandas>=1.4.0, <3.0',
- #'tables>=3.7.0',
+ 'colorama>=0.4.0, <0.5.0',
+ 'numexpr>=2.8, <3.0',
+ 'jsonschema>=4.22.0, <5.0',
+ # 'tables>=3.7.0',
],
extras_require={
'development': [
+ 'codespell',
'flake8',
- 'pylint',
- 'black',
- 'pytest',
- 'pytest-cov',
+ 'flake8-bugbear',
+ 'flake8-rst',
+ 'flake8-rst-docstrings',
'glob2',
+ 'jsonpath2',
'jupyter',
'jupytext',
+ 'mypy',
+ 'nbsphinx',
+ 'numpydoc',
+ 'pandas-stubs',
+ 'pydocstyle',
+ 'pylint',
+ 'pylint-pytest',
+ 'pytest',
+ 'pytest-cov',
+ 'pytest-xdist',
+ 'rendre>0.0.14',
+ 'ruff==0.7.0',
'sphinx',
'sphinx-autoapi',
- 'nbsphinx',
- 'flake8-rst',
- 'flake8-rst-docstrings',
+ 'sphinx-rtd-theme',
+ 'sphinx_design',
+ 'sphinxcontrib-bibtex',
+ 'types-colorama',
],
},
classifiers=[